// SPDX-License-Identifier: GPL-2.0-only
/*
* GPIO support for Cirrus Logic Madera codecs
*
* Copyright (C) 2015-2018 Cirrus Logic
*/
#include <linux/gpio/driver.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mfd/madera/core.h>
#include <linux/mfd/madera/pdata.h>
#include <linux/mfd/madera/registers.h>
struct madera_gpio {
struct madera *madera;
/* storage space for the gpio_chip we're using */
struct gpio_chip gpio_chip;
};
static int madera_gpio_get_direction(struct gpio_chip *chip,
unsigned int offset)
{
struct madera_gpio *madera_gpio = gpiochip_get_data(chip);
struct madera *madera = madera_gpio->madera;
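	/*
	 * Each Madera GPIO has a pair of consecutive control registers
	 * (CTRL_1 holds the level, CTRL_2 the direction), hence the
	 * register stride of two used throughout this driver.
	 */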
unsigned int reg_offset = 2 * offset;
unsigned int val;
int ret;
ret = regmap_read(madera->regmap, MADERA_GPIO1_CTRL_2 + reg_offset,
&val);
if (ret < 0)
return ret;
if (val & MADERA_GP1_DIR_MASK)
return GPIO_LINE_DIRECTION_IN;
return GPIO_LINE_DIRECTION_OUT;
}
static int madera_gpio_direction_in(struct gpio_chip *chip, unsigned int offset)
{
struct madera_gpio *madera_gpio = gpiochip_get_data(chip);
struct madera *madera = madera_gpio->madera;
unsigned int reg_offset = 2 * offset;
return regmap_update_bits(madera->regmap,
MADERA_GPIO1_CTRL_2 + reg_offset,
MADERA_GP1_DIR_MASK, MADERA_GP1_DIR);
}
static int madera_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
struct madera_gpio *madera_gpio = gpiochip_get_data(chip);
struct madera *madera = madera_gpio->madera;
unsigned int reg_offset = 2 * offset;
unsigned int val;
int ret;
ret = regmap_read(madera->regmap, MADERA_GPIO1_CTRL_1 + reg_offset,
&val);
if (ret < 0)
return ret;
return !!(val & MADERA_GP1_LVL_MASK);
}
static int madera_gpio_direction_out(struct gpio_chip *chip,
unsigned int offset, int value)
{
struct madera_gpio *madera_gpio = gpiochip_get_data(chip);
struct madera *madera = madera_gpio->madera;
unsigned int reg_offset = 2 * offset;
unsigned int reg_val = value ? MADERA_GP1_LVL : 0;
int ret;
ret = regmap_update_bits(madera->regmap,
MADERA_GPIO1_CTRL_2 + reg_offset,
MADERA_GP1_DIR_MASK, 0);
if (ret < 0)
return ret;
return regmap_update_bits(madera->regmap,
MADERA_GPIO1_CTRL_1 + reg_offset,
MADERA_GP1_LVL_MASK, reg_val);
}
static void madera_gpio_set(struct gpio_chip *chip, unsigned int offset,
int value)
{
struct madera_gpio *madera_gpio = gpiochip_get_data(chip);
struct madera *madera = madera_gpio->madera;
unsigned int reg_offset = 2 * offset;
unsigned int reg_val = value ? MADERA_GP1_LVL : 0;
int ret;
ret = regmap_update_bits(madera->regmap,
MADERA_GPIO1_CTRL_1 + reg_offset,
MADERA_GP1_LVL_MASK, reg_val);
/* set() doesn't return an error so log a warning */
if (ret)
dev_warn(madera->dev, "Failed to write to 0x%x (%d)\n",
MADERA_GPIO1_CTRL_1 + reg_offset, ret);
}
static const struct gpio_chip madera_gpio_chip = {
.label = "madera",
.owner = THIS_MODULE,
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
.get_direction = madera_gpio_get_direction,
.direction_input = madera_gpio_direction_in,
.get = madera_gpio_get,
.direction_output = madera_gpio_direction_out,
.set = madera_gpio_set,
.set_config = gpiochip_generic_config,
.can_sleep = true,
};
static int madera_gpio_probe(struct platform_device *pdev)
{
struct madera *madera = dev_get_drvdata(pdev->dev.parent);
struct madera_pdata *pdata = &madera->pdata;
struct madera_gpio *madera_gpio;
int ret;
madera_gpio = devm_kzalloc(&pdev->dev, sizeof(*madera_gpio),
GFP_KERNEL);
if (!madera_gpio)
return -ENOMEM;
madera_gpio->madera = madera;
/* Construct suitable gpio_chip from the template in madera_gpio_chip */
madera_gpio->gpio_chip = madera_gpio_chip;
madera_gpio->gpio_chip.parent = pdev->dev.parent;
switch (madera->type) {
case CS47L15:
madera_gpio->gpio_chip.ngpio = CS47L15_NUM_GPIOS;
break;
case CS47L35:
madera_gpio->gpio_chip.ngpio = CS47L35_NUM_GPIOS;
break;
case CS47L85:
case WM1840:
madera_gpio->gpio_chip.ngpio = CS47L85_NUM_GPIOS;
break;
case CS47L90:
case CS47L91:
madera_gpio->gpio_chip.ngpio = CS47L90_NUM_GPIOS;
break;
case CS42L92:
case CS47L92:
case CS47L93:
madera_gpio->gpio_chip.ngpio = CS47L92_NUM_GPIOS;
break;
default:
dev_err(&pdev->dev, "Unknown chip variant %d\n", madera->type);
return -EINVAL;
}
/* We want to be usable on systems that don't use devicetree or acpi */
if (pdata->gpio_base)
madera_gpio->gpio_chip.base = pdata->gpio_base;
else
madera_gpio->gpio_chip.base = -1;
ret = devm_gpiochip_add_data(&pdev->dev,
&madera_gpio->gpio_chip,
madera_gpio);
if (ret < 0) {
dev_dbg(&pdev->dev, "Could not register gpiochip, %d\n", ret);
return ret;
}
/*
* This is part of a composite MFD device which can only be used with
* the corresponding pinctrl driver. On all supported silicon the GPIO
* to pinctrl mapping is fixed in the silicon, so we register it
* explicitly instead of requiring a redundant gpio-ranges in the
* devicetree.
* In any case we also want to work on systems that don't use devicetree
* or acpi.
*/
ret = gpiochip_add_pin_range(&madera_gpio->gpio_chip, "madera-pinctrl",
0, 0, madera_gpio->gpio_chip.ngpio);
if (ret) {
dev_dbg(&pdev->dev, "Failed to add pin range (%d)\n", ret);
return ret;
}
return 0;
}
static struct platform_driver madera_gpio_driver = {
.driver = {
.name = "madera-gpio",
},
.probe = madera_gpio_probe,
};
module_platform_driver(madera_gpio_driver);
MODULE_SOFTDEP("pre: pinctrl-madera");
MODULE_DESCRIPTION("GPIO interface for Madera codecs");
MODULE_AUTHOR("Nariman Poushin <[email protected]>");
MODULE_AUTHOR("Richard Fitzgerald <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:madera-gpio");
/* end of linux-master: drivers/gpio/gpio-madera.c */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2013 Altera Corporation
* Based on gpio-mpc8xxx.c
*/
#include <linux/io.h>
#include <linux/module.h>
#include <linux/gpio/driver.h>
#include <linux/gpio/legacy-of-mm-gpiochip.h>
#include <linux/platform_device.h>
#define ALTERA_GPIO_MAX_NGPIO 32
#define ALTERA_GPIO_DATA 0x0
#define ALTERA_GPIO_DIR 0x4
#define ALTERA_GPIO_IRQ_MASK 0x8
#define ALTERA_GPIO_EDGE_CAP 0xc
/**
* struct altera_gpio_chip
* @mmchip : memory mapped chip structure.
* @gpio_lock : synchronization lock so that new irq/set/get requests
* will be blocked until the current one completes.
* @interrupt_trigger : specifies the hardware configured IRQ trigger type
* (rising, falling, both, high)
* @mapped_irq : kernel mapped irq number.
*/
struct altera_gpio_chip {
struct of_mm_gpio_chip mmchip;
raw_spinlock_t gpio_lock;
int interrupt_trigger;
int mapped_irq;
};
static void altera_gpio_irq_unmask(struct irq_data *d)
{
struct altera_gpio_chip *altera_gc;
struct of_mm_gpio_chip *mm_gc;
unsigned long flags;
u32 intmask;
altera_gc = gpiochip_get_data(irq_data_get_irq_chip_data(d));
mm_gc = &altera_gc->mmchip;
gpiochip_enable_irq(&mm_gc->gc, irqd_to_hwirq(d));
raw_spin_lock_irqsave(&altera_gc->gpio_lock, flags);
intmask = readl(mm_gc->regs + ALTERA_GPIO_IRQ_MASK);
/* Set ALTERA_GPIO_IRQ_MASK bit to unmask */
intmask |= BIT(irqd_to_hwirq(d));
writel(intmask, mm_gc->regs + ALTERA_GPIO_IRQ_MASK);
raw_spin_unlock_irqrestore(&altera_gc->gpio_lock, flags);
}
static void altera_gpio_irq_mask(struct irq_data *d)
{
struct altera_gpio_chip *altera_gc;
struct of_mm_gpio_chip *mm_gc;
unsigned long flags;
u32 intmask;
altera_gc = gpiochip_get_data(irq_data_get_irq_chip_data(d));
mm_gc = &altera_gc->mmchip;
raw_spin_lock_irqsave(&altera_gc->gpio_lock, flags);
intmask = readl(mm_gc->regs + ALTERA_GPIO_IRQ_MASK);
/* Clear ALTERA_GPIO_IRQ_MASK bit to mask */
intmask &= ~BIT(irqd_to_hwirq(d));
writel(intmask, mm_gc->regs + ALTERA_GPIO_IRQ_MASK);
raw_spin_unlock_irqrestore(&altera_gc->gpio_lock, flags);
gpiochip_disable_irq(&mm_gc->gc, irqd_to_hwirq(d));
}
/*
* This controller's IRQ type is synthesized in hardware, so this function
* just checks if the requested set_type matches the synthesized IRQ type
*/
static int altera_gpio_irq_set_type(struct irq_data *d,
unsigned int type)
{
struct altera_gpio_chip *altera_gc;
altera_gc = gpiochip_get_data(irq_data_get_irq_chip_data(d));
if (type == IRQ_TYPE_NONE) {
irq_set_handler_locked(d, handle_bad_irq);
return 0;
}
if (type == altera_gc->interrupt_trigger) {
if (type == IRQ_TYPE_LEVEL_HIGH)
irq_set_handler_locked(d, handle_level_irq);
else
irq_set_handler_locked(d, handle_simple_irq);
return 0;
}
irq_set_handler_locked(d, handle_bad_irq);
return -EINVAL;
}
static unsigned int altera_gpio_irq_startup(struct irq_data *d)
{
altera_gpio_irq_unmask(d);
return 0;
}
static int altera_gpio_get(struct gpio_chip *gc, unsigned offset)
{
struct of_mm_gpio_chip *mm_gc;
mm_gc = to_of_mm_gpio_chip(gc);
return !!(readl(mm_gc->regs + ALTERA_GPIO_DATA) & BIT(offset));
}
static void altera_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
{
struct of_mm_gpio_chip *mm_gc;
struct altera_gpio_chip *chip;
unsigned long flags;
unsigned int data_reg;
mm_gc = to_of_mm_gpio_chip(gc);
chip = gpiochip_get_data(gc);
raw_spin_lock_irqsave(&chip->gpio_lock, flags);
data_reg = readl(mm_gc->regs + ALTERA_GPIO_DATA);
if (value)
data_reg |= BIT(offset);
else
data_reg &= ~BIT(offset);
writel(data_reg, mm_gc->regs + ALTERA_GPIO_DATA);
raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
}
static int altera_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
{
struct of_mm_gpio_chip *mm_gc;
struct altera_gpio_chip *chip;
unsigned long flags;
unsigned int gpio_ddr;
mm_gc = to_of_mm_gpio_chip(gc);
chip = gpiochip_get_data(gc);
raw_spin_lock_irqsave(&chip->gpio_lock, flags);
/* Set pin as input, assumes software controlled IP */
gpio_ddr = readl(mm_gc->regs + ALTERA_GPIO_DIR);
gpio_ddr &= ~BIT(offset);
writel(gpio_ddr, mm_gc->regs + ALTERA_GPIO_DIR);
raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
return 0;
}
static int altera_gpio_direction_output(struct gpio_chip *gc,
unsigned offset, int value)
{
struct of_mm_gpio_chip *mm_gc;
struct altera_gpio_chip *chip;
unsigned long flags;
unsigned int data_reg, gpio_ddr;
mm_gc = to_of_mm_gpio_chip(gc);
chip = gpiochip_get_data(gc);
raw_spin_lock_irqsave(&chip->gpio_lock, flags);
/* Sets the GPIO value */
data_reg = readl(mm_gc->regs + ALTERA_GPIO_DATA);
if (value)
data_reg |= BIT(offset);
else
data_reg &= ~BIT(offset);
writel(data_reg, mm_gc->regs + ALTERA_GPIO_DATA);
/* Set pin as output, assumes software controlled IP */
gpio_ddr = readl(mm_gc->regs + ALTERA_GPIO_DIR);
gpio_ddr |= BIT(offset);
writel(gpio_ddr, mm_gc->regs + ALTERA_GPIO_DIR);
raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
return 0;
}
static void altera_gpio_irq_edge_handler(struct irq_desc *desc)
{
struct altera_gpio_chip *altera_gc;
struct irq_chip *chip;
struct of_mm_gpio_chip *mm_gc;
struct irq_domain *irqdomain;
unsigned long status;
int i;
altera_gc = gpiochip_get_data(irq_desc_get_handler_data(desc));
chip = irq_desc_get_chip(desc);
mm_gc = &altera_gc->mmchip;
irqdomain = altera_gc->mmchip.gc.irq.domain;
chained_irq_enter(chip, desc);
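	/*
	 * Loop until no enabled edge remains captured: acking the current
	 * batch and then re-reading the capture register ensures that edges
	 * arriving while this handler runs are not lost.
	 */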
while ((status =
(readl(mm_gc->regs + ALTERA_GPIO_EDGE_CAP) &
readl(mm_gc->regs + ALTERA_GPIO_IRQ_MASK)))) {
writel(status, mm_gc->regs + ALTERA_GPIO_EDGE_CAP);
for_each_set_bit(i, &status, mm_gc->gc.ngpio)
generic_handle_domain_irq(irqdomain, i);
}
chained_irq_exit(chip, desc);
}
static void altera_gpio_irq_level_high_handler(struct irq_desc *desc)
{
struct altera_gpio_chip *altera_gc;
struct irq_chip *chip;
struct of_mm_gpio_chip *mm_gc;
struct irq_domain *irqdomain;
unsigned long status;
int i;
altera_gc = gpiochip_get_data(irq_desc_get_handler_data(desc));
chip = irq_desc_get_chip(desc);
mm_gc = &altera_gc->mmchip;
irqdomain = altera_gc->mmchip.gc.irq.domain;
chained_irq_enter(chip, desc);
status = readl(mm_gc->regs + ALTERA_GPIO_DATA);
status &= readl(mm_gc->regs + ALTERA_GPIO_IRQ_MASK);
for_each_set_bit(i, &status, mm_gc->gc.ngpio)
generic_handle_domain_irq(irqdomain, i);
chained_irq_exit(chip, desc);
}
static const struct irq_chip altera_gpio_irq_chip = {
.name = "altera-gpio",
.irq_mask = altera_gpio_irq_mask,
.irq_unmask = altera_gpio_irq_unmask,
.irq_set_type = altera_gpio_irq_set_type,
.irq_startup = altera_gpio_irq_startup,
.irq_shutdown = altera_gpio_irq_mask,
.flags = IRQCHIP_IMMUTABLE,
GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static int altera_gpio_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
int reg, ret;
struct altera_gpio_chip *altera_gc;
struct gpio_irq_chip *girq;
altera_gc = devm_kzalloc(&pdev->dev, sizeof(*altera_gc), GFP_KERNEL);
if (!altera_gc)
return -ENOMEM;
raw_spin_lock_init(&altera_gc->gpio_lock);
if (of_property_read_u32(node, "altr,ngpio", ®))
/* By default assume maximum ngpio */
altera_gc->mmchip.gc.ngpio = ALTERA_GPIO_MAX_NGPIO;
else
altera_gc->mmchip.gc.ngpio = reg;
if (altera_gc->mmchip.gc.ngpio > ALTERA_GPIO_MAX_NGPIO) {
dev_warn(&pdev->dev,
"ngpio is greater than %d, defaulting to %d\n",
ALTERA_GPIO_MAX_NGPIO, ALTERA_GPIO_MAX_NGPIO);
altera_gc->mmchip.gc.ngpio = ALTERA_GPIO_MAX_NGPIO;
}
altera_gc->mmchip.gc.direction_input = altera_gpio_direction_input;
altera_gc->mmchip.gc.direction_output = altera_gpio_direction_output;
altera_gc->mmchip.gc.get = altera_gpio_get;
altera_gc->mmchip.gc.set = altera_gpio_set;
altera_gc->mmchip.gc.owner = THIS_MODULE;
altera_gc->mmchip.gc.parent = &pdev->dev;
altera_gc->mapped_irq = platform_get_irq_optional(pdev, 0);
if (altera_gc->mapped_irq < 0)
goto skip_irq;
if (of_property_read_u32(node, "altr,interrupt-type", ®)) {
dev_err(&pdev->dev,
"altr,interrupt-type value not set in device tree\n");
return -EINVAL;
}
altera_gc->interrupt_trigger = reg;
girq = &altera_gc->mmchip.gc.irq;
gpio_irq_chip_set_chip(girq, &altera_gpio_irq_chip);
if (altera_gc->interrupt_trigger == IRQ_TYPE_LEVEL_HIGH)
		girq->parent_handler = altera_gpio_irq_level_high_handler;
else
girq->parent_handler = altera_gpio_irq_edge_handler;
girq->num_parents = 1;
girq->parents = devm_kcalloc(&pdev->dev, 1, sizeof(*girq->parents),
GFP_KERNEL);
if (!girq->parents)
return -ENOMEM;
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_bad_irq;
girq->parents[0] = altera_gc->mapped_irq;
skip_irq:
ret = of_mm_gpiochip_add_data(node, &altera_gc->mmchip, altera_gc);
if (ret) {
dev_err(&pdev->dev, "Failed adding memory mapped gpiochip\n");
return ret;
}
platform_set_drvdata(pdev, altera_gc);
return 0;
}
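/*
 * For reference, a matching devicetree node (editor's sketch; the unit
 * address and interrupt specifier are assumptions for a typical Nios II
 * system) would look like:
 *
 *	gpio@ff200000 {
 *		compatible = "altr,pio-1.0";
 *		reg = <0xff200000 0x10>;
 *		interrupts = <0 45 4>;
 *		altr,ngpio = <8>;
 *		altr,interrupt-type = <IRQ_TYPE_EDGE_RISING>;
 *		#gpio-cells = <2>;
 *		gpio-controller;
 *		#interrupt-cells = <2>;
 *		interrupt-controller;
 *	};
 */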
static int altera_gpio_remove(struct platform_device *pdev)
{
struct altera_gpio_chip *altera_gc = platform_get_drvdata(pdev);
of_mm_gpiochip_remove(&altera_gc->mmchip);
return 0;
}
static const struct of_device_id altera_gpio_of_match[] = {
{ .compatible = "altr,pio-1.0", },
{},
};
MODULE_DEVICE_TABLE(of, altera_gpio_of_match);
static struct platform_driver altera_gpio_driver = {
.driver = {
.name = "altera_gpio",
.of_match_table = altera_gpio_of_match,
},
.probe = altera_gpio_probe,
.remove = altera_gpio_remove,
};
static int __init altera_gpio_init(void)
{
return platform_driver_register(&altera_gpio_driver);
}
subsys_initcall(altera_gpio_init);
static void __exit altera_gpio_exit(void)
{
platform_driver_unregister(&altera_gpio_driver);
}
module_exit(altera_gpio_exit);
MODULE_AUTHOR("Tien Hock Loh <[email protected]>");
MODULE_DESCRIPTION("Altera GPIO driver");
MODULE_LICENSE("GPL");
/* end of linux-master: drivers/gpio/gpio-altera.c */
// SPDX-License-Identifier: GPL-2.0-only
/*
* MAXIM MAX77620 GPIO driver
*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/mfd/max77620.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#define GPIO_REG_ADDR(offset) (MAX77620_REG_GPIO0 + (offset))
struct max77620_gpio {
struct gpio_chip gpio_chip;
struct regmap *rmap;
struct device *dev;
struct mutex buslock; /* irq_bus_lock */
unsigned int irq_type[MAX77620_GPIO_NR];
bool irq_enabled[MAX77620_GPIO_NR];
};
static irqreturn_t max77620_gpio_irqhandler(int irq, void *data)
{
struct max77620_gpio *gpio = data;
unsigned int value, offset;
unsigned long pending;
int err;
err = regmap_read(gpio->rmap, MAX77620_REG_IRQ_LVL2_GPIO, &value);
if (err < 0) {
dev_err(gpio->dev, "REG_IRQ_LVL2_GPIO read failed: %d\n", err);
return IRQ_NONE;
}
pending = value;
for_each_set_bit(offset, &pending, MAX77620_GPIO_NR) {
unsigned int virq;
virq = irq_find_mapping(gpio->gpio_chip.irq.domain, offset);
handle_nested_irq(virq);
}
return IRQ_HANDLED;
}
static void max77620_gpio_irq_mask(struct irq_data *data)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
struct max77620_gpio *gpio = gpiochip_get_data(chip);
gpio->irq_enabled[data->hwirq] = false;
gpiochip_disable_irq(chip, data->hwirq);
}
static void max77620_gpio_irq_unmask(struct irq_data *data)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
struct max77620_gpio *gpio = gpiochip_get_data(chip);
gpiochip_enable_irq(chip, data->hwirq);
gpio->irq_enabled[data->hwirq] = true;
}
static int max77620_gpio_set_irq_type(struct irq_data *data, unsigned int type)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
struct max77620_gpio *gpio = gpiochip_get_data(chip);
unsigned int irq_type;
switch (type) {
case IRQ_TYPE_EDGE_RISING:
irq_type = MAX77620_CNFG_GPIO_INT_RISING;
break;
case IRQ_TYPE_EDGE_FALLING:
irq_type = MAX77620_CNFG_GPIO_INT_FALLING;
break;
case IRQ_TYPE_EDGE_BOTH:
irq_type = MAX77620_CNFG_GPIO_INT_RISING |
MAX77620_CNFG_GPIO_INT_FALLING;
break;
default:
return -EINVAL;
}
gpio->irq_type[data->hwirq] = irq_type;
return 0;
}
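/*
 * The PMIC sits behind a slow (I2C) bus, so interrupt configuration is
 * deferred: set_irq_type(), irq_mask() and irq_unmask() only update
 * cached state while the bus lock below is held, and bus_sync_unlock()
 * then pushes the combined result to the hardware in one regmap update.
 */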
static void max77620_gpio_bus_lock(struct irq_data *data)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
struct max77620_gpio *gpio = gpiochip_get_data(chip);
mutex_lock(&gpio->buslock);
}
static void max77620_gpio_bus_sync_unlock(struct irq_data *data)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
struct max77620_gpio *gpio = gpiochip_get_data(chip);
unsigned int value, offset = data->hwirq;
int err;
value = gpio->irq_enabled[offset] ? gpio->irq_type[offset] : 0;
err = regmap_update_bits(gpio->rmap, GPIO_REG_ADDR(offset),
MAX77620_CNFG_GPIO_INT_MASK, value);
if (err < 0)
dev_err(chip->parent, "failed to update interrupt mask: %d\n",
err);
mutex_unlock(&gpio->buslock);
}
static const struct irq_chip max77620_gpio_irqchip = {
.name = "max77620-gpio",
.irq_mask = max77620_gpio_irq_mask,
.irq_unmask = max77620_gpio_irq_unmask,
.irq_set_type = max77620_gpio_set_irq_type,
.irq_bus_lock = max77620_gpio_bus_lock,
.irq_bus_sync_unlock = max77620_gpio_bus_sync_unlock,
.flags = IRQCHIP_IMMUTABLE | IRQCHIP_MASK_ON_SUSPEND,
GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static int max77620_gpio_dir_input(struct gpio_chip *gc, unsigned int offset)
{
struct max77620_gpio *mgpio = gpiochip_get_data(gc);
int ret;
ret = regmap_update_bits(mgpio->rmap, GPIO_REG_ADDR(offset),
MAX77620_CNFG_GPIO_DIR_MASK,
MAX77620_CNFG_GPIO_DIR_INPUT);
if (ret < 0)
dev_err(mgpio->dev, "CNFG_GPIOx dir update failed: %d\n", ret);
return ret;
}
static int max77620_gpio_get(struct gpio_chip *gc, unsigned int offset)
{
struct max77620_gpio *mgpio = gpiochip_get_data(gc);
unsigned int val;
int ret;
ret = regmap_read(mgpio->rmap, GPIO_REG_ADDR(offset), &val);
if (ret < 0) {
dev_err(mgpio->dev, "CNFG_GPIOx read failed: %d\n", ret);
return ret;
}
if (val & MAX77620_CNFG_GPIO_DIR_MASK)
return !!(val & MAX77620_CNFG_GPIO_INPUT_VAL_MASK);
else
return !!(val & MAX77620_CNFG_GPIO_OUTPUT_VAL_MASK);
}
static int max77620_gpio_dir_output(struct gpio_chip *gc, unsigned int offset,
int value)
{
struct max77620_gpio *mgpio = gpiochip_get_data(gc);
u8 val;
int ret;
val = (value) ? MAX77620_CNFG_GPIO_OUTPUT_VAL_HIGH :
MAX77620_CNFG_GPIO_OUTPUT_VAL_LOW;
ret = regmap_update_bits(mgpio->rmap, GPIO_REG_ADDR(offset),
MAX77620_CNFG_GPIO_OUTPUT_VAL_MASK, val);
if (ret < 0) {
dev_err(mgpio->dev, "CNFG_GPIOx val update failed: %d\n", ret);
return ret;
}
ret = regmap_update_bits(mgpio->rmap, GPIO_REG_ADDR(offset),
MAX77620_CNFG_GPIO_DIR_MASK,
MAX77620_CNFG_GPIO_DIR_OUTPUT);
if (ret < 0)
dev_err(mgpio->dev, "CNFG_GPIOx dir update failed: %d\n", ret);
return ret;
}
static int max77620_gpio_set_debounce(struct max77620_gpio *mgpio,
unsigned int offset,
unsigned int debounce)
{
u8 val;
int ret;
switch (debounce) {
case 0:
val = MAX77620_CNFG_GPIO_DBNC_None;
break;
case 1 ... 8000:
val = MAX77620_CNFG_GPIO_DBNC_8ms;
break;
case 8001 ... 16000:
val = MAX77620_CNFG_GPIO_DBNC_16ms;
break;
case 16001 ... 32000:
val = MAX77620_CNFG_GPIO_DBNC_32ms;
break;
default:
dev_err(mgpio->dev, "Illegal value %u\n", debounce);
return -EINVAL;
}
ret = regmap_update_bits(mgpio->rmap, GPIO_REG_ADDR(offset),
MAX77620_CNFG_GPIO_DBNC_MASK, val);
if (ret < 0)
dev_err(mgpio->dev, "CNFG_GPIOx_DBNC update failed: %d\n", ret);
return ret;
}
static void max77620_gpio_set(struct gpio_chip *gc, unsigned int offset,
int value)
{
struct max77620_gpio *mgpio = gpiochip_get_data(gc);
u8 val;
int ret;
val = (value) ? MAX77620_CNFG_GPIO_OUTPUT_VAL_HIGH :
MAX77620_CNFG_GPIO_OUTPUT_VAL_LOW;
ret = regmap_update_bits(mgpio->rmap, GPIO_REG_ADDR(offset),
MAX77620_CNFG_GPIO_OUTPUT_VAL_MASK, val);
if (ret < 0)
dev_err(mgpio->dev, "CNFG_GPIO_OUT update failed: %d\n", ret);
}
static int max77620_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
unsigned long config)
{
struct max77620_gpio *mgpio = gpiochip_get_data(gc);
switch (pinconf_to_config_param(config)) {
case PIN_CONFIG_DRIVE_OPEN_DRAIN:
return regmap_update_bits(mgpio->rmap, GPIO_REG_ADDR(offset),
MAX77620_CNFG_GPIO_DRV_MASK,
MAX77620_CNFG_GPIO_DRV_OPENDRAIN);
case PIN_CONFIG_DRIVE_PUSH_PULL:
return regmap_update_bits(mgpio->rmap, GPIO_REG_ADDR(offset),
MAX77620_CNFG_GPIO_DRV_MASK,
MAX77620_CNFG_GPIO_DRV_PUSHPULL);
case PIN_CONFIG_INPUT_DEBOUNCE:
return max77620_gpio_set_debounce(mgpio, offset,
pinconf_to_config_argument(config));
default:
break;
}
return -ENOTSUPP;
}
static int max77620_gpio_irq_init_hw(struct gpio_chip *gc)
{
struct max77620_gpio *gpio = gpiochip_get_data(gc);
unsigned int i;
int err;
/*
* GPIO interrupts may be left ON after bootloader, hence let's
* pre-initialize hardware to the expected state by disabling all
* the interrupts.
*/
for (i = 0; i < MAX77620_GPIO_NR; i++) {
err = regmap_update_bits(gpio->rmap, GPIO_REG_ADDR(i),
MAX77620_CNFG_GPIO_INT_MASK, 0);
if (err < 0) {
dev_err(gpio->dev,
"failed to disable interrupt: %d\n", err);
return err;
}
}
return 0;
}
static int max77620_gpio_probe(struct platform_device *pdev)
{
struct max77620_chip *chip = dev_get_drvdata(pdev->dev.parent);
struct max77620_gpio *mgpio;
struct gpio_irq_chip *girq;
unsigned int gpio_irq;
int ret;
ret = platform_get_irq(pdev, 0);
if (ret < 0)
return ret;
gpio_irq = ret;
mgpio = devm_kzalloc(&pdev->dev, sizeof(*mgpio), GFP_KERNEL);
if (!mgpio)
return -ENOMEM;
mutex_init(&mgpio->buslock);
mgpio->rmap = chip->rmap;
mgpio->dev = &pdev->dev;
mgpio->gpio_chip.label = pdev->name;
mgpio->gpio_chip.parent = pdev->dev.parent;
mgpio->gpio_chip.direction_input = max77620_gpio_dir_input;
mgpio->gpio_chip.get = max77620_gpio_get;
mgpio->gpio_chip.direction_output = max77620_gpio_dir_output;
mgpio->gpio_chip.set = max77620_gpio_set;
mgpio->gpio_chip.set_config = max77620_gpio_set_config;
mgpio->gpio_chip.ngpio = MAX77620_GPIO_NR;
mgpio->gpio_chip.can_sleep = 1;
mgpio->gpio_chip.base = -1;
girq = &mgpio->gpio_chip.irq;
gpio_irq_chip_set_chip(girq, &max77620_gpio_irqchip);
/* This will let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
girq->num_parents = 0;
girq->parents = NULL;
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_edge_irq;
girq->init_hw = max77620_gpio_irq_init_hw;
girq->threaded = true;
ret = devm_gpiochip_add_data(&pdev->dev, &mgpio->gpio_chip, mgpio);
if (ret < 0) {
dev_err(&pdev->dev, "gpio_init: Failed to add max77620_gpio\n");
return ret;
}
ret = devm_request_threaded_irq(&pdev->dev, gpio_irq, NULL,
max77620_gpio_irqhandler, IRQF_ONESHOT,
"max77620-gpio", mgpio);
if (ret < 0) {
dev_err(&pdev->dev, "failed to request IRQ: %d\n", ret);
return ret;
}
return 0;
}
static const struct platform_device_id max77620_gpio_devtype[] = {
{ .name = "max77620-gpio", },
{ .name = "max20024-gpio", },
{},
};
MODULE_DEVICE_TABLE(platform, max77620_gpio_devtype);
static struct platform_driver max77620_gpio_driver = {
.driver.name = "max77620-gpio",
.probe = max77620_gpio_probe,
.id_table = max77620_gpio_devtype,
};
module_platform_driver(max77620_gpio_driver);
MODULE_DESCRIPTION("GPIO interface for MAX77620 and MAX20024 PMIC");
MODULE_AUTHOR("Laxman Dewangan <[email protected]>");
MODULE_AUTHOR("Chaitanya Bandi <[email protected]>");
MODULE_LICENSE("GPL v2");
/* end of linux-master: drivers/gpio/gpio-max77620.c */
// SPDX-License-Identifier: GPL-2.0-only
/*
* sl28cpld GPIO driver
*
* Copyright 2020 Michael Walle <[email protected]>
*/
#include <linux/device.h>
#include <linux/gpio/driver.h>
#include <linux/gpio/regmap.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
/* GPIO flavor */
#define GPIO_REG_DIR 0x00
#define GPIO_REG_OUT 0x01
#define GPIO_REG_IN 0x02
#define GPIO_REG_IE 0x03
#define GPIO_REG_IP 0x04
/* input-only flavor */
#define GPI_REG_IN 0x00
/* output-only flavor */
#define GPO_REG_OUT 0x00
enum sl28cpld_gpio_type {
SL28CPLD_GPIO = 1,
SL28CPLD_GPI,
SL28CPLD_GPO,
};
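/* One regmap-irq line per bit of the 8-bit interrupt-pending/enable registers */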
static const struct regmap_irq sl28cpld_gpio_irqs[] = {
REGMAP_IRQ_REG_LINE(0, 8),
REGMAP_IRQ_REG_LINE(1, 8),
REGMAP_IRQ_REG_LINE(2, 8),
REGMAP_IRQ_REG_LINE(3, 8),
REGMAP_IRQ_REG_LINE(4, 8),
REGMAP_IRQ_REG_LINE(5, 8),
REGMAP_IRQ_REG_LINE(6, 8),
REGMAP_IRQ_REG_LINE(7, 8),
};
static int sl28cpld_gpio_irq_init(struct platform_device *pdev,
unsigned int base,
struct gpio_regmap_config *config)
{
struct regmap_irq_chip_data *irq_data;
struct regmap_irq_chip *irq_chip;
struct device *dev = &pdev->dev;
int irq, ret;
if (!device_property_read_bool(dev, "interrupt-controller"))
return 0;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
irq_chip = devm_kzalloc(dev, sizeof(*irq_chip), GFP_KERNEL);
if (!irq_chip)
return -ENOMEM;
irq_chip->name = "sl28cpld-gpio-irq";
irq_chip->irqs = sl28cpld_gpio_irqs;
irq_chip->num_irqs = ARRAY_SIZE(sl28cpld_gpio_irqs);
irq_chip->num_regs = 1;
irq_chip->status_base = base + GPIO_REG_IP;
irq_chip->unmask_base = base + GPIO_REG_IE;
irq_chip->ack_base = base + GPIO_REG_IP;
ret = devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(dev),
config->regmap, irq,
IRQF_SHARED | IRQF_ONESHOT,
0, irq_chip, &irq_data);
if (ret)
return ret;
config->irq_domain = regmap_irq_get_domain(irq_data);
return 0;
}
static int sl28cpld_gpio_probe(struct platform_device *pdev)
{
struct gpio_regmap_config config = {0};
enum sl28cpld_gpio_type type;
struct regmap *regmap;
u32 base;
int ret;
if (!pdev->dev.parent)
return -ENODEV;
type = (uintptr_t)device_get_match_data(&pdev->dev);
if (!type)
return -ENODEV;
ret = device_property_read_u32(&pdev->dev, "reg", &base);
if (ret)
return -EINVAL;
regmap = dev_get_regmap(pdev->dev.parent, NULL);
if (!regmap)
return -ENODEV;
config.regmap = regmap;
config.parent = &pdev->dev;
config.ngpio = 8;
switch (type) {
case SL28CPLD_GPIO:
config.reg_dat_base = base + GPIO_REG_IN;
config.reg_set_base = base + GPIO_REG_OUT;
		/*
		 * The DIR register offset may legitimately be zero, so wrap it
		 * in GPIO_REGMAP_ADDR() to mark it as a valid address.
		 */
config.reg_dir_out_base = GPIO_REGMAP_ADDR(base + GPIO_REG_DIR);
/* This type supports interrupts */
ret = sl28cpld_gpio_irq_init(pdev, base, &config);
if (ret)
return ret;
break;
case SL28CPLD_GPO:
config.reg_set_base = base + GPO_REG_OUT;
break;
case SL28CPLD_GPI:
config.reg_dat_base = base + GPI_REG_IN;
break;
default:
dev_err(&pdev->dev, "unknown type %d\n", type);
return -ENODEV;
}
return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(&pdev->dev, &config));
}
static const struct of_device_id sl28cpld_gpio_of_match[] = {
{ .compatible = "kontron,sl28cpld-gpio", .data = (void *)SL28CPLD_GPIO },
{ .compatible = "kontron,sl28cpld-gpi", .data = (void *)SL28CPLD_GPI },
{ .compatible = "kontron,sl28cpld-gpo", .data = (void *)SL28CPLD_GPO },
{}
};
MODULE_DEVICE_TABLE(of, sl28cpld_gpio_of_match);
static struct platform_driver sl28cpld_gpio_driver = {
.probe = sl28cpld_gpio_probe,
.driver = {
.name = "sl28cpld-gpio",
.of_match_table = sl28cpld_gpio_of_match,
},
};
module_platform_driver(sl28cpld_gpio_driver);
MODULE_DESCRIPTION("sl28cpld GPIO Driver");
MODULE_AUTHOR("Michael Walle <[email protected]>");
MODULE_LICENSE("GPL");
/* end of linux-master: drivers/gpio/gpio-sl28cpld.c */
// SPDX-License-Identifier: GPL-2.0
#include <linux/anon_inodes.h>
#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/build_bug.h>
#include <linux/cdev.h>
#include <linux/compat.h>
#include <linux/compiler.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/gpio.h>
#include <linux/gpio/driver.h>
#include <linux/hte.h>
#include <linux/interrupt.h>
#include <linux/irqreturn.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pinctrl/consumer.h>
#include <linux/poll.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/timekeeping.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include <uapi/linux/gpio.h>
#include "gpiolib.h"
#include "gpiolib-cdev.h"
/*
* Array sizes must ensure 64-bit alignment and not create holes in the
* struct packing.
*/
static_assert(IS_ALIGNED(GPIO_V2_LINES_MAX, 2));
static_assert(IS_ALIGNED(GPIO_MAX_NAME_SIZE, 8));
/*
* Check that uAPI structs are 64-bit aligned for 32/64-bit compatibility
*/
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_attribute), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_config_attribute), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_config), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_request), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_info), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_info_changed), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_event), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_values), 8));
/* Character device interface to GPIO.
*
* The GPIO character device, /dev/gpiochipN, provides userspace an
* interface to gpiolib GPIOs via ioctl()s.
*/
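/*
 * A minimal userspace sketch (editor's illustration, not part of this
 * file) of the v2 uAPI flow implemented below: request line 3 of
 * gpiochip0 as an input, then read its value. Error handling omitted.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/gpio.h>
 *
 *	int chip_fd = open("/dev/gpiochip0", O_RDONLY);
 *	struct gpio_v2_line_request req;
 *
 *	memset(&req, 0, sizeof(req));
 *	req.offsets[0] = 3;
 *	req.num_lines = 1;
 *	req.config.flags = GPIO_V2_LINE_FLAG_INPUT;
 *	strcpy(req.consumer, "example");
 *	ioctl(chip_fd, GPIO_V2_GET_LINE_IOCTL, &req);
 *
 *	struct gpio_v2_line_values vals = { .mask = 1 };
 *
 *	ioctl(req.fd, GPIO_V2_LINE_GET_VALUES_IOCTL, &vals);
 *	// bit 0 of vals.bits now holds the logical level of the line
 */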
typedef __poll_t (*poll_fn)(struct file *, struct poll_table_struct *);
typedef long (*ioctl_fn)(struct file *, unsigned int, unsigned long);
typedef ssize_t (*read_fn)(struct file *, char __user *,
size_t count, loff_t *);
static __poll_t call_poll_locked(struct file *file,
struct poll_table_struct *wait,
struct gpio_device *gdev, poll_fn func)
{
__poll_t ret;
down_read(&gdev->sem);
ret = func(file, wait);
up_read(&gdev->sem);
return ret;
}
static long call_ioctl_locked(struct file *file, unsigned int cmd,
unsigned long arg, struct gpio_device *gdev,
ioctl_fn func)
{
long ret;
down_read(&gdev->sem);
ret = func(file, cmd, arg);
up_read(&gdev->sem);
return ret;
}
static ssize_t call_read_locked(struct file *file, char __user *buf,
size_t count, loff_t *f_ps,
struct gpio_device *gdev, read_fn func)
{
ssize_t ret;
down_read(&gdev->sem);
ret = func(file, buf, count, f_ps);
up_read(&gdev->sem);
return ret;
}
/*
* GPIO line handle management
*/
#ifdef CONFIG_GPIO_CDEV_V1
/**
* struct linehandle_state - contains the state of a userspace handle
* @gdev: the GPIO device the handle pertains to
* @label: consumer label used to tag descriptors
* @descs: the GPIO descriptors held by this handle
* @num_descs: the number of descriptors held in the descs array
*/
struct linehandle_state {
struct gpio_device *gdev;
const char *label;
struct gpio_desc *descs[GPIOHANDLES_MAX];
u32 num_descs;
};
#define GPIOHANDLE_REQUEST_VALID_FLAGS \
(GPIOHANDLE_REQUEST_INPUT | \
GPIOHANDLE_REQUEST_OUTPUT | \
GPIOHANDLE_REQUEST_ACTIVE_LOW | \
GPIOHANDLE_REQUEST_BIAS_PULL_UP | \
GPIOHANDLE_REQUEST_BIAS_PULL_DOWN | \
GPIOHANDLE_REQUEST_BIAS_DISABLE | \
GPIOHANDLE_REQUEST_OPEN_DRAIN | \
GPIOHANDLE_REQUEST_OPEN_SOURCE)
static int linehandle_validate_flags(u32 flags)
{
/* Return an error if an unknown flag is set */
if (flags & ~GPIOHANDLE_REQUEST_VALID_FLAGS)
return -EINVAL;
/*
* Do not allow both INPUT & OUTPUT flags to be set as they are
* contradictory.
*/
if ((flags & GPIOHANDLE_REQUEST_INPUT) &&
(flags & GPIOHANDLE_REQUEST_OUTPUT))
return -EINVAL;
/*
* Do not allow OPEN_SOURCE & OPEN_DRAIN flags in a single request. If
* the hardware actually supports enabling both at the same time the
* electrical result would be disastrous.
*/
if ((flags & GPIOHANDLE_REQUEST_OPEN_DRAIN) &&
(flags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
return -EINVAL;
/* OPEN_DRAIN and OPEN_SOURCE flags only make sense for output mode. */
if (!(flags & GPIOHANDLE_REQUEST_OUTPUT) &&
((flags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
(flags & GPIOHANDLE_REQUEST_OPEN_SOURCE)))
return -EINVAL;
/* Bias flags only allowed for input or output mode. */
if (!((flags & GPIOHANDLE_REQUEST_INPUT) ||
(flags & GPIOHANDLE_REQUEST_OUTPUT)) &&
((flags & GPIOHANDLE_REQUEST_BIAS_DISABLE) ||
(flags & GPIOHANDLE_REQUEST_BIAS_PULL_UP) ||
(flags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN)))
return -EINVAL;
/* Only one bias flag can be set. */
if (((flags & GPIOHANDLE_REQUEST_BIAS_DISABLE) &&
(flags & (GPIOHANDLE_REQUEST_BIAS_PULL_DOWN |
GPIOHANDLE_REQUEST_BIAS_PULL_UP))) ||
((flags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN) &&
(flags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)))
return -EINVAL;
return 0;
}
static void linehandle_flags_to_desc_flags(u32 lflags, unsigned long *flagsp)
{
assign_bit(FLAG_ACTIVE_LOW, flagsp,
lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW);
assign_bit(FLAG_OPEN_DRAIN, flagsp,
lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN);
assign_bit(FLAG_OPEN_SOURCE, flagsp,
lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE);
assign_bit(FLAG_PULL_UP, flagsp,
lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP);
assign_bit(FLAG_PULL_DOWN, flagsp,
lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN);
assign_bit(FLAG_BIAS_DISABLE, flagsp,
lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE);
}
static long linehandle_set_config(struct linehandle_state *lh,
void __user *ip)
{
struct gpiohandle_config gcnf;
struct gpio_desc *desc;
int i, ret;
u32 lflags;
if (copy_from_user(&gcnf, ip, sizeof(gcnf)))
return -EFAULT;
lflags = gcnf.flags;
ret = linehandle_validate_flags(lflags);
if (ret)
return ret;
for (i = 0; i < lh->num_descs; i++) {
desc = lh->descs[i];
linehandle_flags_to_desc_flags(gcnf.flags, &desc->flags);
/*
* Lines have to be requested explicitly for input
* or output, else the line will be treated "as is".
*/
if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
int val = !!gcnf.default_values[i];
ret = gpiod_direction_output(desc, val);
if (ret)
return ret;
} else if (lflags & GPIOHANDLE_REQUEST_INPUT) {
ret = gpiod_direction_input(desc);
if (ret)
return ret;
}
gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
}
return 0;
}
static long linehandle_ioctl_unlocked(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct linehandle_state *lh = file->private_data;
void __user *ip = (void __user *)arg;
struct gpiohandle_data ghd;
DECLARE_BITMAP(vals, GPIOHANDLES_MAX);
unsigned int i;
int ret;
if (!lh->gdev->chip)
return -ENODEV;
switch (cmd) {
case GPIOHANDLE_GET_LINE_VALUES_IOCTL:
/* NOTE: It's okay to read values of output lines */
ret = gpiod_get_array_value_complex(false, true,
lh->num_descs, lh->descs,
NULL, vals);
if (ret)
return ret;
memset(&ghd, 0, sizeof(ghd));
for (i = 0; i < lh->num_descs; i++)
ghd.values[i] = test_bit(i, vals);
if (copy_to_user(ip, &ghd, sizeof(ghd)))
return -EFAULT;
return 0;
case GPIOHANDLE_SET_LINE_VALUES_IOCTL:
/*
* All line descriptors were created at once with the same
* flags so just check if the first one is really output.
*/
if (!test_bit(FLAG_IS_OUT, &lh->descs[0]->flags))
return -EPERM;
if (copy_from_user(&ghd, ip, sizeof(ghd)))
return -EFAULT;
/* Clamp all values to [0,1] */
for (i = 0; i < lh->num_descs; i++)
__assign_bit(i, vals, ghd.values[i]);
/* Reuse the array setting function */
return gpiod_set_array_value_complex(false,
true,
lh->num_descs,
lh->descs,
NULL,
vals);
case GPIOHANDLE_SET_CONFIG_IOCTL:
return linehandle_set_config(lh, ip);
default:
return -EINVAL;
}
}
static long linehandle_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct linehandle_state *lh = file->private_data;
return call_ioctl_locked(file, cmd, arg, lh->gdev,
linehandle_ioctl_unlocked);
}
#ifdef CONFIG_COMPAT
static long linehandle_ioctl_compat(struct file *file, unsigned int cmd,
unsigned long arg)
{
return linehandle_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif
static void linehandle_free(struct linehandle_state *lh)
{
int i;
for (i = 0; i < lh->num_descs; i++)
if (lh->descs[i])
gpiod_free(lh->descs[i]);
kfree(lh->label);
gpio_device_put(lh->gdev);
kfree(lh);
}
static int linehandle_release(struct inode *inode, struct file *file)
{
linehandle_free(file->private_data);
return 0;
}
static const struct file_operations linehandle_fileops = {
.release = linehandle_release,
.owner = THIS_MODULE,
.llseek = noop_llseek,
.unlocked_ioctl = linehandle_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = linehandle_ioctl_compat,
#endif
};
static int linehandle_create(struct gpio_device *gdev, void __user *ip)
{
struct gpiohandle_request handlereq;
struct linehandle_state *lh;
struct file *file;
int fd, i, ret;
u32 lflags;
if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
return -EFAULT;
if ((handlereq.lines == 0) || (handlereq.lines > GPIOHANDLES_MAX))
return -EINVAL;
lflags = handlereq.flags;
ret = linehandle_validate_flags(lflags);
if (ret)
return ret;
lh = kzalloc(sizeof(*lh), GFP_KERNEL);
if (!lh)
return -ENOMEM;
lh->gdev = gpio_device_get(gdev);
if (handlereq.consumer_label[0] != '\0') {
/* label is only initialized if consumer_label is set */
lh->label = kstrndup(handlereq.consumer_label,
sizeof(handlereq.consumer_label) - 1,
GFP_KERNEL);
if (!lh->label) {
ret = -ENOMEM;
goto out_free_lh;
}
}
lh->num_descs = handlereq.lines;
/* Request each GPIO */
for (i = 0; i < handlereq.lines; i++) {
u32 offset = handlereq.lineoffsets[i];
struct gpio_desc *desc = gpiochip_get_desc(gdev->chip, offset);
if (IS_ERR(desc)) {
ret = PTR_ERR(desc);
goto out_free_lh;
}
ret = gpiod_request_user(desc, lh->label);
if (ret)
goto out_free_lh;
lh->descs[i] = desc;
linehandle_flags_to_desc_flags(handlereq.flags, &desc->flags);
ret = gpiod_set_transitory(desc, false);
if (ret < 0)
goto out_free_lh;
/*
* Lines have to be requested explicitly for input
* or output, else the line will be treated "as is".
*/
if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
int val = !!handlereq.default_values[i];
ret = gpiod_direction_output(desc, val);
if (ret)
goto out_free_lh;
} else if (lflags & GPIOHANDLE_REQUEST_INPUT) {
ret = gpiod_direction_input(desc);
if (ret)
goto out_free_lh;
}
gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
dev_dbg(&gdev->dev, "registered chardev handle for line %d\n",
offset);
}
fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
if (fd < 0) {
ret = fd;
goto out_free_lh;
}
file = anon_inode_getfile("gpio-linehandle",
&linehandle_fileops,
lh,
O_RDONLY | O_CLOEXEC);
if (IS_ERR(file)) {
ret = PTR_ERR(file);
goto out_put_unused_fd;
}
handlereq.fd = fd;
if (copy_to_user(ip, &handlereq, sizeof(handlereq))) {
/*
* fput() will trigger the release() callback, so do not go onto
* the regular error cleanup path here.
*/
fput(file);
put_unused_fd(fd);
return -EFAULT;
}
fd_install(fd, file);
dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
lh->num_descs);
return 0;
out_put_unused_fd:
put_unused_fd(fd);
out_free_lh:
linehandle_free(lh);
return ret;
}
#endif /* CONFIG_GPIO_CDEV_V1 */
/**
* struct line - contains the state of a requested line
* @desc: the GPIO descriptor for this line.
* @req: the corresponding line request
* @irq: the interrupt triggered in response to events on this GPIO
* @edflags: the edge flags, GPIO_V2_LINE_FLAG_EDGE_RISING and/or
* GPIO_V2_LINE_FLAG_EDGE_FALLING, indicating the edge detection applied
* @timestamp_ns: cache for the timestamp storing it between hardirq and
* IRQ thread, used to bring the timestamp close to the actual event
* @req_seqno: the seqno for the current edge event in the sequence of
* events for the corresponding line request. This is drawn from the @req.
* @line_seqno: the seqno for the current edge event in the sequence of
* events for this line.
* @work: the worker that implements software debouncing
* @sw_debounced: flag indicating if the software debouncer is active
* @level: the current debounced physical level of the line
* @hdesc: the Hardware Timestamp Engine (HTE) descriptor
* @raw_level: the line level at the time of event
* @total_discard_seq: the running counter of the discarded events
* @last_seqno: the last sequence number before debounce period expires
*/
struct line {
struct gpio_desc *desc;
/*
* -- edge detector specific fields --
*/
struct linereq *req;
unsigned int irq;
/*
* The flags for the active edge detector configuration.
*
* edflags is set by linereq_create(), linereq_free(), and
* linereq_set_config_unlocked(), which are themselves mutually
* exclusive, and is accessed by edge_irq_thread(),
* process_hw_ts_thread() and debounce_work_func(),
* which can all live with a slightly stale value.
*/
u64 edflags;
/*
* timestamp_ns and req_seqno are accessed only by
* edge_irq_handler() and edge_irq_thread(), which are themselves
* mutually exclusive, so no additional protection is necessary.
*/
u64 timestamp_ns;
u32 req_seqno;
/*
* line_seqno is accessed by either edge_irq_thread() or
* debounce_work_func(), which are themselves mutually exclusive,
* so no additional protection is necessary.
*/
u32 line_seqno;
/*
* -- debouncer specific fields --
*/
struct delayed_work work;
/*
* sw_debounce is accessed by linereq_set_config(), which is the
* only setter, and linereq_get_values(), which can live with a
* slightly stale value.
*/
unsigned int sw_debounced;
/*
* level is accessed by debounce_work_func(), which is the only
* setter, and linereq_get_values() which can live with a slightly
* stale value.
*/
unsigned int level;
#ifdef CONFIG_HTE
struct hte_ts_desc hdesc;
/*
* HTE provider sets line level at the time of event. The valid
* value is 0 or 1 and negative value for an error.
*/
int raw_level;
/*
* when sw_debounce is set on HTE enabled line, this is running
* counter of the discarded events.
*/
u32 total_discard_seq;
/*
* when sw_debounce is set on HTE enabled line, this variable records
* last sequence number before debounce period expires.
*/
u32 last_seqno;
#endif /* CONFIG_HTE */
};
/**
* struct linereq - contains the state of a userspace line request
* @gdev: the GPIO device the line request pertains to
* @label: consumer label used to tag GPIO descriptors
* @num_lines: the number of lines in the lines array
* @wait: wait queue that handles blocking reads of events
* @device_unregistered_nb: notifier block for receiving gdev unregister events
* @event_buffer_size: the number of elements allocated in @events
* @events: KFIFO for the GPIO events
* @seqno: the sequence number for edge events generated on all lines in
* this line request. Note that this is not used when @num_lines is 1, as
* the line_seqno is then the same and is cheaper to calculate.
* @config_mutex: mutex for serializing ioctl() calls to ensure consistency
* of configuration, particularly multi-step accesses to desc flags.
* @lines: the lines held by this line request, with @num_lines elements.
*/
struct linereq {
struct gpio_device *gdev;
const char *label;
u32 num_lines;
wait_queue_head_t wait;
struct notifier_block device_unregistered_nb;
u32 event_buffer_size;
DECLARE_KFIFO_PTR(events, struct gpio_v2_line_event);
atomic_t seqno;
struct mutex config_mutex;
struct line lines[];
};
#define GPIO_V2_LINE_BIAS_FLAGS \
(GPIO_V2_LINE_FLAG_BIAS_PULL_UP | \
GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN | \
GPIO_V2_LINE_FLAG_BIAS_DISABLED)
#define GPIO_V2_LINE_DIRECTION_FLAGS \
(GPIO_V2_LINE_FLAG_INPUT | \
GPIO_V2_LINE_FLAG_OUTPUT)
#define GPIO_V2_LINE_DRIVE_FLAGS \
(GPIO_V2_LINE_FLAG_OPEN_DRAIN | \
GPIO_V2_LINE_FLAG_OPEN_SOURCE)
#define GPIO_V2_LINE_EDGE_FLAGS \
(GPIO_V2_LINE_FLAG_EDGE_RISING | \
GPIO_V2_LINE_FLAG_EDGE_FALLING)
#define GPIO_V2_LINE_FLAG_EDGE_BOTH GPIO_V2_LINE_EDGE_FLAGS
#define GPIO_V2_LINE_VALID_FLAGS \
(GPIO_V2_LINE_FLAG_ACTIVE_LOW | \
GPIO_V2_LINE_DIRECTION_FLAGS | \
GPIO_V2_LINE_DRIVE_FLAGS | \
GPIO_V2_LINE_EDGE_FLAGS | \
GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME | \
GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \
GPIO_V2_LINE_BIAS_FLAGS)
/* subset of flags relevant for edge detector configuration */
#define GPIO_V2_LINE_EDGE_DETECTOR_FLAGS \
(GPIO_V2_LINE_FLAG_ACTIVE_LOW | \
GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \
GPIO_V2_LINE_EDGE_FLAGS)
static int linereq_unregistered_notify(struct notifier_block *nb,
unsigned long action, void *data)
{
struct linereq *lr = container_of(nb, struct linereq,
device_unregistered_nb);
wake_up_poll(&lr->wait, EPOLLIN | EPOLLERR);
return NOTIFY_OK;
}
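/*
 * Queue an edge event for userspace. The KFIFO has a fixed capacity
 * (event_buffer_size), and on overflow the oldest event is dropped to
 * make room, so a slow reader sees the most recent events rather than
 * stalling the producer.
 */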
static void linereq_put_event(struct linereq *lr,
struct gpio_v2_line_event *le)
{
bool overflow = false;
spin_lock(&lr->wait.lock);
if (kfifo_is_full(&lr->events)) {
overflow = true;
kfifo_skip(&lr->events);
}
kfifo_in(&lr->events, le, 1);
spin_unlock(&lr->wait.lock);
if (!overflow)
wake_up_poll(&lr->wait, EPOLLIN);
else
pr_debug_ratelimited("event FIFO is full - event dropped\n");
}
static u64 line_event_timestamp(struct line *line)
{
if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &line->desc->flags))
return ktime_get_real_ns();
else if (IS_ENABLED(CONFIG_HTE) &&
test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))
return line->timestamp_ns;
return ktime_get_ns();
}
static u32 line_event_id(int level)
{
return level ? GPIO_V2_LINE_EVENT_RISING_EDGE :
GPIO_V2_LINE_EVENT_FALLING_EDGE;
}
#ifdef CONFIG_HTE
static enum hte_return process_hw_ts_thread(void *p)
{
struct line *line;
struct linereq *lr;
struct gpio_v2_line_event le;
u64 edflags;
int level;
if (!p)
return HTE_CB_HANDLED;
line = p;
lr = line->req;
memset(&le, 0, sizeof(le));
le.timestamp_ns = line->timestamp_ns;
edflags = READ_ONCE(line->edflags);
switch (edflags & GPIO_V2_LINE_EDGE_FLAGS) {
case GPIO_V2_LINE_FLAG_EDGE_BOTH:
level = (line->raw_level >= 0) ?
line->raw_level :
gpiod_get_raw_value_cansleep(line->desc);
if (edflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
level = !level;
le.id = line_event_id(level);
break;
case GPIO_V2_LINE_FLAG_EDGE_RISING:
le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
break;
case GPIO_V2_LINE_FLAG_EDGE_FALLING:
le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
break;
default:
return HTE_CB_HANDLED;
}
le.line_seqno = line->line_seqno;
le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno;
le.offset = gpio_chip_hwgpio(line->desc);
linereq_put_event(lr, &le);
return HTE_CB_HANDLED;
}
static enum hte_return process_hw_ts(struct hte_ts_data *ts, void *p)
{
struct line *line;
struct linereq *lr;
int diff_seqno = 0;
if (!ts || !p)
return HTE_CB_HANDLED;
line = p;
line->timestamp_ns = ts->tsc;
line->raw_level = ts->raw_level;
lr = line->req;
if (READ_ONCE(line->sw_debounced)) {
line->total_discard_seq++;
line->last_seqno = ts->seq;
mod_delayed_work(system_wq, &line->work,
usecs_to_jiffies(READ_ONCE(line->desc->debounce_period_us)));
} else {
if (unlikely(ts->seq < line->line_seqno))
return HTE_CB_HANDLED;
diff_seqno = ts->seq - line->line_seqno;
line->line_seqno = ts->seq;
if (lr->num_lines != 1)
line->req_seqno = atomic_add_return(diff_seqno,
&lr->seqno);
return HTE_RUN_SECOND_CB;
}
return HTE_CB_HANDLED;
}
static int hte_edge_setup(struct line *line, u64 eflags)
{
int ret;
unsigned long flags = 0;
struct hte_ts_desc *hdesc = &line->hdesc;
if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
HTE_FALLING_EDGE_TS :
HTE_RISING_EDGE_TS;
if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
HTE_RISING_EDGE_TS :
HTE_FALLING_EDGE_TS;
line->total_discard_seq = 0;
hte_init_line_attr(hdesc, desc_to_gpio(line->desc), flags, NULL,
line->desc);
ret = hte_ts_get(NULL, hdesc, 0);
if (ret)
return ret;
return hte_request_ts_ns(hdesc, process_hw_ts, process_hw_ts_thread,
line);
}
#else
static int hte_edge_setup(struct line *line, u64 eflags)
{
return 0;
}
#endif /* CONFIG_HTE */
static irqreturn_t edge_irq_thread(int irq, void *p)
{
struct line *line = p;
struct linereq *lr = line->req;
struct gpio_v2_line_event le;
/* Do not leak kernel stack to userspace */
memset(&le, 0, sizeof(le));
if (line->timestamp_ns) {
le.timestamp_ns = line->timestamp_ns;
} else {
/*
* We may be running from a nested threaded interrupt in
* which case we didn't get the timestamp from
* edge_irq_handler().
*/
le.timestamp_ns = line_event_timestamp(line);
if (lr->num_lines != 1)
line->req_seqno = atomic_inc_return(&lr->seqno);
}
line->timestamp_ns = 0;
switch (READ_ONCE(line->edflags) & GPIO_V2_LINE_EDGE_FLAGS) {
case GPIO_V2_LINE_FLAG_EDGE_BOTH:
le.id = line_event_id(gpiod_get_value_cansleep(line->desc));
break;
case GPIO_V2_LINE_FLAG_EDGE_RISING:
le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
break;
case GPIO_V2_LINE_FLAG_EDGE_FALLING:
le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
break;
default:
return IRQ_NONE;
}
line->line_seqno++;
le.line_seqno = line->line_seqno;
le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno;
le.offset = gpio_chip_hwgpio(line->desc);
linereq_put_event(lr, &le);
return IRQ_HANDLED;
}
static irqreturn_t edge_irq_handler(int irq, void *p)
{
struct line *line = p;
struct linereq *lr = line->req;
/*
* Just store the timestamp in hardirq context so we get it as
* close in time as possible to the actual event.
*/
line->timestamp_ns = line_event_timestamp(line);
if (lr->num_lines != 1)
line->req_seqno = atomic_inc_return(&lr->seqno);
return IRQ_WAKE_THREAD;
}
/*
* returns the current debounced logical value.
*/
static bool debounced_value(struct line *line)
{
bool value;
/*
* minor race - debouncer may be stopped here, so edge_detector_stop()
* must leave the value unchanged so the following will read the level
* from when the debouncer was last running.
*/
value = READ_ONCE(line->level);
if (test_bit(FLAG_ACTIVE_LOW, &line->desc->flags))
value = !value;
return value;
}
static irqreturn_t debounce_irq_handler(int irq, void *p)
{
struct line *line = p;
mod_delayed_work(system_wq, &line->work,
usecs_to_jiffies(READ_ONCE(line->desc->debounce_period_us)));
return IRQ_HANDLED;
}
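/*
 * Software debounce: each raw edge (debounce_irq_handler() above) pushes
 * the delayed work out by the debounce period, so debounce_work_func()
 * only runs once the line has been electrically quiet for that long.
 * The settled level is then latched and, if edges are being monitored,
 * reported as a single event.
 */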
static void debounce_work_func(struct work_struct *work)
{
struct gpio_v2_line_event le;
struct line *line = container_of(work, struct line, work.work);
struct linereq *lr;
u64 eflags, edflags = READ_ONCE(line->edflags);
int level = -1;
#ifdef CONFIG_HTE
int diff_seqno;
if (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE)
level = line->raw_level;
#endif
if (level < 0)
level = gpiod_get_raw_value_cansleep(line->desc);
if (level < 0) {
pr_debug_ratelimited("debouncer failed to read line value\n");
return;
}
if (READ_ONCE(line->level) == level)
return;
WRITE_ONCE(line->level, level);
/* -- edge detection -- */
eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
if (!eflags)
return;
/* switch from physical level to logical - if they differ */
if (edflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
level = !level;
/* ignore edges that are not being monitored */
if (((eflags == GPIO_V2_LINE_FLAG_EDGE_RISING) && !level) ||
((eflags == GPIO_V2_LINE_FLAG_EDGE_FALLING) && level))
return;
/* Do not leak kernel stack to userspace */
memset(&le, 0, sizeof(le));
lr = line->req;
le.timestamp_ns = line_event_timestamp(line);
le.offset = gpio_chip_hwgpio(line->desc);
#ifdef CONFIG_HTE
if (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) {
/* discard events except the last one */
line->total_discard_seq -= 1;
diff_seqno = line->last_seqno - line->total_discard_seq -
line->line_seqno;
line->line_seqno = line->last_seqno - line->total_discard_seq;
le.line_seqno = line->line_seqno;
le.seqno = (lr->num_lines == 1) ?
le.line_seqno : atomic_add_return(diff_seqno, &lr->seqno);
} else
#endif /* CONFIG_HTE */
{
line->line_seqno++;
le.line_seqno = line->line_seqno;
le.seqno = (lr->num_lines == 1) ?
le.line_seqno : atomic_inc_return(&lr->seqno);
}
le.id = line_event_id(level);
linereq_put_event(lr, &le);
}
static int debounce_setup(struct line *line, unsigned int debounce_period_us)
{
unsigned long irqflags;
int ret, level, irq;
/* try hardware */
ret = gpiod_set_debounce(line->desc, debounce_period_us);
if (!ret) {
WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
return ret;
}
if (ret != -ENOTSUPP)
return ret;
if (debounce_period_us) {
/* setup software debounce */
level = gpiod_get_raw_value_cansleep(line->desc);
if (level < 0)
return level;
if (!(IS_ENABLED(CONFIG_HTE) &&
test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))) {
irq = gpiod_to_irq(line->desc);
if (irq < 0)
return -ENXIO;
irqflags = IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING;
ret = request_irq(irq, debounce_irq_handler, irqflags,
line->req->label, line);
if (ret)
return ret;
line->irq = irq;
} else {
ret = hte_edge_setup(line, GPIO_V2_LINE_FLAG_EDGE_BOTH);
if (ret)
return ret;
}
WRITE_ONCE(line->level, level);
WRITE_ONCE(line->sw_debounced, 1);
}
return 0;
}
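/*
 * Per-line configuration is resolved by scanning the request's attribute
 * array: each attribute carries a 64-bit mask of the lines it applies to,
 * the first matching attribute wins, and lines with no match fall back to
 * the request-wide defaults.
 */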
static bool gpio_v2_line_config_debounced(struct gpio_v2_line_config *lc,
unsigned int line_idx)
{
unsigned int i;
u64 mask = BIT_ULL(line_idx);
for (i = 0; i < lc->num_attrs; i++) {
if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_DEBOUNCE) &&
(lc->attrs[i].mask & mask))
return true;
}
return false;
}
static u32 gpio_v2_line_config_debounce_period(struct gpio_v2_line_config *lc,
unsigned int line_idx)
{
unsigned int i;
u64 mask = BIT_ULL(line_idx);
for (i = 0; i < lc->num_attrs; i++) {
if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_DEBOUNCE) &&
(lc->attrs[i].mask & mask))
return lc->attrs[i].attr.debounce_period_us;
}
return 0;
}
static void edge_detector_stop(struct line *line)
{
if (line->irq) {
free_irq(line->irq, line);
line->irq = 0;
}
#ifdef CONFIG_HTE
if (READ_ONCE(line->edflags) & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE)
hte_ts_put(&line->hdesc);
#endif
cancel_delayed_work_sync(&line->work);
WRITE_ONCE(line->sw_debounced, 0);
WRITE_ONCE(line->edflags, 0);
if (line->desc)
WRITE_ONCE(line->desc->debounce_period_us, 0);
/* do not change line->level - see comment in debounced_value() */
}
static int edge_detector_setup(struct line *line,
struct gpio_v2_line_config *lc,
unsigned int line_idx, u64 edflags)
{
u32 debounce_period_us;
unsigned long irqflags = 0;
u64 eflags;
int irq, ret;
eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
if (eflags && !kfifo_initialized(&line->req->events)) {
ret = kfifo_alloc(&line->req->events,
line->req->event_buffer_size, GFP_KERNEL);
if (ret)
return ret;
}
if (gpio_v2_line_config_debounced(lc, line_idx)) {
debounce_period_us = gpio_v2_line_config_debounce_period(lc, line_idx);
ret = debounce_setup(line, debounce_period_us);
if (ret)
return ret;
WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
}
/* detection disabled or sw debouncer will provide edge detection */
if (!eflags || READ_ONCE(line->sw_debounced))
return 0;
if (IS_ENABLED(CONFIG_HTE) &&
(edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
return hte_edge_setup(line, edflags);
irq = gpiod_to_irq(line->desc);
if (irq < 0)
return -ENXIO;
if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
irqflags |= IRQF_ONESHOT;
/* Request a thread to read the events */
ret = request_threaded_irq(irq, edge_irq_handler, edge_irq_thread,
irqflags, line->req->label, line);
if (ret)
return ret;
line->irq = irq;
return 0;
}
static int edge_detector_update(struct line *line,
struct gpio_v2_line_config *lc,
unsigned int line_idx, u64 edflags)
{
u64 active_edflags = READ_ONCE(line->edflags);
unsigned int debounce_period_us =
gpio_v2_line_config_debounce_period(lc, line_idx);
if ((active_edflags == edflags) &&
(READ_ONCE(line->desc->debounce_period_us) == debounce_period_us))
return 0;
	/* sw debounced and still will be... */
if (debounce_period_us && READ_ONCE(line->sw_debounced)) {
WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
return 0;
}
/* reconfiguring edge detection or sw debounce being disabled */
if ((line->irq && !READ_ONCE(line->sw_debounced)) ||
(active_edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) ||
(!debounce_period_us && READ_ONCE(line->sw_debounced)))
edge_detector_stop(line);
return edge_detector_setup(line, lc, line_idx, edflags);
}
static u64 gpio_v2_line_config_flags(struct gpio_v2_line_config *lc,
unsigned int line_idx)
{
unsigned int i;
u64 mask = BIT_ULL(line_idx);
for (i = 0; i < lc->num_attrs; i++) {
if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_FLAGS) &&
(lc->attrs[i].mask & mask))
return lc->attrs[i].attr.flags;
}
return lc->flags;
}
static int gpio_v2_line_config_output_value(struct gpio_v2_line_config *lc,
unsigned int line_idx)
{
unsigned int i;
u64 mask = BIT_ULL(line_idx);
for (i = 0; i < lc->num_attrs; i++) {
if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_OUTPUT_VALUES) &&
(lc->attrs[i].mask & mask))
return !!(lc->attrs[i].attr.values & mask);
}
return 0;
}
static int gpio_v2_line_flags_validate(u64 flags)
{
/* Return an error if an unknown flag is set */
if (flags & ~GPIO_V2_LINE_VALID_FLAGS)
return -EINVAL;
if (!IS_ENABLED(CONFIG_HTE) &&
(flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
return -EOPNOTSUPP;
/*
* Do not allow both INPUT and OUTPUT flags to be set as they are
* contradictory.
*/
if ((flags & GPIO_V2_LINE_FLAG_INPUT) &&
(flags & GPIO_V2_LINE_FLAG_OUTPUT))
return -EINVAL;
/* Only allow one event clock source */
if (IS_ENABLED(CONFIG_HTE) &&
(flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME) &&
(flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
return -EINVAL;
/* Edge detection requires explicit input. */
if ((flags & GPIO_V2_LINE_EDGE_FLAGS) &&
!(flags & GPIO_V2_LINE_FLAG_INPUT))
return -EINVAL;
/*
* Do not allow OPEN_SOURCE and OPEN_DRAIN flags in a single
* request. If the hardware actually supports enabling both at the
* same time the electrical result would be disastrous.
*/
if ((flags & GPIO_V2_LINE_FLAG_OPEN_DRAIN) &&
(flags & GPIO_V2_LINE_FLAG_OPEN_SOURCE))
return -EINVAL;
/* Drive requires explicit output direction. */
if ((flags & GPIO_V2_LINE_DRIVE_FLAGS) &&
!(flags & GPIO_V2_LINE_FLAG_OUTPUT))
return -EINVAL;
/* Bias requires explicit direction. */
if ((flags & GPIO_V2_LINE_BIAS_FLAGS) &&
!(flags & GPIO_V2_LINE_DIRECTION_FLAGS))
return -EINVAL;
/* Only one bias flag can be set. */
if (((flags & GPIO_V2_LINE_FLAG_BIAS_DISABLED) &&
(flags & (GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN |
GPIO_V2_LINE_FLAG_BIAS_PULL_UP))) ||
((flags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN) &&
(flags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP)))
return -EINVAL;
return 0;
}
static int gpio_v2_line_config_validate(struct gpio_v2_line_config *lc,
unsigned int num_lines)
{
unsigned int i;
u64 flags;
int ret;
if (lc->num_attrs > GPIO_V2_LINE_NUM_ATTRS_MAX)
return -EINVAL;
if (memchr_inv(lc->padding, 0, sizeof(lc->padding)))
return -EINVAL;
for (i = 0; i < num_lines; i++) {
flags = gpio_v2_line_config_flags(lc, i);
ret = gpio_v2_line_flags_validate(flags);
if (ret)
return ret;
/* debounce requires explicit input */
if (gpio_v2_line_config_debounced(lc, i) &&
!(flags & GPIO_V2_LINE_FLAG_INPUT))
return -EINVAL;
}
return 0;
}
static void gpio_v2_line_config_flags_to_desc_flags(u64 flags,
unsigned long *flagsp)
{
assign_bit(FLAG_ACTIVE_LOW, flagsp,
flags & GPIO_V2_LINE_FLAG_ACTIVE_LOW);
if (flags & GPIO_V2_LINE_FLAG_OUTPUT)
set_bit(FLAG_IS_OUT, flagsp);
else if (flags & GPIO_V2_LINE_FLAG_INPUT)
clear_bit(FLAG_IS_OUT, flagsp);
assign_bit(FLAG_EDGE_RISING, flagsp,
flags & GPIO_V2_LINE_FLAG_EDGE_RISING);
assign_bit(FLAG_EDGE_FALLING, flagsp,
flags & GPIO_V2_LINE_FLAG_EDGE_FALLING);
assign_bit(FLAG_OPEN_DRAIN, flagsp,
flags & GPIO_V2_LINE_FLAG_OPEN_DRAIN);
assign_bit(FLAG_OPEN_SOURCE, flagsp,
flags & GPIO_V2_LINE_FLAG_OPEN_SOURCE);
assign_bit(FLAG_PULL_UP, flagsp,
flags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP);
assign_bit(FLAG_PULL_DOWN, flagsp,
flags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN);
assign_bit(FLAG_BIAS_DISABLE, flagsp,
flags & GPIO_V2_LINE_FLAG_BIAS_DISABLED);
assign_bit(FLAG_EVENT_CLOCK_REALTIME, flagsp,
flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME);
assign_bit(FLAG_EVENT_CLOCK_HTE, flagsp,
flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE);
}
static long linereq_get_values(struct linereq *lr, void __user *ip)
{
struct gpio_v2_line_values lv;
DECLARE_BITMAP(vals, GPIO_V2_LINES_MAX);
struct gpio_desc **descs;
unsigned int i, didx, num_get;
bool val;
int ret;
/* NOTE: It's ok to read values of output lines. */
if (copy_from_user(&lv, ip, sizeof(lv)))
return -EFAULT;
for (num_get = 0, i = 0; i < lr->num_lines; i++) {
if (lv.mask & BIT_ULL(i)) {
num_get++;
descs = &lr->lines[i].desc;
}
}
if (num_get == 0)
return -EINVAL;
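/*
 * For a single requested line, descs already points at that line's
 * descriptor from the scan above; only multi-line reads need the
 * compacted descriptor array built below.
 */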
if (num_get != 1) {
descs = kmalloc_array(num_get, sizeof(*descs), GFP_KERNEL);
if (!descs)
return -ENOMEM;
for (didx = 0, i = 0; i < lr->num_lines; i++) {
if (lv.mask & BIT_ULL(i)) {
descs[didx] = lr->lines[i].desc;
didx++;
}
}
}
ret = gpiod_get_array_value_complex(false, true, num_get,
descs, NULL, vals);
if (num_get != 1)
kfree(descs);
if (ret)
return ret;
lv.bits = 0;
for (didx = 0, i = 0; i < lr->num_lines; i++) {
if (lv.mask & BIT_ULL(i)) {
if (lr->lines[i].sw_debounced)
val = debounced_value(&lr->lines[i]);
else
val = test_bit(didx, vals);
if (val)
lv.bits |= BIT_ULL(i);
didx++;
}
}
if (copy_to_user(ip, &lv, sizeof(lv)))
return -EFAULT;
return 0;
}
static long linereq_set_values_unlocked(struct linereq *lr,
struct gpio_v2_line_values *lv)
{
DECLARE_BITMAP(vals, GPIO_V2_LINES_MAX);
struct gpio_desc **descs;
unsigned int i, didx, num_set;
int ret;
bitmap_zero(vals, GPIO_V2_LINES_MAX);
for (num_set = 0, i = 0; i < lr->num_lines; i++) {
if (lv->mask & BIT_ULL(i)) {
if (!test_bit(FLAG_IS_OUT, &lr->lines[i].desc->flags))
return -EPERM;
if (lv->bits & BIT_ULL(i))
__set_bit(num_set, vals);
num_set++;
descs = &lr->lines[i].desc;
}
}
if (num_set == 0)
return -EINVAL;
if (num_set != 1) {
/* build compacted desc array and values */
descs = kmalloc_array(num_set, sizeof(*descs), GFP_KERNEL);
if (!descs)
return -ENOMEM;
for (didx = 0, i = 0; i < lr->num_lines; i++) {
if (lv->mask & BIT_ULL(i)) {
descs[didx] = lr->lines[i].desc;
didx++;
}
}
}
ret = gpiod_set_array_value_complex(false, true, num_set,
descs, NULL, vals);
if (num_set != 1)
kfree(descs);
return ret;
}
static long linereq_set_values(struct linereq *lr, void __user *ip)
{
struct gpio_v2_line_values lv;
int ret;
if (copy_from_user(&lv, ip, sizeof(lv)))
return -EFAULT;
mutex_lock(&lr->config_mutex);
ret = linereq_set_values_unlocked(lr, &lv);
mutex_unlock(&lr->config_mutex);
return ret;
}
static long linereq_set_config_unlocked(struct linereq *lr,
struct gpio_v2_line_config *lc)
{
struct gpio_desc *desc;
struct line *line;
unsigned int i;
u64 flags, edflags;
int ret;
for (i = 0; i < lr->num_lines; i++) {
line = &lr->lines[i];
desc = lr->lines[i].desc;
flags = gpio_v2_line_config_flags(lc, i);
gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
/*
* Lines have to be requested explicitly for input
* or output, else the line will be treated "as is".
*/
if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
int val = gpio_v2_line_config_output_value(lc, i);
edge_detector_stop(line);
ret = gpiod_direction_output(desc, val);
if (ret)
return ret;
} else if (flags & GPIO_V2_LINE_FLAG_INPUT) {
ret = gpiod_direction_input(desc);
if (ret)
return ret;
ret = edge_detector_update(line, lc, i, edflags);
if (ret)
return ret;
}
WRITE_ONCE(line->edflags, edflags);
gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
}
return 0;
}
static long linereq_set_config(struct linereq *lr, void __user *ip)
{
struct gpio_v2_line_config lc;
int ret;
if (copy_from_user(&lc, ip, sizeof(lc)))
return -EFAULT;
ret = gpio_v2_line_config_validate(&lc, lr->num_lines);
if (ret)
return ret;
mutex_lock(&lr->config_mutex);
ret = linereq_set_config_unlocked(lr, &lc);
mutex_unlock(&lr->config_mutex);
return ret;
}
static long linereq_ioctl_unlocked(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct linereq *lr = file->private_data;
void __user *ip = (void __user *)arg;
if (!lr->gdev->chip)
return -ENODEV;
switch (cmd) {
case GPIO_V2_LINE_GET_VALUES_IOCTL:
return linereq_get_values(lr, ip);
case GPIO_V2_LINE_SET_VALUES_IOCTL:
return linereq_set_values(lr, ip);
case GPIO_V2_LINE_SET_CONFIG_IOCTL:
return linereq_set_config(lr, ip);
default:
return -EINVAL;
}
}
static long linereq_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct linereq *lr = file->private_data;
return call_ioctl_locked(file, cmd, arg, lr->gdev,
linereq_ioctl_unlocked);
}
#ifdef CONFIG_COMPAT
static long linereq_ioctl_compat(struct file *file, unsigned int cmd,
unsigned long arg)
{
return linereq_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif
static __poll_t linereq_poll_unlocked(struct file *file,
struct poll_table_struct *wait)
{
struct linereq *lr = file->private_data;
__poll_t events = 0;
if (!lr->gdev->chip)
return EPOLLHUP | EPOLLERR;
poll_wait(file, &lr->wait, wait);
if (!kfifo_is_empty_spinlocked_noirqsave(&lr->events,
&lr->wait.lock))
events = EPOLLIN | EPOLLRDNORM;
return events;
}
static __poll_t linereq_poll(struct file *file,
struct poll_table_struct *wait)
{
struct linereq *lr = file->private_data;
return call_poll_locked(file, wait, lr->gdev, linereq_poll_unlocked);
}
static ssize_t linereq_read_unlocked(struct file *file, char __user *buf,
size_t count, loff_t *f_ps)
{
struct linereq *lr = file->private_data;
struct gpio_v2_line_event le;
ssize_t bytes_read = 0;
int ret;
if (!lr->gdev->chip)
return -ENODEV;
if (count < sizeof(le))
return -EINVAL;
do {
spin_lock(&lr->wait.lock);
if (kfifo_is_empty(&lr->events)) {
if (bytes_read) {
spin_unlock(&lr->wait.lock);
return bytes_read;
}
if (file->f_flags & O_NONBLOCK) {
spin_unlock(&lr->wait.lock);
return -EAGAIN;
}
ret = wait_event_interruptible_locked(lr->wait,
!kfifo_is_empty(&lr->events));
if (ret) {
spin_unlock(&lr->wait.lock);
return ret;
}
}
ret = kfifo_out(&lr->events, &le, 1);
spin_unlock(&lr->wait.lock);
if (ret != 1) {
/*
* This should never happen - we were holding the
* lock from the moment we learned the fifo is no
* longer empty until now.
*/
ret = -EIO;
break;
}
if (copy_to_user(buf + bytes_read, &le, sizeof(le)))
return -EFAULT;
bytes_read += sizeof(le);
} while (count >= bytes_read + sizeof(le));
return bytes_read;
}
static ssize_t linereq_read(struct file *file, char __user *buf,
size_t count, loff_t *f_ps)
{
struct linereq *lr = file->private_data;
return call_read_locked(file, buf, count, f_ps, lr->gdev,
linereq_read_unlocked);
}
static void linereq_free(struct linereq *lr)
{
unsigned int i;
if (lr->device_unregistered_nb.notifier_call)
blocking_notifier_chain_unregister(&lr->gdev->device_notifier,
&lr->device_unregistered_nb);
for (i = 0; i < lr->num_lines; i++) {
if (lr->lines[i].desc) {
edge_detector_stop(&lr->lines[i]);
gpiod_free(lr->lines[i].desc);
}
}
kfifo_free(&lr->events);
kfree(lr->label);
gpio_device_put(lr->gdev);
kfree(lr);
}
static int linereq_release(struct inode *inode, struct file *file)
{
struct linereq *lr = file->private_data;
linereq_free(lr);
return 0;
}
#ifdef CONFIG_PROC_FS
static void linereq_show_fdinfo(struct seq_file *out, struct file *file)
{
struct linereq *lr = file->private_data;
struct device *dev = &lr->gdev->dev;
u16 i;
seq_printf(out, "gpio-chip:\t%s\n", dev_name(dev));
for (i = 0; i < lr->num_lines; i++)
seq_printf(out, "gpio-line:\t%d\n",
gpio_chip_hwgpio(lr->lines[i].desc));
}
#endif
static const struct file_operations line_fileops = {
.release = linereq_release,
.read = linereq_read,
.poll = linereq_poll,
.owner = THIS_MODULE,
.llseek = noop_llseek,
.unlocked_ioctl = linereq_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = linereq_ioctl_compat,
#endif
#ifdef CONFIG_PROC_FS
.show_fdinfo = linereq_show_fdinfo,
#endif
};
static int linereq_create(struct gpio_device *gdev, void __user *ip)
{
struct gpio_v2_line_request ulr;
struct gpio_v2_line_config *lc;
struct linereq *lr;
struct file *file;
u64 flags, edflags;
unsigned int i;
int fd, ret;
if (copy_from_user(&ulr, ip, sizeof(ulr)))
return -EFAULT;
if ((ulr.num_lines == 0) || (ulr.num_lines > GPIO_V2_LINES_MAX))
return -EINVAL;
if (memchr_inv(ulr.padding, 0, sizeof(ulr.padding)))
return -EINVAL;
lc = &ulr.config;
ret = gpio_v2_line_config_validate(lc, ulr.num_lines);
if (ret)
return ret;
lr = kzalloc(struct_size(lr, lines, ulr.num_lines), GFP_KERNEL);
if (!lr)
return -ENOMEM;
lr->gdev = gpio_device_get(gdev);
for (i = 0; i < ulr.num_lines; i++) {
lr->lines[i].req = lr;
WRITE_ONCE(lr->lines[i].sw_debounced, 0);
INIT_DELAYED_WORK(&lr->lines[i].work, debounce_work_func);
}
if (ulr.consumer[0] != '\0') {
/* label is only initialized if consumer is set */
lr->label = kstrndup(ulr.consumer, sizeof(ulr.consumer) - 1,
GFP_KERNEL);
if (!lr->label) {
ret = -ENOMEM;
goto out_free_linereq;
}
}
mutex_init(&lr->config_mutex);
init_waitqueue_head(&lr->wait);
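/*
 * Default to 16 events per requested line and clamp oversized requests;
 * the fifo itself is only allocated once edge detection is enabled (see
 * edge_detector_setup()).
 */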
lr->event_buffer_size = ulr.event_buffer_size;
if (lr->event_buffer_size == 0)
lr->event_buffer_size = ulr.num_lines * 16;
else if (lr->event_buffer_size > GPIO_V2_LINES_MAX * 16)
lr->event_buffer_size = GPIO_V2_LINES_MAX * 16;
atomic_set(&lr->seqno, 0);
lr->num_lines = ulr.num_lines;
/* Request each GPIO */
for (i = 0; i < ulr.num_lines; i++) {
u32 offset = ulr.offsets[i];
struct gpio_desc *desc = gpiochip_get_desc(gdev->chip, offset);
if (IS_ERR(desc)) {
ret = PTR_ERR(desc);
goto out_free_linereq;
}
ret = gpiod_request_user(desc, lr->label);
if (ret)
goto out_free_linereq;
lr->lines[i].desc = desc;
flags = gpio_v2_line_config_flags(lc, i);
gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
ret = gpiod_set_transitory(desc, false);
if (ret < 0)
goto out_free_linereq;
edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
/*
* Lines have to be requested explicitly for input
* or output, else the line will be treated "as is".
*/
if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
int val = gpio_v2_line_config_output_value(lc, i);
ret = gpiod_direction_output(desc, val);
if (ret)
goto out_free_linereq;
} else if (flags & GPIO_V2_LINE_FLAG_INPUT) {
ret = gpiod_direction_input(desc);
if (ret)
goto out_free_linereq;
ret = edge_detector_setup(&lr->lines[i], lc, i,
edflags);
if (ret)
goto out_free_linereq;
}
lr->lines[i].edflags = edflags;
gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
dev_dbg(&gdev->dev, "registered chardev handle for line %d\n",
offset);
}
lr->device_unregistered_nb.notifier_call = linereq_unregistered_notify;
ret = blocking_notifier_chain_register(&gdev->device_notifier,
&lr->device_unregistered_nb);
if (ret)
goto out_free_linereq;
fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
if (fd < 0) {
ret = fd;
goto out_free_linereq;
}
file = anon_inode_getfile("gpio-line", &line_fileops, lr,
O_RDONLY | O_CLOEXEC);
if (IS_ERR(file)) {
ret = PTR_ERR(file);
goto out_put_unused_fd;
}
ulr.fd = fd;
if (copy_to_user(ip, &ulr, sizeof(ulr))) {
/*
* fput() will trigger the release() callback, so do not go onto
* the regular error cleanup path here.
*/
fput(file);
put_unused_fd(fd);
return -EFAULT;
}
fd_install(fd, file);
dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
lr->num_lines);
return 0;
out_put_unused_fd:
put_unused_fd(fd);
out_free_linereq:
linereq_free(lr);
return ret;
}
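/*
 * Illustrative userspace usage of GPIO_V2_GET_LINE_IOCTL, which is routed
 * to linereq_create() above. This is only a sketch using the uapi
 * definitions from <linux/gpio.h>; the offset and consumer name are made
 * up and error handling is omitted:
 *
 *	struct gpio_v2_line_request req = { 0 };
 *
 *	req.offsets[0] = 5;
 *	req.num_lines = 1;
 *	req.config.flags = GPIO_V2_LINE_FLAG_INPUT;
 *	strcpy(req.consumer, "example-app");
 *	ioctl(chip_fd, GPIO_V2_GET_LINE_IOCTL, &req);
 *
 * On success, req.fd refers to an anonymous inode served by line_fileops.
 */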
#ifdef CONFIG_GPIO_CDEV_V1
/*
* GPIO line event management
*/
/**
* struct lineevent_state - contains the state of a userspace event
* @gdev: the GPIO device the event pertains to
* @label: consumer label used to tag descriptors
* @desc: the GPIO descriptor held by this event
* @eflags: the event flags this line was requested with
* @irq: the interrupt that triggers in response to events on this GPIO
* @wait: wait queue that handles blocking reads of events
* @device_unregistered_nb: notifier block for receiving gdev unregister events
* @events: KFIFO for the GPIO events
* @timestamp: cache for the timestamp, storing it between the hardirq
* handler and the IRQ thread; used to bring the timestamp close to the
* actual event
*/
struct lineevent_state {
struct gpio_device *gdev;
const char *label;
struct gpio_desc *desc;
u32 eflags;
int irq;
wait_queue_head_t wait;
struct notifier_block device_unregistered_nb;
DECLARE_KFIFO(events, struct gpioevent_data, 16);
u64 timestamp;
};
#define GPIOEVENT_REQUEST_VALID_FLAGS \
(GPIOEVENT_REQUEST_RISING_EDGE | \
GPIOEVENT_REQUEST_FALLING_EDGE)
static __poll_t lineevent_poll_unlocked(struct file *file,
struct poll_table_struct *wait)
{
struct lineevent_state *le = file->private_data;
__poll_t events = 0;
if (!le->gdev->chip)
return EPOLLHUP | EPOLLERR;
poll_wait(file, &le->wait, wait);
if (!kfifo_is_empty_spinlocked_noirqsave(&le->events, &le->wait.lock))
events = EPOLLIN | EPOLLRDNORM;
return events;
}
static __poll_t lineevent_poll(struct file *file,
struct poll_table_struct *wait)
{
struct lineevent_state *le = file->private_data;
return call_poll_locked(file, wait, le->gdev, lineevent_poll_unlocked);
}
static int lineevent_unregistered_notify(struct notifier_block *nb,
unsigned long action, void *data)
{
struct lineevent_state *le = container_of(nb, struct lineevent_state,
device_unregistered_nb);
wake_up_poll(&le->wait, EPOLLIN | EPOLLERR);
return NOTIFY_OK;
}
struct compat_gpioeevent_data {
compat_u64 timestamp;
u32 id;
};
static ssize_t lineevent_read_unlocked(struct file *file, char __user *buf,
size_t count, loff_t *f_ps)
{
struct lineevent_state *le = file->private_data;
struct gpioevent_data ge;
ssize_t bytes_read = 0;
ssize_t ge_size;
int ret;
if (!le->gdev->chip)
return -ENODEV;
/*
* When the compat system call is used, struct gpioevent_data (at least
* on ia32) has a different size due to alignment differences. Because
* the first member is 64 bits wide, followed by a 32-bit one, there is
* no gap between them; the only difference is the padding at the end of
* the data structure. Hence, we calculate the actual sizeof() and pass
* it as an argument to copy_to_user() to drop the unneeded bytes from
* the output.
*/
if (compat_need_64bit_alignment_fixup())
ge_size = sizeof(struct compat_gpioeevent_data);
else
ge_size = sizeof(struct gpioevent_data);
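/*
* Concretely: with 4-byte u64 alignment (ia32 compat) the event is 12
* bytes, while the native 64-bit layout pads it to 16 bytes.
*/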
if (count < ge_size)
return -EINVAL;
do {
spin_lock(&le->wait.lock);
if (kfifo_is_empty(&le->events)) {
if (bytes_read) {
spin_unlock(&le->wait.lock);
return bytes_read;
}
if (file->f_flags & O_NONBLOCK) {
spin_unlock(&le->wait.lock);
return -EAGAIN;
}
ret = wait_event_interruptible_locked(le->wait,
!kfifo_is_empty(&le->events));
if (ret) {
spin_unlock(&le->wait.lock);
return ret;
}
}
ret = kfifo_out(&le->events, &ge, 1);
spin_unlock(&le->wait.lock);
if (ret != 1) {
/*
* This should never happen - we were holding the lock
* from the moment we learned the fifo is no longer
* empty until now.
*/
ret = -EIO;
break;
}
if (copy_to_user(buf + bytes_read, &ge, ge_size))
return -EFAULT;
bytes_read += ge_size;
} while (count >= bytes_read + ge_size);
return bytes_read;
}
static ssize_t lineevent_read(struct file *file, char __user *buf,
size_t count, loff_t *f_ps)
{
struct lineevent_state *le = file->private_data;
return call_read_locked(file, buf, count, f_ps, le->gdev,
lineevent_read_unlocked);
}
static void lineevent_free(struct lineevent_state *le)
{
if (le->device_unregistered_nb.notifier_call)
blocking_notifier_chain_unregister(&le->gdev->device_notifier,
&le->device_unregistered_nb);
if (le->irq)
free_irq(le->irq, le);
if (le->desc)
gpiod_free(le->desc);
kfree(le->label);
gpio_device_put(le->gdev);
kfree(le);
}
static int lineevent_release(struct inode *inode, struct file *file)
{
lineevent_free(file->private_data);
return 0;
}
static long lineevent_ioctl_unlocked(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct lineevent_state *le = file->private_data;
void __user *ip = (void __user *)arg;
struct gpiohandle_data ghd;
if (!le->gdev->chip)
return -ENODEV;
/*
* We can get the value for an event line but not set it,
* because it is input by definition.
*/
if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
int val;
memset(&ghd, 0, sizeof(ghd));
val = gpiod_get_value_cansleep(le->desc);
if (val < 0)
return val;
ghd.values[0] = val;
if (copy_to_user(ip, &ghd, sizeof(ghd)))
return -EFAULT;
return 0;
}
return -EINVAL;
}
static long lineevent_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct lineevent_state *le = file->private_data;
return call_ioctl_locked(file, cmd, arg, le->gdev,
lineevent_ioctl_unlocked);
}
#ifdef CONFIG_COMPAT
static long lineevent_ioctl_compat(struct file *file, unsigned int cmd,
unsigned long arg)
{
return lineevent_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif
static const struct file_operations lineevent_fileops = {
.release = lineevent_release,
.read = lineevent_read,
.poll = lineevent_poll,
.owner = THIS_MODULE,
.llseek = noop_llseek,
.unlocked_ioctl = lineevent_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = lineevent_ioctl_compat,
#endif
};
static irqreturn_t lineevent_irq_thread(int irq, void *p)
{
struct lineevent_state *le = p;
struct gpioevent_data ge;
int ret;
/* Do not leak kernel stack to userspace */
memset(&ge, 0, sizeof(ge));
/*
* We may be running from a nested threaded interrupt in which case
* we didn't get the timestamp from lineevent_irq_handler().
*/
if (!le->timestamp)
ge.timestamp = ktime_get_ns();
else
ge.timestamp = le->timestamp;
if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
&& le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
int level = gpiod_get_value_cansleep(le->desc);
if (level)
/* Emit low-to-high event */
ge.id = GPIOEVENT_EVENT_RISING_EDGE;
else
/* Emit high-to-low event */
ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
} else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE) {
/* Emit low-to-high event */
ge.id = GPIOEVENT_EVENT_RISING_EDGE;
} else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
/* Emit high-to-low event */
ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
} else {
return IRQ_NONE;
}
ret = kfifo_in_spinlocked_noirqsave(&le->events, &ge,
1, &le->wait.lock);
if (ret)
wake_up_poll(&le->wait, EPOLLIN);
else
pr_debug_ratelimited("event FIFO is full - event dropped\n");
return IRQ_HANDLED;
}
static irqreturn_t lineevent_irq_handler(int irq, void *p)
{
struct lineevent_state *le = p;
/*
* Just store the timestamp in hardirq context so we get it as
* close in time as possible to the actual event.
*/
le->timestamp = ktime_get_ns();
return IRQ_WAKE_THREAD;
}
static int lineevent_create(struct gpio_device *gdev, void __user *ip)
{
struct gpioevent_request eventreq;
struct lineevent_state *le;
struct gpio_desc *desc;
struct file *file;
u32 offset;
u32 lflags;
u32 eflags;
int fd;
int ret;
int irq, irqflags = 0;
if (copy_from_user(&eventreq, ip, sizeof(eventreq)))
return -EFAULT;
offset = eventreq.lineoffset;
lflags = eventreq.handleflags;
eflags = eventreq.eventflags;
desc = gpiochip_get_desc(gdev->chip, offset);
if (IS_ERR(desc))
return PTR_ERR(desc);
/* Return an error if an unknown flag is set */
if ((lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) ||
(eflags & ~GPIOEVENT_REQUEST_VALID_FLAGS))
return -EINVAL;
/* This is just wrong: we don't look for events on output lines */
if ((lflags & GPIOHANDLE_REQUEST_OUTPUT) ||
(lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
(lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
return -EINVAL;
/* Only one bias flag can be set. */
if (((lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE) &&
(lflags & (GPIOHANDLE_REQUEST_BIAS_PULL_DOWN |
GPIOHANDLE_REQUEST_BIAS_PULL_UP))) ||
((lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN) &&
(lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)))
return -EINVAL;
le = kzalloc(sizeof(*le), GFP_KERNEL);
if (!le)
return -ENOMEM;
le->gdev = gpio_device_get(gdev);
if (eventreq.consumer_label[0] != '\0') {
/* label is only initialized if consumer_label is set */
le->label = kstrndup(eventreq.consumer_label,
sizeof(eventreq.consumer_label) - 1,
GFP_KERNEL);
if (!le->label) {
ret = -ENOMEM;
goto out_free_le;
}
}
ret = gpiod_request_user(desc, le->label);
if (ret)
goto out_free_le;
le->desc = desc;
le->eflags = eflags;
linehandle_flags_to_desc_flags(lflags, &desc->flags);
ret = gpiod_direction_input(desc);
if (ret)
goto out_free_le;
gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
irq = gpiod_to_irq(desc);
if (irq <= 0) {
ret = -ENODEV;
goto out_free_le;
}
if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE)
irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
irqflags |= IRQF_ONESHOT;
INIT_KFIFO(le->events);
init_waitqueue_head(&le->wait);
le->device_unregistered_nb.notifier_call = lineevent_unregistered_notify;
ret = blocking_notifier_chain_register(&gdev->device_notifier,
&le->device_unregistered_nb);
if (ret)
goto out_free_le;
/* Request a thread to read the events */
ret = request_threaded_irq(irq,
lineevent_irq_handler,
lineevent_irq_thread,
irqflags,
le->label,
le);
if (ret)
goto out_free_le;
le->irq = irq;
fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
if (fd < 0) {
ret = fd;
goto out_free_le;
}
file = anon_inode_getfile("gpio-event",
&lineevent_fileops,
le,
O_RDONLY | O_CLOEXEC);
if (IS_ERR(file)) {
ret = PTR_ERR(file);
goto out_put_unused_fd;
}
eventreq.fd = fd;
if (copy_to_user(ip, &eventreq, sizeof(eventreq))) {
/*
* fput() will trigger the release() callback, so do not go onto
* the regular error cleanup path here.
*/
fput(file);
put_unused_fd(fd);
return -EFAULT;
}
fd_install(fd, file);
return 0;
out_put_unused_fd:
put_unused_fd(fd);
out_free_le:
lineevent_free(le);
return ret;
}
static void gpio_v2_line_info_to_v1(struct gpio_v2_line_info *info_v2,
struct gpioline_info *info_v1)
{
u64 flagsv2 = info_v2->flags;
memcpy(info_v1->name, info_v2->name, sizeof(info_v1->name));
memcpy(info_v1->consumer, info_v2->consumer, sizeof(info_v1->consumer));
info_v1->line_offset = info_v2->offset;
info_v1->flags = 0;
if (flagsv2 & GPIO_V2_LINE_FLAG_USED)
info_v1->flags |= GPIOLINE_FLAG_KERNEL;
if (flagsv2 & GPIO_V2_LINE_FLAG_OUTPUT)
info_v1->flags |= GPIOLINE_FLAG_IS_OUT;
if (flagsv2 & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
info_v1->flags |= GPIOLINE_FLAG_ACTIVE_LOW;
if (flagsv2 & GPIO_V2_LINE_FLAG_OPEN_DRAIN)
info_v1->flags |= GPIOLINE_FLAG_OPEN_DRAIN;
if (flagsv2 & GPIO_V2_LINE_FLAG_OPEN_SOURCE)
info_v1->flags |= GPIOLINE_FLAG_OPEN_SOURCE;
if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_PULL_UP)
info_v1->flags |= GPIOLINE_FLAG_BIAS_PULL_UP;
if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN)
info_v1->flags |= GPIOLINE_FLAG_BIAS_PULL_DOWN;
if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_DISABLED)
info_v1->flags |= GPIOLINE_FLAG_BIAS_DISABLE;
}
static void gpio_v2_line_info_changed_to_v1(
struct gpio_v2_line_info_changed *lic_v2,
struct gpioline_info_changed *lic_v1)
{
memset(lic_v1, 0, sizeof(*lic_v1));
gpio_v2_line_info_to_v1(&lic_v2->info, &lic_v1->info);
lic_v1->timestamp = lic_v2->timestamp_ns;
lic_v1->event_type = lic_v2->event_type;
}
#endif /* CONFIG_GPIO_CDEV_V1 */
static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
struct gpio_v2_line_info *info)
{
struct gpio_chip *gc = desc->gdev->chip;
bool ok_for_pinctrl;
unsigned long flags;
u32 debounce_period_us;
unsigned int num_attrs = 0;
memset(info, 0, sizeof(*info));
info->offset = gpio_chip_hwgpio(desc);
/*
* pinctrl_gpio_can_use_line() takes a mutex, so it must be called
* before taking the spinlock.
*
* FIXME: find a non-racy way to retrieve this information. Maybe a
* lock common to both frameworks?
*/
ok_for_pinctrl =
pinctrl_gpio_can_use_line(gc->base + info->offset);
spin_lock_irqsave(&gpio_lock, flags);
if (desc->name)
strscpy(info->name, desc->name, sizeof(info->name));
if (desc->label)
strscpy(info->consumer, desc->label, sizeof(info->consumer));
/*
* Userspace only needs to know that the kernel is using this GPIO so
* that it can't use it.
*/
info->flags = 0;
if (test_bit(FLAG_REQUESTED, &desc->flags) ||
test_bit(FLAG_IS_HOGGED, &desc->flags) ||
test_bit(FLAG_USED_AS_IRQ, &desc->flags) ||
test_bit(FLAG_EXPORT, &desc->flags) ||
test_bit(FLAG_SYSFS, &desc->flags) ||
!gpiochip_line_is_valid(gc, info->offset) ||
!ok_for_pinctrl)
info->flags |= GPIO_V2_LINE_FLAG_USED;
if (test_bit(FLAG_IS_OUT, &desc->flags))
info->flags |= GPIO_V2_LINE_FLAG_OUTPUT;
else
info->flags |= GPIO_V2_LINE_FLAG_INPUT;
if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
info->flags |= GPIO_V2_LINE_FLAG_ACTIVE_LOW;
if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
info->flags |= GPIO_V2_LINE_FLAG_OPEN_DRAIN;
if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
info->flags |= GPIO_V2_LINE_FLAG_OPEN_SOURCE;
if (test_bit(FLAG_BIAS_DISABLE, &desc->flags))
info->flags |= GPIO_V2_LINE_FLAG_BIAS_DISABLED;
if (test_bit(FLAG_PULL_DOWN, &desc->flags))
info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN;
if (test_bit(FLAG_PULL_UP, &desc->flags))
info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_UP;
if (test_bit(FLAG_EDGE_RISING, &desc->flags))
info->flags |= GPIO_V2_LINE_FLAG_EDGE_RISING;
if (test_bit(FLAG_EDGE_FALLING, &desc->flags))
info->flags |= GPIO_V2_LINE_FLAG_EDGE_FALLING;
if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &desc->flags))
info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME;
else if (test_bit(FLAG_EVENT_CLOCK_HTE, &desc->flags))
info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE;
debounce_period_us = READ_ONCE(desc->debounce_period_us);
if (debounce_period_us) {
info->attrs[num_attrs].id = GPIO_V2_LINE_ATTR_ID_DEBOUNCE;
info->attrs[num_attrs].debounce_period_us = debounce_period_us;
num_attrs++;
}
info->num_attrs = num_attrs;
spin_unlock_irqrestore(&gpio_lock, flags);
}
struct gpio_chardev_data {
struct gpio_device *gdev;
wait_queue_head_t wait;
DECLARE_KFIFO(events, struct gpio_v2_line_info_changed, 32);
struct notifier_block lineinfo_changed_nb;
struct notifier_block device_unregistered_nb;
unsigned long *watched_lines;
#ifdef CONFIG_GPIO_CDEV_V1
atomic_t watch_abi_version;
#endif
};
static int chipinfo_get(struct gpio_chardev_data *cdev, void __user *ip)
{
struct gpio_device *gdev = cdev->gdev;
struct gpiochip_info chipinfo;
memset(&chipinfo, 0, sizeof(chipinfo));
strscpy(chipinfo.name, dev_name(&gdev->dev), sizeof(chipinfo.name));
strscpy(chipinfo.label, gdev->label, sizeof(chipinfo.label));
chipinfo.lines = gdev->ngpio;
if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
return -EFAULT;
return 0;
}
#ifdef CONFIG_GPIO_CDEV_V1
/*
* returns 0 if the versions match, else the previously selected ABI version
*/
static int lineinfo_ensure_abi_version(struct gpio_chardev_data *cdata,
unsigned int version)
{
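/*
* atomic_cmpxchg() stores @version only if no version has been selected
* yet (the stored value is still 0) and returns the old value, so the
* first watch request pins the ABI version for this file descriptor.
*/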
int abiv = atomic_cmpxchg(&cdata->watch_abi_version, 0, version);
if (abiv == version)
return 0;
return abiv;
}
static int lineinfo_get_v1(struct gpio_chardev_data *cdev, void __user *ip,
bool watch)
{
struct gpio_desc *desc;
struct gpioline_info lineinfo;
struct gpio_v2_line_info lineinfo_v2;
if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
return -EFAULT;
/* this doubles as a range check on line_offset */
desc = gpiochip_get_desc(cdev->gdev->chip, lineinfo.line_offset);
if (IS_ERR(desc))
return PTR_ERR(desc);
if (watch) {
if (lineinfo_ensure_abi_version(cdev, 1))
return -EPERM;
if (test_and_set_bit(lineinfo.line_offset, cdev->watched_lines))
return -EBUSY;
}
gpio_desc_to_lineinfo(desc, &lineinfo_v2);
gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);
if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
if (watch)
clear_bit(lineinfo.line_offset, cdev->watched_lines);
return -EFAULT;
}
return 0;
}
#endif
static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip,
bool watch)
{
struct gpio_desc *desc;
struct gpio_v2_line_info lineinfo;
if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
return -EFAULT;
if (memchr_inv(lineinfo.padding, 0, sizeof(lineinfo.padding)))
return -EINVAL;
desc = gpiochip_get_desc(cdev->gdev->chip, lineinfo.offset);
if (IS_ERR(desc))
return PTR_ERR(desc);
if (watch) {
#ifdef CONFIG_GPIO_CDEV_V1
if (lineinfo_ensure_abi_version(cdev, 2))
return -EPERM;
#endif
if (test_and_set_bit(lineinfo.offset, cdev->watched_lines))
return -EBUSY;
}
gpio_desc_to_lineinfo(desc, &lineinfo);
if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
if (watch)
clear_bit(lineinfo.offset, cdev->watched_lines);
return -EFAULT;
}
return 0;
}
static int lineinfo_unwatch(struct gpio_chardev_data *cdev, void __user *ip)
{
__u32 offset;
if (copy_from_user(&offset, ip, sizeof(offset)))
return -EFAULT;
if (offset >= cdev->gdev->ngpio)
return -EINVAL;
if (!test_and_clear_bit(offset, cdev->watched_lines))
return -EBUSY;
return 0;
}
/*
* gpio_ioctl() - ioctl handler for the GPIO chardev
*/
static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct gpio_chardev_data *cdev = file->private_data;
struct gpio_device *gdev = cdev->gdev;
void __user *ip = (void __user *)arg;
/* We fail any subsequent ioctl()s when the chip is gone */
if (!gdev->chip)
return -ENODEV;
/* Fill in the struct and pass to userspace */
switch (cmd) {
case GPIO_GET_CHIPINFO_IOCTL:
return chipinfo_get(cdev, ip);
#ifdef CONFIG_GPIO_CDEV_V1
case GPIO_GET_LINEHANDLE_IOCTL:
return linehandle_create(gdev, ip);
case GPIO_GET_LINEEVENT_IOCTL:
return lineevent_create(gdev, ip);
case GPIO_GET_LINEINFO_IOCTL:
return lineinfo_get_v1(cdev, ip, false);
case GPIO_GET_LINEINFO_WATCH_IOCTL:
return lineinfo_get_v1(cdev, ip, true);
#endif /* CONFIG_GPIO_CDEV_V1 */
case GPIO_V2_GET_LINEINFO_IOCTL:
return lineinfo_get(cdev, ip, false);
case GPIO_V2_GET_LINEINFO_WATCH_IOCTL:
return lineinfo_get(cdev, ip, true);
case GPIO_V2_GET_LINE_IOCTL:
return linereq_create(gdev, ip);
case GPIO_GET_LINEINFO_UNWATCH_IOCTL:
return lineinfo_unwatch(cdev, ip);
default:
return -EINVAL;
}
}
#ifdef CONFIG_COMPAT
static long gpio_ioctl_compat(struct file *file, unsigned int cmd,
unsigned long arg)
{
return gpio_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif
static int lineinfo_changed_notify(struct notifier_block *nb,
unsigned long action, void *data)
{
struct gpio_chardev_data *cdev =
container_of(nb, struct gpio_chardev_data, lineinfo_changed_nb);
struct gpio_v2_line_info_changed chg;
struct gpio_desc *desc = data;
int ret;
if (!test_bit(gpio_chip_hwgpio(desc), cdev->watched_lines))
return NOTIFY_DONE;
memset(&chg, 0, sizeof(chg));
chg.event_type = action;
chg.timestamp_ns = ktime_get_ns();
gpio_desc_to_lineinfo(desc, &chg.info);
ret = kfifo_in_spinlocked(&cdev->events, &chg, 1, &cdev->wait.lock);
if (ret)
wake_up_poll(&cdev->wait, EPOLLIN);
else
pr_debug_ratelimited("lineinfo event FIFO is full - event dropped\n");
return NOTIFY_OK;
}
static int gpio_device_unregistered_notify(struct notifier_block *nb,
unsigned long action, void *data)
{
struct gpio_chardev_data *cdev = container_of(nb,
struct gpio_chardev_data,
device_unregistered_nb);
wake_up_poll(&cdev->wait, EPOLLIN | EPOLLERR);
return NOTIFY_OK;
}
static __poll_t lineinfo_watch_poll_unlocked(struct file *file,
struct poll_table_struct *pollt)
{
struct gpio_chardev_data *cdev = file->private_data;
__poll_t events = 0;
if (!cdev->gdev->chip)
return EPOLLHUP | EPOLLERR;
poll_wait(file, &cdev->wait, pollt);
if (!kfifo_is_empty_spinlocked_noirqsave(&cdev->events,
&cdev->wait.lock))
events = EPOLLIN | EPOLLRDNORM;
return events;
}
static __poll_t lineinfo_watch_poll(struct file *file,
struct poll_table_struct *pollt)
{
struct gpio_chardev_data *cdev = file->private_data;
return call_poll_locked(file, pollt, cdev->gdev,
lineinfo_watch_poll_unlocked);
}
static ssize_t lineinfo_watch_read_unlocked(struct file *file, char __user *buf,
size_t count, loff_t *off)
{
struct gpio_chardev_data *cdev = file->private_data;
struct gpio_v2_line_info_changed event;
ssize_t bytes_read = 0;
int ret;
size_t event_size;
if (!cdev->gdev->chip)
return -ENODEV;
#ifndef CONFIG_GPIO_CDEV_V1
event_size = sizeof(struct gpio_v2_line_info_changed);
if (count < event_size)
return -EINVAL;
#endif
do {
spin_lock(&cdev->wait.lock);
if (kfifo_is_empty(&cdev->events)) {
if (bytes_read) {
spin_unlock(&cdev->wait.lock);
return bytes_read;
}
if (file->f_flags & O_NONBLOCK) {
spin_unlock(&cdev->wait.lock);
return -EAGAIN;
}
ret = wait_event_interruptible_locked(cdev->wait,
!kfifo_is_empty(&cdev->events));
if (ret) {
spin_unlock(&cdev->wait.lock);
return ret;
}
}
#ifdef CONFIG_GPIO_CDEV_V1
/* must be after kfifo check so watch_abi_version is set */
if (atomic_read(&cdev->watch_abi_version) == 2)
event_size = sizeof(struct gpio_v2_line_info_changed);
else
event_size = sizeof(struct gpioline_info_changed);
if (count < event_size) {
spin_unlock(&cdev->wait.lock);
return -EINVAL;
}
#endif
ret = kfifo_out(&cdev->events, &event, 1);
spin_unlock(&cdev->wait.lock);
if (ret != 1) {
/* We should never get here. See lineevent_read(). */
ret = -EIO;
break;
}
#ifdef CONFIG_GPIO_CDEV_V1
if (event_size == sizeof(struct gpio_v2_line_info_changed)) {
if (copy_to_user(buf + bytes_read, &event, event_size))
return -EFAULT;
} else {
struct gpioline_info_changed event_v1;
gpio_v2_line_info_changed_to_v1(&event, &event_v1);
if (copy_to_user(buf + bytes_read, &event_v1,
event_size))
return -EFAULT;
}
#else
if (copy_to_user(buf + bytes_read, &event, event_size))
return -EFAULT;
#endif
bytes_read += event_size;
} while (count >= bytes_read + sizeof(event));
return bytes_read;
}
static ssize_t lineinfo_watch_read(struct file *file, char __user *buf,
size_t count, loff_t *off)
{
struct gpio_chardev_data *cdev = file->private_data;
return call_read_locked(file, buf, count, off, cdev->gdev,
lineinfo_watch_read_unlocked);
}
/**
* gpio_chrdev_open() - open the chardev for ioctl operations
* @inode: inode for this chardev
* @file: file struct for storing private data
* Returns 0 on success
*/
static int gpio_chrdev_open(struct inode *inode, struct file *file)
{
struct gpio_device *gdev = container_of(inode->i_cdev,
struct gpio_device, chrdev);
struct gpio_chardev_data *cdev;
int ret = -ENOMEM;
down_read(&gdev->sem);
/* Fail on open if the backing gpiochip is gone */
if (!gdev->chip) {
ret = -ENODEV;
goto out_unlock;
}
cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
if (!cdev)
goto out_unlock;
cdev->watched_lines = bitmap_zalloc(gdev->chip->ngpio, GFP_KERNEL);
if (!cdev->watched_lines)
goto out_free_cdev;
init_waitqueue_head(&cdev->wait);
INIT_KFIFO(cdev->events);
cdev->gdev = gpio_device_get(gdev);
cdev->lineinfo_changed_nb.notifier_call = lineinfo_changed_notify;
ret = blocking_notifier_chain_register(&gdev->line_state_notifier,
&cdev->lineinfo_changed_nb);
if (ret)
goto out_free_bitmap;
cdev->device_unregistered_nb.notifier_call =
gpio_device_unregistered_notify;
ret = blocking_notifier_chain_register(&gdev->device_notifier,
&cdev->device_unregistered_nb);
if (ret)
goto out_unregister_line_notifier;
file->private_data = cdev;
ret = nonseekable_open(inode, file);
if (ret)
goto out_unregister_device_notifier;
up_read(&gdev->sem);
return ret;
out_unregister_device_notifier:
blocking_notifier_chain_unregister(&gdev->device_notifier,
&cdev->device_unregistered_nb);
out_unregister_line_notifier:
blocking_notifier_chain_unregister(&gdev->line_state_notifier,
&cdev->lineinfo_changed_nb);
out_free_bitmap:
gpio_device_put(gdev);
bitmap_free(cdev->watched_lines);
out_free_cdev:
kfree(cdev);
out_unlock:
up_read(&gdev->sem);
return ret;
}
/**
* gpio_chrdev_release() - close chardev after ioctl operations
* @inode: inode for this chardev
* @file: file struct for storing private data
* Returns 0 on success
*/
static int gpio_chrdev_release(struct inode *inode, struct file *file)
{
struct gpio_chardev_data *cdev = file->private_data;
struct gpio_device *gdev = cdev->gdev;
bitmap_free(cdev->watched_lines);
blocking_notifier_chain_unregister(&gdev->device_notifier,
&cdev->device_unregistered_nb);
blocking_notifier_chain_unregister(&gdev->line_state_notifier,
&cdev->lineinfo_changed_nb);
gpio_device_put(gdev);
kfree(cdev);
return 0;
}
static const struct file_operations gpio_fileops = {
.release = gpio_chrdev_release,
.open = gpio_chrdev_open,
.poll = lineinfo_watch_poll,
.read = lineinfo_watch_read,
.owner = THIS_MODULE,
.llseek = no_llseek,
.unlocked_ioctl = gpio_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = gpio_ioctl_compat,
#endif
};
int gpiolib_cdev_register(struct gpio_device *gdev, dev_t devt)
{
int ret;
cdev_init(&gdev->chrdev, &gpio_fileops);
gdev->chrdev.owner = THIS_MODULE;
gdev->dev.devt = MKDEV(MAJOR(devt), gdev->id);
ret = cdev_device_add(&gdev->chrdev, &gdev->dev);
if (ret)
return ret;
chip_dbg(gdev->chip, "added GPIO chardev (%d:%d)\n",
MAJOR(devt), gdev->id);
return 0;
}
void gpiolib_cdev_unregister(struct gpio_device *gdev)
{
cdev_device_del(&gdev->chrdev, &gdev->dev);
blocking_notifier_call_chain(&gdev->device_notifier, 0, NULL);
}
| linux-master | drivers/gpio/gpiolib-cdev.c |
// SPDX-License-Identifier: GPL-2.0+
// Copyright (C) 2008-2009 The GameCube Linux Team
// Copyright (C) 2008,2009 Albert Herranz
// Copyright (C) 2017-2018 Jonathan Neuschäfer
//
// Nintendo Wii (Hollywood) GPIO driver
#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
/*
* Register names and offsets courtesy of WiiBrew:
* https://wiibrew.org/wiki/Hardware/Hollywood_GPIOs
*
* Note that for most registers, there are two versions:
* - HW_GPIOB_* Is always accessible by the Broadway PowerPC core, but does
* not always give access to all GPIO lines
* - HW_GPIO_* Is only accessible by the Broadway PowerPC core if the memory
* firewall (AHBPROT) in the Hollywood chipset has been configured to allow
* such access.
*
* The ownership of each GPIO line can be configured in the HW_GPIO_OWNER
* register: A one bit configures the line for access via the HW_GPIOB_*
* registers, a zero bit indicates access via HW_GPIO_*. This driver uses
* HW_GPIOB_*.
*/
#define HW_GPIOB_OUT 0x00
#define HW_GPIOB_DIR 0x04
#define HW_GPIOB_IN 0x08
#define HW_GPIOB_INTLVL 0x0c
#define HW_GPIOB_INTFLAG 0x10
#define HW_GPIOB_INTMASK 0x14
#define HW_GPIOB_INMIR 0x18
#define HW_GPIO_ENABLE 0x1c
#define HW_GPIO_OUT 0x20
#define HW_GPIO_DIR 0x24
#define HW_GPIO_IN 0x28
#define HW_GPIO_INTLVL 0x2c
#define HW_GPIO_INTFLAG 0x30
#define HW_GPIO_INTMASK 0x34
#define HW_GPIO_INMIR 0x38
#define HW_GPIO_OWNER 0x3c
struct hlwd_gpio {
struct gpio_chip gpioc;
struct device *dev;
void __iomem *regs;
int irq;
u32 edge_emulation;
u32 rising_edge, falling_edge;
};
static void hlwd_gpio_irqhandler(struct irq_desc *desc)
{
struct hlwd_gpio *hlwd =
gpiochip_get_data(irq_desc_get_handler_data(desc));
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned long flags;
unsigned long pending;
int hwirq;
u32 emulated_pending;
raw_spin_lock_irqsave(&hlwd->gpioc.bgpio_lock, flags);
pending = ioread32be(hlwd->regs + HW_GPIOB_INTFLAG);
pending &= ioread32be(hlwd->regs + HW_GPIOB_INTMASK);
/* Treat interrupts due to edge trigger emulation separately */
emulated_pending = hlwd->edge_emulation & pending;
pending &= ~emulated_pending;
if (emulated_pending) {
u32 level, rising, falling;
level = ioread32be(hlwd->regs + HW_GPIOB_INTLVL);
rising = level & emulated_pending;
falling = ~level & emulated_pending;
/* Invert the levels */
iowrite32be(level ^ emulated_pending,
hlwd->regs + HW_GPIOB_INTLVL);
/* Ack all emulated-edge interrupts */
iowrite32be(emulated_pending, hlwd->regs + HW_GPIOB_INTFLAG);
/* Signal interrupts only on the correct edge */
rising &= hlwd->rising_edge;
falling &= hlwd->falling_edge;
/* Mark emulated interrupts as pending */
pending |= rising | falling;
}
raw_spin_unlock_irqrestore(&hlwd->gpioc.bgpio_lock, flags);
chained_irq_enter(chip, desc);
for_each_set_bit(hwirq, &pending, 32)
generic_handle_domain_irq(hlwd->gpioc.irq.domain, hwirq);
chained_irq_exit(chip, desc);
}
static void hlwd_gpio_irq_ack(struct irq_data *data)
{
struct hlwd_gpio *hlwd =
gpiochip_get_data(irq_data_get_irq_chip_data(data));
iowrite32be(BIT(data->hwirq), hlwd->regs + HW_GPIOB_INTFLAG);
}
static void hlwd_gpio_irq_mask(struct irq_data *data)
{
struct hlwd_gpio *hlwd =
gpiochip_get_data(irq_data_get_irq_chip_data(data));
unsigned long flags;
u32 mask;
raw_spin_lock_irqsave(&hlwd->gpioc.bgpio_lock, flags);
mask = ioread32be(hlwd->regs + HW_GPIOB_INTMASK);
mask &= ~BIT(data->hwirq);
iowrite32be(mask, hlwd->regs + HW_GPIOB_INTMASK);
raw_spin_unlock_irqrestore(&hlwd->gpioc.bgpio_lock, flags);
gpiochip_disable_irq(&hlwd->gpioc, irqd_to_hwirq(data));
}
static void hlwd_gpio_irq_unmask(struct irq_data *data)
{
struct hlwd_gpio *hlwd =
gpiochip_get_data(irq_data_get_irq_chip_data(data));
unsigned long flags;
u32 mask;
gpiochip_enable_irq(&hlwd->gpioc, irqd_to_hwirq(data));
raw_spin_lock_irqsave(&hlwd->gpioc.bgpio_lock, flags);
mask = ioread32be(hlwd->regs + HW_GPIOB_INTMASK);
mask |= BIT(data->hwirq);
iowrite32be(mask, hlwd->regs + HW_GPIOB_INTMASK);
raw_spin_unlock_irqrestore(&hlwd->gpioc.bgpio_lock, flags);
}
static void hlwd_gpio_irq_enable(struct irq_data *data)
{
hlwd_gpio_irq_ack(data);
hlwd_gpio_irq_unmask(data);
}
static void hlwd_gpio_irq_setup_emulation(struct hlwd_gpio *hlwd, int hwirq,
unsigned int flow_type)
{
u32 level, state;
/* Set the trigger level to the inactive level */
level = ioread32be(hlwd->regs + HW_GPIOB_INTLVL);
state = ioread32be(hlwd->regs + HW_GPIOB_IN) & BIT(hwirq);
level &= ~BIT(hwirq);
level |= state ^ BIT(hwirq);
iowrite32be(level, hlwd->regs + HW_GPIOB_INTLVL);
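/*
* e.g. a line currently reading high gets a low trigger level, so the
* interrupt fires on the next falling transition; the chained handler
* then inverts the level again to catch the opposite edge (see
* hlwd_gpio_irqhandler()).
*/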
hlwd->edge_emulation |= BIT(hwirq);
hlwd->rising_edge &= ~BIT(hwirq);
hlwd->falling_edge &= ~BIT(hwirq);
if (flow_type & IRQ_TYPE_EDGE_RISING)
hlwd->rising_edge |= BIT(hwirq);
if (flow_type & IRQ_TYPE_EDGE_FALLING)
hlwd->falling_edge |= BIT(hwirq);
}
static int hlwd_gpio_irq_set_type(struct irq_data *data, unsigned int flow_type)
{
struct hlwd_gpio *hlwd =
gpiochip_get_data(irq_data_get_irq_chip_data(data));
unsigned long flags;
u32 level;
raw_spin_lock_irqsave(&hlwd->gpioc.bgpio_lock, flags);
hlwd->edge_emulation &= ~BIT(data->hwirq);
switch (flow_type) {
case IRQ_TYPE_LEVEL_HIGH:
level = ioread32be(hlwd->regs + HW_GPIOB_INTLVL);
level |= BIT(data->hwirq);
iowrite32be(level, hlwd->regs + HW_GPIOB_INTLVL);
break;
case IRQ_TYPE_LEVEL_LOW:
level = ioread32be(hlwd->regs + HW_GPIOB_INTLVL);
level &= ~BIT(data->hwirq);
iowrite32be(level, hlwd->regs + HW_GPIOB_INTLVL);
break;
case IRQ_TYPE_EDGE_RISING:
case IRQ_TYPE_EDGE_FALLING:
case IRQ_TYPE_EDGE_BOTH:
hlwd_gpio_irq_setup_emulation(hlwd, data->hwirq, flow_type);
break;
default:
raw_spin_unlock_irqrestore(&hlwd->gpioc.bgpio_lock, flags);
return -EINVAL;
}
raw_spin_unlock_irqrestore(&hlwd->gpioc.bgpio_lock, flags);
return 0;
}
static void hlwd_gpio_irq_print_chip(struct irq_data *data, struct seq_file *p)
{
struct hlwd_gpio *hlwd =
gpiochip_get_data(irq_data_get_irq_chip_data(data));
seq_puts(p, dev_name(hlwd->dev));
}
static const struct irq_chip hlwd_gpio_irq_chip = {
.irq_mask = hlwd_gpio_irq_mask,
.irq_unmask = hlwd_gpio_irq_unmask,
.irq_enable = hlwd_gpio_irq_enable,
.irq_set_type = hlwd_gpio_irq_set_type,
.irq_print_chip = hlwd_gpio_irq_print_chip,
.flags = IRQCHIP_IMMUTABLE,
GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static int hlwd_gpio_probe(struct platform_device *pdev)
{
struct hlwd_gpio *hlwd;
u32 ngpios;
int res;
hlwd = devm_kzalloc(&pdev->dev, sizeof(*hlwd), GFP_KERNEL);
if (!hlwd)
return -ENOMEM;
hlwd->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hlwd->regs))
return PTR_ERR(hlwd->regs);
hlwd->dev = &pdev->dev;
/*
* Claim all GPIOs using the OWNER register. This will not work on
* systems where the AHBPROT memory firewall hasn't been configured to
* permit PPC access to HW_GPIO_*.
*
* Note that this has to happen before bgpio_init() reads the
* HW_GPIOB_OUT and HW_GPIOB_DIR registers, because otherwise it reads
* the wrong values.
*/
iowrite32be(0xffffffff, hlwd->regs + HW_GPIO_OWNER);
res = bgpio_init(&hlwd->gpioc, &pdev->dev, 4,
hlwd->regs + HW_GPIOB_IN, hlwd->regs + HW_GPIOB_OUT,
NULL, hlwd->regs + HW_GPIOB_DIR, NULL,
BGPIOF_BIG_ENDIAN_BYTE_ORDER);
if (res < 0) {
dev_warn(&pdev->dev, "bgpio_init failed: %d\n", res);
return res;
}
res = of_property_read_u32(pdev->dev.of_node, "ngpios", &ngpios);
if (res)
ngpios = 32;
hlwd->gpioc.ngpio = ngpios;
/* Mask and ack all interrupts */
iowrite32be(0, hlwd->regs + HW_GPIOB_INTMASK);
iowrite32be(0xffffffff, hlwd->regs + HW_GPIOB_INTFLAG);
/*
* If this GPIO controller is not marked as an interrupt controller in
* the DT, skip interrupt support.
*/
if (of_property_read_bool(pdev->dev.of_node, "interrupt-controller")) {
struct gpio_irq_chip *girq;
hlwd->irq = platform_get_irq(pdev, 0);
if (hlwd->irq < 0) {
dev_info(&pdev->dev, "platform_get_irq returned %d\n",
hlwd->irq);
return hlwd->irq;
}
girq = &hlwd->gpioc.irq;
gpio_irq_chip_set_chip(girq, &hlwd_gpio_irq_chip);
girq->parent_handler = hlwd_gpio_irqhandler;
girq->num_parents = 1;
girq->parents = devm_kcalloc(&pdev->dev, 1,
sizeof(*girq->parents),
GFP_KERNEL);
if (!girq->parents)
return -ENOMEM;
girq->parents[0] = hlwd->irq;
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_level_irq;
}
return devm_gpiochip_add_data(&pdev->dev, &hlwd->gpioc, hlwd);
}
static const struct of_device_id hlwd_gpio_match[] = {
{ .compatible = "nintendo,hollywood-gpio", },
{},
};
MODULE_DEVICE_TABLE(of, hlwd_gpio_match);
static struct platform_driver hlwd_gpio_driver = {
.driver = {
.name = "gpio-hlwd",
.of_match_table = hlwd_gpio_match,
},
.probe = hlwd_gpio_probe,
};
module_platform_driver(hlwd_gpio_driver);
MODULE_AUTHOR("Jonathan Neuschäfer <[email protected]>");
MODULE_DESCRIPTION("Nintendo Wii GPIO driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpio/gpio-hlwd.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support to GPOs on ROHM BD71815
* Copyright 2021 ROHM Semiconductors.
* Author: Matti Vaittinen <[email protected]>
*
* Copyright 2014 Embest Technology Co. Ltd. Inc.
* Author: [email protected]
*/
#include <linux/gpio/driver.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
/* For the BD71815 register definitions */
#include <linux/mfd/rohm-bd71815.h>
struct bd71815_gpio {
/* chip.parent points to the MFD which provides the DT node and regmap */
struct gpio_chip chip;
/* dev points to the platform device for devm allocations and prints */
struct device *dev;
struct regmap *regmap;
};
static int bd71815gpo_get(struct gpio_chip *chip, unsigned int offset)
{
struct bd71815_gpio *bd71815 = gpiochip_get_data(chip);
int ret, val;
ret = regmap_read(bd71815->regmap, BD71815_REG_GPO, &val);
if (ret)
return ret;
return (val >> offset) & 1;
}
static void bd71815gpo_set(struct gpio_chip *chip, unsigned int offset,
int value)
{
struct bd71815_gpio *bd71815 = gpiochip_get_data(chip);
int ret, bit;
bit = BIT(offset);
if (value)
ret = regmap_set_bits(bd71815->regmap, BD71815_REG_GPO, bit);
else
ret = regmap_clear_bits(bd71815->regmap, BD71815_REG_GPO, bit);
if (ret)
dev_warn(bd71815->dev, "failed to toggle GPO\n");
}
static int bd71815_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
unsigned long config)
{
struct bd71815_gpio *bdgpio = gpiochip_get_data(chip);
switch (pinconf_to_config_param(config)) {
case PIN_CONFIG_DRIVE_OPEN_DRAIN:
return regmap_update_bits(bdgpio->regmap,
BD71815_REG_GPO,
BD71815_GPIO_DRIVE_MASK << offset,
BD71815_GPIO_OPEN_DRAIN << offset);
case PIN_CONFIG_DRIVE_PUSH_PULL:
return regmap_update_bits(bdgpio->regmap,
BD71815_REG_GPO,
BD71815_GPIO_DRIVE_MASK << offset,
BD71815_GPIO_CMOS << offset);
default:
break;
}
return -ENOTSUPP;
}
/* BD71815 GPIO is actually GPO */
static int bd71815gpo_direction_get(struct gpio_chip *gc, unsigned int offset)
{
return GPIO_LINE_DIRECTION_OUT;
}
/* Template for GPIO chip */
static const struct gpio_chip bd71815gpo_chip = {
.label = "bd71815",
.owner = THIS_MODULE,
.get = bd71815gpo_get,
.get_direction = bd71815gpo_direction_get,
.set = bd71815gpo_set,
.set_config = bd71815_gpio_set_config,
.can_sleep = true,
};
#define BD71815_TWO_GPIOS GENMASK(1, 0)
#define BD71815_ONE_GPIO BIT(0)
/*
* Sigh. The BD71815 and BD71817 were originally designed to support two GPO
* pins. At some point it was noticed that the second GPO pin, which is the
* E5 pin located at the center of the IC, is hard to use on a PCB (due to
* its location). It
* was decided to not promote this second GPO and the pin is marked as GND in
* the datasheet. The functionality is still there though! I guess driving a GPO
* connected to the ground is a bad idea. Thus we do not support it by default.
* OTOH - the original driver written by colleagues at Embest did support
* controlling this second GPO. It is thus possible this is used in some of the
* products.
*
* This driver does not by default support configuring this second GPO
* but allows using it by providing the DT property
* "rohm,enable-hidden-gpo".
*/
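/*
* Illustrative only: a board that really uses the second GPO would add
* the property below to the PMIC device tree node (the exact node
* placement is an assumption here, not taken from a real board file):
*
*	rohm,enable-hidden-gpo;
*/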
static int bd71815_init_valid_mask(struct gpio_chip *gc,
unsigned long *valid_mask,
unsigned int ngpios)
{
if (ngpios != 2)
return 0;
if (gc->parent && device_property_present(gc->parent,
"rohm,enable-hidden-gpo"))
*valid_mask = BD71815_TWO_GPIOS;
else
*valid_mask = BD71815_ONE_GPIO;
return 0;
}
static int gpo_bd71815_probe(struct platform_device *pdev)
{
struct bd71815_gpio *g;
struct device *parent, *dev;
/*
* Bind devm lifetime to this platform device => use dev for devm.
* also the prints should originate from this device.
*/
dev = &pdev->dev;
/* The device-tree and regmap come from MFD => use parent for that */
parent = dev->parent;
g = devm_kzalloc(dev, sizeof(*g), GFP_KERNEL);
if (!g)
return -ENOMEM;
g->chip = bd71815gpo_chip;
/*
* FIXME: As of this writing, the sysfs interface for GPIO control does
* not respect the valid_mask. Do not trust it, but rather set the ngpios
* to 1 if "rohm,enable-hidden-gpo" is not given.
*
* This check can be removed later if the sysfs export is fixed and
* if the fix is backported.
*
* For now it is safest to just set the ngpios though.
*/
if (device_property_present(parent, "rohm,enable-hidden-gpo"))
g->chip.ngpio = 2;
else
g->chip.ngpio = 1;
g->chip.init_valid_mask = bd71815_init_valid_mask;
g->chip.base = -1;
g->chip.parent = parent;
g->regmap = dev_get_regmap(parent, NULL);
g->dev = dev;
return devm_gpiochip_add_data(dev, &g->chip, g);
}
static struct platform_driver gpo_bd71815_driver = {
.driver = {
.name = "bd71815-gpo",
},
.probe = gpo_bd71815_probe,
};
module_platform_driver(gpo_bd71815_driver);
MODULE_ALIAS("platform:bd71815-gpo");
MODULE_AUTHOR("Matti Vaittinen <[email protected]>");
MODULE_AUTHOR("Peter Yang <[email protected]>");
MODULE_DESCRIPTION("GPO interface for BD71815");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpio/gpio-bd71815.c |
// SPDX-License-Identifier: GPL-2.0
/*
* TQ-Systems TQMx86 PLD GPIO driver
*
* Based on vendor driver by:
* Vadim V.Vlasov <[email protected]>
*/
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/gpio/driver.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#define TQMX86_NGPIO 8
#define TQMX86_NGPO 4 /* 0-3 - output */
#define TQMX86_NGPI 4 /* 4-7 - input */
#define TQMX86_DIR_INPUT_MASK 0xf0 /* 0-3 - output, 4-7 - input */
#define TQMX86_GPIODD 0 /* GPIO Data Direction Register */
#define TQMX86_GPIOD 1 /* GPIO Data Register */
#define TQMX86_GPIIC 3 /* GPI Interrupt Configuration Register */
#define TQMX86_GPIIS 4 /* GPI Interrupt Status Register */
#define TQMX86_GPII_FALLING BIT(0)
#define TQMX86_GPII_RISING BIT(1)
#define TQMX86_GPII_MASK (BIT(0) | BIT(1))
#define TQMX86_GPII_BITS 2
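/*
* Each GPI (input) line has a two-bit trigger field in the GPIIC
* register: bit 0 enables falling-edge and bit 1 rising-edge interrupts.
* The irq callbacks below locate a line's field with
* (hwirq - TQMX86_NGPO) * TQMX86_GPII_BITS, since the four GPIs follow
* the four GPOs in the hwirq numbering.
*/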
struct tqmx86_gpio_data {
struct gpio_chip chip;
void __iomem *io_base;
int irq;
raw_spinlock_t spinlock;
u8 irq_type[TQMX86_NGPI];
};
static u8 tqmx86_gpio_read(struct tqmx86_gpio_data *gd, unsigned int reg)
{
return ioread8(gd->io_base + reg);
}
static void tqmx86_gpio_write(struct tqmx86_gpio_data *gd, u8 val,
unsigned int reg)
{
iowrite8(val, gd->io_base + reg);
}
static int tqmx86_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip);
return !!(tqmx86_gpio_read(gpio, TQMX86_GPIOD) & BIT(offset));
}
static void tqmx86_gpio_set(struct gpio_chip *chip, unsigned int offset,
int value)
{
struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip);
unsigned long flags;
u8 val;
raw_spin_lock_irqsave(&gpio->spinlock, flags);
val = tqmx86_gpio_read(gpio, TQMX86_GPIOD);
if (value)
val |= BIT(offset);
else
val &= ~BIT(offset);
tqmx86_gpio_write(gpio, val, TQMX86_GPIOD);
raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
}
static int tqmx86_gpio_direction_input(struct gpio_chip *chip,
unsigned int offset)
{
/* Direction cannot be changed; just validate that the line is an input */
if (BIT(offset) & TQMX86_DIR_INPUT_MASK)
return 0;
else
return -EINVAL;
}
static int tqmx86_gpio_direction_output(struct gpio_chip *chip,
unsigned int offset,
int value)
{
/* Direction cannot be changed; just validate that the line is an output */
if (BIT(offset) & TQMX86_DIR_INPUT_MASK)
return -EINVAL;
tqmx86_gpio_set(chip, offset, value);
return 0;
}
static int tqmx86_gpio_get_direction(struct gpio_chip *chip,
unsigned int offset)
{
if (TQMX86_DIR_INPUT_MASK & BIT(offset))
return GPIO_LINE_DIRECTION_IN;
return GPIO_LINE_DIRECTION_OUT;
}
static void tqmx86_gpio_irq_mask(struct irq_data *data)
{
unsigned int offset = (data->hwirq - TQMX86_NGPO);
struct tqmx86_gpio_data *gpio = gpiochip_get_data(
irq_data_get_irq_chip_data(data));
unsigned long flags;
u8 gpiic, mask;
mask = TQMX86_GPII_MASK << (offset * TQMX86_GPII_BITS);
raw_spin_lock_irqsave(&gpio->spinlock, flags);
gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC);
gpiic &= ~mask;
tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC);
raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
gpiochip_disable_irq(&gpio->chip, irqd_to_hwirq(data));
}
static void tqmx86_gpio_irq_unmask(struct irq_data *data)
{
unsigned int offset = (data->hwirq - TQMX86_NGPO);
struct tqmx86_gpio_data *gpio = gpiochip_get_data(
irq_data_get_irq_chip_data(data));
unsigned long flags;
u8 gpiic, mask;
mask = TQMX86_GPII_MASK << (offset * TQMX86_GPII_BITS);
gpiochip_enable_irq(&gpio->chip, irqd_to_hwirq(data));
raw_spin_lock_irqsave(&gpio->spinlock, flags);
gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC);
gpiic &= ~mask;
gpiic |= gpio->irq_type[offset] << (offset * TQMX86_GPII_BITS);
tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC);
raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
}
static int tqmx86_gpio_irq_set_type(struct irq_data *data, unsigned int type)
{
struct tqmx86_gpio_data *gpio = gpiochip_get_data(
irq_data_get_irq_chip_data(data));
unsigned int offset = (data->hwirq - TQMX86_NGPO);
unsigned int edge_type = type & IRQF_TRIGGER_MASK;
unsigned long flags;
u8 new_type, gpiic;
switch (edge_type) {
case IRQ_TYPE_EDGE_RISING:
new_type = TQMX86_GPII_RISING;
break;
case IRQ_TYPE_EDGE_FALLING:
new_type = TQMX86_GPII_FALLING;
break;
case IRQ_TYPE_EDGE_BOTH:
new_type = TQMX86_GPII_FALLING | TQMX86_GPII_RISING;
break;
default:
return -EINVAL; /* not supported */
}
gpio->irq_type[offset] = new_type;
raw_spin_lock_irqsave(&gpio->spinlock, flags);
gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC);
gpiic &= ~((TQMX86_GPII_MASK) << (offset * TQMX86_GPII_BITS));
gpiic |= new_type << (offset * TQMX86_GPII_BITS);
tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC);
raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
return 0;
}
static void tqmx86_gpio_irq_handler(struct irq_desc *desc)
{
struct gpio_chip *chip = irq_desc_get_handler_data(desc);
struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip);
struct irq_chip *irq_chip = irq_desc_get_chip(desc);
unsigned long irq_bits;
int i = 0;
u8 irq_status;
chained_irq_enter(irq_chip, desc);
irq_status = tqmx86_gpio_read(gpio, TQMX86_GPIIS);
tqmx86_gpio_write(gpio, irq_status, TQMX86_GPIIS);
irq_bits = irq_status;
for_each_set_bit(i, &irq_bits, TQMX86_NGPI)
generic_handle_domain_irq(gpio->chip.irq.domain,
i + TQMX86_NGPO);
chained_irq_exit(irq_chip, desc);
}
/* Minimal runtime PM is needed by the IRQ subsystem */
static int __maybe_unused tqmx86_gpio_runtime_suspend(struct device *dev)
{
return 0;
}
static int __maybe_unused tqmx86_gpio_runtime_resume(struct device *dev)
{
return 0;
}
static const struct dev_pm_ops tqmx86_gpio_dev_pm_ops = {
SET_RUNTIME_PM_OPS(tqmx86_gpio_runtime_suspend,
tqmx86_gpio_runtime_resume, NULL)
};
static void tqmx86_init_irq_valid_mask(struct gpio_chip *chip,
unsigned long *valid_mask,
unsigned int ngpios)
{
/* Only GPIOs 4-7 are valid for interrupts. Clear the others */
clear_bit(0, valid_mask);
clear_bit(1, valid_mask);
clear_bit(2, valid_mask);
clear_bit(3, valid_mask);
}
static void tqmx86_gpio_irq_print_chip(struct irq_data *d, struct seq_file *p)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
seq_puts(p, gc->label);
}
static const struct irq_chip tqmx86_gpio_irq_chip = {
.irq_mask = tqmx86_gpio_irq_mask,
.irq_unmask = tqmx86_gpio_irq_unmask,
.irq_set_type = tqmx86_gpio_irq_set_type,
.irq_print_chip = tqmx86_gpio_irq_print_chip,
.flags = IRQCHIP_IMMUTABLE,
GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static int tqmx86_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct tqmx86_gpio_data *gpio;
struct gpio_chip *chip;
struct gpio_irq_chip *girq;
void __iomem *io_base;
struct resource *res;
int ret, irq;
irq = platform_get_irq_optional(pdev, 0);
if (irq < 0 && irq != -ENXIO)
return irq;
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (!res) {
dev_err(&pdev->dev, "Cannot get I/O\n");
return -ENODEV;
}
io_base = devm_ioport_map(&pdev->dev, res->start, resource_size(res));
if (!io_base)
return -ENOMEM;
gpio = devm_kzalloc(dev, sizeof(*gpio), GFP_KERNEL);
if (!gpio)
return -ENOMEM;
raw_spin_lock_init(&gpio->spinlock);
gpio->io_base = io_base;
tqmx86_gpio_write(gpio, (u8)~TQMX86_DIR_INPUT_MASK, TQMX86_GPIODD);
chip = &gpio->chip;
chip->label = "gpio-tqmx86";
chip->owner = THIS_MODULE;
chip->can_sleep = false;
chip->base = -1;
chip->direction_input = tqmx86_gpio_direction_input;
chip->direction_output = tqmx86_gpio_direction_output;
chip->get_direction = tqmx86_gpio_get_direction;
chip->get = tqmx86_gpio_get;
chip->set = tqmx86_gpio_set;
chip->ngpio = TQMX86_NGPIO;
chip->parent = pdev->dev.parent;
pm_runtime_enable(&pdev->dev);
if (irq > 0) {
u8 irq_status;
/* Mask all interrupts */
tqmx86_gpio_write(gpio, 0, TQMX86_GPIIC);
/* Clear all pending interrupts */
irq_status = tqmx86_gpio_read(gpio, TQMX86_GPIIS);
tqmx86_gpio_write(gpio, irq_status, TQMX86_GPIIS);
girq = &chip->irq;
gpio_irq_chip_set_chip(girq, &tqmx86_gpio_irq_chip);
girq->parent_handler = tqmx86_gpio_irq_handler;
girq->num_parents = 1;
girq->parents = devm_kcalloc(&pdev->dev, 1,
sizeof(*girq->parents),
GFP_KERNEL);
if (!girq->parents) {
ret = -ENOMEM;
goto out_pm_dis;
}
girq->parents[0] = irq;
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_simple_irq;
girq->init_valid_mask = tqmx86_init_irq_valid_mask;
irq_domain_set_pm_device(girq->domain, dev);
}
ret = devm_gpiochip_add_data(dev, chip, gpio);
if (ret) {
dev_err(dev, "Could not register GPIO chip\n");
goto out_pm_dis;
}
dev_info(dev, "GPIO functionality initialized with %d pins\n",
chip->ngpio);
return 0;
out_pm_dis:
pm_runtime_disable(&pdev->dev);
return ret;
}
static struct platform_driver tqmx86_gpio_driver = {
.driver = {
.name = "tqmx86-gpio",
.pm = &tqmx86_gpio_dev_pm_ops,
},
.probe = tqmx86_gpio_probe,
};
module_platform_driver(tqmx86_gpio_driver);
MODULE_DESCRIPTION("TQMx86 PLD GPIO Driver");
MODULE_AUTHOR("Andrew Lunn <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:tqmx86-gpio");
| linux-master | drivers/gpio/gpio-tqmx86.c |
// SPDX-License-Identifier: GPL-2.0
/*
* GPIO driver for NXP LPC18xx/43xx.
*
* Copyright (C) 2018 Vladimir Zapolskiy <[email protected]>
* Copyright (C) 2015 Joachim Eastwood <[email protected]>
*
*/
#include <linux/clk.h>
#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
/* LPC18xx GPIO register offsets */
#define LPC18XX_REG_DIR(n) (0x2000 + (n) * sizeof(u32))
#define LPC18XX_MAX_PORTS 8
#define LPC18XX_PINS_PER_PORT 32
/* LPC18xx GPIO pin interrupt controller register offsets */
#define LPC18XX_GPIO_PIN_IC_ISEL 0x00
#define LPC18XX_GPIO_PIN_IC_IENR 0x04
#define LPC18XX_GPIO_PIN_IC_SIENR 0x08
#define LPC18XX_GPIO_PIN_IC_CIENR 0x0c
#define LPC18XX_GPIO_PIN_IC_IENF 0x10
#define LPC18XX_GPIO_PIN_IC_SIENF 0x14
#define LPC18XX_GPIO_PIN_IC_CIENF 0x18
#define LPC18XX_GPIO_PIN_IC_RISE 0x1c
#define LPC18XX_GPIO_PIN_IC_FALL 0x20
#define LPC18XX_GPIO_PIN_IC_IST 0x24
#define NR_LPC18XX_GPIO_PIN_IC_IRQS 8
struct lpc18xx_gpio_pin_ic {
void __iomem *base;
struct irq_domain *domain;
struct raw_spinlock lock;
};
struct lpc18xx_gpio_chip {
struct gpio_chip gpio;
void __iomem *base;
struct clk *clk;
struct lpc18xx_gpio_pin_ic *pin_ic;
spinlock_t lock;
};
static inline void lpc18xx_gpio_pin_ic_isel(struct lpc18xx_gpio_pin_ic *ic,
u32 pin, bool set)
{
u32 val = readl_relaxed(ic->base + LPC18XX_GPIO_PIN_IC_ISEL);
if (set)
val &= ~BIT(pin);
else
val |= BIT(pin);
writel_relaxed(val, ic->base + LPC18XX_GPIO_PIN_IC_ISEL);
}
static inline void lpc18xx_gpio_pin_ic_set(struct lpc18xx_gpio_pin_ic *ic,
u32 pin, u32 reg)
{
writel_relaxed(BIT(pin), ic->base + reg);
}
static void lpc18xx_gpio_pin_ic_mask(struct irq_data *d)
{
struct lpc18xx_gpio_pin_ic *ic = d->chip_data;
u32 type = irqd_get_trigger_type(d);
raw_spin_lock(&ic->lock);
if (type & IRQ_TYPE_LEVEL_MASK || type & IRQ_TYPE_EDGE_RISING)
lpc18xx_gpio_pin_ic_set(ic, d->hwirq,
LPC18XX_GPIO_PIN_IC_CIENR);
if (type & IRQ_TYPE_EDGE_FALLING)
lpc18xx_gpio_pin_ic_set(ic, d->hwirq,
LPC18XX_GPIO_PIN_IC_CIENF);
raw_spin_unlock(&ic->lock);
irq_chip_mask_parent(d);
}
static void lpc18xx_gpio_pin_ic_unmask(struct irq_data *d)
{
struct lpc18xx_gpio_pin_ic *ic = d->chip_data;
u32 type = irqd_get_trigger_type(d);
raw_spin_lock(&ic->lock);
if (type & IRQ_TYPE_LEVEL_MASK || type & IRQ_TYPE_EDGE_RISING)
lpc18xx_gpio_pin_ic_set(ic, d->hwirq,
LPC18XX_GPIO_PIN_IC_SIENR);
if (type & IRQ_TYPE_EDGE_FALLING)
lpc18xx_gpio_pin_ic_set(ic, d->hwirq,
LPC18XX_GPIO_PIN_IC_SIENF);
raw_spin_unlock(&ic->lock);
irq_chip_unmask_parent(d);
}
static void lpc18xx_gpio_pin_ic_eoi(struct irq_data *d)
{
struct lpc18xx_gpio_pin_ic *ic = d->chip_data;
u32 type = irqd_get_trigger_type(d);
raw_spin_lock(&ic->lock);
if (type & IRQ_TYPE_EDGE_BOTH)
lpc18xx_gpio_pin_ic_set(ic, d->hwirq,
LPC18XX_GPIO_PIN_IC_IST);
raw_spin_unlock(&ic->lock);
irq_chip_eoi_parent(d);
}
static int lpc18xx_gpio_pin_ic_set_type(struct irq_data *d, unsigned int type)
{
struct lpc18xx_gpio_pin_ic *ic = d->chip_data;
raw_spin_lock(&ic->lock);
if (type & IRQ_TYPE_LEVEL_HIGH) {
lpc18xx_gpio_pin_ic_isel(ic, d->hwirq, true);
lpc18xx_gpio_pin_ic_set(ic, d->hwirq,
LPC18XX_GPIO_PIN_IC_SIENF);
} else if (type & IRQ_TYPE_LEVEL_LOW) {
lpc18xx_gpio_pin_ic_isel(ic, d->hwirq, true);
lpc18xx_gpio_pin_ic_set(ic, d->hwirq,
LPC18XX_GPIO_PIN_IC_CIENF);
} else {
lpc18xx_gpio_pin_ic_isel(ic, d->hwirq, false);
}
raw_spin_unlock(&ic->lock);
return 0;
}
static struct irq_chip lpc18xx_gpio_pin_ic = {
.name = "LPC18xx GPIO pin",
.irq_mask = lpc18xx_gpio_pin_ic_mask,
.irq_unmask = lpc18xx_gpio_pin_ic_unmask,
.irq_eoi = lpc18xx_gpio_pin_ic_eoi,
.irq_set_type = lpc18xx_gpio_pin_ic_set_type,
.flags = IRQCHIP_SET_TYPE_MASKED,
};
static int lpc18xx_gpio_pin_ic_domain_alloc(struct irq_domain *domain,
unsigned int virq,
unsigned int nr_irqs, void *data)
{
struct irq_fwspec parent_fwspec, *fwspec = data;
struct lpc18xx_gpio_pin_ic *ic = domain->host_data;
irq_hw_number_t hwirq;
int ret;
if (nr_irqs != 1)
return -EINVAL;
hwirq = fwspec->param[0];
if (hwirq >= NR_LPC18XX_GPIO_PIN_IC_IRQS)
return -EINVAL;
/*
* All LPC18xx/LPC43xx GPIO pin hardware interrupts are translated
* into edge interrupts 32...39 on parent Cortex-M3/M4 NVIC
*/
parent_fwspec.fwnode = domain->parent->fwnode;
parent_fwspec.param_count = 1;
parent_fwspec.param[0] = hwirq + 32;
ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &parent_fwspec);
if (ret < 0) {
pr_err("failed to allocate parent irq %u: %d\n",
parent_fwspec.param[0], ret);
return ret;
}
return irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
&lpc18xx_gpio_pin_ic, ic);
}
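/*
* Worked example (illustrative only): a GPIO pin interrupt with hwirq 3
* is allocated parent hwirq 3 + 32 = 35, i.e. the matching edge interrupt
* line on the Cortex-M3/M4 NVIC described in the comment above.
*/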
static const struct irq_domain_ops lpc18xx_gpio_pin_ic_domain_ops = {
.alloc = lpc18xx_gpio_pin_ic_domain_alloc,
.xlate = irq_domain_xlate_twocell,
.free = irq_domain_free_irqs_common,
};
static int lpc18xx_gpio_pin_ic_probe(struct lpc18xx_gpio_chip *gc)
{
struct device *dev = gc->gpio.parent;
struct irq_domain *parent_domain;
struct device_node *parent_node;
struct lpc18xx_gpio_pin_ic *ic;
struct resource res;
int ret, index;
parent_node = of_irq_find_parent(dev->of_node);
if (!parent_node)
return -ENXIO;
parent_domain = irq_find_host(parent_node);
of_node_put(parent_node);
if (!parent_domain)
return -ENXIO;
ic = devm_kzalloc(dev, sizeof(*ic), GFP_KERNEL);
if (!ic)
return -ENOMEM;
index = of_property_match_string(dev->of_node, "reg-names",
"gpio-pin-ic");
if (index < 0) {
ret = -ENODEV;
goto free_ic;
}
ret = of_address_to_resource(dev->of_node, index, &res);
if (ret < 0)
goto free_ic;
ic->base = devm_ioremap_resource(dev, &res);
if (IS_ERR(ic->base)) {
ret = PTR_ERR(ic->base);
goto free_ic;
}
raw_spin_lock_init(&ic->lock);
ic->domain = irq_domain_add_hierarchy(parent_domain, 0,
NR_LPC18XX_GPIO_PIN_IC_IRQS,
dev->of_node,
&lpc18xx_gpio_pin_ic_domain_ops,
ic);
if (!ic->domain) {
pr_err("unable to add irq domain\n");
ret = -ENODEV;
goto free_iomap;
}
gc->pin_ic = ic;
return 0;
free_iomap:
devm_iounmap(dev, ic->base);
free_ic:
devm_kfree(dev, ic);
return ret;
}
static void lpc18xx_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
struct lpc18xx_gpio_chip *gc = gpiochip_get_data(chip);
writeb(value ? 1 : 0, gc->base + offset);
}
static int lpc18xx_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct lpc18xx_gpio_chip *gc = gpiochip_get_data(chip);
return !!readb(gc->base + offset);
}
static int lpc18xx_gpio_direction(struct gpio_chip *chip, unsigned offset,
bool out)
{
struct lpc18xx_gpio_chip *gc = gpiochip_get_data(chip);
unsigned long flags;
u32 port, pin, dir;
port = offset / LPC18XX_PINS_PER_PORT;
pin = offset % LPC18XX_PINS_PER_PORT;
spin_lock_irqsave(&gc->lock, flags);
dir = readl(gc->base + LPC18XX_REG_DIR(port));
if (out)
dir |= BIT(pin);
else
dir &= ~BIT(pin);
writel(dir, gc->base + LPC18XX_REG_DIR(port));
spin_unlock_irqrestore(&gc->lock, flags);
return 0;
}
static int lpc18xx_gpio_direction_input(struct gpio_chip *chip,
unsigned offset)
{
return lpc18xx_gpio_direction(chip, offset, false);
}
static int lpc18xx_gpio_direction_output(struct gpio_chip *chip,
unsigned offset, int value)
{
lpc18xx_gpio_set(chip, offset, value);
return lpc18xx_gpio_direction(chip, offset, true);
}
static const struct gpio_chip lpc18xx_chip = {
.label = "lpc18xx/43xx-gpio",
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
.direction_input = lpc18xx_gpio_direction_input,
.direction_output = lpc18xx_gpio_direction_output,
.set = lpc18xx_gpio_set,
.get = lpc18xx_gpio_get,
.ngpio = LPC18XX_MAX_PORTS * LPC18XX_PINS_PER_PORT,
.owner = THIS_MODULE,
};
static int lpc18xx_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct lpc18xx_gpio_chip *gc;
int index, ret;
gc = devm_kzalloc(dev, sizeof(*gc), GFP_KERNEL);
if (!gc)
return -ENOMEM;
gc->gpio = lpc18xx_chip;
platform_set_drvdata(pdev, gc);
index = of_property_match_string(dev->of_node, "reg-names", "gpio");
if (index < 0) {
/* To support backward compatibility take the first resource */
gc->base = devm_platform_ioremap_resource(pdev, 0);
} else {
struct resource res;
ret = of_address_to_resource(dev->of_node, index, &res);
if (ret < 0)
return ret;
gc->base = devm_ioremap_resource(dev, &res);
}
if (IS_ERR(gc->base))
return PTR_ERR(gc->base);
gc->clk = devm_clk_get(dev, NULL);
if (IS_ERR(gc->clk)) {
dev_err(dev, "input clock not found\n");
return PTR_ERR(gc->clk);
}
ret = clk_prepare_enable(gc->clk);
if (ret) {
dev_err(dev, "unable to enable clock\n");
return ret;
}
spin_lock_init(&gc->lock);
gc->gpio.parent = dev;
ret = devm_gpiochip_add_data(dev, &gc->gpio, gc);
if (ret) {
dev_err(dev, "failed to add gpio chip\n");
clk_disable_unprepare(gc->clk);
return ret;
}
/* On error, the GPIO pin interrupt controller just won't be registered */
lpc18xx_gpio_pin_ic_probe(gc);
return 0;
}
static int lpc18xx_gpio_remove(struct platform_device *pdev)
{
struct lpc18xx_gpio_chip *gc = platform_get_drvdata(pdev);
if (gc->pin_ic)
irq_domain_remove(gc->pin_ic->domain);
clk_disable_unprepare(gc->clk);
return 0;
}
static const struct of_device_id lpc18xx_gpio_match[] = {
{ .compatible = "nxp,lpc1850-gpio" },
{ }
};
MODULE_DEVICE_TABLE(of, lpc18xx_gpio_match);
static struct platform_driver lpc18xx_gpio_driver = {
.probe = lpc18xx_gpio_probe,
.remove = lpc18xx_gpio_remove,
.driver = {
.name = "lpc18xx-gpio",
.of_match_table = lpc18xx_gpio_match,
},
};
module_platform_driver(lpc18xx_gpio_driver);
MODULE_AUTHOR("Joachim Eastwood <[email protected]>");
MODULE_AUTHOR("Vladimir Zapolskiy <[email protected]>");
MODULE_DESCRIPTION("GPIO driver for LPC18xx/43xx");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpio/gpio-lpc18xx.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* MEN 16Z127 GPIO driver
*
* Copyright (C) 2016 MEN Mikroelektronik GmbH (www.men.de)
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/mcb.h>
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
#define MEN_Z127_CTRL 0x00
#define MEN_Z127_PSR 0x04
#define MEN_Z127_IRQR 0x08
#define MEN_Z127_GPIODR 0x0c
#define MEN_Z127_IER1 0x10
#define MEN_Z127_IER2 0x14
#define MEN_Z127_DBER 0x18
#define MEN_Z127_ODER 0x1C
#define GPIO_TO_DBCNT_REG(gpio) (((gpio) * 4) + 0x80)
#define MEN_Z127_DB_MIN_US 50
/* 16 bit compare register. Each bit represents 50us */
#define MEN_Z127_DB_MAX_US (0xffff * MEN_Z127_DB_MIN_US)
#define MEN_Z127_DB_IN_RANGE(db) (((db) >= MEN_Z127_DB_MIN_US) && \
((db) <= MEN_Z127_DB_MAX_US))
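/*
* Illustrative example (not part of the original driver): a requested
* debounce of 1000us passes MEN_Z127_DB_IN_RANGE() and is later written
* to the compare register as 1000 / MEN_Z127_DB_MIN_US = 20 units.
*/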
struct men_z127_gpio {
struct gpio_chip gc;
void __iomem *reg_base;
struct resource *mem;
};
static int men_z127_debounce(struct gpio_chip *gc, unsigned gpio,
unsigned debounce)
{
struct men_z127_gpio *priv = gpiochip_get_data(gc);
struct device *dev = gc->parent;
unsigned int rnd;
u32 db_en, db_cnt;
if (!MEN_Z127_DB_IN_RANGE(debounce)) {
dev_err(dev, "debounce value %u out of range", debounce);
return -EINVAL;
}
if (debounce > 0) {
/* round up or down depending on the bit below the MSB */
rnd = fls(debounce) - 1;
if (rnd && (debounce & BIT(rnd - 1)))
debounce = roundup(debounce, MEN_Z127_DB_MIN_US);
else
debounce = rounddown(debounce, MEN_Z127_DB_MIN_US);
if (debounce > MEN_Z127_DB_MAX_US)
debounce = MEN_Z127_DB_MAX_US;
/* one register unit per MEN_Z127_DB_MIN_US (50us) */
debounce /= MEN_Z127_DB_MIN_US;
}
raw_spin_lock(&gc->bgpio_lock);
db_en = readl(priv->reg_base + MEN_Z127_DBER);
if (debounce == 0) {
db_en &= ~BIT(gpio);
db_cnt = 0;
} else {
db_en |= BIT(gpio);
db_cnt = debounce;
}
writel(db_en, priv->reg_base + MEN_Z127_DBER);
writel(db_cnt, priv->reg_base + GPIO_TO_DBCNT_REG(gpio));
raw_spin_unlock(&gc->bgpio_lock);
return 0;
}
static int men_z127_set_single_ended(struct gpio_chip *gc,
unsigned offset,
enum pin_config_param param)
{
struct men_z127_gpio *priv = gpiochip_get_data(gc);
u32 od_en;
raw_spin_lock(&gc->bgpio_lock);
od_en = readl(priv->reg_base + MEN_Z127_ODER);
if (param == PIN_CONFIG_DRIVE_OPEN_DRAIN)
od_en |= BIT(offset);
else
/* Implicitly PIN_CONFIG_DRIVE_PUSH_PULL */
od_en &= ~BIT(offset);
writel(od_en, priv->reg_base + MEN_Z127_ODER);
raw_spin_unlock(&gc->bgpio_lock);
return 0;
}
static int men_z127_set_config(struct gpio_chip *gc, unsigned offset,
unsigned long config)
{
enum pin_config_param param = pinconf_to_config_param(config);
switch (param) {
case PIN_CONFIG_DRIVE_OPEN_DRAIN:
case PIN_CONFIG_DRIVE_PUSH_PULL:
return men_z127_set_single_ended(gc, offset, param);
case PIN_CONFIG_INPUT_DEBOUNCE:
return men_z127_debounce(gc, offset,
pinconf_to_config_argument(config));
default:
break;
}
return -ENOTSUPP;
}
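/*
* A minimal consumer-side sketch (illustrative; the "foo" con_id and the
* surrounding device wiring are hypothetical), showing how the debounce
* path above is reached through the generic gpiod API, with the value
* given in microseconds:
*
*	struct gpio_desc *d = devm_gpiod_get(dev, "foo", GPIOD_IN);
*	if (!IS_ERR(d))
*		gpiod_set_debounce(d, 1000);
*/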
static int men_z127_probe(struct mcb_device *mdev,
const struct mcb_device_id *id)
{
struct men_z127_gpio *men_z127_gpio;
struct device *dev = &mdev->dev;
int ret;
men_z127_gpio = devm_kzalloc(dev, sizeof(struct men_z127_gpio),
GFP_KERNEL);
if (!men_z127_gpio)
return -ENOMEM;
men_z127_gpio->mem = mcb_request_mem(mdev, dev_name(dev));
if (IS_ERR(men_z127_gpio->mem)) {
dev_err(dev, "failed to request device memory");
return PTR_ERR(men_z127_gpio->mem);
}
men_z127_gpio->reg_base = ioremap(men_z127_gpio->mem->start,
resource_size(men_z127_gpio->mem));
if (men_z127_gpio->reg_base == NULL) {
ret = -ENXIO;
goto err_release;
}
mcb_set_drvdata(mdev, men_z127_gpio);
ret = bgpio_init(&men_z127_gpio->gc, &mdev->dev, 4,
men_z127_gpio->reg_base + MEN_Z127_PSR,
men_z127_gpio->reg_base + MEN_Z127_CTRL,
NULL,
men_z127_gpio->reg_base + MEN_Z127_GPIODR,
NULL, 0);
if (ret)
goto err_unmap;
men_z127_gpio->gc.set_config = men_z127_set_config;
ret = gpiochip_add_data(&men_z127_gpio->gc, men_z127_gpio);
if (ret) {
dev_err(dev, "failed to register MEN 16Z127 GPIO controller");
goto err_unmap;
}
dev_info(dev, "MEN 16Z127 GPIO driver registered");
return 0;
err_unmap:
iounmap(men_z127_gpio->reg_base);
err_release:
mcb_release_mem(men_z127_gpio->mem);
return ret;
}
static void men_z127_remove(struct mcb_device *mdev)
{
struct men_z127_gpio *men_z127_gpio = mcb_get_drvdata(mdev);
gpiochip_remove(&men_z127_gpio->gc);
iounmap(men_z127_gpio->reg_base);
mcb_release_mem(men_z127_gpio->mem);
}
static const struct mcb_device_id men_z127_ids[] = {
{ .device = 0x7f },
{ }
};
MODULE_DEVICE_TABLE(mcb, men_z127_ids);
static struct mcb_driver men_z127_driver = {
.driver = {
.name = "z127-gpio",
},
.probe = men_z127_probe,
.remove = men_z127_remove,
.id_table = men_z127_ids,
};
module_mcb_driver(men_z127_driver);
MODULE_AUTHOR("Andreas Werner <[email protected]>");
MODULE_DESCRIPTION("MEN 16z127 GPIO Controller");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("mcb:16z127");
MODULE_IMPORT_NS(MCB);
| linux-master | drivers/gpio/gpio-menz127.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* GPIO driver for LPC32xx SoC
*
* Author: Kevin Wells <[email protected]>
*
* Copyright (C) 2010 NXP Semiconductors
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/errno.h>
#include <linux/gpio/driver.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#define LPC32XX_GPIO_P3_INP_STATE (0x000)
#define LPC32XX_GPIO_P3_OUTP_SET (0x004)
#define LPC32XX_GPIO_P3_OUTP_CLR (0x008)
#define LPC32XX_GPIO_P3_OUTP_STATE (0x00C)
#define LPC32XX_GPIO_P2_DIR_SET (0x010)
#define LPC32XX_GPIO_P2_DIR_CLR (0x014)
#define LPC32XX_GPIO_P2_DIR_STATE (0x018)
#define LPC32XX_GPIO_P2_INP_STATE (0x01C)
#define LPC32XX_GPIO_P2_OUTP_SET (0x020)
#define LPC32XX_GPIO_P2_OUTP_CLR (0x024)
#define LPC32XX_GPIO_P2_MUX_SET (0x028)
#define LPC32XX_GPIO_P2_MUX_CLR (0x02C)
#define LPC32XX_GPIO_P2_MUX_STATE (0x030)
#define LPC32XX_GPIO_P0_INP_STATE (0x040)
#define LPC32XX_GPIO_P0_OUTP_SET (0x044)
#define LPC32XX_GPIO_P0_OUTP_CLR (0x048)
#define LPC32XX_GPIO_P0_OUTP_STATE (0x04C)
#define LPC32XX_GPIO_P0_DIR_SET (0x050)
#define LPC32XX_GPIO_P0_DIR_CLR (0x054)
#define LPC32XX_GPIO_P0_DIR_STATE (0x058)
#define LPC32XX_GPIO_P1_INP_STATE (0x060)
#define LPC32XX_GPIO_P1_OUTP_SET (0x064)
#define LPC32XX_GPIO_P1_OUTP_CLR (0x068)
#define LPC32XX_GPIO_P1_OUTP_STATE (0x06C)
#define LPC32XX_GPIO_P1_DIR_SET (0x070)
#define LPC32XX_GPIO_P1_DIR_CLR (0x074)
#define LPC32XX_GPIO_P1_DIR_STATE (0x078)
#define GPIO012_PIN_TO_BIT(x) (1 << (x))
#define GPIO3_PIN_TO_BIT(x) (1 << ((x) + 25))
#define GPO3_PIN_TO_BIT(x) (1 << (x))
#define GPIO012_PIN_IN_SEL(x, y) (((x) >> (y)) & 1)
#define GPIO3_PIN_IN_SHIFT(x) ((x) == 5 ? 24 : 10 + (x))
#define GPIO3_PIN_IN_SEL(x, y) (((x) >> GPIO3_PIN_IN_SHIFT(y)) & 1)
#define GPIO3_PIN5_IN_SEL(x) (((x) >> 24) & 1)
#define GPI3_PIN_IN_SEL(x, y) (((x) >> (y)) & 1)
#define GPO3_PIN_IN_SEL(x, y) (((x) >> (y)) & 1)
#define LPC32XX_GPIO_P0_MAX 8
#define LPC32XX_GPIO_P1_MAX 24
#define LPC32XX_GPIO_P2_MAX 13
#define LPC32XX_GPIO_P3_MAX 6
#define LPC32XX_GPI_P3_MAX 29
#define LPC32XX_GPO_P3_MAX 24
#define LPC32XX_GPIO_P0_GRP 0
#define LPC32XX_GPIO_P1_GRP (LPC32XX_GPIO_P0_GRP + LPC32XX_GPIO_P0_MAX)
#define LPC32XX_GPIO_P2_GRP (LPC32XX_GPIO_P1_GRP + LPC32XX_GPIO_P1_MAX)
#define LPC32XX_GPIO_P3_GRP (LPC32XX_GPIO_P2_GRP + LPC32XX_GPIO_P2_MAX)
#define LPC32XX_GPI_P3_GRP (LPC32XX_GPIO_P3_GRP + LPC32XX_GPIO_P3_MAX)
#define LPC32XX_GPO_P3_GRP (LPC32XX_GPI_P3_GRP + LPC32XX_GPI_P3_MAX)
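/*
* Illustrative example (not part of the original driver): the group bases
* above allocate Linux GPIO numbers contiguously, so p1.0 is
* LPC32XX_GPIO_P1_GRP = 8 and p2.3 is LPC32XX_GPIO_P2_GRP + 3 = 35.
*/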
struct gpio_regs {
unsigned long inp_state;
unsigned long outp_state;
unsigned long outp_set;
unsigned long outp_clr;
unsigned long dir_set;
unsigned long dir_clr;
};
/*
* GPIO names
*/
static const char *gpio_p0_names[LPC32XX_GPIO_P0_MAX] = {
"p0.0", "p0.1", "p0.2", "p0.3",
"p0.4", "p0.5", "p0.6", "p0.7"
};
static const char *gpio_p1_names[LPC32XX_GPIO_P1_MAX] = {
"p1.0", "p1.1", "p1.2", "p1.3",
"p1.4", "p1.5", "p1.6", "p1.7",
"p1.8", "p1.9", "p1.10", "p1.11",
"p1.12", "p1.13", "p1.14", "p1.15",
"p1.16", "p1.17", "p1.18", "p1.19",
"p1.20", "p1.21", "p1.22", "p1.23",
};
static const char *gpio_p2_names[LPC32XX_GPIO_P2_MAX] = {
"p2.0", "p2.1", "p2.2", "p2.3",
"p2.4", "p2.5", "p2.6", "p2.7",
"p2.8", "p2.9", "p2.10", "p2.11",
"p2.12"
};
static const char *gpio_p3_names[LPC32XX_GPIO_P3_MAX] = {
"gpio00", "gpio01", "gpio02", "gpio03",
"gpio04", "gpio05"
};
static const char *gpi_p3_names[LPC32XX_GPI_P3_MAX] = {
"gpi00", "gpi01", "gpi02", "gpi03",
"gpi04", "gpi05", "gpi06", "gpi07",
"gpi08", "gpi09", NULL, NULL,
NULL, NULL, NULL, "gpi15",
"gpi16", "gpi17", "gpi18", "gpi19",
"gpi20", "gpi21", "gpi22", "gpi23",
"gpi24", "gpi25", "gpi26", "gpi27",
"gpi28"
};
static const char *gpo_p3_names[LPC32XX_GPO_P3_MAX] = {
"gpo00", "gpo01", "gpo02", "gpo03",
"gpo04", "gpo05", "gpo06", "gpo07",
"gpo08", "gpo09", "gpo10", "gpo11",
"gpo12", "gpo13", "gpo14", "gpo15",
"gpo16", "gpo17", "gpo18", "gpo19",
"gpo20", "gpo21", "gpo22", "gpo23"
};
static struct gpio_regs gpio_grp_regs_p0 = {
.inp_state = LPC32XX_GPIO_P0_INP_STATE,
.outp_set = LPC32XX_GPIO_P0_OUTP_SET,
.outp_clr = LPC32XX_GPIO_P0_OUTP_CLR,
.dir_set = LPC32XX_GPIO_P0_DIR_SET,
.dir_clr = LPC32XX_GPIO_P0_DIR_CLR,
};
static struct gpio_regs gpio_grp_regs_p1 = {
.inp_state = LPC32XX_GPIO_P1_INP_STATE,
.outp_set = LPC32XX_GPIO_P1_OUTP_SET,
.outp_clr = LPC32XX_GPIO_P1_OUTP_CLR,
.dir_set = LPC32XX_GPIO_P1_DIR_SET,
.dir_clr = LPC32XX_GPIO_P1_DIR_CLR,
};
static struct gpio_regs gpio_grp_regs_p2 = {
.inp_state = LPC32XX_GPIO_P2_INP_STATE,
.outp_set = LPC32XX_GPIO_P2_OUTP_SET,
.outp_clr = LPC32XX_GPIO_P2_OUTP_CLR,
.dir_set = LPC32XX_GPIO_P2_DIR_SET,
.dir_clr = LPC32XX_GPIO_P2_DIR_CLR,
};
static struct gpio_regs gpio_grp_regs_p3 = {
.inp_state = LPC32XX_GPIO_P3_INP_STATE,
.outp_state = LPC32XX_GPIO_P3_OUTP_STATE,
.outp_set = LPC32XX_GPIO_P3_OUTP_SET,
.outp_clr = LPC32XX_GPIO_P3_OUTP_CLR,
/*
* P3 has no direction registers of its own: its direction bits live in
* the P2 direction registers, at the bit positions produced by
* GPIO3_PIN_TO_BIT() (bits 25..30), so the P2 register offsets below
* are intentional.
*/
.dir_set = LPC32XX_GPIO_P2_DIR_SET,
.dir_clr = LPC32XX_GPIO_P2_DIR_CLR,
};
struct lpc32xx_gpio_chip {
struct gpio_chip chip;
struct gpio_regs *gpio_grp;
void __iomem *reg_base;
};
static inline u32 gpreg_read(struct lpc32xx_gpio_chip *group, unsigned long offset)
{
return __raw_readl(group->reg_base + offset);
}
static inline void gpreg_write(struct lpc32xx_gpio_chip *group, u32 val, unsigned long offset)
{
__raw_writel(val, group->reg_base + offset);
}
static void __set_gpio_dir_p012(struct lpc32xx_gpio_chip *group,
unsigned pin, int input)
{
if (input)
gpreg_write(group, GPIO012_PIN_TO_BIT(pin),
group->gpio_grp->dir_clr);
else
gpreg_write(group, GPIO012_PIN_TO_BIT(pin),
group->gpio_grp->dir_set);
}
static void __set_gpio_dir_p3(struct lpc32xx_gpio_chip *group,
unsigned pin, int input)
{
u32 u = GPIO3_PIN_TO_BIT(pin);
if (input)
gpreg_write(group, u, group->gpio_grp->dir_clr);
else
gpreg_write(group, u, group->gpio_grp->dir_set);
}
static void __set_gpio_level_p012(struct lpc32xx_gpio_chip *group,
unsigned pin, int high)
{
if (high)
gpreg_write(group, GPIO012_PIN_TO_BIT(pin),
group->gpio_grp->outp_set);
else
gpreg_write(group, GPIO012_PIN_TO_BIT(pin),
group->gpio_grp->outp_clr);
}
static void __set_gpio_level_p3(struct lpc32xx_gpio_chip *group,
unsigned pin, int high)
{
u32 u = GPIO3_PIN_TO_BIT(pin);
if (high)
gpreg_write(group, u, group->gpio_grp->outp_set);
else
gpreg_write(group, u, group->gpio_grp->outp_clr);
}
static void __set_gpo_level_p3(struct lpc32xx_gpio_chip *group,
unsigned pin, int high)
{
if (high)
gpreg_write(group, GPO3_PIN_TO_BIT(pin), group->gpio_grp->outp_set);
else
gpreg_write(group, GPO3_PIN_TO_BIT(pin), group->gpio_grp->outp_clr);
}
static int __get_gpio_state_p012(struct lpc32xx_gpio_chip *group,
unsigned pin)
{
return GPIO012_PIN_IN_SEL(gpreg_read(group, group->gpio_grp->inp_state),
pin);
}
static int __get_gpio_state_p3(struct lpc32xx_gpio_chip *group,
unsigned pin)
{
int state = gpreg_read(group, group->gpio_grp->inp_state);
/*
* The P3 GPIO pin input mapping is not contiguous: GPIOP3-0..4 are
* mapped to bits 10..14, while GPIOP3-5 is mapped to bit 24.
*/
return GPIO3_PIN_IN_SEL(state, pin);
}
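/*
* Illustrative example (not part of the original driver): for GPIOP3-2,
* GPIO3_PIN_IN_SHIFT(2) = 12, so the state is taken from bit 12 of the
* input state register; for GPIOP3-5 the shift is 24.
*/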
static int __get_gpi_state_p3(struct lpc32xx_gpio_chip *group,
unsigned pin)
{
return GPI3_PIN_IN_SEL(gpreg_read(group, group->gpio_grp->inp_state), pin);
}
static int __get_gpo_state_p3(struct lpc32xx_gpio_chip *group,
unsigned pin)
{
return GPO3_PIN_IN_SEL(gpreg_read(group, group->gpio_grp->outp_state), pin);
}
/*
* GPIO primitives.
*/
static int lpc32xx_gpio_dir_input_p012(struct gpio_chip *chip,
unsigned pin)
{
struct lpc32xx_gpio_chip *group = gpiochip_get_data(chip);
__set_gpio_dir_p012(group, pin, 1);
return 0;
}
static int lpc32xx_gpio_dir_input_p3(struct gpio_chip *chip,
unsigned pin)
{
struct lpc32xx_gpio_chip *group = gpiochip_get_data(chip);
__set_gpio_dir_p3(group, pin, 1);
return 0;
}
static int lpc32xx_gpio_dir_in_always(struct gpio_chip *chip,
unsigned pin)
{
return 0;
}
static int lpc32xx_gpio_get_value_p012(struct gpio_chip *chip, unsigned pin)
{
struct lpc32xx_gpio_chip *group = gpiochip_get_data(chip);
return !!__get_gpio_state_p012(group, pin);
}
static int lpc32xx_gpio_get_value_p3(struct gpio_chip *chip, unsigned pin)
{
struct lpc32xx_gpio_chip *group = gpiochip_get_data(chip);
return !!__get_gpio_state_p3(group, pin);
}
static int lpc32xx_gpi_get_value(struct gpio_chip *chip, unsigned pin)
{
struct lpc32xx_gpio_chip *group = gpiochip_get_data(chip);
return !!__get_gpi_state_p3(group, pin);
}
static int lpc32xx_gpio_dir_output_p012(struct gpio_chip *chip, unsigned pin,
int value)
{
struct lpc32xx_gpio_chip *group = gpiochip_get_data(chip);
__set_gpio_level_p012(group, pin, value);
__set_gpio_dir_p012(group, pin, 0);
return 0;
}
static int lpc32xx_gpio_dir_output_p3(struct gpio_chip *chip, unsigned pin,
int value)
{
struct lpc32xx_gpio_chip *group = gpiochip_get_data(chip);
__set_gpio_level_p3(group, pin, value);
__set_gpio_dir_p3(group, pin, 0);
return 0;
}
static int lpc32xx_gpio_dir_out_always(struct gpio_chip *chip, unsigned pin,
int value)
{
struct lpc32xx_gpio_chip *group = gpiochip_get_data(chip);
__set_gpo_level_p3(group, pin, value);
return 0;
}
static void lpc32xx_gpio_set_value_p012(struct gpio_chip *chip, unsigned pin,
int value)
{
struct lpc32xx_gpio_chip *group = gpiochip_get_data(chip);
__set_gpio_level_p012(group, pin, value);
}
static void lpc32xx_gpio_set_value_p3(struct gpio_chip *chip, unsigned pin,
int value)
{
struct lpc32xx_gpio_chip *group = gpiochip_get_data(chip);
__set_gpio_level_p3(group, pin, value);
}
static void lpc32xx_gpo_set_value(struct gpio_chip *chip, unsigned pin,
int value)
{
struct lpc32xx_gpio_chip *group = gpiochip_get_data(chip);
__set_gpo_level_p3(group, pin, value);
}
static int lpc32xx_gpo_get_value(struct gpio_chip *chip, unsigned pin)
{
struct lpc32xx_gpio_chip *group = gpiochip_get_data(chip);
return !!__get_gpo_state_p3(group, pin);
}
static int lpc32xx_gpio_request(struct gpio_chip *chip, unsigned pin)
{
if (pin < chip->ngpio)
return 0;
return -EINVAL;
}
static int lpc32xx_gpio_to_irq_p01(struct gpio_chip *chip, unsigned offset)
{
return -ENXIO;
}
static int lpc32xx_gpio_to_irq_gpio_p3(struct gpio_chip *chip, unsigned offset)
{
return -ENXIO;
}
static int lpc32xx_gpio_to_irq_gpi_p3(struct gpio_chip *chip, unsigned offset)
{
return -ENXIO;
}
static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
{
.chip = {
.label = "gpio_p0",
.direction_input = lpc32xx_gpio_dir_input_p012,
.get = lpc32xx_gpio_get_value_p012,
.direction_output = lpc32xx_gpio_dir_output_p012,
.set = lpc32xx_gpio_set_value_p012,
.request = lpc32xx_gpio_request,
.to_irq = lpc32xx_gpio_to_irq_p01,
.base = LPC32XX_GPIO_P0_GRP,
.ngpio = LPC32XX_GPIO_P0_MAX,
.names = gpio_p0_names,
.can_sleep = false,
},
.gpio_grp = &gpio_grp_regs_p0,
},
{
.chip = {
.label = "gpio_p1",
.direction_input = lpc32xx_gpio_dir_input_p012,
.get = lpc32xx_gpio_get_value_p012,
.direction_output = lpc32xx_gpio_dir_output_p012,
.set = lpc32xx_gpio_set_value_p012,
.request = lpc32xx_gpio_request,
.to_irq = lpc32xx_gpio_to_irq_p01,
.base = LPC32XX_GPIO_P1_GRP,
.ngpio = LPC32XX_GPIO_P1_MAX,
.names = gpio_p1_names,
.can_sleep = false,
},
.gpio_grp = &gpio_grp_regs_p1,
},
{
.chip = {
.label = "gpio_p2",
.direction_input = lpc32xx_gpio_dir_input_p012,
.get = lpc32xx_gpio_get_value_p012,
.direction_output = lpc32xx_gpio_dir_output_p012,
.set = lpc32xx_gpio_set_value_p012,
.request = lpc32xx_gpio_request,
.base = LPC32XX_GPIO_P2_GRP,
.ngpio = LPC32XX_GPIO_P2_MAX,
.names = gpio_p2_names,
.can_sleep = false,
},
.gpio_grp = &gpio_grp_regs_p2,
},
{
.chip = {
.label = "gpio_p3",
.direction_input = lpc32xx_gpio_dir_input_p3,
.get = lpc32xx_gpio_get_value_p3,
.direction_output = lpc32xx_gpio_dir_output_p3,
.set = lpc32xx_gpio_set_value_p3,
.request = lpc32xx_gpio_request,
.to_irq = lpc32xx_gpio_to_irq_gpio_p3,
.base = LPC32XX_GPIO_P3_GRP,
.ngpio = LPC32XX_GPIO_P3_MAX,
.names = gpio_p3_names,
.can_sleep = false,
},
.gpio_grp = &gpio_grp_regs_p3,
},
{
.chip = {
.label = "gpi_p3",
.direction_input = lpc32xx_gpio_dir_in_always,
.get = lpc32xx_gpi_get_value,
.request = lpc32xx_gpio_request,
.to_irq = lpc32xx_gpio_to_irq_gpi_p3,
.base = LPC32XX_GPI_P3_GRP,
.ngpio = LPC32XX_GPI_P3_MAX,
.names = gpi_p3_names,
.can_sleep = false,
},
.gpio_grp = &gpio_grp_regs_p3,
},
{
.chip = {
.label = "gpo_p3",
.direction_output = lpc32xx_gpio_dir_out_always,
.set = lpc32xx_gpo_set_value,
.get = lpc32xx_gpo_get_value,
.request = lpc32xx_gpio_request,
.base = LPC32XX_GPO_P3_GRP,
.ngpio = LPC32XX_GPO_P3_MAX,
.names = gpo_p3_names,
.can_sleep = false,
},
.gpio_grp = &gpio_grp_regs_p3,
},
};
static int lpc32xx_of_xlate(struct gpio_chip *gc,
const struct of_phandle_args *gpiospec, u32 *flags)
{
/* Is this the correct bank? */
u32 bank = gpiospec->args[0];
if (bank >= ARRAY_SIZE(lpc32xx_gpiochip) ||
gc != &lpc32xx_gpiochip[bank].chip)
return -EINVAL;
if (flags)
*flags = gpiospec->args[2];
return gpiospec->args[1];
}
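/*
* Illustrative example (not part of the original driver): a three-cell
* consumer specifier such as <&gpio 2 3 0> selects bank 2 (gpio_p2),
* chip-relative offset 3 (p2.3) and flags 0, so this xlate returns 3.
*/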
static int lpc32xx_gpio_probe(struct platform_device *pdev)
{
int i;
void __iomem *reg_base;
reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg_base))
return PTR_ERR(reg_base);
for (i = 0; i < ARRAY_SIZE(lpc32xx_gpiochip); i++) {
lpc32xx_gpiochip[i].chip.parent = &pdev->dev;
if (pdev->dev.of_node) {
lpc32xx_gpiochip[i].chip.of_xlate = lpc32xx_of_xlate;
lpc32xx_gpiochip[i].chip.of_gpio_n_cells = 3;
lpc32xx_gpiochip[i].reg_base = reg_base;
}
devm_gpiochip_add_data(&pdev->dev, &lpc32xx_gpiochip[i].chip,
&lpc32xx_gpiochip[i]);
}
return 0;
}
static const struct of_device_id lpc32xx_gpio_of_match[] = {
{ .compatible = "nxp,lpc3220-gpio", },
{ },
};
static struct platform_driver lpc32xx_gpio_driver = {
.driver = {
.name = "lpc32xx-gpio",
.of_match_table = lpc32xx_gpio_of_match,
},
.probe = lpc32xx_gpio_probe,
};
module_platform_driver(lpc32xx_gpio_driver);
MODULE_AUTHOR("Kevin Wells <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GPIO driver for LPC32xx SoC");
| linux-master | drivers/gpio/gpio-lpc32xx.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
#include <linux/gpio.h>
#include "gpiolib.h"
void gpio_free(unsigned gpio)
{
gpiod_free(gpio_to_desc(gpio));
}
EXPORT_SYMBOL_GPL(gpio_free);
/**
* gpio_request_one - request a single GPIO with initial configuration
* @gpio: the GPIO number
* @flags: GPIO configuration as specified by GPIOF_*
* @label: a literal description string of this GPIO
*/
int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
{
struct gpio_desc *desc;
int err;
desc = gpio_to_desc(gpio);
/* Compatibility: assume unavailable "valid" GPIOs will appear later */
if (!desc && gpio_is_valid(gpio))
return -EPROBE_DEFER;
err = gpiod_request(desc, label);
if (err)
return err;
if (flags & GPIOF_ACTIVE_LOW)
set_bit(FLAG_ACTIVE_LOW, &desc->flags);
if (flags & GPIOF_DIR_IN)
err = gpiod_direction_input(desc);
else
err = gpiod_direction_output_raw(desc,
(flags & GPIOF_INIT_HIGH) ? 1 : 0);
if (err)
goto free_gpio;
return 0;
free_gpio:
gpiod_free(desc);
return err;
}
EXPORT_SYMBOL_GPL(gpio_request_one);
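/*
* A minimal usage sketch for this legacy API (illustrative; GPIO number 42
* and the label are hypothetical):
*
*	int err = gpio_request_one(42, GPIOF_OUT_INIT_HIGH, "foo-reset");
*	if (err)
*		return err;
*	...
*	gpio_free(42);
*
* New code should prefer the descriptor-based gpiod_* API.
*/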
int gpio_request(unsigned gpio, const char *label)
{
struct gpio_desc *desc = gpio_to_desc(gpio);
/* Compatibility: assume unavailable "valid" GPIOs will appear later */
if (!desc && gpio_is_valid(gpio))
return -EPROBE_DEFER;
return gpiod_request(desc, label);
}
EXPORT_SYMBOL_GPL(gpio_request);
/**
* gpio_request_array - request multiple GPIOs in a single call
* @array: array of the 'struct gpio'
* @num: how many GPIOs in the array
*/
int gpio_request_array(const struct gpio *array, size_t num)
{
int i, err;
for (i = 0; i < num; i++, array++) {
err = gpio_request_one(array->gpio, array->flags, array->label);
if (err)
goto err_free;
}
return 0;
err_free:
while (i--)
gpio_free((--array)->gpio);
return err;
}
EXPORT_SYMBOL_GPL(gpio_request_array);
/**
* gpio_free_array - release multiple GPIOs in a single call
* @array: array of the 'struct gpio'
* @num: how many GPIOs in the array
*/
void gpio_free_array(const struct gpio *array, size_t num)
{
while (num--)
gpio_free((array++)->gpio);
}
EXPORT_SYMBOL_GPL(gpio_free_array);
| linux-master | drivers/gpio/gpiolib-legacy.c |
// SPDX-License-Identifier: GPL-2.0
/*
* gpiolib-devres.c - managed gpio resources
* This file is based on kernel/irq/devres.c
*
* Copyright (c) 2011 John Crispin <[email protected]>
*/
#include <linux/module.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/device.h>
#include <linux/gfp.h>
#include "gpiolib.h"
static void devm_gpiod_release(struct device *dev, void *res)
{
struct gpio_desc **desc = res;
gpiod_put(*desc);
}
static int devm_gpiod_match(struct device *dev, void *res, void *data)
{
struct gpio_desc **this = res, **gpio = data;
return *this == *gpio;
}
static void devm_gpiod_release_array(struct device *dev, void *res)
{
struct gpio_descs **descs = res;
gpiod_put_array(*descs);
}
static int devm_gpiod_match_array(struct device *dev, void *res, void *data)
{
struct gpio_descs **this = res, **gpios = data;
return *this == *gpios;
}
/**
* devm_gpiod_get - Resource-managed gpiod_get()
* @dev: GPIO consumer
* @con_id: function within the GPIO consumer
* @flags: optional GPIO initialization flags
*
* Managed gpiod_get(). GPIO descriptors returned from this function are
* automatically disposed on driver detach. See gpiod_get() for detailed
* information about behavior and return values.
*/
struct gpio_desc *__must_check devm_gpiod_get(struct device *dev,
const char *con_id,
enum gpiod_flags flags)
{
return devm_gpiod_get_index(dev, con_id, 0, flags);
}
EXPORT_SYMBOL_GPL(devm_gpiod_get);
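/*
* A minimal usage sketch (illustrative; the "reset" con_id is hypothetical
* and must match the consumer's firmware description):
*
*	struct gpio_desc *reset;
*	reset = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
*	if (IS_ERR(reset))
*		return PTR_ERR(reset);
*
* No explicit gpiod_put() is needed; the descriptor is released
* automatically when the driver detaches.
*/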
/**
* devm_gpiod_get_optional - Resource-managed gpiod_get_optional()
* @dev: GPIO consumer
* @con_id: function within the GPIO consumer
* @flags: optional GPIO initialization flags
*
* Managed gpiod_get_optional(). GPIO descriptors returned from this function
* are automatically disposed on driver detach. See gpiod_get_optional() for
* detailed information about behavior and return values.
*/
struct gpio_desc *__must_check devm_gpiod_get_optional(struct device *dev,
const char *con_id,
enum gpiod_flags flags)
{
return devm_gpiod_get_index_optional(dev, con_id, 0, flags);
}
EXPORT_SYMBOL_GPL(devm_gpiod_get_optional);
/**
* devm_gpiod_get_index - Resource-managed gpiod_get_index()
* @dev: GPIO consumer
* @con_id: function within the GPIO consumer
* @idx: index of the GPIO to obtain in the consumer
* @flags: optional GPIO initialization flags
*
* Managed gpiod_get_index(). GPIO descriptors returned from this function are
* automatically disposed on driver detach. See gpiod_get_index() for detailed
* information about behavior and return values.
*/
struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev,
const char *con_id,
unsigned int idx,
enum gpiod_flags flags)
{
struct gpio_desc **dr;
struct gpio_desc *desc;
desc = gpiod_get_index(dev, con_id, idx, flags);
if (IS_ERR(desc))
return desc;
/*
* For non-exclusive GPIO descriptors, check if this descriptor is
* already under resource management by this device.
*/
if (flags & GPIOD_FLAGS_BIT_NONEXCLUSIVE) {
struct devres *dres;
dres = devres_find(dev, devm_gpiod_release,
devm_gpiod_match, &desc);
if (dres)
return desc;
}
dr = devres_alloc(devm_gpiod_release, sizeof(struct gpio_desc *),
GFP_KERNEL);
if (!dr) {
gpiod_put(desc);
return ERR_PTR(-ENOMEM);
}
*dr = desc;
devres_add(dev, dr);
return desc;
}
EXPORT_SYMBOL_GPL(devm_gpiod_get_index);
/**
* devm_fwnode_gpiod_get_index - get a GPIO descriptor from a given node
* @dev: GPIO consumer
* @fwnode: firmware node containing GPIO reference
* @con_id: function within the GPIO consumer
* @index: index of the GPIO to obtain in the consumer
* @flags: GPIO initialization flags
* @label: label to attach to the requested GPIO
*
* GPIO descriptors returned from this function are automatically disposed on
* driver detach.
*
* On successful request the GPIO pin is configured in accordance with
* provided @flags.
*/
struct gpio_desc *devm_fwnode_gpiod_get_index(struct device *dev,
struct fwnode_handle *fwnode,
const char *con_id, int index,
enum gpiod_flags flags,
const char *label)
{
struct gpio_desc **dr;
struct gpio_desc *desc;
dr = devres_alloc(devm_gpiod_release, sizeof(struct gpio_desc *),
GFP_KERNEL);
if (!dr)
return ERR_PTR(-ENOMEM);
desc = fwnode_gpiod_get_index(fwnode, con_id, index, flags, label);
if (IS_ERR(desc)) {
devres_free(dr);
return desc;
}
*dr = desc;
devres_add(dev, dr);
return desc;
}
EXPORT_SYMBOL_GPL(devm_fwnode_gpiod_get_index);
/**
* devm_gpiod_get_index_optional - Resource-managed gpiod_get_index_optional()
* @dev: GPIO consumer
* @con_id: function within the GPIO consumer
* @index: index of the GPIO to obtain in the consumer
* @flags: optional GPIO initialization flags
*
* Managed gpiod_get_index_optional(). GPIO descriptors returned from this
* function are automatically disposed on driver detach. See
* gpiod_get_index_optional() for detailed information about behavior and
* return values.
*/
struct gpio_desc *__must_check devm_gpiod_get_index_optional(struct device *dev,
const char *con_id,
unsigned int index,
enum gpiod_flags flags)
{
struct gpio_desc *desc;
desc = devm_gpiod_get_index(dev, con_id, index, flags);
if (gpiod_not_found(desc))
return NULL;
return desc;
}
EXPORT_SYMBOL_GPL(devm_gpiod_get_index_optional);
/**
* devm_gpiod_get_array - Resource-managed gpiod_get_array()
* @dev: GPIO consumer
* @con_id: function within the GPIO consumer
* @flags: optional GPIO initialization flags
*
* Managed gpiod_get_array(). GPIO descriptors returned from this function are
* automatically disposed on driver detach. See gpiod_get_array() for detailed
* information about behavior and return values.
*/
struct gpio_descs *__must_check devm_gpiod_get_array(struct device *dev,
const char *con_id,
enum gpiod_flags flags)
{
struct gpio_descs **dr;
struct gpio_descs *descs;
dr = devres_alloc(devm_gpiod_release_array,
sizeof(struct gpio_descs *), GFP_KERNEL);
if (!dr)
return ERR_PTR(-ENOMEM);
descs = gpiod_get_array(dev, con_id, flags);
if (IS_ERR(descs)) {
devres_free(dr);
return descs;
}
*dr = descs;
devres_add(dev, dr);
return descs;
}
EXPORT_SYMBOL_GPL(devm_gpiod_get_array);
/**
* devm_gpiod_get_array_optional - Resource-managed gpiod_get_array_optional()
* @dev: GPIO consumer
* @con_id: function within the GPIO consumer
* @flags: optional GPIO initialization flags
*
* Managed gpiod_get_array_optional(). GPIO descriptors returned from this
* function are automatically disposed on driver detach.
* See gpiod_get_array_optional() for detailed information about behavior and
* return values.
*/
struct gpio_descs *__must_check
devm_gpiod_get_array_optional(struct device *dev, const char *con_id,
enum gpiod_flags flags)
{
struct gpio_descs *descs;
descs = devm_gpiod_get_array(dev, con_id, flags);
if (gpiod_not_found(descs))
return NULL;
return descs;
}
EXPORT_SYMBOL_GPL(devm_gpiod_get_array_optional);
/**
* devm_gpiod_put - Resource-managed gpiod_put()
* @dev: GPIO consumer
* @desc: GPIO descriptor to dispose of
*
* Dispose of a GPIO descriptor obtained with devm_gpiod_get() or
* devm_gpiod_get_index(). Normally this function will not be called as the GPIO
* will be disposed of by the resource management code.
*/
void devm_gpiod_put(struct device *dev, struct gpio_desc *desc)
{
WARN_ON(devres_release(dev, devm_gpiod_release, devm_gpiod_match,
&desc));
}
EXPORT_SYMBOL_GPL(devm_gpiod_put);
/**
* devm_gpiod_unhinge - Remove resource management from a gpio descriptor
* @dev: GPIO consumer
* @desc: GPIO descriptor to remove resource management from
*
* Remove resource management from a GPIO descriptor. This is needed when
* you want to hand over lifecycle management of a descriptor to another
* mechanism.
*/
void devm_gpiod_unhinge(struct device *dev, struct gpio_desc *desc)
{
int ret;
if (IS_ERR_OR_NULL(desc))
return;
ret = devres_destroy(dev, devm_gpiod_release,
devm_gpiod_match, &desc);
/*
* If the GPIO descriptor is requested as nonexclusive, we
* may call this function several times on the same descriptor
* so it is OK if devres_destroy() returns -ENOENT.
*/
if (ret == -ENOENT)
return;
/* Anything else we should warn about */
WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_gpiod_unhinge);
/**
* devm_gpiod_put_array - Resource-managed gpiod_put_array()
* @dev: GPIO consumer
* @descs: GPIO descriptor array to dispose of
*
* Dispose of an array of GPIO descriptors obtained with devm_gpiod_get_array().
* Normally this function will not be called as the GPIOs will be disposed of
* by the resource management code.
*/
void devm_gpiod_put_array(struct device *dev, struct gpio_descs *descs)
{
WARN_ON(devres_release(dev, devm_gpiod_release_array,
devm_gpiod_match_array, &descs));
}
EXPORT_SYMBOL_GPL(devm_gpiod_put_array);
static void devm_gpio_release(struct device *dev, void *res)
{
unsigned *gpio = res;
gpio_free(*gpio);
}
/**
* devm_gpio_request - request a GPIO for a managed device
* @dev: device to request the GPIO for
* @gpio: GPIO to allocate
* @label: the name of the requested GPIO
*
* Except for the extra @dev argument, this function takes the
* same arguments and performs the same function as
* gpio_request(). GPIOs requested with this function will be
* automatically freed on driver detach.
*/
int devm_gpio_request(struct device *dev, unsigned gpio, const char *label)
{
unsigned *dr;
int rc;
dr = devres_alloc(devm_gpio_release, sizeof(unsigned), GFP_KERNEL);
if (!dr)
return -ENOMEM;
rc = gpio_request(gpio, label);
if (rc) {
devres_free(dr);
return rc;
}
*dr = gpio;
devres_add(dev, dr);
return 0;
}
EXPORT_SYMBOL_GPL(devm_gpio_request);
/**
* devm_gpio_request_one - request a single GPIO with initial setup
* @dev: device to request for
* @gpio: the GPIO number
* @flags: GPIO configuration as specified by GPIOF_*
* @label: a literal description string of this GPIO
*/
int devm_gpio_request_one(struct device *dev, unsigned gpio,
unsigned long flags, const char *label)
{
unsigned *dr;
int rc;
dr = devres_alloc(devm_gpio_release, sizeof(unsigned), GFP_KERNEL);
if (!dr)
return -ENOMEM;
rc = gpio_request_one(gpio, flags, label);
if (rc) {
devres_free(dr);
return rc;
}
*dr = gpio;
devres_add(dev, dr);
return 0;
}
EXPORT_SYMBOL_GPL(devm_gpio_request_one);
static void devm_gpio_chip_release(void *data)
{
struct gpio_chip *gc = data;
gpiochip_remove(gc);
}
/**
* devm_gpiochip_add_data_with_key() - Resource managed gpiochip_add_data_with_key()
* @dev: pointer to the device that gpio_chip belongs to.
* @gc: the GPIO chip to register
* @data: driver-private data associated with this chip
* @lock_key: lockdep class for IRQ lock
* @request_key: lockdep class for IRQ request
*
* Context: potentially before irqs will work
*
* The gpio chip will automatically be released when the device is unbound.
*
* Returns:
* A negative errno if the chip can't be registered, such as because the
* gc->base is invalid or already associated with a different chip.
* Otherwise it returns zero as a success code.
*/
int devm_gpiochip_add_data_with_key(struct device *dev, struct gpio_chip *gc, void *data,
struct lock_class_key *lock_key,
struct lock_class_key *request_key)
{
int ret;
ret = gpiochip_add_data_with_key(gc, data, lock_key, request_key);
if (ret < 0)
return ret;
return devm_add_action_or_reset(dev, devm_gpio_chip_release, gc);
}
EXPORT_SYMBOL_GPL(devm_gpiochip_add_data_with_key);
| linux-master | drivers/gpio/gpiolib-devres.c |
// SPDX-License-Identifier: GPL-2.0
//
// Copyright (C) 2017 Socionext Inc.
// Author: Masahiro Yamada <[email protected]>
#include <linux/bits.h>
#include <linux/gpio/driver.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <dt-bindings/gpio/uniphier-gpio.h>
#define UNIPHIER_GPIO_IRQ_MAX_NUM 24
#define UNIPHIER_GPIO_PORT_DATA 0x0 /* data */
#define UNIPHIER_GPIO_PORT_DIR 0x4 /* direction (1:in, 0:out) */
#define UNIPHIER_GPIO_IRQ_EN 0x90 /* irq enable */
#define UNIPHIER_GPIO_IRQ_MODE 0x94 /* irq mode (1: both edge) */
#define UNIPHIER_GPIO_IRQ_FLT_EN 0x98 /* noise filter enable */
#define UNIPHIER_GPIO_IRQ_FLT_CYC 0x9c /* noise filter clock cycle */
struct uniphier_gpio_priv {
struct gpio_chip chip;
struct irq_chip irq_chip;
struct irq_domain *domain;
void __iomem *regs;
spinlock_t lock;
u32 saved_vals[];
};
static unsigned int uniphier_gpio_bank_to_reg(unsigned int bank)
{
unsigned int reg;
reg = (bank + 1) * 8;
/*
* Unfortunately, the GPIO port registers are not contiguous because
* offset 0x90-0x9f is used for IRQ. Add 0x10 when crossing the region.
*/
if (reg >= UNIPHIER_GPIO_IRQ_EN)
reg += 0x10;
return reg;
}
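/*
* Illustrative example (not part of the original driver): bank 16 maps to
* (16 + 1) * 8 = 0x88, below the IRQ window, while bank 17 yields 0x90
* and is bumped past the 0x90-0x9f IRQ region to 0xa0.
*/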
static void uniphier_gpio_get_bank_and_mask(unsigned int offset,
unsigned int *bank, u32 *mask)
{
*bank = offset / UNIPHIER_GPIO_LINES_PER_BANK;
*mask = BIT(offset % UNIPHIER_GPIO_LINES_PER_BANK);
}
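/*
* Illustrative example (not part of the original driver), assuming
* UNIPHIER_GPIO_LINES_PER_BANK is 8 as in the dt-bindings header: offset
* 10 decomposes into bank 1 and mask BIT(2).
*/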
static void uniphier_gpio_reg_update(struct uniphier_gpio_priv *priv,
unsigned int reg, u32 mask, u32 val)
{
unsigned long flags;
u32 tmp;
spin_lock_irqsave(&priv->lock, flags);
tmp = readl(priv->regs + reg);
tmp &= ~mask;
tmp |= mask & val;
writel(tmp, priv->regs + reg);
spin_unlock_irqrestore(&priv->lock, flags);
}
static void uniphier_gpio_bank_write(struct gpio_chip *chip, unsigned int bank,
unsigned int reg, u32 mask, u32 val)
{
struct uniphier_gpio_priv *priv = gpiochip_get_data(chip);
if (!mask)
return;
uniphier_gpio_reg_update(priv, uniphier_gpio_bank_to_reg(bank) + reg,
mask, val);
}
static void uniphier_gpio_offset_write(struct gpio_chip *chip,
unsigned int offset, unsigned int reg,
int val)
{
unsigned int bank;
u32 mask;
uniphier_gpio_get_bank_and_mask(offset, &bank, &mask);
uniphier_gpio_bank_write(chip, bank, reg, mask, val ? mask : 0);
}
static int uniphier_gpio_offset_read(struct gpio_chip *chip,
unsigned int offset, unsigned int reg)
{
struct uniphier_gpio_priv *priv = gpiochip_get_data(chip);
unsigned int bank, reg_offset;
u32 mask;
uniphier_gpio_get_bank_and_mask(offset, &bank, &mask);
reg_offset = uniphier_gpio_bank_to_reg(bank) + reg;
return !!(readl(priv->regs + reg_offset) & mask);
}
static int uniphier_gpio_get_direction(struct gpio_chip *chip,
unsigned int offset)
{
if (uniphier_gpio_offset_read(chip, offset, UNIPHIER_GPIO_PORT_DIR))
return GPIO_LINE_DIRECTION_IN;
return GPIO_LINE_DIRECTION_OUT;
}
static int uniphier_gpio_direction_input(struct gpio_chip *chip,
unsigned int offset)
{
uniphier_gpio_offset_write(chip, offset, UNIPHIER_GPIO_PORT_DIR, 1);
return 0;
}
static int uniphier_gpio_direction_output(struct gpio_chip *chip,
unsigned int offset, int val)
{
uniphier_gpio_offset_write(chip, offset, UNIPHIER_GPIO_PORT_DATA, val);
uniphier_gpio_offset_write(chip, offset, UNIPHIER_GPIO_PORT_DIR, 0);
return 0;
}
static int uniphier_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
return uniphier_gpio_offset_read(chip, offset, UNIPHIER_GPIO_PORT_DATA);
}
static void uniphier_gpio_set(struct gpio_chip *chip,
unsigned int offset, int val)
{
uniphier_gpio_offset_write(chip, offset, UNIPHIER_GPIO_PORT_DATA, val);
}
static void uniphier_gpio_set_multiple(struct gpio_chip *chip,
unsigned long *mask, unsigned long *bits)
{
unsigned long i, bank, bank_mask, bank_bits;
for_each_set_clump8(i, bank_mask, mask, chip->ngpio) {
bank = i / UNIPHIER_GPIO_LINES_PER_BANK;
bank_bits = bitmap_get_value8(bits, i);
uniphier_gpio_bank_write(chip, bank, UNIPHIER_GPIO_PORT_DATA,
bank_mask, bank_bits);
}
}
static int uniphier_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
{
struct irq_fwspec fwspec;
if (offset < UNIPHIER_GPIO_IRQ_OFFSET)
return -ENXIO;
fwspec.fwnode = of_node_to_fwnode(chip->parent->of_node);
fwspec.param_count = 2;
fwspec.param[0] = offset - UNIPHIER_GPIO_IRQ_OFFSET;
/*
* IRQ_TYPE_NONE is rejected by the parent irq domain. Set LEVEL_HIGH
* temporarily. Anyway, ->irq_set_type() will override it later.
*/
fwspec.param[1] = IRQ_TYPE_LEVEL_HIGH;
return irq_create_fwspec_mapping(&fwspec);
}
static void uniphier_gpio_irq_mask(struct irq_data *data)
{
struct uniphier_gpio_priv *priv = irq_data_get_irq_chip_data(data);
u32 mask = BIT(irqd_to_hwirq(data));
uniphier_gpio_reg_update(priv, UNIPHIER_GPIO_IRQ_EN, mask, 0);
irq_chip_mask_parent(data);
}
static void uniphier_gpio_irq_unmask(struct irq_data *data)
{
struct uniphier_gpio_priv *priv = irq_data_get_irq_chip_data(data);
u32 mask = BIT(irqd_to_hwirq(data));
uniphier_gpio_reg_update(priv, UNIPHIER_GPIO_IRQ_EN, mask, mask);
irq_chip_unmask_parent(data);
}
static int uniphier_gpio_irq_set_type(struct irq_data *data, unsigned int type)
{
struct uniphier_gpio_priv *priv = irq_data_get_irq_chip_data(data);
u32 mask = BIT(irqd_to_hwirq(data));
u32 val = 0;
if (type == IRQ_TYPE_EDGE_BOTH) {
val = mask;
type = IRQ_TYPE_EDGE_FALLING;
}
uniphier_gpio_reg_update(priv, UNIPHIER_GPIO_IRQ_MODE, mask, val);
/* To enable both edge detection, the noise filter must be enabled. */
uniphier_gpio_reg_update(priv, UNIPHIER_GPIO_IRQ_FLT_EN, mask, val);
return irq_chip_set_type_parent(data, type);
}
static int uniphier_gpio_irq_get_parent_hwirq(struct uniphier_gpio_priv *priv,
unsigned int hwirq)
{
struct device_node *np = priv->chip.parent->of_node;
const __be32 *range;
u32 base, parent_base, size;
int len;
range = of_get_property(np, "socionext,interrupt-ranges", &len);
if (!range)
return -EINVAL;
len /= sizeof(*range);
for (; len >= 3; len -= 3) {
base = be32_to_cpu(*range++);
parent_base = be32_to_cpu(*range++);
size = be32_to_cpu(*range++);
if (base <= hwirq && hwirq < base + size)
return hwirq - base + parent_base;
}
return -ENOENT;
}
static int uniphier_gpio_irq_domain_translate(struct irq_domain *domain,
struct irq_fwspec *fwspec,
unsigned long *out_hwirq,
unsigned int *out_type)
{
if (WARN_ON(fwspec->param_count < 2))
return -EINVAL;
*out_hwirq = fwspec->param[0];
*out_type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
return 0;
}
static int uniphier_gpio_irq_domain_alloc(struct irq_domain *domain,
unsigned int virq,
unsigned int nr_irqs, void *arg)
{
struct uniphier_gpio_priv *priv = domain->host_data;
struct irq_fwspec parent_fwspec;
irq_hw_number_t hwirq;
unsigned int type;
int ret;
if (WARN_ON(nr_irqs != 1))
return -EINVAL;
ret = uniphier_gpio_irq_domain_translate(domain, arg, &hwirq, &type);
if (ret)
return ret;
ret = uniphier_gpio_irq_get_parent_hwirq(priv, hwirq);
if (ret < 0)
return ret;
/* parent is UniPhier AIDET */
parent_fwspec.fwnode = domain->parent->fwnode;
parent_fwspec.param_count = 2;
parent_fwspec.param[0] = ret;
parent_fwspec.param[1] = (type == IRQ_TYPE_EDGE_BOTH) ?
IRQ_TYPE_EDGE_FALLING : type;
ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
&priv->irq_chip, priv);
if (ret)
return ret;
return irq_domain_alloc_irqs_parent(domain, virq, 1, &parent_fwspec);
}
static int uniphier_gpio_irq_domain_activate(struct irq_domain *domain,
struct irq_data *data, bool early)
{
struct uniphier_gpio_priv *priv = domain->host_data;
struct gpio_chip *chip = &priv->chip;
return gpiochip_lock_as_irq(chip,
irqd_to_hwirq(data) + UNIPHIER_GPIO_IRQ_OFFSET);
}
static void uniphier_gpio_irq_domain_deactivate(struct irq_domain *domain,
struct irq_data *data)
{
struct uniphier_gpio_priv *priv = domain->host_data;
struct gpio_chip *chip = &priv->chip;
gpiochip_unlock_as_irq(chip,
irqd_to_hwirq(data) + UNIPHIER_GPIO_IRQ_OFFSET);
}
static const struct irq_domain_ops uniphier_gpio_irq_domain_ops = {
.alloc = uniphier_gpio_irq_domain_alloc,
.free = irq_domain_free_irqs_common,
.activate = uniphier_gpio_irq_domain_activate,
.deactivate = uniphier_gpio_irq_domain_deactivate,
.translate = uniphier_gpio_irq_domain_translate,
};
static void uniphier_gpio_hw_init(struct uniphier_gpio_priv *priv)
{
/*
* Due to the hardware design, the noise filter must be enabled to
* detect both edge interrupts. This filter is intended to remove the
* noise from the irq lines. It does not work for GPIO input, so GPIO
* debounce is not supported. Unfortunately, the filter period is
* shared among all irq lines. Just choose a sensible period here.
*/
writel(0xff, priv->regs + UNIPHIER_GPIO_IRQ_FLT_CYC);
}
static unsigned int uniphier_gpio_get_nbanks(unsigned int ngpio)
{
return DIV_ROUND_UP(ngpio, UNIPHIER_GPIO_LINES_PER_BANK);
}
static int uniphier_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *parent_np;
struct irq_domain *parent_domain;
struct uniphier_gpio_priv *priv;
struct gpio_chip *chip;
struct irq_chip *irq_chip;
unsigned int nregs;
u32 ngpios;
int ret;
parent_np = of_irq_find_parent(dev->of_node);
if (!parent_np)
return -ENXIO;
parent_domain = irq_find_host(parent_np);
of_node_put(parent_np);
if (!parent_domain)
return -EPROBE_DEFER;
ret = of_property_read_u32(dev->of_node, "ngpios", &ngpios);
if (ret)
return ret;
nregs = uniphier_gpio_get_nbanks(ngpios) * 2 + 3;
priv = devm_kzalloc(dev, struct_size(priv, saved_vals, nregs),
GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->regs))
return PTR_ERR(priv->regs);
spin_lock_init(&priv->lock);
chip = &priv->chip;
chip->label = dev_name(dev);
chip->parent = dev;
chip->request = gpiochip_generic_request;
chip->free = gpiochip_generic_free;
chip->get_direction = uniphier_gpio_get_direction;
chip->direction_input = uniphier_gpio_direction_input;
chip->direction_output = uniphier_gpio_direction_output;
chip->get = uniphier_gpio_get;
chip->set = uniphier_gpio_set;
chip->set_multiple = uniphier_gpio_set_multiple;
chip->to_irq = uniphier_gpio_to_irq;
chip->base = -1;
chip->ngpio = ngpios;
irq_chip = &priv->irq_chip;
irq_chip->name = dev_name(dev);
irq_chip->irq_mask = uniphier_gpio_irq_mask;
irq_chip->irq_unmask = uniphier_gpio_irq_unmask;
irq_chip->irq_eoi = irq_chip_eoi_parent;
irq_chip->irq_set_affinity = irq_chip_set_affinity_parent;
irq_chip->irq_set_type = uniphier_gpio_irq_set_type;
uniphier_gpio_hw_init(priv);
ret = devm_gpiochip_add_data(dev, chip, priv);
if (ret)
return ret;
priv->domain = irq_domain_create_hierarchy(
parent_domain, 0,
UNIPHIER_GPIO_IRQ_MAX_NUM,
of_node_to_fwnode(dev->of_node),
&uniphier_gpio_irq_domain_ops, priv);
if (!priv->domain)
return -ENOMEM;
platform_set_drvdata(pdev, priv);
return 0;
}
static int uniphier_gpio_remove(struct platform_device *pdev)
{
struct uniphier_gpio_priv *priv = platform_get_drvdata(pdev);
irq_domain_remove(priv->domain);
return 0;
}
static int __maybe_unused uniphier_gpio_suspend(struct device *dev)
{
struct uniphier_gpio_priv *priv = dev_get_drvdata(dev);
unsigned int nbanks = uniphier_gpio_get_nbanks(priv->chip.ngpio);
u32 *val = priv->saved_vals;
unsigned int reg;
int i;
for (i = 0; i < nbanks; i++) {
reg = uniphier_gpio_bank_to_reg(i);
*val++ = readl(priv->regs + reg + UNIPHIER_GPIO_PORT_DATA);
*val++ = readl(priv->regs + reg + UNIPHIER_GPIO_PORT_DIR);
}
*val++ = readl(priv->regs + UNIPHIER_GPIO_IRQ_EN);
*val++ = readl(priv->regs + UNIPHIER_GPIO_IRQ_MODE);
*val++ = readl(priv->regs + UNIPHIER_GPIO_IRQ_FLT_EN);
return 0;
}
static int __maybe_unused uniphier_gpio_resume(struct device *dev)
{
struct uniphier_gpio_priv *priv = dev_get_drvdata(dev);
unsigned int nbanks = uniphier_gpio_get_nbanks(priv->chip.ngpio);
const u32 *val = priv->saved_vals;
unsigned int reg;
int i;
for (i = 0; i < nbanks; i++) {
reg = uniphier_gpio_bank_to_reg(i);
writel(*val++, priv->regs + reg + UNIPHIER_GPIO_PORT_DATA);
writel(*val++, priv->regs + reg + UNIPHIER_GPIO_PORT_DIR);
}
writel(*val++, priv->regs + UNIPHIER_GPIO_IRQ_EN);
writel(*val++, priv->regs + UNIPHIER_GPIO_IRQ_MODE);
writel(*val++, priv->regs + UNIPHIER_GPIO_IRQ_FLT_EN);
uniphier_gpio_hw_init(priv);
return 0;
}
static const struct dev_pm_ops uniphier_gpio_pm_ops = {
SET_LATE_SYSTEM_SLEEP_PM_OPS(uniphier_gpio_suspend,
uniphier_gpio_resume)
};
static const struct of_device_id uniphier_gpio_match[] = {
{ .compatible = "socionext,uniphier-gpio" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, uniphier_gpio_match);
static struct platform_driver uniphier_gpio_driver = {
.probe = uniphier_gpio_probe,
.remove = uniphier_gpio_remove,
.driver = {
.name = "uniphier-gpio",
.of_match_table = uniphier_gpio_match,
.pm = &uniphier_gpio_pm_ops,
},
};
module_platform_driver(uniphier_gpio_driver);
MODULE_AUTHOR("Masahiro Yamada <[email protected]>");
MODULE_DESCRIPTION("UniPhier GPIO driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpio/gpio-uniphier.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Digital I/O driver for Technologic Systems TS-5500
*
* Copyright (c) 2012 Savoir-faire Linux Inc.
* Vivien Didelot <[email protected]>
*
* Technologic Systems platforms have pin blocks, exposing several Digital
 * Input/Output lines (DIO). This driver aims to support single pin blocks,
 * so it is not limited to the TS-5500 blocks. The following platforms
 * currently have DIO support:
*
* TS-5500:
* Documentation: https://docs.embeddedts.com/TS-5500
* Blocks: DIO1, DIO2 and LCD port.
*
* TS-5600:
* Documentation: https://docs.embeddedts.com/TS-5600
* Blocks: LCD port (identical to TS-5500 LCD).
*/
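/*
 * Minimal board-file sketch (editor's illustration, not shipped with this
 * driver) for instantiating one of the blocks below; the IRQ resource
 * carries the strapped interrupt line:
 *
 *	static struct resource dio1_resources[] = {
 *		DEFINE_RES_IRQ(7),
 *	};
 *
 *	platform_device_register_simple("ts5500-dio1", -1, dio1_resources,
 *					ARRAY_SIZE(dio1_resources));
 */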
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
/* List of supported Technologic Systems platforms DIO blocks */
enum ts5500_blocks { TS5500_DIO1, TS5500_DIO2, TS5500_LCD, TS5600_LCD };
struct ts5500_priv {
const struct ts5500_dio *pinout;
struct gpio_chip gpio_chip;
spinlock_t lock;
bool strap;
u8 hwirq;
};
/*
 * I/O port 0x7D controls several blocks (e.g. DIO2 and the LCD port).
 * This flag ensures the region is requested only once by this driver.
*/
static bool hex7d_reserved;
/*
* This structure is used to describe capabilities of DIO lines,
* such as available directions and connected interrupt (if any).
*/
struct ts5500_dio {
const u8 value_addr;
const u8 value_mask;
const u8 control_addr;
const u8 control_mask;
const bool no_input;
const bool no_output;
const u8 irq;
};
#define TS5500_DIO_IN_OUT(vaddr, vbit, caddr, cbit) \
{ \
.value_addr = vaddr, \
.value_mask = BIT(vbit), \
.control_addr = caddr, \
.control_mask = BIT(cbit), \
}
#define TS5500_DIO_IN(addr, bit) \
{ \
.value_addr = addr, \
.value_mask = BIT(bit), \
.no_output = true, \
}
#define TS5500_DIO_IN_IRQ(addr, bit, _irq) \
{ \
.value_addr = addr, \
.value_mask = BIT(bit), \
.no_output = true, \
.irq = _irq, \
}
#define TS5500_DIO_OUT(addr, bit) \
{ \
.value_addr = addr, \
.value_mask = BIT(bit), \
.no_input = true, \
}
/*
* Input/Output DIO lines are programmed in groups of 4. Their values are
* available through 4 consecutive bits in a value port, whereas the direction
* of these 4 lines is driven by only 1 bit in a control port.
*/
#define TS5500_DIO_GROUP(vaddr, vbitfrom, caddr, cbit) \
TS5500_DIO_IN_OUT(vaddr, vbitfrom + 0, caddr, cbit), \
TS5500_DIO_IN_OUT(vaddr, vbitfrom + 1, caddr, cbit), \
TS5500_DIO_IN_OUT(vaddr, vbitfrom + 2, caddr, cbit), \
TS5500_DIO_IN_OUT(vaddr, vbitfrom + 3, caddr, cbit)
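/*
 * For illustration, TS5500_DIO_GROUP(0x7b, 0, 0x7a, 0) expands to four
 * TS5500_DIO_IN_OUT entries covering value bits 0..3 of port 0x7b, all
 * sharing direction bit 0 of control port 0x7a.
 */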
/*
* TS-5500 DIO1 block
*
 *  value    control  dir      hw
 *  addr bit addr bit in out   irq  name     pin offset
 *
 *  0x7b  0  0x7a  0  x  x          DIO1_0   1   0
 *  0x7b  1  0x7a  0  x  x          DIO1_1   3   1
 *  0x7b  2  0x7a  0  x  x          DIO1_2   5   2
 *  0x7b  3  0x7a  0  x  x          DIO1_3   7   3
 *  0x7b  4  0x7a  1  x  x          DIO1_4   9   4
 *  0x7b  5  0x7a  1  x  x          DIO1_5   11  5
 *  0x7b  6  0x7a  1  x  x          DIO1_6   13  6
 *  0x7b  7  0x7a  1  x  x          DIO1_7   15  7
 *  0x7c  0  0x7a  5  x  x          DIO1_8   4   8
 *  0x7c  1  0x7a  5  x  x          DIO1_9   6   9
 *  0x7c  2  0x7a  5  x  x          DIO1_10  8   10
 *  0x7c  3  0x7a  5  x  x          DIO1_11  10  11
 *  0x7c  4           x             DIO1_12  12  12
 *  0x7c  5           x        7    DIO1_13  14  13
*/
static const struct ts5500_dio ts5500_dio1[] = {
TS5500_DIO_GROUP(0x7b, 0, 0x7a, 0),
TS5500_DIO_GROUP(0x7b, 4, 0x7a, 1),
TS5500_DIO_GROUP(0x7c, 0, 0x7a, 5),
TS5500_DIO_IN(0x7c, 4),
TS5500_DIO_IN_IRQ(0x7c, 5, 7),
};
/*
* TS-5500 DIO2 block
*
 *  value    control  dir      hw
 *  addr bit addr bit in out   irq  name     pin offset
 *
 *  0x7e  0  0x7d  0  x  x          DIO2_0   1   0
 *  0x7e  1  0x7d  0  x  x          DIO2_1   3   1
 *  0x7e  2  0x7d  0  x  x          DIO2_2   5   2
 *  0x7e  3  0x7d  0  x  x          DIO2_3   7   3
 *  0x7e  4  0x7d  1  x  x          DIO2_4   9   4
 *  0x7e  5  0x7d  1  x  x          DIO2_5   11  5
 *  0x7e  6  0x7d  1  x  x          DIO2_6   13  6
 *  0x7e  7  0x7d  1  x  x          DIO2_7   15  7
 *  0x7f  0  0x7d  5  x  x          DIO2_8   4   8
 *  0x7f  1  0x7d  5  x  x          DIO2_9   6   9
 *  0x7f  2  0x7d  5  x  x          DIO2_10  8   10
 *  0x7f  3  0x7d  5  x  x          DIO2_11  10  11
 *  0x7f  4           x        6    DIO2_13  14  12
*/
static const struct ts5500_dio ts5500_dio2[] = {
TS5500_DIO_GROUP(0x7e, 0, 0x7d, 0),
TS5500_DIO_GROUP(0x7e, 4, 0x7d, 1),
TS5500_DIO_GROUP(0x7f, 0, 0x7d, 5),
TS5500_DIO_IN_IRQ(0x7f, 4, 6),
};
/*
* TS-5500 LCD port used as DIO block
* TS-5600 LCD port is identical
*
 *  value    control  dir      hw
 *  addr bit addr bit in out   irq  name    pin offset
 *
 *  0x72  0  0x7d  2  x  x          LCD_0   8   0
 *  0x72  1  0x7d  2  x  x          LCD_1   7   1
 *  0x72  2  0x7d  2  x  x          LCD_2   10  2
 *  0x72  3  0x7d  2  x  x          LCD_3   9   3
 *  0x72  4  0x7d  3  x  x          LCD_4   12  4
 *  0x72  5  0x7d  3  x  x          LCD_5   11  5
 *  0x72  6  0x7d  3  x  x          LCD_6   14  6
 *  0x72  7  0x7d  3  x  x          LCD_7   13  7
 *  0x73  0              x          LCD_EN  5   8
 *  0x73  6           x             LCD_WR  6   9
 *  0x73  7           x        1    LCD_RS  3   10
*/
static const struct ts5500_dio ts5500_lcd[] = {
TS5500_DIO_GROUP(0x72, 0, 0x7d, 2),
TS5500_DIO_GROUP(0x72, 4, 0x7d, 3),
TS5500_DIO_OUT(0x73, 0),
TS5500_DIO_IN(0x73, 6),
TS5500_DIO_IN_IRQ(0x73, 7, 1),
};
static inline void ts5500_set_mask(u8 mask, u8 addr)
{
u8 val = inb(addr);
val |= mask;
outb(val, addr);
}
static inline void ts5500_clear_mask(u8 mask, u8 addr)
{
u8 val = inb(addr);
val &= ~mask;
outb(val, addr);
}
static int ts5500_gpio_input(struct gpio_chip *chip, unsigned offset)
{
struct ts5500_priv *priv = gpiochip_get_data(chip);
const struct ts5500_dio line = priv->pinout[offset];
unsigned long flags;
if (line.no_input)
return -ENXIO;
if (line.no_output)
return 0;
spin_lock_irqsave(&priv->lock, flags);
ts5500_clear_mask(line.control_mask, line.control_addr);
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
static int ts5500_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct ts5500_priv *priv = gpiochip_get_data(chip);
const struct ts5500_dio line = priv->pinout[offset];
return !!(inb(line.value_addr) & line.value_mask);
}
static int ts5500_gpio_output(struct gpio_chip *chip, unsigned offset, int val)
{
struct ts5500_priv *priv = gpiochip_get_data(chip);
const struct ts5500_dio line = priv->pinout[offset];
unsigned long flags;
if (line.no_output)
return -ENXIO;
spin_lock_irqsave(&priv->lock, flags);
if (!line.no_input)
ts5500_set_mask(line.control_mask, line.control_addr);
if (val)
ts5500_set_mask(line.value_mask, line.value_addr);
else
ts5500_clear_mask(line.value_mask, line.value_addr);
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
static void ts5500_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
{
struct ts5500_priv *priv = gpiochip_get_data(chip);
const struct ts5500_dio line = priv->pinout[offset];
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
if (val)
ts5500_set_mask(line.value_mask, line.value_addr);
else
ts5500_clear_mask(line.value_mask, line.value_addr);
spin_unlock_irqrestore(&priv->lock, flags);
}
static int ts5500_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
struct ts5500_priv *priv = gpiochip_get_data(chip);
const struct ts5500_dio *block = priv->pinout;
const struct ts5500_dio line = block[offset];
/* Only one pin is connected to an interrupt */
if (line.irq)
return line.irq;
/* As this pin is input-only, we may strap it to another in/out pin */
if (priv->strap)
return priv->hwirq;
return -ENXIO;
}
static int ts5500_enable_irq(struct ts5500_priv *priv)
{
int ret = 0;
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
if (priv->hwirq == 7)
ts5500_set_mask(BIT(7), 0x7a); /* DIO1_13 on IRQ7 */
else if (priv->hwirq == 6)
ts5500_set_mask(BIT(7), 0x7d); /* DIO2_13 on IRQ6 */
else if (priv->hwirq == 1)
ts5500_set_mask(BIT(6), 0x7d); /* LCD_RS on IRQ1 */
else
ret = -EINVAL;
spin_unlock_irqrestore(&priv->lock, flags);
return ret;
}
static void ts5500_disable_irq(struct ts5500_priv *priv)
{
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
if (priv->hwirq == 7)
ts5500_clear_mask(BIT(7), 0x7a); /* DIO1_13 on IRQ7 */
else if (priv->hwirq == 6)
ts5500_clear_mask(BIT(7), 0x7d); /* DIO2_13 on IRQ6 */
else if (priv->hwirq == 1)
ts5500_clear_mask(BIT(6), 0x7d); /* LCD_RS on IRQ1 */
else
dev_err(priv->gpio_chip.parent, "invalid hwirq %d\n",
priv->hwirq);
spin_unlock_irqrestore(&priv->lock, flags);
}
static int ts5500_dio_probe(struct platform_device *pdev)
{
enum ts5500_blocks block = platform_get_device_id(pdev)->driver_data;
struct device *dev = &pdev->dev;
const char *name = dev_name(dev);
struct ts5500_priv *priv;
unsigned long flags;
int ret;
ret = platform_get_irq(pdev, 0);
if (ret < 0)
return ret;
priv = devm_kzalloc(dev, sizeof(struct ts5500_priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
platform_set_drvdata(pdev, priv);
priv->hwirq = ret;
spin_lock_init(&priv->lock);
priv->gpio_chip.owner = THIS_MODULE;
priv->gpio_chip.label = name;
priv->gpio_chip.parent = dev;
priv->gpio_chip.direction_input = ts5500_gpio_input;
priv->gpio_chip.direction_output = ts5500_gpio_output;
priv->gpio_chip.get = ts5500_gpio_get;
priv->gpio_chip.set = ts5500_gpio_set;
priv->gpio_chip.to_irq = ts5500_gpio_to_irq;
priv->gpio_chip.base = -1;
switch (block) {
case TS5500_DIO1:
priv->pinout = ts5500_dio1;
priv->gpio_chip.ngpio = ARRAY_SIZE(ts5500_dio1);
if (!devm_request_region(dev, 0x7a, 3, name)) {
dev_err(dev, "failed to request %s ports\n", name);
return -EBUSY;
}
break;
case TS5500_DIO2:
priv->pinout = ts5500_dio2;
priv->gpio_chip.ngpio = ARRAY_SIZE(ts5500_dio2);
if (!devm_request_region(dev, 0x7e, 2, name)) {
dev_err(dev, "failed to request %s ports\n", name);
return -EBUSY;
}
if (hex7d_reserved)
break;
if (!devm_request_region(dev, 0x7d, 1, name)) {
dev_err(dev, "failed to request %s 7D\n", name);
return -EBUSY;
}
hex7d_reserved = true;
break;
case TS5500_LCD:
case TS5600_LCD:
priv->pinout = ts5500_lcd;
priv->gpio_chip.ngpio = ARRAY_SIZE(ts5500_lcd);
if (!devm_request_region(dev, 0x72, 2, name)) {
dev_err(dev, "failed to request %s ports\n", name);
return -EBUSY;
}
if (!hex7d_reserved) {
if (!devm_request_region(dev, 0x7d, 1, name)) {
dev_err(dev, "failed to request %s 7D\n", name);
return -EBUSY;
}
hex7d_reserved = true;
}
/* Ensure usage of LCD port as DIO */
spin_lock_irqsave(&priv->lock, flags);
ts5500_clear_mask(BIT(4), 0x7d);
spin_unlock_irqrestore(&priv->lock, flags);
break;
}
ret = devm_gpiochip_add_data(dev, &priv->gpio_chip, priv);
if (ret) {
dev_err(dev, "failed to register the gpio chip\n");
return ret;
}
ret = ts5500_enable_irq(priv);
if (ret) {
dev_err(dev, "invalid interrupt %d\n", priv->hwirq);
return ret;
}
return 0;
}
static int ts5500_dio_remove(struct platform_device *pdev)
{
struct ts5500_priv *priv = platform_get_drvdata(pdev);
ts5500_disable_irq(priv);
return 0;
}
static const struct platform_device_id ts5500_dio_ids[] = {
{ "ts5500-dio1", TS5500_DIO1 },
{ "ts5500-dio2", TS5500_DIO2 },
{ "ts5500-dio-lcd", TS5500_LCD },
{ "ts5600-dio-lcd", TS5600_LCD },
{ }
};
MODULE_DEVICE_TABLE(platform, ts5500_dio_ids);
static struct platform_driver ts5500_dio_driver = {
.driver = {
.name = "ts5500-dio",
},
.probe = ts5500_dio_probe,
.remove = ts5500_dio_remove,
.id_table = ts5500_dio_ids,
};
module_platform_driver(ts5500_dio_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Savoir-faire Linux Inc. <[email protected]>");
MODULE_DESCRIPTION("Technologic Systems TS-5500 Digital I/O driver");
| linux-master | drivers/gpio/gpio-ts5500.c |
// SPDX-License-Identifier: GPL-2.0-only
//
// GPIO Aggregator
//
// Copyright (C) 2019-2020 Glider bv
#define DRV_NAME "gpio-aggregator"
#define pr_fmt(fmt) DRV_NAME ": " fmt
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/overflow.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
#include <linux/gpio/machine.h>
#define AGGREGATOR_MAX_GPIOS 512
/*
* GPIO Aggregator sysfs interface
*/
struct gpio_aggregator {
struct gpiod_lookup_table *lookups;
struct platform_device *pdev;
char args[];
};
static DEFINE_MUTEX(gpio_aggregator_lock); /* protects idr */
static DEFINE_IDR(gpio_aggregator_idr);
static int aggr_add_gpio(struct gpio_aggregator *aggr, const char *key,
int hwnum, unsigned int *n)
{
struct gpiod_lookup_table *lookups;
lookups = krealloc(aggr->lookups, struct_size(lookups, table, *n + 2),
GFP_KERNEL);
if (!lookups)
return -ENOMEM;
lookups->table[*n] = GPIO_LOOKUP_IDX(key, hwnum, NULL, *n, 0);
(*n)++;
memset(&lookups->table[*n], 0, sizeof(lookups->table[*n]));
aggr->lookups = lookups;
return 0;
}
static int aggr_parse(struct gpio_aggregator *aggr)
{
char *args = skip_spaces(aggr->args);
char *name, *offsets, *p;
unsigned long *bitmap;
unsigned int i, n = 0;
int error = 0;
bitmap = bitmap_alloc(AGGREGATOR_MAX_GPIOS, GFP_KERNEL);
if (!bitmap)
return -ENOMEM;
args = next_arg(args, &name, &p);
while (*args) {
args = next_arg(args, &offsets, &p);
p = get_options(offsets, 0, &error);
if (error == 0 || *p) {
/* Named GPIO line */
error = aggr_add_gpio(aggr, name, U16_MAX, &n);
if (error)
goto free_bitmap;
name = offsets;
continue;
}
/* GPIO chip + offset(s) */
error = bitmap_parselist(offsets, bitmap, AGGREGATOR_MAX_GPIOS);
if (error) {
pr_err("Cannot parse %s: %d\n", offsets, error);
goto free_bitmap;
}
for_each_set_bit(i, bitmap, AGGREGATOR_MAX_GPIOS) {
error = aggr_add_gpio(aggr, name, i, &n);
if (error)
goto free_bitmap;
}
args = next_arg(args, &name, &p);
}
if (!n) {
pr_err("No GPIOs specified\n");
error = -EINVAL;
}
free_bitmap:
bitmap_free(bitmap);
return error;
}
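/*
 * Sketch of the accepted format, as parsed above (editor's note, with
 * hypothetical chip labels): a list of "<gpiochip-label> <offset-list>"
 * pairs, e.g.
 *
 *	echo 'gpiochip0 0,2-4 e6052000.gpio 10' \
 *		> /sys/bus/platform/drivers/gpio-aggregator/new_device
 *
 * A token whose offsets do not parse as numbers is instead treated as a
 * GPIO line name (registered with hwnum U16_MAX in the lookup table).
 */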
static ssize_t new_device_store(struct device_driver *driver, const char *buf,
size_t count)
{
struct gpio_aggregator *aggr;
struct platform_device *pdev;
int res, id;
/* kernfs guarantees string termination, so count + 1 is safe */
aggr = kzalloc(sizeof(*aggr) + count + 1, GFP_KERNEL);
if (!aggr)
return -ENOMEM;
memcpy(aggr->args, buf, count + 1);
aggr->lookups = kzalloc(struct_size(aggr->lookups, table, 1),
GFP_KERNEL);
if (!aggr->lookups) {
res = -ENOMEM;
goto free_ga;
}
mutex_lock(&gpio_aggregator_lock);
id = idr_alloc(&gpio_aggregator_idr, aggr, 0, 0, GFP_KERNEL);
mutex_unlock(&gpio_aggregator_lock);
if (id < 0) {
res = id;
goto free_table;
}
aggr->lookups->dev_id = kasprintf(GFP_KERNEL, "%s.%d", DRV_NAME, id);
if (!aggr->lookups->dev_id) {
res = -ENOMEM;
goto remove_idr;
}
res = aggr_parse(aggr);
if (res)
goto free_dev_id;
gpiod_add_lookup_table(aggr->lookups);
pdev = platform_device_register_simple(DRV_NAME, id, NULL, 0);
if (IS_ERR(pdev)) {
res = PTR_ERR(pdev);
goto remove_table;
}
aggr->pdev = pdev;
return count;
remove_table:
gpiod_remove_lookup_table(aggr->lookups);
free_dev_id:
kfree(aggr->lookups->dev_id);
remove_idr:
mutex_lock(&gpio_aggregator_lock);
idr_remove(&gpio_aggregator_idr, id);
mutex_unlock(&gpio_aggregator_lock);
free_table:
kfree(aggr->lookups);
free_ga:
kfree(aggr);
return res;
}
static DRIVER_ATTR_WO(new_device);
static void gpio_aggregator_free(struct gpio_aggregator *aggr)
{
platform_device_unregister(aggr->pdev);
gpiod_remove_lookup_table(aggr->lookups);
kfree(aggr->lookups->dev_id);
kfree(aggr->lookups);
kfree(aggr);
}
static ssize_t delete_device_store(struct device_driver *driver,
const char *buf, size_t count)
{
struct gpio_aggregator *aggr;
unsigned int id;
int error;
if (!str_has_prefix(buf, DRV_NAME "."))
return -EINVAL;
error = kstrtouint(buf + strlen(DRV_NAME "."), 10, &id);
if (error)
return error;
mutex_lock(&gpio_aggregator_lock);
aggr = idr_remove(&gpio_aggregator_idr, id);
mutex_unlock(&gpio_aggregator_lock);
if (!aggr)
return -ENOENT;
gpio_aggregator_free(aggr);
return count;
}
static DRIVER_ATTR_WO(delete_device);
static struct attribute *gpio_aggregator_attrs[] = {
&driver_attr_new_device.attr,
&driver_attr_delete_device.attr,
NULL
};
ATTRIBUTE_GROUPS(gpio_aggregator);
static int __exit gpio_aggregator_idr_remove(int id, void *p, void *data)
{
gpio_aggregator_free(p);
return 0;
}
static void __exit gpio_aggregator_remove_all(void)
{
mutex_lock(&gpio_aggregator_lock);
idr_for_each(&gpio_aggregator_idr, gpio_aggregator_idr_remove, NULL);
idr_destroy(&gpio_aggregator_idr);
mutex_unlock(&gpio_aggregator_lock);
}
/*
* GPIO Forwarder
*/
struct gpiochip_fwd_timing {
u32 ramp_up_us;
u32 ramp_down_us;
};
struct gpiochip_fwd {
struct gpio_chip chip;
struct gpio_desc **descs;
union {
struct mutex mlock; /* protects tmp[] if can_sleep */
spinlock_t slock; /* protects tmp[] if !can_sleep */
};
struct gpiochip_fwd_timing *delay_timings;
unsigned long tmp[]; /* values and descs for multiple ops */
};
#define fwd_tmp_values(fwd) &(fwd)->tmp[0]
#define fwd_tmp_descs(fwd) (void *)&(fwd)->tmp[BITS_TO_LONGS((fwd)->chip.ngpio)]
#define fwd_tmp_size(ngpios) (BITS_TO_LONGS((ngpios)) + (ngpios))
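/*
 * tmp[] layout illustration (editor's note): on a 64-bit kernel with
 * ngpio = 40, tmp[0] holds the values bitmap (BITS_TO_LONGS(40) == 1) and
 * tmp[1]..tmp[40] are reused as an array of 40 struct gpio_desc pointers,
 * hence fwd_tmp_size(40) == 41 longs.
 */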
static int gpio_fwd_get_direction(struct gpio_chip *chip, unsigned int offset)
{
struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
return gpiod_get_direction(fwd->descs[offset]);
}
static int gpio_fwd_direction_input(struct gpio_chip *chip, unsigned int offset)
{
struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
return gpiod_direction_input(fwd->descs[offset]);
}
static int gpio_fwd_direction_output(struct gpio_chip *chip,
unsigned int offset, int value)
{
struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
return gpiod_direction_output(fwd->descs[offset], value);
}
static int gpio_fwd_get(struct gpio_chip *chip, unsigned int offset)
{
struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
return chip->can_sleep ? gpiod_get_value_cansleep(fwd->descs[offset])
: gpiod_get_value(fwd->descs[offset]);
}
static int gpio_fwd_get_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
unsigned long *bits)
{
struct gpio_desc **descs = fwd_tmp_descs(fwd);
unsigned long *values = fwd_tmp_values(fwd);
unsigned int i, j = 0;
int error;
bitmap_clear(values, 0, fwd->chip.ngpio);
for_each_set_bit(i, mask, fwd->chip.ngpio)
descs[j++] = fwd->descs[i];
if (fwd->chip.can_sleep)
error = gpiod_get_array_value_cansleep(j, descs, NULL, values);
else
error = gpiod_get_array_value(j, descs, NULL, values);
if (error)
return error;
j = 0;
for_each_set_bit(i, mask, fwd->chip.ngpio)
__assign_bit(i, bits, test_bit(j++, values));
return 0;
}
static int gpio_fwd_get_multiple_locked(struct gpio_chip *chip,
unsigned long *mask, unsigned long *bits)
{
struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
unsigned long flags;
int error;
if (chip->can_sleep) {
mutex_lock(&fwd->mlock);
error = gpio_fwd_get_multiple(fwd, mask, bits);
mutex_unlock(&fwd->mlock);
} else {
spin_lock_irqsave(&fwd->slock, flags);
error = gpio_fwd_get_multiple(fwd, mask, bits);
spin_unlock_irqrestore(&fwd->slock, flags);
}
return error;
}
static void gpio_fwd_delay(struct gpio_chip *chip, unsigned int offset, int value)
{
struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
const struct gpiochip_fwd_timing *delay_timings;
bool is_active_low = gpiod_is_active_low(fwd->descs[offset]);
u32 delay_us;
delay_timings = &fwd->delay_timings[offset];
if ((!is_active_low && value) || (is_active_low && !value))
delay_us = delay_timings->ramp_up_us;
else
delay_us = delay_timings->ramp_down_us;
if (!delay_us)
return;
if (chip->can_sleep)
fsleep(delay_us);
else
udelay(delay_us);
}
static void gpio_fwd_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
if (chip->can_sleep)
gpiod_set_value_cansleep(fwd->descs[offset], value);
else
gpiod_set_value(fwd->descs[offset], value);
if (fwd->delay_timings)
gpio_fwd_delay(chip, offset, value);
}
static void gpio_fwd_set_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
unsigned long *bits)
{
struct gpio_desc **descs = fwd_tmp_descs(fwd);
unsigned long *values = fwd_tmp_values(fwd);
unsigned int i, j = 0;
for_each_set_bit(i, mask, fwd->chip.ngpio) {
__assign_bit(j, values, test_bit(i, bits));
descs[j++] = fwd->descs[i];
}
if (fwd->chip.can_sleep)
gpiod_set_array_value_cansleep(j, descs, NULL, values);
else
gpiod_set_array_value(j, descs, NULL, values);
}
static void gpio_fwd_set_multiple_locked(struct gpio_chip *chip,
unsigned long *mask, unsigned long *bits)
{
struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
unsigned long flags;
if (chip->can_sleep) {
mutex_lock(&fwd->mlock);
gpio_fwd_set_multiple(fwd, mask, bits);
mutex_unlock(&fwd->mlock);
} else {
spin_lock_irqsave(&fwd->slock, flags);
gpio_fwd_set_multiple(fwd, mask, bits);
spin_unlock_irqrestore(&fwd->slock, flags);
}
}
static int gpio_fwd_set_config(struct gpio_chip *chip, unsigned int offset,
unsigned long config)
{
struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
return gpiod_set_config(fwd->descs[offset], config);
}
static int gpio_fwd_to_irq(struct gpio_chip *chip, unsigned int offset)
{
struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
return gpiod_to_irq(fwd->descs[offset]);
}
/*
 * The GPIO delay feature provides a way to configure platform-specific
 * ramp-up and ramp-down delays for a GPIO line. This can serve the following
* purposes:
* - Open-drain output using an RC filter
*/
#define FWD_FEATURE_DELAY BIT(0)
#ifdef CONFIG_OF_GPIO
static int gpiochip_fwd_delay_of_xlate(struct gpio_chip *chip,
const struct of_phandle_args *gpiospec,
u32 *flags)
{
struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
struct gpiochip_fwd_timing *timings;
u32 line;
if (gpiospec->args_count != chip->of_gpio_n_cells)
return -EINVAL;
line = gpiospec->args[0];
if (line >= chip->ngpio)
return -EINVAL;
timings = &fwd->delay_timings[line];
timings->ramp_up_us = gpiospec->args[1];
timings->ramp_down_us = gpiospec->args[2];
return line;
}
static int gpiochip_fwd_setup_delay_line(struct device *dev, struct gpio_chip *chip,
struct gpiochip_fwd *fwd)
{
fwd->delay_timings = devm_kcalloc(dev, chip->ngpio,
sizeof(*fwd->delay_timings),
GFP_KERNEL);
if (!fwd->delay_timings)
return -ENOMEM;
chip->of_xlate = gpiochip_fwd_delay_of_xlate;
chip->of_gpio_n_cells = 3;
return 0;
}
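/*
 * Device-tree sketch (editor's example, matching the three-cell xlate
 * above: <line ramp-up-us ramp-down-us>):
 *
 *	enable_delay: enable-delay {
 *		compatible = "gpio-delay";
 *		#gpio-cells = <3>;
 *		gpio-controller;
 *		gpios = <&gpio0 3 GPIO_ACTIVE_LOW>;
 *	};
 *
 * A consumer referencing <&enable_delay 0 130000 30000> then gets a
 * 130 ms ramp-up and 30 ms ramp-down delay on line 0.
 */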
#else
static int gpiochip_fwd_setup_delay_line(struct device *dev, struct gpio_chip *chip,
struct gpiochip_fwd *fwd)
{
return 0;
}
#endif /* !CONFIG_OF_GPIO */
/**
* gpiochip_fwd_create() - Create a new GPIO forwarder
* @dev: Parent device pointer
* @ngpios: Number of GPIOs in the forwarder.
* @descs: Array containing the GPIO descriptors to forward to.
* This array must contain @ngpios entries, and must not be deallocated
* before the forwarder has been destroyed again.
* @features: Bitwise ORed features as defined with FWD_FEATURE_*.
*
* This function creates a new gpiochip, which forwards all GPIO operations to
* the passed GPIO descriptors.
*
* Return: An opaque object pointer, or an ERR_PTR()-encoded negative error
* code on failure.
*/
static struct gpiochip_fwd *gpiochip_fwd_create(struct device *dev,
unsigned int ngpios,
struct gpio_desc *descs[],
unsigned long features)
{
const char *label = dev_name(dev);
struct gpiochip_fwd *fwd;
struct gpio_chip *chip;
unsigned int i;
int error;
fwd = devm_kzalloc(dev, struct_size(fwd, tmp, fwd_tmp_size(ngpios)),
GFP_KERNEL);
if (!fwd)
return ERR_PTR(-ENOMEM);
chip = &fwd->chip;
/*
* If any of the GPIO lines are sleeping, then the entire forwarder
* will be sleeping.
* If any of the chips support .set_config(), then the forwarder will
* support setting configs.
*/
for (i = 0; i < ngpios; i++) {
struct gpio_chip *parent = gpiod_to_chip(descs[i]);
dev_dbg(dev, "%u => gpio %d irq %d\n", i,
desc_to_gpio(descs[i]), gpiod_to_irq(descs[i]));
if (gpiod_cansleep(descs[i]))
chip->can_sleep = true;
if (parent && parent->set_config)
chip->set_config = gpio_fwd_set_config;
}
chip->label = label;
chip->parent = dev;
chip->owner = THIS_MODULE;
chip->get_direction = gpio_fwd_get_direction;
chip->direction_input = gpio_fwd_direction_input;
chip->direction_output = gpio_fwd_direction_output;
chip->get = gpio_fwd_get;
chip->get_multiple = gpio_fwd_get_multiple_locked;
chip->set = gpio_fwd_set;
chip->set_multiple = gpio_fwd_set_multiple_locked;
chip->to_irq = gpio_fwd_to_irq;
chip->base = -1;
chip->ngpio = ngpios;
fwd->descs = descs;
if (chip->can_sleep)
mutex_init(&fwd->mlock);
else
spin_lock_init(&fwd->slock);
if (features & FWD_FEATURE_DELAY) {
error = gpiochip_fwd_setup_delay_line(dev, chip, fwd);
if (error)
return ERR_PTR(error);
}
error = devm_gpiochip_add_data(dev, chip, fwd);
if (error)
return ERR_PTR(error);
return fwd;
}
/*
* GPIO Aggregator platform device
*/
static int gpio_aggregator_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct gpio_desc **descs;
struct gpiochip_fwd *fwd;
unsigned long features;
int i, n;
n = gpiod_count(dev, NULL);
if (n < 0)
return n;
descs = devm_kmalloc_array(dev, n, sizeof(*descs), GFP_KERNEL);
if (!descs)
return -ENOMEM;
for (i = 0; i < n; i++) {
descs[i] = devm_gpiod_get_index(dev, NULL, i, GPIOD_ASIS);
if (IS_ERR(descs[i]))
return PTR_ERR(descs[i]);
}
features = (uintptr_t)device_get_match_data(dev);
fwd = gpiochip_fwd_create(dev, n, descs, features);
if (IS_ERR(fwd))
return PTR_ERR(fwd);
platform_set_drvdata(pdev, fwd);
return 0;
}
static const struct of_device_id gpio_aggregator_dt_ids[] = {
{
.compatible = "gpio-delay",
.data = (void *)FWD_FEATURE_DELAY,
},
/*
* Add GPIO-operated devices controlled from userspace below,
* or use "driver_override" in sysfs.
*/
{}
};
MODULE_DEVICE_TABLE(of, gpio_aggregator_dt_ids);
static struct platform_driver gpio_aggregator_driver = {
.probe = gpio_aggregator_probe,
.driver = {
.name = DRV_NAME,
.groups = gpio_aggregator_groups,
.of_match_table = gpio_aggregator_dt_ids,
},
};
static int __init gpio_aggregator_init(void)
{
return platform_driver_register(&gpio_aggregator_driver);
}
module_init(gpio_aggregator_init);
static void __exit gpio_aggregator_exit(void)
{
gpio_aggregator_remove_all();
platform_driver_unregister(&gpio_aggregator_driver);
}
module_exit(gpio_aggregator_exit);
MODULE_AUTHOR("Geert Uytterhoeven <[email protected]>");
MODULE_DESCRIPTION("GPIO Aggregator");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpio/gpio-aggregator.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics 2018 - All Rights Reserved
* Author: Olivier Bideau <[email protected]> for STMicroelectronics.
* Author: Gabriel Fernandez <[email protected]> for STMicroelectronics.
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <dt-bindings/clock/stm32mp1-clks.h>
static DEFINE_SPINLOCK(rlock);
#define RCC_OCENSETR 0x0C
#define RCC_HSICFGR 0x18
#define RCC_RDLSICR 0x144
#define RCC_PLL1CR 0x80
#define RCC_PLL1CFGR1 0x84
#define RCC_PLL1CFGR2 0x88
#define RCC_PLL2CR 0x94
#define RCC_PLL2CFGR1 0x98
#define RCC_PLL2CFGR2 0x9C
#define RCC_PLL3CR 0x880
#define RCC_PLL3CFGR1 0x884
#define RCC_PLL3CFGR2 0x888
#define RCC_PLL4CR 0x894
#define RCC_PLL4CFGR1 0x898
#define RCC_PLL4CFGR2 0x89C
#define RCC_APB1ENSETR 0xA00
#define RCC_APB2ENSETR 0xA08
#define RCC_APB3ENSETR 0xA10
#define RCC_APB4ENSETR 0x200
#define RCC_APB5ENSETR 0x208
#define RCC_AHB2ENSETR 0xA18
#define RCC_AHB3ENSETR 0xA20
#define RCC_AHB4ENSETR 0xA28
#define RCC_AHB5ENSETR 0x210
#define RCC_AHB6ENSETR 0x218
#define RCC_AHB6LPENSETR 0x318
#define RCC_RCK12SELR 0x28
#define RCC_RCK3SELR 0x820
#define RCC_RCK4SELR 0x824
#define RCC_MPCKSELR 0x20
#define RCC_ASSCKSELR 0x24
#define RCC_MSSCKSELR 0x48
#define RCC_SPI6CKSELR 0xC4
#define RCC_SDMMC12CKSELR 0x8F4
#define RCC_SDMMC3CKSELR 0x8F8
#define RCC_FMCCKSELR 0x904
#define RCC_I2C46CKSELR 0xC0
#define RCC_I2C12CKSELR 0x8C0
#define RCC_I2C35CKSELR 0x8C4
#define RCC_UART1CKSELR 0xC8
#define RCC_QSPICKSELR 0x900
#define RCC_ETHCKSELR 0x8FC
#define RCC_RNG1CKSELR 0xCC
#define RCC_RNG2CKSELR 0x920
#define RCC_GPUCKSELR 0x938
#define RCC_USBCKSELR 0x91C
#define RCC_STGENCKSELR 0xD4
#define RCC_SPDIFCKSELR 0x914
#define RCC_SPI2S1CKSELR 0x8D8
#define RCC_SPI2S23CKSELR 0x8DC
#define RCC_SPI2S45CKSELR 0x8E0
#define RCC_CECCKSELR 0x918
#define RCC_LPTIM1CKSELR 0x934
#define RCC_LPTIM23CKSELR 0x930
#define RCC_LPTIM45CKSELR 0x92C
#define RCC_UART24CKSELR 0x8E8
#define RCC_UART35CKSELR 0x8EC
#define RCC_UART6CKSELR 0x8E4
#define RCC_UART78CKSELR 0x8F0
#define RCC_FDCANCKSELR 0x90C
#define RCC_SAI1CKSELR 0x8C8
#define RCC_SAI2CKSELR 0x8CC
#define RCC_SAI3CKSELR 0x8D0
#define RCC_SAI4CKSELR 0x8D4
#define RCC_ADCCKSELR 0x928
#define RCC_MPCKDIVR 0x2C
#define RCC_DSICKSELR 0x924
#define RCC_CPERCKSELR 0xD0
#define RCC_MCO1CFGR 0x800
#define RCC_MCO2CFGR 0x804
#define RCC_BDCR 0x140
#define RCC_AXIDIVR 0x30
#define RCC_MCUDIVR 0x830
#define RCC_APB1DIVR 0x834
#define RCC_APB2DIVR 0x838
#define RCC_APB3DIVR 0x83C
#define RCC_APB4DIVR 0x3C
#define RCC_APB5DIVR 0x40
#define RCC_TIMG1PRER 0x828
#define RCC_TIMG2PRER 0x82C
#define RCC_RTCDIVR 0x44
#define RCC_DBGCFGR 0x80C
#define RCC_CLR 0x4
static const char * const ref12_parents[] = {
"ck_hsi", "ck_hse"
};
static const char * const ref3_parents[] = {
"ck_hsi", "ck_hse", "ck_csi"
};
static const char * const ref4_parents[] = {
"ck_hsi", "ck_hse", "ck_csi"
};
static const char * const cpu_src[] = {
"ck_hsi", "ck_hse", "pll1_p"
};
static const char * const axi_src[] = {
"ck_hsi", "ck_hse", "pll2_p"
};
static const char * const per_src[] = {
"ck_hsi", "ck_csi", "ck_hse"
};
static const char * const mcu_src[] = {
"ck_hsi", "ck_hse", "ck_csi", "pll3_p"
};
static const char * const sdmmc12_src[] = {
"ck_axi", "pll3_r", "pll4_p", "ck_hsi"
};
static const char * const sdmmc3_src[] = {
"ck_mcu", "pll3_r", "pll4_p", "ck_hsi"
};
static const char * const fmc_src[] = {
"ck_axi", "pll3_r", "pll4_p", "ck_per"
};
static const char * const qspi_src[] = {
"ck_axi", "pll3_r", "pll4_p", "ck_per"
};
static const char * const eth_src[] = {
"pll4_p", "pll3_q"
};
static const struct clk_parent_data ethrx_src[] = {
{ .name = "ethck_k", .fw_name = "ETH_RX_CLK/ETH_REF_CLK" },
};
static const char * const rng_src[] = {
"ck_csi", "pll4_r", "ck_lse", "ck_lsi"
};
static const char * const usbphy_src[] = {
"ck_hse", "pll4_r", "clk-hse-div2"
};
static const char * const usbo_src[] = {
"pll4_r", "ck_usbo_48m"
};
static const char * const stgen_src[] = {
"ck_hsi", "ck_hse"
};
static const char * const spdif_src[] = {
"pll4_p", "pll3_q", "ck_hsi"
};
static const char * const spi123_src[] = {
"pll4_p", "pll3_q", "i2s_ckin", "ck_per", "pll3_r"
};
static const char * const spi45_src[] = {
"pclk2", "pll4_q", "ck_hsi", "ck_csi", "ck_hse"
};
static const char * const spi6_src[] = {
"pclk5", "pll4_q", "ck_hsi", "ck_csi", "ck_hse", "pll3_q"
};
static const char * const cec_src[] = {
"ck_lse", "ck_lsi", "ck_csi"
};
static const char * const i2c12_src[] = {
"pclk1", "pll4_r", "ck_hsi", "ck_csi"
};
static const char * const i2c35_src[] = {
"pclk1", "pll4_r", "ck_hsi", "ck_csi"
};
static const char * const i2c46_src[] = {
"pclk5", "pll3_q", "ck_hsi", "ck_csi"
};
static const char * const lptim1_src[] = {
"pclk1", "pll4_p", "pll3_q", "ck_lse", "ck_lsi", "ck_per"
};
static const char * const lptim23_src[] = {
"pclk3", "pll4_q", "ck_per", "ck_lse", "ck_lsi"
};
static const char * const lptim45_src[] = {
"pclk3", "pll4_p", "pll3_q", "ck_lse", "ck_lsi", "ck_per"
};
static const char * const usart1_src[] = {
"pclk5", "pll3_q", "ck_hsi", "ck_csi", "pll4_q", "ck_hse"
};
static const char * const usart234578_src[] = {
"pclk1", "pll4_q", "ck_hsi", "ck_csi", "ck_hse"
};
static const char * const usart6_src[] = {
"pclk2", "pll4_q", "ck_hsi", "ck_csi", "ck_hse"
};
static const char * const fdcan_src[] = {
"ck_hse", "pll3_q", "pll4_q", "pll4_r"
};
static const char * const sai_src[] = {
"pll4_q", "pll3_q", "i2s_ckin", "ck_per", "pll3_r"
};
static const char * const sai2_src[] = {
"pll4_q", "pll3_q", "i2s_ckin", "ck_per", "spdif_ck_symb", "pll3_r"
};
static const char * const adc12_src[] = {
"pll4_r", "ck_per", "pll3_q"
};
static const char * const dsi_src[] = {
"ck_dsi_phy", "pll4_p"
};
static const char * const rtc_src[] = {
"off", "ck_lse", "ck_lsi", "ck_hse"
};
static const char * const mco1_src[] = {
"ck_hsi", "ck_hse", "ck_csi", "ck_lsi", "ck_lse"
};
static const char * const mco2_src[] = {
"ck_mpu", "ck_axi", "ck_mcu", "pll4_p", "ck_hse", "ck_hsi"
};
static const char * const ck_trace_src[] = {
"ck_axi"
};
static const struct clk_div_table axi_div_table[] = {
{ 0, 1 }, { 1, 2 }, { 2, 3 }, { 3, 4 },
{ 4, 4 }, { 5, 4 }, { 6, 4 }, { 7, 4 },
{ 0 },
};
static const struct clk_div_table mcu_div_table[] = {
{ 0, 1 }, { 1, 2 }, { 2, 4 }, { 3, 8 },
{ 4, 16 }, { 5, 32 }, { 6, 64 }, { 7, 128 },
{ 8, 256 }, { 9, 512 }, { 10, 512}, { 11, 512 },
{ 12, 512 }, { 13, 512 }, { 14, 512}, { 15, 512 },
{ 0 },
};
static const struct clk_div_table apb_div_table[] = {
{ 0, 1 }, { 1, 2 }, { 2, 4 }, { 3, 8 },
{ 4, 16 }, { 5, 16 }, { 6, 16 }, { 7, 16 },
{ 0 },
};
static const struct clk_div_table ck_trace_div_table[] = {
{ 0, 1 }, { 1, 2 }, { 2, 4 }, { 3, 8 },
{ 4, 16 }, { 5, 16 }, { 6, 16 }, { 7, 16 },
{ 0 },
};
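/*
 * Reading these tables (editor's note): each pair is { register field
 * value, divisor }. The tables saturate, e.g. in apb_div_table any field
 * value from 4 to 7 divides by 16, and mcu_div_table clamps field values
 * 10..15 to a divide-by-512.
 */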
#define MAX_MUX_CLK 2
struct stm32_mmux {
u8 nbr_clk;
struct clk_hw *hws[MAX_MUX_CLK];
};
struct stm32_clk_mmux {
struct clk_mux mux;
struct stm32_mmux *mmux;
};
struct stm32_mgate {
u8 nbr_clk;
u32 flag;
};
struct stm32_clk_mgate {
struct clk_gate gate;
struct stm32_mgate *mgate;
u32 mask;
};
struct clock_config {
u32 id;
const char *name;
const char *parent_name;
const char * const *parent_names;
const struct clk_parent_data *parent_data;
int num_parents;
unsigned long flags;
void *cfg;
struct clk_hw * (*func)(struct device *dev,
struct clk_hw_onecell_data *clk_data,
void __iomem *base, spinlock_t *lock,
const struct clock_config *cfg);
};
#define NO_ID ~0
struct gate_cfg {
u32 reg_off;
u8 bit_idx;
u8 gate_flags;
};
struct fixed_factor_cfg {
unsigned int mult;
unsigned int div;
};
struct div_cfg {
u32 reg_off;
u8 shift;
u8 width;
u8 div_flags;
const struct clk_div_table *table;
};
struct mux_cfg {
u32 reg_off;
u8 shift;
u8 width;
u8 mux_flags;
u32 *table;
};
struct stm32_gate_cfg {
struct gate_cfg *gate;
struct stm32_mgate *mgate;
const struct clk_ops *ops;
};
struct stm32_div_cfg {
struct div_cfg *div;
const struct clk_ops *ops;
};
struct stm32_mux_cfg {
struct mux_cfg *mux;
struct stm32_mmux *mmux;
const struct clk_ops *ops;
};
/* STM32 Composite clock */
struct stm32_composite_cfg {
const struct stm32_gate_cfg *gate;
const struct stm32_div_cfg *div;
const struct stm32_mux_cfg *mux;
};
static struct clk_hw *
_clk_hw_register_gate(struct device *dev,
struct clk_hw_onecell_data *clk_data,
void __iomem *base, spinlock_t *lock,
const struct clock_config *cfg)
{
struct gate_cfg *gate_cfg = cfg->cfg;
return clk_hw_register_gate(dev,
cfg->name,
cfg->parent_name,
cfg->flags,
gate_cfg->reg_off + base,
gate_cfg->bit_idx,
gate_cfg->gate_flags,
lock);
}
static struct clk_hw *
_clk_hw_register_fixed_factor(struct device *dev,
struct clk_hw_onecell_data *clk_data,
void __iomem *base, spinlock_t *lock,
const struct clock_config *cfg)
{
struct fixed_factor_cfg *ff_cfg = cfg->cfg;
return clk_hw_register_fixed_factor(dev, cfg->name, cfg->parent_name,
cfg->flags, ff_cfg->mult,
ff_cfg->div);
}
static struct clk_hw *
_clk_hw_register_divider_table(struct device *dev,
struct clk_hw_onecell_data *clk_data,
void __iomem *base, spinlock_t *lock,
const struct clock_config *cfg)
{
struct div_cfg *div_cfg = cfg->cfg;
return clk_hw_register_divider_table(dev,
cfg->name,
cfg->parent_name,
cfg->flags,
div_cfg->reg_off + base,
div_cfg->shift,
div_cfg->width,
div_cfg->div_flags,
div_cfg->table,
lock);
}
static struct clk_hw *
_clk_hw_register_mux(struct device *dev,
struct clk_hw_onecell_data *clk_data,
void __iomem *base, spinlock_t *lock,
const struct clock_config *cfg)
{
struct mux_cfg *mux_cfg = cfg->cfg;
return clk_hw_register_mux(dev, cfg->name, cfg->parent_names,
cfg->num_parents, cfg->flags,
mux_cfg->reg_off + base, mux_cfg->shift,
mux_cfg->width, mux_cfg->mux_flags, lock);
}
/* MP1 Gate clock with set & clear registers */
static int mp1_gate_clk_enable(struct clk_hw *hw)
{
if (!clk_gate_ops.is_enabled(hw))
clk_gate_ops.enable(hw);
return 0;
}
static void mp1_gate_clk_disable(struct clk_hw *hw)
{
struct clk_gate *gate = to_clk_gate(hw);
unsigned long flags = 0;
if (clk_gate_ops.is_enabled(hw)) {
spin_lock_irqsave(gate->lock, flags);
writel_relaxed(BIT(gate->bit_idx), gate->reg + RCC_CLR);
spin_unlock_irqrestore(gate->lock, flags);
}
}
static const struct clk_ops mp1_gate_clk_ops = {
.enable = mp1_gate_clk_enable,
.disable = mp1_gate_clk_disable,
.is_enabled = clk_gate_is_enabled,
};
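/*
 * Editor's note on the set/clear scheme: gate->reg points at an xxxSETR
 * register, where writing a 1 enables the clock; writing the same bit to
 * gate->reg + RCC_CLR (the matching clear register, 4 bytes above)
 * disables it, so the disable path needs no read-modify-write.
 */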
static struct clk_hw *_get_stm32_mux(struct device *dev, void __iomem *base,
const struct stm32_mux_cfg *cfg,
spinlock_t *lock)
{
struct stm32_clk_mmux *mmux;
struct clk_mux *mux;
struct clk_hw *mux_hw;
if (cfg->mmux) {
mmux = devm_kzalloc(dev, sizeof(*mmux), GFP_KERNEL);
if (!mmux)
return ERR_PTR(-ENOMEM);
mmux->mux.reg = cfg->mux->reg_off + base;
mmux->mux.shift = cfg->mux->shift;
mmux->mux.mask = (1 << cfg->mux->width) - 1;
mmux->mux.flags = cfg->mux->mux_flags;
mmux->mux.table = cfg->mux->table;
mmux->mux.lock = lock;
mmux->mmux = cfg->mmux;
mux_hw = &mmux->mux.hw;
cfg->mmux->hws[cfg->mmux->nbr_clk++] = mux_hw;
} else {
mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
if (!mux)
return ERR_PTR(-ENOMEM);
mux->reg = cfg->mux->reg_off + base;
mux->shift = cfg->mux->shift;
mux->mask = (1 << cfg->mux->width) - 1;
mux->flags = cfg->mux->mux_flags;
mux->table = cfg->mux->table;
mux->lock = lock;
mux_hw = &mux->hw;
}
return mux_hw;
}
static struct clk_hw *_get_stm32_div(struct device *dev, void __iomem *base,
const struct stm32_div_cfg *cfg,
spinlock_t *lock)
{
struct clk_divider *div;
div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
if (!div)
return ERR_PTR(-ENOMEM);
div->reg = cfg->div->reg_off + base;
div->shift = cfg->div->shift;
div->width = cfg->div->width;
div->flags = cfg->div->div_flags;
div->table = cfg->div->table;
div->lock = lock;
return &div->hw;
}
static struct clk_hw *_get_stm32_gate(struct device *dev, void __iomem *base,
const struct stm32_gate_cfg *cfg,
spinlock_t *lock)
{
struct stm32_clk_mgate *mgate;
struct clk_gate *gate;
struct clk_hw *gate_hw;
if (cfg->mgate) {
mgate = devm_kzalloc(dev, sizeof(*mgate), GFP_KERNEL);
if (!mgate)
return ERR_PTR(-ENOMEM);
mgate->gate.reg = cfg->gate->reg_off + base;
mgate->gate.bit_idx = cfg->gate->bit_idx;
mgate->gate.flags = cfg->gate->gate_flags;
mgate->gate.lock = lock;
mgate->mask = BIT(cfg->mgate->nbr_clk++);
mgate->mgate = cfg->mgate;
gate_hw = &mgate->gate.hw;
} else {
gate = devm_kzalloc(dev, sizeof(*gate), GFP_KERNEL);
if (!gate)
return ERR_PTR(-ENOMEM);
gate->reg = cfg->gate->reg_off + base;
gate->bit_idx = cfg->gate->bit_idx;
gate->flags = cfg->gate->gate_flags;
gate->lock = lock;
gate_hw = &gate->hw;
}
return gate_hw;
}
static struct clk_hw *
clk_stm32_register_gate_ops(struct device *dev,
const char *name,
const char *parent_name,
const struct clk_parent_data *parent_data,
unsigned long flags,
void __iomem *base,
const struct stm32_gate_cfg *cfg,
spinlock_t *lock)
{
struct clk_init_data init = { NULL };
struct clk_hw *hw;
int ret;
init.name = name;
if (parent_name)
init.parent_names = &parent_name;
if (parent_data)
init.parent_data = parent_data;
init.num_parents = 1;
init.flags = flags;
init.ops = &clk_gate_ops;
if (cfg->ops)
init.ops = cfg->ops;
hw = _get_stm32_gate(dev, base, cfg, lock);
if (IS_ERR(hw))
return ERR_PTR(-ENOMEM);
hw->init = &init;
ret = clk_hw_register(dev, hw);
if (ret)
hw = ERR_PTR(ret);
return hw;
}
static struct clk_hw *
clk_stm32_register_composite(struct device *dev,
const char *name, const char * const *parent_names,
const struct clk_parent_data *parent_data,
int num_parents, void __iomem *base,
const struct stm32_composite_cfg *cfg,
unsigned long flags, spinlock_t *lock)
{
const struct clk_ops *mux_ops, *div_ops, *gate_ops;
struct clk_hw *mux_hw, *div_hw, *gate_hw;
mux_hw = NULL;
div_hw = NULL;
gate_hw = NULL;
mux_ops = NULL;
div_ops = NULL;
gate_ops = NULL;
if (cfg->mux) {
mux_hw = _get_stm32_mux(dev, base, cfg->mux, lock);
if (!IS_ERR(mux_hw)) {
mux_ops = &clk_mux_ops;
if (cfg->mux->ops)
mux_ops = cfg->mux->ops;
}
}
if (cfg->div) {
div_hw = _get_stm32_div(dev, base, cfg->div, lock);
if (!IS_ERR(div_hw)) {
div_ops = &clk_divider_ops;
if (cfg->div->ops)
div_ops = cfg->div->ops;
}
}
if (cfg->gate) {
gate_hw = _get_stm32_gate(dev, base, cfg->gate, lock);
if (!IS_ERR(gate_hw)) {
gate_ops = &clk_gate_ops;
if (cfg->gate->ops)
gate_ops = cfg->gate->ops;
}
}
return clk_hw_register_composite(dev, name, parent_names, num_parents,
mux_hw, mux_ops, div_hw, div_ops,
gate_hw, gate_ops, flags);
}
#define to_clk_mgate(_gate) container_of(_gate, struct stm32_clk_mgate, gate)
static int mp1_mgate_clk_enable(struct clk_hw *hw)
{
struct clk_gate *gate = to_clk_gate(hw);
struct stm32_clk_mgate *clk_mgate = to_clk_mgate(gate);
clk_mgate->mgate->flag |= clk_mgate->mask;
mp1_gate_clk_enable(hw);
return 0;
}
static void mp1_mgate_clk_disable(struct clk_hw *hw)
{
struct clk_gate *gate = to_clk_gate(hw);
struct stm32_clk_mgate *clk_mgate = to_clk_mgate(gate);
clk_mgate->mgate->flag &= ~clk_mgate->mask;
if (clk_mgate->mgate->flag == 0)
mp1_gate_clk_disable(hw);
}
static const struct clk_ops mp1_mgate_clk_ops = {
.enable = mp1_mgate_clk_enable,
.disable = mp1_mgate_clk_disable,
.is_enabled = clk_gate_is_enabled,
};
#define to_clk_mmux(_mux) container_of(_mux, struct stm32_clk_mmux, mux)
static u8 clk_mmux_get_parent(struct clk_hw *hw)
{
return clk_mux_ops.get_parent(hw);
}
static int clk_mmux_set_parent(struct clk_hw *hw, u8 index)
{
struct clk_mux *mux = to_clk_mux(hw);
struct stm32_clk_mmux *clk_mmux = to_clk_mmux(mux);
struct clk_hw *hwp;
int ret, n;
ret = clk_mux_ops.set_parent(hw, index);
if (ret)
return ret;
hwp = clk_hw_get_parent(hw);
for (n = 0; n < clk_mmux->mmux->nbr_clk; n++)
if (clk_mmux->mmux->hws[n] != hw)
clk_hw_reparent(clk_mmux->mmux->hws[n], hwp);
return 0;
}
static const struct clk_ops clk_mmux_ops = {
.get_parent = clk_mmux_get_parent,
.set_parent = clk_mmux_set_parent,
.determine_rate = __clk_mux_determine_rate,
};
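/*
 * Editor's note: several clk_hw instances share one parent-select field
 * here, so after the hardware switch clk_mmux_set_parent() reparents
 * every sibling hw (up to MAX_MUX_CLK) in the common clock framework to
 * keep rate propagation consistent.
 */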
/* STM32 PLL */
struct stm32_pll_obj {
/* lock pll enable/disable registers */
spinlock_t *lock;
void __iomem *reg;
struct clk_hw hw;
struct clk_mux mux;
};
#define to_pll(_hw) container_of(_hw, struct stm32_pll_obj, hw)
#define PLL_ON BIT(0)
#define PLL_RDY BIT(1)
#define DIVN_MASK 0x1FF
#define DIVM_MASK 0x3F
#define DIVM_SHIFT 16
#define DIVN_SHIFT 0
#define FRAC_OFFSET 0xC
#define FRAC_MASK 0x1FFF
#define FRAC_SHIFT 3
#define FRACLE BIT(16)
#define PLL_MUX_SHIFT 0
#define PLL_MUX_MASK 3
static int __pll_is_enabled(struct clk_hw *hw)
{
struct stm32_pll_obj *clk_elem = to_pll(hw);
return readl_relaxed(clk_elem->reg) & PLL_ON;
}
#define TIMEOUT 5
static int pll_enable(struct clk_hw *hw)
{
struct stm32_pll_obj *clk_elem = to_pll(hw);
u32 reg;
unsigned long flags = 0;
unsigned int timeout = TIMEOUT;
int bit_status = 0;
spin_lock_irqsave(clk_elem->lock, flags);
if (__pll_is_enabled(hw))
goto unlock;
reg = readl_relaxed(clk_elem->reg);
reg |= PLL_ON;
writel_relaxed(reg, clk_elem->reg);
	/*
	 * We can't use readl_poll_timeout() because we could be blocked if
	 * someone enables this clock before the clocksource changes.
	 * Only the jiffies counter is available, jiffies are incremented by
	 * interrupts, and the enable op runs with interrupts disabled.
	 */
do {
bit_status = !(readl_relaxed(clk_elem->reg) & PLL_RDY);
if (bit_status)
udelay(120);
} while (bit_status && --timeout);
unlock:
spin_unlock_irqrestore(clk_elem->lock, flags);
return bit_status;
}
static void pll_disable(struct clk_hw *hw)
{
struct stm32_pll_obj *clk_elem = to_pll(hw);
u32 reg;
unsigned long flags = 0;
spin_lock_irqsave(clk_elem->lock, flags);
reg = readl_relaxed(clk_elem->reg);
reg &= ~PLL_ON;
writel_relaxed(reg, clk_elem->reg);
spin_unlock_irqrestore(clk_elem->lock, flags);
}
static u32 pll_frac_val(struct clk_hw *hw)
{
struct stm32_pll_obj *clk_elem = to_pll(hw);
u32 reg, frac = 0;
reg = readl_relaxed(clk_elem->reg + FRAC_OFFSET);
if (reg & FRACLE)
frac = (reg >> FRAC_SHIFT) & FRAC_MASK;
return frac;
}
static unsigned long pll_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct stm32_pll_obj *clk_elem = to_pll(hw);
u32 reg;
u32 frac, divm, divn;
u64 rate, rate_frac = 0;
reg = readl_relaxed(clk_elem->reg + 4);
divm = ((reg >> DIVM_SHIFT) & DIVM_MASK) + 1;
divn = ((reg >> DIVN_SHIFT) & DIVN_MASK) + 1;
rate = (u64)parent_rate * divn;
do_div(rate, divm);
frac = pll_frac_val(hw);
if (frac) {
rate_frac = (u64)parent_rate * (u64)frac;
do_div(rate_frac, (divm * 8192));
}
return rate + rate_frac;
}
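/*
 * Worked example (editor's illustration, hypothetical settings): with a
 * 24 MHz parent, DIVM field 1 (divm = 2), DIVN field 79 (divn = 80) and
 * FRAC = 2048:
 *
 *	rate      = 24 MHz * 80 / 2            = 960 MHz
 *	rate_frac = 24 MHz * 2048 / (2 * 8192) =   3 MHz
 *
 * giving a 963 MHz output before the post-dividers.
 */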
static int pll_is_enabled(struct clk_hw *hw)
{
struct stm32_pll_obj *clk_elem = to_pll(hw);
unsigned long flags = 0;
int ret;
spin_lock_irqsave(clk_elem->lock, flags);
ret = __pll_is_enabled(hw);
spin_unlock_irqrestore(clk_elem->lock, flags);
return ret;
}
static u8 pll_get_parent(struct clk_hw *hw)
{
struct stm32_pll_obj *clk_elem = to_pll(hw);
struct clk_hw *mux_hw = &clk_elem->mux.hw;
__clk_hw_set_clk(mux_hw, hw);
return clk_mux_ops.get_parent(mux_hw);
}
static const struct clk_ops pll_ops = {
.enable = pll_enable,
.disable = pll_disable,
.recalc_rate = pll_recalc_rate,
.is_enabled = pll_is_enabled,
.get_parent = pll_get_parent,
};
static struct clk_hw *clk_register_pll(struct device *dev, const char *name,
const char * const *parent_names,
int num_parents,
void __iomem *reg,
void __iomem *mux_reg,
unsigned long flags,
spinlock_t *lock)
{
struct stm32_pll_obj *element;
struct clk_init_data init;
struct clk_hw *hw;
int err;
element = devm_kzalloc(dev, sizeof(*element), GFP_KERNEL);
if (!element)
return ERR_PTR(-ENOMEM);
init.name = name;
init.ops = &pll_ops;
init.flags = flags;
init.parent_names = parent_names;
init.num_parents = num_parents;
element->mux.lock = lock;
element->mux.reg = mux_reg;
element->mux.shift = PLL_MUX_SHIFT;
element->mux.mask = PLL_MUX_MASK;
element->mux.flags = CLK_MUX_READ_ONLY;
element->hw.init = &init;
element->reg = reg;
element->lock = lock;
hw = &element->hw;
err = clk_hw_register(dev, hw);
if (err)
return ERR_PTR(err);
return hw;
}
/* Kernel Timer */
struct timer_cker {
/* lock the kernel output divider register */
spinlock_t *lock;
void __iomem *apbdiv;
void __iomem *timpre;
struct clk_hw hw;
};
#define to_timer_cker(_hw) container_of(_hw, struct timer_cker, hw)
#define APB_DIV_MASK 0x07
#define TIM_PRE_MASK 0x01
static unsigned long __bestmult(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct timer_cker *tim_ker = to_timer_cker(hw);
u32 prescaler;
unsigned int mult = 0;
prescaler = readl_relaxed(tim_ker->apbdiv) & APB_DIV_MASK;
if (prescaler < 2)
return 1;
mult = 2;
if (rate / parent_rate >= 4)
mult = 4;
return mult;
}
static long timer_ker_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
unsigned long factor = __bestmult(hw, rate, *parent_rate);
return *parent_rate * factor;
}
static int timer_ker_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct timer_cker *tim_ker = to_timer_cker(hw);
unsigned long flags = 0;
unsigned long factor = __bestmult(hw, rate, parent_rate);
int ret = 0;
spin_lock_irqsave(tim_ker->lock, flags);
switch (factor) {
case 1:
break;
case 2:
writel_relaxed(0, tim_ker->timpre);
break;
case 4:
writel_relaxed(1, tim_ker->timpre);
break;
default:
ret = -EINVAL;
}
spin_unlock_irqrestore(tim_ker->lock, flags);
return ret;
}
static unsigned long timer_ker_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct timer_cker *tim_ker = to_timer_cker(hw);
u32 prescaler, timpre;
u32 mul;
prescaler = readl_relaxed(tim_ker->apbdiv) & APB_DIV_MASK;
timpre = readl_relaxed(tim_ker->timpre) & TIM_PRE_MASK;
if (!prescaler)
return parent_rate;
mul = (timpre + 1) * 2;
return parent_rate * mul;
}
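/*
 * Editor's illustration: with a non-zero APB prescaler, the timer kernel
 * clock runs at parent_rate * 2 when TIMPRE = 0 and parent_rate * 4 when
 * TIMPRE = 1; with the APB prescaler at 1 (field value 0) it simply
 * follows the parent rate.
 */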
static const struct clk_ops timer_ker_ops = {
.recalc_rate = timer_ker_recalc_rate,
.round_rate = timer_ker_round_rate,
.set_rate = timer_ker_set_rate,
};
static struct clk_hw *clk_register_cktim(struct device *dev, const char *name,
const char *parent_name,
unsigned long flags,
void __iomem *apbdiv,
void __iomem *timpre,
spinlock_t *lock)
{
struct timer_cker *tim_ker;
struct clk_init_data init;
struct clk_hw *hw;
int err;
tim_ker = devm_kzalloc(dev, sizeof(*tim_ker), GFP_KERNEL);
if (!tim_ker)
return ERR_PTR(-ENOMEM);
init.name = name;
init.ops = &timer_ker_ops;
init.flags = flags;
init.parent_names = &parent_name;
init.num_parents = 1;
tim_ker->hw.init = &init;
tim_ker->lock = lock;
tim_ker->apbdiv = apbdiv;
tim_ker->timpre = timpre;
hw = &tim_ker->hw;
err = clk_hw_register(dev, hw);
if (err)
return ERR_PTR(err);
return hw;
}
/* The RTC clock divider applies only to the ck_hse parent clock */
#define HSE_RTC 3
static unsigned long clk_divider_rtc_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
if (clk_hw_get_parent(hw) == clk_hw_get_parent_by_index(hw, HSE_RTC))
return clk_divider_ops.recalc_rate(hw, parent_rate);
return parent_rate;
}
static int clk_divider_rtc_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
if (clk_hw_get_parent(hw) == clk_hw_get_parent_by_index(hw, HSE_RTC))
return clk_divider_ops.set_rate(hw, rate, parent_rate);
return parent_rate;
}
static int clk_divider_rtc_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
if (req->best_parent_hw == clk_hw_get_parent_by_index(hw, HSE_RTC))
return clk_divider_ops.determine_rate(hw, req);
req->rate = req->best_parent_rate;
return 0;
}
static const struct clk_ops rtc_div_clk_ops = {
.recalc_rate = clk_divider_rtc_recalc_rate,
.set_rate = clk_divider_rtc_set_rate,
.determine_rate = clk_divider_rtc_determine_rate
};
struct stm32_pll_cfg {
u32 offset;
u32 muxoff;
};
static struct clk_hw *_clk_register_pll(struct device *dev,
struct clk_hw_onecell_data *clk_data,
void __iomem *base, spinlock_t *lock,
const struct clock_config *cfg)
{
struct stm32_pll_cfg *stm_pll_cfg = cfg->cfg;
return clk_register_pll(dev, cfg->name, cfg->parent_names,
cfg->num_parents,
base + stm_pll_cfg->offset,
base + stm_pll_cfg->muxoff,
cfg->flags, lock);
}
struct stm32_cktim_cfg {
u32 offset_apbdiv;
u32 offset_timpre;
};
static struct clk_hw *_clk_register_cktim(struct device *dev,
struct clk_hw_onecell_data *clk_data,
void __iomem *base, spinlock_t *lock,
const struct clock_config *cfg)
{
struct stm32_cktim_cfg *cktim_cfg = cfg->cfg;
return clk_register_cktim(dev, cfg->name, cfg->parent_name, cfg->flags,
cktim_cfg->offset_apbdiv + base,
cktim_cfg->offset_timpre + base, lock);
}
static struct clk_hw *
_clk_stm32_register_gate(struct device *dev,
struct clk_hw_onecell_data *clk_data,
void __iomem *base, spinlock_t *lock,
const struct clock_config *cfg)
{
return clk_stm32_register_gate_ops(dev,
cfg->name,
cfg->parent_name,
cfg->parent_data,
cfg->flags,
base,
cfg->cfg,
lock);
}
static struct clk_hw *
_clk_stm32_register_composite(struct device *dev,
struct clk_hw_onecell_data *clk_data,
void __iomem *base, spinlock_t *lock,
const struct clock_config *cfg)
{
return clk_stm32_register_composite(dev, cfg->name, cfg->parent_names,
cfg->parent_data, cfg->num_parents,
base, cfg->cfg, cfg->flags, lock);
}
#define GATE(_id, _name, _parent, _flags, _offset, _bit_idx, _gate_flags)\
{\
.id = _id,\
.name = _name,\
.parent_name = _parent,\
.flags = _flags,\
.cfg = &(struct gate_cfg) {\
.reg_off = _offset,\
.bit_idx = _bit_idx,\
.gate_flags = _gate_flags,\
},\
.func = _clk_hw_register_gate,\
}
#define FIXED_FACTOR(_id, _name, _parent, _flags, _mult, _div)\
{\
.id = _id,\
.name = _name,\
.parent_name = _parent,\
.flags = _flags,\
.cfg = &(struct fixed_factor_cfg) {\
.mult = _mult,\
.div = _div,\
},\
.func = _clk_hw_register_fixed_factor,\
}
#define DIV_TABLE(_id, _name, _parent, _flags, _offset, _shift, _width,\
_div_flags, _div_table)\
{\
.id = _id,\
.name = _name,\
.parent_name = _parent,\
.flags = _flags,\
.cfg = &(struct div_cfg) {\
.reg_off = _offset,\
.shift = _shift,\
.width = _width,\
.div_flags = _div_flags,\
.table = _div_table,\
},\
.func = _clk_hw_register_divider_table,\
}
#define DIV(_id, _name, _parent, _flags, _offset, _shift, _width, _div_flags)\
DIV_TABLE(_id, _name, _parent, _flags, _offset, _shift, _width,\
_div_flags, NULL)
#define MUX(_id, _name, _parents, _flags, _offset, _shift, _width, _mux_flags)\
{\
.id = _id,\
.name = _name,\
.parent_names = _parents,\
.num_parents = ARRAY_SIZE(_parents),\
.flags = _flags,\
.cfg = &(struct mux_cfg) {\
.reg_off = _offset,\
.shift = _shift,\
.width = _width,\
.mux_flags = _mux_flags,\
},\
.func = _clk_hw_register_mux,\
}
#define PLL(_id, _name, _parents, _flags, _offset_p, _offset_mux)\
{\
.id = _id,\
.name = _name,\
.parent_names = _parents,\
.num_parents = ARRAY_SIZE(_parents),\
.flags = CLK_IGNORE_UNUSED | (_flags),\
.cfg = &(struct stm32_pll_cfg) {\
.offset = _offset_p,\
.muxoff = _offset_mux,\
},\
.func = _clk_register_pll,\
}
#define STM32_CKTIM(_name, _parent, _flags, _offset_apbdiv, _offset_timpre)\
{\
.id = NO_ID,\
.name = _name,\
.parent_name = _parent,\
.flags = _flags,\
.cfg = &(struct stm32_cktim_cfg) {\
.offset_apbdiv = _offset_apbdiv,\
.offset_timpre = _offset_timpre,\
},\
.func = _clk_register_cktim,\
}
#define STM32_TIM(_id, _name, _parent, _offset_set, _bit_idx)\
GATE_MP1(_id, _name, _parent, CLK_SET_RATE_PARENT,\
_offset_set, _bit_idx, 0)
/* STM32 GATE */
#define STM32_GATE(_id, _name, _parent, _flags, _gate)\
{\
.id = _id,\
.name = _name,\
.parent_name = _parent,\
.flags = _flags,\
.cfg = (struct stm32_gate_cfg *) {_gate},\
.func = _clk_stm32_register_gate,\
}
#define STM32_GATE_PDATA(_id, _name, _parent, _flags, _gate)\
{\
.id = _id,\
.name = _name,\
.parent_data = _parent,\
.flags = _flags,\
.cfg = (struct stm32_gate_cfg *) {_gate},\
.func = _clk_stm32_register_gate,\
}
#define _STM32_GATE(_gate_offset, _gate_bit_idx, _gate_flags, _mgate, _ops)\
(&(struct stm32_gate_cfg) {\
&(struct gate_cfg) {\
.reg_off = _gate_offset,\
.bit_idx = _gate_bit_idx,\
.gate_flags = _gate_flags,\
},\
.mgate = _mgate,\
.ops = _ops,\
})
#define _STM32_MGATE(_mgate)\
(&per_gate_cfg[_mgate])
#define _GATE(_gate_offset, _gate_bit_idx, _gate_flags)\
_STM32_GATE(_gate_offset, _gate_bit_idx, _gate_flags,\
NULL, NULL)\
#define _GATE_MP1(_gate_offset, _gate_bit_idx, _gate_flags)\
_STM32_GATE(_gate_offset, _gate_bit_idx, _gate_flags,\
NULL, &mp1_gate_clk_ops)\
#define _MGATE_MP1(_mgate)\
.gate = &per_gate_cfg[_mgate]
#define GATE_MP1(_id, _name, _parent, _flags, _offset, _bit_idx, _gate_flags)\
STM32_GATE(_id, _name, _parent, _flags,\
_GATE_MP1(_offset, _bit_idx, _gate_flags))
#define MGATE_MP1(_id, _name, _parent, _flags, _mgate)\
STM32_GATE(_id, _name, _parent, _flags,\
_STM32_MGATE(_mgate))
#define MGATE_MP1_PDATA(_id, _name, _parent, _flags, _mgate)\
STM32_GATE_PDATA(_id, _name, _parent, _flags,\
_STM32_MGATE(_mgate))
#define _STM32_DIV(_div_offset, _div_shift, _div_width,\
_div_flags, _div_table, _ops)\
.div = &(struct stm32_div_cfg) {\
&(struct div_cfg) {\
.reg_off = _div_offset,\
.shift = _div_shift,\
.width = _div_width,\
.div_flags = _div_flags,\
.table = _div_table,\
},\
.ops = _ops,\
}
#define _DIV(_div_offset, _div_shift, _div_width, _div_flags, _div_table)\
_STM32_DIV(_div_offset, _div_shift, _div_width,\
_div_flags, _div_table, NULL)\
#define _DIV_RTC(_div_offset, _div_shift, _div_width, _div_flags, _div_table)\
_STM32_DIV(_div_offset, _div_shift, _div_width,\
_div_flags, _div_table, &rtc_div_clk_ops)
#define _STM32_MUX(_offset, _shift, _width, _mux_flags, _mmux, _ops)\
.mux = &(struct stm32_mux_cfg) {\
&(struct mux_cfg) {\
.reg_off = _offset,\
.shift = _shift,\
.width = _width,\
.mux_flags = _mux_flags,\
.table = NULL,\
},\
.mmux = _mmux,\
.ops = _ops,\
}
#define _MUX(_offset, _shift, _width, _mux_flags)\
_STM32_MUX(_offset, _shift, _width, _mux_flags, NULL, NULL)\
#define _MMUX(_mmux) .mux = &ker_mux_cfg[_mmux]
#define PARENT(_parent) ((const char *[]) { _parent})
#define _NO_MUX .mux = NULL
#define _NO_DIV .div = NULL
#define _NO_GATE .gate = NULL
#define COMPOSITE(_id, _name, _parents, _flags, _gate, _mux, _div)\
{\
.id = _id,\
.name = _name,\
.parent_names = _parents,\
.num_parents = ARRAY_SIZE(_parents),\
.flags = _flags,\
.cfg = &(struct stm32_composite_cfg) {\
_gate,\
_mux,\
_div,\
},\
.func = _clk_stm32_register_composite,\
}
#define PCLK(_id, _name, _parent, _flags, _mgate)\
MGATE_MP1(_id, _name, _parent, _flags, _mgate)
#define PCLK_PDATA(_id, _name, _parent, _flags, _mgate)\
MGATE_MP1_PDATA(_id, _name, _parent, _flags, _mgate)
#define KCLK(_id, _name, _parents, _flags, _mgate, _mmux)\
COMPOSITE(_id, _name, _parents, CLK_OPS_PARENT_ENABLE |\
CLK_SET_RATE_NO_REPARENT | _flags,\
_MGATE_MP1(_mgate),\
_MMUX(_mmux),\
_NO_DIV)
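/*
 * Expansion sketch (for illustration only): a table line such as the
 * SDMMC1_K entry used later in stm32mp1_clock_cfg[],
 *
 *	KCLK(SDMMC1_K, "sdmmc1_k", sdmmc12_src, 0, G_SDMMC1, M_SDMMC12),
 *
 * expands to a COMPOSITE() whose gate is the shared per_gate_cfg[G_SDMMC1]
 * entry (via _MGATE_MP1), whose mux is the shared ker_mux_cfg[M_SDMMC12]
 * entry (via _MMUX) and which has no divider, registered through
 * _clk_stm32_register_composite().
 */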
enum {
G_SAI1,
G_SAI2,
G_SAI3,
G_SAI4,
G_SPI1,
G_SPI2,
G_SPI3,
G_SPI4,
G_SPI5,
G_SPI6,
G_SPDIF,
G_I2C1,
G_I2C2,
G_I2C3,
G_I2C4,
G_I2C5,
G_I2C6,
G_USART2,
G_UART4,
G_USART3,
G_UART5,
G_USART1,
G_USART6,
G_UART7,
G_UART8,
G_LPTIM1,
G_LPTIM2,
G_LPTIM3,
G_LPTIM4,
G_LPTIM5,
G_LTDC,
G_DSI,
G_QSPI,
G_FMC,
G_SDMMC1,
G_SDMMC2,
G_SDMMC3,
G_USBO,
G_USBPHY,
G_RNG1,
G_RNG2,
G_FDCAN,
G_DAC12,
G_CEC,
G_ADC12,
G_GPU,
G_STGEN,
G_DFSDM,
G_ADFSDM,
G_TIM2,
G_TIM3,
G_TIM4,
G_TIM5,
G_TIM6,
G_TIM7,
G_TIM12,
G_TIM13,
G_TIM14,
G_MDIO,
G_TIM1,
G_TIM8,
G_TIM15,
G_TIM16,
G_TIM17,
G_SYSCFG,
G_VREF,
G_TMPSENS,
G_PMBCTRL,
G_HDP,
G_IWDG2,
G_STGENRO,
G_DMA1,
G_DMA2,
G_DMAMUX,
G_DCMI,
G_CRYP2,
G_HASH2,
G_CRC2,
G_HSEM,
G_IPCC,
G_GPIOA,
G_GPIOB,
G_GPIOC,
G_GPIOD,
G_GPIOE,
G_GPIOF,
G_GPIOG,
G_GPIOH,
G_GPIOI,
G_GPIOJ,
G_GPIOK,
G_MDMA,
G_ETHCK,
G_ETHTX,
G_ETHRX,
G_ETHMAC,
G_CRC1,
G_USBH,
G_ETHSTP,
G_RTCAPB,
G_TZC1,
G_TZC2,
G_TZPC,
G_IWDG1,
G_BSEC,
G_GPIOZ,
G_CRYP1,
G_HASH1,
G_BKPSRAM,
G_DDRPERFM,
G_LAST
};
static struct stm32_mgate mp1_mgate[G_LAST];
#define _K_GATE(_id, _gate_offset, _gate_bit_idx, _gate_flags,\
_mgate, _ops)\
[_id] = {\
&(struct gate_cfg) {\
.reg_off = _gate_offset,\
.bit_idx = _gate_bit_idx,\
.gate_flags = _gate_flags,\
},\
.mgate = _mgate,\
.ops = _ops,\
}
#define K_GATE(_id, _gate_offset, _gate_bit_idx, _gate_flags)\
_K_GATE(_id, _gate_offset, _gate_bit_idx, _gate_flags,\
NULL, &mp1_gate_clk_ops)
#define K_MGATE(_id, _gate_offset, _gate_bit_idx, _gate_flags)\
_K_GATE(_id, _gate_offset, _gate_bit_idx, _gate_flags,\
&mp1_mgate[_id], &mp1_mgate_clk_ops)
/* Peripheral gates */
static struct stm32_gate_cfg per_gate_cfg[G_LAST] = {
/* Multi gates */
K_GATE(G_MDIO, RCC_APB1ENSETR, 31, 0),
K_MGATE(G_DAC12, RCC_APB1ENSETR, 29, 0),
K_MGATE(G_CEC, RCC_APB1ENSETR, 27, 0),
K_MGATE(G_SPDIF, RCC_APB1ENSETR, 26, 0),
K_MGATE(G_I2C5, RCC_APB1ENSETR, 24, 0),
K_MGATE(G_I2C3, RCC_APB1ENSETR, 23, 0),
K_MGATE(G_I2C2, RCC_APB1ENSETR, 22, 0),
K_MGATE(G_I2C1, RCC_APB1ENSETR, 21, 0),
K_MGATE(G_UART8, RCC_APB1ENSETR, 19, 0),
K_MGATE(G_UART7, RCC_APB1ENSETR, 18, 0),
K_MGATE(G_UART5, RCC_APB1ENSETR, 17, 0),
K_MGATE(G_UART4, RCC_APB1ENSETR, 16, 0),
K_MGATE(G_USART3, RCC_APB1ENSETR, 15, 0),
K_MGATE(G_USART2, RCC_APB1ENSETR, 14, 0),
K_MGATE(G_SPI3, RCC_APB1ENSETR, 12, 0),
K_MGATE(G_SPI2, RCC_APB1ENSETR, 11, 0),
K_MGATE(G_LPTIM1, RCC_APB1ENSETR, 9, 0),
K_GATE(G_TIM14, RCC_APB1ENSETR, 8, 0),
K_GATE(G_TIM13, RCC_APB1ENSETR, 7, 0),
K_GATE(G_TIM12, RCC_APB1ENSETR, 6, 0),
K_GATE(G_TIM7, RCC_APB1ENSETR, 5, 0),
K_GATE(G_TIM6, RCC_APB1ENSETR, 4, 0),
K_GATE(G_TIM5, RCC_APB1ENSETR, 3, 0),
K_GATE(G_TIM4, RCC_APB1ENSETR, 2, 0),
K_GATE(G_TIM3, RCC_APB1ENSETR, 1, 0),
K_GATE(G_TIM2, RCC_APB1ENSETR, 0, 0),
K_MGATE(G_FDCAN, RCC_APB2ENSETR, 24, 0),
K_GATE(G_ADFSDM, RCC_APB2ENSETR, 21, 0),
K_GATE(G_DFSDM, RCC_APB2ENSETR, 20, 0),
K_MGATE(G_SAI3, RCC_APB2ENSETR, 18, 0),
K_MGATE(G_SAI2, RCC_APB2ENSETR, 17, 0),
K_MGATE(G_SAI1, RCC_APB2ENSETR, 16, 0),
K_MGATE(G_USART6, RCC_APB2ENSETR, 13, 0),
K_MGATE(G_SPI5, RCC_APB2ENSETR, 10, 0),
K_MGATE(G_SPI4, RCC_APB2ENSETR, 9, 0),
K_MGATE(G_SPI1, RCC_APB2ENSETR, 8, 0),
K_GATE(G_TIM17, RCC_APB2ENSETR, 4, 0),
K_GATE(G_TIM16, RCC_APB2ENSETR, 3, 0),
K_GATE(G_TIM15, RCC_APB2ENSETR, 2, 0),
K_GATE(G_TIM8, RCC_APB2ENSETR, 1, 0),
K_GATE(G_TIM1, RCC_APB2ENSETR, 0, 0),
K_GATE(G_HDP, RCC_APB3ENSETR, 20, 0),
K_GATE(G_PMBCTRL, RCC_APB3ENSETR, 17, 0),
K_GATE(G_TMPSENS, RCC_APB3ENSETR, 16, 0),
K_GATE(G_VREF, RCC_APB3ENSETR, 13, 0),
K_GATE(G_SYSCFG, RCC_APB3ENSETR, 11, 0),
K_MGATE(G_SAI4, RCC_APB3ENSETR, 8, 0),
K_MGATE(G_LPTIM5, RCC_APB3ENSETR, 3, 0),
K_MGATE(G_LPTIM4, RCC_APB3ENSETR, 2, 0),
K_MGATE(G_LPTIM3, RCC_APB3ENSETR, 1, 0),
K_MGATE(G_LPTIM2, RCC_APB3ENSETR, 0, 0),
K_GATE(G_STGENRO, RCC_APB4ENSETR, 20, 0),
K_MGATE(G_USBPHY, RCC_APB4ENSETR, 16, 0),
K_GATE(G_IWDG2, RCC_APB4ENSETR, 15, 0),
K_GATE(G_DDRPERFM, RCC_APB4ENSETR, 8, 0),
K_MGATE(G_DSI, RCC_APB4ENSETR, 4, 0),
K_MGATE(G_LTDC, RCC_APB4ENSETR, 0, 0),
K_GATE(G_STGEN, RCC_APB5ENSETR, 20, 0),
K_GATE(G_BSEC, RCC_APB5ENSETR, 16, 0),
K_GATE(G_IWDG1, RCC_APB5ENSETR, 15, 0),
K_GATE(G_TZPC, RCC_APB5ENSETR, 13, 0),
K_GATE(G_TZC2, RCC_APB5ENSETR, 12, 0),
K_GATE(G_TZC1, RCC_APB5ENSETR, 11, 0),
K_GATE(G_RTCAPB, RCC_APB5ENSETR, 8, 0),
K_MGATE(G_USART1, RCC_APB5ENSETR, 4, 0),
K_MGATE(G_I2C6, RCC_APB5ENSETR, 3, 0),
K_MGATE(G_I2C4, RCC_APB5ENSETR, 2, 0),
K_MGATE(G_SPI6, RCC_APB5ENSETR, 0, 0),
K_MGATE(G_SDMMC3, RCC_AHB2ENSETR, 16, 0),
K_MGATE(G_USBO, RCC_AHB2ENSETR, 8, 0),
K_MGATE(G_ADC12, RCC_AHB2ENSETR, 5, 0),
K_GATE(G_DMAMUX, RCC_AHB2ENSETR, 2, 0),
K_GATE(G_DMA2, RCC_AHB2ENSETR, 1, 0),
K_GATE(G_DMA1, RCC_AHB2ENSETR, 0, 0),
K_GATE(G_IPCC, RCC_AHB3ENSETR, 12, 0),
K_GATE(G_HSEM, RCC_AHB3ENSETR, 11, 0),
K_GATE(G_CRC2, RCC_AHB3ENSETR, 7, 0),
K_MGATE(G_RNG2, RCC_AHB3ENSETR, 6, 0),
K_GATE(G_HASH2, RCC_AHB3ENSETR, 5, 0),
K_GATE(G_CRYP2, RCC_AHB3ENSETR, 4, 0),
K_GATE(G_DCMI, RCC_AHB3ENSETR, 0, 0),
K_GATE(G_GPIOK, RCC_AHB4ENSETR, 10, 0),
K_GATE(G_GPIOJ, RCC_AHB4ENSETR, 9, 0),
K_GATE(G_GPIOI, RCC_AHB4ENSETR, 8, 0),
K_GATE(G_GPIOH, RCC_AHB4ENSETR, 7, 0),
K_GATE(G_GPIOG, RCC_AHB4ENSETR, 6, 0),
K_GATE(G_GPIOF, RCC_AHB4ENSETR, 5, 0),
K_GATE(G_GPIOE, RCC_AHB4ENSETR, 4, 0),
K_GATE(G_GPIOD, RCC_AHB4ENSETR, 3, 0),
K_GATE(G_GPIOC, RCC_AHB4ENSETR, 2, 0),
K_GATE(G_GPIOB, RCC_AHB4ENSETR, 1, 0),
K_GATE(G_GPIOA, RCC_AHB4ENSETR, 0, 0),
K_GATE(G_BKPSRAM, RCC_AHB5ENSETR, 8, 0),
K_MGATE(G_RNG1, RCC_AHB5ENSETR, 6, 0),
K_GATE(G_HASH1, RCC_AHB5ENSETR, 5, 0),
K_GATE(G_CRYP1, RCC_AHB5ENSETR, 4, 0),
K_GATE(G_GPIOZ, RCC_AHB5ENSETR, 0, 0),
K_GATE(G_USBH, RCC_AHB6ENSETR, 24, 0),
K_GATE(G_CRC1, RCC_AHB6ENSETR, 20, 0),
K_MGATE(G_SDMMC2, RCC_AHB6ENSETR, 17, 0),
K_MGATE(G_SDMMC1, RCC_AHB6ENSETR, 16, 0),
K_MGATE(G_QSPI, RCC_AHB6ENSETR, 14, 0),
K_MGATE(G_FMC, RCC_AHB6ENSETR, 12, 0),
K_GATE(G_ETHMAC, RCC_AHB6ENSETR, 10, 0),
K_GATE(G_ETHRX, RCC_AHB6ENSETR, 9, 0),
K_GATE(G_ETHTX, RCC_AHB6ENSETR, 8, 0),
K_GATE(G_ETHCK, RCC_AHB6ENSETR, 7, 0),
K_MGATE(G_GPU, RCC_AHB6ENSETR, 5, 0),
K_GATE(G_MDMA, RCC_AHB6ENSETR, 0, 0),
K_GATE(G_ETHSTP, RCC_AHB6LPENSETR, 11, 0),
};
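/*
 * Note on gate sharing (illustrative, inferred from the table above): bits
 * in the RCC_xxxENSETR registers gate both a peripheral bus clock and the
 * matching kernel clock. K_MGATE() entries therefore attach a shared
 * mp1_mgate[] element so that, for example, "sdmmc1" (PCLK) and "sdmmc1_k"
 * (KCLK) can reference the same enable bit without fighting over it, while
 * plain K_GATE() entries have a single user.
 */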
enum {
M_SDMMC12,
M_SDMMC3,
M_FMC,
M_QSPI,
M_RNG1,
M_RNG2,
M_USBPHY,
M_USBO,
M_STGEN,
M_SPDIF,
M_SPI1,
M_SPI23,
M_SPI45,
M_SPI6,
M_CEC,
M_I2C12,
M_I2C35,
M_I2C46,
M_LPTIM1,
M_LPTIM23,
M_LPTIM45,
M_USART1,
M_UART24,
M_UART35,
M_USART6,
M_UART78,
M_SAI1,
M_SAI2,
M_SAI3,
M_SAI4,
M_DSI,
M_FDCAN,
M_ADC12,
M_ETHCK,
M_CKPER,
M_LAST
};
static struct stm32_mmux ker_mux[M_LAST];
#define _K_MUX(_id, _offset, _shift, _width, _mux_flags, _mmux, _ops)\
[_id] = {\
&(struct mux_cfg) {\
.reg_off = _offset,\
.shift = _shift,\
.width = _width,\
.mux_flags = _mux_flags,\
.table = NULL,\
},\
.mmux = _mmux,\
.ops = _ops,\
}
#define K_MUX(_id, _offset, _shift, _width, _mux_flags)\
_K_MUX(_id, _offset, _shift, _width, _mux_flags,\
NULL, NULL)
#define K_MMUX(_id, _offset, _shift, _width, _mux_flags)\
_K_MUX(_id, _offset, _shift, _width, _mux_flags,\
&ker_mux[_id], &clk_mmux_ops)
static const struct stm32_mux_cfg ker_mux_cfg[M_LAST] = {
/* Kernel multi mux */
K_MMUX(M_SDMMC12, RCC_SDMMC12CKSELR, 0, 3, 0),
K_MMUX(M_SPI23, RCC_SPI2S23CKSELR, 0, 3, 0),
K_MMUX(M_SPI45, RCC_SPI2S45CKSELR, 0, 3, 0),
K_MMUX(M_I2C12, RCC_I2C12CKSELR, 0, 3, 0),
K_MMUX(M_I2C35, RCC_I2C35CKSELR, 0, 3, 0),
K_MMUX(M_LPTIM23, RCC_LPTIM23CKSELR, 0, 3, 0),
K_MMUX(M_LPTIM45, RCC_LPTIM45CKSELR, 0, 3, 0),
K_MMUX(M_UART24, RCC_UART24CKSELR, 0, 3, 0),
K_MMUX(M_UART35, RCC_UART35CKSELR, 0, 3, 0),
K_MMUX(M_UART78, RCC_UART78CKSELR, 0, 3, 0),
K_MMUX(M_SAI1, RCC_SAI1CKSELR, 0, 3, 0),
K_MMUX(M_ETHCK, RCC_ETHCKSELR, 0, 2, 0),
K_MMUX(M_I2C46, RCC_I2C46CKSELR, 0, 3, 0),
/* Kernel simple mux */
K_MUX(M_RNG2, RCC_RNG2CKSELR, 0, 2, 0),
K_MUX(M_SDMMC3, RCC_SDMMC3CKSELR, 0, 3, 0),
K_MUX(M_FMC, RCC_FMCCKSELR, 0, 2, 0),
K_MUX(M_QSPI, RCC_QSPICKSELR, 0, 2, 0),
K_MUX(M_USBPHY, RCC_USBCKSELR, 0, 2, 0),
K_MUX(M_USBO, RCC_USBCKSELR, 4, 1, 0),
K_MUX(M_SPDIF, RCC_SPDIFCKSELR, 0, 2, 0),
K_MUX(M_SPI1, RCC_SPI2S1CKSELR, 0, 3, 0),
K_MUX(M_CEC, RCC_CECCKSELR, 0, 2, 0),
K_MUX(M_LPTIM1, RCC_LPTIM1CKSELR, 0, 3, 0),
K_MUX(M_USART6, RCC_UART6CKSELR, 0, 3, 0),
K_MUX(M_FDCAN, RCC_FDCANCKSELR, 0, 2, 0),
K_MUX(M_SAI2, RCC_SAI2CKSELR, 0, 3, 0),
K_MUX(M_SAI3, RCC_SAI3CKSELR, 0, 3, 0),
K_MUX(M_SAI4, RCC_SAI4CKSELR, 0, 3, 0),
K_MUX(M_ADC12, RCC_ADCCKSELR, 0, 2, 0),
K_MUX(M_DSI, RCC_DSICKSELR, 0, 1, 0),
K_MUX(M_CKPER, RCC_CPERCKSELR, 0, 2, 0),
K_MUX(M_RNG1, RCC_RNG1CKSELR, 0, 2, 0),
K_MUX(M_STGEN, RCC_STGENCKSELR, 0, 2, 0),
K_MUX(M_USART1, RCC_UART1CKSELR, 0, 3, 0),
K_MUX(M_SPI6, RCC_SPI6CKSELR, 0, 3, 0),
};
static const struct clock_config stm32mp1_clock_cfg[] = {
/* External / Internal Oscillators */
GATE_MP1(CK_HSE, "ck_hse", "clk-hse", 0, RCC_OCENSETR, 8, 0),
/* ck_csi is used by IO compensation and should be critical */
GATE_MP1(CK_CSI, "ck_csi", "clk-csi", CLK_IS_CRITICAL,
RCC_OCENSETR, 4, 0),
COMPOSITE(CK_HSI, "ck_hsi", PARENT("clk-hsi"), 0,
_GATE_MP1(RCC_OCENSETR, 0, 0),
_NO_MUX,
_DIV(RCC_HSICFGR, 0, 2, CLK_DIVIDER_POWER_OF_TWO |
CLK_DIVIDER_READ_ONLY, NULL)),
GATE(CK_LSI, "ck_lsi", "clk-lsi", 0, RCC_RDLSICR, 0, 0),
GATE(CK_LSE, "ck_lse", "clk-lse", 0, RCC_BDCR, 0, 0),
FIXED_FACTOR(CK_HSE_DIV2, "clk-hse-div2", "ck_hse", 0, 1, 2),
/* PLLs */
PLL(PLL1, "pll1", ref12_parents, 0, RCC_PLL1CR, RCC_RCK12SELR),
PLL(PLL2, "pll2", ref12_parents, 0, RCC_PLL2CR, RCC_RCK12SELR),
PLL(PLL3, "pll3", ref3_parents, 0, RCC_PLL3CR, RCC_RCK3SELR),
PLL(PLL4, "pll4", ref4_parents, 0, RCC_PLL4CR, RCC_RCK4SELR),
/* ODF */
COMPOSITE(PLL1_P, "pll1_p", PARENT("pll1"), 0,
_GATE(RCC_PLL1CR, 4, 0),
_NO_MUX,
_DIV(RCC_PLL1CFGR2, 0, 7, 0, NULL)),
COMPOSITE(PLL2_P, "pll2_p", PARENT("pll2"), 0,
_GATE(RCC_PLL2CR, 4, 0),
_NO_MUX,
_DIV(RCC_PLL2CFGR2, 0, 7, 0, NULL)),
COMPOSITE(PLL2_Q, "pll2_q", PARENT("pll2"), 0,
_GATE(RCC_PLL2CR, 5, 0),
_NO_MUX,
_DIV(RCC_PLL2CFGR2, 8, 7, 0, NULL)),
COMPOSITE(PLL2_R, "pll2_r", PARENT("pll2"), CLK_IS_CRITICAL,
_GATE(RCC_PLL2CR, 6, 0),
_NO_MUX,
_DIV(RCC_PLL2CFGR2, 16, 7, 0, NULL)),
COMPOSITE(PLL3_P, "pll3_p", PARENT("pll3"), 0,
_GATE(RCC_PLL3CR, 4, 0),
_NO_MUX,
_DIV(RCC_PLL3CFGR2, 0, 7, 0, NULL)),
COMPOSITE(PLL3_Q, "pll3_q", PARENT("pll3"), 0,
_GATE(RCC_PLL3CR, 5, 0),
_NO_MUX,
_DIV(RCC_PLL3CFGR2, 8, 7, 0, NULL)),
COMPOSITE(PLL3_R, "pll3_r", PARENT("pll3"), 0,
_GATE(RCC_PLL3CR, 6, 0),
_NO_MUX,
_DIV(RCC_PLL3CFGR2, 16, 7, 0, NULL)),
COMPOSITE(PLL4_P, "pll4_p", PARENT("pll4"), 0,
_GATE(RCC_PLL4CR, 4, 0),
_NO_MUX,
_DIV(RCC_PLL4CFGR2, 0, 7, 0, NULL)),
COMPOSITE(PLL4_Q, "pll4_q", PARENT("pll4"), 0,
_GATE(RCC_PLL4CR, 5, 0),
_NO_MUX,
_DIV(RCC_PLL4CFGR2, 8, 7, 0, NULL)),
COMPOSITE(PLL4_R, "pll4_r", PARENT("pll4"), 0,
_GATE(RCC_PLL4CR, 6, 0),
_NO_MUX,
_DIV(RCC_PLL4CFGR2, 16, 7, 0, NULL)),
/* MUX system clocks */
MUX(CK_PER, "ck_per", per_src, CLK_OPS_PARENT_ENABLE,
RCC_CPERCKSELR, 0, 2, 0),
MUX(CK_MPU, "ck_mpu", cpu_src, CLK_OPS_PARENT_ENABLE |
CLK_IS_CRITICAL, RCC_MPCKSELR, 0, 2, 0),
COMPOSITE(CK_AXI, "ck_axi", axi_src, CLK_IS_CRITICAL |
CLK_OPS_PARENT_ENABLE,
_NO_GATE,
_MUX(RCC_ASSCKSELR, 0, 2, 0),
_DIV(RCC_AXIDIVR, 0, 3, 0, axi_div_table)),
COMPOSITE(CK_MCU, "ck_mcu", mcu_src, CLK_IS_CRITICAL |
CLK_OPS_PARENT_ENABLE,
_NO_GATE,
_MUX(RCC_MSSCKSELR, 0, 2, 0),
_DIV(RCC_MCUDIVR, 0, 4, 0, mcu_div_table)),
DIV_TABLE(NO_ID, "pclk1", "ck_mcu", CLK_IGNORE_UNUSED, RCC_APB1DIVR, 0,
3, CLK_DIVIDER_READ_ONLY, apb_div_table),
DIV_TABLE(NO_ID, "pclk2", "ck_mcu", CLK_IGNORE_UNUSED, RCC_APB2DIVR, 0,
3, CLK_DIVIDER_READ_ONLY, apb_div_table),
DIV_TABLE(NO_ID, "pclk3", "ck_mcu", CLK_IGNORE_UNUSED, RCC_APB3DIVR, 0,
3, CLK_DIVIDER_READ_ONLY, apb_div_table),
DIV_TABLE(NO_ID, "pclk4", "ck_axi", CLK_IGNORE_UNUSED, RCC_APB4DIVR, 0,
3, CLK_DIVIDER_READ_ONLY, apb_div_table),
DIV_TABLE(NO_ID, "pclk5", "ck_axi", CLK_IGNORE_UNUSED, RCC_APB5DIVR, 0,
3, CLK_DIVIDER_READ_ONLY, apb_div_table),
/* Kernel Timers */
STM32_CKTIM("ck1_tim", "pclk1", 0, RCC_APB1DIVR, RCC_TIMG1PRER),
STM32_CKTIM("ck2_tim", "pclk2", 0, RCC_APB2DIVR, RCC_TIMG2PRER),
STM32_TIM(TIM2_K, "tim2_k", "ck1_tim", RCC_APB1ENSETR, 0),
STM32_TIM(TIM3_K, "tim3_k", "ck1_tim", RCC_APB1ENSETR, 1),
STM32_TIM(TIM4_K, "tim4_k", "ck1_tim", RCC_APB1ENSETR, 2),
STM32_TIM(TIM5_K, "tim5_k", "ck1_tim", RCC_APB1ENSETR, 3),
STM32_TIM(TIM6_K, "tim6_k", "ck1_tim", RCC_APB1ENSETR, 4),
STM32_TIM(TIM7_K, "tim7_k", "ck1_tim", RCC_APB1ENSETR, 5),
STM32_TIM(TIM12_K, "tim12_k", "ck1_tim", RCC_APB1ENSETR, 6),
STM32_TIM(TIM13_K, "tim13_k", "ck1_tim", RCC_APB1ENSETR, 7),
STM32_TIM(TIM14_K, "tim14_k", "ck1_tim", RCC_APB1ENSETR, 8),
STM32_TIM(TIM1_K, "tim1_k", "ck2_tim", RCC_APB2ENSETR, 0),
STM32_TIM(TIM8_K, "tim8_k", "ck2_tim", RCC_APB2ENSETR, 1),
STM32_TIM(TIM15_K, "tim15_k", "ck2_tim", RCC_APB2ENSETR, 2),
STM32_TIM(TIM16_K, "tim16_k", "ck2_tim", RCC_APB2ENSETR, 3),
STM32_TIM(TIM17_K, "tim17_k", "ck2_tim", RCC_APB2ENSETR, 4),
/* Peripheral clocks */
PCLK(TIM2, "tim2", "pclk1", CLK_IGNORE_UNUSED, G_TIM2),
PCLK(TIM3, "tim3", "pclk1", CLK_IGNORE_UNUSED, G_TIM3),
PCLK(TIM4, "tim4", "pclk1", CLK_IGNORE_UNUSED, G_TIM4),
PCLK(TIM5, "tim5", "pclk1", CLK_IGNORE_UNUSED, G_TIM5),
PCLK(TIM6, "tim6", "pclk1", CLK_IGNORE_UNUSED, G_TIM6),
PCLK(TIM7, "tim7", "pclk1", CLK_IGNORE_UNUSED, G_TIM7),
PCLK(TIM12, "tim12", "pclk1", CLK_IGNORE_UNUSED, G_TIM12),
PCLK(TIM13, "tim13", "pclk1", CLK_IGNORE_UNUSED, G_TIM13),
PCLK(TIM14, "tim14", "pclk1", CLK_IGNORE_UNUSED, G_TIM14),
PCLK(LPTIM1, "lptim1", "pclk1", 0, G_LPTIM1),
PCLK(SPI2, "spi2", "pclk1", 0, G_SPI2),
PCLK(SPI3, "spi3", "pclk1", 0, G_SPI3),
PCLK(USART2, "usart2", "pclk1", 0, G_USART2),
PCLK(USART3, "usart3", "pclk1", 0, G_USART3),
PCLK(UART4, "uart4", "pclk1", 0, G_UART4),
PCLK(UART5, "uart5", "pclk1", 0, G_UART5),
PCLK(UART7, "uart7", "pclk1", 0, G_UART7),
PCLK(UART8, "uart8", "pclk1", 0, G_UART8),
PCLK(I2C1, "i2c1", "pclk1", 0, G_I2C1),
PCLK(I2C2, "i2c2", "pclk1", 0, G_I2C2),
PCLK(I2C3, "i2c3", "pclk1", 0, G_I2C3),
PCLK(I2C5, "i2c5", "pclk1", 0, G_I2C5),
PCLK(SPDIF, "spdif", "pclk1", 0, G_SPDIF),
PCLK(CEC, "cec", "pclk1", 0, G_CEC),
PCLK(DAC12, "dac12", "pclk1", 0, G_DAC12),
PCLK(MDIO, "mdio", "pclk1", 0, G_MDIO),
PCLK(TIM1, "tim1", "pclk2", CLK_IGNORE_UNUSED, G_TIM1),
PCLK(TIM8, "tim8", "pclk2", CLK_IGNORE_UNUSED, G_TIM8),
PCLK(TIM15, "tim15", "pclk2", CLK_IGNORE_UNUSED, G_TIM15),
PCLK(TIM16, "tim16", "pclk2", CLK_IGNORE_UNUSED, G_TIM16),
PCLK(TIM17, "tim17", "pclk2", CLK_IGNORE_UNUSED, G_TIM17),
PCLK(SPI1, "spi1", "pclk2", 0, G_SPI1),
PCLK(SPI4, "spi4", "pclk2", 0, G_SPI4),
PCLK(SPI5, "spi5", "pclk2", 0, G_SPI5),
PCLK(USART6, "usart6", "pclk2", 0, G_USART6),
PCLK(SAI1, "sai1", "pclk2", 0, G_SAI1),
PCLK(SAI2, "sai2", "pclk2", 0, G_SAI2),
PCLK(SAI3, "sai3", "pclk2", 0, G_SAI3),
PCLK(DFSDM, "dfsdm", "pclk2", 0, G_DFSDM),
PCLK(FDCAN, "fdcan", "pclk2", 0, G_FDCAN),
PCLK(LPTIM2, "lptim2", "pclk3", 0, G_LPTIM2),
PCLK(LPTIM3, "lptim3", "pclk3", 0, G_LPTIM3),
PCLK(LPTIM4, "lptim4", "pclk3", 0, G_LPTIM4),
PCLK(LPTIM5, "lptim5", "pclk3", 0, G_LPTIM5),
PCLK(SAI4, "sai4", "pclk3", 0, G_SAI4),
PCLK(SYSCFG, "syscfg", "pclk3", 0, G_SYSCFG),
PCLK(VREF, "vref", "pclk3", 13, G_VREF),
PCLK(TMPSENS, "tmpsens", "pclk3", 0, G_TMPSENS),
PCLK(PMBCTRL, "pmbctrl", "pclk3", 0, G_PMBCTRL),
PCLK(HDP, "hdp", "pclk3", 0, G_HDP),
PCLK(LTDC, "ltdc", "pclk4", 0, G_LTDC),
PCLK(DSI, "dsi", "pclk4", 0, G_DSI),
PCLK(IWDG2, "iwdg2", "pclk4", 0, G_IWDG2),
PCLK(USBPHY, "usbphy", "pclk4", 0, G_USBPHY),
PCLK(STGENRO, "stgenro", "pclk4", 0, G_STGENRO),
PCLK(SPI6, "spi6", "pclk5", 0, G_SPI6),
PCLK(I2C4, "i2c4", "pclk5", 0, G_I2C4),
PCLK(I2C6, "i2c6", "pclk5", 0, G_I2C6),
PCLK(USART1, "usart1", "pclk5", 0, G_USART1),
PCLK(RTCAPB, "rtcapb", "pclk5", CLK_IGNORE_UNUSED |
CLK_IS_CRITICAL, G_RTCAPB),
PCLK(TZC1, "tzc1", "ck_axi", CLK_IGNORE_UNUSED, G_TZC1),
PCLK(TZC2, "tzc2", "ck_axi", CLK_IGNORE_UNUSED, G_TZC2),
PCLK(TZPC, "tzpc", "pclk5", CLK_IGNORE_UNUSED, G_TZPC),
PCLK(IWDG1, "iwdg1", "pclk5", 0, G_IWDG1),
PCLK(BSEC, "bsec", "pclk5", CLK_IGNORE_UNUSED, G_BSEC),
PCLK(STGEN, "stgen", "pclk5", CLK_IGNORE_UNUSED, G_STGEN),
PCLK(DMA1, "dma1", "ck_mcu", 0, G_DMA1),
PCLK(DMA2, "dma2", "ck_mcu", 0, G_DMA2),
PCLK(DMAMUX, "dmamux", "ck_mcu", 0, G_DMAMUX),
PCLK(ADC12, "adc12", "ck_mcu", 0, G_ADC12),
PCLK(USBO, "usbo", "ck_mcu", 0, G_USBO),
PCLK(SDMMC3, "sdmmc3", "ck_mcu", 0, G_SDMMC3),
PCLK(DCMI, "dcmi", "ck_mcu", 0, G_DCMI),
PCLK(CRYP2, "cryp2", "ck_mcu", 0, G_CRYP2),
PCLK(HASH2, "hash2", "ck_mcu", 0, G_HASH2),
PCLK(RNG2, "rng2", "ck_mcu", 0, G_RNG2),
PCLK(CRC2, "crc2", "ck_mcu", 0, G_CRC2),
PCLK(HSEM, "hsem", "ck_mcu", 0, G_HSEM),
PCLK(IPCC, "ipcc", "ck_mcu", 0, G_IPCC),
PCLK(GPIOA, "gpioa", "ck_mcu", 0, G_GPIOA),
PCLK(GPIOB, "gpiob", "ck_mcu", 0, G_GPIOB),
PCLK(GPIOC, "gpioc", "ck_mcu", 0, G_GPIOC),
PCLK(GPIOD, "gpiod", "ck_mcu", 0, G_GPIOD),
PCLK(GPIOE, "gpioe", "ck_mcu", 0, G_GPIOE),
PCLK(GPIOF, "gpiof", "ck_mcu", 0, G_GPIOF),
PCLK(GPIOG, "gpiog", "ck_mcu", 0, G_GPIOG),
PCLK(GPIOH, "gpioh", "ck_mcu", 0, G_GPIOH),
PCLK(GPIOI, "gpioi", "ck_mcu", 0, G_GPIOI),
PCLK(GPIOJ, "gpioj", "ck_mcu", 0, G_GPIOJ),
PCLK(GPIOK, "gpiok", "ck_mcu", 0, G_GPIOK),
PCLK(GPIOZ, "gpioz", "ck_axi", CLK_IGNORE_UNUSED, G_GPIOZ),
PCLK(CRYP1, "cryp1", "ck_axi", CLK_IGNORE_UNUSED, G_CRYP1),
PCLK(HASH1, "hash1", "ck_axi", CLK_IGNORE_UNUSED, G_HASH1),
PCLK(RNG1, "rng1", "ck_axi", 0, G_RNG1),
PCLK(BKPSRAM, "bkpsram", "ck_axi", CLK_IGNORE_UNUSED, G_BKPSRAM),
PCLK(MDMA, "mdma", "ck_axi", 0, G_MDMA),
PCLK(GPU, "gpu", "ck_axi", 0, G_GPU),
PCLK(ETHTX, "ethtx", "ck_axi", 0, G_ETHTX),
PCLK_PDATA(ETHRX, "ethrx", ethrx_src, 0, G_ETHRX),
PCLK(ETHMAC, "ethmac", "ck_axi", 0, G_ETHMAC),
PCLK(FMC, "fmc", "ck_axi", CLK_IGNORE_UNUSED, G_FMC),
PCLK(QSPI, "qspi", "ck_axi", CLK_IGNORE_UNUSED, G_QSPI),
PCLK(SDMMC1, "sdmmc1", "ck_axi", 0, G_SDMMC1),
PCLK(SDMMC2, "sdmmc2", "ck_axi", 0, G_SDMMC2),
PCLK(CRC1, "crc1", "ck_axi", 0, G_CRC1),
PCLK(USBH, "usbh", "ck_axi", 0, G_USBH),
PCLK(ETHSTP, "ethstp", "ck_axi", 0, G_ETHSTP),
PCLK(DDRPERFM, "ddrperfm", "pclk4", 0, G_DDRPERFM),
/* Kernel clocks */
KCLK(SDMMC1_K, "sdmmc1_k", sdmmc12_src, 0, G_SDMMC1, M_SDMMC12),
KCLK(SDMMC2_K, "sdmmc2_k", sdmmc12_src, 0, G_SDMMC2, M_SDMMC12),
KCLK(SDMMC3_K, "sdmmc3_k", sdmmc3_src, 0, G_SDMMC3, M_SDMMC3),
KCLK(FMC_K, "fmc_k", fmc_src, 0, G_FMC, M_FMC),
KCLK(QSPI_K, "qspi_k", qspi_src, 0, G_QSPI, M_QSPI),
KCLK(RNG1_K, "rng1_k", rng_src, 0, G_RNG1, M_RNG1),
KCLK(RNG2_K, "rng2_k", rng_src, 0, G_RNG2, M_RNG2),
KCLK(USBPHY_K, "usbphy_k", usbphy_src, 0, G_USBPHY, M_USBPHY),
KCLK(STGEN_K, "stgen_k", stgen_src, CLK_IS_CRITICAL, G_STGEN, M_STGEN),
KCLK(SPDIF_K, "spdif_k", spdif_src, 0, G_SPDIF, M_SPDIF),
KCLK(SPI1_K, "spi1_k", spi123_src, 0, G_SPI1, M_SPI1),
KCLK(SPI2_K, "spi2_k", spi123_src, 0, G_SPI2, M_SPI23),
KCLK(SPI3_K, "spi3_k", spi123_src, 0, G_SPI3, M_SPI23),
KCLK(SPI4_K, "spi4_k", spi45_src, 0, G_SPI4, M_SPI45),
KCLK(SPI5_K, "spi5_k", spi45_src, 0, G_SPI5, M_SPI45),
KCLK(SPI6_K, "spi6_k", spi6_src, 0, G_SPI6, M_SPI6),
KCLK(CEC_K, "cec_k", cec_src, 0, G_CEC, M_CEC),
KCLK(I2C1_K, "i2c1_k", i2c12_src, 0, G_I2C1, M_I2C12),
KCLK(I2C2_K, "i2c2_k", i2c12_src, 0, G_I2C2, M_I2C12),
KCLK(I2C3_K, "i2c3_k", i2c35_src, 0, G_I2C3, M_I2C35),
KCLK(I2C5_K, "i2c5_k", i2c35_src, 0, G_I2C5, M_I2C35),
KCLK(I2C4_K, "i2c4_k", i2c46_src, 0, G_I2C4, M_I2C46),
KCLK(I2C6_K, "i2c6_k", i2c46_src, 0, G_I2C6, M_I2C46),
KCLK(LPTIM1_K, "lptim1_k", lptim1_src, 0, G_LPTIM1, M_LPTIM1),
KCLK(LPTIM2_K, "lptim2_k", lptim23_src, 0, G_LPTIM2, M_LPTIM23),
KCLK(LPTIM3_K, "lptim3_k", lptim23_src, 0, G_LPTIM3, M_LPTIM23),
KCLK(LPTIM4_K, "lptim4_k", lptim45_src, 0, G_LPTIM4, M_LPTIM45),
KCLK(LPTIM5_K, "lptim5_k", lptim45_src, 0, G_LPTIM5, M_LPTIM45),
KCLK(USART1_K, "usart1_k", usart1_src, 0, G_USART1, M_USART1),
KCLK(USART2_K, "usart2_k", usart234578_src, 0, G_USART2, M_UART24),
KCLK(USART3_K, "usart3_k", usart234578_src, 0, G_USART3, M_UART35),
KCLK(UART4_K, "uart4_k", usart234578_src, 0, G_UART4, M_UART24),
KCLK(UART5_K, "uart5_k", usart234578_src, 0, G_UART5, M_UART35),
KCLK(USART6_K, "uart6_k", usart6_src, 0, G_USART6, M_USART6),
KCLK(UART7_K, "uart7_k", usart234578_src, 0, G_UART7, M_UART78),
KCLK(UART8_K, "uart8_k", usart234578_src, 0, G_UART8, M_UART78),
KCLK(FDCAN_K, "fdcan_k", fdcan_src, 0, G_FDCAN, M_FDCAN),
KCLK(SAI1_K, "sai1_k", sai_src, 0, G_SAI1, M_SAI1),
KCLK(SAI2_K, "sai2_k", sai2_src, 0, G_SAI2, M_SAI2),
KCLK(SAI3_K, "sai3_k", sai_src, 0, G_SAI3, M_SAI3),
KCLK(SAI4_K, "sai4_k", sai_src, 0, G_SAI4, M_SAI4),
KCLK(ADC12_K, "adc12_k", adc12_src, 0, G_ADC12, M_ADC12),
KCLK(DSI_K, "dsi_k", dsi_src, 0, G_DSI, M_DSI),
KCLK(ADFSDM_K, "adfsdm_k", sai_src, 0, G_ADFSDM, M_SAI1),
KCLK(USBO_K, "usbo_k", usbo_src, 0, G_USBO, M_USBO),
/* Particular kernel clocks (no mux or no gate) */
MGATE_MP1(DFSDM_K, "dfsdm_k", "ck_mcu", 0, G_DFSDM),
MGATE_MP1(DSI_PX, "dsi_px", "pll4_q", CLK_SET_RATE_PARENT, G_DSI),
MGATE_MP1(LTDC_PX, "ltdc_px", "pll4_q", CLK_SET_RATE_PARENT, G_LTDC),
MGATE_MP1(GPU_K, "gpu_k", "pll2_q", 0, G_GPU),
MGATE_MP1(DAC12_K, "dac12_k", "ck_lsi", 0, G_DAC12),
COMPOSITE(NO_ID, "ck_ker_eth", eth_src, CLK_OPS_PARENT_ENABLE |
CLK_SET_RATE_NO_REPARENT,
_NO_GATE,
_MMUX(M_ETHCK),
_NO_DIV),
MGATE_MP1(ETHCK_K, "ethck_k", "ck_ker_eth", 0, G_ETHCK),
DIV(ETHPTP_K, "ethptp_k", "ck_ker_eth", CLK_OPS_PARENT_ENABLE |
CLK_SET_RATE_NO_REPARENT, RCC_ETHCKSELR, 4, 4, 0),
/* RTC clock */
COMPOSITE(RTC, "ck_rtc", rtc_src, CLK_OPS_PARENT_ENABLE,
_GATE(RCC_BDCR, 20, 0),
_MUX(RCC_BDCR, 16, 2, 0),
_DIV_RTC(RCC_RTCDIVR, 0, 6, 0, NULL)),
/* MCO clocks */
COMPOSITE(CK_MCO1, "ck_mco1", mco1_src, CLK_OPS_PARENT_ENABLE |
CLK_SET_RATE_NO_REPARENT,
_GATE(RCC_MCO1CFGR, 12, 0),
_MUX(RCC_MCO1CFGR, 0, 3, 0),
_DIV(RCC_MCO1CFGR, 4, 4, 0, NULL)),
COMPOSITE(CK_MCO2, "ck_mco2", mco2_src, CLK_OPS_PARENT_ENABLE |
CLK_SET_RATE_NO_REPARENT,
_GATE(RCC_MCO2CFGR, 12, 0),
_MUX(RCC_MCO2CFGR, 0, 3, 0),
_DIV(RCC_MCO2CFGR, 4, 4, 0, NULL)),
/* Debug clocks */
GATE(CK_DBG, "ck_sys_dbg", "ck_axi", CLK_IGNORE_UNUSED,
RCC_DBGCFGR, 8, 0),
COMPOSITE(CK_TRACE, "ck_trace", ck_trace_src, CLK_OPS_PARENT_ENABLE,
_GATE(RCC_DBGCFGR, 9, 0),
_NO_MUX,
_DIV(RCC_DBGCFGR, 0, 3, 0, ck_trace_div_table)),
};
static const u32 stm32mp1_clock_secured[] = {
CK_HSE,
CK_HSI,
CK_CSI,
CK_LSI,
CK_LSE,
PLL1,
PLL2,
PLL1_P,
PLL2_P,
PLL2_Q,
PLL2_R,
CK_MPU,
CK_AXI,
SPI6,
I2C4,
I2C6,
USART1,
RTCAPB,
TZC1,
TZC2,
TZPC,
IWDG1,
BSEC,
STGEN,
GPIOZ,
CRYP1,
HASH1,
RNG1,
BKPSRAM,
RNG1_K,
STGEN_K,
SPI6_K,
I2C4_K,
I2C6_K,
USART1_K,
RTC,
};
static bool stm32_check_security(const struct clock_config *cfg)
{
int i;
for (i = 0; i < ARRAY_SIZE(stm32mp1_clock_secured); i++)
if (cfg->id == stm32mp1_clock_secured[i])
return true;
return false;
}
struct stm32_rcc_match_data {
const struct clock_config *cfg;
unsigned int num;
unsigned int maxbinding;
u32 clear_offset;
bool (*check_security)(const struct clock_config *cfg);
};
static struct stm32_rcc_match_data stm32mp1_data = {
.cfg = stm32mp1_clock_cfg,
.num = ARRAY_SIZE(stm32mp1_clock_cfg),
.maxbinding = STM32MP1_LAST_CLK,
.clear_offset = RCC_CLR,
};
static struct stm32_rcc_match_data stm32mp1_data_secure = {
.cfg = stm32mp1_clock_cfg,
.num = ARRAY_SIZE(stm32mp1_clock_cfg),
.maxbinding = STM32MP1_LAST_CLK,
.clear_offset = RCC_CLR,
.check_security = &stm32_check_security
};
static const struct of_device_id stm32mp1_match_data[] = {
{
.compatible = "st,stm32mp1-rcc",
.data = &stm32mp1_data,
},
{
.compatible = "st,stm32mp1-rcc-secure",
.data = &stm32mp1_data_secure,
},
{ }
};
MODULE_DEVICE_TABLE(of, stm32mp1_match_data);
static int stm32_register_hw_clk(struct device *dev,
struct clk_hw_onecell_data *clk_data,
void __iomem *base, spinlock_t *lock,
const struct clock_config *cfg)
{
struct clk_hw **hws;
struct clk_hw *hw = ERR_PTR(-ENOENT);
hws = clk_data->hws;
if (cfg->func)
hw = (*cfg->func)(dev, clk_data, base, lock, cfg);
if (IS_ERR(hw)) {
pr_err("Unable to register %s\n", cfg->name);
return PTR_ERR(hw);
}
if (cfg->id != NO_ID)
hws[cfg->id] = hw;
return 0;
}
#define STM32_RESET_ID_MASK GENMASK(15, 0)
struct stm32_reset_data {
/* reset lock */
spinlock_t lock;
struct reset_controller_dev rcdev;
void __iomem *membase;
u32 clear_offset;
};
static inline struct stm32_reset_data *
to_stm32_reset_data(struct reset_controller_dev *rcdev)
{
return container_of(rcdev, struct stm32_reset_data, rcdev);
}
static int stm32_reset_update(struct reset_controller_dev *rcdev,
unsigned long id, bool assert)
{
struct stm32_reset_data *data = to_stm32_reset_data(rcdev);
int reg_width = sizeof(u32);
int bank = id / (reg_width * BITS_PER_BYTE);
int offset = id % (reg_width * BITS_PER_BYTE);
if (data->clear_offset) {
void __iomem *addr;
addr = data->membase + (bank * reg_width);
if (!assert)
addr += data->clear_offset;
writel(BIT(offset), addr);
} else {
unsigned long flags;
u32 reg;
spin_lock_irqsave(&data->lock, flags);
reg = readl(data->membase + (bank * reg_width));
if (assert)
reg |= BIT(offset);
else
reg &= ~BIT(offset);
writel(reg, data->membase + (bank * reg_width));
spin_unlock_irqrestore(&data->lock, flags);
}
return 0;
}
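/*
 * Worked example (values chosen for illustration): for reset line id = 45,
 * reg_width * BITS_PER_BYTE = 32, so bank = 45 / 32 = 1 and offset =
 * 45 % 32 = 13. When clear_offset is non-zero the IP has separate set/clear
 * registers, so a single writel(BIT(13)) to the set (assert) or clear
 * (deassert) register is atomic and needs no lock; otherwise the
 * read-modify-write path runs under data->lock.
 */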
static int stm32_reset_assert(struct reset_controller_dev *rcdev,
unsigned long id)
{
return stm32_reset_update(rcdev, id, true);
}
static int stm32_reset_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
return stm32_reset_update(rcdev, id, false);
}
static int stm32_reset_status(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct stm32_reset_data *data = to_stm32_reset_data(rcdev);
int reg_width = sizeof(u32);
int bank = id / (reg_width * BITS_PER_BYTE);
int offset = id % (reg_width * BITS_PER_BYTE);
u32 reg;
reg = readl(data->membase + (bank * reg_width));
return !!(reg & BIT(offset));
}
static const struct reset_control_ops stm32_reset_ops = {
.assert = stm32_reset_assert,
.deassert = stm32_reset_deassert,
.status = stm32_reset_status,
};
static int stm32_rcc_reset_init(struct device *dev, void __iomem *base,
const struct of_device_id *match)
{
const struct stm32_rcc_match_data *data = match->data;
struct stm32_reset_data *reset_data = NULL;
reset_data = kzalloc(sizeof(*reset_data), GFP_KERNEL);
if (!reset_data)
return -ENOMEM;
spin_lock_init(&reset_data->lock);
reset_data->membase = base;
reset_data->rcdev.owner = THIS_MODULE;
reset_data->rcdev.ops = &stm32_reset_ops;
reset_data->rcdev.of_node = dev_of_node(dev);
reset_data->rcdev.nr_resets = STM32_RESET_ID_MASK;
reset_data->clear_offset = data->clear_offset;
return reset_controller_register(&reset_data->rcdev);
}
static int stm32_rcc_clock_init(struct device *dev, void __iomem *base,
const struct of_device_id *match)
{
const struct stm32_rcc_match_data *data = match->data;
struct clk_hw_onecell_data *clk_data;
struct clk_hw **hws;
int err, n, max_binding;
max_binding = data->maxbinding;
clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, max_binding),
GFP_KERNEL);
if (!clk_data)
return -ENOMEM;
clk_data->num = max_binding;
hws = clk_data->hws;
for (n = 0; n < max_binding; n++)
hws[n] = ERR_PTR(-ENOENT);
for (n = 0; n < data->num; n++) {
if (data->check_security && data->check_security(&data->cfg[n]))
continue;
err = stm32_register_hw_clk(dev, clk_data, base, &rlock,
&data->cfg[n]);
if (err) {
dev_err(dev, "Can't register clk %s: %d\n",
data->cfg[n].name, err);
return err;
}
}
return of_clk_add_hw_provider(dev_of_node(dev), of_clk_hw_onecell_get, clk_data);
}
static int stm32_rcc_init(struct device *dev, void __iomem *base,
const struct of_device_id *match_data)
{
const struct of_device_id *match;
int err;
match = of_match_node(match_data, dev_of_node(dev));
if (!match) {
dev_err(dev, "match data not found\n");
return -ENODEV;
}
/* RCC Reset Configuration */
err = stm32_rcc_reset_init(dev, base, match);
if (err) {
pr_err("stm32mp1 reset failed to initialize\n");
return err;
}
/* RCC Clock Configuration */
err = stm32_rcc_clock_init(dev, base, match);
if (err) {
pr_err("stm32mp1 clock failed to initialize\n");
return err;
}
return 0;
}
static int stm32mp1_rcc_init(struct device *dev)
{
void __iomem *base;
int ret;
base = of_iomap(dev_of_node(dev), 0);
if (!base) {
pr_err("%pOFn: unable to map resource", dev_of_node(dev));
ret = -ENOMEM;
goto out;
}
ret = stm32_rcc_init(dev, base, stm32mp1_match_data);
out:
if (ret) {
if (base)
iounmap(base);
of_node_put(dev_of_node(dev));
}
return ret;
}
static int get_clock_deps(struct device *dev)
{
static const char * const clock_deps_name[] = {
"hsi", "hse", "csi", "lsi", "lse",
};
size_t deps_size = sizeof(struct clk *) * ARRAY_SIZE(clock_deps_name);
struct clk **clk_deps;
int i;
clk_deps = devm_kzalloc(dev, deps_size, GFP_KERNEL);
if (!clk_deps)
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(clock_deps_name); i++) {
struct clk *clk = of_clk_get_by_name(dev_of_node(dev),
clock_deps_name[i]);
if (IS_ERR(clk)) {
if (PTR_ERR(clk) != -EINVAL && PTR_ERR(clk) != -ENOENT)
return PTR_ERR(clk);
} else {
/* Device gets a reference count on the clock */
clk_deps[i] = devm_clk_get(dev, __clk_get_name(clk));
clk_put(clk);
}
}
return 0;
}
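/*
 * Sketch of the lookup dance above: of_clk_get_by_name() resolves the DT
 * "clocks" entry to a clk, devm_clk_get() then takes a device-managed
 * reference by the global clock name, and clk_put() drops the temporary
 * reference so only the devm-managed one remains for the device lifetime.
 */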
static int stm32mp1_rcc_clocks_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
int ret = get_clock_deps(dev);
if (!ret)
ret = stm32mp1_rcc_init(dev);
return ret;
}
static void stm32mp1_rcc_clocks_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *child, *np = dev_of_node(dev);
for_each_available_child_of_node(np, child)
of_clk_del_provider(child);
}
static struct platform_driver stm32mp1_rcc_clocks_driver = {
.driver = {
.name = "stm32mp1_rcc",
.of_match_table = stm32mp1_match_data,
},
.probe = stm32mp1_rcc_clocks_probe,
.remove_new = stm32mp1_rcc_clocks_remove,
};
static int __init stm32mp1_clocks_init(void)
{
return platform_driver_register(&stm32mp1_rcc_clocks_driver);
}
core_initcall(stm32mp1_clocks_init);
| linux-master | drivers/clk/clk-stm32mp1.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Clkout driver for Rockchip RK808
*
* Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
*
* Author: Chris Zhong <[email protected]>
*/
#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/mfd/rk808.h>
struct rk808_clkout {
struct regmap *regmap;
struct clk_hw clkout1_hw;
struct clk_hw clkout2_hw;
};
static unsigned long rk808_clkout_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
return 32768;
}
static int rk808_clkout2_enable(struct clk_hw *hw, bool enable)
{
struct rk808_clkout *rk808_clkout = container_of(hw,
struct rk808_clkout,
clkout2_hw);
return regmap_update_bits(rk808_clkout->regmap, RK808_CLK32OUT_REG,
CLK32KOUT2_EN, enable ? CLK32KOUT2_EN : 0);
}
static int rk808_clkout2_prepare(struct clk_hw *hw)
{
return rk808_clkout2_enable(hw, true);
}
static void rk808_clkout2_unprepare(struct clk_hw *hw)
{
rk808_clkout2_enable(hw, false);
}
static int rk808_clkout2_is_prepared(struct clk_hw *hw)
{
struct rk808_clkout *rk808_clkout = container_of(hw,
struct rk808_clkout,
clkout2_hw);
unsigned int val;
int ret = regmap_read(rk808_clkout->regmap, RK808_CLK32OUT_REG, &val);
if (ret < 0)
return ret;
return (val & CLK32KOUT2_EN) ? 1 : 0;
}
static const struct clk_ops rk808_clkout1_ops = {
.recalc_rate = rk808_clkout_recalc_rate,
};
static const struct clk_ops rk808_clkout2_ops = {
.prepare = rk808_clkout2_prepare,
.unprepare = rk808_clkout2_unprepare,
.is_prepared = rk808_clkout2_is_prepared,
.recalc_rate = rk808_clkout_recalc_rate,
};
static struct clk_hw *
of_clk_rk808_get(struct of_phandle_args *clkspec, void *data)
{
struct rk808_clkout *rk808_clkout = data;
unsigned int idx = clkspec->args[0];
if (idx >= 2) {
pr_err("%s: invalid index %u\n", __func__, idx);
return ERR_PTR(-EINVAL);
}
return idx ? &rk808_clkout->clkout2_hw : &rk808_clkout->clkout1_hw;
}
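/*
 * Consumer sketch (hypothetical device tree user): with this one-cell
 * provider,
 *
 *	clocks = <&rk808 1>;
 *
 * selects clkout2, index 0 selects clkout1, and any index >= 2 is rejected
 * above with -EINVAL.
 */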
static int rk817_clkout2_enable(struct clk_hw *hw, bool enable)
{
struct rk808_clkout *rk808_clkout = container_of(hw,
struct rk808_clkout,
clkout2_hw);
return regmap_update_bits(rk808_clkout->regmap, RK817_SYS_CFG(1),
RK817_CLK32KOUT2_EN,
enable ? RK817_CLK32KOUT2_EN : 0);
}
static int rk817_clkout2_prepare(struct clk_hw *hw)
{
return rk817_clkout2_enable(hw, true);
}
static void rk817_clkout2_unprepare(struct clk_hw *hw)
{
rk817_clkout2_enable(hw, false);
}
static int rk817_clkout2_is_prepared(struct clk_hw *hw)
{
struct rk808_clkout *rk808_clkout = container_of(hw,
struct rk808_clkout,
clkout2_hw);
unsigned int val;
int ret = regmap_read(rk808_clkout->regmap, RK817_SYS_CFG(1), &val);
if (ret < 0)
return 0;
return (val & RK817_CLK32KOUT2_EN) ? 1 : 0;
}
static const struct clk_ops rk817_clkout2_ops = {
.prepare = rk817_clkout2_prepare,
.unprepare = rk817_clkout2_unprepare,
.is_prepared = rk817_clkout2_is_prepared,
.recalc_rate = rk808_clkout_recalc_rate,
};
static const struct clk_ops *rkpmic_get_ops(long variant)
{
switch (variant) {
case RK809_ID:
case RK817_ID:
return &rk817_clkout2_ops;
/*
* For the default case, it matches the following PMIC types:
* RK805_ID
* RK808_ID
* RK818_ID
*/
default:
return &rk808_clkout2_ops;
}
}
static int rk808_clkout_probe(struct platform_device *pdev)
{
struct rk808 *rk808 = dev_get_drvdata(pdev->dev.parent);
struct device *dev = &pdev->dev;
struct clk_init_data init = {};
struct rk808_clkout *rk808_clkout;
int ret;
dev->of_node = pdev->dev.parent->of_node;
rk808_clkout = devm_kzalloc(dev,
sizeof(*rk808_clkout), GFP_KERNEL);
if (!rk808_clkout)
return -ENOMEM;
rk808_clkout->regmap = dev_get_regmap(pdev->dev.parent, NULL);
if (!rk808_clkout->regmap)
return -ENODEV;
init.parent_names = NULL;
init.num_parents = 0;
init.name = "rk808-clkout1";
init.ops = &rk808_clkout1_ops;
rk808_clkout->clkout1_hw.init = &init;
/* optional override of the clock name */
of_property_read_string_index(dev->of_node, "clock-output-names",
0, &init.name);
ret = devm_clk_hw_register(dev, &rk808_clkout->clkout1_hw);
if (ret)
return ret;
init.name = "rk808-clkout2";
init.ops = rkpmic_get_ops(rk808->variant);
rk808_clkout->clkout2_hw.init = &init;
/* optional override of the clock name */
of_property_read_string_index(dev->of_node, "clock-output-names",
1, &init.name);
ret = devm_clk_hw_register(dev, &rk808_clkout->clkout2_hw);
if (ret)
return ret;
return devm_of_clk_add_hw_provider(&pdev->dev, of_clk_rk808_get,
rk808_clkout);
}
static struct platform_driver rk808_clkout_driver = {
.probe = rk808_clkout_probe,
.driver = {
.name = "rk808-clkout",
},
};
module_platform_driver(rk808_clkout_driver);
MODULE_DESCRIPTION("Clkout driver for the rk808 series PMICs");
MODULE_AUTHOR("Chris Zhong <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:rk808-clkout");
| linux-master | drivers/clk/clk-rk808.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Clock driver for Palmas device.
*
* Copyright (c) 2013, NVIDIA Corporation.
* Copyright (c) 2013-2014 Texas Instruments, Inc.
*
* Author: Laxman Dewangan <[email protected]>
* Peter Ujfalusi <[email protected]>
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/mfd/palmas.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#define PALMAS_CLOCK_DT_EXT_CONTROL_ENABLE1 1
#define PALMAS_CLOCK_DT_EXT_CONTROL_ENABLE2 2
#define PALMAS_CLOCK_DT_EXT_CONTROL_NSLEEP 3
struct palmas_clk32k_desc {
const char *clk_name;
unsigned int control_reg;
unsigned int enable_mask;
unsigned int sleep_mask;
unsigned int sleep_reqstr_id;
int delay;
};
struct palmas_clock_info {
struct device *dev;
struct clk_hw hw;
struct palmas *palmas;
const struct palmas_clk32k_desc *clk_desc;
int ext_control_pin;
};
static inline struct palmas_clock_info *to_palmas_clks_info(struct clk_hw *hw)
{
return container_of(hw, struct palmas_clock_info, hw);
}
static unsigned long palmas_clks_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
return 32768;
}
static int palmas_clks_prepare(struct clk_hw *hw)
{
struct palmas_clock_info *cinfo = to_palmas_clks_info(hw);
int ret;
ret = palmas_update_bits(cinfo->palmas, PALMAS_RESOURCE_BASE,
cinfo->clk_desc->control_reg,
cinfo->clk_desc->enable_mask,
cinfo->clk_desc->enable_mask);
if (ret < 0)
dev_err(cinfo->dev, "Reg 0x%02x update failed, %d\n",
cinfo->clk_desc->control_reg, ret);
else if (cinfo->clk_desc->delay)
udelay(cinfo->clk_desc->delay);
return ret;
}
static void palmas_clks_unprepare(struct clk_hw *hw)
{
struct palmas_clock_info *cinfo = to_palmas_clks_info(hw);
int ret;
/*
* If the clock is externally controlled, it is enabled/disabled through
* the external pin, so skip the register update here.
*/
if (cinfo->ext_control_pin)
return;
ret = palmas_update_bits(cinfo->palmas, PALMAS_RESOURCE_BASE,
cinfo->clk_desc->control_reg,
cinfo->clk_desc->enable_mask, 0);
if (ret < 0)
dev_err(cinfo->dev, "Reg 0x%02x update failed, %d\n",
cinfo->clk_desc->control_reg, ret);
}
static int palmas_clks_is_prepared(struct clk_hw *hw)
{
struct palmas_clock_info *cinfo = to_palmas_clks_info(hw);
int ret;
u32 val;
if (cinfo->ext_control_pin)
return 1;
ret = palmas_read(cinfo->palmas, PALMAS_RESOURCE_BASE,
cinfo->clk_desc->control_reg, &val);
if (ret < 0) {
dev_err(cinfo->dev, "Reg 0x%02x read failed, %d\n",
cinfo->clk_desc->control_reg, ret);
return ret;
}
return !!(val & cinfo->clk_desc->enable_mask);
}
static const struct clk_ops palmas_clks_ops = {
.prepare = palmas_clks_prepare,
.unprepare = palmas_clks_unprepare,
.is_prepared = palmas_clks_is_prepared,
.recalc_rate = palmas_clks_recalc_rate,
};
struct palmas_clks_of_match_data {
struct clk_init_data init;
const struct palmas_clk32k_desc desc;
};
static const struct palmas_clks_of_match_data palmas_of_clk32kg = {
.init = {
.name = "clk32kg",
.ops = &palmas_clks_ops,
.flags = CLK_IGNORE_UNUSED,
},
.desc = {
.clk_name = "clk32kg",
.control_reg = PALMAS_CLK32KG_CTRL,
.enable_mask = PALMAS_CLK32KG_CTRL_MODE_ACTIVE,
.sleep_mask = PALMAS_CLK32KG_CTRL_MODE_SLEEP,
.sleep_reqstr_id = PALMAS_EXTERNAL_REQSTR_ID_CLK32KG,
.delay = 200,
},
};
static const struct palmas_clks_of_match_data palmas_of_clk32kgaudio = {
.init = {
.name = "clk32kgaudio",
.ops = &palmas_clks_ops,
.flags = CLK_IGNORE_UNUSED,
},
.desc = {
.clk_name = "clk32kgaudio",
.control_reg = PALMAS_CLK32KGAUDIO_CTRL,
.enable_mask = PALMAS_CLK32KG_CTRL_MODE_ACTIVE,
.sleep_mask = PALMAS_CLK32KG_CTRL_MODE_SLEEP,
.sleep_reqstr_id = PALMAS_EXTERNAL_REQSTR_ID_CLK32KGAUDIO,
.delay = 200,
},
};
static const struct of_device_id palmas_clks_of_match[] = {
{
.compatible = "ti,palmas-clk32kg",
.data = &palmas_of_clk32kg,
},
{
.compatible = "ti,palmas-clk32kgaudio",
.data = &palmas_of_clk32kgaudio,
},
{ },
};
MODULE_DEVICE_TABLE(of, palmas_clks_of_match);
static void palmas_clks_get_clk_data(struct platform_device *pdev,
struct palmas_clock_info *cinfo)
{
struct device_node *node = pdev->dev.of_node;
unsigned int prop;
int ret;
ret = of_property_read_u32(node, "ti,external-sleep-control",
&prop);
if (ret)
return;
switch (prop) {
case PALMAS_CLOCK_DT_EXT_CONTROL_ENABLE1:
prop = PALMAS_EXT_CONTROL_ENABLE1;
break;
case PALMAS_CLOCK_DT_EXT_CONTROL_ENABLE2:
prop = PALMAS_EXT_CONTROL_ENABLE2;
break;
case PALMAS_CLOCK_DT_EXT_CONTROL_NSLEEP:
prop = PALMAS_EXT_CONTROL_NSLEEP;
break;
default:
dev_warn(&pdev->dev, "%pOFn: Invalid ext control option: %u\n",
node, prop);
prop = 0;
break;
}
cinfo->ext_control_pin = prop;
}
static int palmas_clks_init_configure(struct palmas_clock_info *cinfo)
{
int ret;
ret = palmas_update_bits(cinfo->palmas, PALMAS_RESOURCE_BASE,
cinfo->clk_desc->control_reg,
cinfo->clk_desc->sleep_mask, 0);
if (ret < 0) {
dev_err(cinfo->dev, "Reg 0x%02x update failed, %d\n",
cinfo->clk_desc->control_reg, ret);
return ret;
}
if (cinfo->ext_control_pin) {
ret = clk_prepare(cinfo->hw.clk);
if (ret < 0) {
dev_err(cinfo->dev, "Clock prep failed, %d\n", ret);
return ret;
}
ret = palmas_ext_control_req_config(cinfo->palmas,
cinfo->clk_desc->sleep_reqstr_id,
cinfo->ext_control_pin, true);
if (ret < 0) {
dev_err(cinfo->dev, "Ext config for %s failed, %d\n",
cinfo->clk_desc->clk_name, ret);
clk_unprepare(cinfo->hw.clk);
return ret;
}
}
return ret;
}
static int palmas_clks_probe(struct platform_device *pdev)
{
struct palmas *palmas = dev_get_drvdata(pdev->dev.parent);
struct device_node *node = pdev->dev.of_node;
const struct palmas_clks_of_match_data *match_data;
struct palmas_clock_info *cinfo;
int ret;
match_data = of_device_get_match_data(&pdev->dev);
if (!match_data)
return -ENODEV;
cinfo = devm_kzalloc(&pdev->dev, sizeof(*cinfo), GFP_KERNEL);
if (!cinfo)
return -ENOMEM;
palmas_clks_get_clk_data(pdev, cinfo);
platform_set_drvdata(pdev, cinfo);
cinfo->dev = &pdev->dev;
cinfo->palmas = palmas;
cinfo->clk_desc = &match_data->desc;
cinfo->hw.init = &match_data->init;
ret = devm_clk_hw_register(&pdev->dev, &cinfo->hw);
if (ret) {
dev_err(&pdev->dev, "Fail to register clock %s, %d\n",
match_data->desc.clk_name, ret);
return ret;
}
ret = palmas_clks_init_configure(cinfo);
if (ret < 0) {
dev_err(&pdev->dev, "Clock config failed, %d\n", ret);
return ret;
}
ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, &cinfo->hw);
if (ret < 0)
dev_err(&pdev->dev, "Fail to add clock driver, %d\n", ret);
return ret;
}
static void palmas_clks_remove(struct platform_device *pdev)
{
of_clk_del_provider(pdev->dev.of_node);
}
static struct platform_driver palmas_clks_driver = {
.driver = {
.name = "palmas-clk",
.of_match_table = palmas_clks_of_match,
},
.probe = palmas_clks_probe,
.remove_new = palmas_clks_remove,
};
module_platform_driver(palmas_clks_driver);
MODULE_DESCRIPTION("Clock driver for Palmas Series Devices");
MODULE_ALIAS("platform:palmas-clk");
MODULE_AUTHOR("Peter Ujfalusi <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/clk/clk-palmas.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/gfp.h>
struct devm_clk_state {
struct clk *clk;
void (*exit)(struct clk *clk);
};
static void devm_clk_release(struct device *dev, void *res)
{
struct devm_clk_state *state = res;
if (state->exit)
state->exit(state->clk);
clk_put(state->clk);
}
static struct clk *__devm_clk_get(struct device *dev, const char *id,
struct clk *(*get)(struct device *dev, const char *id),
int (*init)(struct clk *clk),
void (*exit)(struct clk *clk))
{
struct devm_clk_state *state;
struct clk *clk;
int ret;
state = devres_alloc(devm_clk_release, sizeof(*state), GFP_KERNEL);
if (!state)
return ERR_PTR(-ENOMEM);
clk = get(dev, id);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
goto err_clk_get;
}
if (init) {
ret = init(clk);
if (ret)
goto err_clk_init;
}
state->clk = clk;
state->exit = exit;
devres_add(dev, state);
return clk;
err_clk_init:
clk_put(clk);
err_clk_get:
devres_free(state);
return ERR_PTR(ret);
}
struct clk *devm_clk_get(struct device *dev, const char *id)
{
return __devm_clk_get(dev, id, clk_get, NULL, NULL);
}
EXPORT_SYMBOL(devm_clk_get);
struct clk *devm_clk_get_prepared(struct device *dev, const char *id)
{
return __devm_clk_get(dev, id, clk_get, clk_prepare, clk_unprepare);
}
EXPORT_SYMBOL_GPL(devm_clk_get_prepared);
struct clk *devm_clk_get_enabled(struct device *dev, const char *id)
{
return __devm_clk_get(dev, id, clk_get,
clk_prepare_enable, clk_disable_unprepare);
}
EXPORT_SYMBOL_GPL(devm_clk_get_enabled);
struct clk *devm_clk_get_optional(struct device *dev, const char *id)
{
return __devm_clk_get(dev, id, clk_get_optional, NULL, NULL);
}
EXPORT_SYMBOL(devm_clk_get_optional);
struct clk *devm_clk_get_optional_prepared(struct device *dev, const char *id)
{
return __devm_clk_get(dev, id, clk_get_optional,
clk_prepare, clk_unprepare);
}
EXPORT_SYMBOL_GPL(devm_clk_get_optional_prepared);
struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id)
{
return __devm_clk_get(dev, id, clk_get_optional,
clk_prepare_enable, clk_disable_unprepare);
}
EXPORT_SYMBOL_GPL(devm_clk_get_optional_enabled);
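/*
 * Usage sketch (hypothetical consumer; the "foo" and "bus" names are
 * illustrative): the *_enabled variants tie the whole prepare/enable
 * lifetime to the device, so neither the error path nor the remove path
 * needs explicit clk calls. Note that the _optional variant may hand back
 * NULL when the clock is not described, which is a valid no-op clock:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct clk *clk;
 *
 *		clk = devm_clk_get_optional_enabled(&pdev->dev, "bus");
 *		if (IS_ERR(clk))
 *			return PTR_ERR(clk);
 *		return 0;
 *	}
 */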
struct clk_bulk_devres {
struct clk_bulk_data *clks;
int num_clks;
};
static void devm_clk_bulk_release(struct device *dev, void *res)
{
struct clk_bulk_devres *devres = res;
clk_bulk_put(devres->num_clks, devres->clks);
}
static int __devm_clk_bulk_get(struct device *dev, int num_clks,
struct clk_bulk_data *clks, bool optional)
{
struct clk_bulk_devres *devres;
int ret;
devres = devres_alloc(devm_clk_bulk_release,
sizeof(*devres), GFP_KERNEL);
if (!devres)
return -ENOMEM;
if (optional)
ret = clk_bulk_get_optional(dev, num_clks, clks);
else
ret = clk_bulk_get(dev, num_clks, clks);
if (!ret) {
devres->clks = clks;
devres->num_clks = num_clks;
devres_add(dev, devres);
} else {
devres_free(devres);
}
return ret;
}
int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
struct clk_bulk_data *clks)
{
return __devm_clk_bulk_get(dev, num_clks, clks, false);
}
EXPORT_SYMBOL_GPL(devm_clk_bulk_get);
int __must_check devm_clk_bulk_get_optional(struct device *dev, int num_clks,
struct clk_bulk_data *clks)
{
return __devm_clk_bulk_get(dev, num_clks, clks, true);
}
EXPORT_SYMBOL_GPL(devm_clk_bulk_get_optional);
static void devm_clk_bulk_release_all(struct device *dev, void *res)
{
struct clk_bulk_devres *devres = res;
clk_bulk_put_all(devres->num_clks, devres->clks);
}
int __must_check devm_clk_bulk_get_all(struct device *dev,
struct clk_bulk_data **clks)
{
struct clk_bulk_devres *devres;
int ret;
devres = devres_alloc(devm_clk_bulk_release_all,
sizeof(*devres), GFP_KERNEL);
if (!devres)
return -ENOMEM;
ret = clk_bulk_get_all(dev, &devres->clks);
if (ret > 0) {
*clks = devres->clks;
devres->num_clks = ret;
devres_add(dev, devres);
} else {
devres_free(devres);
}
return ret;
}
EXPORT_SYMBOL_GPL(devm_clk_bulk_get_all);
static int devm_clk_match(struct device *dev, void *res, void *data)
{
struct clk **c = res;
if (!c || !*c) {
WARN_ON(!c || !*c);
return 0;
}
return *c == data;
}
void devm_clk_put(struct device *dev, struct clk *clk)
{
int ret;
ret = devres_release(dev, devm_clk_release, devm_clk_match, clk);
WARN_ON(ret);
}
EXPORT_SYMBOL(devm_clk_put);
struct clk *devm_get_clk_from_child(struct device *dev,
struct device_node *np, const char *con_id)
{
struct devm_clk_state *state;
struct clk *clk;
state = devres_alloc(devm_clk_release, sizeof(*state), GFP_KERNEL);
if (!state)
return ERR_PTR(-ENOMEM);
clk = of_clk_get_by_name(np, con_id);
if (!IS_ERR(clk)) {
state->clk = clk;
devres_add(dev, state);
} else {
devres_free(state);
}
return clk;
}
EXPORT_SYMBOL(devm_get_clk_from_child);
| linux-master | drivers/clk/clk-devres.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Synopsys HSDK SDP Generic PLL clock driver
*
* Copyright (C) 2017 Synopsys
*/
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#define CGU_PLL_CTRL 0x000 /* ARC PLL control register */
#define CGU_PLL_STATUS 0x004 /* ARC PLL status register */
#define CGU_PLL_FMEAS 0x008 /* ARC PLL frequency measurement register */
#define CGU_PLL_MON 0x00C /* ARC PLL monitor register */
#define CGU_PLL_CTRL_ODIV_SHIFT 2
#define CGU_PLL_CTRL_IDIV_SHIFT 4
#define CGU_PLL_CTRL_FBDIV_SHIFT 9
#define CGU_PLL_CTRL_BAND_SHIFT 20
#define CGU_PLL_CTRL_ODIV_MASK GENMASK(3, CGU_PLL_CTRL_ODIV_SHIFT)
#define CGU_PLL_CTRL_IDIV_MASK GENMASK(8, CGU_PLL_CTRL_IDIV_SHIFT)
#define CGU_PLL_CTRL_FBDIV_MASK GENMASK(15, CGU_PLL_CTRL_FBDIV_SHIFT)
#define CGU_PLL_CTRL_PD BIT(0)
#define CGU_PLL_CTRL_BYPASS BIT(1)
#define CGU_PLL_STATUS_LOCK BIT(0)
#define CGU_PLL_STATUS_ERR BIT(1)
#define HSDK_PLL_MAX_LOCK_TIME 100 /* 100 us */
#define CGU_PLL_SOURCE_MAX 1
#define CORE_IF_CLK_THRESHOLD_HZ 500000000
#define CREG_CORE_IF_CLK_DIV_1 0x0
#define CREG_CORE_IF_CLK_DIV_2 0x1
struct hsdk_pll_cfg {
u32 rate;
u32 idiv;
u32 fbdiv;
u32 odiv;
u32 band;
u32 bypass;
};
static const struct hsdk_pll_cfg asdt_pll_cfg[] = {
{ 100000000, 0, 11, 3, 0, 0 },
{ 133000000, 0, 15, 3, 0, 0 },
{ 200000000, 1, 47, 3, 0, 0 },
{ 233000000, 1, 27, 2, 0, 0 },
{ 300000000, 1, 35, 2, 0, 0 },
{ 333000000, 1, 39, 2, 0, 0 },
{ 400000000, 1, 47, 2, 0, 0 },
{ 500000000, 0, 14, 1, 0, 0 },
{ 600000000, 0, 17, 1, 0, 0 },
{ 700000000, 0, 20, 1, 0, 0 },
{ 800000000, 0, 23, 1, 0, 0 },
{ 900000000, 1, 26, 0, 0, 0 },
{ 1000000000, 1, 29, 0, 0, 0 },
{ 1100000000, 1, 32, 0, 0, 0 },
{ 1200000000, 1, 35, 0, 0, 0 },
{ 1300000000, 1, 38, 0, 0, 0 },
{ 1400000000, 1, 41, 0, 0, 0 },
{ 1500000000, 1, 44, 0, 0, 0 },
{ 1600000000, 1, 47, 0, 0, 0 },
{}
};
static const struct hsdk_pll_cfg hdmi_pll_cfg[] = {
{ 27000000, 0, 0, 0, 0, 1 },
{ 148500000, 0, 21, 3, 0, 0 },
{ 297000000, 0, 21, 2, 0, 0 },
{ 540000000, 0, 19, 1, 0, 0 },
{ 594000000, 0, 21, 1, 0, 0 },
{}
};
struct hsdk_pll_clk {
struct clk_hw hw;
void __iomem *regs;
void __iomem *spec_regs;
const struct hsdk_pll_devdata *pll_devdata;
struct device *dev;
};
struct hsdk_pll_devdata {
const struct hsdk_pll_cfg *pll_cfg;
int (*update_rate)(struct hsdk_pll_clk *clk, unsigned long rate,
const struct hsdk_pll_cfg *cfg);
};
static int hsdk_pll_core_update_rate(struct hsdk_pll_clk *, unsigned long,
const struct hsdk_pll_cfg *);
static int hsdk_pll_comm_update_rate(struct hsdk_pll_clk *, unsigned long,
const struct hsdk_pll_cfg *);
static const struct hsdk_pll_devdata core_pll_devdata = {
.pll_cfg = asdt_pll_cfg,
.update_rate = hsdk_pll_core_update_rate,
};
static const struct hsdk_pll_devdata sdt_pll_devdata = {
.pll_cfg = asdt_pll_cfg,
.update_rate = hsdk_pll_comm_update_rate,
};
static const struct hsdk_pll_devdata hdmi_pll_devdata = {
.pll_cfg = hdmi_pll_cfg,
.update_rate = hsdk_pll_comm_update_rate,
};
static inline void hsdk_pll_write(struct hsdk_pll_clk *clk, u32 reg, u32 val)
{
iowrite32(val, clk->regs + reg);
}
static inline u32 hsdk_pll_read(struct hsdk_pll_clk *clk, u32 reg)
{
return ioread32(clk->regs + reg);
}
static inline void hsdk_pll_set_cfg(struct hsdk_pll_clk *clk,
const struct hsdk_pll_cfg *cfg)
{
u32 val = 0;
if (cfg->bypass) {
val = hsdk_pll_read(clk, CGU_PLL_CTRL);
val |= CGU_PLL_CTRL_BYPASS;
} else {
/* Powerdown and Bypass bits should be cleared */
val |= cfg->idiv << CGU_PLL_CTRL_IDIV_SHIFT;
val |= cfg->fbdiv << CGU_PLL_CTRL_FBDIV_SHIFT;
val |= cfg->odiv << CGU_PLL_CTRL_ODIV_SHIFT;
val |= cfg->band << CGU_PLL_CTRL_BAND_SHIFT;
}
dev_dbg(clk->dev, "write configuration: %#x\n", val);
hsdk_pll_write(clk, CGU_PLL_CTRL, val);
}
static inline bool hsdk_pll_is_locked(struct hsdk_pll_clk *clk)
{
return !!(hsdk_pll_read(clk, CGU_PLL_STATUS) & CGU_PLL_STATUS_LOCK);
}
static inline bool hsdk_pll_is_err(struct hsdk_pll_clk *clk)
{
return !!(hsdk_pll_read(clk, CGU_PLL_STATUS) & CGU_PLL_STATUS_ERR);
}
static inline struct hsdk_pll_clk *to_hsdk_pll_clk(struct clk_hw *hw)
{
return container_of(hw, struct hsdk_pll_clk, hw);
}
static unsigned long hsdk_pll_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
u32 val;
u64 rate;
u32 idiv, fbdiv, odiv;
struct hsdk_pll_clk *clk = to_hsdk_pll_clk(hw);
val = hsdk_pll_read(clk, CGU_PLL_CTRL);
dev_dbg(clk->dev, "current configuration: %#x\n", val);
/* Check if PLL is bypassed */
if (val & CGU_PLL_CTRL_BYPASS)
return parent_rate;
/* Check if PLL is disabled */
if (val & CGU_PLL_CTRL_PD)
return 0;
/* input divider = reg.idiv + 1 */
idiv = 1 + ((val & CGU_PLL_CTRL_IDIV_MASK) >> CGU_PLL_CTRL_IDIV_SHIFT);
/* fb divider = 2*(reg.fbdiv + 1) */
fbdiv = 2 * (1 + ((val & CGU_PLL_CTRL_FBDIV_MASK) >> CGU_PLL_CTRL_FBDIV_SHIFT));
/* output divider = 2^(reg.odiv) */
odiv = 1 << ((val & CGU_PLL_CTRL_ODIV_MASK) >> CGU_PLL_CTRL_ODIV_SHIFT);
rate = (u64)parent_rate * fbdiv;
do_div(rate, idiv * odiv);
return rate;
}
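/*
 * Worked example, assuming a 33.33 MHz PLL reference (an assumption for
 * illustration): for the 1 GHz asdt_pll_cfg entry {1000000000, 1, 29, 0},
 * idiv = 1 + 1 = 2, fbdiv = 2 * (29 + 1) = 60 and odiv = 2^0 = 1, so
 * rate = 33333333 * 60 / (2 * 1) ~= 1 GHz.
 */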
static long hsdk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
int i;
unsigned long best_rate;
struct hsdk_pll_clk *clk = to_hsdk_pll_clk(hw);
const struct hsdk_pll_cfg *pll_cfg = clk->pll_devdata->pll_cfg;
if (pll_cfg[0].rate == 0)
return -EINVAL;
best_rate = pll_cfg[0].rate;
for (i = 1; pll_cfg[i].rate != 0; i++) {
if (abs(rate - pll_cfg[i].rate) < abs(rate - best_rate))
best_rate = pll_cfg[i].rate;
}
dev_dbg(clk->dev, "chosen best rate: %lu\n", best_rate);
return best_rate;
}
static int hsdk_pll_comm_update_rate(struct hsdk_pll_clk *clk,
unsigned long rate,
const struct hsdk_pll_cfg *cfg)
{
hsdk_pll_set_cfg(clk, cfg);
/*
* Wait until the CGU relocks, then check the error status.
* If the CGU is still unlocked after the timeout, return an error.
*/
udelay(HSDK_PLL_MAX_LOCK_TIME);
if (!hsdk_pll_is_locked(clk))
return -ETIMEDOUT;
if (hsdk_pll_is_err(clk))
return -EINVAL;
return 0;
}
static int hsdk_pll_core_update_rate(struct hsdk_pll_clk *clk,
unsigned long rate,
const struct hsdk_pll_cfg *cfg)
{
/*
* When core clock exceeds 500MHz, the divider for the interface
* clock must be programmed to div-by-2.
*/
if (rate > CORE_IF_CLK_THRESHOLD_HZ)
iowrite32(CREG_CORE_IF_CLK_DIV_2, clk->spec_regs);
hsdk_pll_set_cfg(clk, cfg);
/*
* Wait until the CGU relocks, then check the error status.
* If the CGU is still unlocked after the timeout, return an error.
*/
udelay(HSDK_PLL_MAX_LOCK_TIME);
if (!hsdk_pll_is_locked(clk))
return -ETIMEDOUT;
if (hsdk_pll_is_err(clk))
return -EINVAL;
/*
* Program the divider back to div-by-1 if we successfully set the core
* clock below the 500MHz threshold.
*/
if (rate <= CORE_IF_CLK_THRESHOLD_HZ)
iowrite32(CREG_CORE_IF_CLK_DIV_1, clk->spec_regs);
return 0;
}
static int hsdk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
int i;
struct hsdk_pll_clk *clk = to_hsdk_pll_clk(hw);
const struct hsdk_pll_cfg *pll_cfg = clk->pll_devdata->pll_cfg;
for (i = 0; pll_cfg[i].rate != 0; i++) {
if (pll_cfg[i].rate == rate) {
return clk->pll_devdata->update_rate(clk, rate,
&pll_cfg[i]);
}
}
dev_err(clk->dev, "invalid rate=%ld, parent_rate=%ld\n", rate,
parent_rate);
return -EINVAL;
}
static const struct clk_ops hsdk_pll_ops = {
.recalc_rate = hsdk_pll_recalc_rate,
.round_rate = hsdk_pll_round_rate,
.set_rate = hsdk_pll_set_rate,
};
static int hsdk_pll_clk_probe(struct platform_device *pdev)
{
int ret;
const char *parent_name;
unsigned int num_parents;
struct hsdk_pll_clk *pll_clk;
struct clk_init_data init = { };
struct device *dev = &pdev->dev;
pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
if (!pll_clk)
return -ENOMEM;
pll_clk->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pll_clk->regs))
return PTR_ERR(pll_clk->regs);
init.name = dev->of_node->name;
init.ops = &hsdk_pll_ops;
parent_name = of_clk_get_parent_name(dev->of_node, 0);
init.parent_names = &parent_name;
num_parents = of_clk_get_parent_count(dev->of_node);
if (num_parents == 0 || num_parents > CGU_PLL_SOURCE_MAX) {
dev_err(dev, "wrong clock parents number: %u\n", num_parents);
return -EINVAL;
}
init.num_parents = num_parents;
pll_clk->hw.init = &init;
pll_clk->dev = dev;
pll_clk->pll_devdata = of_device_get_match_data(dev);
if (!pll_clk->pll_devdata) {
dev_err(dev, "No OF match data provided\n");
return -EINVAL;
}
ret = devm_clk_hw_register(dev, &pll_clk->hw);
if (ret) {
dev_err(dev, "failed to register %s clock\n", init.name);
return ret;
}
return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
&pll_clk->hw);
}
static void __init of_hsdk_pll_clk_setup(struct device_node *node)
{
int ret;
const char *parent_name;
unsigned int num_parents;
struct hsdk_pll_clk *pll_clk;
struct clk_init_data init = { };
pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL);
if (!pll_clk)
return;
pll_clk->regs = of_iomap(node, 0);
if (!pll_clk->regs) {
pr_err("failed to map pll registers\n");
goto err_free_pll_clk;
}
pll_clk->spec_regs = of_iomap(node, 1);
if (!pll_clk->spec_regs) {
pr_err("failed to map pll registers\n");
goto err_unmap_comm_regs;
}
init.name = node->name;
init.ops = &hsdk_pll_ops;
parent_name = of_clk_get_parent_name(node, 0);
init.parent_names = &parent_name;
num_parents = of_clk_get_parent_count(node);
if (num_parents > CGU_PLL_SOURCE_MAX) {
pr_err("too much clock parents: %u\n", num_parents);
goto err_unmap_spec_regs;
}
init.num_parents = num_parents;
pll_clk->hw.init = &init;
pll_clk->pll_devdata = &core_pll_devdata;
ret = clk_hw_register(NULL, &pll_clk->hw);
if (ret) {
pr_err("failed to register %pOFn clock\n", node);
goto err_unmap_spec_regs;
}
ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, &pll_clk->hw);
if (ret) {
pr_err("failed to add hw provider for %pOFn clock\n", node);
goto err_unmap_spec_regs;
}
return;
err_unmap_spec_regs:
iounmap(pll_clk->spec_regs);
err_unmap_comm_regs:
iounmap(pll_clk->regs);
err_free_pll_clk:
kfree(pll_clk);
}
/* The core PLL is needed early for the ARC CPU timers */
CLK_OF_DECLARE(hsdk_pll_clock, "snps,hsdk-core-pll-clock",
of_hsdk_pll_clk_setup);
static const struct of_device_id hsdk_pll_clk_id[] = {
{ .compatible = "snps,hsdk-gp-pll-clock", .data = &sdt_pll_devdata},
{ .compatible = "snps,hsdk-hdmi-pll-clock", .data = &hdmi_pll_devdata},
{ }
};
static struct platform_driver hsdk_pll_clk_driver = {
.driver = {
.name = "hsdk-gp-pll-clock",
.of_match_table = hsdk_pll_clk_id,
},
.probe = hsdk_pll_clk_probe,
};
builtin_platform_driver(hsdk_pll_clk_driver);
| linux-master | drivers/clk/clk-hsdk-pll.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Driver for Silicon Labs Si570/Si571 Programmable XO/VCXO
*
* Copyright (C) 2010, 2011 Ericsson AB.
* Copyright (C) 2011 Guenter Roeck.
* Copyright (C) 2011 - 2021 Xilinx Inc.
*
* Author: Guenter Roeck <[email protected]>
* Sören Brinkmann <[email protected]>
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
#include <linux/slab.h>
/* Si570 registers */
#define SI570_REG_HS_N1 7
#define SI570_REG_N1_RFREQ0 8
#define SI570_REG_RFREQ1 9
#define SI570_REG_RFREQ2 10
#define SI570_REG_RFREQ3 11
#define SI570_REG_RFREQ4 12
#define SI570_REG_CONTROL 135
#define SI570_REG_FREEZE_DCO 137
#define SI570_DIV_OFFSET_7PPM 6
#define HS_DIV_SHIFT 5
#define HS_DIV_MASK 0xe0
#define HS_DIV_OFFSET 4
#define N1_6_2_MASK 0x1f
#define N1_1_0_MASK 0xc0
#define RFREQ_37_32_MASK 0x3f
#define SI570_MIN_FREQ 10000000L
#define SI570_MAX_FREQ 1417500000L
#define SI598_MAX_FREQ 525000000L
#define FDCO_MIN 4850000000LL
#define FDCO_MAX 5670000000LL
#define SI570_CNTRL_RECALL (1 << 0)
#define SI570_CNTRL_FREEZE_M (1 << 5)
#define SI570_CNTRL_NEWFREQ (1 << 6)
#define SI570_FREEZE_DCO (1 << 4)
/**
* struct clk_si570:
* @hw: Clock hw struct
* @regmap: Device's regmap
 * @div_offset: Register offset for dividers
* @max_freq: Maximum frequency for this device
* @fxtal: Factory xtal frequency
* @n1: Clock divider N1
* @hs_div: Clock divider HSDIV
* @rfreq: Clock multiplier RFREQ
* @frequency: Current output frequency
* @i2c_client: I2C client pointer
*/
struct clk_si570 {
struct clk_hw hw;
struct regmap *regmap;
unsigned int div_offset;
u64 max_freq;
u64 fxtal;
unsigned int n1;
unsigned int hs_div;
u64 rfreq;
u64 frequency;
struct i2c_client *i2c_client;
};
#define to_clk_si570(_hw) container_of(_hw, struct clk_si570, hw)
enum clk_si570_variant {
si57x,
si59x
};
/**
* si570_get_divs() - Read clock dividers from HW
* @data: Pointer to struct clk_si570
* @rfreq: Fractional multiplier (output)
* @n1: Divider N1 (output)
* @hs_div: Divider HSDIV (output)
* Returns 0 on success, negative errno otherwise.
*
* Retrieve clock dividers and multipliers from the HW.
*/
static int si570_get_divs(struct clk_si570 *data, u64 *rfreq,
unsigned int *n1, unsigned int *hs_div)
{
int err;
u8 reg[6];
u64 tmp;
err = regmap_bulk_read(data->regmap, SI570_REG_HS_N1 + data->div_offset,
reg, ARRAY_SIZE(reg));
if (err)
return err;
*hs_div = ((reg[0] & HS_DIV_MASK) >> HS_DIV_SHIFT) + HS_DIV_OFFSET;
*n1 = ((reg[0] & N1_6_2_MASK) << 2) + ((reg[1] & N1_1_0_MASK) >> 6) + 1;
/* Handle invalid cases: N1 must be 1 or an even number */
if (*n1 > 1)
*n1 &= ~1;
tmp = reg[1] & RFREQ_37_32_MASK;
tmp = (tmp << 8) + reg[2];
tmp = (tmp << 8) + reg[3];
tmp = (tmp << 8) + reg[4];
tmp = (tmp << 8) + reg[5];
*rfreq = tmp;
return 0;
}
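/*
 * Decoding example (illustrative register values): reg[0] = 0x01 and
 * reg[1] = 0xc2 yield hs_div = ((0x01 & 0xe0) >> 5) + 4 = 4 and
 * n1 = ((0x01 & 0x1f) << 2) + ((0xc2 & 0xc0) >> 6) + 1 = 8, while the
 * remaining 38 bits accumulated above form the fixed-point RFREQ value.
 */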
/**
* si570_get_defaults() - Get default values
* @data: Driver data structure
* @fout: Factory frequency output
* @skip_recall: If true, don't recall NVM into RAM
* Returns 0 on success, negative errno otherwise.
*/
static int si570_get_defaults(struct clk_si570 *data, u64 fout,
bool skip_recall)
{
int err;
u64 fdco;
if (!skip_recall)
regmap_write(data->regmap, SI570_REG_CONTROL,
SI570_CNTRL_RECALL);
err = si570_get_divs(data, &data->rfreq, &data->n1, &data->hs_div);
if (err)
return err;
/*
* Accept optional precision loss to avoid arithmetic overflows.
* Acceptable per Silicon Labs Application Note AN334.
*/
fdco = fout * data->n1 * data->hs_div;
if (fdco >= (1LL << 36))
data->fxtal = div64_u64(fdco << 24, data->rfreq >> 4);
else
data->fxtal = div64_u64(fdco << 28, data->rfreq);
data->frequency = fout;
return 0;
}
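/*
 * Worked example (illustrative, nominal Si570 numbers): with
 * fout = 156250000 Hz, n1 = 8 and hs_div = 4 the DCO runs at
 * fdco = 156250000 * 8 * 4 = 5000000000 Hz, inside [FDCO_MIN, FDCO_MAX],
 * and fxtal = (fdco << 28) / rfreq recovers the ~114.285 MHz crystal.
 */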
/**
* si570_update_rfreq() - Update clock multiplier
* @data: Driver data structure
* Passes on regmap_bulk_write() return value.
*/
static int si570_update_rfreq(struct clk_si570 *data)
{
u8 reg[5];
reg[0] = ((data->n1 - 1) << 6) |
((data->rfreq >> 32) & RFREQ_37_32_MASK);
reg[1] = (data->rfreq >> 24) & 0xff;
reg[2] = (data->rfreq >> 16) & 0xff;
reg[3] = (data->rfreq >> 8) & 0xff;
reg[4] = data->rfreq & 0xff;
return regmap_bulk_write(data->regmap, SI570_REG_N1_RFREQ0 +
data->div_offset, reg, ARRAY_SIZE(reg));
}
/**
 * si570_calc_divs() - Calculate clock dividers
* @frequency: Target frequency
* @data: Driver data structure
 * @out_rfreq: RFREQ fractional multiplier (output)
* @out_n1: Clock divider N1 (output)
* @out_hs_div: Clock divider HSDIV (output)
* Returns 0 on success, negative errno otherwise.
*
* Calculate the clock dividers (@out_hs_div, @out_n1) and clock multiplier
* (@out_rfreq) for a given target @frequency.
*/
static int si570_calc_divs(unsigned long frequency, struct clk_si570 *data,
u64 *out_rfreq, unsigned int *out_n1, unsigned int *out_hs_div)
{
int i;
unsigned int n1, hs_div;
u64 fdco, best_fdco = ULLONG_MAX;
static const uint8_t si570_hs_div_values[] = { 11, 9, 7, 6, 5, 4 };
for (i = 0; i < ARRAY_SIZE(si570_hs_div_values); i++) {
hs_div = si570_hs_div_values[i];
/* Calculate lowest possible value for n1 */
n1 = div_u64(div_u64(FDCO_MIN, hs_div), frequency);
if (!n1 || (n1 & 1))
n1++;
while (n1 <= 128) {
fdco = (u64)frequency * (u64)hs_div * (u64)n1;
if (fdco > FDCO_MAX)
break;
if (fdco >= FDCO_MIN && fdco < best_fdco) {
*out_n1 = n1;
*out_hs_div = hs_div;
*out_rfreq = div64_u64(fdco << 28, data->fxtal);
best_fdco = fdco;
}
n1 += (n1 == 1 ? 1 : 2);
}
}
if (best_fdco == ULLONG_MAX)
return -EINVAL;
return 0;
}
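/*
 * Worked example (illustrative): for frequency = 156250000 Hz the search
 * settles on hs_div = 4 and n1 = 8, since fdco = 156250000 * 4 * 8 = 5 GHz
 * is the lowest DCO frequency within [FDCO_MIN, FDCO_MAX] over all valid
 * divider combinations; rfreq then follows as (fdco << 28) / fxtal.
 */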
static unsigned long si570_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
int err;
u64 rfreq, rate;
unsigned int n1, hs_div;
struct clk_si570 *data = to_clk_si570(hw);
err = si570_get_divs(data, &rfreq, &n1, &hs_div);
if (err) {
dev_err(&data->i2c_client->dev, "unable to recalc rate\n");
return data->frequency;
}
rfreq = div_u64(rfreq, hs_div * n1);
rate = (data->fxtal * rfreq) >> 28;
return rate;
}
static long si570_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
int err;
u64 rfreq;
unsigned int n1, hs_div;
struct clk_si570 *data = to_clk_si570(hw);
if (!rate)
return 0;
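	/*
	 * Changes below 3500 ppm (35 / 10000) can be absorbed by rescaling
	 * RFREQ alone; larger changes need a full divider recalculation.
	 */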
if (div64_u64(abs(rate - data->frequency) * 10000LL,
data->frequency) < 35) {
rfreq = div64_u64((data->rfreq * rate) +
div64_u64(data->frequency, 2), data->frequency);
n1 = data->n1;
hs_div = data->hs_div;
} else {
err = si570_calc_divs(rate, data, &rfreq, &n1, &hs_div);
if (err) {
dev_err(&data->i2c_client->dev,
"unable to round rate\n");
return 0;
}
}
return rate;
}
/**
* si570_set_frequency() - Adjust output frequency
* @data: Driver data structure
* @frequency: Target frequency
* Returns 0 on success.
*
* Update output frequency for big frequency changes (> 3,500 ppm).
*/
static int si570_set_frequency(struct clk_si570 *data, unsigned long frequency)
{
int err;
err = si570_calc_divs(frequency, data, &data->rfreq, &data->n1,
&data->hs_div);
if (err)
return err;
/*
* The DCO reg should be accessed with a read-modify-write operation
* per AN334
*/
regmap_write(data->regmap, SI570_REG_FREEZE_DCO, SI570_FREEZE_DCO);
regmap_write(data->regmap, SI570_REG_HS_N1 + data->div_offset,
((data->hs_div - HS_DIV_OFFSET) << HS_DIV_SHIFT) |
(((data->n1 - 1) >> 2) & N1_6_2_MASK));
si570_update_rfreq(data);
regmap_write(data->regmap, SI570_REG_FREEZE_DCO, 0);
regmap_write(data->regmap, SI570_REG_CONTROL, SI570_CNTRL_NEWFREQ);
/* Applying a new frequency can take up to 10ms */
usleep_range(10000, 12000);
return 0;
}
/**
* si570_set_frequency_small() - Adjust output frequency
* @data: Driver data structure
* @frequency: Target frequency
* Returns 0 on success.
*
* Update output frequency for small frequency changes (< 3,500 ppm).
*/
static int si570_set_frequency_small(struct clk_si570 *data,
unsigned long frequency)
{
/*
 * This is a re-implementation of DIV_ROUND_CLOSEST using
 * the div64_u64 function in lieu of letting the compiler
 * insert EABI calls.
*/
data->rfreq = div64_u64((data->rfreq * frequency) +
div_u64(data->frequency, 2), data->frequency);
regmap_write(data->regmap, SI570_REG_CONTROL, SI570_CNTRL_FREEZE_M);
si570_update_rfreq(data);
regmap_write(data->regmap, SI570_REG_CONTROL, 0);
/* Applying a new frequency (small change) can take up to 100us */
usleep_range(100, 200);
return 0;
}
static int si570_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_si570 *data = to_clk_si570(hw);
struct i2c_client *client = data->i2c_client;
int err;
if (rate < SI570_MIN_FREQ || rate > data->max_freq) {
dev_err(&client->dev,
"requested frequency %lu Hz is out of range\n", rate);
return -EINVAL;
}
if (div64_u64(abs(rate - data->frequency) * 10000LL,
data->frequency) < 35)
err = si570_set_frequency_small(data, rate);
else
err = si570_set_frequency(data, rate);
if (err)
return err;
data->frequency = rate;
return 0;
}
static const struct clk_ops si570_clk_ops = {
.recalc_rate = si570_recalc_rate,
.round_rate = si570_round_rate,
.set_rate = si570_set_rate,
};
static bool si570_regmap_is_volatile(struct device *dev, unsigned int reg)
{
switch (reg) {
case SI570_REG_CONTROL:
return true;
default:
return false;
}
}
static bool si570_regmap_is_writeable(struct device *dev, unsigned int reg)
{
switch (reg) {
case SI570_REG_HS_N1 ... (SI570_REG_RFREQ4 + SI570_DIV_OFFSET_7PPM):
case SI570_REG_CONTROL:
case SI570_REG_FREEZE_DCO:
return true;
default:
return false;
}
}
static const struct regmap_config si570_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.cache_type = REGCACHE_RBTREE,
.max_register = 137,
.writeable_reg = si570_regmap_is_writeable,
.volatile_reg = si570_regmap_is_volatile,
};
static const struct i2c_device_id si570_id[] = {
{ "si570", si57x },
{ "si571", si57x },
{ "si598", si59x },
{ "si599", si59x },
{ }
};
MODULE_DEVICE_TABLE(i2c, si570_id);
static int si570_probe(struct i2c_client *client)
{
struct clk_si570 *data;
struct clk_init_data init;
const struct i2c_device_id *id = i2c_match_id(si570_id, client);
u32 initial_fout, factory_fout, stability;
bool skip_recall;
int err;
enum clk_si570_variant variant = id->driver_data;
data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
init.ops = &si570_clk_ops;
init.flags = 0;
init.num_parents = 0;
data->hw.init = &init;
data->i2c_client = client;
if (variant == si57x) {
err = of_property_read_u32(client->dev.of_node,
"temperature-stability", &stability);
if (err) {
dev_err(&client->dev,
"'temperature-stability' property missing\n");
return err;
}
/* adjust register offsets for 7ppm devices */
if (stability == 7)
data->div_offset = SI570_DIV_OFFSET_7PPM;
data->max_freq = SI570_MAX_FREQ;
} else {
data->max_freq = SI598_MAX_FREQ;
}
if (of_property_read_string(client->dev.of_node, "clock-output-names",
&init.name))
init.name = client->dev.of_node->name;
err = of_property_read_u32(client->dev.of_node, "factory-fout",
&factory_fout);
if (err) {
dev_err(&client->dev, "'factory-fout' property missing\n");
return err;
}
skip_recall = of_property_read_bool(client->dev.of_node,
"silabs,skip-recall");
data->regmap = devm_regmap_init_i2c(client, &si570_regmap_config);
if (IS_ERR(data->regmap)) {
dev_err(&client->dev, "failed to allocate register map\n");
return PTR_ERR(data->regmap);
}
i2c_set_clientdata(client, data);
err = si570_get_defaults(data, factory_fout, skip_recall);
if (err)
return err;
err = devm_clk_hw_register(&client->dev, &data->hw);
if (err) {
dev_err(&client->dev, "clock registration failed\n");
return err;
}
err = devm_of_clk_add_hw_provider(&client->dev, of_clk_hw_simple_get,
&data->hw);
if (err) {
dev_err(&client->dev, "unable to add clk provider\n");
return err;
}
/* Read the requested initial output frequency from device tree */
if (!of_property_read_u32(client->dev.of_node, "clock-frequency",
&initial_fout)) {
err = clk_set_rate(data->hw.clk, initial_fout);
if (err)
return err;
}
/* Display a message indicating that we've successfully registered */
dev_info(&client->dev, "registered, current frequency %llu Hz\n",
data->frequency);
return 0;
}
static const struct of_device_id clk_si570_of_match[] = {
{ .compatible = "silabs,si570" },
{ .compatible = "silabs,si571" },
{ .compatible = "silabs,si598" },
{ .compatible = "silabs,si599" },
{ },
};
MODULE_DEVICE_TABLE(of, clk_si570_of_match);
static struct i2c_driver si570_driver = {
.driver = {
.name = "si570",
.of_match_table = clk_si570_of_match,
},
.probe = si570_probe,
.id_table = si570_id,
};
module_i2c_driver(si570_driver);
MODULE_AUTHOR("Guenter Roeck <[email protected]>");
MODULE_AUTHOR("Soeren Brinkmann <[email protected]>");
MODULE_DESCRIPTION("Si570 driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/clk/clk-si570.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Cirrus Logic CLPS711X CLK driver
*
* Copyright (C) 2014 Alexander Shiyan <[email protected]>
*/
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/mfd/syscon/clps711x.h>
#include <dt-bindings/clock/clps711x-clock.h>
#define CLPS711X_SYSCON1 (0x0100)
#define CLPS711X_SYSCON2 (0x1100)
#define CLPS711X_SYSFLG2 (CLPS711X_SYSCON2 + SYSFLG_OFFSET)
#define CLPS711X_PLLR (0xa5a8)
#define CLPS711X_EXT_FREQ (13000000)
#define CLPS711X_OSC_FREQ (3686400)
static const struct clk_div_table spi_div_table[] = {
{ .val = 0, .div = 32, },
{ .val = 1, .div = 8, },
{ .val = 2, .div = 2, },
{ .val = 3, .div = 1, },
{ /* sentinel */ }
};
static const struct clk_div_table timer_div_table[] = {
{ .val = 0, .div = 256, },
{ .val = 1, .div = 1, },
{ /* sentinel */ }
};
struct clps711x_clk {
spinlock_t lock;
struct clk_hw_onecell_data clk_data;
};
static void __init clps711x_clk_init_dt(struct device_node *np)
{
u32 tmp, f_cpu, f_pll, f_bus, f_tim, f_pwm, f_spi, fref = 0;
struct clps711x_clk *clps711x_clk;
void __iomem *base;
WARN_ON(of_property_read_u32(np, "startup-frequency", &fref));
base = of_iomap(np, 0);
BUG_ON(!base);
clps711x_clk = kzalloc(struct_size(clps711x_clk, clk_data.hws,
CLPS711X_CLK_MAX),
GFP_KERNEL);
BUG_ON(!clps711x_clk);
spin_lock_init(&clps711x_clk->lock);
/* Read PLL multiplier value and sanity check */
tmp = readl(base + CLPS711X_PLLR) >> 24;
if (((tmp >= 10) && (tmp <= 50)) || !fref)
f_pll = DIV_ROUND_UP(CLPS711X_OSC_FREQ * tmp, 2);
else
f_pll = fref;
tmp = readl(base + CLPS711X_SYSFLG2);
if (tmp & SYSFLG2_CKMODE) {
f_cpu = CLPS711X_EXT_FREQ;
f_bus = CLPS711X_EXT_FREQ;
f_spi = DIV_ROUND_CLOSEST(CLPS711X_EXT_FREQ, 96);
f_pll = 0;
f_pwm = DIV_ROUND_CLOSEST(CLPS711X_EXT_FREQ, 128);
} else {
f_cpu = f_pll;
if (f_cpu > 36864000)
f_bus = DIV_ROUND_UP(f_cpu, 2);
else
f_bus = 36864000 / 2;
f_spi = DIV_ROUND_CLOSEST(f_cpu, 576);
f_pwm = DIV_ROUND_CLOSEST(f_cpu, 768);
}
if (tmp & SYSFLG2_CKMODE) {
if (readl(base + CLPS711X_SYSCON2) & SYSCON2_OSTB)
f_tim = DIV_ROUND_CLOSEST(CLPS711X_EXT_FREQ, 26);
else
f_tim = DIV_ROUND_CLOSEST(CLPS711X_EXT_FREQ, 24);
} else
f_tim = DIV_ROUND_CLOSEST(f_cpu, 144);
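	/*
	 * Worked example (illustrative, PLL multiplier 50, CKMODE clear):
	 * f_pll = DIV_ROUND_UP(3686400 * 50, 2) = 92160000 Hz, hence
	 * f_bus = 46080000 Hz, f_spi = 160000 Hz, f_pwm = 120000 Hz and
	 * f_tim = 92160000 / 144 = 640000 Hz.
	 */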
tmp = readl(base + CLPS711X_SYSCON1);
/* Timer1 in free running mode.
* Counter will wrap around to 0xffff when it underflows
* and will continue to count down.
*/
tmp &= ~(SYSCON1_TC1M | SYSCON1_TC1S);
/* Timer2 in prescale mode.
 * The value written is automatically reloaded when
* the counter underflows.
*/
tmp |= SYSCON1_TC2M | SYSCON1_TC2S;
writel(tmp, base + CLPS711X_SYSCON1);
clps711x_clk->clk_data.hws[CLPS711X_CLK_DUMMY] =
clk_hw_register_fixed_rate(NULL, "dummy", NULL, 0, 0);
clps711x_clk->clk_data.hws[CLPS711X_CLK_CPU] =
clk_hw_register_fixed_rate(NULL, "cpu", NULL, 0, f_cpu);
clps711x_clk->clk_data.hws[CLPS711X_CLK_BUS] =
clk_hw_register_fixed_rate(NULL, "bus", NULL, 0, f_bus);
clps711x_clk->clk_data.hws[CLPS711X_CLK_PLL] =
clk_hw_register_fixed_rate(NULL, "pll", NULL, 0, f_pll);
clps711x_clk->clk_data.hws[CLPS711X_CLK_TIMERREF] =
clk_hw_register_fixed_rate(NULL, "timer_ref", NULL, 0, f_tim);
clps711x_clk->clk_data.hws[CLPS711X_CLK_TIMER1] =
clk_hw_register_divider_table(NULL, "timer1", "timer_ref", 0,
base + CLPS711X_SYSCON1, 5, 1, 0,
timer_div_table, &clps711x_clk->lock);
clps711x_clk->clk_data.hws[CLPS711X_CLK_TIMER2] =
clk_hw_register_divider_table(NULL, "timer2", "timer_ref", 0,
base + CLPS711X_SYSCON1, 7, 1, 0,
timer_div_table, &clps711x_clk->lock);
clps711x_clk->clk_data.hws[CLPS711X_CLK_PWM] =
clk_hw_register_fixed_rate(NULL, "pwm", NULL, 0, f_pwm);
clps711x_clk->clk_data.hws[CLPS711X_CLK_SPIREF] =
clk_hw_register_fixed_rate(NULL, "spi_ref", NULL, 0, f_spi);
clps711x_clk->clk_data.hws[CLPS711X_CLK_SPI] =
clk_hw_register_divider_table(NULL, "spi", "spi_ref", 0,
base + CLPS711X_SYSCON1, 16, 2, 0,
spi_div_table, &clps711x_clk->lock);
clps711x_clk->clk_data.hws[CLPS711X_CLK_UART] =
clk_hw_register_fixed_factor(NULL, "uart", "bus", 0, 1, 10);
clps711x_clk->clk_data.hws[CLPS711X_CLK_TICK] =
clk_hw_register_fixed_rate(NULL, "tick", NULL, 0, 64);
for (tmp = 0; tmp < CLPS711X_CLK_MAX; tmp++)
if (IS_ERR(clps711x_clk->clk_data.hws[tmp]))
pr_err("clk %i: register failed with %ld\n",
tmp, PTR_ERR(clps711x_clk->clk_data.hws[tmp]));
clps711x_clk->clk_data.num = CLPS711X_CLK_MAX;
of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
&clps711x_clk->clk_data);
}
CLK_OF_DECLARE(clps711x, "cirrus,ep7209-clk", clps711x_clk_init_dt);
| linux-master | drivers/clk/clk-clps711x.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (C) 2013 Daniel Tang <[email protected]>
*/
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#define MHZ (1000 * 1000)
#define BASE_CPU_SHIFT 1
#define BASE_CPU_MASK 0x7F
#define CPU_AHB_SHIFT 12
#define CPU_AHB_MASK 0x07
#define FIXED_BASE_SHIFT 8
#define FIXED_BASE_MASK 0x01
#define CLASSIC_BASE_SHIFT 16
#define CLASSIC_BASE_MASK 0x1F
#define CX_BASE_SHIFT 15
#define CX_BASE_MASK 0x3F
#define CX_UNKNOWN_SHIFT 21
#define CX_UNKNOWN_MASK 0x03
struct nspire_clk_info {
u32 base_clock;
u16 base_cpu_ratio;
u16 base_ahb_ratio;
};
#define EXTRACT(var, prop) (((var)>>prop##_SHIFT) & prop##_MASK)
static void nspire_clkinfo_cx(u32 val, struct nspire_clk_info *clk)
{
if (EXTRACT(val, FIXED_BASE))
clk->base_clock = 48 * MHZ;
else
clk->base_clock = 6 * EXTRACT(val, CX_BASE) * MHZ;
clk->base_cpu_ratio = EXTRACT(val, BASE_CPU) * EXTRACT(val, CX_UNKNOWN);
clk->base_ahb_ratio = clk->base_cpu_ratio * (EXTRACT(val, CPU_AHB) + 1);
}
static void nspire_clkinfo_classic(u32 val, struct nspire_clk_info *clk)
{
if (EXTRACT(val, FIXED_BASE))
clk->base_clock = 27 * MHZ;
else
clk->base_clock = (300 - 6 * EXTRACT(val, CLASSIC_BASE)) * MHZ;
clk->base_cpu_ratio = EXTRACT(val, BASE_CPU) * 2;
clk->base_ahb_ratio = clk->base_cpu_ratio * (EXTRACT(val, CPU_AHB) + 1);
}
static void __init nspire_ahbdiv_setup(struct device_node *node,
void (*get_clkinfo)(u32, struct nspire_clk_info *))
{
u32 val;
void __iomem *io;
struct clk_hw *hw;
const char *clk_name = node->name;
const char *parent_name;
struct nspire_clk_info info;
io = of_iomap(node, 0);
if (!io)
return;
val = readl(io);
iounmap(io);
get_clkinfo(val, &info);
of_property_read_string(node, "clock-output-names", &clk_name);
parent_name = of_clk_get_parent_name(node, 0);
hw = clk_hw_register_fixed_factor(NULL, clk_name, parent_name, 0,
1, info.base_ahb_ratio);
if (!IS_ERR(hw))
of_clk_add_hw_provider(node, of_clk_hw_simple_get, hw);
}
static void __init nspire_ahbdiv_setup_cx(struct device_node *node)
{
nspire_ahbdiv_setup(node, nspire_clkinfo_cx);
}
static void __init nspire_ahbdiv_setup_classic(struct device_node *node)
{
nspire_ahbdiv_setup(node, nspire_clkinfo_classic);
}
CLK_OF_DECLARE(nspire_ahbdiv_cx, "lsi,nspire-cx-ahb-divider",
nspire_ahbdiv_setup_cx);
CLK_OF_DECLARE(nspire_ahbdiv_classic, "lsi,nspire-classic-ahb-divider",
nspire_ahbdiv_setup_classic);
static void __init nspire_clk_setup(struct device_node *node,
void (*get_clkinfo)(u32, struct nspire_clk_info *))
{
u32 val;
void __iomem *io;
struct clk_hw *hw;
const char *clk_name = node->name;
struct nspire_clk_info info;
io = of_iomap(node, 0);
if (!io)
return;
val = readl(io);
iounmap(io);
get_clkinfo(val, &info);
of_property_read_string(node, "clock-output-names", &clk_name);
hw = clk_hw_register_fixed_rate(NULL, clk_name, NULL, 0,
info.base_clock);
if (!IS_ERR(hw))
of_clk_add_hw_provider(node, of_clk_hw_simple_get, hw);
else
return;
pr_info("TI-NSPIRE Base: %uMHz CPU: %uMHz AHB: %uMHz\n",
info.base_clock / MHZ,
info.base_clock / info.base_cpu_ratio / MHZ,
info.base_clock / info.base_ahb_ratio / MHZ);
}
static void __init nspire_clk_setup_cx(struct device_node *node)
{
nspire_clk_setup(node, nspire_clkinfo_cx);
}
static void __init nspire_clk_setup_classic(struct device_node *node)
{
nspire_clk_setup(node, nspire_clkinfo_classic);
}
CLK_OF_DECLARE(nspire_clk_cx, "lsi,nspire-cx-clock", nspire_clk_setup_cx);
CLK_OF_DECLARE(nspire_clk_classic, "lsi,nspire-classic-clock",
nspire_clk_setup_classic);
| linux-master | drivers/clk/clk-nspire.c |
// SPDX-License-Identifier: GPL-2.0
/*
* CS2000 -- CIRRUS LOGIC Fractional-N Clock Synthesizer & Clock Multiplier
*
* Copyright (C) 2015 Renesas Electronics Corporation
* Kuninori Morimoto <[email protected]>
*/
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/i2c.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/regmap.h>
#define CH_MAX 4
#define RATIO_REG_SIZE 4
#define DEVICE_ID 0x1
#define DEVICE_CTRL 0x2
#define DEVICE_CFG1 0x3
#define DEVICE_CFG2 0x4
#define GLOBAL_CFG 0x5
#define Ratio_Add(x, nth) (6 + (x * 4) + (nth))
#define Ratio_Val(x, nth) ((x >> (24 - (8 * nth))) & 0xFF)
#define Val_Ratio(x, nth) ((x & 0xFF) << (24 - (8 * nth)))
#define FUNC_CFG1 0x16
#define FUNC_CFG2 0x17
/* DEVICE_ID */
#define REVISION_MASK (0x7)
#define REVISION_B2_B3 (0x4)
#define REVISION_C1 (0x6)
/* DEVICE_CTRL */
#define PLL_UNLOCK (1 << 7)
#define AUXOUTDIS (1 << 1)
#define CLKOUTDIS (1 << 0)
/* DEVICE_CFG1 */
#define RSEL(x) (((x) & 0x3) << 3)
#define RSEL_MASK RSEL(0x3)
#define AUXOUTSRC(x) (((x) & 0x3) << 1)
#define AUXOUTSRC_MASK AUXOUTSRC(0x3)
#define ENDEV1 (0x1)
/* DEVICE_CFG2 */
#define AUTORMOD (1 << 3)
#define LOCKCLK(x) (((x) & 0x3) << 1)
#define LOCKCLK_MASK LOCKCLK(0x3)
#define FRACNSRC_MASK (1 << 0)
#define FRACNSRC_STATIC (0 << 0)
#define FRACNSRC_DYNAMIC (1 << 0)
/* GLOBAL_CFG */
#define FREEZE (1 << 7)
#define ENDEV2 (0x1)
/* FUNC_CFG1 */
#define CLKSKIPEN (1 << 7)
#define REFCLKDIV(x) (((x) & 0x3) << 3)
#define REFCLKDIV_MASK REFCLKDIV(0x3)
/* FUNC_CFG2 */
#define LFRATIO_MASK (1 << 3)
#define LFRATIO_20_12 (0 << 3)
#define LFRATIO_12_20 (1 << 3)
#define CH_SIZE_ERR(ch) ((ch < 0) || (ch >= CH_MAX))
#define hw_to_priv(_hw) container_of(_hw, struct cs2000_priv, hw)
#define priv_to_client(priv) (priv->client)
#define priv_to_dev(priv) (&(priv_to_client(priv)->dev))
#define CLK_IN 0
#define REF_CLK 1
#define CLK_MAX 2
static bool cs2000_readable_reg(struct device *dev, unsigned int reg)
{
return reg > 0;
}
static bool cs2000_writeable_reg(struct device *dev, unsigned int reg)
{
return reg != DEVICE_ID;
}
static bool cs2000_volatile_reg(struct device *dev, unsigned int reg)
{
return reg == DEVICE_CTRL;
}
static const struct regmap_config cs2000_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = FUNC_CFG2,
.readable_reg = cs2000_readable_reg,
.writeable_reg = cs2000_writeable_reg,
.volatile_reg = cs2000_volatile_reg,
};
struct cs2000_priv {
struct clk_hw hw;
struct i2c_client *client;
struct clk *clk_in;
struct clk *ref_clk;
struct regmap *regmap;
bool dynamic_mode;
bool lf_ratio;
bool clk_skip;
/* suspend/resume */
unsigned long saved_rate;
unsigned long saved_parent_rate;
};
static const struct of_device_id cs2000_of_match[] = {
{ .compatible = "cirrus,cs2000-cp", },
{},
};
MODULE_DEVICE_TABLE(of, cs2000_of_match);
static const struct i2c_device_id cs2000_id[] = {
{ "cs2000-cp", },
{}
};
MODULE_DEVICE_TABLE(i2c, cs2000_id);
static int cs2000_enable_dev_config(struct cs2000_priv *priv, bool enable)
{
int ret;
ret = regmap_update_bits(priv->regmap, DEVICE_CFG1, ENDEV1,
enable ? ENDEV1 : 0);
if (ret < 0)
return ret;
ret = regmap_update_bits(priv->regmap, GLOBAL_CFG, ENDEV2,
enable ? ENDEV2 : 0);
if (ret < 0)
return ret;
ret = regmap_update_bits(priv->regmap, FUNC_CFG1, CLKSKIPEN,
(enable && priv->clk_skip) ? CLKSKIPEN : 0);
if (ret < 0)
return ret;
return 0;
}
static int cs2000_ref_clk_bound_rate(struct cs2000_priv *priv,
u32 rate_in)
{
u32 val;
if (rate_in >= 32000000 && rate_in < 56000000)
val = 0x0;
else if (rate_in >= 16000000 && rate_in < 28000000)
val = 0x1;
else if (rate_in >= 8000000 && rate_in < 14000000)
val = 0x2;
else
return -EINVAL;
return regmap_update_bits(priv->regmap, FUNC_CFG1,
REFCLKDIV_MASK,
REFCLKDIV(val));
}
static int cs2000_wait_pll_lock(struct cs2000_priv *priv)
{
struct device *dev = priv_to_dev(priv);
unsigned int i, val;
int ret;
for (i = 0; i < 256; i++) {
ret = regmap_read(priv->regmap, DEVICE_CTRL, &val);
if (ret < 0)
return ret;
if (!(val & PLL_UNLOCK))
return 0;
udelay(1);
}
dev_err(dev, "pll lock failed\n");
return -ETIMEDOUT;
}
static int cs2000_clk_out_enable(struct cs2000_priv *priv, bool enable)
{
/* enable both AUX_OUT, CLK_OUT */
return regmap_update_bits(priv->regmap, DEVICE_CTRL,
(AUXOUTDIS | CLKOUTDIS),
enable ? 0 :
(AUXOUTDIS | CLKOUTDIS));
}
static u32 cs2000_rate_to_ratio(u32 rate_in, u32 rate_out, bool lf_ratio)
{
u64 ratio;
u32 multiplier = lf_ratio ? 12 : 20;
/*
* ratio = rate_out / rate_in * 2^multiplier
*
 * To avoid overflow, rate_out is widened to u64.
* The result should be u32.
*/
ratio = (u64)rate_out << multiplier;
do_div(ratio, rate_in);
return ratio;
}
static unsigned long cs2000_ratio_to_rate(u32 ratio, u32 rate_in, bool lf_ratio)
{
u64 rate_out;
u32 multiplier = lf_ratio ? 12 : 20;
/*
* ratio = rate_out / rate_in * 2^multiplier
*
 * To avoid overflow, rate_out is widened to u64.
* The result should be u32 or unsigned long.
*/
rate_out = (u64)ratio * rate_in;
return rate_out >> multiplier;
}
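/*
 * Worked example (illustrative, 12.20 format, i.e. multiplier = 20):
 * rate_in = 24576000 Hz and rate_out = 22579200 Hz give
 * ratio = (22579200 << 20) / 24576000 = 963379 (0xeb333); the reverse
 * conversion returns (963379 * 24576000) >> 20 = 22579195 Hz, i.e. a
 * quantization error of a few Hz.
 */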
static int cs2000_ratio_set(struct cs2000_priv *priv,
int ch, u32 rate_in, u32 rate_out)
{
u32 val;
unsigned int i;
int ret;
if (CH_SIZE_ERR(ch))
return -EINVAL;
val = cs2000_rate_to_ratio(rate_in, rate_out, priv->lf_ratio);
for (i = 0; i < RATIO_REG_SIZE; i++) {
ret = regmap_write(priv->regmap,
Ratio_Add(ch, i),
Ratio_Val(val, i));
if (ret < 0)
return ret;
}
return 0;
}
static u32 cs2000_ratio_get(struct cs2000_priv *priv, int ch)
{
unsigned int tmp, i;
u32 val;
int ret;
val = 0;
for (i = 0; i < RATIO_REG_SIZE; i++) {
ret = regmap_read(priv->regmap, Ratio_Add(ch, i), &tmp);
if (ret < 0)
return 0;
val |= Val_Ratio(tmp, i);
}
return val;
}
static int cs2000_ratio_select(struct cs2000_priv *priv, int ch)
{
int ret;
u8 fracnsrc;
if (CH_SIZE_ERR(ch))
return -EINVAL;
ret = regmap_update_bits(priv->regmap, DEVICE_CFG1, RSEL_MASK, RSEL(ch));
if (ret < 0)
return ret;
fracnsrc = priv->dynamic_mode ? FRACNSRC_DYNAMIC : FRACNSRC_STATIC;
ret = regmap_update_bits(priv->regmap, DEVICE_CFG2,
AUTORMOD | LOCKCLK_MASK | FRACNSRC_MASK,
LOCKCLK(ch) | fracnsrc);
if (ret < 0)
return ret;
return 0;
}
static unsigned long cs2000_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct cs2000_priv *priv = hw_to_priv(hw);
int ch = 0; /* it uses ch0 only at this point */
u32 ratio;
ratio = cs2000_ratio_get(priv, ch);
return cs2000_ratio_to_rate(ratio, parent_rate, priv->lf_ratio);
}
static long cs2000_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
struct cs2000_priv *priv = hw_to_priv(hw);
u32 ratio;
ratio = cs2000_rate_to_ratio(*parent_rate, rate, priv->lf_ratio);
return cs2000_ratio_to_rate(ratio, *parent_rate, priv->lf_ratio);
}
static int cs2000_select_ratio_mode(struct cs2000_priv *priv,
unsigned long rate,
unsigned long parent_rate)
{
/*
* From the datasheet:
*
* | It is recommended that the 12.20 High-Resolution format be
* | utilized whenever the desired ratio is less than 4096 since
* | the output frequency accuracy of the PLL is directly proportional
* | to the accuracy of the timing reference clock and the resolution
* | of the R_UD.
*
* This mode is only available in dynamic mode.
*/
priv->lf_ratio = priv->dynamic_mode && ((rate / parent_rate) > 4096);
return regmap_update_bits(priv->regmap, FUNC_CFG2, LFRATIO_MASK,
priv->lf_ratio ? LFRATIO_20_12 : LFRATIO_12_20);
}
static int __cs2000_set_rate(struct cs2000_priv *priv, int ch,
unsigned long rate, unsigned long parent_rate)
{
int ret;
ret = regmap_update_bits(priv->regmap, GLOBAL_CFG, FREEZE, FREEZE);
if (ret < 0)
return ret;
ret = cs2000_select_ratio_mode(priv, rate, parent_rate);
if (ret < 0)
return ret;
ret = cs2000_ratio_set(priv, ch, parent_rate, rate);
if (ret < 0)
return ret;
ret = cs2000_ratio_select(priv, ch);
if (ret < 0)
return ret;
ret = regmap_update_bits(priv->regmap, GLOBAL_CFG, FREEZE, 0);
if (ret < 0)
return ret;
priv->saved_rate = rate;
priv->saved_parent_rate = parent_rate;
return 0;
}
static int cs2000_set_rate(struct clk_hw *hw,
unsigned long rate, unsigned long parent_rate)
{
struct cs2000_priv *priv = hw_to_priv(hw);
int ch = 0; /* it uses ch0 only at this point */
return __cs2000_set_rate(priv, ch, rate, parent_rate);
}
static int cs2000_set_saved_rate(struct cs2000_priv *priv)
{
int ch = 0; /* it uses ch0 only at this point */
return __cs2000_set_rate(priv, ch,
priv->saved_rate,
priv->saved_parent_rate);
}
static int cs2000_enable(struct clk_hw *hw)
{
struct cs2000_priv *priv = hw_to_priv(hw);
int ret;
ret = cs2000_enable_dev_config(priv, true);
if (ret < 0)
return ret;
ret = cs2000_clk_out_enable(priv, true);
if (ret < 0)
return ret;
ret = cs2000_wait_pll_lock(priv);
if (ret < 0)
return ret;
return ret;
}
static void cs2000_disable(struct clk_hw *hw)
{
struct cs2000_priv *priv = hw_to_priv(hw);
cs2000_enable_dev_config(priv, false);
cs2000_clk_out_enable(priv, false);
}
static u8 cs2000_get_parent(struct clk_hw *hw)
{
struct cs2000_priv *priv = hw_to_priv(hw);
/*
* In dynamic mode, output rates are derived from CLK_IN.
* In static mode, CLK_IN is ignored, so we return REF_CLK instead.
*/
return priv->dynamic_mode ? CLK_IN : REF_CLK;
}
static const struct clk_ops cs2000_ops = {
.get_parent = cs2000_get_parent,
.recalc_rate = cs2000_recalc_rate,
.round_rate = cs2000_round_rate,
.set_rate = cs2000_set_rate,
.prepare = cs2000_enable,
.unprepare = cs2000_disable,
};
static int cs2000_clk_get(struct cs2000_priv *priv)
{
struct device *dev = priv_to_dev(priv);
struct clk *clk_in, *ref_clk;
clk_in = devm_clk_get(dev, "clk_in");
/* not yet provided */
if (IS_ERR(clk_in))
return -EPROBE_DEFER;
ref_clk = devm_clk_get(dev, "ref_clk");
/* not yet provided */
if (IS_ERR(ref_clk))
return -EPROBE_DEFER;
priv->clk_in = clk_in;
priv->ref_clk = ref_clk;
return 0;
}
static int cs2000_clk_register(struct cs2000_priv *priv)
{
struct device *dev = priv_to_dev(priv);
struct device_node *np = dev->of_node;
struct clk_init_data init;
const char *name = np->name;
static const char *parent_names[CLK_MAX];
u32 aux_out = 0;
int ref_clk_rate;
int ch = 0; /* it uses ch0 only at this point */
int ret;
of_property_read_string(np, "clock-output-names", &name);
priv->dynamic_mode = of_property_read_bool(np, "cirrus,dynamic-mode");
dev_info(dev, "operating in %s mode\n",
priv->dynamic_mode ? "dynamic" : "static");
of_property_read_u32(np, "cirrus,aux-output-source", &aux_out);
ret = regmap_update_bits(priv->regmap, DEVICE_CFG1,
AUXOUTSRC_MASK, AUXOUTSRC(aux_out));
if (ret < 0)
return ret;
priv->clk_skip = of_property_read_bool(np, "cirrus,clock-skip");
ref_clk_rate = clk_get_rate(priv->ref_clk);
ret = cs2000_ref_clk_bound_rate(priv, ref_clk_rate);
if (ret < 0)
return ret;
if (priv->dynamic_mode) {
/* Default to low-frequency mode to allow for large ratios */
priv->lf_ratio = true;
} else {
/*
* set default rate as 1/1.
* otherwise .set_rate which setup ratio
* is never called if user requests 1/1 rate
*/
ret = __cs2000_set_rate(priv, ch, ref_clk_rate, ref_clk_rate);
if (ret < 0)
return ret;
}
parent_names[CLK_IN] = __clk_get_name(priv->clk_in);
parent_names[REF_CLK] = __clk_get_name(priv->ref_clk);
init.name = name;
init.ops = &cs2000_ops;
init.flags = CLK_SET_RATE_GATE;
init.parent_names = parent_names;
init.num_parents = ARRAY_SIZE(parent_names);
priv->hw.init = &init;
ret = clk_hw_register(dev, &priv->hw);
if (ret)
return ret;
ret = of_clk_add_hw_provider(np, of_clk_hw_simple_get, &priv->hw);
if (ret < 0) {
clk_hw_unregister(&priv->hw);
return ret;
}
return 0;
}
static int cs2000_version_print(struct cs2000_priv *priv)
{
struct device *dev = priv_to_dev(priv);
const char *revision;
unsigned int val;
int ret;
ret = regmap_read(priv->regmap, DEVICE_ID, &val);
if (ret < 0)
return ret;
/* CS2000 should be 0x0 */
if (val >> 3)
return -EIO;
switch (val & REVISION_MASK) {
case REVISION_B2_B3:
revision = "B2 / B3";
break;
case REVISION_C1:
revision = "C1";
break;
default:
return -EIO;
}
dev_info(dev, "revision - %s\n", revision);
return 0;
}
static void cs2000_remove(struct i2c_client *client)
{
struct cs2000_priv *priv = i2c_get_clientdata(client);
struct device *dev = priv_to_dev(priv);
struct device_node *np = dev->of_node;
of_clk_del_provider(np);
clk_hw_unregister(&priv->hw);
}
static int cs2000_probe(struct i2c_client *client)
{
struct cs2000_priv *priv;
struct device *dev = &client->dev;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->client = client;
i2c_set_clientdata(client, priv);
priv->regmap = devm_regmap_init_i2c(client, &cs2000_regmap_config);
if (IS_ERR(priv->regmap))
return PTR_ERR(priv->regmap);
ret = cs2000_clk_get(priv);
if (ret < 0)
return ret;
ret = cs2000_clk_register(priv);
if (ret < 0)
return ret;
ret = cs2000_version_print(priv);
if (ret < 0)
goto probe_err;
return 0;
probe_err:
cs2000_remove(client);
return ret;
}
static int __maybe_unused cs2000_resume(struct device *dev)
{
struct cs2000_priv *priv = dev_get_drvdata(dev);
return cs2000_set_saved_rate(priv);
}
static const struct dev_pm_ops cs2000_pm_ops = {
SET_LATE_SYSTEM_SLEEP_PM_OPS(NULL, cs2000_resume)
};
static struct i2c_driver cs2000_driver = {
.driver = {
.name = "cs2000-cp",
.pm = &cs2000_pm_ops,
.of_match_table = cs2000_of_match,
},
.probe = cs2000_probe,
.remove = cs2000_remove,
.id_table = cs2000_id,
};
module_i2c_driver(cs2000_driver);
MODULE_DESCRIPTION("CS2000-CP driver");
MODULE_AUTHOR("Kuninori Morimoto <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/clk/clk-cs2000-cp.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Clock driver for Hi655x
*
* Copyright (c) 2017, Linaro Ltd.
*
* Author: Daniel Lezcano <[email protected]>
*/
#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/mfd/core.h>
#include <linux/mfd/hi655x-pmic.h>
#define HI655X_CLK_BASE HI655X_BUS_ADDR(0x1c)
#define HI655X_CLK_SET BIT(6)
struct hi655x_clk {
struct hi655x_pmic *hi655x;
struct clk_hw clk_hw;
};
static unsigned long hi655x_clk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
return 32768;
}
static int hi655x_clk_enable(struct clk_hw *hw, bool enable)
{
struct hi655x_clk *hi655x_clk =
container_of(hw, struct hi655x_clk, clk_hw);
struct hi655x_pmic *hi655x = hi655x_clk->hi655x;
return regmap_update_bits(hi655x->regmap, HI655X_CLK_BASE,
HI655X_CLK_SET, enable ? HI655X_CLK_SET : 0);
}
static int hi655x_clk_prepare(struct clk_hw *hw)
{
return hi655x_clk_enable(hw, true);
}
static void hi655x_clk_unprepare(struct clk_hw *hw)
{
hi655x_clk_enable(hw, false);
}
static int hi655x_clk_is_prepared(struct clk_hw *hw)
{
struct hi655x_clk *hi655x_clk =
container_of(hw, struct hi655x_clk, clk_hw);
struct hi655x_pmic *hi655x = hi655x_clk->hi655x;
int ret;
uint32_t val;
ret = regmap_read(hi655x->regmap, HI655X_CLK_BASE, &val);
if (ret < 0)
return ret;
	/* the clock is prepared iff the enable bit is set */
	return !!(val & HI655X_CLK_SET);
}
static const struct clk_ops hi655x_clk_ops = {
.prepare = hi655x_clk_prepare,
.unprepare = hi655x_clk_unprepare,
.is_prepared = hi655x_clk_is_prepared,
.recalc_rate = hi655x_clk_recalc_rate,
};
static int hi655x_clk_probe(struct platform_device *pdev)
{
struct device *parent = pdev->dev.parent;
struct hi655x_pmic *hi655x = dev_get_drvdata(parent);
struct hi655x_clk *hi655x_clk;
const char *clk_name = "hi655x-clk";
struct clk_init_data init = {
.name = clk_name,
.ops = &hi655x_clk_ops
};
int ret;
hi655x_clk = devm_kzalloc(&pdev->dev, sizeof(*hi655x_clk), GFP_KERNEL);
if (!hi655x_clk)
return -ENOMEM;
of_property_read_string_index(parent->of_node, "clock-output-names",
0, &clk_name);
hi655x_clk->clk_hw.init = &init;
hi655x_clk->hi655x = hi655x;
platform_set_drvdata(pdev, hi655x_clk);
ret = devm_clk_hw_register(&pdev->dev, &hi655x_clk->clk_hw);
if (ret)
return ret;
return devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_simple_get,
&hi655x_clk->clk_hw);
}
static struct platform_driver hi655x_clk_driver = {
.probe = hi655x_clk_probe,
.driver = {
.name = "hi655x-clk",
},
};
module_platform_driver(hi655x_clk_driver);
MODULE_DESCRIPTION("Clk driver for the hi655x series PMICs");
MODULE_AUTHOR("Daniel Lezcano <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:hi655x-clk");
| linux-master | drivers/clk/clk-hi655x.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Nomadik clock implementation
* Copyright (C) 2013 ST-Ericsson AB
* Author: Linus Walleij <[email protected]>
*/
#define pr_fmt(fmt) "Nomadik SRC clocks: " fmt
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/reboot.h>
/*
* The Nomadik clock tree is described in the STN8815A12 DB V4.2
* reference manual for the chip, page 94 ff.
* Clock IDs are in the STn8815 Reference Manual table 3, page 27.
*/
#define SRC_CR 0x00U
#define SRC_CR_T0_ENSEL BIT(15)
#define SRC_CR_T1_ENSEL BIT(17)
#define SRC_CR_T2_ENSEL BIT(19)
#define SRC_CR_T3_ENSEL BIT(21)
#define SRC_CR_T4_ENSEL BIT(23)
#define SRC_CR_T5_ENSEL BIT(25)
#define SRC_CR_T6_ENSEL BIT(27)
#define SRC_CR_T7_ENSEL BIT(29)
#define SRC_XTALCR 0x0CU
#define SRC_XTALCR_XTALTIMEN BIT(20)
#define SRC_XTALCR_SXTALDIS BIT(19)
#define SRC_XTALCR_MXTALSTAT BIT(2)
#define SRC_XTALCR_MXTALEN BIT(1)
#define SRC_XTALCR_MXTALOVER BIT(0)
#define SRC_PLLCR 0x10U
#define SRC_PLLCR_PLLTIMEN BIT(29)
#define SRC_PLLCR_PLL2EN BIT(28)
#define SRC_PLLCR_PLL1STAT BIT(2)
#define SRC_PLLCR_PLL1EN BIT(1)
#define SRC_PLLCR_PLL1OVER BIT(0)
#define SRC_PLLFR 0x14U
#define SRC_PCKEN0 0x24U
#define SRC_PCKDIS0 0x28U
#define SRC_PCKENSR0 0x2CU
#define SRC_PCKSR0 0x30U
#define SRC_PCKEN1 0x34U
#define SRC_PCKDIS1 0x38U
#define SRC_PCKENSR1 0x3CU
#define SRC_PCKSR1 0x40U
/* Lock protecting the SRC_CR register */
static DEFINE_SPINLOCK(src_lock);
/* Base address of the SRC */
static void __iomem *src_base;
static int nomadik_clk_reboot_handler(struct notifier_block *this,
unsigned long code,
void *unused)
{
u32 val;
/* The main crystal needs to be enabled for reboot to work */
val = readl(src_base + SRC_XTALCR);
val &= ~SRC_XTALCR_MXTALOVER;
val |= SRC_XTALCR_MXTALEN;
pr_crit("force-enabling MXTALO\n");
writel(val, src_base + SRC_XTALCR);
return NOTIFY_OK;
}
static struct notifier_block nomadik_clk_reboot_notifier = {
.notifier_call = nomadik_clk_reboot_handler,
};
static const struct of_device_id nomadik_src_match[] __initconst = {
{ .compatible = "stericsson,nomadik-src" },
{ /* sentinel */ }
};
static void __init nomadik_src_init(void)
{
struct device_node *np;
u32 val;
np = of_find_matching_node(NULL, nomadik_src_match);
if (!np) {
pr_crit("no matching node for SRC, aborting clock init\n");
return;
}
src_base = of_iomap(np, 0);
if (!src_base) {
pr_err("%s: must have src parent node with REGS (%pOFn)\n",
__func__, np);
goto out_put;
}
/* Set all timers to use the 2.4 MHz TIMCLK */
val = readl(src_base + SRC_CR);
val |= SRC_CR_T0_ENSEL;
val |= SRC_CR_T1_ENSEL;
val |= SRC_CR_T2_ENSEL;
val |= SRC_CR_T3_ENSEL;
val |= SRC_CR_T4_ENSEL;
val |= SRC_CR_T5_ENSEL;
val |= SRC_CR_T6_ENSEL;
val |= SRC_CR_T7_ENSEL;
writel(val, src_base + SRC_CR);
val = readl(src_base + SRC_XTALCR);
pr_info("SXTALO is %s\n",
(val & SRC_XTALCR_SXTALDIS) ? "disabled" : "enabled");
pr_info("MXTAL is %s\n",
(val & SRC_XTALCR_MXTALSTAT) ? "enabled" : "disabled");
if (of_property_read_bool(np, "disable-sxtalo")) {
/* The machine uses an external oscillator circuit */
val |= SRC_XTALCR_SXTALDIS;
pr_info("disabling SXTALO\n");
}
if (of_property_read_bool(np, "disable-mxtalo")) {
/* Disable this too: also run by external oscillator */
val |= SRC_XTALCR_MXTALOVER;
val &= ~SRC_XTALCR_MXTALEN;
pr_info("disabling MXTALO\n");
}
writel(val, src_base + SRC_XTALCR);
register_reboot_notifier(&nomadik_clk_reboot_notifier);
out_put:
of_node_put(np);
}
/**
* struct clk_pll - Nomadik PLL clock
* @hw: corresponding clock hardware entry
* @id: PLL instance: 1 or 2
*/
struct clk_pll {
struct clk_hw hw;
int id;
};
/**
* struct clk_src - Nomadik src clock
* @hw: corresponding clock hardware entry
* @id: the clock ID
* @group1: true if the clock is in group1, else it is in group0
* @clkbit: bit 0...31 corresponding to the clock in each clock register
*/
struct clk_src {
struct clk_hw hw;
int id;
bool group1;
u32 clkbit;
};
#define to_pll(_hw) container_of(_hw, struct clk_pll, hw)
#define to_src(_hw) container_of(_hw, struct clk_src, hw)
static int pll_clk_enable(struct clk_hw *hw)
{
struct clk_pll *pll = to_pll(hw);
u32 val;
spin_lock(&src_lock);
val = readl(src_base + SRC_PLLCR);
if (pll->id == 1) {
if (val & SRC_PLLCR_PLL1OVER) {
val |= SRC_PLLCR_PLL1EN;
writel(val, src_base + SRC_PLLCR);
}
} else if (pll->id == 2) {
val |= SRC_PLLCR_PLL2EN;
writel(val, src_base + SRC_PLLCR);
}
spin_unlock(&src_lock);
return 0;
}
static void pll_clk_disable(struct clk_hw *hw)
{
struct clk_pll *pll = to_pll(hw);
u32 val;
spin_lock(&src_lock);
val = readl(src_base + SRC_PLLCR);
if (pll->id == 1) {
if (val & SRC_PLLCR_PLL1OVER) {
val &= ~SRC_PLLCR_PLL1EN;
writel(val, src_base + SRC_PLLCR);
}
} else if (pll->id == 2) {
val &= ~SRC_PLLCR_PLL2EN;
writel(val, src_base + SRC_PLLCR);
}
spin_unlock(&src_lock);
}
static int pll_clk_is_enabled(struct clk_hw *hw)
{
struct clk_pll *pll = to_pll(hw);
u32 val;
val = readl(src_base + SRC_PLLCR);
if (pll->id == 1) {
if (val & SRC_PLLCR_PLL1OVER)
return !!(val & SRC_PLLCR_PLL1EN);
} else if (pll->id == 2) {
return !!(val & SRC_PLLCR_PLL2EN);
}
return 1;
}
static unsigned long pll_clk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_pll *pll = to_pll(hw);
u32 val;
val = readl(src_base + SRC_PLLFR);
if (pll->id == 1) {
u8 mul;
u8 div;
mul = (val >> 8) & 0x3FU;
mul += 2;
div = val & 0x07U;
return (parent_rate * mul) >> div;
}
if (pll->id == 2) {
u8 mul;
mul = (val >> 24) & 0x3FU;
mul += 2;
return (parent_rate * mul);
}
/* Unknown PLL */
return 0;
}
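/*
 * Worked example (illustrative field values): with a 19.2 MHz MXTAL and
 * SRC_PLLFR holding mul = 0x2e and div = 0 for PLL1, the multiplier
 * becomes 46 + 2 = 48 and rate = (19200000 * 48) >> 0 = 921600000 Hz.
 */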
static const struct clk_ops pll_clk_ops = {
.enable = pll_clk_enable,
.disable = pll_clk_disable,
.is_enabled = pll_clk_is_enabled,
.recalc_rate = pll_clk_recalc_rate,
};
static struct clk_hw * __init
pll_clk_register(struct device *dev, const char *name,
const char *parent_name, u32 id)
{
int ret;
struct clk_pll *pll;
struct clk_init_data init;
if (id != 1 && id != 2) {
pr_err("%s: the Nomadik has only PLL 1 & 2\n", __func__);
return ERR_PTR(-EINVAL);
}
pll = kzalloc(sizeof(*pll), GFP_KERNEL);
if (!pll)
return ERR_PTR(-ENOMEM);
init.name = name;
init.ops = &pll_clk_ops;
init.parent_names = (parent_name ? &parent_name : NULL);
init.num_parents = (parent_name ? 1 : 0);
pll->hw.init = &init;
pll->id = id;
pr_debug("register PLL1 clock \"%s\"\n", name);
ret = clk_hw_register(dev, &pll->hw);
if (ret) {
kfree(pll);
return ERR_PTR(ret);
}
return &pll->hw;
}
/*
* The Nomadik SRC clocks are gated, but not in the sense that
* you read-modify-write a register. Instead there are separate
* clock enable and clock disable registers. Writing a '1' bit in
* the enable register for a certain clock ungates that clock without
* affecting the other clocks. The disable register works the opposite
* way.
*/
static int src_clk_enable(struct clk_hw *hw)
{
struct clk_src *sclk = to_src(hw);
u32 enreg = sclk->group1 ? SRC_PCKEN1 : SRC_PCKEN0;
u32 sreg = sclk->group1 ? SRC_PCKSR1 : SRC_PCKSR0;
writel(sclk->clkbit, src_base + enreg);
/* spin until enabled */
while (!(readl(src_base + sreg) & sclk->clkbit))
cpu_relax();
return 0;
}
static void src_clk_disable(struct clk_hw *hw)
{
struct clk_src *sclk = to_src(hw);
u32 disreg = sclk->group1 ? SRC_PCKDIS1 : SRC_PCKDIS0;
u32 sreg = sclk->group1 ? SRC_PCKSR1 : SRC_PCKSR0;
writel(sclk->clkbit, src_base + disreg);
/* spin until disabled */
while (readl(src_base + sreg) & sclk->clkbit)
cpu_relax();
}
static int src_clk_is_enabled(struct clk_hw *hw)
{
struct clk_src *sclk = to_src(hw);
u32 sreg = sclk->group1 ? SRC_PCKSR1 : SRC_PCKSR0;
u32 val = readl(src_base + sreg);
return !!(val & sclk->clkbit);
}
static unsigned long
src_clk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
return parent_rate;
}
static const struct clk_ops src_clk_ops = {
.enable = src_clk_enable,
.disable = src_clk_disable,
.is_enabled = src_clk_is_enabled,
.recalc_rate = src_clk_recalc_rate,
};
static struct clk_hw * __init
src_clk_register(struct device *dev, const char *name,
const char *parent_name, u8 id)
{
int ret;
struct clk_src *sclk;
struct clk_init_data init;
sclk = kzalloc(sizeof(*sclk), GFP_KERNEL);
if (!sclk)
return ERR_PTR(-ENOMEM);
init.name = name;
init.ops = &src_clk_ops;
/* Do not force-disable the static SDRAM controller */
if (id == 2)
init.flags = CLK_IGNORE_UNUSED;
else
init.flags = 0;
init.parent_names = (parent_name ? &parent_name : NULL);
init.num_parents = (parent_name ? 1 : 0);
sclk->hw.init = &init;
sclk->id = id;
sclk->group1 = (id > 31);
sclk->clkbit = BIT(id & 0x1f);
pr_debug("register clock \"%s\" ID: %d group: %d bits: %08x\n",
name, id, sclk->group1, sclk->clkbit);
ret = clk_hw_register(dev, &sclk->hw);
if (ret) {
kfree(sclk);
return ERR_PTR(ret);
}
return &sclk->hw;
}
#ifdef CONFIG_DEBUG_FS
static u32 src_pcksr0_boot;
static u32 src_pcksr1_boot;
static const char * const src_clk_names[] = {
"HCLKDMA0 ",
"HCLKSMC ",
"HCLKSDRAM ",
"HCLKDMA1 ",
"HCLKCLCD ",
"PCLKIRDA ",
"PCLKSSP ",
"PCLKUART0 ",
"PCLKSDI ",
"PCLKI2C0 ",
"PCLKI2C1 ",
"PCLKUART1 ",
"PCLMSP0 ",
"HCLKUSB ",
"HCLKDIF ",
"HCLKSAA ",
"HCLKSVA ",
"PCLKHSI ",
"PCLKXTI ",
"PCLKUART2 ",
"PCLKMSP1 ",
"PCLKMSP2 ",
"PCLKOWM ",
"HCLKHPI ",
"PCLKSKE ",
"PCLKHSEM ",
"HCLK3D ",
"HCLKHASH ",
"HCLKCRYP ",
"PCLKMSHC ",
"HCLKUSBM ",
"HCLKRNG ",
"RESERVED ",
"RESERVED ",
"RESERVED ",
"RESERVED ",
"CLDCLK ",
"IRDACLK ",
"SSPICLK ",
"UART0CLK ",
"SDICLK ",
"I2C0CLK ",
"I2C1CLK ",
"UART1CLK ",
"MSPCLK0 ",
"USBCLK ",
"DIFCLK ",
"IPI2CCLK ",
"IPBMCCLK ",
"HSICLKRX ",
"HSICLKTX ",
"UART2CLK ",
"MSPCLK1 ",
"MSPCLK2 ",
"OWMCLK ",
"RESERVED ",
"SKECLK ",
"RESERVED ",
"3DCLK ",
"PCLKMSP3 ",
"MSPCLK3 ",
"MSHCCLK ",
"USBMCLK ",
"RNGCCLK ",
};
static int nomadik_src_clk_debugfs_show(struct seq_file *s, void *what)
{
int i;
u32 src_pcksr0 = readl(src_base + SRC_PCKSR0);
u32 src_pcksr1 = readl(src_base + SRC_PCKSR1);
u32 src_pckensr0 = readl(src_base + SRC_PCKENSR0);
u32 src_pckensr1 = readl(src_base + SRC_PCKENSR1);
seq_puts(s, "Clock: Boot: Now: Request: ASKED:\n");
for (i = 0; i < ARRAY_SIZE(src_clk_names); i++) {
u32 pcksrb = (i < 0x20) ? src_pcksr0_boot : src_pcksr1_boot;
u32 pcksr = (i < 0x20) ? src_pcksr0 : src_pcksr1;
u32 pckreq = (i < 0x20) ? src_pckensr0 : src_pckensr1;
u32 mask = BIT(i & 0x1f);
seq_printf(s, "%s %s %s %s\n",
src_clk_names[i],
(pcksrb & mask) ? "on " : "off",
(pcksr & mask) ? "on " : "off",
(pckreq & mask) ? "on " : "off");
}
return 0;
}
DEFINE_SHOW_ATTRIBUTE(nomadik_src_clk_debugfs);
static int __init nomadik_src_clk_init_debugfs(void)
{
/* Vital for multiplatform */
if (!src_base)
return -ENODEV;
src_pcksr0_boot = readl(src_base + SRC_PCKSR0);
src_pcksr1_boot = readl(src_base + SRC_PCKSR1);
debugfs_create_file("nomadik-src-clk", S_IFREG | S_IRUGO,
NULL, NULL, &nomadik_src_clk_debugfs_fops);
return 0;
}
device_initcall(nomadik_src_clk_init_debugfs);
#endif
static void __init of_nomadik_pll_setup(struct device_node *np)
{
struct clk_hw *hw;
const char *clk_name = np->name;
const char *parent_name;
u32 pll_id;
if (!src_base)
nomadik_src_init();
if (of_property_read_u32(np, "pll-id", &pll_id)) {
pr_err("%s: PLL \"%s\" missing pll-id property\n",
__func__, clk_name);
return;
}
parent_name = of_clk_get_parent_name(np, 0);
hw = pll_clk_register(NULL, clk_name, parent_name, pll_id);
if (!IS_ERR(hw))
of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
}
CLK_OF_DECLARE(nomadik_pll_clk,
"st,nomadik-pll-clock", of_nomadik_pll_setup);
static void __init of_nomadik_hclk_setup(struct device_node *np)
{
struct clk_hw *hw;
const char *clk_name = np->name;
const char *parent_name;
if (!src_base)
nomadik_src_init();
parent_name = of_clk_get_parent_name(np, 0);
/*
* The HCLK divides PLL1 with 1 (passthru), 2, 3 or 4.
*/
hw = clk_hw_register_divider(NULL, clk_name, parent_name,
0, src_base + SRC_CR,
13, 2,
CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
&src_lock);
if (!IS_ERR(hw))
of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
}
CLK_OF_DECLARE(nomadik_hclk_clk,
"st,nomadik-hclk-clock", of_nomadik_hclk_setup);
static void __init of_nomadik_src_clk_setup(struct device_node *np)
{
struct clk_hw *hw;
const char *clk_name = np->name;
const char *parent_name;
u32 clk_id;
if (!src_base)
nomadik_src_init();
if (of_property_read_u32(np, "clock-id", &clk_id)) {
pr_err("%s: SRC clock \"%s\" missing clock-id property\n",
__func__, clk_name);
return;
}
parent_name = of_clk_get_parent_name(np, 0);
hw = src_clk_register(NULL, clk_name, parent_name, clk_id);
if (!IS_ERR(hw))
of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
}
CLK_OF_DECLARE(nomadik_src_clk,
"st,nomadik-src-clock", of_nomadik_src_clk_setup);
| linux-master | drivers/clk/clk-nomadik.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2011 Sascha Hauer, Pengutronix <[email protected]>
* Copyright (C) 2011 Richard Zhao, Linaro <[email protected]>
* Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <[email protected]>
*
* Simple multiplexer clock implementation
*/
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>
/*
* DOC: basic adjustable multiplexer clock that cannot gate
*
* Traits of this clock:
* prepare - clk_prepare only ensures that parents are prepared
* enable - clk_enable only ensures that parents are enabled
* rate - rate is only affected by parent switching. No clk_set_rate support
* parent - parent is adjustable through clk_set_parent
*/
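/*
 * Typical registration (illustrative sketch; "pll" and "osc" are assumed
 * parent names, base and lock are caller-provided): a 2-bit mux field at
 * bit 8 of a memory-mapped register could be registered as
 *
 *	static const char * const sel_parents[] = { "pll", "osc" };
 *	hw = clk_hw_register_mux(NULL, "sel", sel_parents,
 *				 ARRAY_SIZE(sel_parents), 0,
 *				 base + 0x10, 8, 2, 0, &lock);
 */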
static inline u32 clk_mux_readl(struct clk_mux *mux)
{
if (mux->flags & CLK_MUX_BIG_ENDIAN)
return ioread32be(mux->reg);
return readl(mux->reg);
}
static inline void clk_mux_writel(struct clk_mux *mux, u32 val)
{
if (mux->flags & CLK_MUX_BIG_ENDIAN)
iowrite32be(val, mux->reg);
else
writel(val, mux->reg);
}
int clk_mux_val_to_index(struct clk_hw *hw, const u32 *table, unsigned int flags,
unsigned int val)
{
int num_parents = clk_hw_get_num_parents(hw);
if (table) {
int i;
for (i = 0; i < num_parents; i++)
if (table[i] == val)
return i;
return -EINVAL;
}
if (val && (flags & CLK_MUX_INDEX_BIT))
val = ffs(val) - 1;
if (val && (flags & CLK_MUX_INDEX_ONE))
val--;
if (val >= num_parents)
return -EINVAL;
return val;
}
EXPORT_SYMBOL_GPL(clk_mux_val_to_index);
unsigned int clk_mux_index_to_val(const u32 *table, unsigned int flags, u8 index)
{
unsigned int val = index;
if (table) {
val = table[index];
} else {
if (flags & CLK_MUX_INDEX_BIT)
val = 1 << index;
if (flags & CLK_MUX_INDEX_ONE)
val++;
}
return val;
}
EXPORT_SYMBOL_GPL(clk_mux_index_to_val);
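/*
 * Example (illustrative): with CLK_MUX_INDEX_BIT set and no table, parent
 * index 2 encodes as 1 << 2 = 4 in the register field, and
 * clk_mux_val_to_index() reverses this via ffs(4) - 1 = 2.
 */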
static u8 clk_mux_get_parent(struct clk_hw *hw)
{
struct clk_mux *mux = to_clk_mux(hw);
u32 val;
val = clk_mux_readl(mux) >> mux->shift;
val &= mux->mask;
return clk_mux_val_to_index(hw, mux->table, mux->flags, val);
}
static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
struct clk_mux *mux = to_clk_mux(hw);
u32 val = clk_mux_index_to_val(mux->table, mux->flags, index);
unsigned long flags = 0;
u32 reg;
if (mux->lock)
spin_lock_irqsave(mux->lock, flags);
else
__acquire(mux->lock);
if (mux->flags & CLK_MUX_HIWORD_MASK) {
reg = mux->mask << (mux->shift + 16);
} else {
reg = clk_mux_readl(mux);
reg &= ~(mux->mask << mux->shift);
}
val = val << mux->shift;
reg |= val;
clk_mux_writel(mux, reg);
if (mux->lock)
spin_unlock_irqrestore(mux->lock, flags);
else
__release(mux->lock);
return 0;
}
static int clk_mux_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
struct clk_mux *mux = to_clk_mux(hw);
return clk_mux_determine_rate_flags(hw, req, mux->flags);
}
const struct clk_ops clk_mux_ops = {
.get_parent = clk_mux_get_parent,
.set_parent = clk_mux_set_parent,
.determine_rate = clk_mux_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_mux_ops);
const struct clk_ops clk_mux_ro_ops = {
.get_parent = clk_mux_get_parent,
};
EXPORT_SYMBOL_GPL(clk_mux_ro_ops);
struct clk_hw *__clk_hw_register_mux(struct device *dev, struct device_node *np,
const char *name, u8 num_parents,
const char * const *parent_names,
const struct clk_hw **parent_hws,
const struct clk_parent_data *parent_data,
unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
u8 clk_mux_flags, const u32 *table, spinlock_t *lock)
{
struct clk_mux *mux;
struct clk_hw *hw;
struct clk_init_data init = {};
int ret = -EINVAL;
if (clk_mux_flags & CLK_MUX_HIWORD_MASK) {
u8 width = fls(mask) - ffs(mask) + 1;
if (width + shift > 16) {
pr_err("mux value exceeds LOWORD field\n");
return ERR_PTR(-EINVAL);
}
}
/* allocate the mux */
mux = kzalloc(sizeof(*mux), GFP_KERNEL);
if (!mux)
return ERR_PTR(-ENOMEM);
init.name = name;
if (clk_mux_flags & CLK_MUX_READ_ONLY)
init.ops = &clk_mux_ro_ops;
else
init.ops = &clk_mux_ops;
init.flags = flags;
init.parent_names = parent_names;
init.parent_data = parent_data;
init.parent_hws = parent_hws;
init.num_parents = num_parents;
/* struct clk_mux assignments */
mux->reg = reg;
mux->shift = shift;
mux->mask = mask;
mux->flags = clk_mux_flags;
mux->lock = lock;
mux->table = table;
mux->hw.init = &init;
hw = &mux->hw;
if (dev || !np)
ret = clk_hw_register(dev, hw);
else if (np)
ret = of_clk_hw_register(np, hw);
if (ret) {
kfree(mux);
hw = ERR_PTR(ret);
}
return hw;
}
EXPORT_SYMBOL_GPL(__clk_hw_register_mux);
static void devm_clk_hw_release_mux(struct device *dev, void *res)
{
clk_hw_unregister_mux(*(struct clk_hw **)res);
}
struct clk_hw *__devm_clk_hw_register_mux(struct device *dev, struct device_node *np,
const char *name, u8 num_parents,
const char * const *parent_names,
const struct clk_hw **parent_hws,
const struct clk_parent_data *parent_data,
unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
u8 clk_mux_flags, const u32 *table, spinlock_t *lock)
{
struct clk_hw **ptr, *hw;
ptr = devres_alloc(devm_clk_hw_release_mux, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return ERR_PTR(-ENOMEM);
hw = __clk_hw_register_mux(dev, np, name, num_parents, parent_names, parent_hws,
parent_data, flags, reg, shift, mask,
clk_mux_flags, table, lock);
if (!IS_ERR(hw)) {
*ptr = hw;
devres_add(dev, ptr);
} else {
devres_free(ptr);
}
return hw;
}
EXPORT_SYMBOL_GPL(__devm_clk_hw_register_mux);
struct clk *clk_register_mux_table(struct device *dev, const char *name,
const char * const *parent_names, u8 num_parents,
unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
u8 clk_mux_flags, const u32 *table, spinlock_t *lock)
{
struct clk_hw *hw;
hw = clk_hw_register_mux_table(dev, name, parent_names,
num_parents, flags, reg, shift, mask,
clk_mux_flags, table, lock);
if (IS_ERR(hw))
return ERR_CAST(hw);
return hw->clk;
}
EXPORT_SYMBOL_GPL(clk_register_mux_table);
void clk_unregister_mux(struct clk *clk)
{
struct clk_mux *mux;
struct clk_hw *hw;
hw = __clk_get_hw(clk);
if (!hw)
return;
mux = to_clk_mux(hw);
clk_unregister(clk);
kfree(mux);
}
EXPORT_SYMBOL_GPL(clk_unregister_mux);
void clk_hw_unregister_mux(struct clk_hw *hw)
{
struct clk_mux *mux;
mux = to_clk_mux(hw);
clk_hw_unregister(hw);
kfree(mux);
}
EXPORT_SYMBOL_GPL(clk_hw_unregister_mux);
| linux-master | drivers/clk/clk-mux.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Memory Mapped IO Fixed clock driver
*
* Copyright (C) 2018 Cadence Design Systems, Inc.
*
* Authors:
* Jan Kotas <[email protected]>
*/
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
static struct clk_hw *fixed_mmio_clk_setup(struct device_node *node)
{
struct clk_hw *clk;
const char *clk_name = node->name;
void __iomem *base;
u32 freq;
int ret;
base = of_iomap(node, 0);
if (!base) {
pr_err("%pOFn: failed to map address\n", node);
return ERR_PTR(-EIO);
}
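	/* Per the fixed-mmio-clock binding, the register holds the rate in Hz */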
freq = readl(base);
iounmap(base);
of_property_read_string(node, "clock-output-names", &clk_name);
clk = clk_hw_register_fixed_rate(NULL, clk_name, NULL, 0, freq);
if (IS_ERR(clk)) {
pr_err("%pOFn: failed to register fixed rate clock\n", node);
return clk;
}
ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, clk);
if (ret) {
pr_err("%pOFn: failed to add clock provider\n", node);
clk_hw_unregister(clk);
clk = ERR_PTR(ret);
}
return clk;
}
static void __init of_fixed_mmio_clk_setup(struct device_node *node)
{
fixed_mmio_clk_setup(node);
}
CLK_OF_DECLARE(fixed_mmio_clk, "fixed-mmio-clock", of_fixed_mmio_clk_setup);
/*
 * This probe path is not executed when of_fixed_mmio_clk_setup() has
 * already succeeded.
 */
static int of_fixed_mmio_clk_probe(struct platform_device *pdev)
{
struct clk_hw *clk;
clk = fixed_mmio_clk_setup(pdev->dev.of_node);
if (IS_ERR(clk))
return PTR_ERR(clk);
platform_set_drvdata(pdev, clk);
return 0;
}
static void of_fixed_mmio_clk_remove(struct platform_device *pdev)
{
struct clk_hw *clk = platform_get_drvdata(pdev);
of_clk_del_provider(pdev->dev.of_node);
clk_hw_unregister_fixed_rate(clk);
}
static const struct of_device_id of_fixed_mmio_clk_ids[] = {
{ .compatible = "fixed-mmio-clock" },
{ }
};
MODULE_DEVICE_TABLE(of, of_fixed_mmio_clk_ids);
static struct platform_driver of_fixed_mmio_clk_driver = {
.driver = {
.name = "of_fixed_mmio_clk",
.of_match_table = of_fixed_mmio_clk_ids,
},
.probe = of_fixed_mmio_clk_probe,
.remove_new = of_fixed_mmio_clk_remove,
};
module_platform_driver(of_fixed_mmio_clk_driver);
MODULE_AUTHOR("Jan Kotas <[email protected]>");
MODULE_DESCRIPTION("Memory Mapped IO Fixed clock driver");
| linux-master | drivers/clk/clk-fixed-mmio.c |
/*
* Driver for TI Multi PLL CDCE913/925/937/949 clock synthesizer
*
 * This driver always connects Y1 to the input clock, Y2/Y3 to PLL1,
 * Y4/Y5 to PLL2, and so on. PLL frequency is set on a first-come-first-served
* basis. Clients can directly request any frequency that the chip can
* deliver using the standard clk framework. In addition, the device can
* be configured and activated via the devicetree.
*
* Copyright (C) 2014, Topic Embedded Products
 * Licensed under GPL
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/gcd.h>
/* Each chip has different number of PLLs and outputs, for example:
 * The CDCE925 has 2 PLLs which can be routed through dividers to 5 outputs.
* Model this as 2 PLL clocks which are parents to the outputs.
*/
enum {
CDCE913,
CDCE925,
CDCE937,
CDCE949,
};
struct clk_cdce925_chip_info {
int num_plls;
int num_outputs;
};
static const struct clk_cdce925_chip_info clk_cdce925_chip_info_tbl[] = {
[CDCE913] = { .num_plls = 1, .num_outputs = 3 },
[CDCE925] = { .num_plls = 2, .num_outputs = 5 },
[CDCE937] = { .num_plls = 3, .num_outputs = 7 },
[CDCE949] = { .num_plls = 4, .num_outputs = 9 },
};
#define MAX_NUMBER_OF_PLLS 4
#define MAX_NUMBER_OF_OUTPUTS 9
#define CDCE925_REG_GLOBAL1 0x01
#define CDCE925_REG_Y1SPIPDIVH 0x02
#define CDCE925_REG_PDIVL 0x03
#define CDCE925_REG_XCSEL 0x05
/* PLL parameters start at 0x10, steps of 0x10 */
#define CDCE925_OFFSET_PLL 0x10
/* Add CDCE925_OFFSET_PLL * (pll) to these registers before sending */
#define CDCE925_PLL_MUX_OUTPUTS 0x14
#define CDCE925_PLL_MULDIV 0x18
#define CDCE925_PLL_FREQUENCY_MIN 80000000ul
#define CDCE925_PLL_FREQUENCY_MAX 230000000ul
struct clk_cdce925_chip;
struct clk_cdce925_output {
struct clk_hw hw;
struct clk_cdce925_chip *chip;
u8 index;
u16 pdiv; /* 1..127 for Y2-Y9; 1..1023 for Y1 */
};
#define to_clk_cdce925_output(_hw) \
container_of(_hw, struct clk_cdce925_output, hw)
struct clk_cdce925_pll {
struct clk_hw hw;
struct clk_cdce925_chip *chip;
u8 index;
u16 m; /* 1..511 */
u16 n; /* 1..4095 */
};
#define to_clk_cdce925_pll(_hw) container_of(_hw, struct clk_cdce925_pll, hw)
struct clk_cdce925_chip {
struct regmap *regmap;
struct i2c_client *i2c_client;
const struct clk_cdce925_chip_info *chip_info;
struct clk_cdce925_pll pll[MAX_NUMBER_OF_PLLS];
struct clk_cdce925_output clk[MAX_NUMBER_OF_OUTPUTS];
};
/* ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** */
static unsigned long cdce925_pll_calculate_rate(unsigned long parent_rate,
u16 n, u16 m)
{
if ((!m || !n) || (m == n))
return parent_rate; /* In bypass mode runs at same frequency */
return mult_frac(parent_rate, (unsigned long)n, (unsigned long)m);
}
static unsigned long cdce925_pll_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
/* Output frequency of PLL is Fout = (Fin/Pdiv)*(N/M) */
struct clk_cdce925_pll *data = to_clk_cdce925_pll(hw);
return cdce925_pll_calculate_rate(parent_rate, data->n, data->m);
}
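/*
 * Worked example (illustrative): a 27 MHz parent and a requested 108 MHz
 * reduce via gcd() to n = 4, m = 1, giving exactly parent * 4 / 1. If the
 * reduced terms overflow the 12-bit N or 9-bit M fields, both are halved
 * until they fit, trading accuracy for range.
 */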
static void cdce925_pll_find_rate(unsigned long rate,
unsigned long parent_rate, u16 *n, u16 *m)
{
unsigned long un;
unsigned long um;
unsigned long g;
if (rate <= parent_rate) {
/* Can always deliver parent_rate in bypass mode */
rate = parent_rate;
*n = 0;
*m = 0;
} else {
/* In PLL mode, need to apply min/max range */
if (rate < CDCE925_PLL_FREQUENCY_MIN)
rate = CDCE925_PLL_FREQUENCY_MIN;
else if (rate > CDCE925_PLL_FREQUENCY_MAX)
rate = CDCE925_PLL_FREQUENCY_MAX;
g = gcd(rate, parent_rate);
um = parent_rate / g;
un = rate / g;
/* When outside hw range, reduce to fit (rounding errors) */
while ((un > 4095) || (um > 511)) {
un >>= 1;
um >>= 1;
}
if (un == 0)
un = 1;
if (um == 0)
um = 1;
*n = un;
*m = um;
}
}
static long cdce925_pll_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
u16 n, m;
cdce925_pll_find_rate(rate, *parent_rate, &n, &m);
return (long)cdce925_pll_calculate_rate(*parent_rate, n, m);
}
static int cdce925_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_cdce925_pll *data = to_clk_cdce925_pll(hw);
if (!rate || (rate == parent_rate)) {
data->m = 0; /* Bypass mode */
data->n = 0;
return 0;
}
if ((rate < CDCE925_PLL_FREQUENCY_MIN) ||
(rate > CDCE925_PLL_FREQUENCY_MAX)) {
pr_debug("%s: rate %lu outside PLL range.\n", __func__, rate);
return -EINVAL;
}
if (rate < parent_rate) {
pr_debug("%s: rate %lu less than parent rate %lu.\n", __func__,
rate, parent_rate);
return -EINVAL;
}
cdce925_pll_find_rate(rate, parent_rate, &data->n, &data->m);
return 0;
}
/* calculate p = max(0, 4 - int(log2 (n/m))) */
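/* e.g. (illustrative) n/m = 3: int(log2(3)) = 1, so p = 4 - 1 = 3 */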
static u8 cdce925_pll_calc_p(u16 n, u16 m)
{
u8 p;
u16 r = n / m;
if (r >= 16)
return 0;
p = 4;
while (r > 1) {
r >>= 1;
--p;
}
return p;
}
/* Returns VCO range bits for VCO1_0_RANGE */
static u8 cdce925_pll_calc_range_bits(struct clk_hw *hw, u16 n, u16 m)
{
struct clk *parent = clk_get_parent(hw->clk);
unsigned long rate = clk_get_rate(parent);
rate = mult_frac(rate, (unsigned long)n, (unsigned long)m);
if (rate >= 175000000)
return 0x3;
if (rate >= 150000000)
return 0x02;
if (rate >= 125000000)
return 0x01;
return 0x00;
}
/* Register access goes over I2C and may sleep, hence everything must
 * happen in the (un)prepare callbacks rather than enable/disable. */
static int cdce925_pll_prepare(struct clk_hw *hw)
{
struct clk_cdce925_pll *data = to_clk_cdce925_pll(hw);
u16 n = data->n;
u16 m = data->m;
u16 r;
u8 q;
u8 p;
u16 nn;
u8 pll[4]; /* Bits are spread out over 4 byte registers */
u8 reg_ofs = data->index * CDCE925_OFFSET_PLL;
	unsigned int i;
if ((!m || !n) || (m == n)) {
/* Set PLL mux to bypass mode, leave the rest as is */
regmap_update_bits(data->chip->regmap,
reg_ofs + CDCE925_PLL_MUX_OUTPUTS, 0x80, 0x80);
} else {
/* According to data sheet: */
/* p = max(0, 4 - int(log2 (n/m))) */
p = cdce925_pll_calc_p(n, m);
/* nn = n * 2^p */
nn = n * BIT(p);
/* q = int(nn/m) */
q = nn / m;
if ((q < 16) || (q > 63)) {
pr_debug("%s invalid q=%d\n", __func__, q);
return -EINVAL;
}
r = nn - (m*q);
if (r > 511) {
pr_debug("%s invalid r=%d\n", __func__, r);
return -EINVAL;
}
pr_debug("%s n=%d m=%d p=%d q=%d r=%d\n", __func__,
n, m, p, q, r);
/* encode into register bits */
pll[0] = n >> 4;
pll[1] = ((n & 0x0F) << 4) | ((r >> 5) & 0x0F);
pll[2] = ((r & 0x1F) << 3) | ((q >> 3) & 0x07);
pll[3] = ((q & 0x07) << 5) | (p << 2) |
cdce925_pll_calc_range_bits(hw, n, m);
/* Write to registers */
for (i = 0; i < ARRAY_SIZE(pll); ++i)
regmap_write(data->chip->regmap,
reg_ofs + CDCE925_PLL_MULDIV + i, pll[i]);
/* Enable PLL */
regmap_update_bits(data->chip->regmap,
reg_ofs + CDCE925_PLL_MUX_OUTPUTS, 0x80, 0x00);
}
return 0;
}
static void cdce925_pll_unprepare(struct clk_hw *hw)
{
struct clk_cdce925_pll *data = to_clk_cdce925_pll(hw);
u8 reg_ofs = data->index * CDCE925_OFFSET_PLL;
regmap_update_bits(data->chip->regmap,
reg_ofs + CDCE925_PLL_MUX_OUTPUTS, 0x80, 0x80);
}
static const struct clk_ops cdce925_pll_ops = {
.prepare = cdce925_pll_prepare,
.unprepare = cdce925_pll_unprepare,
.recalc_rate = cdce925_pll_recalc_rate,
.round_rate = cdce925_pll_round_rate,
.set_rate = cdce925_pll_set_rate,
};
static void cdce925_clk_set_pdiv(struct clk_cdce925_output *data, u16 pdiv)
{
switch (data->index) {
case 0:
regmap_update_bits(data->chip->regmap,
CDCE925_REG_Y1SPIPDIVH,
0x03, (pdiv >> 8) & 0x03);
		regmap_write(data->chip->regmap, CDCE925_REG_PDIVL,
			     pdiv & 0xFF);
break;
case 1:
regmap_update_bits(data->chip->regmap, 0x16, 0x7F, pdiv);
break;
case 2:
regmap_update_bits(data->chip->regmap, 0x17, 0x7F, pdiv);
break;
case 3:
regmap_update_bits(data->chip->regmap, 0x26, 0x7F, pdiv);
break;
case 4:
regmap_update_bits(data->chip->regmap, 0x27, 0x7F, pdiv);
break;
case 5:
regmap_update_bits(data->chip->regmap, 0x36, 0x7F, pdiv);
break;
case 6:
regmap_update_bits(data->chip->regmap, 0x37, 0x7F, pdiv);
break;
case 7:
regmap_update_bits(data->chip->regmap, 0x46, 0x7F, pdiv);
break;
case 8:
regmap_update_bits(data->chip->regmap, 0x47, 0x7F, pdiv);
break;
}
}
static void cdce925_clk_activate(struct clk_cdce925_output *data)
{
switch (data->index) {
case 0:
regmap_update_bits(data->chip->regmap,
CDCE925_REG_Y1SPIPDIVH, 0x0c, 0x0c);
break;
case 1:
case 2:
regmap_update_bits(data->chip->regmap, 0x14, 0x03, 0x03);
break;
case 3:
case 4:
regmap_update_bits(data->chip->regmap, 0x24, 0x03, 0x03);
break;
case 5:
case 6:
regmap_update_bits(data->chip->regmap, 0x34, 0x03, 0x03);
break;
case 7:
case 8:
regmap_update_bits(data->chip->regmap, 0x44, 0x03, 0x03);
break;
}
}
static int cdce925_clk_prepare(struct clk_hw *hw)
{
struct clk_cdce925_output *data = to_clk_cdce925_output(hw);
cdce925_clk_set_pdiv(data, data->pdiv);
cdce925_clk_activate(data);
return 0;
}
static void cdce925_clk_unprepare(struct clk_hw *hw)
{
struct clk_cdce925_output *data = to_clk_cdce925_output(hw);
/* Disable clock by setting divider to "0" */
cdce925_clk_set_pdiv(data, 0);
}
static unsigned long cdce925_clk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_cdce925_output *data = to_clk_cdce925_output(hw);
if (data->pdiv)
return parent_rate / data->pdiv;
return 0;
}
static u16 cdce925_calc_divider(unsigned long rate,
unsigned long parent_rate)
{
unsigned long divider;
if (!rate)
return 0;
if (rate >= parent_rate)
return 1;
divider = DIV_ROUND_CLOSEST(parent_rate, rate);
if (divider > 0x7F)
divider = 0x7F;
return (u16)divider;
}
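/*
 * Pick the PLL rate from which an integer post-divider best reaches the
 * requested rate: use bypass when the root clock divides evenly, otherwise
 * scan the 7-bit divider range and keep the pdiv with the smallest error.
 */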
static unsigned long cdce925_clk_best_parent_rate(
struct clk_hw *hw, unsigned long rate)
{
struct clk *pll = clk_get_parent(hw->clk);
struct clk *root = clk_get_parent(pll);
unsigned long root_rate = clk_get_rate(root);
unsigned long best_rate_error = rate;
u16 pdiv_min;
u16 pdiv_max;
u16 pdiv_best;
u16 pdiv_now;
if (root_rate % rate == 0)
return root_rate; /* Don't need the PLL, use bypass */
pdiv_min = (u16)max(1ul, DIV_ROUND_UP(CDCE925_PLL_FREQUENCY_MIN, rate));
pdiv_max = (u16)min(127ul, CDCE925_PLL_FREQUENCY_MAX / rate);
if (pdiv_min > pdiv_max)
		return 0; /* Cannot satisfy this rate */
pdiv_best = pdiv_min;
for (pdiv_now = pdiv_min; pdiv_now < pdiv_max; ++pdiv_now) {
unsigned long target_rate = rate * pdiv_now;
long pll_rate = clk_round_rate(pll, target_rate);
unsigned long actual_rate;
unsigned long rate_error;
if (pll_rate <= 0)
continue;
actual_rate = pll_rate / pdiv_now;
rate_error = abs((long)actual_rate - (long)rate);
if (rate_error < best_rate_error) {
pdiv_best = pdiv_now;
best_rate_error = rate_error;
}
/* TODO: Consider PLL frequency based on smaller n/m values
* and pick the better one if the error is equal */
}
return rate * pdiv_best;
}
static long cdce925_clk_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
unsigned long l_parent_rate = *parent_rate;
u16 divider = cdce925_calc_divider(rate, l_parent_rate);
if (l_parent_rate / divider != rate) {
l_parent_rate = cdce925_clk_best_parent_rate(hw, rate);
divider = cdce925_calc_divider(rate, l_parent_rate);
*parent_rate = l_parent_rate;
}
if (divider)
return (long)(l_parent_rate / divider);
return 0;
}
static int cdce925_clk_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_cdce925_output *data = to_clk_cdce925_output(hw);
data->pdiv = cdce925_calc_divider(rate, parent_rate);
return 0;
}
static const struct clk_ops cdce925_clk_ops = {
.prepare = cdce925_clk_prepare,
.unprepare = cdce925_clk_unprepare,
.recalc_rate = cdce925_clk_recalc_rate,
.round_rate = cdce925_clk_round_rate,
.set_rate = cdce925_clk_set_rate,
};
static u16 cdce925_y1_calc_divider(unsigned long rate,
unsigned long parent_rate)
{
unsigned long divider;
if (!rate)
return 0;
if (rate >= parent_rate)
return 1;
divider = DIV_ROUND_CLOSEST(parent_rate, rate);
if (divider > 0x3FF) /* Y1 has 10-bit divider */
divider = 0x3FF;
return (u16)divider;
}
static long cdce925_clk_y1_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
unsigned long l_parent_rate = *parent_rate;
u16 divider = cdce925_y1_calc_divider(rate, l_parent_rate);
if (divider)
return (long)(l_parent_rate / divider);
return 0;
}
static int cdce925_clk_y1_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_cdce925_output *data = to_clk_cdce925_output(hw);
data->pdiv = cdce925_y1_calc_divider(rate, parent_rate);
return 0;
}
static const struct clk_ops cdce925_clk_y1_ops = {
.prepare = cdce925_clk_prepare,
.unprepare = cdce925_clk_unprepare,
.recalc_rate = cdce925_clk_recalc_rate,
.round_rate = cdce925_clk_y1_round_rate,
.set_rate = cdce925_clk_y1_set_rate,
};
#define CDCE925_I2C_COMMAND_BLOCK_TRANSFER 0x00
#define CDCE925_I2C_COMMAND_BYTE_TRANSFER 0x80
static int cdce925_regmap_i2c_write(
void *context, const void *data, size_t count)
{
struct device *dev = context;
struct i2c_client *i2c = to_i2c_client(dev);
int ret;
u8 reg_data[2];
if (count != 2)
return -ENOTSUPP;
/* First byte is command code */
reg_data[0] = CDCE925_I2C_COMMAND_BYTE_TRANSFER | ((u8 *)data)[0];
reg_data[1] = ((u8 *)data)[1];
dev_dbg(&i2c->dev, "%s(%zu) %#x %#x\n", __func__, count,
reg_data[0], reg_data[1]);
ret = i2c_master_send(i2c, reg_data, count);
if (likely(ret == count))
return 0;
else if (ret < 0)
return ret;
else
return -EIO;
}
static int cdce925_regmap_i2c_read(void *context,
const void *reg, size_t reg_size, void *val, size_t val_size)
{
struct device *dev = context;
struct i2c_client *i2c = to_i2c_client(dev);
struct i2c_msg xfer[2];
int ret;
u8 reg_data[2];
if (reg_size != 1)
return -ENOTSUPP;
xfer[0].addr = i2c->addr;
xfer[0].flags = 0;
xfer[0].buf = reg_data;
if (val_size == 1) {
reg_data[0] =
CDCE925_I2C_COMMAND_BYTE_TRANSFER | ((u8 *)reg)[0];
xfer[0].len = 1;
} else {
reg_data[0] =
CDCE925_I2C_COMMAND_BLOCK_TRANSFER | ((u8 *)reg)[0];
reg_data[1] = val_size;
xfer[0].len = 2;
}
xfer[1].addr = i2c->addr;
xfer[1].flags = I2C_M_RD;
xfer[1].len = val_size;
xfer[1].buf = val;
ret = i2c_transfer(i2c->adapter, xfer, 2);
if (likely(ret == 2)) {
dev_dbg(&i2c->dev, "%s(%zu, %zu) %#x %#x\n", __func__,
reg_size, val_size, reg_data[0], *((u8 *)val));
return 0;
} else if (ret < 0)
return ret;
else
return -EIO;
}
static struct clk_hw *
of_clk_cdce925_get(struct of_phandle_args *clkspec, void *_data)
{
struct clk_cdce925_chip *data = _data;
unsigned int idx = clkspec->args[0];
if (idx >= ARRAY_SIZE(data->clk)) {
pr_err("%s: invalid index %u\n", __func__, idx);
return ERR_PTR(-EINVAL);
}
return &data->clk[idx].hw;
}
static int cdce925_regulator_enable(struct device *dev, const char *name)
{
int err;
err = devm_regulator_get_enable(dev, name);
if (err)
		dev_err_probe(dev, err, "Failed to enable %s\n", name);
return err;
}
/* The CDCE925 uses a non-standard way to read/write registers. Bulk
 * mode is awkward to support, so use the single-byte mode exclusively. */
static struct regmap_bus regmap_cdce925_bus = {
.write = cdce925_regmap_i2c_write,
.read = cdce925_regmap_i2c_read,
};
static const struct i2c_device_id cdce925_id[] = {
{ "cdce913", CDCE913 },
{ "cdce925", CDCE925 },
{ "cdce937", CDCE937 },
{ "cdce949", CDCE949 },
{ }
};
MODULE_DEVICE_TABLE(i2c, cdce925_id);
static int cdce925_probe(struct i2c_client *client)
{
struct clk_cdce925_chip *data;
struct device_node *node = client->dev.of_node;
const struct i2c_device_id *id = i2c_match_id(cdce925_id, client);
const char *parent_name;
const char *pll_clk_name[MAX_NUMBER_OF_PLLS] = {NULL,};
struct clk_init_data init;
u32 value;
int i;
int err;
struct device_node *np_output;
char child_name[6];
struct regmap_config config = {
.name = "configuration0",
.reg_bits = 8,
.val_bits = 8,
.cache_type = REGCACHE_RBTREE,
};
dev_dbg(&client->dev, "%s\n", __func__);
err = cdce925_regulator_enable(&client->dev, "vdd");
if (err)
return err;
err = cdce925_regulator_enable(&client->dev, "vddout");
if (err)
return err;
data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->i2c_client = client;
data->chip_info = &clk_cdce925_chip_info_tbl[id->driver_data];
config.max_register = CDCE925_OFFSET_PLL +
data->chip_info->num_plls * 0x10 - 1;
data->regmap = devm_regmap_init(&client->dev, ®map_cdce925_bus,
&client->dev, &config);
if (IS_ERR(data->regmap)) {
dev_err(&client->dev, "failed to allocate register map\n");
return PTR_ERR(data->regmap);
}
i2c_set_clientdata(client, data);
parent_name = of_clk_get_parent_name(node, 0);
if (!parent_name) {
dev_err(&client->dev, "missing parent clock\n");
return -ENODEV;
}
dev_dbg(&client->dev, "parent is: %s\n", parent_name);
if (of_property_read_u32(node, "xtal-load-pf", &value) == 0)
regmap_write(data->regmap,
CDCE925_REG_XCSEL, (value << 3) & 0xF8);
/* PWDN bit */
regmap_update_bits(data->regmap, CDCE925_REG_GLOBAL1, BIT(4), 0);
/* Set input source for Y1 to be the XTAL */
regmap_update_bits(data->regmap, 0x02, BIT(7), 0);
init.ops = &cdce925_pll_ops;
init.flags = 0;
init.parent_names = &parent_name;
init.num_parents = 1;
/* Register PLL clocks */
for (i = 0; i < data->chip_info->num_plls; ++i) {
pll_clk_name[i] = kasprintf(GFP_KERNEL, "%pOFn.pll%d",
client->dev.of_node, i);
if (!pll_clk_name[i]) {
err = -ENOMEM;
goto error;
}
init.name = pll_clk_name[i];
data->pll[i].chip = data;
data->pll[i].hw.init = &init;
data->pll[i].index = i;
err = devm_clk_hw_register(&client->dev, &data->pll[i].hw);
if (err) {
dev_err(&client->dev, "Failed register PLL %d\n", i);
goto error;
}
sprintf(child_name, "PLL%d", i+1);
np_output = of_get_child_by_name(node, child_name);
if (!np_output)
continue;
if (!of_property_read_u32(np_output,
"clock-frequency", &value)) {
err = clk_set_rate(data->pll[i].hw.clk, value);
if (err)
dev_err(&client->dev,
"unable to set PLL frequency %ud\n",
value);
}
if (!of_property_read_u32(np_output,
"spread-spectrum", &value)) {
u8 flag = of_property_read_bool(np_output,
"spread-spectrum-center") ? 0x80 : 0x00;
regmap_update_bits(data->regmap,
0x16 + (i*CDCE925_OFFSET_PLL),
0x80, flag);
regmap_update_bits(data->regmap,
0x12 + (i*CDCE925_OFFSET_PLL),
0x07, value & 0x07);
}
of_node_put(np_output);
}
/* Register output clock Y1 */
init.ops = &cdce925_clk_y1_ops;
init.flags = 0;
init.num_parents = 1;
init.parent_names = &parent_name; /* Mux Y1 to input */
init.name = kasprintf(GFP_KERNEL, "%pOFn.Y1", client->dev.of_node);
if (!init.name) {
err = -ENOMEM;
goto error;
}
data->clk[0].chip = data;
data->clk[0].hw.init = &init;
data->clk[0].index = 0;
data->clk[0].pdiv = 1;
err = devm_clk_hw_register(&client->dev, &data->clk[0].hw);
kfree(init.name); /* clock framework made a copy of the name */
if (err) {
dev_err(&client->dev, "clock registration Y1 failed\n");
goto error;
}
	/* Register the remaining output clocks Y2 .. Y9 (chip dependent) */
init.ops = &cdce925_clk_ops;
init.flags = CLK_SET_RATE_PARENT;
init.num_parents = 1;
for (i = 1; i < data->chip_info->num_outputs; ++i) {
init.name = kasprintf(GFP_KERNEL, "%pOFn.Y%d",
client->dev.of_node, i+1);
if (!init.name) {
err = -ENOMEM;
goto error;
}
data->clk[i].chip = data;
data->clk[i].hw.init = &init;
data->clk[i].index = i;
data->clk[i].pdiv = 1;
switch (i) {
case 1:
case 2:
/* Mux Y2/3 to PLL1 */
init.parent_names = &pll_clk_name[0];
break;
case 3:
case 4:
/* Mux Y4/5 to PLL2 */
init.parent_names = &pll_clk_name[1];
break;
case 5:
case 6:
/* Mux Y6/7 to PLL3 */
init.parent_names = &pll_clk_name[2];
break;
case 7:
case 8:
/* Mux Y8/9 to PLL4 */
init.parent_names = &pll_clk_name[3];
break;
}
err = devm_clk_hw_register(&client->dev, &data->clk[i].hw);
kfree(init.name); /* clock framework made a copy of the name */
if (err) {
dev_err(&client->dev, "clock registration failed\n");
goto error;
}
}
	/* Expose the output clocks through the OF clock provider */
err = of_clk_add_hw_provider(client->dev.of_node, of_clk_cdce925_get,
data);
if (err)
dev_err(&client->dev, "unable to add OF clock provider\n");
err = 0;
error:
for (i = 0; i < data->chip_info->num_plls; ++i)
/* clock framework made a copy of the name */
kfree(pll_clk_name[i]);
return err;
}
static const struct of_device_id clk_cdce925_of_match[] = {
{ .compatible = "ti,cdce913" },
{ .compatible = "ti,cdce925" },
{ .compatible = "ti,cdce937" },
{ .compatible = "ti,cdce949" },
{ },
};
MODULE_DEVICE_TABLE(of, clk_cdce925_of_match);
static struct i2c_driver cdce925_driver = {
.driver = {
.name = "cdce925",
.of_match_table = clk_cdce925_of_match,
},
.probe = cdce925_probe,
.id_table = cdce925_id,
};
module_i2c_driver(cdce925_driver);
MODULE_AUTHOR("Mike Looijmans <[email protected]>");
MODULE_DESCRIPTION("TI CDCE913/925/937/949 driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/clk/clk-cdce925.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* WM831x clock control
*
* Copyright 2011-2 Wolfson Microelectronics PLC.
*
* Author: Mark Brown <[email protected]>
*/
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/mfd/wm831x/core.h>
struct wm831x_clk {
struct wm831x *wm831x;
struct clk_hw xtal_hw;
struct clk_hw fll_hw;
struct clk_hw clkout_hw;
bool xtal_ena;
};
static int wm831x_xtal_is_prepared(struct clk_hw *hw)
{
struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
xtal_hw);
return clkdata->xtal_ena;
}
static unsigned long wm831x_xtal_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
xtal_hw);
if (clkdata->xtal_ena)
return 32768;
else
return 0;
}
static const struct clk_ops wm831x_xtal_ops = {
.is_prepared = wm831x_xtal_is_prepared,
.recalc_rate = wm831x_xtal_recalc_rate,
};
static const struct clk_init_data wm831x_xtal_init = {
.name = "xtal",
.ops = &wm831x_xtal_ops,
};
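/* The table index is the value written to the FLL_AUTO_FREQ field */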
static const unsigned long wm831x_fll_auto_rates[] = {
2048000,
11289600,
12000000,
12288000,
19200000,
22579600,
24000000,
24576000,
};
static int wm831x_fll_is_prepared(struct clk_hw *hw)
{
struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
fll_hw);
struct wm831x *wm831x = clkdata->wm831x;
int ret;
ret = wm831x_reg_read(wm831x, WM831X_FLL_CONTROL_1);
if (ret < 0) {
dev_err(wm831x->dev, "Unable to read FLL_CONTROL_1: %d\n",
ret);
return true;
}
return (ret & WM831X_FLL_ENA) != 0;
}
static int wm831x_fll_prepare(struct clk_hw *hw)
{
struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
fll_hw);
struct wm831x *wm831x = clkdata->wm831x;
int ret;
ret = wm831x_set_bits(wm831x, WM831X_FLL_CONTROL_1,
WM831X_FLL_ENA, WM831X_FLL_ENA);
if (ret != 0)
dev_crit(wm831x->dev, "Failed to enable FLL: %d\n", ret);
/* wait 2-3 ms for new frequency taking effect */
usleep_range(2000, 3000);
return ret;
}
static void wm831x_fll_unprepare(struct clk_hw *hw)
{
struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
fll_hw);
struct wm831x *wm831x = clkdata->wm831x;
int ret;
ret = wm831x_set_bits(wm831x, WM831X_FLL_CONTROL_1, WM831X_FLL_ENA, 0);
if (ret != 0)
dev_crit(wm831x->dev, "Failed to disable FLL: %d\n", ret);
}
static unsigned long wm831x_fll_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
fll_hw);
struct wm831x *wm831x = clkdata->wm831x;
int ret;
ret = wm831x_reg_read(wm831x, WM831X_CLOCK_CONTROL_2);
if (ret < 0) {
dev_err(wm831x->dev, "Unable to read CLOCK_CONTROL_2: %d\n",
ret);
return 0;
}
if (ret & WM831X_FLL_AUTO)
return wm831x_fll_auto_rates[ret & WM831X_FLL_AUTO_FREQ_MASK];
dev_err(wm831x->dev, "FLL only supported in AUTO mode\n");
return 0;
}
static long wm831x_fll_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *unused)
{
int best = 0;
int i;
for (i = 0; i < ARRAY_SIZE(wm831x_fll_auto_rates); i++)
if (abs(wm831x_fll_auto_rates[i] - rate) <
abs(wm831x_fll_auto_rates[best] - rate))
best = i;
return wm831x_fll_auto_rates[best];
}
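/*
 * For the rounding above (illustrative): a request for 12.5 MHz yields
 * 12.288 MHz, the nearest entry in wm831x_fll_auto_rates[].
 */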
static int wm831x_fll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
fll_hw);
struct wm831x *wm831x = clkdata->wm831x;
int i;
for (i = 0; i < ARRAY_SIZE(wm831x_fll_auto_rates); i++)
if (wm831x_fll_auto_rates[i] == rate)
break;
if (i == ARRAY_SIZE(wm831x_fll_auto_rates))
return -EINVAL;
if (wm831x_fll_is_prepared(hw))
return -EPERM;
return wm831x_set_bits(wm831x, WM831X_CLOCK_CONTROL_2,
WM831X_FLL_AUTO_FREQ_MASK, i);
}
static const char *wm831x_fll_parents[] = {
"xtal",
"clkin",
};
static u8 wm831x_fll_get_parent(struct clk_hw *hw)
{
struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
fll_hw);
struct wm831x *wm831x = clkdata->wm831x;
int ret;
/* AUTO mode is always clocked from the crystal */
ret = wm831x_reg_read(wm831x, WM831X_CLOCK_CONTROL_2);
if (ret < 0) {
dev_err(wm831x->dev, "Unable to read CLOCK_CONTROL_2: %d\n",
ret);
return 0;
}
if (ret & WM831X_FLL_AUTO)
return 0;
ret = wm831x_reg_read(wm831x, WM831X_FLL_CONTROL_5);
if (ret < 0) {
dev_err(wm831x->dev, "Unable to read FLL_CONTROL_5: %d\n",
ret);
return 0;
}
switch (ret & WM831X_FLL_CLK_SRC_MASK) {
case 0:
return 0;
case 1:
return 1;
default:
dev_err(wm831x->dev, "Unsupported FLL clock source %d\n",
ret & WM831X_FLL_CLK_SRC_MASK);
return 0;
}
}
static const struct clk_ops wm831x_fll_ops = {
.is_prepared = wm831x_fll_is_prepared,
.prepare = wm831x_fll_prepare,
.unprepare = wm831x_fll_unprepare,
.round_rate = wm831x_fll_round_rate,
.recalc_rate = wm831x_fll_recalc_rate,
.set_rate = wm831x_fll_set_rate,
.get_parent = wm831x_fll_get_parent,
};
static const struct clk_init_data wm831x_fll_init = {
.name = "fll",
.ops = &wm831x_fll_ops,
.parent_names = wm831x_fll_parents,
.num_parents = ARRAY_SIZE(wm831x_fll_parents),
.flags = CLK_SET_RATE_GATE,
};
static int wm831x_clkout_is_prepared(struct clk_hw *hw)
{
struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
clkout_hw);
struct wm831x *wm831x = clkdata->wm831x;
int ret;
ret = wm831x_reg_read(wm831x, WM831X_CLOCK_CONTROL_1);
if (ret < 0) {
dev_err(wm831x->dev, "Unable to read CLOCK_CONTROL_1: %d\n",
ret);
return false;
}
return (ret & WM831X_CLKOUT_ENA) != 0;
}
static int wm831x_clkout_prepare(struct clk_hw *hw)
{
struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
clkout_hw);
struct wm831x *wm831x = clkdata->wm831x;
int ret;
ret = wm831x_reg_unlock(wm831x);
if (ret != 0) {
dev_crit(wm831x->dev, "Failed to lock registers: %d\n", ret);
return ret;
}
ret = wm831x_set_bits(wm831x, WM831X_CLOCK_CONTROL_1,
WM831X_CLKOUT_ENA, WM831X_CLKOUT_ENA);
if (ret != 0)
dev_crit(wm831x->dev, "Failed to enable CLKOUT: %d\n", ret);
wm831x_reg_lock(wm831x);
return ret;
}
static void wm831x_clkout_unprepare(struct clk_hw *hw)
{
struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
clkout_hw);
struct wm831x *wm831x = clkdata->wm831x;
int ret;
ret = wm831x_reg_unlock(wm831x);
if (ret != 0) {
dev_crit(wm831x->dev, "Failed to lock registers: %d\n", ret);
return;
}
ret = wm831x_set_bits(wm831x, WM831X_CLOCK_CONTROL_1,
WM831X_CLKOUT_ENA, 0);
if (ret != 0)
dev_crit(wm831x->dev, "Failed to disable CLKOUT: %d\n", ret);
wm831x_reg_lock(wm831x);
}
static const char *wm831x_clkout_parents[] = {
"fll",
"xtal",
};
static u8 wm831x_clkout_get_parent(struct clk_hw *hw)
{
struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
clkout_hw);
struct wm831x *wm831x = clkdata->wm831x;
int ret;
ret = wm831x_reg_read(wm831x, WM831X_CLOCK_CONTROL_1);
if (ret < 0) {
dev_err(wm831x->dev, "Unable to read CLOCK_CONTROL_1: %d\n",
ret);
return 0;
}
if (ret & WM831X_CLKOUT_SRC)
return 1;
else
return 0;
}
static int wm831x_clkout_set_parent(struct clk_hw *hw, u8 parent)
{
struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
clkout_hw);
struct wm831x *wm831x = clkdata->wm831x;
return wm831x_set_bits(wm831x, WM831X_CLOCK_CONTROL_1,
WM831X_CLKOUT_SRC,
parent << WM831X_CLKOUT_SRC_SHIFT);
}
static const struct clk_ops wm831x_clkout_ops = {
.is_prepared = wm831x_clkout_is_prepared,
.prepare = wm831x_clkout_prepare,
.unprepare = wm831x_clkout_unprepare,
.determine_rate = clk_hw_determine_rate_no_reparent,
.get_parent = wm831x_clkout_get_parent,
.set_parent = wm831x_clkout_set_parent,
};
static const struct clk_init_data wm831x_clkout_init = {
.name = "clkout",
.ops = &wm831x_clkout_ops,
.parent_names = wm831x_clkout_parents,
.num_parents = ARRAY_SIZE(wm831x_clkout_parents),
.flags = CLK_SET_RATE_PARENT,
};
static int wm831x_clk_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_clk *clkdata;
int ret;
clkdata = devm_kzalloc(&pdev->dev, sizeof(*clkdata), GFP_KERNEL);
if (!clkdata)
return -ENOMEM;
clkdata->wm831x = wm831x;
/* XTAL_ENA can only be set via OTP/InstantConfig so just read once */
ret = wm831x_reg_read(wm831x, WM831X_CLOCK_CONTROL_2);
if (ret < 0) {
dev_err(wm831x->dev, "Unable to read CLOCK_CONTROL_2: %d\n",
ret);
return ret;
}
clkdata->xtal_ena = ret & WM831X_XTAL_ENA;
clkdata->xtal_hw.init = &wm831x_xtal_init;
ret = devm_clk_hw_register(&pdev->dev, &clkdata->xtal_hw);
if (ret)
return ret;
clkdata->fll_hw.init = &wm831x_fll_init;
ret = devm_clk_hw_register(&pdev->dev, &clkdata->fll_hw);
if (ret)
return ret;
clkdata->clkout_hw.init = &wm831x_clkout_init;
ret = devm_clk_hw_register(&pdev->dev, &clkdata->clkout_hw);
if (ret)
return ret;
platform_set_drvdata(pdev, clkdata);
return 0;
}
static struct platform_driver wm831x_clk_driver = {
.probe = wm831x_clk_probe,
.driver = {
.name = "wm831x-clk",
},
};
module_platform_driver(wm831x_clk_driver);
/* Module information */
MODULE_AUTHOR("Mark Brown <[email protected]>");
MODULE_DESCRIPTION("WM831x clock driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:wm831x-clk");
| linux-master | drivers/clk/clk-wm831x.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2014 Samsung Electronics Co., Ltd.
* Sylwester Nawrocki <[email protected]>
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/clk-conf.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/printk.h>
static int __set_clk_parents(struct device_node *node, bool clk_supplier)
{
struct of_phandle_args clkspec;
int index, rc, num_parents;
struct clk *clk, *pclk;
num_parents = of_count_phandle_with_args(node, "assigned-clock-parents",
"#clock-cells");
if (num_parents == -EINVAL)
pr_err("clk: invalid value of clock-parents property at %pOF\n",
node);
for (index = 0; index < num_parents; index++) {
rc = of_parse_phandle_with_args(node, "assigned-clock-parents",
"#clock-cells", index, &clkspec);
if (rc < 0) {
/* skip empty (null) phandles */
if (rc == -ENOENT)
continue;
else
return rc;
}
if (clkspec.np == node && !clk_supplier) {
of_node_put(clkspec.np);
return 0;
}
pclk = of_clk_get_from_provider(&clkspec);
of_node_put(clkspec.np);
if (IS_ERR(pclk)) {
if (PTR_ERR(pclk) != -EPROBE_DEFER)
pr_warn("clk: couldn't get parent clock %d for %pOF\n",
index, node);
return PTR_ERR(pclk);
}
rc = of_parse_phandle_with_args(node, "assigned-clocks",
"#clock-cells", index, &clkspec);
if (rc < 0)
goto err;
if (clkspec.np == node && !clk_supplier) {
of_node_put(clkspec.np);
rc = 0;
goto err;
}
clk = of_clk_get_from_provider(&clkspec);
of_node_put(clkspec.np);
if (IS_ERR(clk)) {
if (PTR_ERR(clk) != -EPROBE_DEFER)
pr_warn("clk: couldn't get assigned clock %d for %pOF\n",
index, node);
rc = PTR_ERR(clk);
goto err;
}
rc = clk_set_parent(clk, pclk);
if (rc < 0)
pr_err("clk: failed to reparent %s to %s: %d\n",
__clk_get_name(clk), __clk_get_name(pclk), rc);
clk_put(clk);
clk_put(pclk);
}
return 0;
err:
clk_put(pclk);
return rc;
}
static int __set_clk_rates(struct device_node *node, bool clk_supplier)
{
struct of_phandle_args clkspec;
struct property *prop;
const __be32 *cur;
int rc, index = 0;
struct clk *clk;
u32 rate;
of_property_for_each_u32(node, "assigned-clock-rates", prop, cur, rate) {
if (rate) {
rc = of_parse_phandle_with_args(node, "assigned-clocks",
"#clock-cells", index, &clkspec);
if (rc < 0) {
/* skip empty (null) phandles */
if (rc == -ENOENT)
continue;
else
return rc;
}
if (clkspec.np == node && !clk_supplier) {
of_node_put(clkspec.np);
return 0;
}
clk = of_clk_get_from_provider(&clkspec);
of_node_put(clkspec.np);
if (IS_ERR(clk)) {
if (PTR_ERR(clk) != -EPROBE_DEFER)
pr_warn("clk: couldn't get clock %d for %pOF\n",
index, node);
return PTR_ERR(clk);
}
rc = clk_set_rate(clk, rate);
if (rc < 0)
pr_err("clk: couldn't set %s clk rate to %u (%d), current rate: %lu\n",
__clk_get_name(clk), rate, rc,
clk_get_rate(clk));
clk_put(clk);
}
index++;
}
return 0;
}
/**
* of_clk_set_defaults() - parse and set assigned clocks configuration
* @node: device node to apply clock settings for
* @clk_supplier: true if clocks supplied by @node should also be considered
*
* This function parses 'assigned-{clocks/clock-parents/clock-rates}' properties
* and sets any specified clock parents and rates. The @clk_supplier argument
* should be set to true if @node may be also a clock supplier of any clock
* listed in its 'assigned-clocks' or 'assigned-clock-parents' properties.
* If @clk_supplier is false the function exits returning 0 as soon as it
* determines the @node is also a supplier of any of the clocks.
*/
int of_clk_set_defaults(struct device_node *node, bool clk_supplier)
{
int rc;
if (!node)
return 0;
rc = __set_clk_parents(node, clk_supplier);
if (rc < 0)
return rc;
return __set_clk_rates(node, clk_supplier);
}
EXPORT_SYMBOL_GPL(of_clk_set_defaults);
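/*
 * Illustrative consumer node (hypothetical, not from this file) showing the
 * properties this code parses:
 *
 *	uart@a000 {
 *		compatible = "acme,uart";
 *		clocks = <&clkcon 0>;
 *		assigned-clocks = <&clkcon 0>;
 *		assigned-clock-parents = <&pll 2>;
 *		assigned-clock-rates = <460800>;
 *	};
 */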
| linux-master | drivers/clk/clk-conf.c |
// SPDX-License-Identifier: GPL-2.0
/*
* LMK04832 Ultra Low-Noise JESD204B Compliant Clock Jitter Cleaner
* Pin compatible with the LMK0482x family
*
* Datasheet: https://www.ti.com/lit/ds/symlink/lmk04832.pdf
*
* Copyright (c) 2020, Xiphos Systems Corp.
*
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/gcd.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
/* 0x000 - 0x00d System Functions */
#define LMK04832_REG_RST3W 0x000
#define LMK04832_BIT_RESET BIT(7)
#define LMK04832_BIT_SPI_3WIRE_DIS BIT(4)
#define LMK04832_REG_POWERDOWN 0x002
#define LMK04832_REG_ID_DEV_TYPE 0x003
#define LMK04832_REG_ID_PROD_MSB 0x004
#define LMK04832_REG_ID_PROD_LSB 0x005
#define LMK04832_REG_ID_MASKREV 0x006
#define LMK04832_REG_ID_VNDR_MSB 0x00c
#define LMK04832_REG_ID_VNDR_LSB 0x00d
/* 0x100 - 0x137 Device Clock and SYSREF Clock Output Control */
#define LMK04832_REG_CLKOUT_CTRL0(ch) (0x100 + (ch >> 1) * 8)
#define LMK04832_BIT_DCLK_DIV_LSB GENMASK(7, 0)
#define LMK04832_REG_CLKOUT_CTRL1(ch) (0x101 + (ch >> 1) * 8)
#define LMK04832_BIT_DCLKX_Y_DDLY_LSB GENMASK(7, 0)
#define LMK04832_REG_CLKOUT_CTRL2(ch) (0x102 + (ch >> 1) * 8)
#define LMK04832_BIT_CLKOUTX_Y_PD BIT(7)
#define LMK04832_BIT_DCLKX_Y_DDLY_PD BIT(4)
#define LMK04832_BIT_DCLKX_Y_DDLY_MSB GENMASK(3, 2)
#define LMK04832_BIT_DCLK_DIV_MSB GENMASK(1, 0)
#define LMK04832_REG_CLKOUT_SRC_MUX(ch) (0x103 + (ch % 2) + (ch >> 1) * 8)
#define LMK04832_BIT_CLKOUT_SRC_MUX BIT(5)
#define LMK04832_REG_CLKOUT_CTRL3(ch) (0x103 + (ch >> 1) * 8)
#define LMK04832_BIT_DCLKX_Y_PD BIT(4)
#define LMK04832_BIT_DCLKX_Y_DCC BIT(2)
#define LMK04832_BIT_DCLKX_Y_HS BIT(0)
#define LMK04832_REG_CLKOUT_CTRL4(ch) (0x104 + (ch >> 1) * 8)
#define LMK04832_BIT_SCLK_PD BIT(4)
#define LMK04832_BIT_SCLKX_Y_DIS_MODE GENMASK(3, 2)
#define LMK04832_REG_SCLKX_Y_ADLY(ch) (0x105 + (ch >> 1) * 8)
#define LMK04832_REG_SCLKX_Y_DDLY(ch) (0x106 + (ch >> 1) * 8)
#define LMK04832_BIT_SCLKX_Y_DDLY GENMASK(3, 0)
#define LMK04832_REG_CLKOUT_FMT(ch) (0x107 + (ch >> 1) * 8)
#define LMK04832_BIT_CLKOUT_FMT(ch) (ch % 2 ? 0xf0 : 0x0f)
#define LMK04832_VAL_CLKOUT_FMT_POWERDOWN 0x00
#define LMK04832_VAL_CLKOUT_FMT_LVDS 0x01
#define LMK04832_VAL_CLKOUT_FMT_HSDS6 0x02
#define LMK04832_VAL_CLKOUT_FMT_HSDS8 0x03
#define LMK04832_VAL_CLKOUT_FMT_LVPECL1600 0x04
#define LMK04832_VAL_CLKOUT_FMT_LVPECL2000 0x05
#define LMK04832_VAL_CLKOUT_FMT_LCPECL 0x06
#define LMK04832_VAL_CLKOUT_FMT_CML16 0x07
#define LMK04832_VAL_CLKOUT_FMT_CML24 0x08
#define LMK04832_VAL_CLKOUT_FMT_CML32 0x09
#define LMK04832_VAL_CLKOUT_FMT_CMOS_OFF_INV 0x0a
#define LMK04832_VAL_CLKOUT_FMT_CMOS_NOR_OFF 0x0b
#define LMK04832_VAL_CLKOUT_FMT_CMOS_INV_INV 0x0c
#define LMK04832_VAL_CLKOUT_FMT_CMOS_INV_NOR 0x0d
#define LMK04832_VAL_CLKOUT_FMT_CMOS_NOR_INV 0x0e
#define LMK04832_VAL_CLKOUT_FMT_CMOS_NOR_NOR 0x0f
/* 0x138 - 0x145 SYSREF, SYNC, and Device Config */
#define LMK04832_REG_VCO_OSCOUT 0x138
#define LMK04832_BIT_VCO_MUX GENMASK(6, 5)
#define LMK04832_VAL_VCO_MUX_VCO0 0x00
#define LMK04832_VAL_VCO_MUX_VCO1 0x01
#define LMK04832_VAL_VCO_MUX_EXT 0x02
#define LMK04832_REG_SYSREF_OUT 0x139
#define LMK04832_BIT_SYSREF_REQ_EN BIT(6)
#define LMK04832_BIT_SYSREF_MUX GENMASK(1, 0)
#define LMK04832_VAL_SYSREF_MUX_NORMAL_SYNC 0x00
#define LMK04832_VAL_SYSREF_MUX_RECLK 0x01
#define LMK04832_VAL_SYSREF_MUX_PULSER 0x02
#define LMK04832_VAL_SYSREF_MUX_CONTINUOUS 0x03
#define LMK04832_REG_SYSREF_DIV_MSB 0x13a
#define LMK04832_BIT_SYSREF_DIV_MSB GENMASK(4, 0)
#define LMK04832_REG_SYSREF_DIV_LSB 0x13b
#define LMK04832_REG_SYSREF_DDLY_MSB 0x13c
#define LMK04832_BIT_SYSREF_DDLY_MSB GENMASK(4, 0)
#define LMK04832_REG_SYSREF_DDLY_LSB 0x13d
#define LMK04832_REG_SYSREF_PULSE_CNT 0x13e
#define LMK04832_REG_FB_CTRL 0x13f
#define LMK04832_BIT_PLL2_RCLK_MUX BIT(7)
#define LMK04832_VAL_PLL2_RCLK_MUX_OSCIN 0x00
#define LMK04832_VAL_PLL2_RCLK_MUX_CLKIN 0x01
#define LMK04832_BIT_PLL2_NCLK_MUX BIT(5)
#define LMK04832_VAL_PLL2_NCLK_MUX_PLL2_P 0x00
#define LMK04832_VAL_PLL2_NCLK_MUX_FB_MUX 0x01
#define LMK04832_BIT_FB_MUX_EN BIT(0)
#define LMK04832_REG_MAIN_PD 0x140
#define LMK04832_BIT_PLL1_PD BIT(7)
#define LMK04832_BIT_VCO_LDO_PD BIT(6)
#define LMK04832_BIT_VCO_PD BIT(5)
#define LMK04832_BIT_OSCIN_PD BIT(4)
#define LMK04832_BIT_SYSREF_GBL_PD BIT(3)
#define LMK04832_BIT_SYSREF_PD BIT(2)
#define LMK04832_BIT_SYSREF_DDLY_PD BIT(1)
#define LMK04832_BIT_SYSREF_PLSR_PD BIT(0)
#define LMK04832_REG_SYNC 0x143
#define LMK04832_BIT_SYNC_CLR BIT(7)
#define LMK04832_BIT_SYNC_1SHOT_EN BIT(6)
#define LMK04832_BIT_SYNC_POL BIT(5)
#define LMK04832_BIT_SYNC_EN BIT(4)
#define LMK04832_BIT_SYNC_MODE GENMASK(1, 0)
#define LMK04832_VAL_SYNC_MODE_OFF 0x00
#define LMK04832_VAL_SYNC_MODE_ON 0x01
#define LMK04832_VAL_SYNC_MODE_PULSER_PIN 0x02
#define LMK04832_VAL_SYNC_MODE_PULSER_SPI 0x03
#define LMK04832_REG_SYNC_DIS 0x144
/* 0x146 - 0x14a CLKin Control */
#define LMK04832_REG_CLKIN_SEL0 0x148
#define LMK04832_REG_CLKIN_SEL1 0x149
#define LMK04832_REG_CLKIN_RST 0x14a
#define LMK04832_BIT_SDIO_RDBK_TYPE BIT(6)
#define LMK04832_BIT_CLKIN_SEL_MUX GENMASK(5, 3)
#define LMK04832_VAL_CLKIN_SEL_MUX_SPI_RDBK 0x06
#define LMK04832_BIT_CLKIN_SEL_TYPE GENMASK(2, 0)
#define LMK04832_VAL_CLKIN_SEL_TYPE_OUT 0x03
/* 0x14b - 0x152 Holdover */
/* 0x153 - 0x15f PLL1 Configuration */
#define LMK04832_REG_PLL1_LD 0x15f
#define LMK04832_BIT_PLL1_LD_MUX GENMASK(7, 3)
#define LMK04832_VAL_PLL1_LD_MUX_SPI_RDBK 0x07
#define LMK04832_BIT_PLL1_LD_TYPE GENMASK(2, 0)
#define LMK04832_VAL_PLL1_LD_TYPE_OUT_PP 0x03
/* 0x160 - 0x16e PLL2 Configuration */
#define LMK04832_REG_PLL2_R_MSB 0x160
#define LMK04832_BIT_PLL2_R_MSB GENMASK(3, 0)
#define LMK04832_REG_PLL2_R_LSB 0x161
#define LMK04832_REG_PLL2_MISC 0x162
#define LMK04832_BIT_PLL2_MISC_P GENMASK(7, 5)
#define LMK04832_BIT_PLL2_MISC_REF_2X_EN BIT(0)
#define LMK04832_REG_PLL2_N_CAL_0 0x163
#define LMK04832_BIT_PLL2_N_CAL_0 GENMASK(1, 0)
#define LMK04832_REG_PLL2_N_CAL_1 0x164
#define LMK04832_REG_PLL2_N_CAL_2 0x165
#define LMK04832_REG_PLL2_N_0 0x166
#define LMK04832_BIT_PLL2_N_0 GENMASK(1, 0)
#define LMK04832_REG_PLL2_N_1 0x167
#define LMK04832_REG_PLL2_N_2 0x168
#define LMK04832_REG_PLL2_DLD_CNT_MSB 0x16a
#define LMK04832_REG_PLL2_DLD_CNT_LSB 0x16b
#define LMK04832_REG_PLL2_LD 0x16e
#define LMK04832_BIT_PLL2_LD_MUX GENMASK(7, 3)
#define LMK04832_VAL_PLL2_LD_MUX_PLL2_DLD 0x02
#define LMK04832_BIT_PLL2_LD_TYPE GENMASK(2, 0)
#define LMK04832_VAL_PLL2_LD_TYPE_OUT_PP 0x03
/* 0x16F - 0x555 Misc Registers */
#define LMK04832_REG_PLL2_PD 0x173
#define LMK04832_BIT_PLL2_PRE_PD BIT(6)
#define LMK04832_BIT_PLL2_PD BIT(5)
#define LMK04832_REG_PLL1R_RST 0x177
#define LMK04832_REG_CLR_PLL_LOST 0x182
#define LMK04832_REG_RB_PLL_LD 0x183
#define LMK04832_REG_RB_CLK_DAC_VAL_MSB 0x184
#define LMK04832_REG_RB_DAC_VAL_LSB 0x185
#define LMK04832_REG_RB_HOLDOVER 0x188
#define LMK04832_REG_SPI_LOCK 0x555
enum lmk04832_device_types {
LMK04832,
};
/**
* struct lmk04832_device_info - Holds static device information that is
* specific to the chip revision
*
* @pid: Product Identifier
* @maskrev: IC version identifier
* @num_channels: Number of available output channels (clkout count)
* @vco0_range: {min, max} of the VCO0 operating range (in MHz)
* @vco1_range: {min, max} of the VCO1 operating range (in MHz)
*/
struct lmk04832_device_info {
u16 pid;
u8 maskrev;
size_t num_channels;
unsigned int vco0_range[2];
unsigned int vco1_range[2];
};
static const struct lmk04832_device_info lmk04832_device_info[] = {
[LMK04832] = {
.pid = 0x63d1, /* WARNING PROD_ID is inverted in the datasheet */
.maskrev = 0x70,
.num_channels = 14,
.vco0_range = { 2440, 2580 },
.vco1_range = { 2945, 3255 },
},
};
enum lmk04832_rdbk_type {
RDBK_CLKIN_SEL0,
RDBK_CLKIN_SEL1,
RDBK_RESET,
RDBK_PLL1_LD,
};
struct lmk_dclk {
struct lmk04832 *lmk;
struct clk_hw hw;
u8 id;
};
struct lmk_clkout {
struct lmk04832 *lmk;
struct clk_hw hw;
bool sysref;
u32 format;
u8 id;
};
/**
* struct lmk04832 - The LMK04832 device structure
*
* @dev: reference to a struct device, linked to the spi_device
* @regmap: struct regmap instance use to access the chip
* @sync_mode: operational mode for SYNC signal
* @sysref_mux: select SYSREF source
* @sysref_pulse_cnt: number of SYSREF pulses generated while not in continuous
* mode.
* @sysref_ddly: SYSREF digital delay value
* @oscin: PLL2 input clock
* @vco: reference to the internal VCO clock
* @sclk: reference to the internal sysref clock (SCLK)
* @vco_rate: user provided VCO rate
* @reset_gpio: reference to the reset GPIO
* @dclk: list of internal device clock references.
* Each pair of clkout clocks share a single device clock (DCLKX_Y)
* @clkout: list of output clock references
* @clk_data: holds clkout related data like clk_hw* and number of clocks
*/
struct lmk04832 {
struct device *dev;
struct regmap *regmap;
unsigned int sync_mode;
unsigned int sysref_mux;
unsigned int sysref_pulse_cnt;
unsigned int sysref_ddly;
struct clk *oscin;
struct clk_hw vco;
struct clk_hw sclk;
unsigned int vco_rate;
struct gpio_desc *reset_gpio;
struct lmk_dclk *dclk;
struct lmk_clkout *clkout;
struct clk_hw_onecell_data *clk_data;
};
static bool lmk04832_regmap_rd_regs(struct device *dev, unsigned int reg)
{
switch (reg) {
case LMK04832_REG_RST3W ... LMK04832_REG_ID_MASKREV:
case LMK04832_REG_ID_VNDR_MSB:
case LMK04832_REG_ID_VNDR_LSB:
case LMK04832_REG_CLKOUT_CTRL0(0) ... LMK04832_REG_PLL2_DLD_CNT_LSB:
case LMK04832_REG_PLL2_LD:
case LMK04832_REG_PLL2_PD:
case LMK04832_REG_PLL1R_RST:
case LMK04832_REG_CLR_PLL_LOST ... LMK04832_REG_RB_DAC_VAL_LSB:
case LMK04832_REG_RB_HOLDOVER:
case LMK04832_REG_SPI_LOCK:
return true;
default:
return false;
	}
}
static bool lmk04832_regmap_wr_regs(struct device *dev, unsigned int reg)
{
switch (reg) {
case LMK04832_REG_RST3W:
case LMK04832_REG_POWERDOWN:
return true;
case LMK04832_REG_ID_DEV_TYPE ... LMK04832_REG_ID_MASKREV:
case LMK04832_REG_ID_VNDR_MSB:
case LMK04832_REG_ID_VNDR_LSB:
return false;
case LMK04832_REG_CLKOUT_CTRL0(0) ... LMK04832_REG_PLL2_DLD_CNT_LSB:
case LMK04832_REG_PLL2_LD:
case LMK04832_REG_PLL2_PD:
case LMK04832_REG_PLL1R_RST:
case LMK04832_REG_CLR_PLL_LOST ... LMK04832_REG_RB_DAC_VAL_LSB:
case LMK04832_REG_RB_HOLDOVER:
case LMK04832_REG_SPI_LOCK:
return true;
default:
return false;
	}
}
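/*
 * 16-bit register addresses with the top bit used as the SPI read flag
 * (regmap ORs read_flag_mask into the high address byte on reads).
 */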
static const struct regmap_config regmap_config = {
.name = "lmk04832",
.reg_bits = 16,
.val_bits = 8,
.use_single_read = 1,
.use_single_write = 1,
.read_flag_mask = 0x80,
.write_flag_mask = 0x00,
.readable_reg = lmk04832_regmap_rd_regs,
.writeable_reg = lmk04832_regmap_wr_regs,
.cache_type = REGCACHE_NONE,
.max_register = LMK04832_REG_SPI_LOCK,
};
static int lmk04832_vco_is_enabled(struct clk_hw *hw)
{
struct lmk04832 *lmk = container_of(hw, struct lmk04832, vco);
unsigned int tmp;
int ret;
ret = regmap_read(lmk->regmap, LMK04832_REG_MAIN_PD, &tmp);
if (ret)
return ret;
return !(FIELD_GET(LMK04832_BIT_OSCIN_PD, tmp) |
FIELD_GET(LMK04832_BIT_VCO_PD, tmp) |
FIELD_GET(LMK04832_BIT_VCO_LDO_PD, tmp));
}
static int lmk04832_vco_prepare(struct clk_hw *hw)
{
struct lmk04832 *lmk = container_of(hw, struct lmk04832, vco);
int ret;
ret = regmap_update_bits(lmk->regmap, LMK04832_REG_PLL2_PD,
LMK04832_BIT_PLL2_PRE_PD |
LMK04832_BIT_PLL2_PD,
0x00);
if (ret)
return ret;
return regmap_update_bits(lmk->regmap, LMK04832_REG_MAIN_PD,
LMK04832_BIT_VCO_LDO_PD |
LMK04832_BIT_VCO_PD |
LMK04832_BIT_OSCIN_PD, 0x00);
}
static void lmk04832_vco_unprepare(struct clk_hw *hw)
{
struct lmk04832 *lmk = container_of(hw, struct lmk04832, vco);
regmap_update_bits(lmk->regmap, LMK04832_REG_PLL2_PD,
LMK04832_BIT_PLL2_PRE_PD | LMK04832_BIT_PLL2_PD,
0xff);
/* Don't set LMK04832_BIT_OSCIN_PD since other clocks depend on it */
regmap_update_bits(lmk->regmap, LMK04832_REG_MAIN_PD,
LMK04832_BIT_VCO_LDO_PD | LMK04832_BIT_VCO_PD, 0xff);
}
static unsigned long lmk04832_vco_recalc_rate(struct clk_hw *hw,
unsigned long prate)
{
struct lmk04832 *lmk = container_of(hw, struct lmk04832, vco);
const unsigned int pll2_p[] = {8, 2, 2, 3, 4, 5, 6, 7};
unsigned int pll2_n, p, pll2_r;
unsigned int pll2_misc;
unsigned long vco_rate;
u8 tmp[3];
int ret;
ret = regmap_read(lmk->regmap, LMK04832_REG_PLL2_MISC, &pll2_misc);
if (ret)
return ret;
p = FIELD_GET(LMK04832_BIT_PLL2_MISC_P, pll2_misc);
ret = regmap_bulk_read(lmk->regmap, LMK04832_REG_PLL2_N_0, &tmp, 3);
if (ret)
return ret;
pll2_n = FIELD_PREP(0x030000, tmp[0]) |
FIELD_PREP(0x00ff00, tmp[1]) |
FIELD_PREP(0x0000ff, tmp[2]);
ret = regmap_bulk_read(lmk->regmap, LMK04832_REG_PLL2_R_MSB, &tmp, 2);
if (ret)
return ret;
pll2_r = FIELD_PREP(0x0f00, tmp[0]) |
FIELD_PREP(0x00ff, tmp[1]);
vco_rate = (prate << FIELD_GET(LMK04832_BIT_PLL2_MISC_REF_2X_EN,
pll2_misc)) * pll2_n * pll2_p[p] / pll2_r;
return vco_rate;
}
/**
* lmk04832_check_vco_ranges - Check requested VCO frequency against VCO ranges
*
* @lmk: Reference to the lmk device
* @rate: Desired output rate for the VCO
*
* The LMK04832 has 2 internal VCO, each with independent operating ranges.
* Use the device_info structure to determine which VCO to use based on rate.
*
* Returns: VCO_MUX value or negative errno.
*/
static int lmk04832_check_vco_ranges(struct lmk04832 *lmk, unsigned long rate)
{
struct spi_device *spi = to_spi_device(lmk->dev);
const struct lmk04832_device_info *info;
unsigned long mhz = rate / 1000000;
info = &lmk04832_device_info[spi_get_device_id(spi)->driver_data];
if (mhz >= info->vco0_range[0] && mhz <= info->vco0_range[1])
return LMK04832_VAL_VCO_MUX_VCO0;
if (mhz >= info->vco1_range[0] && mhz <= info->vco1_range[1])
return LMK04832_VAL_VCO_MUX_VCO1;
dev_err(lmk->dev, "%lu Hz is out of VCO ranges\n", rate);
return -ERANGE;
}
/**
* lmk04832_calc_pll2_params - Get PLL2 parameters used to set the VCO frequency
*
* @prate: parent rate to the PLL2, usually OSCin
* @rate: Desired output rate for the VCO
* @n: reference to PLL2_N
* @p: reference to PLL2_P
* @r: reference to PLL2_R
*
 * This function assumes LMK04832_BIT_PLL2_MISC_REF_2X_EN is set, as
 * recommended in the datasheet, because a higher phase detector frequency
 * makes the design of wider loop bandwidth filters possible.
 *
 * The VCO rate can be calculated using the following expression:
*
* VCO = OSCin * 2 * PLL2_N * PLL2_P / PLL2_R
*
* Returns: vco rate or negative errno.
*/
static long lmk04832_calc_pll2_params(unsigned long prate, unsigned long rate,
unsigned int *n, unsigned int *p,
unsigned int *r)
{
unsigned int pll2_n, pll2_p, pll2_r;
unsigned long num, div;
/* Set PLL2_P to a fixed value to simplify optimizations */
pll2_p = 2;
div = gcd(rate, prate);
num = DIV_ROUND_CLOSEST(rate, div);
pll2_r = DIV_ROUND_CLOSEST(prate, div);
if (num > 4) {
pll2_n = num >> 2;
} else {
pll2_r = pll2_r << 2;
pll2_n = num;
}
if (pll2_n < 1 || pll2_n > 0x03ffff)
return -EINVAL;
if (pll2_r < 1 || pll2_r > 0xfff)
return -EINVAL;
*n = pll2_n;
*p = pll2_p;
*r = pll2_r;
return DIV_ROUND_CLOSEST(prate * 2 * pll2_p * pll2_n, pll2_r);
}
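/*
 * Worked example (illustrative): OSCin = 122.88 MHz, target VCO =
 * 2949.12 MHz. gcd() reduces the ratio to num = 24, r = 1; num > 4 gives
 * n = 24 >> 2 = 6 with the fixed p = 2, and indeed
 * 122.88 MHz * 2 * 6 * 2 / 1 = 2949.12 MHz, inside the VCO1 range.
 */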
static long lmk04832_vco_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
struct lmk04832 *lmk = container_of(hw, struct lmk04832, vco);
unsigned int n, p, r;
long vco_rate;
int ret;
ret = lmk04832_check_vco_ranges(lmk, rate);
if (ret < 0)
return ret;
vco_rate = lmk04832_calc_pll2_params(*prate, rate, &n, &p, &r);
if (vco_rate < 0) {
dev_err(lmk->dev, "PLL2 parameters out of range\n");
return vco_rate;
}
if (rate != vco_rate)
return -EINVAL;
return vco_rate;
}
static int lmk04832_vco_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long prate)
{
struct lmk04832 *lmk = container_of(hw, struct lmk04832, vco);
unsigned int n, p, r;
long vco_rate;
int vco_mux;
int ret;
vco_mux = lmk04832_check_vco_ranges(lmk, rate);
if (vco_mux < 0)
return vco_mux;
ret = regmap_update_bits(lmk->regmap, LMK04832_REG_VCO_OSCOUT,
LMK04832_BIT_VCO_MUX,
FIELD_PREP(LMK04832_BIT_VCO_MUX, vco_mux));
if (ret)
return ret;
vco_rate = lmk04832_calc_pll2_params(prate, rate, &n, &p, &r);
if (vco_rate < 0) {
dev_err(lmk->dev, "failed to determine PLL2 parameters\n");
return vco_rate;
}
ret = regmap_update_bits(lmk->regmap, LMK04832_REG_PLL2_R_MSB,
LMK04832_BIT_PLL2_R_MSB,
FIELD_GET(0x000700, r));
if (ret)
return ret;
ret = regmap_write(lmk->regmap, LMK04832_REG_PLL2_R_LSB,
FIELD_GET(0x0000ff, r));
if (ret)
return ret;
ret = regmap_update_bits(lmk->regmap, LMK04832_REG_PLL2_MISC,
LMK04832_BIT_PLL2_MISC_P,
FIELD_PREP(LMK04832_BIT_PLL2_MISC_P, p));
if (ret)
return ret;
/*
* PLL2_N registers must be programmed after other PLL2 dividers are
* programmed to ensure proper VCO frequency calibration
*/
ret = regmap_write(lmk->regmap, LMK04832_REG_PLL2_N_0,
FIELD_GET(0x030000, n));
if (ret)
return ret;
ret = regmap_write(lmk->regmap, LMK04832_REG_PLL2_N_1,
FIELD_GET(0x00ff00, n));
if (ret)
return ret;
return regmap_write(lmk->regmap, LMK04832_REG_PLL2_N_2,
FIELD_GET(0x0000ff, n));
}
static const struct clk_ops lmk04832_vco_ops = {
.is_enabled = lmk04832_vco_is_enabled,
.prepare = lmk04832_vco_prepare,
.unprepare = lmk04832_vco_unprepare,
.recalc_rate = lmk04832_vco_recalc_rate,
.round_rate = lmk04832_vco_round_rate,
.set_rate = lmk04832_vco_set_rate,
};
/*
* lmk04832_register_vco - Initialize the internal VCO and clock distribution
* path in PLL2 single loop mode.
*/
static int lmk04832_register_vco(struct lmk04832 *lmk)
{
const char *parent_names[1];
struct clk_init_data init;
int ret;
init.name = "lmk-vco";
parent_names[0] = __clk_get_name(lmk->oscin);
init.parent_names = parent_names;
init.ops = &lmk04832_vco_ops;
init.num_parents = 1;
ret = regmap_update_bits(lmk->regmap, LMK04832_REG_VCO_OSCOUT,
LMK04832_BIT_VCO_MUX,
FIELD_PREP(LMK04832_BIT_VCO_MUX,
LMK04832_VAL_VCO_MUX_VCO1));
if (ret)
return ret;
ret = regmap_update_bits(lmk->regmap, LMK04832_REG_FB_CTRL,
LMK04832_BIT_PLL2_RCLK_MUX |
LMK04832_BIT_PLL2_NCLK_MUX,
FIELD_PREP(LMK04832_BIT_PLL2_RCLK_MUX,
LMK04832_VAL_PLL2_RCLK_MUX_OSCIN)|
FIELD_PREP(LMK04832_BIT_PLL2_NCLK_MUX,
LMK04832_VAL_PLL2_NCLK_MUX_PLL2_P));
if (ret)
return ret;
ret = regmap_update_bits(lmk->regmap, LMK04832_REG_PLL2_MISC,
LMK04832_BIT_PLL2_MISC_REF_2X_EN,
LMK04832_BIT_PLL2_MISC_REF_2X_EN);
if (ret)
return ret;
ret = regmap_write(lmk->regmap, LMK04832_REG_PLL2_LD,
FIELD_PREP(LMK04832_BIT_PLL2_LD_MUX,
LMK04832_VAL_PLL2_LD_MUX_PLL2_DLD) |
FIELD_PREP(LMK04832_BIT_PLL2_LD_TYPE,
LMK04832_VAL_PLL2_LD_TYPE_OUT_PP));
if (ret)
return ret;
lmk->vco.init = &init;
return devm_clk_hw_register(lmk->dev, &lmk->vco);
}
static int lmk04832_clkout_set_ddly(struct lmk04832 *lmk, int id)
{
const int dclk_div_adj[] = {0, 0, -2, -2, 0, 3, -1, 0};
unsigned int sclkx_y_ddly = 10;
unsigned int dclkx_y_ddly;
unsigned int dclkx_y_div;
unsigned int sysref_ddly;
unsigned int dclkx_y_hs;
unsigned int lsb, msb;
int ret;
ret = regmap_update_bits(lmk->regmap,
LMK04832_REG_CLKOUT_CTRL2(id),
LMK04832_BIT_DCLKX_Y_DDLY_PD,
FIELD_PREP(LMK04832_BIT_DCLKX_Y_DDLY_PD, 0));
if (ret)
return ret;
ret = regmap_read(lmk->regmap, LMK04832_REG_SYSREF_DDLY_LSB, &lsb);
if (ret)
return ret;
ret = regmap_read(lmk->regmap, LMK04832_REG_SYSREF_DDLY_MSB, &msb);
if (ret)
return ret;
sysref_ddly = FIELD_GET(LMK04832_BIT_SYSREF_DDLY_MSB, msb) << 8 | lsb;
ret = regmap_read(lmk->regmap, LMK04832_REG_CLKOUT_CTRL0(id), &lsb);
if (ret)
return ret;
ret = regmap_read(lmk->regmap, LMK04832_REG_CLKOUT_CTRL2(id), &msb);
if (ret)
return ret;
dclkx_y_div = FIELD_GET(LMK04832_BIT_DCLK_DIV_MSB, msb) << 8 | lsb;
ret = regmap_read(lmk->regmap, LMK04832_REG_CLKOUT_CTRL3(id), &lsb);
if (ret)
return ret;
dclkx_y_hs = FIELD_GET(LMK04832_BIT_DCLKX_Y_HS, lsb);
dclkx_y_ddly = sysref_ddly + 1 -
dclk_div_adj[dclkx_y_div < 6 ? dclkx_y_div : 7] -
dclkx_y_hs + sclkx_y_ddly;
if (dclkx_y_ddly < 7 || dclkx_y_ddly > 0x3fff) {
dev_err(lmk->dev, "DCLKX_Y_DDLY out of range (%d)\n",
dclkx_y_ddly);
return -EINVAL;
}
ret = regmap_write(lmk->regmap,
LMK04832_REG_SCLKX_Y_DDLY(id),
FIELD_GET(LMK04832_BIT_SCLKX_Y_DDLY, sclkx_y_ddly));
if (ret)
return ret;
ret = regmap_write(lmk->regmap, LMK04832_REG_CLKOUT_CTRL1(id),
FIELD_GET(0x00ff, dclkx_y_ddly));
if (ret)
return ret;
dev_dbg(lmk->dev, "clkout%02u: sysref_ddly=%u, dclkx_y_ddly=%u, "
"dclk_div_adj=%+d, dclkx_y_hs=%u, sclkx_y_ddly=%u\n",
id, sysref_ddly, dclkx_y_ddly,
dclk_div_adj[dclkx_y_div < 6 ? dclkx_y_div : 7],
dclkx_y_hs, sclkx_y_ddly);
return regmap_update_bits(lmk->regmap, LMK04832_REG_CLKOUT_CTRL2(id),
LMK04832_BIT_DCLKX_Y_DDLY_MSB,
FIELD_GET(0x0300, dclkx_y_ddly));
}
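/*
 * Worked example (illustrative; register values assumed): with
 * sysref_ddly = 8, dclkx_y_div = 4 (divider adjustment 0),
 * dclkx_y_hs = 0 and the fixed sclkx_y_ddly = 10, the formula above
 * gives dclkx_y_ddly = 8 + 1 - 0 - 0 + 10 = 19, which lies in the
 * valid 7..0x3fff window and is split across the CTRL1 (LSB) and
 * CTRL2 (MSB) writes.
 */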
/**
 * lmk04832_sclk_sync_sequence() - Establish a deterministic phase
 *                                 relationship between sclk and dclk
 * @lmk: Reference to the lmk device
 *
 * The synchronization sequence is described:
 * - in the datasheet https://www.ti.com/lit/ds/symlink/lmk04832.pdf, p.31
 *   (8.3.3.1 How to enable SYSREF)
 * - on the TI E2E forum: https://e2e.ti.com/support/clock-and-timing/f/48/t/970972
 *
 * Return: 0 on success or a negative errno.
 */
static int lmk04832_sclk_sync_sequence(struct lmk04832 *lmk)
{
int ret;
int i;
/* 1. (optional) mute all sysref_outputs during synchronization */
/* 2. Enable and write device clock digital delay to applicable clocks */
ret = regmap_update_bits(lmk->regmap, LMK04832_REG_MAIN_PD,
LMK04832_BIT_SYSREF_DDLY_PD,
FIELD_PREP(LMK04832_BIT_SYSREF_DDLY_PD, 0));
if (ret)
return ret;
for (i = 0; i < lmk->clk_data->num; i += 2) {
ret = lmk04832_clkout_set_ddly(lmk, i);
if (ret)
return ret;
}
/*
* 3. Configure SYNC_MODE to SYNC_PIN and SYSREF_MUX to Normal SYNC,
* and clear SYSREF_REQ_EN (see 6.)
*/
ret = regmap_update_bits(lmk->regmap, LMK04832_REG_SYSREF_OUT,
LMK04832_BIT_SYSREF_REQ_EN |
LMK04832_BIT_SYSREF_MUX,
FIELD_PREP(LMK04832_BIT_SYSREF_REQ_EN, 0) |
FIELD_PREP(LMK04832_BIT_SYSREF_MUX,
LMK04832_VAL_SYSREF_MUX_NORMAL_SYNC));
if (ret)
return ret;
ret = regmap_update_bits(lmk->regmap, LMK04832_REG_SYNC,
LMK04832_BIT_SYNC_MODE,
FIELD_PREP(LMK04832_BIT_SYNC_MODE,
LMK04832_VAL_SYNC_MODE_ON));
if (ret)
return ret;
	/* 4. Clear SYNC_DISx for all applicable clocks and clear SYNC_DISSYSREF */
ret = regmap_write(lmk->regmap, LMK04832_REG_SYNC_DIS, 0x00);
if (ret)
return ret;
/*
* 5. If SCLKX_Y_DDLY != 0, Set SYSREF_CLR=1 for at least 15 clock
* distribution path cycles (VCO cycles), then back to 0. In
* PLL2-only use case, this will be complete in less than one SPI
* transaction. If SYSREF local digital delay is not used, this step
* can be skipped.
*/
ret = regmap_update_bits(lmk->regmap, LMK04832_REG_SYNC,
LMK04832_BIT_SYNC_CLR,
FIELD_PREP(LMK04832_BIT_SYNC_CLR, 0x01));
if (ret)
return ret;
ret = regmap_update_bits(lmk->regmap, LMK04832_REG_SYNC,
LMK04832_BIT_SYNC_CLR,
FIELD_PREP(LMK04832_BIT_SYNC_CLR, 0x00));
if (ret)
return ret;
/*
* 6. Toggle SYNC_POL state between inverted and not inverted.
* If you use an external signal on the SYNC pin instead of toggling
* SYNC_POL, make sure that SYSREF_REQ_EN=0 so that the SYSREF_MUX
* does not shift into continuous SYSREF mode.
*/
ret = regmap_update_bits(lmk->regmap, LMK04832_REG_SYNC,
LMK04832_BIT_SYNC_POL,
FIELD_PREP(LMK04832_BIT_SYNC_POL, 0x01));
if (ret)
return ret;
ret = regmap_update_bits(lmk->regmap, LMK04832_REG_SYNC,
LMK04832_BIT_SYNC_POL,
FIELD_PREP(LMK04832_BIT_SYNC_POL, 0x00));
if (ret)
return ret;
/* 7. Set all SYNC_DISx=1, including SYNC_DISSYSREF */
ret = regmap_write(lmk->regmap, LMK04832_REG_SYNC_DIS, 0xff);
if (ret)
return ret;
/* 8. Restore state of SYNC_MODE and SYSREF_MUX to desired values */
ret = regmap_update_bits(lmk->regmap, LMK04832_REG_SYSREF_OUT,
LMK04832_BIT_SYSREF_MUX,
FIELD_PREP(LMK04832_BIT_SYSREF_MUX,
lmk->sysref_mux));
if (ret)
return ret;
ret = regmap_update_bits(lmk->regmap, LMK04832_REG_SYNC,
LMK04832_BIT_SYNC_MODE,
FIELD_PREP(LMK04832_BIT_SYNC_MODE,
lmk->sync_mode));
if (ret)
return ret;
/*
* 9. (optional) if SCLKx_y_DIS_MODE was used to mute SYSREF outputs
* during the SYNC event, restore SCLKx_y_DIS_MODE=0 for active state,
* or set SYSREF_GBL_PD=0 if SCLKx_y_DIS_MODE is set to a conditional
* option.
*/
	/*
	 * 10. (optional) To reduce power consumption once the synchronization
	 * event is complete, set DCLKx_y_DDLY_PD=1 and SYSREF_DDLY_PD=1 to
	 * disable the digital delay counters (which are only used immediately
	 * after the SYNC pulse to delay the output by some number of VCO
	 * counts).
	 */
return ret;
}
static int lmk04832_sclk_is_enabled(struct clk_hw *hw)
{
struct lmk04832 *lmk = container_of(hw, struct lmk04832, sclk);
unsigned int tmp;
int ret;
ret = regmap_read(lmk->regmap, LMK04832_REG_MAIN_PD, &tmp);
if (ret)
return ret;
	/* SYSREF_PD is a power-down bit: the clock is enabled when it is clear */
	return !FIELD_GET(LMK04832_BIT_SYSREF_PD, tmp);
}
static int lmk04832_sclk_prepare(struct clk_hw *hw)
{
struct lmk04832 *lmk = container_of(hw, struct lmk04832, sclk);
return regmap_update_bits(lmk->regmap, LMK04832_REG_MAIN_PD,
LMK04832_BIT_SYSREF_PD, 0x00);
}
static void lmk04832_sclk_unprepare(struct clk_hw *hw)
{
struct lmk04832 *lmk = container_of(hw, struct lmk04832, sclk);
regmap_update_bits(lmk->regmap, LMK04832_REG_MAIN_PD,
LMK04832_BIT_SYSREF_PD, LMK04832_BIT_SYSREF_PD);
}
static unsigned long lmk04832_sclk_recalc_rate(struct clk_hw *hw,
unsigned long prate)
{
struct lmk04832 *lmk = container_of(hw, struct lmk04832, sclk);
unsigned int sysref_div;
u8 tmp[2];
int ret;
ret = regmap_bulk_read(lmk->regmap, LMK04832_REG_SYSREF_DIV_MSB, &tmp, 2);
	/* recalc_rate() cannot return an errno, report a rate of 0 instead */
	if (ret)
		return 0;
sysref_div = FIELD_GET(LMK04832_BIT_SYSREF_DIV_MSB, tmp[0]) << 8 |
tmp[1];
return DIV_ROUND_CLOSEST(prate, sysref_div);
}
static long lmk04832_sclk_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
struct lmk04832 *lmk = container_of(hw, struct lmk04832, sclk);
unsigned long sclk_rate;
unsigned int sysref_div;
	sysref_div = DIV_ROUND_CLOSEST(*prate, rate);
	/* validate the divider before dividing by it */
	if (sysref_div < 0x07 || sysref_div > 0x1fff) {
		dev_err(lmk->dev, "SYSREF divider out of range\n");
		return -EINVAL;
	}
	sclk_rate = DIV_ROUND_CLOSEST(*prate, sysref_div);
if (rate != sclk_rate)
return -EINVAL;
return sclk_rate;
}
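/*
 * Worked example (rates assumed): with a 2457.6 MHz VCO parent and a
 * requested 7.68 MHz SYSREF, sysref_div = 2457600000 / 7680000 = 320,
 * which is inside the valid 0x07..0x1fff range and divides evenly, so
 * round_rate() returns exactly 7680000.
 */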
static int lmk04832_sclk_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long prate)
{
struct lmk04832 *lmk = container_of(hw, struct lmk04832, sclk);
unsigned int sysref_div;
int ret;
sysref_div = DIV_ROUND_CLOSEST(prate, rate);
if (sysref_div < 0x07 || sysref_div > 0x1fff) {
dev_err(lmk->dev, "SYSREF divider out of range\n");
return -EINVAL;
}
ret = regmap_write(lmk->regmap, LMK04832_REG_SYSREF_DIV_MSB,
FIELD_GET(0x1f00, sysref_div));
if (ret)
return ret;
ret = regmap_write(lmk->regmap, LMK04832_REG_SYSREF_DIV_LSB,
FIELD_GET(0x00ff, sysref_div));
if (ret)
return ret;
ret = lmk04832_sclk_sync_sequence(lmk);
if (ret)
dev_err(lmk->dev, "SYNC sequence failed\n");
return ret;
}
static const struct clk_ops lmk04832_sclk_ops = {
.is_enabled = lmk04832_sclk_is_enabled,
.prepare = lmk04832_sclk_prepare,
.unprepare = lmk04832_sclk_unprepare,
.recalc_rate = lmk04832_sclk_recalc_rate,
.round_rate = lmk04832_sclk_round_rate,
.set_rate = lmk04832_sclk_set_rate,
};
static int lmk04832_register_sclk(struct lmk04832 *lmk)
{
const char *parent_names[1];
struct clk_init_data init;
int ret;
init.name = "lmk-sclk";
parent_names[0] = clk_hw_get_name(&lmk->vco);
init.parent_names = parent_names;
init.ops = &lmk04832_sclk_ops;
init.flags = CLK_SET_RATE_PARENT;
init.num_parents = 1;
ret = regmap_update_bits(lmk->regmap, LMK04832_REG_SYSREF_OUT,
LMK04832_BIT_SYSREF_MUX,
FIELD_PREP(LMK04832_BIT_SYSREF_MUX,
lmk->sysref_mux));
if (ret)
return ret;
ret = regmap_write(lmk->regmap, LMK04832_REG_SYSREF_DDLY_LSB,
FIELD_GET(0x00ff, lmk->sysref_ddly));
if (ret)
return ret;
ret = regmap_write(lmk->regmap, LMK04832_REG_SYSREF_DDLY_MSB,
FIELD_GET(0x1f00, lmk->sysref_ddly));
if (ret)
return ret;
ret = regmap_write(lmk->regmap, LMK04832_REG_SYSREF_PULSE_CNT,
ilog2(lmk->sysref_pulse_cnt));
if (ret)
return ret;
ret = regmap_update_bits(lmk->regmap, LMK04832_REG_MAIN_PD,
LMK04832_BIT_SYSREF_DDLY_PD |
LMK04832_BIT_SYSREF_PLSR_PD,
FIELD_PREP(LMK04832_BIT_SYSREF_DDLY_PD, 0) |
FIELD_PREP(LMK04832_BIT_SYSREF_PLSR_PD, 0));
if (ret)
return ret;
ret = regmap_write(lmk->regmap, LMK04832_REG_SYNC,
FIELD_PREP(LMK04832_BIT_SYNC_POL, 0) |
FIELD_PREP(LMK04832_BIT_SYNC_EN, 1) |
FIELD_PREP(LMK04832_BIT_SYNC_MODE, lmk->sync_mode));
if (ret)
return ret;
ret = regmap_write(lmk->regmap, LMK04832_REG_SYNC_DIS, 0xff);
if (ret)
return ret;
lmk->sclk.init = &init;
return devm_clk_hw_register(lmk->dev, &lmk->sclk);
}
static int lmk04832_dclk_is_enabled(struct clk_hw *hw)
{
struct lmk_dclk *dclk = container_of(hw, struct lmk_dclk, hw);
struct lmk04832 *lmk = dclk->lmk;
unsigned int tmp;
int ret;
ret = regmap_read(lmk->regmap, LMK04832_REG_CLKOUT_CTRL3(dclk->id),
&tmp);
if (ret)
return ret;
return !FIELD_GET(LMK04832_BIT_DCLKX_Y_PD, tmp);
}
static int lmk04832_dclk_prepare(struct clk_hw *hw)
{
struct lmk_dclk *dclk = container_of(hw, struct lmk_dclk, hw);
struct lmk04832 *lmk = dclk->lmk;
return regmap_update_bits(lmk->regmap,
LMK04832_REG_CLKOUT_CTRL3(dclk->id),
LMK04832_BIT_DCLKX_Y_PD, 0x00);
}
static void lmk04832_dclk_unprepare(struct clk_hw *hw)
{
struct lmk_dclk *dclk = container_of(hw, struct lmk_dclk, hw);
struct lmk04832 *lmk = dclk->lmk;
regmap_update_bits(lmk->regmap,
LMK04832_REG_CLKOUT_CTRL3(dclk->id),
LMK04832_BIT_DCLKX_Y_PD, 0xff);
}
static unsigned long lmk04832_dclk_recalc_rate(struct clk_hw *hw,
unsigned long prate)
{
struct lmk_dclk *dclk = container_of(hw, struct lmk_dclk, hw);
struct lmk04832 *lmk = dclk->lmk;
unsigned int dclk_div;
unsigned int lsb, msb;
unsigned long rate;
int ret;
ret = regmap_read(lmk->regmap, LMK04832_REG_CLKOUT_CTRL0(dclk->id),
&lsb);
	if (ret)
		return 0;
ret = regmap_read(lmk->regmap, LMK04832_REG_CLKOUT_CTRL2(dclk->id),
&msb);
	if (ret)
		return 0;
dclk_div = FIELD_GET(LMK04832_BIT_DCLK_DIV_MSB, msb) << 8 | lsb;
rate = DIV_ROUND_CLOSEST(prate, dclk_div);
return rate;
}
static long lmk04832_dclk_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
struct lmk_dclk *dclk = container_of(hw, struct lmk_dclk, hw);
struct lmk04832 *lmk = dclk->lmk;
unsigned long dclk_rate;
unsigned int dclk_div;
	dclk_div = DIV_ROUND_CLOSEST(*prate, rate);
	/* validate the divider before dividing by it */
	if (dclk_div < 1 || dclk_div > 0x3ff) {
		dev_err(lmk->dev, "%s_div out of range\n", clk_hw_get_name(hw));
		return -EINVAL;
	}
	dclk_rate = DIV_ROUND_CLOSEST(*prate, dclk_div);
if (rate != dclk_rate)
return -EINVAL;
return dclk_rate;
}
static int lmk04832_dclk_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long prate)
{
struct lmk_dclk *dclk = container_of(hw, struct lmk_dclk, hw);
struct lmk04832 *lmk = dclk->lmk;
unsigned int dclk_div;
int ret;
dclk_div = DIV_ROUND_CLOSEST(prate, rate);
	if (dclk_div < 1 || dclk_div > 0x3ff) {
dev_err(lmk->dev, "%s_div out of range\n", clk_hw_get_name(hw));
return -EINVAL;
}
/* Enable Duty Cycle Correction */
if (dclk_div == 1) {
ret = regmap_update_bits(lmk->regmap,
LMK04832_REG_CLKOUT_CTRL3(dclk->id),
LMK04832_BIT_DCLKX_Y_DCC,
FIELD_PREP(LMK04832_BIT_DCLKX_Y_DCC, 1));
if (ret)
return ret;
}
	/*
	 * When using Divide-by-2 or Divide-by-3 for DCLK_X_Y_DIV, the SYNC
	 * procedure requires first programming Divide-by-4 and then switching
	 * back to Divide-by-2 or Divide-by-3 before issuing the SYNC.
	 */
if (dclk_div == 2 || dclk_div == 3) {
ret = regmap_update_bits(lmk->regmap,
LMK04832_REG_CLKOUT_CTRL2(dclk->id),
LMK04832_BIT_DCLK_DIV_MSB, 0x00);
if (ret)
return ret;
ret = regmap_write(lmk->regmap,
LMK04832_REG_CLKOUT_CTRL0(dclk->id), 0x04);
if (ret)
return ret;
}
ret = regmap_write(lmk->regmap, LMK04832_REG_CLKOUT_CTRL0(dclk->id),
FIELD_GET(0x0ff, dclk_div));
if (ret)
return ret;
ret = regmap_update_bits(lmk->regmap,
LMK04832_REG_CLKOUT_CTRL2(dclk->id),
LMK04832_BIT_DCLK_DIV_MSB,
FIELD_GET(0x300, dclk_div));
if (ret)
return ret;
ret = lmk04832_sclk_sync_sequence(lmk);
if (ret)
dev_err(lmk->dev, "SYNC sequence failed\n");
return ret;
}
static const struct clk_ops lmk04832_dclk_ops = {
.is_enabled = lmk04832_dclk_is_enabled,
.prepare = lmk04832_dclk_prepare,
.unprepare = lmk04832_dclk_unprepare,
.recalc_rate = lmk04832_dclk_recalc_rate,
.round_rate = lmk04832_dclk_round_rate,
.set_rate = lmk04832_dclk_set_rate,
};
static int lmk04832_clkout_is_enabled(struct clk_hw *hw)
{
struct lmk_clkout *clkout = container_of(hw, struct lmk_clkout, hw);
struct lmk04832 *lmk = clkout->lmk;
unsigned int clkoutx_y_pd;
unsigned int sclkx_y_pd;
unsigned int tmp;
u32 enabled;
int ret;
u8 fmt;
ret = regmap_read(lmk->regmap, LMK04832_REG_CLKOUT_CTRL2(clkout->id),
&clkoutx_y_pd);
if (ret)
return ret;
enabled = !FIELD_GET(LMK04832_BIT_CLKOUTX_Y_PD, clkoutx_y_pd);
ret = regmap_read(lmk->regmap, LMK04832_REG_CLKOUT_SRC_MUX(clkout->id),
&tmp);
if (ret)
return ret;
if (FIELD_GET(LMK04832_BIT_CLKOUT_SRC_MUX, tmp)) {
ret = regmap_read(lmk->regmap,
LMK04832_REG_CLKOUT_CTRL4(clkout->id),
&sclkx_y_pd);
if (ret)
return ret;
enabled = enabled && !FIELD_GET(LMK04832_BIT_SCLK_PD, sclkx_y_pd);
}
ret = regmap_read(lmk->regmap, LMK04832_REG_CLKOUT_FMT(clkout->id),
&tmp);
if (ret)
return ret;
if (clkout->id % 2)
fmt = FIELD_GET(0xf0, tmp);
else
fmt = FIELD_GET(0x0f, tmp);
return enabled && !fmt;
}
static int lmk04832_clkout_prepare(struct clk_hw *hw)
{
struct lmk_clkout *clkout = container_of(hw, struct lmk_clkout, hw);
struct lmk04832 *lmk = clkout->lmk;
unsigned int tmp;
int ret;
if (clkout->format == LMK04832_VAL_CLKOUT_FMT_POWERDOWN)
dev_err(lmk->dev, "prepared %s but format is powerdown\n",
clk_hw_get_name(hw));
ret = regmap_update_bits(lmk->regmap,
LMK04832_REG_CLKOUT_CTRL2(clkout->id),
LMK04832_BIT_CLKOUTX_Y_PD, 0x00);
if (ret)
return ret;
ret = regmap_read(lmk->regmap, LMK04832_REG_CLKOUT_SRC_MUX(clkout->id),
&tmp);
if (ret)
return ret;
if (FIELD_GET(LMK04832_BIT_CLKOUT_SRC_MUX, tmp)) {
ret = regmap_update_bits(lmk->regmap,
LMK04832_REG_CLKOUT_CTRL4(clkout->id),
LMK04832_BIT_SCLK_PD, 0x00);
if (ret)
return ret;
}
return regmap_update_bits(lmk->regmap,
LMK04832_REG_CLKOUT_FMT(clkout->id),
LMK04832_BIT_CLKOUT_FMT(clkout->id),
clkout->format << 4 * (clkout->id % 2));
}
static void lmk04832_clkout_unprepare(struct clk_hw *hw)
{
struct lmk_clkout *clkout = container_of(hw, struct lmk_clkout, hw);
struct lmk04832 *lmk = clkout->lmk;
regmap_update_bits(lmk->regmap, LMK04832_REG_CLKOUT_FMT(clkout->id),
LMK04832_BIT_CLKOUT_FMT(clkout->id),
0x00);
}
static int lmk04832_clkout_set_parent(struct clk_hw *hw, uint8_t index)
{
struct lmk_clkout *clkout = container_of(hw, struct lmk_clkout, hw);
struct lmk04832 *lmk = clkout->lmk;
return regmap_update_bits(lmk->regmap,
LMK04832_REG_CLKOUT_SRC_MUX(clkout->id),
LMK04832_BIT_CLKOUT_SRC_MUX,
FIELD_PREP(LMK04832_BIT_CLKOUT_SRC_MUX,
index));
}
static uint8_t lmk04832_clkout_get_parent(struct clk_hw *hw)
{
struct lmk_clkout *clkout = container_of(hw, struct lmk_clkout, hw);
struct lmk04832 *lmk = clkout->lmk;
unsigned int tmp;
int ret;
ret = regmap_read(lmk->regmap, LMK04832_REG_CLKOUT_SRC_MUX(clkout->id),
&tmp);
	/* get_parent() returns a u8 and cannot propagate errors */
	if (ret)
		return 0;
return FIELD_GET(LMK04832_BIT_CLKOUT_SRC_MUX, tmp);
}
static const struct clk_ops lmk04832_clkout_ops = {
.is_enabled = lmk04832_clkout_is_enabled,
.prepare = lmk04832_clkout_prepare,
.unprepare = lmk04832_clkout_unprepare,
.determine_rate = __clk_mux_determine_rate,
.set_parent = lmk04832_clkout_set_parent,
.get_parent = lmk04832_clkout_get_parent,
};
static int lmk04832_register_clkout(struct lmk04832 *lmk, const int num)
{
char name[] = "lmk-clkoutXX";
char dclk_name[] = "lmk-dclkXX_YY";
const char *parent_names[2];
struct clk_init_data init;
int dclk_num = num / 2;
int ret;
if (num % 2 == 0) {
sprintf(dclk_name, "lmk-dclk%02d_%02d", num, num + 1);
init.name = dclk_name;
parent_names[0] = clk_hw_get_name(&lmk->vco);
init.parent_names = parent_names;
init.ops = &lmk04832_dclk_ops;
init.flags = CLK_SET_RATE_PARENT;
init.num_parents = 1;
lmk->dclk[dclk_num].id = num;
lmk->dclk[dclk_num].lmk = lmk;
lmk->dclk[dclk_num].hw.init = &init;
ret = devm_clk_hw_register(lmk->dev, &lmk->dclk[dclk_num].hw);
if (ret)
return ret;
} else {
sprintf(dclk_name, "lmk-dclk%02d_%02d", num - 1, num);
}
if (of_property_read_string_index(lmk->dev->of_node,
"clock-output-names",
num, &init.name)) {
sprintf(name, "lmk-clkout%02d", num);
init.name = name;
}
parent_names[0] = dclk_name;
parent_names[1] = clk_hw_get_name(&lmk->sclk);
init.parent_names = parent_names;
init.ops = &lmk04832_clkout_ops;
init.flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT;
init.num_parents = ARRAY_SIZE(parent_names);
lmk->clkout[num].id = num;
lmk->clkout[num].lmk = lmk;
lmk->clkout[num].hw.init = &init;
lmk->clk_data->hws[num] = &lmk->clkout[num].hw;
/* Set initial parent */
regmap_update_bits(lmk->regmap,
LMK04832_REG_CLKOUT_SRC_MUX(num),
LMK04832_BIT_CLKOUT_SRC_MUX,
FIELD_PREP(LMK04832_BIT_CLKOUT_SRC_MUX,
lmk->clkout[num].sysref));
return devm_clk_hw_register(lmk->dev, &lmk->clkout[num].hw);
}
static int lmk04832_set_spi_rdbk(const struct lmk04832 *lmk, const int rdbk_pin)
{
int reg;
int ret;
int val = FIELD_PREP(LMK04832_BIT_CLKIN_SEL_MUX,
LMK04832_VAL_CLKIN_SEL_MUX_SPI_RDBK) |
FIELD_PREP(LMK04832_BIT_CLKIN_SEL_TYPE,
LMK04832_VAL_CLKIN_SEL_TYPE_OUT);
dev_info(lmk->dev, "setting up 4-wire mode\n");
ret = regmap_write(lmk->regmap, LMK04832_REG_RST3W,
LMK04832_BIT_SPI_3WIRE_DIS);
if (ret)
return ret;
switch (rdbk_pin) {
case RDBK_CLKIN_SEL0:
reg = LMK04832_REG_CLKIN_SEL0;
break;
case RDBK_CLKIN_SEL1:
reg = LMK04832_REG_CLKIN_SEL1;
break;
case RDBK_RESET:
reg = LMK04832_REG_CLKIN_RST;
break;
case RDBK_PLL1_LD:
reg = LMK04832_REG_PLL1_LD;
val = FIELD_PREP(LMK04832_BIT_PLL1_LD_MUX,
LMK04832_VAL_PLL1_LD_MUX_SPI_RDBK) |
FIELD_PREP(LMK04832_BIT_PLL1_LD_TYPE,
LMK04832_VAL_PLL1_LD_TYPE_OUT_PP);
break;
default:
return -EINVAL;
}
return regmap_write(lmk->regmap, reg, val);
}
static int lmk04832_probe(struct spi_device *spi)
{
const struct lmk04832_device_info *info;
int rdbk_pin = RDBK_CLKIN_SEL1;
struct device_node *child;
struct lmk04832 *lmk;
u8 tmp[3];
int ret;
int i;
info = &lmk04832_device_info[spi_get_device_id(spi)->driver_data];
lmk = devm_kzalloc(&spi->dev, sizeof(struct lmk04832), GFP_KERNEL);
if (!lmk)
return -ENOMEM;
lmk->dev = &spi->dev;
lmk->oscin = devm_clk_get(lmk->dev, "oscin");
if (IS_ERR(lmk->oscin)) {
dev_err(lmk->dev, "failed to get oscin clock\n");
return PTR_ERR(lmk->oscin);
}
ret = clk_prepare_enable(lmk->oscin);
if (ret)
return ret;
lmk->reset_gpio = devm_gpiod_get_optional(&spi->dev, "reset",
GPIOD_OUT_LOW);
lmk->dclk = devm_kcalloc(lmk->dev, info->num_channels >> 1,
sizeof(struct lmk_dclk), GFP_KERNEL);
if (!lmk->dclk) {
ret = -ENOMEM;
goto err_disable_oscin;
}
lmk->clkout = devm_kcalloc(lmk->dev, info->num_channels,
sizeof(*lmk->clkout), GFP_KERNEL);
if (!lmk->clkout) {
ret = -ENOMEM;
goto err_disable_oscin;
}
lmk->clk_data = devm_kzalloc(lmk->dev, struct_size(lmk->clk_data, hws,
info->num_channels),
GFP_KERNEL);
if (!lmk->clk_data) {
ret = -ENOMEM;
goto err_disable_oscin;
}
device_property_read_u32(lmk->dev, "ti,vco-hz", &lmk->vco_rate);
lmk->sysref_ddly = 8;
device_property_read_u32(lmk->dev, "ti,sysref-ddly", &lmk->sysref_ddly);
lmk->sysref_mux = LMK04832_VAL_SYSREF_MUX_CONTINUOUS;
device_property_read_u32(lmk->dev, "ti,sysref-mux",
&lmk->sysref_mux);
lmk->sync_mode = LMK04832_VAL_SYNC_MODE_OFF;
device_property_read_u32(lmk->dev, "ti,sync-mode",
&lmk->sync_mode);
lmk->sysref_pulse_cnt = 4;
device_property_read_u32(lmk->dev, "ti,sysref-pulse-count",
&lmk->sysref_pulse_cnt);
for_each_child_of_node(lmk->dev->of_node, child) {
int reg;
ret = of_property_read_u32(child, "reg", ®);
if (ret) {
dev_err(lmk->dev, "missing reg property in child: %s\n",
child->full_name);
of_node_put(child);
goto err_disable_oscin;
}
of_property_read_u32(child, "ti,clkout-fmt",
&lmk->clkout[reg].format);
if (lmk->clkout[reg].format >= 0x0a && reg % 2 == 0
&& reg != 8 && reg != 10)
dev_err(lmk->dev, "invalid format for clkout%02d\n",
reg);
lmk->clkout[reg].sysref =
of_property_read_bool(child, "ti,clkout-sysref");
}
lmk->regmap = devm_regmap_init_spi(spi, ®map_config);
if (IS_ERR(lmk->regmap)) {
dev_err(lmk->dev, "%s: regmap allocation failed: %ld\n",
__func__, PTR_ERR(lmk->regmap));
ret = PTR_ERR(lmk->regmap);
goto err_disable_oscin;
}
regmap_write(lmk->regmap, LMK04832_REG_RST3W, LMK04832_BIT_RESET);
if (!(spi->mode & SPI_3WIRE)) {
device_property_read_u32(lmk->dev, "ti,spi-4wire-rdbk",
&rdbk_pin);
ret = lmk04832_set_spi_rdbk(lmk, rdbk_pin);
if (ret)
goto err_disable_oscin;
}
	ret = regmap_bulk_read(lmk->regmap, LMK04832_REG_ID_PROD_MSB, &tmp, 3);
	if (ret)
		goto err_disable_oscin;
if ((tmp[0] << 8 | tmp[1]) != info->pid || tmp[2] != info->maskrev) {
dev_err(lmk->dev, "unsupported device type: pid 0x%04x, maskrev 0x%02x\n",
tmp[0] << 8 | tmp[1], tmp[2]);
ret = -EINVAL;
goto err_disable_oscin;
}
ret = lmk04832_register_vco(lmk);
if (ret) {
dev_err(lmk->dev, "failed to init device clock path\n");
goto err_disable_oscin;
}
if (lmk->vco_rate) {
dev_info(lmk->dev, "setting VCO rate to %u Hz\n", lmk->vco_rate);
ret = clk_set_rate(lmk->vco.clk, lmk->vco_rate);
if (ret) {
dev_err(lmk->dev, "failed to set VCO rate\n");
goto err_disable_oscin;
}
}
ret = lmk04832_register_sclk(lmk);
if (ret) {
dev_err(lmk->dev, "failed to init SYNC/SYSREF clock path\n");
goto err_disable_oscin;
}
for (i = 0; i < info->num_channels; i++) {
ret = lmk04832_register_clkout(lmk, i);
if (ret) {
dev_err(lmk->dev, "failed to register clk %d\n", i);
goto err_disable_oscin;
}
}
lmk->clk_data->num = info->num_channels;
ret = devm_of_clk_add_hw_provider(lmk->dev, of_clk_hw_onecell_get,
lmk->clk_data);
if (ret) {
dev_err(lmk->dev, "failed to add provider (%d)\n", ret);
goto err_disable_oscin;
}
spi_set_drvdata(spi, lmk);
return 0;
err_disable_oscin:
clk_disable_unprepare(lmk->oscin);
return ret;
}
static void lmk04832_remove(struct spi_device *spi)
{
struct lmk04832 *lmk = spi_get_drvdata(spi);
clk_disable_unprepare(lmk->oscin);
}
static const struct spi_device_id lmk04832_id[] = {
{ "lmk04832", LMK04832 },
{}
};
MODULE_DEVICE_TABLE(spi, lmk04832_id);
static const struct of_device_id lmk04832_of_id[] = {
{ .compatible = "ti,lmk04832" },
{}
};
MODULE_DEVICE_TABLE(of, lmk04832_of_id);
static struct spi_driver lmk04832_driver = {
.driver = {
.name = "lmk04832",
.of_match_table = lmk04832_of_id,
},
.probe = lmk04832_probe,
.remove = lmk04832_remove,
.id_table = lmk04832_id,
};
module_spi_driver(lmk04832_driver);
MODULE_AUTHOR("Liam Beguin <[email protected]>");
MODULE_DESCRIPTION("Texas Instruments LMK04832");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/clk/clk-lmk04832.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* TI CDCE706 programmable 3-PLL clock synthesizer driver
*
* Copyright (c) 2014 Cadence Design Systems Inc.
*
* Reference: https://www.ti.com/lit/ds/symlink/cdce706.pdf
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/rational.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#define CDCE706_CLKIN_CLOCK 10
#define CDCE706_CLKIN_SOURCE 11
#define CDCE706_PLL_M_LOW(pll) (1 + 3 * (pll))
#define CDCE706_PLL_N_LOW(pll) (2 + 3 * (pll))
#define CDCE706_PLL_HI(pll) (3 + 3 * (pll))
#define CDCE706_PLL_MUX 3
#define CDCE706_PLL_FVCO 6
#define CDCE706_DIVIDER(div) (13 + (div))
#define CDCE706_CLKOUT(out) (19 + (out))
#define CDCE706_CLKIN_CLOCK_MASK 0x10
#define CDCE706_CLKIN_SOURCE_SHIFT 6
#define CDCE706_CLKIN_SOURCE_MASK 0xc0
#define CDCE706_CLKIN_SOURCE_LVCMOS 0x40
#define CDCE706_PLL_MUX_MASK(pll) (0x80 >> (pll))
#define CDCE706_PLL_LOW_M_MASK 0xff
#define CDCE706_PLL_LOW_N_MASK 0xff
#define CDCE706_PLL_HI_M_MASK 0x1
#define CDCE706_PLL_HI_N_MASK 0x1e
#define CDCE706_PLL_HI_N_SHIFT 1
#define CDCE706_PLL_M_MAX 0x1ff
#define CDCE706_PLL_N_MAX 0xfff
#define CDCE706_PLL_FVCO_MASK(pll) (0x80 >> (pll))
#define CDCE706_PLL_FREQ_MIN 80000000
#define CDCE706_PLL_FREQ_MAX 300000000
#define CDCE706_PLL_FREQ_HI 180000000
#define CDCE706_DIVIDER_PLL(div) (9 + (div) - ((div) > 2) - ((div) > 4))
#define CDCE706_DIVIDER_PLL_SHIFT(div) ((div) < 2 ? 5 : 3 * ((div) & 1))
#define CDCE706_DIVIDER_PLL_MASK(div) (0x7 << CDCE706_DIVIDER_PLL_SHIFT(div))
#define CDCE706_DIVIDER_DIVIDER_MASK 0x7f
#define CDCE706_DIVIDER_DIVIDER_MAX 0x7f
#define CDCE706_CLKOUT_DIVIDER_MASK 0x7
#define CDCE706_CLKOUT_ENABLE_MASK 0x8
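/*
 * The divider-to-PLL mux fields are packed two to a register from
 * divider 2 onwards. Worked example of the two macros above: divider 0
 * maps to register 9 at shift 5, divider 1 to register 10 at shift 5,
 * dividers 2 and 3 share register 11 at shifts 0 and 3, and dividers 4
 * and 5 share register 12 at shifts 0 and 3.
 */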
static const struct regmap_config cdce706_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.val_format_endian = REGMAP_ENDIAN_NATIVE,
};
#define to_hw_data(phw) (container_of((phw), struct cdce706_hw_data, hw))
struct cdce706_hw_data {
struct cdce706_dev_data *dev_data;
unsigned idx;
unsigned parent;
struct clk_hw hw;
unsigned div;
unsigned mul;
unsigned mux;
};
struct cdce706_dev_data {
struct i2c_client *client;
struct regmap *regmap;
struct clk *clkin_clk[2];
const char *clkin_name[2];
struct cdce706_hw_data clkin[1];
struct cdce706_hw_data pll[3];
struct cdce706_hw_data divider[6];
struct cdce706_hw_data clkout[6];
};
static const char * const cdce706_source_name[] = {
"clk_in0", "clk_in1",
};
static const char * const cdce706_clkin_name[] = {
"clk_in",
};
static const char * const cdce706_pll_name[] = {
"pll1", "pll2", "pll3",
};
static const char * const cdce706_divider_parent_name[] = {
"clk_in", "pll1", "pll2", "pll2", "pll3",
};
static const char *cdce706_divider_name[] = {
"p0", "p1", "p2", "p3", "p4", "p5",
};
static const char * const cdce706_clkout_name[] = {
"clk_out0", "clk_out1", "clk_out2", "clk_out3", "clk_out4", "clk_out5",
};
static int cdce706_reg_read(struct cdce706_dev_data *dev_data, unsigned reg,
unsigned *val)
{
int rc = regmap_read(dev_data->regmap, reg | 0x80, val);
if (rc < 0)
dev_err(&dev_data->client->dev, "error reading reg %u", reg);
return rc;
}
static int cdce706_reg_write(struct cdce706_dev_data *dev_data, unsigned reg,
unsigned val)
{
int rc = regmap_write(dev_data->regmap, reg | 0x80, val);
if (rc < 0)
dev_err(&dev_data->client->dev, "error writing reg %u", reg);
return rc;
}
static int cdce706_reg_update(struct cdce706_dev_data *dev_data, unsigned reg,
unsigned mask, unsigned val)
{
int rc = regmap_update_bits(dev_data->regmap, reg | 0x80, mask, val);
if (rc < 0)
dev_err(&dev_data->client->dev, "error updating reg %u", reg);
return rc;
}
static int cdce706_clkin_set_parent(struct clk_hw *hw, u8 index)
{
struct cdce706_hw_data *hwd = to_hw_data(hw);
hwd->parent = index;
return 0;
}
static u8 cdce706_clkin_get_parent(struct clk_hw *hw)
{
struct cdce706_hw_data *hwd = to_hw_data(hw);
return hwd->parent;
}
static const struct clk_ops cdce706_clkin_ops = {
.determine_rate = clk_hw_determine_rate_no_reparent,
.set_parent = cdce706_clkin_set_parent,
.get_parent = cdce706_clkin_get_parent,
};
static unsigned long cdce706_pll_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct cdce706_hw_data *hwd = to_hw_data(hw);
dev_dbg(&hwd->dev_data->client->dev,
"%s, pll: %d, mux: %d, mul: %u, div: %u\n",
__func__, hwd->idx, hwd->mux, hwd->mul, hwd->div);
if (!hwd->mux) {
if (hwd->div && hwd->mul) {
u64 res = (u64)parent_rate * hwd->mul;
do_div(res, hwd->div);
return res;
}
} else {
if (hwd->div)
return parent_rate / hwd->div;
}
return 0;
}
static long cdce706_pll_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
struct cdce706_hw_data *hwd = to_hw_data(hw);
unsigned long mul, div;
u64 res;
dev_dbg(&hwd->dev_data->client->dev,
"%s, rate: %lu, parent_rate: %lu\n",
__func__, rate, *parent_rate);
rational_best_approximation(rate, *parent_rate,
CDCE706_PLL_N_MAX, CDCE706_PLL_M_MAX,
&mul, &div);
hwd->mul = mul;
hwd->div = div;
dev_dbg(&hwd->dev_data->client->dev,
"%s, pll: %d, mul: %lu, div: %lu\n",
__func__, hwd->idx, mul, div);
res = (u64)*parent_rate * hwd->mul;
do_div(res, hwd->div);
return res;
}
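/*
 * Worked example (rates assumed): requesting 266 MHz from a 27 MHz
 * input, rational_best_approximation(266000000, 27000000, 4095, 511,
 * &mul, &div) returns mul = 266 and div = 27 (already in lowest terms
 * and within the N/M limits), so the PLL hits the target exactly:
 * 27 MHz * 266 / 27 = 266 MHz.
 */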
static int cdce706_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct cdce706_hw_data *hwd = to_hw_data(hw);
unsigned long mul = hwd->mul, div = hwd->div;
int err;
dev_dbg(&hwd->dev_data->client->dev,
"%s, pll: %d, mul: %lu, div: %lu\n",
__func__, hwd->idx, mul, div);
err = cdce706_reg_update(hwd->dev_data,
CDCE706_PLL_HI(hwd->idx),
CDCE706_PLL_HI_M_MASK | CDCE706_PLL_HI_N_MASK,
((div >> 8) & CDCE706_PLL_HI_M_MASK) |
((mul >> (8 - CDCE706_PLL_HI_N_SHIFT)) &
CDCE706_PLL_HI_N_MASK));
if (err < 0)
return err;
err = cdce706_reg_write(hwd->dev_data,
CDCE706_PLL_M_LOW(hwd->idx),
div & CDCE706_PLL_LOW_M_MASK);
if (err < 0)
return err;
err = cdce706_reg_write(hwd->dev_data,
CDCE706_PLL_N_LOW(hwd->idx),
mul & CDCE706_PLL_LOW_N_MASK);
if (err < 0)
return err;
err = cdce706_reg_update(hwd->dev_data,
CDCE706_PLL_FVCO,
CDCE706_PLL_FVCO_MASK(hwd->idx),
rate > CDCE706_PLL_FREQ_HI ?
CDCE706_PLL_FVCO_MASK(hwd->idx) : 0);
return err;
}
static const struct clk_ops cdce706_pll_ops = {
.recalc_rate = cdce706_pll_recalc_rate,
.round_rate = cdce706_pll_round_rate,
.set_rate = cdce706_pll_set_rate,
};
static int cdce706_divider_set_parent(struct clk_hw *hw, u8 index)
{
struct cdce706_hw_data *hwd = to_hw_data(hw);
if (hwd->parent == index)
return 0;
hwd->parent = index;
return cdce706_reg_update(hwd->dev_data,
CDCE706_DIVIDER_PLL(hwd->idx),
CDCE706_DIVIDER_PLL_MASK(hwd->idx),
index << CDCE706_DIVIDER_PLL_SHIFT(hwd->idx));
}
static u8 cdce706_divider_get_parent(struct clk_hw *hw)
{
struct cdce706_hw_data *hwd = to_hw_data(hw);
return hwd->parent;
}
static unsigned long cdce706_divider_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct cdce706_hw_data *hwd = to_hw_data(hw);
dev_dbg(&hwd->dev_data->client->dev,
"%s, divider: %d, div: %u\n",
__func__, hwd->idx, hwd->div);
if (hwd->div)
return parent_rate / hwd->div;
return 0;
}
static int cdce706_divider_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
struct cdce706_hw_data *hwd = to_hw_data(hw);
struct cdce706_dev_data *cdce = hwd->dev_data;
unsigned long rate = req->rate;
unsigned long mul, div;
dev_dbg(&hwd->dev_data->client->dev,
"%s, rate: %lu, parent_rate: %lu\n",
__func__, rate, req->best_parent_rate);
rational_best_approximation(rate, req->best_parent_rate,
1, CDCE706_DIVIDER_DIVIDER_MAX,
&mul, &div);
if (!mul)
div = CDCE706_DIVIDER_DIVIDER_MAX;
if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) {
unsigned long best_diff = rate;
unsigned long best_div = 0;
struct clk *gp_clk = cdce->clkin_clk[cdce->clkin[0].parent];
unsigned long gp_rate = gp_clk ? clk_get_rate(gp_clk) : 0;
for (div = CDCE706_PLL_FREQ_MIN / rate; best_diff &&
div <= CDCE706_PLL_FREQ_MAX / rate; ++div) {
unsigned long n, m;
unsigned long diff;
unsigned long div_rate;
u64 div_rate64;
if (rate * div < CDCE706_PLL_FREQ_MIN)
continue;
rational_best_approximation(rate * div, gp_rate,
CDCE706_PLL_N_MAX,
CDCE706_PLL_M_MAX,
&n, &m);
div_rate64 = (u64)gp_rate * n;
do_div(div_rate64, m);
do_div(div_rate64, div);
div_rate = div_rate64;
diff = max(div_rate, rate) - min(div_rate, rate);
if (diff < best_diff) {
best_diff = diff;
best_div = div;
dev_dbg(&hwd->dev_data->client->dev,
"%s, %lu * %lu / %lu / %lu = %lu\n",
__func__, gp_rate, n, m, div, div_rate);
}
}
div = best_div;
dev_dbg(&hwd->dev_data->client->dev,
"%s, altering parent rate: %lu -> %lu\n",
__func__, req->best_parent_rate, rate * div);
req->best_parent_rate = rate * div;
}
hwd->div = div;
dev_dbg(&hwd->dev_data->client->dev,
"%s, divider: %d, div: %lu\n",
__func__, hwd->idx, div);
req->rate = req->best_parent_rate / div;
return 0;
}
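/*
 * Worked example of the CLK_SET_RATE_PARENT search above (rates
 * assumed): for a requested 25 MHz output, the candidate post-dividers
 * run from 80 MHz / 25 MHz = 3 up to 300 MHz / 25 MHz = 12 so the PLL
 * stays inside its 80..300 MHz window; each candidate divider asks
 * rational_best_approximation() for the closest PLL setting and the
 * one with the smallest output error wins.
 */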
static int cdce706_divider_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct cdce706_hw_data *hwd = to_hw_data(hw);
dev_dbg(&hwd->dev_data->client->dev,
"%s, divider: %d, div: %u\n",
__func__, hwd->idx, hwd->div);
return cdce706_reg_update(hwd->dev_data,
CDCE706_DIVIDER(hwd->idx),
CDCE706_DIVIDER_DIVIDER_MASK,
hwd->div);
}
static const struct clk_ops cdce706_divider_ops = {
.set_parent = cdce706_divider_set_parent,
.get_parent = cdce706_divider_get_parent,
.recalc_rate = cdce706_divider_recalc_rate,
.determine_rate = cdce706_divider_determine_rate,
.set_rate = cdce706_divider_set_rate,
};
static int cdce706_clkout_prepare(struct clk_hw *hw)
{
struct cdce706_hw_data *hwd = to_hw_data(hw);
return cdce706_reg_update(hwd->dev_data, CDCE706_CLKOUT(hwd->idx),
CDCE706_CLKOUT_ENABLE_MASK,
CDCE706_CLKOUT_ENABLE_MASK);
}
static void cdce706_clkout_unprepare(struct clk_hw *hw)
{
struct cdce706_hw_data *hwd = to_hw_data(hw);
cdce706_reg_update(hwd->dev_data, CDCE706_CLKOUT(hwd->idx),
CDCE706_CLKOUT_ENABLE_MASK, 0);
}
static int cdce706_clkout_set_parent(struct clk_hw *hw, u8 index)
{
struct cdce706_hw_data *hwd = to_hw_data(hw);
if (hwd->parent == index)
return 0;
hwd->parent = index;
	return cdce706_reg_update(hwd->dev_data,
				  CDCE706_CLKOUT(hwd->idx),
				  CDCE706_CLKOUT_DIVIDER_MASK, index);
}
static u8 cdce706_clkout_get_parent(struct clk_hw *hw)
{
struct cdce706_hw_data *hwd = to_hw_data(hw);
return hwd->parent;
}
static unsigned long cdce706_clkout_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
return parent_rate;
}
static int cdce706_clkout_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
req->best_parent_rate = req->rate;
return 0;
}
static int cdce706_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
return 0;
}
static const struct clk_ops cdce706_clkout_ops = {
.prepare = cdce706_clkout_prepare,
.unprepare = cdce706_clkout_unprepare,
.set_parent = cdce706_clkout_set_parent,
.get_parent = cdce706_clkout_get_parent,
.recalc_rate = cdce706_clkout_recalc_rate,
.determine_rate = cdce706_clkout_determine_rate,
.set_rate = cdce706_clkout_set_rate,
};
static int cdce706_register_hw(struct cdce706_dev_data *cdce,
struct cdce706_hw_data *hw, unsigned num_hw,
const char * const *clk_names,
struct clk_init_data *init)
{
unsigned i;
int ret;
for (i = 0; i < num_hw; ++i, ++hw) {
init->name = clk_names[i];
hw->dev_data = cdce;
hw->idx = i;
hw->hw.init = init;
ret = devm_clk_hw_register(&cdce->client->dev,
&hw->hw);
if (ret) {
dev_err(&cdce->client->dev, "Failed to register %s\n",
clk_names[i]);
return ret;
}
}
return 0;
}
static int cdce706_register_clkin(struct cdce706_dev_data *cdce)
{
struct clk_init_data init = {
.ops = &cdce706_clkin_ops,
.parent_names = cdce->clkin_name,
.num_parents = ARRAY_SIZE(cdce->clkin_name),
};
unsigned i;
int ret;
unsigned clock, source;
for (i = 0; i < ARRAY_SIZE(cdce->clkin_name); ++i) {
struct clk *parent = devm_clk_get(&cdce->client->dev,
cdce706_source_name[i]);
if (IS_ERR(parent)) {
cdce->clkin_name[i] = cdce706_source_name[i];
} else {
cdce->clkin_name[i] = __clk_get_name(parent);
cdce->clkin_clk[i] = parent;
}
}
ret = cdce706_reg_read(cdce, CDCE706_CLKIN_SOURCE, &source);
if (ret < 0)
return ret;
if ((source & CDCE706_CLKIN_SOURCE_MASK) ==
CDCE706_CLKIN_SOURCE_LVCMOS) {
ret = cdce706_reg_read(cdce, CDCE706_CLKIN_CLOCK, &clock);
if (ret < 0)
return ret;
cdce->clkin[0].parent = !!(clock & CDCE706_CLKIN_CLOCK_MASK);
}
ret = cdce706_register_hw(cdce, cdce->clkin,
ARRAY_SIZE(cdce->clkin),
cdce706_clkin_name, &init);
return ret;
}
static int cdce706_register_plls(struct cdce706_dev_data *cdce)
{
struct clk_init_data init = {
.ops = &cdce706_pll_ops,
.parent_names = cdce706_clkin_name,
.num_parents = ARRAY_SIZE(cdce706_clkin_name),
};
unsigned i;
int ret;
unsigned mux;
ret = cdce706_reg_read(cdce, CDCE706_PLL_MUX, &mux);
if (ret < 0)
return ret;
for (i = 0; i < ARRAY_SIZE(cdce->pll); ++i) {
unsigned m, n, v;
ret = cdce706_reg_read(cdce, CDCE706_PLL_M_LOW(i), &m);
if (ret < 0)
return ret;
ret = cdce706_reg_read(cdce, CDCE706_PLL_N_LOW(i), &n);
if (ret < 0)
return ret;
ret = cdce706_reg_read(cdce, CDCE706_PLL_HI(i), &v);
if (ret < 0)
return ret;
cdce->pll[i].div = m | ((v & CDCE706_PLL_HI_M_MASK) << 8);
cdce->pll[i].mul = n | ((v & CDCE706_PLL_HI_N_MASK) <<
(8 - CDCE706_PLL_HI_N_SHIFT));
cdce->pll[i].mux = mux & CDCE706_PLL_MUX_MASK(i);
dev_dbg(&cdce->client->dev,
"%s: i: %u, div: %u, mul: %u, mux: %d\n", __func__, i,
cdce->pll[i].div, cdce->pll[i].mul, cdce->pll[i].mux);
}
ret = cdce706_register_hw(cdce, cdce->pll,
ARRAY_SIZE(cdce->pll),
cdce706_pll_name, &init);
return ret;
}
static int cdce706_register_dividers(struct cdce706_dev_data *cdce)
{
struct clk_init_data init = {
.ops = &cdce706_divider_ops,
.parent_names = cdce706_divider_parent_name,
.num_parents = ARRAY_SIZE(cdce706_divider_parent_name),
.flags = CLK_SET_RATE_PARENT,
};
unsigned i;
int ret;
for (i = 0; i < ARRAY_SIZE(cdce->divider); ++i) {
unsigned val;
ret = cdce706_reg_read(cdce, CDCE706_DIVIDER_PLL(i), &val);
if (ret < 0)
return ret;
cdce->divider[i].parent =
(val & CDCE706_DIVIDER_PLL_MASK(i)) >>
CDCE706_DIVIDER_PLL_SHIFT(i);
ret = cdce706_reg_read(cdce, CDCE706_DIVIDER(i), &val);
if (ret < 0)
return ret;
cdce->divider[i].div = val & CDCE706_DIVIDER_DIVIDER_MASK;
dev_dbg(&cdce->client->dev,
"%s: i: %u, parent: %u, div: %u\n", __func__, i,
cdce->divider[i].parent, cdce->divider[i].div);
}
ret = cdce706_register_hw(cdce, cdce->divider,
ARRAY_SIZE(cdce->divider),
cdce706_divider_name, &init);
return ret;
}
static int cdce706_register_clkouts(struct cdce706_dev_data *cdce)
{
struct clk_init_data init = {
.ops = &cdce706_clkout_ops,
.parent_names = cdce706_divider_name,
.num_parents = ARRAY_SIZE(cdce706_divider_name),
.flags = CLK_SET_RATE_PARENT,
};
unsigned i;
int ret;
for (i = 0; i < ARRAY_SIZE(cdce->clkout); ++i) {
unsigned val;
ret = cdce706_reg_read(cdce, CDCE706_CLKOUT(i), &val);
if (ret < 0)
return ret;
cdce->clkout[i].parent = val & CDCE706_CLKOUT_DIVIDER_MASK;
dev_dbg(&cdce->client->dev,
"%s: i: %u, parent: %u\n", __func__, i,
cdce->clkout[i].parent);
}
return cdce706_register_hw(cdce, cdce->clkout,
ARRAY_SIZE(cdce->clkout),
cdce706_clkout_name, &init);
}
static struct clk_hw *
of_clk_cdce_get(struct of_phandle_args *clkspec, void *data)
{
struct cdce706_dev_data *cdce = data;
unsigned int idx = clkspec->args[0];
if (idx >= ARRAY_SIZE(cdce->clkout)) {
pr_err("%s: invalid index %u\n", __func__, idx);
return ERR_PTR(-EINVAL);
}
return &cdce->clkout[idx].hw;
}
static int cdce706_probe(struct i2c_client *client)
{
struct i2c_adapter *adapter = client->adapter;
struct cdce706_dev_data *cdce;
int ret;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -EIO;
cdce = devm_kzalloc(&client->dev, sizeof(*cdce), GFP_KERNEL);
if (!cdce)
return -ENOMEM;
cdce->client = client;
cdce->regmap = devm_regmap_init_i2c(client, &cdce706_regmap_config);
if (IS_ERR(cdce->regmap)) {
dev_err(&client->dev, "Failed to initialize regmap\n");
return -EINVAL;
}
i2c_set_clientdata(client, cdce);
ret = cdce706_register_clkin(cdce);
if (ret < 0)
return ret;
ret = cdce706_register_plls(cdce);
if (ret < 0)
return ret;
ret = cdce706_register_dividers(cdce);
if (ret < 0)
return ret;
ret = cdce706_register_clkouts(cdce);
if (ret < 0)
return ret;
return devm_of_clk_add_hw_provider(&client->dev, of_clk_cdce_get,
cdce);
}
#ifdef CONFIG_OF
static const struct of_device_id cdce706_dt_match[] = {
{ .compatible = "ti,cdce706" },
{ },
};
MODULE_DEVICE_TABLE(of, cdce706_dt_match);
#endif
static const struct i2c_device_id cdce706_id[] = {
{ "cdce706", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, cdce706_id);
static struct i2c_driver cdce706_i2c_driver = {
.driver = {
.name = "cdce706",
.of_match_table = of_match_ptr(cdce706_dt_match),
},
.probe = cdce706_probe,
.id_table = cdce706_id,
};
module_i2c_driver(cdce706_i2c_driver);
MODULE_AUTHOR("Max Filippov <[email protected]>");
MODULE_DESCRIPTION("TI CDCE 706 clock synthesizer driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/clk/clk-cdce706.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Cortina Gemini SoC Clock Controller driver
* Copyright (c) 2017 Linus Walleij <[email protected]>
*/
#define pr_fmt(fmt) "clk-gemini: " fmt
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/spinlock.h>
#include <linux/reset-controller.h>
#include <dt-bindings/reset/cortina,gemini-reset.h>
#include <dt-bindings/clock/cortina,gemini-clock.h>
/* Globally visible clocks */
static DEFINE_SPINLOCK(gemini_clk_lock);
#define GEMINI_GLOBAL_STATUS 0x04
#define PLL_OSC_SEL BIT(30)
#define AHBSPEED_SHIFT (15)
#define AHBSPEED_MASK 0x07
#define CPU_AHB_RATIO_SHIFT (18)
#define CPU_AHB_RATIO_MASK 0x03
#define GEMINI_GLOBAL_PLL_CONTROL 0x08
#define GEMINI_GLOBAL_SOFT_RESET 0x0c
#define GEMINI_GLOBAL_MISC_CONTROL 0x30
#define PCI_CLK_66MHZ BIT(18)
#define GEMINI_GLOBAL_CLOCK_CONTROL 0x34
#define PCI_CLKRUN_EN BIT(16)
#define TVC_HALFDIV_SHIFT (24)
#define TVC_HALFDIV_MASK 0x1f
#define SECURITY_CLK_SEL BIT(29)
#define GEMINI_GLOBAL_PCI_DLL_CONTROL 0x44
#define PCI_DLL_BYPASS BIT(31)
#define PCI_DLL_TAP_SEL_MASK 0x1f
/**
* struct gemini_gate_data - Gemini gated clocks
* @bit_idx: the bit used to gate this clock in the clock register
* @name: the clock name
* @parent_name: the name of the parent clock
* @flags: standard clock framework flags
*/
struct gemini_gate_data {
u8 bit_idx;
const char *name;
const char *parent_name;
unsigned long flags;
};
/**
* struct clk_gemini_pci - Gemini PCI clock
* @hw: corresponding clock hardware entry
* @map: regmap to access the registers
* @rate: current rate
*/
struct clk_gemini_pci {
struct clk_hw hw;
struct regmap *map;
unsigned long rate;
};
/**
* struct gemini_reset - gemini reset controller
* @map: regmap to access the containing system controller
* @rcdev: reset controller device
*/
struct gemini_reset {
struct regmap *map;
struct reset_controller_dev rcdev;
};
/* Keeps track of all clocks */
static struct clk_hw_onecell_data *gemini_clk_data;
static const struct gemini_gate_data gemini_gates[] = {
{ 1, "security-gate", "secdiv", 0 },
{ 2, "gmac0-gate", "ahb", 0 },
{ 3, "gmac1-gate", "ahb", 0 },
{ 4, "sata0-gate", "ahb", 0 },
{ 5, "sata1-gate", "ahb", 0 },
{ 6, "usb0-gate", "ahb", 0 },
{ 7, "usb1-gate", "ahb", 0 },
{ 8, "ide-gate", "ahb", 0 },
{ 9, "pci-gate", "ahb", 0 },
/*
* The DDR controller may never have a driver, but certainly must
* not be gated off.
*/
{ 10, "ddr-gate", "ahb", CLK_IS_CRITICAL },
/*
* The flash controller must be on to access NOR flash through the
* memory map.
*/
{ 11, "flash-gate", "ahb", CLK_IGNORE_UNUSED },
{ 12, "tvc-gate", "ahb", 0 },
{ 13, "boot-gate", "apb", 0 },
};
#define to_pciclk(_hw) container_of(_hw, struct clk_gemini_pci, hw)
#define to_gemini_reset(p) container_of((p), struct gemini_reset, rcdev)
static unsigned long gemini_pci_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_gemini_pci *pciclk = to_pciclk(hw);
u32 val;
regmap_read(pciclk->map, GEMINI_GLOBAL_MISC_CONTROL, &val);
if (val & PCI_CLK_66MHZ)
return 66000000;
return 33000000;
}
static long gemini_pci_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
/* We support 33 and 66 MHz */
if (rate < 48000000)
return 33000000;
return 66000000;
}
static int gemini_pci_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_gemini_pci *pciclk = to_pciclk(hw);
if (rate == 33000000)
return regmap_update_bits(pciclk->map,
GEMINI_GLOBAL_MISC_CONTROL,
PCI_CLK_66MHZ, 0);
if (rate == 66000000)
return regmap_update_bits(pciclk->map,
GEMINI_GLOBAL_MISC_CONTROL,
0, PCI_CLK_66MHZ);
return -EINVAL;
}
static int gemini_pci_enable(struct clk_hw *hw)
{
struct clk_gemini_pci *pciclk = to_pciclk(hw);
regmap_update_bits(pciclk->map, GEMINI_GLOBAL_CLOCK_CONTROL,
0, PCI_CLKRUN_EN);
return 0;
}
static void gemini_pci_disable(struct clk_hw *hw)
{
struct clk_gemini_pci *pciclk = to_pciclk(hw);
regmap_update_bits(pciclk->map, GEMINI_GLOBAL_CLOCK_CONTROL,
PCI_CLKRUN_EN, 0);
}
static int gemini_pci_is_enabled(struct clk_hw *hw)
{
struct clk_gemini_pci *pciclk = to_pciclk(hw);
unsigned int val;
regmap_read(pciclk->map, GEMINI_GLOBAL_CLOCK_CONTROL, &val);
return !!(val & PCI_CLKRUN_EN);
}
static const struct clk_ops gemini_pci_clk_ops = {
.recalc_rate = gemini_pci_recalc_rate,
.round_rate = gemini_pci_round_rate,
.set_rate = gemini_pci_set_rate,
.enable = gemini_pci_enable,
.disable = gemini_pci_disable,
.is_enabled = gemini_pci_is_enabled,
};
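/*
 * Consumer-side sketch (illustrative only): round_rate() snaps
 * everything below 48 MHz to 33 MHz and everything else to 66 MHz, so
 * a PCI host driver would simply request one of the two rates, e.g.
 * clk_set_rate(pclk, 66000000), and set_rate() flips PCI_CLK_66MHZ in
 * the misc control register accordingly.
 */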
static struct clk_hw *gemini_pci_clk_setup(const char *name,
const char *parent_name,
struct regmap *map)
{
struct clk_gemini_pci *pciclk;
struct clk_init_data init;
int ret;
pciclk = kzalloc(sizeof(*pciclk), GFP_KERNEL);
if (!pciclk)
return ERR_PTR(-ENOMEM);
init.name = name;
init.ops = &gemini_pci_clk_ops;
init.flags = 0;
init.parent_names = &parent_name;
init.num_parents = 1;
pciclk->map = map;
pciclk->hw.init = &init;
ret = clk_hw_register(NULL, &pciclk->hw);
if (ret) {
kfree(pciclk);
return ERR_PTR(ret);
}
return &pciclk->hw;
}
/*
* This is a self-deasserting reset controller.
*/
static int gemini_reset(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct gemini_reset *gr = to_gemini_reset(rcdev);
/* Manual says to always set BIT 30 (CPU1) to 1 */
return regmap_write(gr->map,
GEMINI_GLOBAL_SOFT_RESET,
BIT(GEMINI_RESET_CPU1) | BIT(id));
}
static int gemini_reset_assert(struct reset_controller_dev *rcdev,
unsigned long id)
{
return 0;
}
static int gemini_reset_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
return 0;
}
static int gemini_reset_status(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct gemini_reset *gr = to_gemini_reset(rcdev);
u32 val;
int ret;
ret = regmap_read(gr->map, GEMINI_GLOBAL_SOFT_RESET, &val);
if (ret)
return ret;
return !!(val & BIT(id));
}
static const struct reset_control_ops gemini_reset_ops = {
.reset = gemini_reset,
.assert = gemini_reset_assert,
.deassert = gemini_reset_deassert,
.status = gemini_reset_status,
};
static int gemini_clk_probe(struct platform_device *pdev)
{
	/* Gives the fractions 1x, 1.5x, 1.85x (24/13) and 2x */
unsigned int cpu_ahb_mult[4] = { 1, 3, 24, 2 };
unsigned int cpu_ahb_div[4] = { 1, 2, 13, 1 };
void __iomem *base;
struct gemini_reset *gr;
struct regmap *map;
struct clk_hw *hw;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
unsigned int mult, div;
u32 val;
int ret;
int i;
gr = devm_kzalloc(dev, sizeof(*gr), GFP_KERNEL);
if (!gr)
return -ENOMEM;
/* Remap the system controller for the exclusive register */
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
map = syscon_node_to_regmap(np);
if (IS_ERR(map)) {
dev_err(dev, "no syscon regmap\n");
return PTR_ERR(map);
}
gr->map = map;
gr->rcdev.owner = THIS_MODULE;
gr->rcdev.nr_resets = 32;
gr->rcdev.ops = &gemini_reset_ops;
gr->rcdev.of_node = np;
ret = devm_reset_controller_register(dev, &gr->rcdev);
if (ret) {
dev_err(dev, "could not register reset controller\n");
return ret;
}
/* RTC clock 32768 Hz */
hw = clk_hw_register_fixed_rate(NULL, "rtc", NULL, 0, 32768);
gemini_clk_data->hws[GEMINI_CLK_RTC] = hw;
/* CPU clock derived as a fixed ratio from the AHB clock */
regmap_read(map, GEMINI_GLOBAL_STATUS, &val);
val >>= CPU_AHB_RATIO_SHIFT;
val &= CPU_AHB_RATIO_MASK;
hw = clk_hw_register_fixed_factor(NULL, "cpu", "ahb", 0,
cpu_ahb_mult[val],
cpu_ahb_div[val]);
gemini_clk_data->hws[GEMINI_CLK_CPU] = hw;
	/* Security clock is 1:1 or 0.75 of AHB */
regmap_read(map, GEMINI_GLOBAL_CLOCK_CONTROL, &val);
if (val & SECURITY_CLK_SEL) {
mult = 1;
div = 1;
} else {
mult = 3;
div = 4;
}
hw = clk_hw_register_fixed_factor(NULL, "secdiv", "ahb", 0, mult, div);
/*
* These are the leaf gates, at boot no clocks are gated.
*/
for (i = 0; i < ARRAY_SIZE(gemini_gates); i++) {
const struct gemini_gate_data *gd;
gd = &gemini_gates[i];
gemini_clk_data->hws[GEMINI_CLK_GATES + i] =
clk_hw_register_gate(NULL, gd->name,
gd->parent_name,
gd->flags,
base + GEMINI_GLOBAL_CLOCK_CONTROL,
gd->bit_idx,
CLK_GATE_SET_TO_DISABLE,
&gemini_clk_lock);
}
/*
* The TV Interface Controller has a 5-bit half divider register.
* This clock is supposed to be 27MHz as this is an exact multiple
* of PAL and NTSC frequencies. The register is undocumented :(
* FIXME: figure out the parent and how the divider works.
*/
mult = 1;
div = ((val >> TVC_HALFDIV_SHIFT) & TVC_HALFDIV_MASK);
dev_dbg(dev, "TVC half divider value = %d\n", div);
div += 1;
hw = clk_hw_register_fixed_rate(NULL, "tvcdiv", "xtal", 0, 27000000);
gemini_clk_data->hws[GEMINI_CLK_TVC] = hw;
/* FIXME: very unclear what the parent is */
hw = gemini_pci_clk_setup("PCI", "xtal", map);
gemini_clk_data->hws[GEMINI_CLK_PCI] = hw;
/* FIXME: very unclear what the parent is */
hw = clk_hw_register_fixed_rate(NULL, "uart", "xtal", 0, 48000000);
gemini_clk_data->hws[GEMINI_CLK_UART] = hw;
return 0;
}
static const struct of_device_id gemini_clk_dt_ids[] = {
{ .compatible = "cortina,gemini-syscon", },
{ /* sentinel */ },
};
static struct platform_driver gemini_clk_driver = {
.probe = gemini_clk_probe,
.driver = {
.name = "gemini-clk",
.of_match_table = gemini_clk_dt_ids,
.suppress_bind_attrs = true,
},
};
builtin_platform_driver(gemini_clk_driver);
static void __init gemini_cc_init(struct device_node *np)
{
struct regmap *map;
struct clk_hw *hw;
unsigned long freq;
unsigned int mult, div;
u32 val;
int ret;
int i;
gemini_clk_data = kzalloc(struct_size(gemini_clk_data, hws,
GEMINI_NUM_CLKS),
GFP_KERNEL);
if (!gemini_clk_data)
return;
gemini_clk_data->num = GEMINI_NUM_CLKS;
/*
	 * This way all clocks fetched before the platform device probes,
* except those we assign here for early use, will be deferred.
*/
for (i = 0; i < GEMINI_NUM_CLKS; i++)
gemini_clk_data->hws[i] = ERR_PTR(-EPROBE_DEFER);
map = syscon_node_to_regmap(np);
if (IS_ERR(map)) {
pr_err("no syscon regmap\n");
return;
}
/*
* We check that the regmap works on this very first access,
* but as this is an MMIO-backed regmap, subsequent regmap
* access is not going to fail and we skip error checks from
* this point.
*/
ret = regmap_read(map, GEMINI_GLOBAL_STATUS, &val);
if (ret) {
pr_err("failed to read global status register\n");
return;
}
/*
* XTAL is the crystal oscillator, 60 or 30 MHz selected from
* strap pin E6
*/
if (val & PLL_OSC_SEL)
freq = 30000000;
else
freq = 60000000;
hw = clk_hw_register_fixed_rate(NULL, "xtal", NULL, 0, freq);
pr_debug("main crystal @%lu MHz\n", freq / 1000000);
/* VCO clock derived from the crystal */
mult = 13 + ((val >> AHBSPEED_SHIFT) & AHBSPEED_MASK);
div = 2;
/* If we run on 30 MHz crystal we have to multiply with two */
if (val & PLL_OSC_SEL)
mult *= 2;
hw = clk_hw_register_fixed_factor(NULL, "vco", "xtal", 0, mult, div);
/* The AHB clock is always 1/3 of the VCO */
hw = clk_hw_register_fixed_factor(NULL, "ahb", "vco", 0, 1, 3);
gemini_clk_data->hws[GEMINI_CLK_AHB] = hw;
/* The APB clock is always 1/6 of the AHB */
hw = clk_hw_register_fixed_factor(NULL, "apb", "ahb", 0, 1, 6);
gemini_clk_data->hws[GEMINI_CLK_APB] = hw;
/* Register the clocks to be accessed by the device tree */
of_clk_add_hw_provider(np, of_clk_hw_onecell_get, gemini_clk_data);
}
CLK_OF_DECLARE_DRIVER(gemini_cc, "cortina,gemini-syscon", gemini_cc_init);
| linux-master | drivers/clk/clk-gemini.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015 Maxime Ripard <[email protected]>
*/
#include <linux/bitops.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/slab.h>
static inline u32 clk_mult_readl(struct clk_multiplier *mult)
{
if (mult->flags & CLK_MULTIPLIER_BIG_ENDIAN)
return ioread32be(mult->reg);
return readl(mult->reg);
}
static inline void clk_mult_writel(struct clk_multiplier *mult, u32 val)
{
if (mult->flags & CLK_MULTIPLIER_BIG_ENDIAN)
iowrite32be(val, mult->reg);
else
writel(val, mult->reg);
}
static unsigned long __get_mult(struct clk_multiplier *mult,
unsigned long rate,
unsigned long parent_rate)
{
if (mult->flags & CLK_MULTIPLIER_ROUND_CLOSEST)
return DIV_ROUND_CLOSEST(rate, parent_rate);
return rate / parent_rate;
}
static unsigned long clk_multiplier_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_multiplier *mult = to_clk_multiplier(hw);
unsigned long val;
val = clk_mult_readl(mult) >> mult->shift;
val &= GENMASK(mult->width - 1, 0);
if (!val && mult->flags & CLK_MULTIPLIER_ZERO_BYPASS)
val = 1;
return parent_rate * val;
}
static bool __is_best_rate(unsigned long rate, unsigned long new,
unsigned long best, unsigned long flags)
{
	/* rate and new are unsigned, so avoid abs() on a wrapped difference */
	if (flags & CLK_MULTIPLIER_ROUND_CLOSEST)
		return abs_diff(rate, new) < abs_diff(rate, best);
return new >= rate && new < best;
}
static unsigned long __bestmult(struct clk_hw *hw, unsigned long rate,
unsigned long *best_parent_rate,
u8 width, unsigned long flags)
{
struct clk_multiplier *mult = to_clk_multiplier(hw);
unsigned long orig_parent_rate = *best_parent_rate;
unsigned long parent_rate, current_rate, best_rate = ~0;
unsigned int i, bestmult = 0;
unsigned int maxmult = (1 << width) - 1;
if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
bestmult = rate / orig_parent_rate;
/* Make sure we don't end up with a 0 multiplier */
if ((bestmult == 0) &&
!(mult->flags & CLK_MULTIPLIER_ZERO_BYPASS))
bestmult = 1;
/* Make sure we don't overflow the multiplier */
if (bestmult > maxmult)
bestmult = maxmult;
return bestmult;
}
	for (i = 1; i <= maxmult; i++) {
if (rate == orig_parent_rate * i) {
/*
* This is the best case for us if we have a
* perfect match without changing the parent
* rate.
*/
*best_parent_rate = orig_parent_rate;
return i;
}
parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw),
rate / i);
current_rate = parent_rate * i;
if (__is_best_rate(rate, current_rate, best_rate, flags)) {
bestmult = i;
best_rate = current_rate;
*best_parent_rate = parent_rate;
}
}
return bestmult;
}
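/*
 * Worked example (values assumed): with a fixed 24 MHz parent (no
 * CLK_SET_RATE_PARENT), a 100 MHz request and a 3-bit field
 * (maxmult = 7), the short path above picks 100 / 24 = 4, so the clock
 * rounds to 96 MHz.
 */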
static long clk_multiplier_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
struct clk_multiplier *mult = to_clk_multiplier(hw);
unsigned long factor = __bestmult(hw, rate, parent_rate,
mult->width, mult->flags);
return *parent_rate * factor;
}
static int clk_multiplier_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_multiplier *mult = to_clk_multiplier(hw);
unsigned long factor = __get_mult(mult, rate, parent_rate);
unsigned long flags = 0;
unsigned long val;
if (mult->lock)
spin_lock_irqsave(mult->lock, flags);
else
__acquire(mult->lock);
val = clk_mult_readl(mult);
val &= ~GENMASK(mult->width + mult->shift - 1, mult->shift);
val |= factor << mult->shift;
clk_mult_writel(mult, val);
if (mult->lock)
spin_unlock_irqrestore(mult->lock, flags);
else
__release(mult->lock);
return 0;
}
const struct clk_ops clk_multiplier_ops = {
.recalc_rate = clk_multiplier_recalc_rate,
.round_rate = clk_multiplier_round_rate,
.set_rate = clk_multiplier_set_rate,
};
EXPORT_SYMBOL_GPL(clk_multiplier_ops);
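/*
 * Illustrative sketch (not part of this file's API): one plausible way a
 * provider driver could wrap these ops around a register field. All names
 * below ("example-mult", "example-parent") are hypothetical placeholders.
 */
static __maybe_unused struct clk_hw *
example_register_multiplier(void __iomem *reg, spinlock_t *lock)
{
	struct clk_multiplier *mult;
	const char *parent_names[] = { "example-parent" };
	struct clk_init_data init = {
		.name = "example-mult",
		.ops = &clk_multiplier_ops,
		.parent_names = parent_names,
		.num_parents = ARRAY_SIZE(parent_names),
	};
	int ret;

	mult = kzalloc(sizeof(*mult), GFP_KERNEL);
	if (!mult)
		return ERR_PTR(-ENOMEM);

	/* assume a 4-bit multiplier field at bits [7:4] of @reg */
	mult->reg = reg;
	mult->shift = 4;
	mult->width = 4;
	mult->flags = CLK_MULTIPLIER_ROUND_CLOSEST;
	mult->lock = lock;
	mult->hw.init = &init;

	ret = clk_hw_register(NULL, &mult->hw);
	if (ret) {
		kfree(mult);
		return ERR_PTR(ret);
	}

	return &mult->hw;
}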
| linux-master | drivers/clk/clk-multiplier.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP
*
* Clock driver for LS1028A Display output interfaces(LCD, DPHY).
*/
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/bitfield.h>
/* PLLDIG register offsets and bit masks */
#define PLLDIG_REG_PLLSR 0x24
#define PLLDIG_LOCK_MASK BIT(2)
#define PLLDIG_REG_PLLDV 0x28
#define PLLDIG_MFD_MASK GENMASK(7, 0)
#define PLLDIG_RFDPHI1_MASK GENMASK(30, 25)
#define PLLDIG_REG_PLLFM 0x2c
#define PLLDIG_SSCGBYP_ENABLE BIT(30)
#define PLLDIG_REG_PLLFD 0x30
#define PLLDIG_FDEN BIT(30)
#define PLLDIG_FRAC_MASK GENMASK(15, 0)
#define PLLDIG_REG_PLLCAL1 0x38
#define PLLDIG_REG_PLLCAL2 0x3c
/* Range of the VCO frequencies, in Hz */
#define PLLDIG_MIN_VCO_FREQ 650000000
#define PLLDIG_MAX_VCO_FREQ 1300000000
/* Range of the output frequencies, in Hz */
#define PHI1_MIN_FREQ 27000000UL
#define PHI1_MAX_FREQ 600000000UL
/* Maximum value of the reduced frequency divider */
#define MAX_RFDPHI1 63UL
/* Best value of multiplication factor divider */
#define PLLDIG_DEFAULT_MFD 44
/*
* Denominator part of the fractional part of the
* loop multiplication factor.
*/
#define MFDEN 20480
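/*
 * The VCO frequency follows Fvco = Fref * (MFD + FRAC / MFDEN).
 * Worked example (reference rate assumed): with a 27 MHz reference,
 * the default MFD of 44 and no fractional part, Fvco = 27 MHz * 44 =
 * 1188 MHz, inside the allowed 650..1300 MHz window.
 */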
static const struct clk_parent_data parent_data[] = {
{ .index = 0 },
};
struct clk_plldig {
struct clk_hw hw;
void __iomem *regs;
unsigned int vco_freq;
};
#define to_clk_plldig(_hw) container_of(_hw, struct clk_plldig, hw)
static int plldig_enable(struct clk_hw *hw)
{
struct clk_plldig *data = to_clk_plldig(hw);
u32 val;
val = readl(data->regs + PLLDIG_REG_PLLFM);
	/*
	 * By default the PLL runs in bypass mode with the frequency overshoot
	 * detector output disabled; SSCG bypass mode must be enabled here.
	 */
val |= PLLDIG_SSCGBYP_ENABLE;
writel(val, data->regs + PLLDIG_REG_PLLFM);
return 0;
}
static void plldig_disable(struct clk_hw *hw)
{
struct clk_plldig *data = to_clk_plldig(hw);
u32 val;
val = readl(data->regs + PLLDIG_REG_PLLFM);
	val &= ~PLLDIG_SSCGBYP_ENABLE;
writel(val, data->regs + PLLDIG_REG_PLLFM);
}
static int plldig_is_enabled(struct clk_hw *hw)
{
struct clk_plldig *data = to_clk_plldig(hw);
return readl(data->regs + PLLDIG_REG_PLLFM) &
PLLDIG_SSCGBYP_ENABLE;
}
static unsigned long plldig_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_plldig *data = to_clk_plldig(hw);
u32 val, rfdphi1;
val = readl(data->regs + PLLDIG_REG_PLLDV);
/* Check if PLL is bypassed */
if (val & PLLDIG_SSCGBYP_ENABLE)
return parent_rate;
rfdphi1 = FIELD_GET(PLLDIG_RFDPHI1_MASK, val);
/*
* An RFDPHI1 field value of 0 means the VCO frequency is divided by
* one.
*/
if (!rfdphi1)
rfdphi1 = 1;
return DIV_ROUND_UP(data->vco_freq, rfdphi1);
}
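/*
* Pick the RFDPHI1 divider bringing the fixed VCO frequency closest to
* the requested rate, clamped to the valid range [1, MAX_RFDPHI1].
*/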
static unsigned long plldig_calc_target_div(unsigned long vco_freq,
unsigned long target_rate)
{
unsigned long div;
div = DIV_ROUND_CLOSEST(vco_freq, target_rate);
div = clamp(div, 1UL, MAX_RFDPHI1);
return div;
}
static int plldig_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
struct clk_plldig *data = to_clk_plldig(hw);
unsigned int div;
req->rate = clamp(req->rate, PHI1_MIN_FREQ, PHI1_MAX_FREQ);
div = plldig_calc_target_div(data->vco_freq, req->rate);
req->rate = DIV_ROUND_UP(data->vco_freq, div);
return 0;
}
static int plldig_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_plldig *data = to_clk_plldig(hw);
unsigned int val, cond;
unsigned int rfdphi1;
rate = clamp(rate, PHI1_MIN_FREQ, PHI1_MAX_FREQ);
rfdphi1 = plldig_calc_target_div(data->vco_freq, rate);
/* update the divider value */
val = readl(data->regs + PLLDIG_REG_PLLDV);
val &= ~PLLDIG_RFDPHI1_MASK;
val |= FIELD_PREP(PLLDIG_RFDPHI1_MASK, rfdphi1);
writel(val, data->regs + PLLDIG_REG_PLLDV);
/* wait for the old lock state to clear */
udelay(200);
/* Wait until PLL is locked or timeout */
return readl_poll_timeout_atomic(data->regs + PLLDIG_REG_PLLSR, cond,
cond & PLLDIG_LOCK_MASK, 0,
USEC_PER_MSEC);
}
static const struct clk_ops plldig_clk_ops = {
.enable = plldig_enable,
.disable = plldig_disable,
.is_enabled = plldig_is_enabled,
.recalc_rate = plldig_recalc_rate,
.determine_rate = plldig_determine_rate,
.set_rate = plldig_set_rate,
};
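/*
* Program the loop multiplication factor so that:
*
*   vco_freq = parent_rate * (MFD + fracdiv / MFDEN)
*
* If no VCO frequency was requested, fall back to the default MFD and
* derive vco_freq from it.
*/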
static int plldig_init(struct clk_hw *hw)
{
struct clk_plldig *data = to_clk_plldig(hw);
struct clk_hw *parent = clk_hw_get_parent(hw);
unsigned long parent_rate;
unsigned long val;
unsigned long long lltmp;
unsigned int mfd, fracdiv = 0;
if (!parent)
return -EINVAL;
parent_rate = clk_hw_get_rate(parent);
if (data->vco_freq) {
mfd = data->vco_freq / parent_rate;
lltmp = data->vco_freq % parent_rate;
lltmp *= MFDEN;
do_div(lltmp, parent_rate);
fracdiv = lltmp;
} else {
mfd = PLLDIG_DEFAULT_MFD;
data->vco_freq = parent_rate * mfd;
}
val = FIELD_PREP(PLLDIG_MFD_MASK, mfd);
writel(val, data->regs + PLLDIG_REG_PLLDV);
/* Enable fractional divider */
if (fracdiv) {
val = FIELD_PREP(PLLDIG_FRAC_MASK, fracdiv);
val |= PLLDIG_FDEN;
writel(val, data->regs + PLLDIG_REG_PLLFD);
}
return 0;
}
static int plldig_clk_probe(struct platform_device *pdev)
{
struct clk_plldig *data;
struct device *dev = &pdev->dev;
int ret;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->regs))
return PTR_ERR(data->regs);
data->hw.init = CLK_HW_INIT_PARENTS_DATA("dpclk",
parent_data,
&plldig_clk_ops,
0);
ret = devm_clk_hw_register(dev, &data->hw);
if (ret) {
dev_err(dev, "failed to register %s clock\n",
dev->of_node->name);
return ret;
}
ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
&data->hw);
if (ret) {
dev_err(dev, "unable to add clk provider\n");
return ret;
}
/*
* The frequency of the VCO cannot be changed during runtime.
* Therefore, let the user specify a desired frequency.
*/
if (!of_property_read_u32(dev->of_node, "fsl,vco-hz",
&data->vco_freq)) {
if (data->vco_freq < PLLDIG_MIN_VCO_FREQ ||
data->vco_freq > PLLDIG_MAX_VCO_FREQ)
return -EINVAL;
}
return plldig_init(&data->hw);
}
static const struct of_device_id plldig_clk_id[] = {
{ .compatible = "fsl,ls1028a-plldig" },
{ }
};
MODULE_DEVICE_TABLE(of, plldig_clk_id);
static struct platform_driver plldig_clk_driver = {
.driver = {
.name = "plldig-clock",
.of_match_table = plldig_clk_id,
},
.probe = plldig_clk_probe,
};
module_platform_driver(plldig_clk_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Wen He <[email protected]>");
MODULE_DESCRIPTION("LS1028A Display output interface pixel clock driver");
| linux-master | drivers/clk/clk-plldig.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2013 NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>
static u8 clk_composite_get_parent(struct clk_hw *hw)
{
struct clk_composite *composite = to_clk_composite(hw);
const struct clk_ops *mux_ops = composite->mux_ops;
struct clk_hw *mux_hw = composite->mux_hw;
__clk_hw_set_clk(mux_hw, hw);
return mux_ops->get_parent(mux_hw);
}
static int clk_composite_set_parent(struct clk_hw *hw, u8 index)
{
struct clk_composite *composite = to_clk_composite(hw);
const struct clk_ops *mux_ops = composite->mux_ops;
struct clk_hw *mux_hw = composite->mux_hw;
__clk_hw_set_clk(mux_hw, hw);
return mux_ops->set_parent(mux_hw, index);
}
static unsigned long clk_composite_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_composite *composite = to_clk_composite(hw);
const struct clk_ops *rate_ops = composite->rate_ops;
struct clk_hw *rate_hw = composite->rate_hw;
__clk_hw_set_clk(rate_hw, hw);
return rate_ops->recalc_rate(rate_hw, parent_rate);
}
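/*
* Evaluate one candidate parent: prefer the rate ops' .determine_rate
* and fall back to .round_rate when it is not implemented.
*/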
static int clk_composite_determine_rate_for_parent(struct clk_hw *rate_hw,
struct clk_rate_request *req,
struct clk_hw *parent_hw,
const struct clk_ops *rate_ops)
{
long rate;
req->best_parent_hw = parent_hw;
req->best_parent_rate = clk_hw_get_rate(parent_hw);
if (rate_ops->determine_rate)
return rate_ops->determine_rate(rate_hw, req);
rate = rate_ops->round_rate(rate_hw, req->rate,
&req->best_parent_rate);
if (rate < 0)
return rate;
req->rate = rate;
return 0;
}
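/*
* With both a mux and a rate part present, try every reachable parent
* (or only the current one when CLK_SET_RATE_NO_REPARENT is set) and
* keep the combination whose achievable rate is closest to the request.
*/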
static int clk_composite_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
struct clk_composite *composite = to_clk_composite(hw);
const struct clk_ops *rate_ops = composite->rate_ops;
const struct clk_ops *mux_ops = composite->mux_ops;
struct clk_hw *rate_hw = composite->rate_hw;
struct clk_hw *mux_hw = composite->mux_hw;
struct clk_hw *parent;
unsigned long rate_diff;
unsigned long best_rate_diff = ULONG_MAX;
unsigned long best_rate = 0;
int i, ret;
if (rate_hw && rate_ops &&
(rate_ops->determine_rate || rate_ops->round_rate) &&
mux_hw && mux_ops && mux_ops->set_parent) {
req->best_parent_hw = NULL;
if (clk_hw_get_flags(hw) & CLK_SET_RATE_NO_REPARENT) {
struct clk_rate_request tmp_req;
parent = clk_hw_get_parent(mux_hw);
clk_hw_forward_rate_request(hw, req, parent, &tmp_req, req->rate);
ret = clk_composite_determine_rate_for_parent(rate_hw,
&tmp_req,
parent,
rate_ops);
if (ret)
return ret;
req->rate = tmp_req.rate;
req->best_parent_hw = tmp_req.best_parent_hw;
req->best_parent_rate = tmp_req.best_parent_rate;
return 0;
}
for (i = 0; i < clk_hw_get_num_parents(mux_hw); i++) {
struct clk_rate_request tmp_req;
parent = clk_hw_get_parent_by_index(mux_hw, i);
if (!parent)
continue;
clk_hw_forward_rate_request(hw, req, parent, &tmp_req, req->rate);
ret = clk_composite_determine_rate_for_parent(rate_hw,
&tmp_req,
parent,
rate_ops);
if (ret)
continue;
if (req->rate >= tmp_req.rate)
rate_diff = req->rate - tmp_req.rate;
else
rate_diff = tmp_req.rate - req->rate;
if (!rate_diff || !req->best_parent_hw
|| best_rate_diff > rate_diff) {
req->best_parent_hw = parent;
req->best_parent_rate = tmp_req.best_parent_rate;
best_rate_diff = rate_diff;
best_rate = tmp_req.rate;
}
if (!rate_diff)
return 0;
}
req->rate = best_rate;
return 0;
} else if (rate_hw && rate_ops && rate_ops->determine_rate) {
__clk_hw_set_clk(rate_hw, hw);
return rate_ops->determine_rate(rate_hw, req);
} else if (mux_hw && mux_ops && mux_ops->determine_rate) {
__clk_hw_set_clk(mux_hw, hw);
return mux_ops->determine_rate(mux_hw, req);
} else {
pr_err("clk: clk_composite_determine_rate function called, but no mux or rate callback set!\n");
return -EINVAL;
}
}
static long clk_composite_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
struct clk_composite *composite = to_clk_composite(hw);
const struct clk_ops *rate_ops = composite->rate_ops;
struct clk_hw *rate_hw = composite->rate_hw;
__clk_hw_set_clk(rate_hw, hw);
return rate_ops->round_rate(rate_hw, rate, prate);
}
static int clk_composite_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_composite *composite = to_clk_composite(hw);
const struct clk_ops *rate_ops = composite->rate_ops;
struct clk_hw *rate_hw = composite->rate_hw;
__clk_hw_set_clk(rate_hw, hw);
return rate_ops->set_rate(rate_hw, rate, parent_rate);
}
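/*
* Order the two operations so the output never overshoots the target:
* if keeping the current configuration at the new parent rate would
* exceed the requested rate, lower the rate before switching parents;
* otherwise switch parents first.
*/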
static int clk_composite_set_rate_and_parent(struct clk_hw *hw,
unsigned long rate,
unsigned long parent_rate,
u8 index)
{
struct clk_composite *composite = to_clk_composite(hw);
const struct clk_ops *rate_ops = composite->rate_ops;
const struct clk_ops *mux_ops = composite->mux_ops;
struct clk_hw *rate_hw = composite->rate_hw;
struct clk_hw *mux_hw = composite->mux_hw;
unsigned long temp_rate;
__clk_hw_set_clk(rate_hw, hw);
__clk_hw_set_clk(mux_hw, hw);
temp_rate = rate_ops->recalc_rate(rate_hw, parent_rate);
if (temp_rate > rate) {
rate_ops->set_rate(rate_hw, rate, parent_rate);
mux_ops->set_parent(mux_hw, index);
} else {
mux_ops->set_parent(mux_hw, index);
rate_ops->set_rate(rate_hw, rate, parent_rate);
}
return 0;
}
static int clk_composite_is_enabled(struct clk_hw *hw)
{
struct clk_composite *composite = to_clk_composite(hw);
const struct clk_ops *gate_ops = composite->gate_ops;
struct clk_hw *gate_hw = composite->gate_hw;
__clk_hw_set_clk(gate_hw, hw);
return gate_ops->is_enabled(gate_hw);
}
static int clk_composite_enable(struct clk_hw *hw)
{
struct clk_composite *composite = to_clk_composite(hw);
const struct clk_ops *gate_ops = composite->gate_ops;
struct clk_hw *gate_hw = composite->gate_hw;
__clk_hw_set_clk(gate_hw, hw);
return gate_ops->enable(gate_hw);
}
static void clk_composite_disable(struct clk_hw *hw)
{
struct clk_composite *composite = to_clk_composite(hw);
const struct clk_ops *gate_ops = composite->gate_ops;
struct clk_hw *gate_hw = composite->gate_hw;
__clk_hw_set_clk(gate_hw, hw);
gate_ops->disable(gate_hw);
}
static struct clk_hw *__clk_hw_register_composite(struct device *dev,
const char *name, const char * const *parent_names,
const struct clk_parent_data *pdata, int num_parents,
struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
unsigned long flags)
{
struct clk_hw *hw;
struct clk_init_data init = {};
struct clk_composite *composite;
struct clk_ops *clk_composite_ops;
int ret;
composite = kzalloc(sizeof(*composite), GFP_KERNEL);
if (!composite)
return ERR_PTR(-ENOMEM);
init.name = name;
init.flags = flags;
if (parent_names)
init.parent_names = parent_names;
else
init.parent_data = pdata;
init.num_parents = num_parents;
hw = &composite->hw;
clk_composite_ops = &composite->ops;
if (mux_hw && mux_ops) {
if (!mux_ops->get_parent) {
hw = ERR_PTR(-EINVAL);
goto err;
}
composite->mux_hw = mux_hw;
composite->mux_ops = mux_ops;
clk_composite_ops->get_parent = clk_composite_get_parent;
if (mux_ops->set_parent)
clk_composite_ops->set_parent = clk_composite_set_parent;
if (mux_ops->determine_rate)
clk_composite_ops->determine_rate = clk_composite_determine_rate;
}
if (rate_hw && rate_ops) {
if (!rate_ops->recalc_rate) {
hw = ERR_PTR(-EINVAL);
goto err;
}
clk_composite_ops->recalc_rate = clk_composite_recalc_rate;
if (rate_ops->determine_rate)
clk_composite_ops->determine_rate =
clk_composite_determine_rate;
else if (rate_ops->round_rate)
clk_composite_ops->round_rate =
clk_composite_round_rate;
/* .set_rate requires either .round_rate or .determine_rate */
if (rate_ops->set_rate) {
if (rate_ops->determine_rate || rate_ops->round_rate)
clk_composite_ops->set_rate =
clk_composite_set_rate;
else
WARN(1, "%s: .set_rate requires a .round_rate or .determine_rate op\n",
__func__);
}
composite->rate_hw = rate_hw;
composite->rate_ops = rate_ops;
}
if (mux_hw && mux_ops && rate_hw && rate_ops) {
if (mux_ops->set_parent && rate_ops->set_rate)
clk_composite_ops->set_rate_and_parent =
clk_composite_set_rate_and_parent;
}
if (gate_hw && gate_ops) {
if (!gate_ops->is_enabled || !gate_ops->enable ||
!gate_ops->disable) {
hw = ERR_PTR(-EINVAL);
goto err;
}
composite->gate_hw = gate_hw;
composite->gate_ops = gate_ops;
clk_composite_ops->is_enabled = clk_composite_is_enabled;
clk_composite_ops->enable = clk_composite_enable;
clk_composite_ops->disable = clk_composite_disable;
}
init.ops = clk_composite_ops;
composite->hw.init = &init;
ret = clk_hw_register(dev, hw);
if (ret) {
hw = ERR_PTR(ret);
goto err;
}
if (composite->mux_hw)
composite->mux_hw->clk = hw->clk;
if (composite->rate_hw)
composite->rate_hw->clk = hw->clk;
if (composite->gate_hw)
composite->gate_hw->clk = hw->clk;
return hw;
err:
kfree(composite);
return hw;
}
struct clk_hw *clk_hw_register_composite(struct device *dev, const char *name,
const char * const *parent_names, int num_parents,
struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
unsigned long flags)
{
return __clk_hw_register_composite(dev, name, parent_names, NULL,
num_parents, mux_hw, mux_ops,
rate_hw, rate_ops, gate_hw,
gate_ops, flags);
}
EXPORT_SYMBOL_GPL(clk_hw_register_composite);
struct clk_hw *clk_hw_register_composite_pdata(struct device *dev,
const char *name,
const struct clk_parent_data *parent_data,
int num_parents,
struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
unsigned long flags)
{
return __clk_hw_register_composite(dev, name, NULL, parent_data,
num_parents, mux_hw, mux_ops,
rate_hw, rate_ops, gate_hw,
gate_ops, flags);
}
struct clk *clk_register_composite(struct device *dev, const char *name,
const char * const *parent_names, int num_parents,
struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
unsigned long flags)
{
struct clk_hw *hw;
hw = clk_hw_register_composite(dev, name, parent_names, num_parents,
mux_hw, mux_ops, rate_hw, rate_ops, gate_hw, gate_ops,
flags);
if (IS_ERR(hw))
return ERR_CAST(hw);
return hw->clk;
}
EXPORT_SYMBOL_GPL(clk_register_composite);
struct clk *clk_register_composite_pdata(struct device *dev, const char *name,
const struct clk_parent_data *parent_data,
int num_parents,
struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
unsigned long flags)
{
struct clk_hw *hw;
hw = clk_hw_register_composite_pdata(dev, name, parent_data,
num_parents, mux_hw, mux_ops, rate_hw, rate_ops,
gate_hw, gate_ops, flags);
if (IS_ERR(hw))
return ERR_CAST(hw);
return hw->clk;
}
void clk_unregister_composite(struct clk *clk)
{
struct clk_composite *composite;
struct clk_hw *hw;
hw = __clk_get_hw(clk);
if (!hw)
return;
composite = to_clk_composite(hw);
clk_unregister(clk);
kfree(composite);
}
void clk_hw_unregister_composite(struct clk_hw *hw)
{
struct clk_composite *composite;
composite = to_clk_composite(hw);
clk_hw_unregister(hw);
kfree(composite);
}
EXPORT_SYMBOL_GPL(clk_hw_unregister_composite);
static void devm_clk_hw_release_composite(struct device *dev, void *res)
{
clk_hw_unregister_composite(*(struct clk_hw **)res);
}
static struct clk_hw *__devm_clk_hw_register_composite(struct device *dev,
const char *name, const char * const *parent_names,
const struct clk_parent_data *pdata, int num_parents,
struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
unsigned long flags)
{
struct clk_hw **ptr, *hw;
ptr = devres_alloc(devm_clk_hw_release_composite, sizeof(*ptr),
GFP_KERNEL);
if (!ptr)
return ERR_PTR(-ENOMEM);
hw = __clk_hw_register_composite(dev, name, parent_names, pdata,
num_parents, mux_hw, mux_ops, rate_hw,
rate_ops, gate_hw, gate_ops, flags);
if (!IS_ERR(hw)) {
*ptr = hw;
devres_add(dev, ptr);
} else {
devres_free(ptr);
}
return hw;
}
struct clk_hw *devm_clk_hw_register_composite_pdata(struct device *dev,
const char *name,
const struct clk_parent_data *parent_data,
int num_parents,
struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
unsigned long flags)
{
return __devm_clk_hw_register_composite(dev, name, NULL, parent_data,
num_parents, mux_hw, mux_ops,
rate_hw, rate_ops, gate_hw,
gate_ops, flags);
}
| linux-master | drivers/clk/clk-composite.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2019-20 Sean Anderson <[email protected]>
* Copyright (c) 2019 Western Digital Corporation or its affiliates.
*/
#define pr_fmt(fmt) "k210-clk: " fmt
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_clk.h>
#include <linux/of_address.h>
#include <linux/clk-provider.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <soc/canaan/k210-sysctl.h>
#include <dt-bindings/clock/k210-clk.h>
struct k210_sysclk;
struct k210_clk {
int id;
struct k210_sysclk *ksc;
struct clk_hw hw;
};
struct k210_clk_cfg {
const char *name;
u8 gate_reg;
u8 gate_bit;
u8 div_reg;
u8 div_shift;
u8 div_width;
u8 div_type;
u8 mux_reg;
u8 mux_bit;
};
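/*
* Divider encodings used below; for a register field value v:
*
*   K210_DIV_ONE_BASED:        rate = parent / (v + 1)
*   K210_DIV_DOUBLE_ONE_BASED: rate = parent / ((v + 1) * 2)
*   K210_DIV_POWER_OF_TWO:     rate = parent / 2^(v + 1)
*/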
enum k210_clk_div_type {
K210_DIV_NONE,
K210_DIV_ONE_BASED,
K210_DIV_DOUBLE_ONE_BASED,
K210_DIV_POWER_OF_TWO,
};
#define K210_GATE(_reg, _bit) \
.gate_reg = (_reg), \
.gate_bit = (_bit)
#define K210_DIV(_reg, _shift, _width, _type) \
.div_reg = (_reg), \
.div_shift = (_shift), \
.div_width = (_width), \
.div_type = (_type)
#define K210_MUX(_reg, _bit) \
.mux_reg = (_reg), \
.mux_bit = (_bit)
static struct k210_clk_cfg k210_clk_cfgs[K210_NUM_CLKS] = {
/* Gated clocks, no mux, no divider */
[K210_CLK_CPU] = {
.name = "cpu",
K210_GATE(K210_SYSCTL_EN_CENT, 0)
},
[K210_CLK_DMA] = {
.name = "dma",
K210_GATE(K210_SYSCTL_EN_PERI, 1)
},
[K210_CLK_FFT] = {
.name = "fft",
K210_GATE(K210_SYSCTL_EN_PERI, 4)
},
[K210_CLK_GPIO] = {
.name = "gpio",
K210_GATE(K210_SYSCTL_EN_PERI, 5)
},
[K210_CLK_UART1] = {
.name = "uart1",
K210_GATE(K210_SYSCTL_EN_PERI, 16)
},
[K210_CLK_UART2] = {
.name = "uart2",
K210_GATE(K210_SYSCTL_EN_PERI, 17)
},
[K210_CLK_UART3] = {
.name = "uart3",
K210_GATE(K210_SYSCTL_EN_PERI, 18)
},
[K210_CLK_FPIOA] = {
.name = "fpioa",
K210_GATE(K210_SYSCTL_EN_PERI, 20)
},
[K210_CLK_SHA] = {
.name = "sha",
K210_GATE(K210_SYSCTL_EN_PERI, 26)
},
[K210_CLK_AES] = {
.name = "aes",
K210_GATE(K210_SYSCTL_EN_PERI, 19)
},
[K210_CLK_OTP] = {
.name = "otp",
K210_GATE(K210_SYSCTL_EN_PERI, 27)
},
[K210_CLK_RTC] = {
.name = "rtc",
K210_GATE(K210_SYSCTL_EN_PERI, 29)
},
/* Gated divider clocks */
[K210_CLK_SRAM0] = {
.name = "sram0",
K210_GATE(K210_SYSCTL_EN_CENT, 1),
K210_DIV(K210_SYSCTL_THR0, 0, 4, K210_DIV_ONE_BASED)
},
[K210_CLK_SRAM1] = {
.name = "sram1",
K210_GATE(K210_SYSCTL_EN_CENT, 2),
K210_DIV(K210_SYSCTL_THR0, 4, 4, K210_DIV_ONE_BASED)
},
[K210_CLK_ROM] = {
.name = "rom",
K210_GATE(K210_SYSCTL_EN_PERI, 0),
K210_DIV(K210_SYSCTL_THR0, 16, 4, K210_DIV_ONE_BASED)
},
[K210_CLK_DVP] = {
.name = "dvp",
K210_GATE(K210_SYSCTL_EN_PERI, 3),
K210_DIV(K210_SYSCTL_THR0, 12, 4, K210_DIV_ONE_BASED)
},
[K210_CLK_APB0] = {
.name = "apb0",
K210_GATE(K210_SYSCTL_EN_CENT, 3),
K210_DIV(K210_SYSCTL_SEL0, 3, 3, K210_DIV_ONE_BASED)
},
[K210_CLK_APB1] = {
.name = "apb1",
K210_GATE(K210_SYSCTL_EN_CENT, 4),
K210_DIV(K210_SYSCTL_SEL0, 6, 3, K210_DIV_ONE_BASED)
},
[K210_CLK_APB2] = {
.name = "apb2",
K210_GATE(K210_SYSCTL_EN_CENT, 5),
K210_DIV(K210_SYSCTL_SEL0, 9, 3, K210_DIV_ONE_BASED)
},
[K210_CLK_AI] = {
.name = "ai",
K210_GATE(K210_SYSCTL_EN_PERI, 2),
K210_DIV(K210_SYSCTL_THR0, 8, 4, K210_DIV_ONE_BASED)
},
[K210_CLK_SPI0] = {
.name = "spi0",
K210_GATE(K210_SYSCTL_EN_PERI, 6),
K210_DIV(K210_SYSCTL_THR1, 0, 8, K210_DIV_DOUBLE_ONE_BASED)
},
[K210_CLK_SPI1] = {
.name = "spi1",
K210_GATE(K210_SYSCTL_EN_PERI, 7),
K210_DIV(K210_SYSCTL_THR1, 8, 8, K210_DIV_DOUBLE_ONE_BASED)
},
[K210_CLK_SPI2] = {
.name = "spi2",
K210_GATE(K210_SYSCTL_EN_PERI, 8),
K210_DIV(K210_SYSCTL_THR1, 16, 8, K210_DIV_DOUBLE_ONE_BASED)
},
[K210_CLK_I2C0] = {
.name = "i2c0",
K210_GATE(K210_SYSCTL_EN_PERI, 13),
K210_DIV(K210_SYSCTL_THR5, 8, 8, K210_DIV_DOUBLE_ONE_BASED)
},
[K210_CLK_I2C1] = {
.name = "i2c1",
K210_GATE(K210_SYSCTL_EN_PERI, 14),
K210_DIV(K210_SYSCTL_THR5, 16, 8, K210_DIV_DOUBLE_ONE_BASED)
},
[K210_CLK_I2C2] = {
.name = "i2c2",
K210_GATE(K210_SYSCTL_EN_PERI, 15),
K210_DIV(K210_SYSCTL_THR5, 24, 8, K210_DIV_DOUBLE_ONE_BASED)
},
[K210_CLK_WDT0] = {
.name = "wdt0",
K210_GATE(K210_SYSCTL_EN_PERI, 24),
K210_DIV(K210_SYSCTL_THR6, 0, 8, K210_DIV_DOUBLE_ONE_BASED)
},
[K210_CLK_WDT1] = {
.name = "wdt1",
K210_GATE(K210_SYSCTL_EN_PERI, 25),
K210_DIV(K210_SYSCTL_THR6, 8, 8, K210_DIV_DOUBLE_ONE_BASED)
},
[K210_CLK_I2S0] = {
.name = "i2s0",
K210_GATE(K210_SYSCTL_EN_PERI, 10),
K210_DIV(K210_SYSCTL_THR3, 0, 16, K210_DIV_DOUBLE_ONE_BASED)
},
[K210_CLK_I2S1] = {
.name = "i2s1",
K210_GATE(K210_SYSCTL_EN_PERI, 11),
K210_DIV(K210_SYSCTL_THR3, 16, 16, K210_DIV_DOUBLE_ONE_BASED)
},
[K210_CLK_I2S2] = {
.name = "i2s2",
K210_GATE(K210_SYSCTL_EN_PERI, 12),
K210_DIV(K210_SYSCTL_THR4, 0, 16, K210_DIV_DOUBLE_ONE_BASED)
},
/* Divider clocks, no gate, no mux */
[K210_CLK_I2S0_M] = {
.name = "i2s0_m",
K210_DIV(K210_SYSCTL_THR4, 16, 8, K210_DIV_DOUBLE_ONE_BASED)
},
[K210_CLK_I2S1_M] = {
.name = "i2s1_m",
K210_DIV(K210_SYSCTL_THR4, 24, 8, K210_DIV_DOUBLE_ONE_BASED)
},
[K210_CLK_I2S2_M] = {
.name = "i2s2_m",
K210_DIV(K210_SYSCTL_THR5, 0, 8, K210_DIV_DOUBLE_ONE_BASED)
},
/* Muxed gated divider clocks */
[K210_CLK_SPI3] = {
.name = "spi3",
K210_GATE(K210_SYSCTL_EN_PERI, 9),
K210_DIV(K210_SYSCTL_THR1, 24, 8, K210_DIV_DOUBLE_ONE_BASED),
K210_MUX(K210_SYSCTL_SEL0, 12)
},
[K210_CLK_TIMER0] = {
.name = "timer0",
K210_GATE(K210_SYSCTL_EN_PERI, 21),
K210_DIV(K210_SYSCTL_THR2, 0, 8, K210_DIV_DOUBLE_ONE_BASED),
K210_MUX(K210_SYSCTL_SEL0, 13)
},
[K210_CLK_TIMER1] = {
.name = "timer1",
K210_GATE(K210_SYSCTL_EN_PERI, 22),
K210_DIV(K210_SYSCTL_THR2, 8, 8, K210_DIV_DOUBLE_ONE_BASED),
K210_MUX(K210_SYSCTL_SEL0, 14)
},
[K210_CLK_TIMER2] = {
.name = "timer2",
K210_GATE(K210_SYSCTL_EN_PERI, 23),
K210_DIV(K210_SYSCTL_THR2, 16, 8, K210_DIV_DOUBLE_ONE_BASED),
K210_MUX(K210_SYSCTL_SEL0, 15)
},
};
/*
* PLL control register bits.
*/
#define K210_PLL_CLKR GENMASK(3, 0)
#define K210_PLL_CLKF GENMASK(9, 4)
#define K210_PLL_CLKOD GENMASK(13, 10)
#define K210_PLL_BWADJ GENMASK(19, 14)
#define K210_PLL_RESET (1 << 20)
#define K210_PLL_PWRD (1 << 21)
#define K210_PLL_INTFB (1 << 22)
#define K210_PLL_BYPASS (1 << 23)
#define K210_PLL_TEST (1 << 24)
#define K210_PLL_EN (1 << 25)
#define K210_PLL_SEL GENMASK(27, 26) /* PLL2 only */
/*
* PLL lock register bits.
*/
#define K210_PLL_LOCK 0
#define K210_PLL_CLEAR_SLIP 2
#define K210_PLL_TEST_OUT 3
/*
* Clock selector register bits.
*/
#define K210_ACLK_SEL BIT(0)
#define K210_ACLK_DIV GENMASK(2, 1)
/*
* PLLs.
*/
enum k210_pll_id {
K210_PLL0, K210_PLL1, K210_PLL2, K210_PLL_NUM
};
struct k210_pll {
enum k210_pll_id id;
struct k210_sysclk *ksc;
void __iomem *base;
void __iomem *reg;
void __iomem *lock;
u8 lock_shift;
u8 lock_width;
struct clk_hw hw;
};
#define to_k210_pll(_hw) container_of(_hw, struct k210_pll, hw)
/*
* PLLs configuration: by default PLL0 runs at 780 MHz and PLL1 at 299 MHz.
* The first 2 SRAM banks depend on ACLK/CPU clock which is by default PLL0
* rate divided by 2. Set PLL1 to 390 MHz so that the third SRAM bank has the
* same clock as the first 2.
*/
struct k210_pll_cfg {
u32 reg;
u8 lock_shift;
u8 lock_width;
u32 r;
u32 f;
u32 od;
u32 bwadj;
};
static struct k210_pll_cfg k210_plls_cfg[] = {
{ K210_SYSCTL_PLL0, 0, 2, 0, 59, 1, 59 }, /* 780 MHz */
{ K210_SYSCTL_PLL1, 8, 1, 0, 59, 3, 59 }, /* 390 MHz */
{ K210_SYSCTL_PLL2, 16, 1, 0, 22, 1, 22 }, /* 299 MHz */
};
/**
* struct k210_sysclk - sysclk driver data
* @regs: system controller registers start address
* @clk_lock: clock setting spinlock
* @plls: SoC PLLs descriptors
* @aclk: ACLK clock
* @clks: All other clocks
*/
struct k210_sysclk {
void __iomem *regs;
spinlock_t clk_lock;
struct k210_pll plls[K210_PLL_NUM];
struct clk_hw aclk;
struct k210_clk clks[K210_NUM_CLKS];
};
#define to_k210_sysclk(_hw) container_of(_hw, struct k210_sysclk, aclk)
/*
* Set ACLK parent selector: 0 for IN0, 1 for PLL0.
*/
static void k210_aclk_set_selector(void __iomem *regs, u8 sel)
{
u32 reg = readl(regs + K210_SYSCTL_SEL0);
if (sel)
reg |= K210_ACLK_SEL;
else
reg &= ~K210_ACLK_SEL;
writel(reg, regs + K210_SYSCTL_SEL0);
}
static void k210_init_pll(void __iomem *regs, enum k210_pll_id pllid,
struct k210_pll *pll)
{
pll->id = pllid;
pll->reg = regs + k210_plls_cfg[pllid].reg;
pll->lock = regs + K210_SYSCTL_PLL_LOCK;
pll->lock_shift = k210_plls_cfg[pllid].lock_shift;
pll->lock_width = k210_plls_cfg[pllid].lock_width;
}
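/*
* Busy-wait until all lock bits of this PLL read back as set, clearing
* the lock slip indication on every iteration.
*/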
static void k210_pll_wait_for_lock(struct k210_pll *pll)
{
u32 reg, mask = GENMASK(pll->lock_shift + pll->lock_width - 1,
pll->lock_shift);
while (true) {
reg = readl(pll->lock);
if ((reg & mask) == mask)
break;
reg |= BIT(pll->lock_shift + K210_PLL_CLEAR_SLIP);
writel(reg, pll->lock);
}
}
static bool k210_pll_hw_is_enabled(struct k210_pll *pll)
{
u32 reg = readl(pll->reg);
u32 mask = K210_PLL_PWRD | K210_PLL_EN;
if (reg & K210_PLL_RESET)
return false;
return (reg & mask) == mask;
}
static void k210_pll_enable_hw(void __iomem *regs, struct k210_pll *pll)
{
struct k210_pll_cfg *pll_cfg = &k210_plls_cfg[pll->id];
u32 reg;
if (k210_pll_hw_is_enabled(pll))
return;
/*
* For PLL0, we need to re-parent ACLK to IN0 to keep the CPU cores and
* SRAM running.
*/
if (pll->id == K210_PLL0)
k210_aclk_set_selector(regs, 0);
/* Set PLL factors */
reg = readl(pll->reg);
reg &= ~GENMASK(19, 0);
reg |= FIELD_PREP(K210_PLL_CLKR, pll_cfg->r);
reg |= FIELD_PREP(K210_PLL_CLKF, pll_cfg->f);
reg |= FIELD_PREP(K210_PLL_CLKOD, pll_cfg->od);
reg |= FIELD_PREP(K210_PLL_BWADJ, pll_cfg->bwadj);
reg |= K210_PLL_PWRD;
writel(reg, pll->reg);
/*
* Reset the PLL: ensure reset is low before asserting it.
* The magic NOPs come from the Kendryte reference SDK.
*/
reg &= ~K210_PLL_RESET;
writel(reg, pll->reg);
reg |= K210_PLL_RESET;
writel(reg, pll->reg);
nop();
nop();
reg &= ~K210_PLL_RESET;
writel(reg, pll->reg);
k210_pll_wait_for_lock(pll);
reg &= ~K210_PLL_BYPASS;
reg |= K210_PLL_EN;
writel(reg, pll->reg);
if (pll->id == K210_PLL0)
k210_aclk_set_selector(regs, 1);
}
static int k210_pll_enable(struct clk_hw *hw)
{
struct k210_pll *pll = to_k210_pll(hw);
struct k210_sysclk *ksc = pll->ksc;
unsigned long flags;
spin_lock_irqsave(&ksc->clk_lock, flags);
k210_pll_enable_hw(ksc->regs, pll);
spin_unlock_irqrestore(&ksc->clk_lock, flags);
return 0;
}
static void k210_pll_disable(struct clk_hw *hw)
{
struct k210_pll *pll = to_k210_pll(hw);
struct k210_sysclk *ksc = pll->ksc;
unsigned long flags;
u32 reg;
/*
* Bypassing before powering off is important so child clocks do not
* stop working. This is especially important for pll0, the indirect
* parent of the cpu clock.
*/
spin_lock_irqsave(&ksc->clk_lock, flags);
reg = readl(pll->reg);
reg |= K210_PLL_BYPASS;
writel(reg, pll->reg);
reg &= ~K210_PLL_PWRD;
reg &= ~K210_PLL_EN;
writel(reg, pll->reg);
spin_unlock_irqrestore(&ksc->clk_lock, flags);
}
static int k210_pll_is_enabled(struct clk_hw *hw)
{
return k210_pll_hw_is_enabled(to_k210_pll(hw));
}
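/*
* Output rate of a powered PLL; each field is encoded minus one, so:
*
*   rate = parent_rate * F / (R * OD)
*/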
static unsigned long k210_pll_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct k210_pll *pll = to_k210_pll(hw);
u32 reg = readl(pll->reg);
u32 r, f, od;
if (reg & K210_PLL_BYPASS)
return parent_rate;
if (!(reg & K210_PLL_PWRD))
return 0;
r = FIELD_GET(K210_PLL_CLKR, reg) + 1;
f = FIELD_GET(K210_PLL_CLKF, reg) + 1;
od = FIELD_GET(K210_PLL_CLKOD, reg) + 1;
return div_u64((u64)parent_rate * f, r * od);
}
static const struct clk_ops k210_pll_ops = {
.enable = k210_pll_enable,
.disable = k210_pll_disable,
.is_enabled = k210_pll_is_enabled,
.recalc_rate = k210_pll_get_rate,
};
static int k210_pll2_set_parent(struct clk_hw *hw, u8 index)
{
struct k210_pll *pll = to_k210_pll(hw);
struct k210_sysclk *ksc = pll->ksc;
unsigned long flags;
u32 reg;
spin_lock_irqsave(&ksc->clk_lock, flags);
reg = readl(pll->reg);
reg &= ~K210_PLL_SEL;
reg |= FIELD_PREP(K210_PLL_SEL, index);
writel(reg, pll->reg);
spin_unlock_irqrestore(&ksc->clk_lock, flags);
return 0;
}
static u8 k210_pll2_get_parent(struct clk_hw *hw)
{
struct k210_pll *pll = to_k210_pll(hw);
u32 reg = readl(pll->reg);
return FIELD_GET(K210_PLL_SEL, reg);
}
static const struct clk_ops k210_pll2_ops = {
.enable = k210_pll_enable,
.disable = k210_pll_disable,
.is_enabled = k210_pll_is_enabled,
.recalc_rate = k210_pll_get_rate,
.determine_rate = clk_hw_determine_rate_no_reparent,
.set_parent = k210_pll2_set_parent,
.get_parent = k210_pll2_get_parent,
};
static int __init k210_register_pll(struct device_node *np,
struct k210_sysclk *ksc,
enum k210_pll_id pllid, const char *name,
int num_parents, const struct clk_ops *ops)
{
struct k210_pll *pll = &ksc->plls[pllid];
struct clk_init_data init = {};
const struct clk_parent_data parent_data[] = {
{ /* .index = 0 for in0 */ },
{ .hw = &ksc->plls[K210_PLL0].hw },
{ .hw = &ksc->plls[K210_PLL1].hw },
};
init.name = name;
init.parent_data = parent_data;
init.num_parents = num_parents;
init.ops = ops;
pll->hw.init = &init;
pll->ksc = ksc;
return of_clk_hw_register(np, &pll->hw);
}
static int __init k210_register_plls(struct device_node *np,
struct k210_sysclk *ksc)
{
int i, ret;
for (i = 0; i < K210_PLL_NUM; i++)
k210_init_pll(ksc->regs, i, &ksc->plls[i]);
/* PLL0 and PLL1 only have IN0 as parent */
ret = k210_register_pll(np, ksc, K210_PLL0, "pll0", 1, &k210_pll_ops);
if (ret) {
pr_err("%pOFP: register PLL0 failed\n", np);
return ret;
}
ret = k210_register_pll(np, ksc, K210_PLL1, "pll1", 1, &k210_pll_ops);
if (ret) {
pr_err("%pOFP: register PLL1 failed\n", np);
return ret;
}
/* PLL2 has IN0, PLL0 and PLL1 as parents */
ret = k210_register_pll(np, ksc, K210_PLL2, "pll2", 3, &k210_pll2_ops);
if (ret) {
pr_err("%pOFP: register PLL2 failed\n", np);
return ret;
}
return 0;
}
static int k210_aclk_set_parent(struct clk_hw *hw, u8 index)
{
struct k210_sysclk *ksc = to_k210_sysclk(hw);
unsigned long flags;
spin_lock_irqsave(&ksc->clk_lock, flags);
k210_aclk_set_selector(ksc->regs, index);
spin_unlock_irqrestore(&ksc->clk_lock, flags);
return 0;
}
static u8 k210_aclk_get_parent(struct clk_hw *hw)
{
struct k210_sysclk *ksc = to_k210_sysclk(hw);
u32 sel;
sel = readl(ksc->regs + K210_SYSCTL_SEL0) & K210_ACLK_SEL;
return sel ? 1 : 0;
}
static unsigned long k210_aclk_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct k210_sysclk *ksc = to_k210_sysclk(hw);
u32 reg = readl(ksc->regs + K210_SYSCTL_SEL0);
unsigned int shift;
if (!(reg & K210_ACLK_SEL))
return parent_rate;
shift = FIELD_GET(K210_ACLK_DIV, reg);
return parent_rate / (2UL << shift);
}
static const struct clk_ops k210_aclk_ops = {
.determine_rate = clk_hw_determine_rate_no_reparent,
.set_parent = k210_aclk_set_parent,
.get_parent = k210_aclk_get_parent,
.recalc_rate = k210_aclk_get_rate,
};
/*
* ACLK has IN0 and PLL0 as parents.
*/
static int __init k210_register_aclk(struct device_node *np,
struct k210_sysclk *ksc)
{
struct clk_init_data init = {};
const struct clk_parent_data parent_data[] = {
{ /* .index = 0 for in0 */ },
{ .hw = &ksc->plls[K210_PLL0].hw },
};
int ret;
init.name = "aclk";
init.parent_data = parent_data;
init.num_parents = 2;
init.ops = &k210_aclk_ops;
ksc->aclk.init = &init;
ret = of_clk_hw_register(np, &ksc->aclk);
if (ret) {
pr_err("%pOFP: register aclk failed\n", np);
return ret;
}
return 0;
}
#define to_k210_clk(_hw) container_of(_hw, struct k210_clk, hw)
static int k210_clk_enable(struct clk_hw *hw)
{
struct k210_clk *kclk = to_k210_clk(hw);
struct k210_sysclk *ksc = kclk->ksc;
struct k210_clk_cfg *cfg = &k210_clk_cfgs[kclk->id];
unsigned long flags;
u32 reg;
if (!cfg->gate_reg)
return 0;
spin_lock_irqsave(&ksc->clk_lock, flags);
reg = readl(ksc->regs + cfg->gate_reg);
reg |= BIT(cfg->gate_bit);
writel(reg, ksc->regs + cfg->gate_reg);
spin_unlock_irqrestore(&ksc->clk_lock, flags);
return 0;
}
static void k210_clk_disable(struct clk_hw *hw)
{
struct k210_clk *kclk = to_k210_clk(hw);
struct k210_sysclk *ksc = kclk->ksc;
struct k210_clk_cfg *cfg = &k210_clk_cfgs[kclk->id];
unsigned long flags;
u32 reg;
if (!cfg->gate_reg)
return;
spin_lock_irqsave(&ksc->clk_lock, flags);
reg = readl(ksc->regs + cfg->gate_reg);
reg &= ~BIT(cfg->gate_bit);
writel(reg, ksc->regs + cfg->gate_reg);
spin_unlock_irqrestore(&ksc->clk_lock, flags);
}
static int k210_clk_set_parent(struct clk_hw *hw, u8 index)
{
struct k210_clk *kclk = to_k210_clk(hw);
struct k210_sysclk *ksc = kclk->ksc;
struct k210_clk_cfg *cfg = &k210_clk_cfgs[kclk->id];
unsigned long flags;
u32 reg;
spin_lock_irqsave(&ksc->clk_lock, flags);
reg = readl(ksc->regs + cfg->mux_reg);
if (index)
reg |= BIT(cfg->mux_bit);
else
reg &= ~BIT(cfg->mux_bit);
writel(reg, ksc->regs + cfg->mux_reg);
spin_unlock_irqrestore(&ksc->clk_lock, flags);
return 0;
}
static u8 k210_clk_get_parent(struct clk_hw *hw)
{
struct k210_clk *kclk = to_k210_clk(hw);
struct k210_sysclk *ksc = kclk->ksc;
struct k210_clk_cfg *cfg = &k210_clk_cfgs[kclk->id];
unsigned long flags;
u32 reg, idx;
spin_lock_irqsave(&ksc->clk_lock, flags);
reg = readl(ksc->regs + cfg->mux_reg);
idx = (reg & BIT(cfg->mux_bit)) ? 1 : 0;
spin_unlock_irqrestore(&ksc->clk_lock, flags);
return idx;
}
static unsigned long k210_clk_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct k210_clk *kclk = to_k210_clk(hw);
struct k210_sysclk *ksc = kclk->ksc;
struct k210_clk_cfg *cfg = &k210_clk_cfgs[kclk->id];
u32 reg, div_val;
if (!cfg->div_reg)
return parent_rate;
reg = readl(ksc->regs + cfg->div_reg);
div_val = (reg >> cfg->div_shift) & GENMASK(cfg->div_width - 1, 0);
switch (cfg->div_type) {
case K210_DIV_ONE_BASED:
return parent_rate / (div_val + 1);
case K210_DIV_DOUBLE_ONE_BASED:
return parent_rate / ((div_val + 1) * 2);
case K210_DIV_POWER_OF_TWO:
return parent_rate / (2UL << div_val);
case K210_DIV_NONE:
default:
return 0;
}
}
static const struct clk_ops k210_clk_mux_ops = {
.enable = k210_clk_enable,
.disable = k210_clk_disable,
.determine_rate = clk_hw_determine_rate_no_reparent,
.set_parent = k210_clk_set_parent,
.get_parent = k210_clk_get_parent,
.recalc_rate = k210_clk_get_rate,
};
static const struct clk_ops k210_clk_ops = {
.enable = k210_clk_enable,
.disable = k210_clk_disable,
.recalc_rate = k210_clk_get_rate,
};
static void __init k210_register_clk(struct device_node *np,
struct k210_sysclk *ksc, int id,
const struct clk_parent_data *parent_data,
int num_parents, unsigned long flags)
{
struct k210_clk *kclk = &ksc->clks[id];
struct clk_init_data init = {};
int ret;
init.name = k210_clk_cfgs[id].name;
init.flags = flags;
init.parent_data = parent_data;
init.num_parents = num_parents;
if (num_parents > 1)
init.ops = &k210_clk_mux_ops;
else
init.ops = &k210_clk_ops;
kclk->id = id;
kclk->ksc = ksc;
kclk->hw.init = &init;
ret = of_clk_hw_register(np, &kclk->hw);
if (ret) {
pr_err("%pOFP: register clock %s failed\n",
np, k210_clk_cfgs[id].name);
kclk->id = -1;
}
}
/*
* All muxed clocks have IN0 and PLL0 as parents.
*/
static inline void __init k210_register_mux_clk(struct device_node *np,
struct k210_sysclk *ksc, int id)
{
const struct clk_parent_data parent_data[2] = {
{ /* .index = 0 for in0 */ },
{ .hw = &ksc->plls[K210_PLL0].hw }
};
k210_register_clk(np, ksc, id, parent_data, 2, 0);
}
static inline void __init k210_register_in0_child(struct device_node *np,
struct k210_sysclk *ksc, int id)
{
const struct clk_parent_data parent_data = {
/* .index = 0 for in0 */
};
k210_register_clk(np, ksc, id, &parent_data, 1, 0);
}
static inline void __init k210_register_pll_child(struct device_node *np,
struct k210_sysclk *ksc, int id,
enum k210_pll_id pllid,
unsigned long flags)
{
const struct clk_parent_data parent_data = {
.hw = &ksc->plls[pllid].hw,
};
k210_register_clk(np, ksc, id, &parent_data, 1, flags);
}
static inline void __init k210_register_aclk_child(struct device_node *np,
struct k210_sysclk *ksc, int id,
unsigned long flags)
{
const struct clk_parent_data parent_data = {
.hw = &ksc->aclk,
};
k210_register_clk(np, ksc, id, &parent_data, 1, flags);
}
static inline void __init k210_register_clk_child(struct device_node *np,
struct k210_sysclk *ksc, int id,
int parent_id)
{
const struct clk_parent_data parent_data = {
.hw = &ksc->clks[parent_id].hw,
};
k210_register_clk(np, ksc, id, &parent_data, 1, 0);
}
static struct clk_hw *k210_clk_hw_onecell_get(struct of_phandle_args *clkspec,
void *data)
{
struct k210_sysclk *ksc = data;
unsigned int idx = clkspec->args[0];
if (idx >= K210_NUM_CLKS)
return ERR_PTR(-EINVAL);
return &ksc->clks[idx].hw;
}
static void __init k210_clk_init(struct device_node *np)
{
struct device_node *sysctl_np;
struct k210_sysclk *ksc;
int i, ret;
ksc = kzalloc(sizeof(*ksc), GFP_KERNEL);
if (!ksc)
return;
spin_lock_init(&ksc->clk_lock);
sysctl_np = of_get_parent(np);
ksc->regs = of_iomap(sysctl_np, 0);
of_node_put(sysctl_np);
if (!ksc->regs) {
pr_err("%pOFP: failed to map registers\n", np);
return;
}
ret = k210_register_plls(np, ksc);
if (ret)
return;
ret = k210_register_aclk(np, ksc);
if (ret)
return;
/*
* Critical clocks: there are no consumers of the SRAM clocks,
* including the AI clock for the third SRAM bank. The CPU clock
* is only referenced by the uarths serial device and so would be
* disabled if the serial console is disabled to switch to another
* console. Mark all these clocks as critical so that they are never
* disabled by the core clock management.
*/
k210_register_aclk_child(np, ksc, K210_CLK_CPU, CLK_IS_CRITICAL);
k210_register_aclk_child(np, ksc, K210_CLK_SRAM0, CLK_IS_CRITICAL);
k210_register_aclk_child(np, ksc, K210_CLK_SRAM1, CLK_IS_CRITICAL);
k210_register_pll_child(np, ksc, K210_CLK_AI, K210_PLL1,
CLK_IS_CRITICAL);
/* Clocks with aclk as source */
k210_register_aclk_child(np, ksc, K210_CLK_DMA, 0);
k210_register_aclk_child(np, ksc, K210_CLK_FFT, 0);
k210_register_aclk_child(np, ksc, K210_CLK_ROM, 0);
k210_register_aclk_child(np, ksc, K210_CLK_DVP, 0);
k210_register_aclk_child(np, ksc, K210_CLK_APB0, 0);
k210_register_aclk_child(np, ksc, K210_CLK_APB1, 0);
k210_register_aclk_child(np, ksc, K210_CLK_APB2, 0);
/* Clocks with PLL0 as source */
k210_register_pll_child(np, ksc, K210_CLK_SPI0, K210_PLL0, 0);
k210_register_pll_child(np, ksc, K210_CLK_SPI1, K210_PLL0, 0);
k210_register_pll_child(np, ksc, K210_CLK_SPI2, K210_PLL0, 0);
k210_register_pll_child(np, ksc, K210_CLK_I2C0, K210_PLL0, 0);
k210_register_pll_child(np, ksc, K210_CLK_I2C1, K210_PLL0, 0);
k210_register_pll_child(np, ksc, K210_CLK_I2C2, K210_PLL0, 0);
/* Clocks with PLL2 as source */
k210_register_pll_child(np, ksc, K210_CLK_I2S0, K210_PLL2, 0);
k210_register_pll_child(np, ksc, K210_CLK_I2S1, K210_PLL2, 0);
k210_register_pll_child(np, ksc, K210_CLK_I2S2, K210_PLL2, 0);
k210_register_pll_child(np, ksc, K210_CLK_I2S0_M, K210_PLL2, 0);
k210_register_pll_child(np, ksc, K210_CLK_I2S1_M, K210_PLL2, 0);
k210_register_pll_child(np, ksc, K210_CLK_I2S2_M, K210_PLL2, 0);
/* Clocks with IN0 as source */
k210_register_in0_child(np, ksc, K210_CLK_WDT0);
k210_register_in0_child(np, ksc, K210_CLK_WDT1);
k210_register_in0_child(np, ksc, K210_CLK_RTC);
/* Clocks with APB0 as source */
k210_register_clk_child(np, ksc, K210_CLK_GPIO, K210_CLK_APB0);
k210_register_clk_child(np, ksc, K210_CLK_UART1, K210_CLK_APB0);
k210_register_clk_child(np, ksc, K210_CLK_UART2, K210_CLK_APB0);
k210_register_clk_child(np, ksc, K210_CLK_UART3, K210_CLK_APB0);
k210_register_clk_child(np, ksc, K210_CLK_FPIOA, K210_CLK_APB0);
k210_register_clk_child(np, ksc, K210_CLK_SHA, K210_CLK_APB0);
/* Clocks with APB1 as source */
k210_register_clk_child(np, ksc, K210_CLK_AES, K210_CLK_APB1);
k210_register_clk_child(np, ksc, K210_CLK_OTP, K210_CLK_APB1);
/* Mux clocks with in0 or pll0 as source */
k210_register_mux_clk(np, ksc, K210_CLK_SPI3);
k210_register_mux_clk(np, ksc, K210_CLK_TIMER0);
k210_register_mux_clk(np, ksc, K210_CLK_TIMER1);
k210_register_mux_clk(np, ksc, K210_CLK_TIMER2);
/* Check for registration errors */
for (i = 0; i < K210_NUM_CLKS; i++) {
if (ksc->clks[i].id != i)
return;
}
ret = of_clk_add_hw_provider(np, k210_clk_hw_onecell_get, ksc);
if (ret) {
pr_err("%pOFP: add clock provider failed %d\n", np, ret);
return;
}
pr_info("%pOFP: CPU running at %lu MHz\n",
np, clk_hw_get_rate(&ksc->clks[K210_CLK_CPU].hw) / 1000000);
}
CLK_OF_DECLARE(k210_clk, "canaan,k210-clk", k210_clk_init);
/*
* Enable PLL1 to be able to use the AI SRAM.
*/
void __init k210_clk_early_init(void __iomem *regs)
{
struct k210_pll pll1;
/* Make sure ACLK selector is set to PLL0 */
k210_aclk_set_selector(regs, 1);
/* Start up PLL1 to enable the AI SRAM bank for general memory use */
k210_init_pll(regs, K210_PLL1, &pll1);
k210_pll_enable_hw(regs, &pll1);
}
| linux-master | drivers/clk/clk-k210.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2014 Philipp Zabel, Pengutronix
*
* PWM (mis)used as clock output
*/
#include <linux/clk-provider.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
struct clk_pwm {
struct clk_hw hw;
struct pwm_device *pwm;
u32 fixed_rate;
};
static inline struct clk_pwm *to_clk_pwm(struct clk_hw *hw)
{
return container_of(hw, struct clk_pwm, hw);
}
static int clk_pwm_prepare(struct clk_hw *hw)
{
struct clk_pwm *clk_pwm = to_clk_pwm(hw);
return pwm_enable(clk_pwm->pwm);
}
static void clk_pwm_unprepare(struct clk_hw *hw)
{
struct clk_pwm *clk_pwm = to_clk_pwm(hw);
pwm_disable(clk_pwm->pwm);
}
static unsigned long clk_pwm_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_pwm *clk_pwm = to_clk_pwm(hw);
return clk_pwm->fixed_rate;
}
static int clk_pwm_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
{
struct clk_pwm *clk_pwm = to_clk_pwm(hw);
struct pwm_state state;
pwm_get_state(clk_pwm->pwm, &state);
duty->num = state.duty_cycle;
duty->den = state.period;
return 0;
}
static const struct clk_ops clk_pwm_ops = {
.prepare = clk_pwm_prepare,
.unprepare = clk_pwm_unprepare,
.recalc_rate = clk_pwm_recalc_rate,
.get_duty_cycle = clk_pwm_get_duty_cycle,
};
static int clk_pwm_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
struct clk_init_data init;
struct clk_pwm *clk_pwm;
struct pwm_device *pwm;
struct pwm_args pargs;
const char *clk_name;
int ret;
clk_pwm = devm_kzalloc(&pdev->dev, sizeof(*clk_pwm), GFP_KERNEL);
if (!clk_pwm)
return -ENOMEM;
pwm = devm_pwm_get(&pdev->dev, NULL);
if (IS_ERR(pwm))
return PTR_ERR(pwm);
pwm_get_args(pwm, &pargs);
if (!pargs.period) {
dev_err(&pdev->dev, "invalid PWM period\n");
return -EINVAL;
}
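/*
* Without a "clock-frequency" property, derive the rate from the PWM
* period: rate = NSEC_PER_SEC / period_ns.
*/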
if (of_property_read_u32(node, "clock-frequency", &clk_pwm->fixed_rate))
clk_pwm->fixed_rate = div64_u64(NSEC_PER_SEC, pargs.period);
if (!clk_pwm->fixed_rate) {
dev_err(&pdev->dev, "fixed_rate cannot be zero\n");
return -EINVAL;
}
if (pargs.period != NSEC_PER_SEC / clk_pwm->fixed_rate &&
pargs.period != DIV_ROUND_UP(NSEC_PER_SEC, clk_pwm->fixed_rate)) {
dev_err(&pdev->dev,
"clock-frequency does not match PWM period\n");
return -EINVAL;
}
/*
* FIXME: pwm_apply_args() should be removed when switching to the
* atomic PWM API.
*/
pwm_apply_args(pwm);
ret = pwm_config(pwm, (pargs.period + 1) >> 1, pargs.period);
if (ret < 0)
return ret;
clk_name = node->name;
of_property_read_string(node, "clock-output-names", &clk_name);
init.name = clk_name;
init.ops = &clk_pwm_ops;
init.flags = 0;
init.num_parents = 0;
clk_pwm->pwm = pwm;
clk_pwm->hw.init = &init;
ret = devm_clk_hw_register(&pdev->dev, &clk_pwm->hw);
if (ret)
return ret;
return of_clk_add_hw_provider(node, of_clk_hw_simple_get, &clk_pwm->hw);
}
static void clk_pwm_remove(struct platform_device *pdev)
{
of_clk_del_provider(pdev->dev.of_node);
}
static const struct of_device_id clk_pwm_dt_ids[] = {
{ .compatible = "pwm-clock" },
{ }
};
MODULE_DEVICE_TABLE(of, clk_pwm_dt_ids);
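/*
* Illustrative device tree usage (a sketch only; the PWM phandle and
* specifier are hypothetical, and a 40 ns period gives 25 MHz):
*
*	clock {
*		compatible = "pwm-clock";
*		#clock-cells = <0>;
*		clock-frequency = <25000000>;
*		clock-output-names = "mipi_mclk";
*		pwms = <&pwm2 0 40>;
*	};
*/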
static struct platform_driver clk_pwm_driver = {
.probe = clk_pwm_probe,
.remove_new = clk_pwm_remove,
.driver = {
.name = "pwm-clock",
.of_match_table = clk_pwm_dt_ids,
},
};
module_platform_driver(clk_pwm_driver);
MODULE_AUTHOR("Philipp Zabel <[email protected]>");
MODULE_DESCRIPTION("PWM clock driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/clk/clk-pwm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* System Control and Power Interface (SCMI) Protocol based clock driver
*
* Copyright (C) 2018-2022 ARM Ltd.
*/
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/scmi_protocol.h>
#include <asm/div64.h>
static const struct scmi_clk_proto_ops *scmi_proto_clk_ops;
struct scmi_clk {
u32 id;
struct clk_hw hw;
const struct scmi_clock_info *info;
const struct scmi_protocol_handle *ph;
};
#define to_scmi_clk(clk) container_of(clk, struct scmi_clk, hw)
static unsigned long scmi_clk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
int ret;
u64 rate;
struct scmi_clk *clk = to_scmi_clk(hw);
ret = scmi_proto_clk_ops->rate_get(clk->ph, clk->id, &rate);
if (ret)
return 0;
return rate;
}
static long scmi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
u64 fmin, fmax, ftmp;
struct scmi_clk *clk = to_scmi_clk(hw);
/*
* We can't figure out what rate it will be, so just return the
* rate back to the caller. scmi_clk_recalc_rate() will be called
* after the rate is set and we'll know what rate the clock is
* running at then.
*/
if (clk->info->rate_discrete)
return rate;
fmin = clk->info->range.min_rate;
fmax = clk->info->range.max_rate;
if (rate <= fmin)
return fmin;
else if (rate >= fmax)
return fmax;
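/*
* Round up to the next supported step:
*
*   rate = fmin + DIV_ROUND_UP(rate - fmin, step) * step
*/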
ftmp = rate - fmin;
ftmp += clk->info->range.step_size - 1; /* to round up */
do_div(ftmp, clk->info->range.step_size);
return ftmp * clk->info->range.step_size + fmin;
}
static int scmi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct scmi_clk *clk = to_scmi_clk(hw);
return scmi_proto_clk_ops->rate_set(clk->ph, clk->id, rate);
}
static int scmi_clk_enable(struct clk_hw *hw)
{
struct scmi_clk *clk = to_scmi_clk(hw);
return scmi_proto_clk_ops->enable(clk->ph, clk->id);
}
static void scmi_clk_disable(struct clk_hw *hw)
{
struct scmi_clk *clk = to_scmi_clk(hw);
scmi_proto_clk_ops->disable(clk->ph, clk->id);
}
static int scmi_clk_atomic_enable(struct clk_hw *hw)
{
struct scmi_clk *clk = to_scmi_clk(hw);
return scmi_proto_clk_ops->enable_atomic(clk->ph, clk->id);
}
static void scmi_clk_atomic_disable(struct clk_hw *hw)
{
struct scmi_clk *clk = to_scmi_clk(hw);
scmi_proto_clk_ops->disable_atomic(clk->ph, clk->id);
}
/*
* We can provide enable/disable atomic callbacks only if the underlying SCMI
* transport for an SCMI instance is configured to handle SCMI commands in an
* atomic manner.
*
* When no SCMI atomic transport support is available we instead provide only
* the prepare/unprepare API, as allowed by the clock framework when atomic
* calls are not available.
*
* Two distinct sets of clk_ops are provided since we could have multiple SCMI
* instances with different underlying transport quality, so they cannot be
* shared.
*/
static const struct clk_ops scmi_clk_ops = {
.recalc_rate = scmi_clk_recalc_rate,
.round_rate = scmi_clk_round_rate,
.set_rate = scmi_clk_set_rate,
.prepare = scmi_clk_enable,
.unprepare = scmi_clk_disable,
};
static const struct clk_ops scmi_atomic_clk_ops = {
.recalc_rate = scmi_clk_recalc_rate,
.round_rate = scmi_clk_round_rate,
.set_rate = scmi_clk_set_rate,
.enable = scmi_clk_atomic_enable,
.disable = scmi_clk_atomic_disable,
};
static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk,
const struct clk_ops *scmi_ops)
{
int ret;
unsigned long min_rate, max_rate;
struct clk_init_data init = {
.flags = CLK_GET_RATE_NOCACHE,
.num_parents = 0,
.ops = scmi_ops,
.name = sclk->info->name,
};
sclk->hw.init = &init;
ret = devm_clk_hw_register(dev, &sclk->hw);
if (ret)
return ret;
if (sclk->info->rate_discrete) {
int num_rates = sclk->info->list.num_rates;
if (num_rates <= 0)
return -EINVAL;
min_rate = sclk->info->list.rates[0];
max_rate = sclk->info->list.rates[num_rates - 1];
} else {
min_rate = sclk->info->range.min_rate;
max_rate = sclk->info->range.max_rate;
}
clk_hw_set_rate_range(&sclk->hw, min_rate, max_rate);
return ret;
}
static int scmi_clocks_probe(struct scmi_device *sdev)
{
int idx, count, err;
unsigned int atomic_threshold;
bool is_atomic;
struct clk_hw **hws;
struct clk_hw_onecell_data *clk_data;
struct device *dev = &sdev->dev;
struct device_node *np = dev->of_node;
const struct scmi_handle *handle = sdev->handle;
struct scmi_protocol_handle *ph;
if (!handle)
return -ENODEV;
scmi_proto_clk_ops =
handle->devm_protocol_get(sdev, SCMI_PROTOCOL_CLOCK, &ph);
if (IS_ERR(scmi_proto_clk_ops))
return PTR_ERR(scmi_proto_clk_ops);
count = scmi_proto_clk_ops->count_get(ph);
if (count < 0) {
dev_err(dev, "%pOFn: invalid clock output count\n", np);
return -EINVAL;
}
clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, count),
GFP_KERNEL);
if (!clk_data)
return -ENOMEM;
clk_data->num = count;
hws = clk_data->hws;
is_atomic = handle->is_transport_atomic(handle, &atomic_threshold);
for (idx = 0; idx < count; idx++) {
struct scmi_clk *sclk;
const struct clk_ops *scmi_ops;
sclk = devm_kzalloc(dev, sizeof(*sclk), GFP_KERNEL);
if (!sclk)
return -ENOMEM;
sclk->info = scmi_proto_clk_ops->info_get(ph, idx);
if (!sclk->info) {
dev_dbg(dev, "invalid clock info for idx %d\n", idx);
continue;
}
sclk->id = idx;
sclk->ph = ph;
/*
* Note that when the transport is atomic but the SCMI protocol did not
* specify (or support) an enable_latency for a clock, we default to
* using atomic operations.
*/
if (is_atomic &&
sclk->info->enable_latency <= atomic_threshold)
scmi_ops = &scmi_atomic_clk_ops;
else
scmi_ops = &scmi_clk_ops;
err = scmi_clk_ops_init(dev, sclk, scmi_ops);
if (err) {
dev_err(dev, "failed to register clock %d\n", idx);
devm_kfree(dev, sclk);
hws[idx] = NULL;
} else {
dev_dbg(dev, "Registered clock:%s%s\n",
sclk->info->name,
scmi_ops == &scmi_atomic_clk_ops ?
" (atomic ops)" : "");
hws[idx] = &sclk->hw;
}
}
return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
clk_data);
}
static const struct scmi_device_id scmi_id_table[] = {
{ SCMI_PROTOCOL_CLOCK, "clocks" },
{ },
};
MODULE_DEVICE_TABLE(scmi, scmi_id_table);
static struct scmi_driver scmi_clocks_driver = {
.name = "scmi-clocks",
.probe = scmi_clocks_probe,
.id_table = scmi_id_table,
};
module_scmi_driver(scmi_clocks_driver);
MODULE_AUTHOR("Sudeep Holla <[email protected]>");
MODULE_DESCRIPTION("ARM SCMI clock driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/clk/clk-scmi.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/delay.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <dt-bindings/clock/en7523-clk.h>
#define REG_PCI_CONTROL 0x88
#define REG_PCI_CONTROL_PERSTOUT BIT(29)
#define REG_PCI_CONTROL_PERSTOUT1 BIT(26)
#define REG_PCI_CONTROL_REFCLK_EN1 BIT(22)
#define REG_GSW_CLK_DIV_SEL 0x1b4
#define REG_EMI_CLK_DIV_SEL 0x1b8
#define REG_BUS_CLK_DIV_SEL 0x1bc
#define REG_SPI_CLK_DIV_SEL 0x1c4
#define REG_SPI_CLK_FREQ_SEL 0x1c8
#define REG_NPU_CLK_DIV_SEL 0x1fc
#define REG_CRYPTO_CLKSRC 0x200
#define REG_RESET_CONTROL 0x834
#define REG_RESET_CONTROL_PCIEHB BIT(29)
#define REG_RESET_CONTROL_PCIE1 BIT(27)
#define REG_RESET_CONTROL_PCIE2 BIT(26)
struct en_clk_desc {
int id;
const char *name;
u32 base_reg;
u8 base_bits;
u8 base_shift;
union {
const unsigned int *base_values;
unsigned int base_value;
};
size_t n_base_values;
u16 div_reg;
u8 div_bits;
u8 div_shift;
u16 div_val0;
u8 div_step;
};
struct en_clk_gate {
void __iomem *base;
struct clk_hw hw;
};
static const u32 gsw_base[] = { 400000000, 500000000 };
static const u32 emi_base[] = { 333000000, 400000000 };
static const u32 bus_base[] = { 500000000, 540000000 };
static const u32 slic_base[] = { 100000000, 3125000 };
static const u32 npu_base[] = { 333000000, 400000000, 500000000 };
static const struct en_clk_desc en7523_base_clks[] = {
{
.id = EN7523_CLK_GSW,
.name = "gsw",
.base_reg = REG_GSW_CLK_DIV_SEL,
.base_bits = 1,
.base_shift = 8,
.base_values = gsw_base,
.n_base_values = ARRAY_SIZE(gsw_base),
.div_bits = 3,
.div_shift = 0,
.div_step = 1,
}, {
.id = EN7523_CLK_EMI,
.name = "emi",
.base_reg = REG_EMI_CLK_DIV_SEL,
.base_bits = 1,
.base_shift = 8,
.base_values = emi_base,
.n_base_values = ARRAY_SIZE(emi_base),
.div_bits = 3,
.div_shift = 0,
.div_step = 1,
}, {
.id = EN7523_CLK_BUS,
.name = "bus",
.base_reg = REG_BUS_CLK_DIV_SEL,
.base_bits = 1,
.base_shift = 8,
.base_values = bus_base,
.n_base_values = ARRAY_SIZE(bus_base),
.div_bits = 3,
.div_shift = 0,
.div_step = 1,
}, {
.id = EN7523_CLK_SLIC,
.name = "slic",
.base_reg = REG_SPI_CLK_FREQ_SEL,
.base_bits = 1,
.base_shift = 0,
.base_values = slic_base,
.n_base_values = ARRAY_SIZE(slic_base),
.div_reg = REG_SPI_CLK_DIV_SEL,
.div_bits = 5,
.div_shift = 24,
.div_val0 = 20,
.div_step = 2,
}, {
.id = EN7523_CLK_SPI,
.name = "spi",
.base_reg = REG_SPI_CLK_DIV_SEL,
.base_value = 400000000,
.div_bits = 5,
.div_shift = 8,
.div_val0 = 40,
.div_step = 2,
}, {
.id = EN7523_CLK_NPU,
.name = "npu",
.base_reg = REG_NPU_CLK_DIV_SEL,
.base_bits = 2,
.base_shift = 8,
.base_values = npu_base,
.n_base_values = ARRAY_SIZE(npu_base),
.div_bits = 3,
.div_shift = 0,
.div_step = 1,
}, {
.id = EN7523_CLK_CRYPTO,
.name = "crypto",
.base_reg = REG_CRYPTO_CLKSRC,
.base_bits = 1,
.base_shift = 8,
.base_values = emi_base,
.n_base_values = ARRAY_SIZE(emi_base),
}
};
static const struct of_device_id of_match_clk_en7523[] = {
{ .compatible = "airoha,en7523-scu", },
{ /* sentinel */ }
};
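/*
* Return the base frequency selected by the clock's source field;
* clocks without a selector (base_bits == 0) use a fixed base_value.
*/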
static unsigned int en7523_get_base_rate(void __iomem *base, unsigned int i)
{
const struct en_clk_desc *desc = &en7523_base_clks[i];
u32 val;
if (!desc->base_bits)
return desc->base_value;
val = readl(base + desc->base_reg);
val >>= desc->base_shift;
val &= (1 << desc->base_bits) - 1;
if (val >= desc->n_base_values)
return 0;
return desc->base_values[val];
}
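/*
* Decode a divider field: a value of 0 selects the fixed default
* divider (div_val0) when one is defined; otherwise the divider is
* (value + 1) * div_step.
*/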
static u32 en7523_get_div(void __iomem *base, int i)
{
const struct en_clk_desc *desc = &en7523_base_clks[i];
u32 reg, val;
if (!desc->div_bits)
return 1;
reg = desc->div_reg ? desc->div_reg : desc->base_reg;
val = readl(base + reg);
val >>= desc->div_shift;
val &= (1 << desc->div_bits) - 1;
if (!val && desc->div_val0)
return desc->div_val0;
return (val + 1) * desc->div_step;
}
static int en7523_pci_is_enabled(struct clk_hw *hw)
{
struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw);
return !!(readl(cg->base + REG_PCI_CONTROL) & REG_PCI_CONTROL_REFCLK_EN1);
}
static int en7523_pci_prepare(struct clk_hw *hw)
{
struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw);
void __iomem *np_base = cg->base;
u32 val, mask;
/* Need to pull device low before reset */
val = readl(np_base + REG_PCI_CONTROL);
val &= ~(REG_PCI_CONTROL_PERSTOUT1 | REG_PCI_CONTROL_PERSTOUT);
writel(val, np_base + REG_PCI_CONTROL);
usleep_range(1000, 2000);
/* Enable PCIe port 1 */
val |= REG_PCI_CONTROL_REFCLK_EN1;
writel(val, np_base + REG_PCI_CONTROL);
usleep_range(1000, 2000);
/* Reset to default */
val = readl(np_base + REG_RESET_CONTROL);
mask = REG_RESET_CONTROL_PCIE1 | REG_RESET_CONTROL_PCIE2 |
REG_RESET_CONTROL_PCIEHB;
writel(val & ~mask, np_base + REG_RESET_CONTROL);
usleep_range(1000, 2000);
writel(val | mask, np_base + REG_RESET_CONTROL);
msleep(100);
writel(val & ~mask, np_base + REG_RESET_CONTROL);
usleep_range(5000, 10000);
/* Release device */
mask = REG_PCI_CONTROL_PERSTOUT1 | REG_PCI_CONTROL_PERSTOUT;
val = readl(np_base + REG_PCI_CONTROL);
writel(val & ~mask, np_base + REG_PCI_CONTROL);
usleep_range(1000, 2000);
writel(val | mask, np_base + REG_PCI_CONTROL);
msleep(250);
return 0;
}
static void en7523_pci_unprepare(struct clk_hw *hw)
{
struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw);
void __iomem *np_base = cg->base;
u32 val;
val = readl(np_base + REG_PCI_CONTROL);
val &= ~REG_PCI_CONTROL_REFCLK_EN1;
writel(val, np_base + REG_PCI_CONTROL);
}
static struct clk_hw *en7523_register_pcie_clk(struct device *dev,
void __iomem *np_base)
{
static const struct clk_ops pcie_gate_ops = {
.is_enabled = en7523_pci_is_enabled,
.prepare = en7523_pci_prepare,
.unprepare = en7523_pci_unprepare,
};
struct clk_init_data init = {
.name = "pcie",
.ops = &pcie_gate_ops,
};
struct en_clk_gate *cg;
cg = devm_kzalloc(dev, sizeof(*cg), GFP_KERNEL);
if (!cg)
return NULL;
cg->base = np_base;
cg->hw.init = &init;
en7523_pci_unprepare(&cg->hw);
if (clk_hw_register(dev, &cg->hw))
return NULL;
return &cg->hw;
}
static void en7523_register_clocks(struct device *dev, struct clk_hw_onecell_data *clk_data,
void __iomem *base, void __iomem *np_base)
{
struct clk_hw *hw;
u32 rate;
int i;
for (i = 0; i < ARRAY_SIZE(en7523_base_clks); i++) {
const struct en_clk_desc *desc = &en7523_base_clks[i];
rate = en7523_get_base_rate(base, i);
rate /= en7523_get_div(base, i);
hw = clk_hw_register_fixed_rate(dev, desc->name, NULL, 0, rate);
if (IS_ERR(hw)) {
pr_err("Failed to register clk %s: %ld\n",
desc->name, PTR_ERR(hw));
continue;
}
clk_data->hws[desc->id] = hw;
}
hw = en7523_register_pcie_clk(dev, np_base);
clk_data->hws[EN7523_CLK_PCIE] = hw;
clk_data->num = EN7523_NUM_CLOCKS;
}
static int en7523_clk_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
struct clk_hw_onecell_data *clk_data;
void __iomem *base, *np_base;
int r;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
np_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(np_base))
return PTR_ERR(np_base);
clk_data = devm_kzalloc(&pdev->dev,
struct_size(clk_data, hws, EN7523_NUM_CLOCKS),
GFP_KERNEL);
if (!clk_data)
return -ENOMEM;
en7523_register_clocks(&pdev->dev, clk_data, base, np_base);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
dev_err(&pdev->dev,
"could not register clock provider: %s: %d\n",
pdev->name, r);
return r;
}
static struct platform_driver clk_en7523_drv = {
.probe = en7523_clk_probe,
.driver = {
.name = "clk-en7523",
.of_match_table = of_match_clk_en7523,
.suppress_bind_attrs = true,
},
};
static int __init clk_en7523_init(void)
{
return platform_driver_register(&clk_en7523_drv);
}
arch_initcall(clk_en7523_init);
| linux-master | drivers/clk/clk-en7523.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Driver for the Renesas 9-series PCIe clock generators
*
* The following series can be supported:
* - 9FGV/9DBV/9DMV/9FGL/9DML/9QXL/9SQ
* Currently supported:
* - 9FGV0241
* - 9FGV0441
*
* Copyright (C) 2022 Marek Vasut <[email protected]>
*/
#include <linux/clk-provider.h>
#include <linux/i2c.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regmap.h>
#define RS9_REG_OE 0x0
#define RS9_REG_SS 0x1
#define RS9_REG_SS_AMP_0V6 0x0
#define RS9_REG_SS_AMP_0V7 0x1
#define RS9_REG_SS_AMP_0V8 0x2
#define RS9_REG_SS_AMP_0V9 0x3
#define RS9_REG_SS_AMP_MASK 0x3
#define RS9_REG_SS_SSC_100 0
#define RS9_REG_SS_SSC_M025 (1 << 3)
#define RS9_REG_SS_SSC_M050 (3 << 3)
#define RS9_REG_SS_SSC_MASK (3 << 3)
#define RS9_REG_SS_SSC_LOCK BIT(5)
#define RS9_REG_SR 0x2
#define RS9_REG_REF 0x3
#define RS9_REG_REF_OE BIT(4)
#define RS9_REG_REF_OD BIT(5)
#define RS9_REG_REF_SR_SLOWEST 0
#define RS9_REG_REF_SR_SLOW (1 << 6)
#define RS9_REG_REF_SR_FAST (2 << 6)
#define RS9_REG_REF_SR_FASTER (3 << 6)
#define RS9_REG_VID 0x5
#define RS9_REG_DID 0x6
#define RS9_REG_BCP 0x7
#define RS9_REG_VID_IDT 0x01
#define RS9_REG_DID_TYPE_FGV (0x0 << RS9_REG_DID_TYPE_SHIFT)
#define RS9_REG_DID_TYPE_DBV (0x1 << RS9_REG_DID_TYPE_SHIFT)
#define RS9_REG_DID_TYPE_DMV (0x2 << RS9_REG_DID_TYPE_SHIFT)
#define RS9_REG_DID_TYPE_SHIFT 0x6
/* Supported Renesas 9-series models. */
enum rs9_model {
RENESAS_9FGV0241,
RENESAS_9FGV0441,
};
/* Structure to describe features of a particular 9-series model */
struct rs9_chip_info {
const enum rs9_model model;
unsigned int num_clks;
u8 did;
};
struct rs9_driver_data {
struct i2c_client *client;
struct regmap *regmap;
const struct rs9_chip_info *chip_info;
struct clk_hw *clk_dif[4];
u8 pll_amplitude;
u8 pll_ssc;
u8 clk_dif_sr;
};
/*
* Renesas 9-series i2c regmap
*/
static const struct regmap_range rs9_readable_ranges[] = {
regmap_reg_range(RS9_REG_OE, RS9_REG_REF),
regmap_reg_range(RS9_REG_VID, RS9_REG_BCP),
};
static const struct regmap_access_table rs9_readable_table = {
.yes_ranges = rs9_readable_ranges,
.n_yes_ranges = ARRAY_SIZE(rs9_readable_ranges),
};
static const struct regmap_range rs9_writeable_ranges[] = {
regmap_reg_range(RS9_REG_OE, RS9_REG_REF),
regmap_reg_range(RS9_REG_BCP, RS9_REG_BCP),
};
static const struct regmap_access_table rs9_writeable_table = {
.yes_ranges = rs9_writeable_ranges,
.n_yes_ranges = ARRAY_SIZE(rs9_writeable_ranges),
};
static int rs9_regmap_i2c_write(void *context,
unsigned int reg, unsigned int val)
{
struct i2c_client *i2c = context;
const u8 data[3] = { reg, 1, val };
const int count = ARRAY_SIZE(data);
int ret;
ret = i2c_master_send(i2c, data, count);
if (ret == count)
return 0;
else if (ret < 0)
return ret;
else
return -EIO;
}
static int rs9_regmap_i2c_read(void *context,
unsigned int reg, unsigned int *val)
{
struct i2c_client *i2c = context;
struct i2c_msg xfer[2];
u8 txdata = reg;
u8 rxdata[2];
int ret;
xfer[0].addr = i2c->addr;
xfer[0].flags = 0;
xfer[0].len = 1;
xfer[0].buf = (void *)&txdata;
xfer[1].addr = i2c->addr;
xfer[1].flags = I2C_M_RD;
xfer[1].len = 2;
xfer[1].buf = (void *)rxdata;
ret = i2c_transfer(i2c->adapter, xfer, 2);
if (ret < 0)
return ret;
if (ret != 2)
return -EIO;
/*
* Byte 0 is the transfer length, which is always 1 because the BCP
* register is programmed to 1 in rs9_probe(); ignore it and use the
* data from byte 1.
*/
*val = rxdata[1];
return 0;
}
static const struct regmap_config rs9_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.cache_type = REGCACHE_FLAT,
.max_register = RS9_REG_BCP,
.num_reg_defaults_raw = 0x8,
.rd_table = &rs9_readable_table,
.wr_table = &rs9_writeable_table,
.reg_write = rs9_regmap_i2c_write,
.reg_read = rs9_regmap_i2c_read,
};
static u8 rs9_calc_dif(const struct rs9_driver_data *rs9, int idx)
{
enum rs9_model model = rs9->chip_info->model;
if (model == RENESAS_9FGV0241)
return BIT(idx + 1);
else if (model == RENESAS_9FGV0441)
return BIT(idx);
return 0;
}
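/*
 * Note: on the 9FGV0241 the per-output DIF bits are offset by one, so
 * idx 0 maps to BIT(1) and idx 1 to BIT(2); on the 9FGV0441 idx maps
 * directly to BIT(idx).
 */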
static int rs9_get_output_config(struct rs9_driver_data *rs9, int idx)
{
struct i2c_client *client = rs9->client;
u8 dif = rs9_calc_dif(rs9, idx);
unsigned char name[5] = "DIF0";
struct device_node *np;
int ret;
u32 sr;
/* Set defaults */
rs9->clk_dif_sr |= dif;
snprintf(name, 5, "DIF%d", idx);
np = of_get_child_by_name(client->dev.of_node, name);
if (!np)
return 0;
/* Output clock slew rate */
ret = of_property_read_u32(np, "renesas,slew-rate", &sr);
of_node_put(np);
if (!ret) {
if (sr == 2000000) { /* 2V/ns */
rs9->clk_dif_sr &= ~dif;
} else if (sr == 3000000) { /* 3V/ns (default) */
rs9->clk_dif_sr |= dif;
} else
ret = dev_err_probe(&client->dev, -EINVAL,
"Invalid renesas,slew-rate value\n");
}
return ret;
}
static int rs9_get_common_config(struct rs9_driver_data *rs9)
{
struct i2c_client *client = rs9->client;
struct device_node *np = client->dev.of_node;
unsigned int amp, ssc;
int ret;
/* Set defaults */
rs9->pll_amplitude = RS9_REG_SS_AMP_0V7;
rs9->pll_ssc = RS9_REG_SS_SSC_100;
/* Output clock amplitude */
ret = of_property_read_u32(np, "renesas,out-amplitude-microvolt",
&amp);
if (!ret) {
if (amp == 600000) /* 0.6V */
rs9->pll_amplitude = RS9_REG_SS_AMP_0V6;
else if (amp == 700000) /* 0.7V (default) */
rs9->pll_amplitude = RS9_REG_SS_AMP_0V7;
else if (amp == 800000) /* 0.8V */
rs9->pll_amplitude = RS9_REG_SS_AMP_0V8;
else if (amp == 900000) /* 0.9V */
rs9->pll_amplitude = RS9_REG_SS_AMP_0V9;
else
return dev_err_probe(&client->dev, -EINVAL,
"Invalid renesas,out-amplitude-microvolt value\n");
}
/* Output clock spread spectrum */
ret = of_property_read_u32(np, "renesas,out-spread-spectrum", &ssc);
if (!ret) {
if (ssc == 100000) /* 100% ... no spread (default) */
rs9->pll_ssc = RS9_REG_SS_SSC_100;
else if (ssc == 99750) /* -0.25% ... down spread */
rs9->pll_ssc = RS9_REG_SS_SSC_M025;
else if (ssc == 99500) /* -0.50% ... down spread */
rs9->pll_ssc = RS9_REG_SS_SSC_M050;
else
return dev_err_probe(&client->dev, -EINVAL,
"Invalid renesas,out-spread-spectrum value\n");
}
return 0;
}
static void rs9_update_config(struct rs9_driver_data *rs9)
{
int i;
/* If amplitude is non-default, update it. */
if (rs9->pll_amplitude != RS9_REG_SS_AMP_0V7) {
regmap_update_bits(rs9->regmap, RS9_REG_SS, RS9_REG_SS_AMP_MASK,
rs9->pll_amplitude);
}
/* If SSC is non-default, update it. */
if (rs9->pll_ssc != RS9_REG_SS_SSC_100) {
regmap_update_bits(rs9->regmap, RS9_REG_SS, RS9_REG_SS_SSC_MASK,
rs9->pll_ssc);
}
for (i = 0; i < rs9->chip_info->num_clks; i++) {
u8 dif = rs9_calc_dif(rs9, i);
if (rs9->clk_dif_sr & dif)
continue;
regmap_update_bits(rs9->regmap, RS9_REG_SR, dif,
rs9->clk_dif_sr & dif);
}
}
static struct clk_hw *
rs9_of_clk_get(struct of_phandle_args *clkspec, void *data)
{
struct rs9_driver_data *rs9 = data;
unsigned int idx = clkspec->args[0];
return rs9->clk_dif[idx];
}
static int rs9_probe(struct i2c_client *client)
{
unsigned char name[5] = "DIF0";
struct rs9_driver_data *rs9;
unsigned int vid, did;
struct clk_hw *hw;
int i, ret;
rs9 = devm_kzalloc(&client->dev, sizeof(*rs9), GFP_KERNEL);
if (!rs9)
return -ENOMEM;
i2c_set_clientdata(client, rs9);
rs9->client = client;
rs9->chip_info = device_get_match_data(&client->dev);
if (!rs9->chip_info)
return -EINVAL;
/* Fetch common configuration from DT (if specified) */
ret = rs9_get_common_config(rs9);
if (ret)
return ret;
/* Fetch DIFx output configuration from DT (if specified) */
for (i = 0; i < rs9->chip_info->num_clks; i++) {
ret = rs9_get_output_config(rs9, i);
if (ret)
return ret;
}
rs9->regmap = devm_regmap_init(&client->dev, NULL,
client, &rs9_regmap_config);
if (IS_ERR(rs9->regmap))
return dev_err_probe(&client->dev, PTR_ERR(rs9->regmap),
"Failed to allocate register map\n");
/* Always read back 1 Byte via I2C */
ret = regmap_write(rs9->regmap, RS9_REG_BCP, 1);
if (ret < 0)
return ret;
ret = regmap_read(rs9->regmap, RS9_REG_VID, &vid);
if (ret < 0)
return ret;
ret = regmap_read(rs9->regmap, RS9_REG_DID, &did);
if (ret < 0)
return ret;
if (vid != RS9_REG_VID_IDT || did != rs9->chip_info->did)
return dev_err_probe(&client->dev, -ENODEV,
"Incorrect VID/DID: %#02x, %#02x. Expected %#02x, %#02x\n",
vid, did, RS9_REG_VID_IDT,
rs9->chip_info->did);
/* Register clock */
for (i = 0; i < rs9->chip_info->num_clks; i++) {
snprintf(name, 5, "DIF%d", i);
hw = devm_clk_hw_register_fixed_factor_index(&client->dev, name,
0, 0, 4, 1);
if (IS_ERR(hw))
return PTR_ERR(hw);
rs9->clk_dif[i] = hw;
}
ret = devm_of_clk_add_hw_provider(&client->dev, rs9_of_clk_get, rs9);
if (!ret)
rs9_update_config(rs9);
return ret;
}
static int __maybe_unused rs9_suspend(struct device *dev)
{
struct rs9_driver_data *rs9 = dev_get_drvdata(dev);
regcache_cache_only(rs9->regmap, true);
regcache_mark_dirty(rs9->regmap);
return 0;
}
static int __maybe_unused rs9_resume(struct device *dev)
{
struct rs9_driver_data *rs9 = dev_get_drvdata(dev);
int ret;
regcache_cache_only(rs9->regmap, false);
ret = regcache_sync(rs9->regmap);
if (ret)
dev_err(dev, "Failed to restore register map: %d\n", ret);
return ret;
}
static const struct rs9_chip_info renesas_9fgv0241_info = {
.model = RENESAS_9FGV0241,
.num_clks = 2,
.did = RS9_REG_DID_TYPE_FGV | 0x02,
};
static const struct rs9_chip_info renesas_9fgv0441_info = {
.model = RENESAS_9FGV0441,
.num_clks = 4,
.did = RS9_REG_DID_TYPE_FGV | 0x04,
};
static const struct i2c_device_id rs9_id[] = {
{ "9fgv0241", .driver_data = (kernel_ulong_t)&renesas_9fgv0241_info },
{ "9fgv0441", .driver_data = (kernel_ulong_t)&renesas_9fgv0441_info },
{ }
};
MODULE_DEVICE_TABLE(i2c, rs9_id);
static const struct of_device_id clk_rs9_of_match[] = {
{ .compatible = "renesas,9fgv0241", .data = &renesas_9fgv0241_info },
{ .compatible = "renesas,9fgv0441", .data = &renesas_9fgv0441_info },
{ }
};
MODULE_DEVICE_TABLE(of, clk_rs9_of_match);
static SIMPLE_DEV_PM_OPS(rs9_pm_ops, rs9_suspend, rs9_resume);
static struct i2c_driver rs9_driver = {
.driver = {
.name = "clk-renesas-pcie-9series",
.pm = &rs9_pm_ops,
.of_match_table = clk_rs9_of_match,
},
.probe = rs9_probe,
.id_table = rs9_id,
};
module_i2c_driver(rs9_driver);
MODULE_AUTHOR("Marek Vasut <[email protected]>");
MODULE_DESCRIPTION("Renesas 9-series PCIe clock generator driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/clk/clk-renesas-pcie.c |
// SPDX-License-Identifier: GPL-2.0+
//
// clk-max77686.c - Clock driver for Maxim 77686/MAX77802
//
// Copyright (C) 2012 Samsung Electronics
// Jonghwa Lee <[email protected]>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mfd/max77620.h>
#include <linux/mfd/max77686.h>
#include <linux/mfd/max77686-private.h>
#include <linux/clk-provider.h>
#include <linux/mutex.h>
#include <linux/clkdev.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <dt-bindings/clock/maxim,max77686.h>
#include <dt-bindings/clock/maxim,max77802.h>
#include <dt-bindings/clock/maxim,max77620.h>
#define MAX77802_CLOCK_LOW_JITTER_SHIFT 0x3
enum max77686_chip_name {
CHIP_MAX77686,
CHIP_MAX77802,
CHIP_MAX77620,
};
struct max77686_hw_clk_info {
const char *name;
u32 clk_reg;
u32 clk_enable_mask;
u32 flags;
};
struct max77686_clk_init_data {
struct regmap *regmap;
struct clk_hw hw;
struct clk_init_data clk_idata;
const struct max77686_hw_clk_info *clk_info;
};
struct max77686_clk_driver_data {
enum max77686_chip_name chip;
struct max77686_clk_init_data *max_clk_data;
size_t num_clks;
};
static const struct
max77686_hw_clk_info max77686_hw_clks_info[MAX77686_CLKS_NUM] = {
[MAX77686_CLK_AP] = {
.name = "32khz_ap",
.clk_reg = MAX77686_REG_32KHZ,
.clk_enable_mask = BIT(MAX77686_CLK_AP),
},
[MAX77686_CLK_CP] = {
.name = "32khz_cp",
.clk_reg = MAX77686_REG_32KHZ,
.clk_enable_mask = BIT(MAX77686_CLK_CP),
},
[MAX77686_CLK_PMIC] = {
.name = "32khz_pmic",
.clk_reg = MAX77686_REG_32KHZ,
.clk_enable_mask = BIT(MAX77686_CLK_PMIC),
},
};
static const struct
max77686_hw_clk_info max77802_hw_clks_info[MAX77802_CLKS_NUM] = {
[MAX77802_CLK_32K_AP] = {
.name = "32khz_ap",
.clk_reg = MAX77802_REG_32KHZ,
.clk_enable_mask = BIT(MAX77802_CLK_32K_AP),
},
[MAX77802_CLK_32K_CP] = {
.name = "32khz_cp",
.clk_reg = MAX77802_REG_32KHZ,
.clk_enable_mask = BIT(MAX77802_CLK_32K_CP),
},
};
static const struct
max77686_hw_clk_info max77620_hw_clks_info[MAX77620_CLKS_NUM] = {
[MAX77620_CLK_32K_OUT0] = {
.name = "32khz_out0",
.clk_reg = MAX77620_REG_CNFG1_32K,
.clk_enable_mask = MAX77620_CNFG1_32K_OUT0_EN,
},
};
static struct max77686_clk_init_data *to_max77686_clk_init_data(
struct clk_hw *hw)
{
return container_of(hw, struct max77686_clk_init_data, hw);
}
static int max77686_clk_prepare(struct clk_hw *hw)
{
struct max77686_clk_init_data *max77686 = to_max77686_clk_init_data(hw);
return regmap_update_bits(max77686->regmap, max77686->clk_info->clk_reg,
max77686->clk_info->clk_enable_mask,
max77686->clk_info->clk_enable_mask);
}
static void max77686_clk_unprepare(struct clk_hw *hw)
{
struct max77686_clk_init_data *max77686 = to_max77686_clk_init_data(hw);
regmap_update_bits(max77686->regmap, max77686->clk_info->clk_reg,
max77686->clk_info->clk_enable_mask,
~max77686->clk_info->clk_enable_mask);
}
static int max77686_clk_is_prepared(struct clk_hw *hw)
{
struct max77686_clk_init_data *max77686 = to_max77686_clk_init_data(hw);
int ret;
u32 val;
ret = regmap_read(max77686->regmap, max77686->clk_info->clk_reg, &val);
if (ret < 0)
return -EINVAL;
return val & max77686->clk_info->clk_enable_mask;
}
static unsigned long max77686_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
return 32768;
}
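/*
 * These PMICs output buffered copies of the on-chip 32.768 kHz
 * oscillator, so the rate is a constant 32768 Hz.
 */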
static const struct clk_ops max77686_clk_ops = {
.prepare = max77686_clk_prepare,
.unprepare = max77686_clk_unprepare,
.is_prepared = max77686_clk_is_prepared,
.recalc_rate = max77686_recalc_rate,
};
static struct clk_hw *
of_clk_max77686_get(struct of_phandle_args *clkspec, void *data)
{
struct max77686_clk_driver_data *drv_data = data;
unsigned int idx = clkspec->args[0];
if (idx >= drv_data->num_clks) {
pr_err("%s: invalid index %u\n", __func__, idx);
return ERR_PTR(-EINVAL);
}
return &drv_data->max_clk_data[idx].hw;
}
static int max77686_clk_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device *parent = dev->parent;
const struct platform_device_id *id = platform_get_device_id(pdev);
struct max77686_clk_driver_data *drv_data;
const struct max77686_hw_clk_info *hw_clks;
struct regmap *regmap;
int i, ret, num_clks;
drv_data = devm_kzalloc(dev, sizeof(*drv_data), GFP_KERNEL);
if (!drv_data)
return -ENOMEM;
regmap = dev_get_regmap(parent, NULL);
if (!regmap) {
dev_err(dev, "Failed to get rtc regmap\n");
return -ENODEV;
}
drv_data->chip = id->driver_data;
switch (drv_data->chip) {
case CHIP_MAX77686:
num_clks = MAX77686_CLKS_NUM;
hw_clks = max77686_hw_clks_info;
break;
case CHIP_MAX77802:
num_clks = MAX77802_CLKS_NUM;
hw_clks = max77802_hw_clks_info;
break;
case CHIP_MAX77620:
num_clks = MAX77620_CLKS_NUM;
hw_clks = max77620_hw_clks_info;
break;
default:
dev_err(dev, "Unknown Chip ID\n");
return -EINVAL;
}
drv_data->num_clks = num_clks;
drv_data->max_clk_data = devm_kcalloc(dev, num_clks,
sizeof(*drv_data->max_clk_data),
GFP_KERNEL);
if (!drv_data->max_clk_data)
return -ENOMEM;
for (i = 0; i < num_clks; i++) {
struct max77686_clk_init_data *max_clk_data;
const char *clk_name;
max_clk_data = &drv_data->max_clk_data[i];
max_clk_data->regmap = regmap;
max_clk_data->clk_info = &hw_clks[i];
max_clk_data->clk_idata.flags = hw_clks[i].flags;
max_clk_data->clk_idata.ops = &max77686_clk_ops;
if (parent->of_node &&
!of_property_read_string_index(parent->of_node,
"clock-output-names",
i, &clk_name))
max_clk_data->clk_idata.name = clk_name;
else
max_clk_data->clk_idata.name = hw_clks[i].name;
max_clk_data->hw.init = &max_clk_data->clk_idata;
ret = devm_clk_hw_register(dev, &max_clk_data->hw);
if (ret) {
dev_err(dev, "Failed to clock register: %d\n", ret);
return ret;
}
ret = devm_clk_hw_register_clkdev(dev, &max_clk_data->hw,
max_clk_data->clk_idata.name,
NULL);
if (ret < 0) {
dev_err(dev, "Failed to clkdev register: %d\n", ret);
return ret;
}
}
if (parent->of_node) {
ret = devm_of_clk_add_hw_provider(dev, of_clk_max77686_get,
drv_data);
if (ret < 0) {
dev_err(dev, "Failed to register OF clock provider: %d\n",
ret);
return ret;
}
}
/* MAX77802: Enable low-jitter mode on the 32khz clocks. */
if (drv_data->chip == CHIP_MAX77802) {
ret = regmap_update_bits(regmap, MAX77802_REG_32KHZ,
1 << MAX77802_CLOCK_LOW_JITTER_SHIFT,
1 << MAX77802_CLOCK_LOW_JITTER_SHIFT);
if (ret < 0) {
dev_err(dev, "Failed to config low-jitter: %d\n", ret);
return ret;
}
}
return 0;
}
static const struct platform_device_id max77686_clk_id[] = {
{ "max77686-clk", .driver_data = CHIP_MAX77686, },
{ "max77802-clk", .driver_data = CHIP_MAX77802, },
{ "max77620-clock", .driver_data = CHIP_MAX77620, },
{},
};
MODULE_DEVICE_TABLE(platform, max77686_clk_id);
static struct platform_driver max77686_clk_driver = {
.driver = {
.name = "max77686-clk",
},
.probe = max77686_clk_probe,
.id_table = max77686_clk_id,
};
module_platform_driver(max77686_clk_driver);
MODULE_DESCRIPTION("MAXIM 77686 Clock Driver");
MODULE_AUTHOR("Jonghwa Lee <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/clk/clk-max77686.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Common clock framework driver for the Versaclock7 family of timing devices.
*
* Copyright (c) 2022 Renesas Electronics Corporation
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/i2c.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/swab.h>
/*
* 16-bit register address: the lower 8 bits of the register address come
* from the offset addr byte and the upper 8 bits come from the page register.
*/
#define VC7_PAGE_ADDR 0xFD
#define VC7_PAGE_WINDOW 256
#define VC7_MAX_REG 0x364
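/*
 * For example, register 0x364 is reached by writing 0x03 to the page
 * register at 0xFD and then addressing offset 0x64 within the 256-byte
 * window.
 */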
/* Maximum number of banks supported by VC7 */
#define VC7_NUM_BANKS 7
/* Maximum number of FODs supported by VC7 */
#define VC7_NUM_FOD 3
/* Maximum number of IODs supported by VC7 */
#define VC7_NUM_IOD 4
/* Maximum number of outputs supported by VC7 */
#define VC7_NUM_OUT 12
/* VCO valid range is 9.5 GHz to 10.7 GHz */
#define VC7_APLL_VCO_MIN 9500000000UL
#define VC7_APLL_VCO_MAX 10700000000UL
/* APLL denominator is fixed at 2^27 */
#define VC7_APLL_DENOMINATOR_BITS 27
/* FOD 1st stage denominator is fixed 2^34 */
#define VC7_FOD_DENOMINATOR_BITS 34
/* IOD can operate between 1kHz and 650MHz */
#define VC7_IOD_RATE_MIN 1000UL
#define VC7_IOD_RATE_MAX 650000000UL
#define VC7_IOD_MIN_DIVISOR 14
#define VC7_IOD_MAX_DIVISOR 0x1ffffff /* 25-bit */
#define VC7_FOD_RATE_MIN 1000UL
#define VC7_FOD_RATE_MAX 650000000UL
#define VC7_FOD_1ST_STAGE_RATE_MIN 33000000UL /* 33 MHz */
#define VC7_FOD_1ST_STAGE_RATE_MAX 650000000UL /* 650 MHz */
#define VC7_FOD_1ST_INT_MAX 324
#define VC7_FOD_2ND_INT_MIN 2
#define VC7_FOD_2ND_INT_MAX 0x1ffff /* 17-bit */
/* VC7 Registers */
#define VC7_REG_XO_CNFG 0x2C
#define VC7_REG_XO_CNFG_COUNT 4
#define VC7_REG_XO_IB_H_DIV_SHIFT 24
#define VC7_REG_XO_IB_H_DIV_MASK GENMASK(28, VC7_REG_XO_IB_H_DIV_SHIFT)
#define VC7_REG_APLL_FB_DIV_FRAC 0x120
#define VC7_REG_APLL_FB_DIV_FRAC_COUNT 4
#define VC7_REG_APLL_FB_DIV_FRAC_MASK GENMASK(26, 0)
#define VC7_REG_APLL_FB_DIV_INT 0x124
#define VC7_REG_APLL_FB_DIV_INT_COUNT 2
#define VC7_REG_APLL_FB_DIV_INT_MASK GENMASK(9, 0)
#define VC7_REG_APLL_CNFG 0x127
#define VC7_REG_APLL_EN_DOUBLER BIT(0)
#define VC7_REG_OUT_BANK_CNFG(idx) (0x280 + (0x4 * (idx)))
#define VC7_REG_OUTPUT_BANK_SRC_MASK GENMASK(2, 0)
#define VC7_REG_FOD_INT_CNFG(idx) (0x1E0 + (0x10 * (idx)))
#define VC7_REG_FOD_INT_CNFG_COUNT 8
#define VC7_REG_FOD_1ST_INT_MASK GENMASK(8, 0)
#define VC7_REG_FOD_2ND_INT_SHIFT 9
#define VC7_REG_FOD_2ND_INT_MASK GENMASK(25, VC7_REG_FOD_2ND_INT_SHIFT)
#define VC7_REG_FOD_FRAC_SHIFT 26
#define VC7_REG_FOD_FRAC_MASK GENMASK_ULL(59, VC7_REG_FOD_FRAC_SHIFT)
#define VC7_REG_IOD_INT_CNFG(idx) (0x1C0 + (0x8 * (idx)))
#define VC7_REG_IOD_INT_CNFG_COUNT 4
#define VC7_REG_IOD_INT_MASK GENMASK(24, 0)
#define VC7_REG_ODRV_EN(idx) (0x240 + (0x4 * (idx)))
#define VC7_REG_OUT_DIS BIT(0)
struct vc7_driver_data;
static const struct regmap_config vc7_regmap_config;
/* Supported Renesas VC7 models */
enum vc7_model {
VC7_RC21008A,
};
struct vc7_chip_info {
const enum vc7_model model;
const unsigned int banks[VC7_NUM_BANKS];
const unsigned int num_banks;
const unsigned int outputs[VC7_NUM_OUT];
const unsigned int num_outputs;
};
/*
* Changing the APLL frequency is currently not supported.
* The APLL is treated as an opaque block between the XO and the FOD/IODs;
* its frequency is computed from the current state of the device.
*/
struct vc7_apll_data {
struct clk *clk;
struct vc7_driver_data *vc7;
u8 xo_ib_h_div;
u8 en_doubler;
u16 apll_fb_div_int;
u32 apll_fb_div_frac;
};
struct vc7_fod_data {
struct clk_hw hw;
struct vc7_driver_data *vc7;
unsigned int num;
u32 fod_1st_int;
u32 fod_2nd_int;
u64 fod_frac;
};
struct vc7_iod_data {
struct clk_hw hw;
struct vc7_driver_data *vc7;
unsigned int num;
u32 iod_int;
};
struct vc7_out_data {
struct clk_hw hw;
struct vc7_driver_data *vc7;
unsigned int num;
unsigned int out_dis;
};
struct vc7_driver_data {
struct i2c_client *client;
struct regmap *regmap;
const struct vc7_chip_info *chip_info;
struct clk *pin_xin;
struct vc7_apll_data clk_apll;
struct vc7_fod_data clk_fod[VC7_NUM_FOD];
struct vc7_iod_data clk_iod[VC7_NUM_IOD];
struct vc7_out_data clk_out[VC7_NUM_OUT];
};
struct vc7_bank_src_map {
enum vc7_bank_src_type {
VC7_FOD,
VC7_IOD,
} type;
union _divider {
struct vc7_iod_data *iod;
struct vc7_fod_data *fod;
} src;
};
static struct clk_hw *vc7_of_clk_get(struct of_phandle_args *clkspec,
void *data)
{
struct vc7_driver_data *vc7 = data;
unsigned int idx = clkspec->args[0];
if (idx >= vc7->chip_info->num_outputs)
return ERR_PTR(-EINVAL);
return &vc7->clk_out[idx].hw;
}
static const unsigned int RC21008A_index_to_output_mapping[] = {
1, 2, 3, 6, 7, 8, 10, 11
};
static int vc7_map_index_to_output(const enum vc7_model model, const unsigned int i)
{
switch (model) {
case VC7_RC21008A:
return RC21008A_index_to_output_mapping[i];
default:
return i;
}
}
/* bank to output mapping, same across all variants */
static const unsigned int output_bank_mapping[] = {
0, /* Output 0 */
1, /* Output 1 */
2, /* Output 2 */
2, /* Output 3 */
3, /* Output 4 */
3, /* Output 5 */
3, /* Output 6 */
3, /* Output 7 */
4, /* Output 8 */
4, /* Output 9 */
5, /* Output 10 */
6 /* Output 11 */
};
/**
* vc7_64_mul_64_to_128() - Multiply two u64 and return an unsigned 128-bit integer
* as an upper and lower part.
*
* @left: The left argument.
* @right: The right argument.
* @hi: The upper 64-bits of the 128-bit product.
* @lo: The lower 64-bits of the 128-bit product.
*
* From mul_64_64 in crypto/ecc.c:350 in the linux kernel, accessed in v5.17.2.
*/
static void vc7_64_mul_64_to_128(u64 left, u64 right, u64 *hi, u64 *lo)
{
u64 a0 = left & 0xffffffffull;
u64 a1 = left >> 32;
u64 b0 = right & 0xffffffffull;
u64 b1 = right >> 32;
u64 m0 = a0 * b0;
u64 m1 = a0 * b1;
u64 m2 = a1 * b0;
u64 m3 = a1 * b1;
m2 += (m0 >> 32);
m2 += m1;
/* Overflow */
if (m2 < m1)
m3 += 0x100000000ull;
*lo = (m0 & 0xffffffffull) | (m2 << 32);
*hi = m3 + (m2 >> 32);
}
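/*
 * Example (hypothetical operands): left = right = 1ull << 32 yields
 * hi = 1, lo = 0, i.e. the 128-bit product 2^64.
 */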
/**
* vc7_128_div_64_to_64() - Divides a 128-bit uint by a 64-bit divisor, return a 64-bit quotient.
*
* @numhi: The upper 64-bits of the dividend.
* @numlo: The lower 64-bits of the dividend.
* @den: The denominator (divisor).
* @r: The remainder, pass NULL if the remainder is not needed.
*
* Originally from libdivide, modified to use kernel u64/u32 types.
*
* See https://github.com/ridiculousfish/libdivide/blob/master/libdivide.h#L471.
*
* Return: The 64-bit quotient of the division.
*
* In case of overflow or division by zero, max(u64) is returned.
*/
static u64 vc7_128_div_64_to_64(u64 numhi, u64 numlo, u64 den, u64 *r)
{
/*
* We work in base 2**32.
* A uint32 holds a single digit. A uint64 holds two digits.
* Our numerator is conceptually [num3, num2, num1, num0].
* Our denominator is [den1, den0].
*/
const u64 b = ((u64)1 << 32);
/* The high and low digits of our computed quotient. */
u32 q1, q0;
/* The normalization shift factor */
int shift;
/*
* The high and low digits of our denominator (after normalizing).
* Also the low 2 digits of our numerator (after normalizing).
*/
u32 den1, den0, num1, num0;
/* A partial remainder. */
u64 rem;
/*
* The estimated quotient, and its corresponding remainder (unrelated
* to true remainder).
*/
u64 qhat, rhat;
/* Variables used to correct the estimated quotient. */
u64 c1, c2;
/* Check for overflow and divide by 0. */
if (numhi >= den) {
if (r)
*r = ~0ull;
return ~0ull;
}
/*
* Determine the normalization factor. We multiply den by this, so that
* its leading digit is at least half b. In binary this means just
* shifting left by the number of leading zeros, so that there's a 1 in
* the MSB.
*
* We also shift numer by the same amount. This cannot overflow because
* numhi < den. The expression (-shift & 63) is the same as (64 -
* shift), except it avoids the UB of shifting by 64. The funny bitwise
* 'and' ensures that numlo does not get shifted into numhi if shift is
* 0. clang 11 has an x86 codegen bug here: see LLVM bug 50118. The
* sequence below avoids it.
*/
shift = __builtin_clzll(den);
den <<= shift;
numhi <<= shift;
numhi |= (numlo >> (-shift & 63)) & (-(s64)shift >> 63);
numlo <<= shift;
/*
* Extract the low digits of the numerator and both digits of the
* denominator.
*/
num1 = (u32)(numlo >> 32);
num0 = (u32)(numlo & 0xFFFFFFFFu);
den1 = (u32)(den >> 32);
den0 = (u32)(den & 0xFFFFFFFFu);
/*
* We wish to compute q1 = [n3 n2 n1] / [d1 d0].
* Estimate q1 as [n3 n2] / [d1], and then correct it.
* Note while qhat may be 2 digits, q1 is always 1 digit.
*/
qhat = div64_u64_rem(numhi, den1, &rhat);
c1 = qhat * den0;
c2 = rhat * b + num1;
if (c1 > c2)
qhat -= (c1 - c2 > den) ? 2 : 1;
q1 = (u32)qhat;
/* Compute the true (partial) remainder. */
rem = numhi * b + num1 - q1 * den;
/*
* We wish to compute q0 = [rem1 rem0 n0] / [d1 d0].
* Estimate q0 as [rem1 rem0] / [d1] and correct it.
*/
qhat = div64_u64_rem(rem, den1, &rhat);
c1 = qhat * den0;
c2 = rhat * b + num0;
if (c1 > c2)
qhat -= (c1 - c2 > den) ? 2 : 1;
q0 = (u32)qhat;
/* Return remainder if requested. */
if (r)
*r = (rem * b + num0 - q0 * den) >> shift;
return ((u64)q1 << 32) | q0;
}
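/*
 * Example (hypothetical operands): dividing 2^64 (numhi = 1, numlo = 0)
 * by 3 returns 0x5555555555555555 with remainder 1.
 */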
static int vc7_get_bank_clk(struct vc7_driver_data *vc7,
unsigned int bank_idx,
unsigned int output_bank_src,
struct vc7_bank_src_map *map)
{
/* Mapping from Table 38 in datasheet */
if (bank_idx == 0 || bank_idx == 1) {
switch (output_bank_src) {
case 0:
map->type = VC7_IOD,
map->src.iod = &vc7->clk_iod[0];
return 0;
case 1:
map->type = VC7_IOD,
map->src.iod = &vc7->clk_iod[1];
return 0;
case 4:
map->type = VC7_FOD,
map->src.fod = &vc7->clk_fod[0];
return 0;
case 5:
map->type = VC7_FOD,
map->src.fod = &vc7->clk_fod[1];
return 0;
default:
break;
}
} else if (bank_idx == 2) {
switch (output_bank_src) {
case 1:
map->type = VC7_IOD,
map->src.iod = &vc7->clk_iod[1];
return 0;
case 4:
map->type = VC7_FOD,
map->src.fod = &vc7->clk_fod[0];
return 0;
case 5:
map->type = VC7_FOD,
map->src.fod = &vc7->clk_fod[1];
return 0;
default:
break;
}
} else if (bank_idx == 3) {
switch (output_bank_src) {
case 4:
map->type = VC7_FOD,
map->src.fod = &vc7->clk_fod[0];
return 0;
case 5:
map->type = VC7_FOD,
map->src.fod = &vc7->clk_fod[1];
return 0;
case 6:
map->type = VC7_FOD,
map->src.fod = &vc7->clk_fod[2];
return 0;
default:
break;
}
} else if (bank_idx == 4) {
switch (output_bank_src) {
case 0:
/* CLKIN1 not supported in this driver */
break;
case 2:
map->type = VC7_IOD,
map->src.iod = &vc7->clk_iod[2];
return 0;
case 5:
map->type = VC7_FOD,
map->src.fod = &vc7->clk_fod[1];
return 0;
case 6:
map->type = VC7_FOD,
map->src.fod = &vc7->clk_fod[2];
return 0;
case 7:
/* CLKIN0 not supported in this driver */
break;
default:
break;
}
} else if (bank_idx == 5) {
switch (output_bank_src) {
case 0:
/* CLKIN1 not supported in this driver */
break;
case 1:
/* XIN_REFIN not supported in this driver */
break;
case 2:
map->type = VC7_IOD,
map->src.iod = &vc7->clk_iod[2];
return 0;
case 3:
map->type = VC7_IOD,
map->src.iod = &vc7->clk_iod[3];
return 0;
case 5:
map->type = VC7_FOD,
map->src.fod = &vc7->clk_fod[1];
return 0;
case 6:
map->type = VC7_FOD,
map->src.fod = &vc7->clk_fod[2];
return 0;
case 7:
/* CLKIN0 not supported in this driver */
break;
default:
break;
}
} else if (bank_idx == 6) {
switch (output_bank_src) {
case 0:
/* CLKIN1 not supported in this driver */
break;
case 2:
map->type = VC7_IOD,
map->src.iod = &vc7->clk_iod[2];
return 0;
case 3:
map->type = VC7_IOD,
map->src.iod = &vc7->clk_iod[3];
return 0;
case 5:
map->type = VC7_FOD,
map->src.fod = &vc7->clk_fod[1];
return 0;
case 6:
map->type = VC7_FOD,
map->src.fod = &vc7->clk_fod[2];
return 0;
case 7:
/* CLKIN0 not supported in this driver */
break;
default:
break;
}
}
pr_warn("bank_src%d = %d is not supported\n", bank_idx, output_bank_src);
return -1;
}
static int vc7_read_apll(struct vc7_driver_data *vc7)
{
int err;
u32 val32;
u16 val16;
err = regmap_bulk_read(vc7->regmap,
VC7_REG_XO_CNFG,
(u32 *)&val32,
VC7_REG_XO_CNFG_COUNT);
if (err) {
dev_err(&vc7->client->dev, "failed to read XO_CNFG\n");
return err;
}
vc7->clk_apll.xo_ib_h_div = (val32 & VC7_REG_XO_IB_H_DIV_MASK)
>> VC7_REG_XO_IB_H_DIV_SHIFT;
err = regmap_read(vc7->regmap,
VC7_REG_APLL_CNFG,
&val32);
if (err) {
dev_err(&vc7->client->dev, "failed to read APLL_CNFG\n");
return err;
}
vc7->clk_apll.en_doubler = val32 & VC7_REG_APLL_EN_DOUBLER;
err = regmap_bulk_read(vc7->regmap,
VC7_REG_APLL_FB_DIV_FRAC,
(u32 *)&val32,
VC7_REG_APLL_FB_DIV_FRAC_COUNT);
if (err) {
dev_err(&vc7->client->dev, "failed to read APLL_FB_DIV_FRAC\n");
return err;
}
vc7->clk_apll.apll_fb_div_frac = val32 & VC7_REG_APLL_FB_DIV_FRAC_MASK;
err = regmap_bulk_read(vc7->regmap,
VC7_REG_APLL_FB_DIV_INT,
(u16 *)&val16,
VC7_REG_APLL_FB_DIV_INT_COUNT);
if (err) {
dev_err(&vc7->client->dev, "failed to read APLL_FB_DIV_INT\n");
return err;
}
vc7->clk_apll.apll_fb_div_int = val16 & VC7_REG_APLL_FB_DIV_INT_MASK;
return 0;
}
static int vc7_read_fod(struct vc7_driver_data *vc7, unsigned int idx)
{
int err;
u64 val;
err = regmap_bulk_read(vc7->regmap,
VC7_REG_FOD_INT_CNFG(idx),
(u64 *)&val,
VC7_REG_FOD_INT_CNFG_COUNT);
if (err) {
dev_err(&vc7->client->dev, "failed to read FOD%d\n", idx);
return err;
}
vc7->clk_fod[idx].fod_1st_int = (val & VC7_REG_FOD_1ST_INT_MASK);
vc7->clk_fod[idx].fod_2nd_int =
(val & VC7_REG_FOD_2ND_INT_MASK) >> VC7_REG_FOD_2ND_INT_SHIFT;
vc7->clk_fod[idx].fod_frac = (val & VC7_REG_FOD_FRAC_MASK)
>> VC7_REG_FOD_FRAC_SHIFT;
return 0;
}
static int vc7_write_fod(struct vc7_driver_data *vc7, unsigned int idx)
{
int err;
u64 val;
/*
* FOD dividers are part of an atomic group where fod_1st_int,
* fod_2nd_int, and fod_frac must be written together. The new divider
* is applied when the MSB of fod_frac is written.
*/
err = regmap_bulk_read(vc7->regmap,
VC7_REG_FOD_INT_CNFG(idx),
(u64 *)&val,
VC7_REG_FOD_INT_CNFG_COUNT);
if (err) {
dev_err(&vc7->client->dev, "failed to read FOD%d\n", idx);
return err;
}
val = u64_replace_bits(val,
vc7->clk_fod[idx].fod_1st_int,
VC7_REG_FOD_1ST_INT_MASK);
val = u64_replace_bits(val,
vc7->clk_fod[idx].fod_2nd_int,
VC7_REG_FOD_2ND_INT_MASK);
val = u64_replace_bits(val,
vc7->clk_fod[idx].fod_frac,
VC7_REG_FOD_FRAC_MASK);
err = regmap_bulk_write(vc7->regmap,
VC7_REG_FOD_INT_CNFG(idx),
(u64 *)&val,
sizeof(u64));
if (err) {
dev_err(&vc7->client->dev, "failed to write FOD%d\n", idx);
return err;
}
return 0;
}
static int vc7_read_iod(struct vc7_driver_data *vc7, unsigned int idx)
{
int err;
u32 val;
err = regmap_bulk_read(vc7->regmap,
VC7_REG_IOD_INT_CNFG(idx),
(u32 *)&val,
VC7_REG_IOD_INT_CNFG_COUNT);
if (err) {
dev_err(&vc7->client->dev, "failed to read IOD%d\n", idx);
return err;
}
vc7->clk_iod[idx].iod_int = (val & VC7_REG_IOD_INT_MASK);
return 0;
}
static int vc7_write_iod(struct vc7_driver_data *vc7, unsigned int idx)
{
int err;
u32 val;
/*
* IOD divider field is atomic and all bits must be written.
* The new divider is applied when the MSB of iod_int is written.
*/
err = regmap_bulk_read(vc7->regmap,
VC7_REG_IOD_INT_CNFG(idx),
(u32 *)&val,
VC7_REG_IOD_INT_CNFG_COUNT);
if (err) {
dev_err(&vc7->client->dev, "failed to read IOD%d\n", idx);
return err;
}
val = u32_replace_bits(val,
vc7->clk_iod[idx].iod_int,
VC7_REG_IOD_INT_MASK);
err = regmap_bulk_write(vc7->regmap,
VC7_REG_IOD_INT_CNFG(idx),
(u32 *)&val,
sizeof(u32));
if (err) {
dev_err(&vc7->client->dev, "failed to write IOD%d\n", idx);
return err;
}
return 0;
}
static int vc7_read_output(struct vc7_driver_data *vc7, unsigned int idx)
{
int err;
unsigned int val, out_num;
out_num = vc7_map_index_to_output(vc7->chip_info->model, idx);
err = regmap_read(vc7->regmap,
VC7_REG_ODRV_EN(out_num),
&val);
if (err) {
dev_err(&vc7->client->dev, "failed to read ODRV_EN[%d]\n", idx);
return err;
}
vc7->clk_out[idx].out_dis = val & VC7_REG_OUT_DIS;
return 0;
}
static int vc7_write_output(struct vc7_driver_data *vc7, unsigned int idx)
{
int err;
unsigned int out_num;
out_num = vc7_map_index_to_output(vc7->chip_info->model, idx);
err = regmap_write_bits(vc7->regmap,
VC7_REG_ODRV_EN(out_num),
VC7_REG_OUT_DIS,
vc7->clk_out[idx].out_dis);
if (err) {
dev_err(&vc7->client->dev, "failed to write ODRV_EN[%d]\n", idx);
return err;
}
return 0;
}
static unsigned long vc7_get_apll_rate(struct vc7_driver_data *vc7)
{
int err;
unsigned long xtal_rate;
u64 refin_div, apll_rate;
xtal_rate = clk_get_rate(vc7->pin_xin);
err = vc7_read_apll(vc7);
if (err) {
dev_err(&vc7->client->dev, "unable to read apll\n");
return err;
}
/* 0 is bypassed, 1 is reserved */
if (vc7->clk_apll.xo_ib_h_div < 2)
refin_div = xtal_rate;
else
refin_div = div64_u64(xtal_rate, vc7->clk_apll.xo_ib_h_div);
if (vc7->clk_apll.en_doubler)
refin_div *= 2;
/* divider = int + (frac / 2^27) */
apll_rate = (refin_div * vc7->clk_apll.apll_fb_div_int) +
((refin_div * vc7->clk_apll.apll_fb_div_frac) >> VC7_APLL_DENOMINATOR_BITS);
pr_debug("%s - xo_ib_h_div: %u, apll_fb_div_int: %u, apll_fb_div_frac: %u\n",
__func__, vc7->clk_apll.xo_ib_h_div, vc7->clk_apll.apll_fb_div_int,
vc7->clk_apll.apll_fb_div_frac);
pr_debug("%s - refin_div: %llu, apll rate: %llu\n",
__func__, refin_div, apll_rate);
return apll_rate;
}
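/*
 * Worked example with assumed values: a 50 MHz XO, xo_ib_h_div = 2,
 * doubler enabled, apll_fb_div_int = 200 and apll_fb_div_frac = 0 give
 * refin = 50 MHz / 2 * 2 = 50 MHz and an APLL rate of 50 MHz * 200 =
 * 10 GHz, inside the 9.5-10.7 GHz VCO range.
 */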
static void vc7_calc_iod_divider(unsigned long rate, unsigned long parent_rate,
u32 *divider)
{
*divider = DIV_ROUND_UP(parent_rate, rate);
if (*divider < VC7_IOD_MIN_DIVISOR)
*divider = VC7_IOD_MIN_DIVISOR;
if (*divider > VC7_IOD_MAX_DIVISOR)
*divider = VC7_IOD_MAX_DIVISOR;
}
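/*
 * Example: parent = 10 GHz and rate = 100 MHz give a divider of 100;
 * rates above parent_rate / 14 (~714 MHz here) clamp to the minimum
 * divider of 14.
 */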
static void vc7_calc_fod_1st_stage(unsigned long rate, unsigned long parent_rate,
u32 *div_int, u64 *div_frac)
{
u64 rem;
*div_int = (u32)div64_u64_rem(parent_rate, rate, &rem);
*div_frac = div64_u64(rem << VC7_FOD_DENOMINATOR_BITS, rate);
}
static unsigned long vc7_calc_fod_1st_stage_rate(unsigned long parent_rate,
u32 fod_1st_int, u64 fod_frac)
{
u64 numer, denom, hi, lo, divisor;
numer = fod_frac;
denom = BIT_ULL(VC7_FOD_DENOMINATOR_BITS);
if (fod_frac) {
vc7_64_mul_64_to_128(parent_rate, denom, &hi, &lo);
divisor = ((u64)fod_1st_int * denom) + numer;
return vc7_128_div_64_to_64(hi, lo, divisor, NULL);
}
return div64_u64(parent_rate, fod_1st_int);
}
static unsigned long vc7_calc_fod_2nd_stage_rate(unsigned long parent_rate,
u32 fod_1st_int, u32 fod_2nd_int, u64 fod_frac)
{
unsigned long fod_1st_stage_rate;
fod_1st_stage_rate = vc7_calc_fod_1st_stage_rate(parent_rate, fod_1st_int, fod_frac);
if (fod_2nd_int < 2)
return fod_1st_stage_rate;
/*
* There is a div-by-2 preceding the 2nd stage integer divider
* (not shown on block diagram) so the actual 2nd stage integer
* divisor is 2 * N.
*/
return div64_u64(fod_1st_stage_rate >> 1, fod_2nd_int);
}
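/*
 * Example (hypothetical divider state): parent = 10 GHz, fod_1st_int = 20,
 * fod_frac = 0 and fod_2nd_int = 5: the 1st stage outputs 500 MHz and the
 * 2nd stage outputs (500 MHz / 2) / 5 = 50 MHz.
 */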
static void vc7_calc_fod_divider(unsigned long rate, unsigned long parent_rate,
u32 *fod_1st_int, u32 *fod_2nd_int, u64 *fod_frac)
{
unsigned int allow_frac, i, best_frac_i;
unsigned long first_stage_rate;
vc7_calc_fod_1st_stage(rate, parent_rate, fod_1st_int, fod_frac);
first_stage_rate = vc7_calc_fod_1st_stage_rate(parent_rate, *fod_1st_int, *fod_frac);
*fod_2nd_int = 0;
/* Do we need the second stage integer divider? */
if (first_stage_rate < VC7_FOD_1ST_STAGE_RATE_MIN) {
allow_frac = 0;
best_frac_i = VC7_FOD_2ND_INT_MIN;
for (i = VC7_FOD_2ND_INT_MIN; i <= VC7_FOD_2ND_INT_MAX; i++) {
/*
* 1) There is a div-by-2 preceding the 2nd stage integer divider
* (not shown on block diagram) so the actual 2nd stage integer
* divisor is 2 * N.
* 2) Attempt to find an integer solution first. This means stepping
* through each 2nd stage integer and recalculating the 1st stage
* until the 1st stage frequency is out of bounds. If no integer
* solution is found, use the best fractional solution.
*/
vc7_calc_fod_1st_stage(rate * 2 * i, parent_rate, fod_1st_int, fod_frac);
first_stage_rate = vc7_calc_fod_1st_stage_rate(parent_rate,
*fod_1st_int,
*fod_frac);
/* Remember the first viable fractional solution */
if (best_frac_i == VC7_FOD_2ND_INT_MIN &&
first_stage_rate > VC7_FOD_1ST_STAGE_RATE_MIN) {
best_frac_i = i;
}
/* Is the divider viable? Prefer integer solutions over fractional. */
if (*fod_1st_int < VC7_FOD_1ST_INT_MAX &&
first_stage_rate >= VC7_FOD_1ST_STAGE_RATE_MIN &&
(allow_frac || *fod_frac == 0)) {
*fod_2nd_int = i;
break;
}
/* Ran out of divisors or the 1st stage frequency is out of range */
if (i >= VC7_FOD_2ND_INT_MAX ||
first_stage_rate > VC7_FOD_1ST_STAGE_RATE_MAX) {
allow_frac = 1;
i = best_frac_i;
/* Restore the best frac and rerun the loop for the last time */
if (best_frac_i != VC7_FOD_2ND_INT_MIN)
i--;
continue;
}
}
}
}
static unsigned long vc7_fod_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
struct vc7_fod_data *fod = container_of(hw, struct vc7_fod_data, hw);
struct vc7_driver_data *vc7 = fod->vc7;
int err;
unsigned long fod_rate;
err = vc7_read_fod(vc7, fod->num);
if (err) {
dev_err(&vc7->client->dev, "error reading registers for %s\n",
clk_hw_get_name(hw));
return err;
}
pr_debug("%s - %s: parent_rate: %lu\n", __func__, clk_hw_get_name(hw), parent_rate);
fod_rate = vc7_calc_fod_2nd_stage_rate(parent_rate, fod->fod_1st_int,
fod->fod_2nd_int, fod->fod_frac);
pr_debug("%s - %s: fod_1st_int: %u, fod_2nd_int: %u, fod_frac: %llu\n",
__func__, clk_hw_get_name(hw),
fod->fod_1st_int, fod->fod_2nd_int, fod->fod_frac);
pr_debug("%s - %s rate: %lu\n", __func__, clk_hw_get_name(hw), fod_rate);
return fod_rate;
}
static long vc7_fod_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *parent_rate)
{
struct vc7_fod_data *fod = container_of(hw, struct vc7_fod_data, hw);
unsigned long fod_rate;
pr_debug("%s - %s: requested rate: %lu, parent_rate: %lu\n",
__func__, clk_hw_get_name(hw), rate, *parent_rate);
vc7_calc_fod_divider(rate, *parent_rate,
&fod->fod_1st_int, &fod->fod_2nd_int, &fod->fod_frac);
fod_rate = vc7_calc_fod_2nd_stage_rate(*parent_rate, fod->fod_1st_int,
fod->fod_2nd_int, fod->fod_frac);
pr_debug("%s - %s: fod_1st_int: %u, fod_2nd_int: %u, fod_frac: %llu\n",
__func__, clk_hw_get_name(hw),
fod->fod_1st_int, fod->fod_2nd_int, fod->fod_frac);
pr_debug("%s - %s rate: %lu\n", __func__, clk_hw_get_name(hw), fod_rate);
return fod_rate;
}
static int vc7_fod_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate)
{
struct vc7_fod_data *fod = container_of(hw, struct vc7_fod_data, hw);
struct vc7_driver_data *vc7 = fod->vc7;
unsigned long fod_rate;
pr_debug("%s - %s: rate: %lu, parent_rate: %lu\n",
__func__, clk_hw_get_name(hw), rate, parent_rate);
if (rate < VC7_FOD_RATE_MIN || rate > VC7_FOD_RATE_MAX) {
dev_err(&vc7->client->dev,
"requested frequency %lu Hz for %s is out of range\n",
rate, clk_hw_get_name(hw));
return -EINVAL;
}
vc7_write_fod(vc7, fod->num);
fod_rate = vc7_calc_fod_2nd_stage_rate(parent_rate, fod->fod_1st_int,
fod->fod_2nd_int, fod->fod_frac);
pr_debug("%s - %s: fod_1st_int: %u, fod_2nd_int: %u, fod_frac: %llu\n",
__func__, clk_hw_get_name(hw),
fod->fod_1st_int, fod->fod_2nd_int, fod->fod_frac);
pr_debug("%s - %s rate: %lu\n", __func__, clk_hw_get_name(hw), fod_rate);
return 0;
}
static const struct clk_ops vc7_fod_ops = {
.recalc_rate = vc7_fod_recalc_rate,
.round_rate = vc7_fod_round_rate,
.set_rate = vc7_fod_set_rate,
};
static unsigned long vc7_iod_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
struct vc7_iod_data *iod = container_of(hw, struct vc7_iod_data, hw);
struct vc7_driver_data *vc7 = iod->vc7;
int err;
unsigned long iod_rate;
err = vc7_read_iod(vc7, iod->num);
if (err) {
dev_err(&vc7->client->dev, "error reading registers for %s\n",
clk_hw_get_name(hw));
return err;
}
iod_rate = div64_u64(parent_rate, iod->iod_int);
pr_debug("%s - %s: iod_int: %u\n", __func__, clk_hw_get_name(hw), iod->iod_int);
pr_debug("%s - %s rate: %lu\n", __func__, clk_hw_get_name(hw), iod_rate);
return iod_rate;
}
static long vc7_iod_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *parent_rate)
{
struct vc7_iod_data *iod = container_of(hw, struct vc7_iod_data, hw);
unsigned long iod_rate;
pr_debug("%s - %s: requested rate: %lu, parent_rate: %lu\n",
__func__, clk_hw_get_name(hw), rate, *parent_rate);
vc7_calc_iod_divider(rate, *parent_rate, &iod->iod_int);
iod_rate = div64_u64(*parent_rate, iod->iod_int);
pr_debug("%s - %s: iod_int: %u\n", __func__, clk_hw_get_name(hw), iod->iod_int);
pr_debug("%s - %s rate: %ld\n", __func__, clk_hw_get_name(hw), iod_rate);
return iod_rate;
}
static int vc7_iod_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate)
{
struct vc7_iod_data *iod = container_of(hw, struct vc7_iod_data, hw);
struct vc7_driver_data *vc7 = iod->vc7;
unsigned long iod_rate;
pr_debug("%s - %s: rate: %lu, parent_rate: %lu\n",
__func__, clk_hw_get_name(hw), rate, parent_rate);
if (rate < VC7_IOD_RATE_MIN || rate > VC7_IOD_RATE_MAX) {
dev_err(&vc7->client->dev,
"requested frequency %lu Hz for %s is out of range\n",
rate, clk_hw_get_name(hw));
return -EINVAL;
}
vc7_write_iod(vc7, iod->num);
iod_rate = div64_u64(parent_rate, iod->iod_int);
pr_debug("%s - %s: iod_int: %u\n", __func__, clk_hw_get_name(hw), iod->iod_int);
pr_debug("%s - %s rate: %ld\n", __func__, clk_hw_get_name(hw), iod_rate);
return 0;
}
static const struct clk_ops vc7_iod_ops = {
.recalc_rate = vc7_iod_recalc_rate,
.round_rate = vc7_iod_round_rate,
.set_rate = vc7_iod_set_rate,
};
static int vc7_clk_out_prepare(struct clk_hw *hw)
{
struct vc7_out_data *out = container_of(hw, struct vc7_out_data, hw);
struct vc7_driver_data *vc7 = out->vc7;
int err;
out->out_dis = 0;
err = vc7_write_output(vc7, out->num);
if (err) {
dev_err(&vc7->client->dev, "error writing registers for %s\n",
clk_hw_get_name(hw));
return err;
}
pr_debug("%s - %s: clk prepared\n", __func__, clk_hw_get_name(hw));
return 0;
}
static void vc7_clk_out_unprepare(struct clk_hw *hw)
{
struct vc7_out_data *out = container_of(hw, struct vc7_out_data, hw);
struct vc7_driver_data *vc7 = out->vc7;
int err;
out->out_dis = 1;
err = vc7_write_output(vc7, out->num);
if (err) {
dev_err(&vc7->client->dev, "error writing registers for %s\n",
clk_hw_get_name(hw));
return;
}
pr_debug("%s - %s: clk unprepared\n", __func__, clk_hw_get_name(hw));
}
static int vc7_clk_out_is_enabled(struct clk_hw *hw)
{
struct vc7_out_data *out = container_of(hw, struct vc7_out_data, hw);
struct vc7_driver_data *vc7 = out->vc7;
int err, is_enabled;
err = vc7_read_output(vc7, out->num);
if (err) {
dev_err(&vc7->client->dev, "error reading registers for %s\n",
clk_hw_get_name(hw));
return err;
}
is_enabled = !out->out_dis;
pr_debug("%s - %s: is_enabled=%d\n", __func__, clk_hw_get_name(hw), is_enabled);
return is_enabled;
}
static const struct clk_ops vc7_clk_out_ops = {
.prepare = vc7_clk_out_prepare,
.unprepare = vc7_clk_out_unprepare,
.is_enabled = vc7_clk_out_is_enabled,
};
static int vc7_probe(struct i2c_client *client)
{
struct vc7_driver_data *vc7;
struct clk_init_data clk_init;
struct vc7_bank_src_map bank_src_map;
const char *node_name, *apll_name;
const char *parent_names[1];
unsigned int i, val, bank_idx, out_num;
unsigned long apll_rate;
int ret;
vc7 = devm_kzalloc(&client->dev, sizeof(*vc7), GFP_KERNEL);
if (!vc7)
return -ENOMEM;
i2c_set_clientdata(client, vc7);
vc7->client = client;
vc7->chip_info = i2c_get_match_data(client);
vc7->pin_xin = devm_clk_get(&client->dev, "xin");
if (PTR_ERR(vc7->pin_xin) == -EPROBE_DEFER) {
return dev_err_probe(&client->dev, -EPROBE_DEFER,
"xin not specified\n");
}
vc7->regmap = devm_regmap_init_i2c(client, &vc7_regmap_config);
if (IS_ERR(vc7->regmap)) {
return dev_err_probe(&client->dev, PTR_ERR(vc7->regmap),
"failed to allocate register map\n");
}
if (of_property_read_string(client->dev.of_node, "clock-output-names",
&node_name))
node_name = client->dev.of_node->name;
/* Register APLL */
apll_rate = vc7_get_apll_rate(vc7);
apll_name = kasprintf(GFP_KERNEL, "%s_apll", node_name);
vc7->clk_apll.clk = clk_register_fixed_rate(&client->dev, apll_name,
__clk_get_name(vc7->pin_xin),
0, apll_rate);
kfree(apll_name); /* ccf made a copy of the name */
if (IS_ERR(vc7->clk_apll.clk)) {
return dev_err_probe(&client->dev, PTR_ERR(vc7->clk_apll.clk),
"failed to register apll\n");
}
/* Register FODs */
for (i = 0; i < VC7_NUM_FOD; i++) {
memset(&clk_init, 0, sizeof(clk_init));
clk_init.name = kasprintf(GFP_KERNEL, "%s_fod%d", node_name, i);
clk_init.ops = &vc7_fod_ops;
clk_init.parent_names = parent_names;
parent_names[0] = __clk_get_name(vc7->clk_apll.clk);
clk_init.num_parents = 1;
vc7->clk_fod[i].num = i;
vc7->clk_fod[i].vc7 = vc7;
vc7->clk_fod[i].hw.init = &clk_init;
ret = devm_clk_hw_register(&client->dev, &vc7->clk_fod[i].hw);
if (ret)
goto err_clk_register;
kfree(clk_init.name); /* ccf made a copy of the name */
}
/* Register IODs */
for (i = 0; i < VC7_NUM_IOD; i++) {
memset(&clk_init, 0, sizeof(clk_init));
clk_init.name = kasprintf(GFP_KERNEL, "%s_iod%d", node_name, i);
clk_init.ops = &vc7_iod_ops;
clk_init.parent_names = parent_names;
parent_names[0] = __clk_get_name(vc7->clk_apll.clk);
clk_init.num_parents = 1;
vc7->clk_iod[i].num = i;
vc7->clk_iod[i].vc7 = vc7;
vc7->clk_iod[i].hw.init = &clk_init;
ret = devm_clk_hw_register(&client->dev, &vc7->clk_iod[i].hw);
if (ret)
goto err_clk_register;
kfree(clk_init.name); /* ccf made a copy of the name */
}
/* Register outputs */
for (i = 0; i < vc7->chip_info->num_outputs; i++) {
out_num = vc7_map_index_to_output(vc7->chip_info->model, i);
/*
* This driver does not support remapping FOD/IOD to banks.
* The device state is read and the driver is setup to match
* the device's existing mapping.
*/
bank_idx = output_bank_mapping[out_num];
regmap_read(vc7->regmap, VC7_REG_OUT_BANK_CNFG(bank_idx), &val);
val &= VC7_REG_OUTPUT_BANK_SRC_MASK;
memset(&bank_src_map, 0, sizeof(bank_src_map));
ret = vc7_get_bank_clk(vc7, bank_idx, val, &bank_src_map);
if (ret) {
dev_err_probe(&client->dev, ret,
"unable to register output %d\n", i);
return ret;
}
switch (bank_src_map.type) {
case VC7_FOD:
parent_names[0] = clk_hw_get_name(&bank_src_map.src.fod->hw);
break;
case VC7_IOD:
parent_names[0] = clk_hw_get_name(&bank_src_map.src.iod->hw);
break;
}
memset(&clk_init, 0, sizeof(clk_init));
clk_init.name = kasprintf(GFP_KERNEL, "%s_out%d", node_name, i);
clk_init.ops = &vc7_clk_out_ops;
clk_init.flags = CLK_SET_RATE_PARENT;
clk_init.parent_names = parent_names;
clk_init.num_parents = 1;
vc7->clk_out[i].num = i;
vc7->clk_out[i].vc7 = vc7;
vc7->clk_out[i].hw.init = &clk_init;
ret = devm_clk_hw_register(&client->dev, &vc7->clk_out[i].hw);
if (ret)
goto err_clk_register;
kfree(clk_init.name); /* ccf made a copy of the name */
}
ret = of_clk_add_hw_provider(client->dev.of_node, vc7_of_clk_get, vc7);
if (ret) {
dev_err_probe(&client->dev, ret, "unable to add clk provider\n");
goto err_clk;
}
return ret;
err_clk_register:
dev_err_probe(&client->dev, ret,
"unable to register %s\n", clk_init.name);
kfree(clk_init.name); /* ccf made a copy of the name */
err_clk:
clk_unregister_fixed_rate(vc7->clk_apll.clk);
return ret;
}
static void vc7_remove(struct i2c_client *client)
{
struct vc7_driver_data *vc7 = i2c_get_clientdata(client);
of_clk_del_provider(client->dev.of_node);
clk_unregister_fixed_rate(vc7->clk_apll.clk);
}
static bool vc7_volatile_reg(struct device *dev, unsigned int reg)
{
if (reg == VC7_PAGE_ADDR)
return false;
return true;
}
static const struct vc7_chip_info vc7_rc21008a_info = {
.model = VC7_RC21008A,
.num_banks = 6,
.num_outputs = 8,
};
static struct regmap_range_cfg vc7_range_cfg[] = {
{
.range_min = 0,
.range_max = VC7_MAX_REG,
.selector_reg = VC7_PAGE_ADDR,
.selector_mask = 0xFF,
.selector_shift = 0,
.window_start = 0,
.window_len = VC7_PAGE_WINDOW,
}};
static const struct regmap_config vc7_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = VC7_MAX_REG,
.ranges = vc7_range_cfg,
.num_ranges = ARRAY_SIZE(vc7_range_cfg),
.volatile_reg = vc7_volatile_reg,
.cache_type = REGCACHE_RBTREE,
.can_multi_write = true,
.reg_format_endian = REGMAP_ENDIAN_LITTLE,
.val_format_endian = REGMAP_ENDIAN_LITTLE,
};
static const struct i2c_device_id vc7_i2c_id[] = {
{ "rc21008a", .driver_data = (kernel_ulong_t)&vc7_rc21008a_info },
{}
};
MODULE_DEVICE_TABLE(i2c, vc7_i2c_id);
static const struct of_device_id vc7_of_match[] = {
{ .compatible = "renesas,rc21008a", .data = &vc7_rc21008a_info },
{}
};
MODULE_DEVICE_TABLE(of, vc7_of_match);
static struct i2c_driver vc7_i2c_driver = {
.driver = {
.name = "vc7",
.of_match_table = vc7_of_match,
},
.probe = vc7_probe,
.remove = vc7_remove,
.id_table = vc7_i2c_id,
};
module_i2c_driver(vc7_i2c_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alex Helms <[email protected]");
MODULE_DESCRIPTION("Renesas Versaclock7 common clock framework driver");
| linux-master | drivers/clk/clk-versaclock7.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Nuvoton NPCM7xx Clock Generator
* All the clocks are initialized by the bootloader, so this driver only
* allows reading of the current settings directly from the hardware.
*
* Copyright (C) 2018 Nuvoton Technologies [email protected]
*/
#include <linux/module.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/bitfield.h>
#include <dt-bindings/clock/nuvoton,npcm7xx-clock.h>
struct npcm7xx_clk_pll {
struct clk_hw hw;
void __iomem *pllcon;
u8 flags;
};
#define to_npcm7xx_clk_pll(_hw) container_of(_hw, struct npcm7xx_clk_pll, hw)
#define PLLCON_LOKI BIT(31)
#define PLLCON_LOKS BIT(30)
#define PLLCON_FBDV GENMASK(27, 16)
#define PLLCON_OTDV2 GENMASK(15, 13)
#define PLLCON_PWDEN BIT(12)
#define PLLCON_OTDV1 GENMASK(10, 8)
#define PLLCON_INDV GENMASK(5, 0)
static unsigned long npcm7xx_clk_pll_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct npcm7xx_clk_pll *pll = to_npcm7xx_clk_pll(hw);
unsigned long fbdv, indv, otdv1, otdv2;
unsigned int val;
u64 ret;
if (parent_rate == 0) {
pr_err("%s: parent rate is zero", __func__);
return 0;
}
val = readl_relaxed(pll->pllcon);
indv = FIELD_GET(PLLCON_INDV, val);
fbdv = FIELD_GET(PLLCON_FBDV, val);
otdv1 = FIELD_GET(PLLCON_OTDV1, val);
otdv2 = FIELD_GET(PLLCON_OTDV2, val);
ret = (u64)parent_rate * fbdv;
do_div(ret, indv * otdv1 * otdv2);
return ret;
}
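/*
 * Example with assumed PLLCON fields: refclk = 25 MHz, INDV = 1,
 * FBDV = 40, OTDV1 = 1 and OTDV2 = 1 give 25 MHz * 40 / (1 * 1 * 1) = 1 GHz.
 */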
static const struct clk_ops npcm7xx_clk_pll_ops = {
.recalc_rate = npcm7xx_clk_pll_recalc_rate,
};
static struct clk_hw *
npcm7xx_clk_register_pll(void __iomem *pllcon, const char *name,
const char *parent_name, unsigned long flags)
{
struct npcm7xx_clk_pll *pll;
struct clk_init_data init;
struct clk_hw *hw;
int ret;
pll = kzalloc(sizeof(*pll), GFP_KERNEL);
if (!pll)
return ERR_PTR(-ENOMEM);
pr_debug("%s reg, name=%s, p=%s\n", __func__, name, parent_name);
init.name = name;
init.ops = &npcm7xx_clk_pll_ops;
init.parent_names = &parent_name;
init.num_parents = 1;
init.flags = flags;
pll->pllcon = pllcon;
pll->hw.init = &init;
hw = &pll->hw;
ret = clk_hw_register(NULL, hw);
if (ret) {
kfree(pll);
hw = ERR_PTR(ret);
}
return hw;
}
#define NPCM7XX_CLKEN1 (0x00)
#define NPCM7XX_CLKEN2 (0x28)
#define NPCM7XX_CLKEN3 (0x30)
#define NPCM7XX_CLKSEL (0x04)
#define NPCM7XX_CLKDIV1 (0x08)
#define NPCM7XX_CLKDIV2 (0x2C)
#define NPCM7XX_CLKDIV3 (0x58)
#define NPCM7XX_PLLCON0 (0x0C)
#define NPCM7XX_PLLCON1 (0x10)
#define NPCM7XX_PLLCON2 (0x54)
#define NPCM7XX_SWRSTR (0x14)
#define NPCM7XX_IRQWAKECON (0x18)
#define NPCM7XX_IRQWAKEFLAG (0x1C)
#define NPCM7XX_IPSRST1 (0x20)
#define NPCM7XX_IPSRST2 (0x24)
#define NPCM7XX_IPSRST3 (0x34)
#define NPCM7XX_WD0RCR (0x38)
#define NPCM7XX_WD1RCR (0x3C)
#define NPCM7XX_WD2RCR (0x40)
#define NPCM7XX_SWRSTC1 (0x44)
#define NPCM7XX_SWRSTC2 (0x48)
#define NPCM7XX_SWRSTC3 (0x4C)
#define NPCM7XX_SWRSTC4 (0x50)
#define NPCM7XX_CORSTC (0x5C)
#define NPCM7XX_PLLCONG (0x60)
#define NPCM7XX_AHBCKFI (0x64)
#define NPCM7XX_SECCNT (0x68)
#define NPCM7XX_CNTR25M (0x6C)
struct npcm7xx_clk_mux_data {
u8 shift;
u8 mask;
u32 *table;
const char *name;
const char * const *parent_names;
u8 num_parents;
unsigned long flags;
/*
* If this clock is exported via DT, set onecell_idx to constant
* defined in include/dt-bindings/clock/nuvoton,npcm7xx-clock.h for
* this specific clock. Otherwise, set to -1.
*/
int onecell_idx;
};
struct npcm7xx_clk_div_data {
u32 reg;
u8 shift;
u8 width;
const char *name;
const char *parent_name;
u8 clk_divider_flags;
unsigned long flags;
/*
* If this clock is exported via DT, set onecell_idx to constant
* defined in include/dt-bindings/clock/nuvoton,npcm7xx-clock.h for
* this specific clock. Otherwise, set to -1.
*/
int onecell_idx;
};
struct npcm7xx_clk_pll_data {
u32 reg;
const char *name;
const char *parent_name;
unsigned long flags;
/*
* If this clock is exported via DT, set onecell_idx to constant
* defined in include/dt-bindings/clock/nuvoton,npcm7xx-clock.h for
* this specific clock. Otherwise, set to -1.
*/
int onecell_idx;
};
/*
* Single copy of strings used to refer to clocks within this driver indexed by
* above enum.
*/
#define NPCM7XX_CLK_S_REFCLK "refclk"
#define NPCM7XX_CLK_S_SYSBYPCK "sysbypck"
#define NPCM7XX_CLK_S_MCBYPCK "mcbypck"
#define NPCM7XX_CLK_S_GFXBYPCK "gfxbypck"
#define NPCM7XX_CLK_S_PLL0 "pll0"
#define NPCM7XX_CLK_S_PLL1 "pll1"
#define NPCM7XX_CLK_S_PLL1_DIV2 "pll1_div2"
#define NPCM7XX_CLK_S_PLL2 "pll2"
#define NPCM7XX_CLK_S_PLL_GFX "pll_gfx"
#define NPCM7XX_CLK_S_PLL2_DIV2 "pll2_div2"
#define NPCM7XX_CLK_S_PIX_MUX "gfx_pixel"
#define NPCM7XX_CLK_S_GPRFSEL_MUX "gprfsel_mux"
#define NPCM7XX_CLK_S_MC_MUX "mc_phy"
#define NPCM7XX_CLK_S_CPU_MUX "cpu" /*AKA system clock.*/
#define NPCM7XX_CLK_S_MC "mc"
#define NPCM7XX_CLK_S_AXI "axi" /*AKA CLK2*/
#define NPCM7XX_CLK_S_AHB "ahb" /*AKA CLK4*/
#define NPCM7XX_CLK_S_CLKOUT_MUX "clkout_mux"
#define NPCM7XX_CLK_S_UART_MUX "uart_mux"
#define NPCM7XX_CLK_S_TIM_MUX "timer_mux"
#define NPCM7XX_CLK_S_SD_MUX "sd_mux"
#define NPCM7XX_CLK_S_GFXM_MUX "gfxm_mux"
#define NPCM7XX_CLK_S_SU_MUX "serial_usb_mux"
#define NPCM7XX_CLK_S_DVC_MUX "dvc_mux"
#define NPCM7XX_CLK_S_GFX_MUX "gfx_mux"
#define NPCM7XX_CLK_S_GFX_PIXEL "gfx_pixel"
#define NPCM7XX_CLK_S_SPI0 "spi0"
#define NPCM7XX_CLK_S_SPI3 "spi3"
#define NPCM7XX_CLK_S_SPIX "spix"
#define NPCM7XX_CLK_S_APB1 "apb1"
#define NPCM7XX_CLK_S_APB2 "apb2"
#define NPCM7XX_CLK_S_APB3 "apb3"
#define NPCM7XX_CLK_S_APB4 "apb4"
#define NPCM7XX_CLK_S_APB5 "apb5"
#define NPCM7XX_CLK_S_TOCK "tock"
#define NPCM7XX_CLK_S_CLKOUT "clkout"
#define NPCM7XX_CLK_S_UART "uart"
#define NPCM7XX_CLK_S_TIMER "timer"
#define NPCM7XX_CLK_S_MMC "mmc"
#define NPCM7XX_CLK_S_SDHC "sdhc"
#define NPCM7XX_CLK_S_ADC "adc"
#define NPCM7XX_CLK_S_GFX "gfx0_gfx1_mem"
#define NPCM7XX_CLK_S_USBIF "serial_usbif"
#define NPCM7XX_CLK_S_USB_HOST "usb_host"
#define NPCM7XX_CLK_S_USB_BRIDGE "usb_bridge"
#define NPCM7XX_CLK_S_PCI "pci"
static u32 pll_mux_table[] = {0, 1, 2, 3};
static const char * const pll_mux_parents[] __initconst = {
NPCM7XX_CLK_S_PLL0,
NPCM7XX_CLK_S_PLL1_DIV2,
NPCM7XX_CLK_S_REFCLK,
NPCM7XX_CLK_S_PLL2_DIV2,
};
static u32 cpuck_mux_table[] = {0, 1, 2, 3};
static const char * const cpuck_mux_parents[] __initconst = {
NPCM7XX_CLK_S_PLL0,
NPCM7XX_CLK_S_PLL1_DIV2,
NPCM7XX_CLK_S_REFCLK,
NPCM7XX_CLK_S_SYSBYPCK,
};
static u32 pixcksel_mux_table[] = {0, 2};
static const char * const pixcksel_mux_parents[] __initconst = {
NPCM7XX_CLK_S_PLL_GFX,
NPCM7XX_CLK_S_REFCLK,
};
static u32 sucksel_mux_table[] = {2, 3};
static const char * const sucksel_mux_parents[] __initconst = {
NPCM7XX_CLK_S_REFCLK,
NPCM7XX_CLK_S_PLL2_DIV2,
};
static u32 mccksel_mux_table[] = {0, 2, 3};
static const char * const mccksel_mux_parents[] __initconst = {
NPCM7XX_CLK_S_PLL1_DIV2,
NPCM7XX_CLK_S_REFCLK,
NPCM7XX_CLK_S_MCBYPCK,
};
static u32 clkoutsel_mux_table[] = {0, 1, 2, 3, 4};
static const char * const clkoutsel_mux_parents[] __initconst = {
NPCM7XX_CLK_S_PLL0,
NPCM7XX_CLK_S_PLL1_DIV2,
NPCM7XX_CLK_S_REFCLK,
NPCM7XX_CLK_S_PLL_GFX, /* divided by 2 */
NPCM7XX_CLK_S_PLL2_DIV2,
};
static u32 gfxmsel_mux_table[] = {2, 3};
static const char * const gfxmsel_mux_parents[] __initconst = {
NPCM7XX_CLK_S_REFCLK,
NPCM7XX_CLK_S_PLL2_DIV2,
};
static u32 dvcssel_mux_table[] = {2, 3};
static const char * const dvcssel_mux_parents[] __initconst = {
NPCM7XX_CLK_S_REFCLK,
NPCM7XX_CLK_S_PLL2,
};
static const struct npcm7xx_clk_pll_data npcm7xx_plls[] __initconst = {
{NPCM7XX_PLLCON0, NPCM7XX_CLK_S_PLL0, NPCM7XX_CLK_S_REFCLK, 0, -1},
{NPCM7XX_PLLCON1, NPCM7XX_CLK_S_PLL1,
NPCM7XX_CLK_S_REFCLK, 0, -1},
{NPCM7XX_PLLCON2, NPCM7XX_CLK_S_PLL2,
NPCM7XX_CLK_S_REFCLK, 0, -1},
{NPCM7XX_PLLCONG, NPCM7XX_CLK_S_PLL_GFX,
NPCM7XX_CLK_S_REFCLK, 0, -1},
};
static const struct npcm7xx_clk_mux_data npcm7xx_muxes[] __initconst = {
{0, GENMASK(1, 0), cpuck_mux_table, NPCM7XX_CLK_S_CPU_MUX,
cpuck_mux_parents, ARRAY_SIZE(cpuck_mux_parents), CLK_IS_CRITICAL,
NPCM7XX_CLK_CPU},
{4, GENMASK(1, 0), pixcksel_mux_table, NPCM7XX_CLK_S_PIX_MUX,
pixcksel_mux_parents, ARRAY_SIZE(pixcksel_mux_parents), 0,
NPCM7XX_CLK_GFX_PIXEL},
{6, GENMASK(1, 0), pll_mux_table, NPCM7XX_CLK_S_SD_MUX,
pll_mux_parents, ARRAY_SIZE(pll_mux_parents), 0, -1},
{8, GENMASK(1, 0), pll_mux_table, NPCM7XX_CLK_S_UART_MUX,
pll_mux_parents, ARRAY_SIZE(pll_mux_parents), 0, -1},
{10, GENMASK(1, 0), sucksel_mux_table, NPCM7XX_CLK_S_SU_MUX,
sucksel_mux_parents, ARRAY_SIZE(sucksel_mux_parents), 0, -1},
{12, GENMASK(1, 0), mccksel_mux_table, NPCM7XX_CLK_S_MC_MUX,
mccksel_mux_parents, ARRAY_SIZE(mccksel_mux_parents), 0, -1},
{14, GENMASK(1, 0), pll_mux_table, NPCM7XX_CLK_S_TIM_MUX,
pll_mux_parents, ARRAY_SIZE(pll_mux_parents), 0, -1},
{16, GENMASK(1, 0), pll_mux_table, NPCM7XX_CLK_S_GFX_MUX,
pll_mux_parents, ARRAY_SIZE(pll_mux_parents), 0, -1},
{18, GENMASK(2, 0), clkoutsel_mux_table, NPCM7XX_CLK_S_CLKOUT_MUX,
clkoutsel_mux_parents, ARRAY_SIZE(clkoutsel_mux_parents), 0, -1},
{21, GENMASK(1, 0), gfxmsel_mux_table, NPCM7XX_CLK_S_GFXM_MUX,
gfxmsel_mux_parents, ARRAY_SIZE(gfxmsel_mux_parents), 0, -1},
{23, GENMASK(1, 0), dvcssel_mux_table, NPCM7XX_CLK_S_DVC_MUX,
dvcssel_mux_parents, ARRAY_SIZE(dvcssel_mux_parents), 0, -1},
};
/* configurable dividers: */
static const struct npcm7xx_clk_div_data npcm7xx_divs[] __initconst = {
{NPCM7XX_CLKDIV1, 28, 3, NPCM7XX_CLK_S_ADC,
NPCM7XX_CLK_S_TIMER, CLK_DIVIDER_POWER_OF_TWO, 0, NPCM7XX_CLK_ADC},
/*30-28 ADCCKDIV*/
{NPCM7XX_CLKDIV1, 26, 2, NPCM7XX_CLK_S_AHB,
NPCM7XX_CLK_S_AXI, 0, CLK_IS_CRITICAL, NPCM7XX_CLK_AHB},
/*27-26 CLK4DIV*/
{NPCM7XX_CLKDIV1, 21, 5, NPCM7XX_CLK_S_TIMER,
NPCM7XX_CLK_S_TIM_MUX, 0, 0, NPCM7XX_CLK_TIMER},
/*25-21 TIMCKDIV*/
{NPCM7XX_CLKDIV1, 16, 5, NPCM7XX_CLK_S_UART,
NPCM7XX_CLK_S_UART_MUX, 0, 0, NPCM7XX_CLK_UART},
/*20-16 UARTDIV*/
{NPCM7XX_CLKDIV1, 11, 5, NPCM7XX_CLK_S_MMC,
NPCM7XX_CLK_S_SD_MUX, 0, 0, NPCM7XX_CLK_MMC},
/*15-11 MMCCKDIV*/
{NPCM7XX_CLKDIV1, 6, 5, NPCM7XX_CLK_S_SPI3,
NPCM7XX_CLK_S_AHB, 0, 0, NPCM7XX_CLK_SPI3},
/*10-6 AHB3CKDIV*/
{NPCM7XX_CLKDIV1, 2, 4, NPCM7XX_CLK_S_PCI,
NPCM7XX_CLK_S_GFX_MUX, 0, 0, NPCM7XX_CLK_PCI},
/*5-2 PCICKDIV*/
{NPCM7XX_CLKDIV1, 0, 1, NPCM7XX_CLK_S_AXI,
NPCM7XX_CLK_S_CPU_MUX, CLK_DIVIDER_POWER_OF_TWO, CLK_IS_CRITICAL,
NPCM7XX_CLK_AXI},/*0 CLK2DIV*/
{NPCM7XX_CLKDIV2, 30, 2, NPCM7XX_CLK_S_APB4,
NPCM7XX_CLK_S_AHB, CLK_DIVIDER_POWER_OF_TWO, 0, NPCM7XX_CLK_APB4},
/*31-30 APB4CKDIV*/
{NPCM7XX_CLKDIV2, 28, 2, NPCM7XX_CLK_S_APB3,
NPCM7XX_CLK_S_AHB, CLK_DIVIDER_POWER_OF_TWO, 0, NPCM7XX_CLK_APB3},
/*29-28 APB3CKDIV*/
{NPCM7XX_CLKDIV2, 26, 2, NPCM7XX_CLK_S_APB2,
NPCM7XX_CLK_S_AHB, CLK_DIVIDER_POWER_OF_TWO, 0, NPCM7XX_CLK_APB2},
/*27-26 APB2CKDIV*/
{NPCM7XX_CLKDIV2, 24, 2, NPCM7XX_CLK_S_APB1,
NPCM7XX_CLK_S_AHB, CLK_DIVIDER_POWER_OF_TWO, 0, NPCM7XX_CLK_APB1},
/*25-24 APB1CKDIV*/
{NPCM7XX_CLKDIV2, 22, 2, NPCM7XX_CLK_S_APB5,
NPCM7XX_CLK_S_AHB, CLK_DIVIDER_POWER_OF_TWO, 0, NPCM7XX_CLK_APB5},
/*23-22 APB5CKDIV*/
{NPCM7XX_CLKDIV2, 16, 5, NPCM7XX_CLK_S_CLKOUT,
NPCM7XX_CLK_S_CLKOUT_MUX, 0, 0, NPCM7XX_CLK_CLKOUT},
/*20-16 CLKOUTDIV*/
{NPCM7XX_CLKDIV2, 13, 3, NPCM7XX_CLK_S_GFX,
NPCM7XX_CLK_S_GFX_MUX, 0, 0, NPCM7XX_CLK_GFX},
/*15-13 GFXCKDIV*/
{NPCM7XX_CLKDIV2, 8, 5, NPCM7XX_CLK_S_USB_BRIDGE,
NPCM7XX_CLK_S_SU_MUX, 0, 0, NPCM7XX_CLK_SU},
/*12-8 SUCKDIV*/
{NPCM7XX_CLKDIV2, 4, 4, NPCM7XX_CLK_S_USB_HOST,
NPCM7XX_CLK_S_SU_MUX, 0, 0, NPCM7XX_CLK_SU48},
/*7-4 SU48CKDIV*/
{NPCM7XX_CLKDIV2, 0, 4, NPCM7XX_CLK_S_SDHC,
NPCM7XX_CLK_S_SD_MUX, 0, 0, NPCM7XX_CLK_SDHC},
/*3-0 SD1CKDIV*/
{NPCM7XX_CLKDIV3, 6, 5, NPCM7XX_CLK_S_SPI0,
NPCM7XX_CLK_S_AHB, 0, 0, NPCM7XX_CLK_SPI0},
/*10-6 SPI0CKDV*/
{NPCM7XX_CLKDIV3, 1, 5, NPCM7XX_CLK_S_SPIX,
NPCM7XX_CLK_S_AHB, 0, 0, NPCM7XX_CLK_SPIX},
/*5-1 SPIXCKDV*/
};
static DEFINE_SPINLOCK(npcm7xx_clk_lock);
static void __init npcm7xx_clk_init(struct device_node *clk_np)
{
struct clk_hw_onecell_data *npcm7xx_clk_data;
void __iomem *clk_base;
struct resource res;
struct clk_hw *hw;
int ret;
int i;
ret = of_address_to_resource(clk_np, 0, &res);
if (ret) {
pr_err("%pOFn: failed to get resource, ret %d\n", clk_np,
ret);
return;
}
clk_base = ioremap(res.start, resource_size(&res));
if (!clk_base)
goto npcm7xx_init_error;
npcm7xx_clk_data = kzalloc(struct_size(npcm7xx_clk_data, hws,
NPCM7XX_NUM_CLOCKS), GFP_KERNEL);
if (!npcm7xx_clk_data)
goto npcm7xx_init_np_err;
npcm7xx_clk_data->num = NPCM7XX_NUM_CLOCKS;
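/*
 * Pre-fill every slot with -EPROBE_DEFER so consumers of any clock that
 * is not (yet) registered get a probe deferral instead of a NULL clock.
 */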
for (i = 0; i < NPCM7XX_NUM_CLOCKS; i++)
npcm7xx_clk_data->hws[i] = ERR_PTR(-EPROBE_DEFER);
/* Register plls */
for (i = 0; i < ARRAY_SIZE(npcm7xx_plls); i++) {
const struct npcm7xx_clk_pll_data *pll_data = &npcm7xx_plls[i];
hw = npcm7xx_clk_register_pll(clk_base + pll_data->reg,
pll_data->name, pll_data->parent_name, pll_data->flags);
if (IS_ERR(hw)) {
pr_err("npcm7xx_clk: Can't register pll\n");
goto npcm7xx_init_fail;
}
if (pll_data->onecell_idx >= 0)
npcm7xx_clk_data->hws[pll_data->onecell_idx] = hw;
}
/* Register fixed dividers */
hw = clk_hw_register_fixed_factor(NULL, NPCM7XX_CLK_S_PLL1_DIV2,
NPCM7XX_CLK_S_PLL1, 0, 1, 2);
if (IS_ERR(hw)) {
pr_err("npcm7xx_clk: Can't register fixed div\n");
goto npcm7xx_init_fail;
}
hw = clk_hw_register_fixed_factor(NULL, NPCM7XX_CLK_S_PLL2_DIV2,
NPCM7XX_CLK_S_PLL2, 0, 1, 2);
if (IS_ERR(hw)) {
pr_err("npcm7xx_clk: Can't register div2\n");
goto npcm7xx_init_fail;
}
/* Register muxes */
for (i = 0; i < ARRAY_SIZE(npcm7xx_muxes); i++) {
const struct npcm7xx_clk_mux_data *mux_data = &npcm7xx_muxes[i];
hw = clk_hw_register_mux_table(NULL,
mux_data->name,
mux_data->parent_names, mux_data->num_parents,
mux_data->flags, clk_base + NPCM7XX_CLKSEL,
mux_data->shift, mux_data->mask, 0,
mux_data->table, &npcm7xx_clk_lock);
if (IS_ERR(hw)) {
pr_err("npcm7xx_clk: Can't register mux\n");
goto npcm7xx_init_fail;
}
if (mux_data->onecell_idx >= 0)
npcm7xx_clk_data->hws[mux_data->onecell_idx] = hw;
}
/* Register clock dividers specified in npcm7xx_divs */
for (i = 0; i < ARRAY_SIZE(npcm7xx_divs); i++) {
const struct npcm7xx_clk_div_data *div_data = &npcm7xx_divs[i];
hw = clk_hw_register_divider(NULL, div_data->name,
div_data->parent_name,
div_data->flags,
clk_base + div_data->reg,
div_data->shift, div_data->width,
div_data->clk_divider_flags, &npcm7xx_clk_lock);
if (IS_ERR(hw)) {
pr_err("npcm7xx_clk: Can't register div table\n");
goto npcm7xx_init_fail;
}
if (div_data->onecell_idx >= 0)
npcm7xx_clk_data->hws[div_data->onecell_idx] = hw;
}
ret = of_clk_add_hw_provider(clk_np, of_clk_hw_onecell_get,
npcm7xx_clk_data);
if (ret)
pr_err("failed to add DT provider: %d\n", ret);
of_node_put(clk_np);
return;
npcm7xx_init_fail:
kfree(npcm7xx_clk_data);
npcm7xx_init_np_err:
iounmap(clk_base);
npcm7xx_init_error:
of_node_put(clk_np);
}
CLK_OF_DECLARE(npcm7xx_clk_init, "nuvoton,npcm750-clk", npcm7xx_clk_init);
| linux-master | drivers/clk/clk-npcm7xx.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Clock driver for TPS68470 PMIC
*
* Copyright (c) 2021 Red Hat Inc.
* Copyright (C) 2018 Intel Corporation
*
* Authors:
* Hans de Goede <[email protected]>
* Zaikuo Wang <[email protected]>
* Tianshu Qiu <[email protected]>
* Jian Xu Zheng <[email protected]>
* Yuning Pu <[email protected]>
* Antti Laakso <[email protected]>
*/
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/kernel.h>
#include <linux/mfd/tps68470.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/platform_data/tps68470.h>
#include <linux/regmap.h>
#define TPS68470_CLK_NAME "tps68470-clk"
#define to_tps68470_clkdata(clkd) \
container_of(clkd, struct tps68470_clkdata, clkout_hw)
static struct tps68470_clkout_freqs {
unsigned long freq;
unsigned int xtaldiv;
unsigned int plldiv;
unsigned int postdiv;
unsigned int buckdiv;
unsigned int boostdiv;
} clk_freqs[] = {
/*
* The PLL is used to multiply the crystal oscillator
* frequency range of 3 MHz to 27 MHz by a programmable
* factor of F = (M/N)*(1/P) such that the output
* available at the HCLK_A or HCLK_B pins are in the range
* of 4 MHz to 64 MHz in increments of 0.1 MHz.
*
* hclk_# = osc_in * (((plldiv*2)+320) / (xtaldiv+30)) * (1 / 2^postdiv)
*
 * PLL_REF_CLK should be as close as possible to 100 kHz
 * PLL_REF_CLK = input clk / (XTALDIV[7:0] + 30)
 *
 * PLL_VCO_CLK = (PLL_REF_CLK * (plldiv*2 + 320))
 *
 * BOOST should be as close as possible to 2 MHz
 * BOOST = PLL_VCO_CLK / (BOOSTDIV[4:0] + 16)
 *
 * BUCK should be as close as possible to 5.2 MHz
 * BUCK = PLL_VCO_CLK / (BUCKDIV[3:0] + 5)
 *
 * osc_in  xtaldiv  plldiv  postdiv  hclk_#
 * 20 MHz  170      32      1        19.2 MHz
 * 20 MHz  170      40      1        20 MHz
 * 20 MHz  170      80      1        24 MHz
*/
{ 19200000, 170, 32, 1, 2, 3 },
{ 20000000, 170, 40, 1, 3, 4 },
{ 24000000, 170, 80, 1, 4, 8 },
};
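/*
 * Illustrative sketch, not part of the upstream driver: the clk_freqs[]
 * entries above follow from the hclk formula in the comment. E.g. for a
 * 20 MHz osc_in with xtaldiv = 170, plldiv = 32, postdiv = 1:
 * PLL_REF_CLK = 20 MHz / (170 + 30) = 100 kHz
 * PLL_VCO_CLK = 100 kHz * (32 * 2 + 320) = 38.4 MHz
 * hclk = 38.4 MHz / 2^1 = 19.2 MHz
 * The helper name below is hypothetical and only demonstrates the math.
 */
static inline unsigned long tps68470_hclk_example(unsigned long osc_in,
unsigned int xtaldiv, unsigned int plldiv, unsigned int postdiv)
{
unsigned long pll_ref = osc_in / (xtaldiv + 30); /* ~100 kHz */
unsigned long pll_vco = pll_ref * (plldiv * 2 + 320);
return pll_vco >> postdiv; /* divide by 2^postdiv */
}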
struct tps68470_clkdata {
struct clk_hw clkout_hw;
struct regmap *regmap;
unsigned long rate;
};
static int tps68470_clk_is_prepared(struct clk_hw *hw)
{
struct tps68470_clkdata *clkdata = to_tps68470_clkdata(hw);
int val;
if (regmap_read(clkdata->regmap, TPS68470_REG_PLLCTL, &val))
return 0;
return val & TPS68470_PLL_EN_MASK;
}
static int tps68470_clk_prepare(struct clk_hw *hw)
{
struct tps68470_clkdata *clkdata = to_tps68470_clkdata(hw);
regmap_write(clkdata->regmap, TPS68470_REG_CLKCFG1,
(TPS68470_PLL_OUTPUT_ENABLE << TPS68470_OUTPUT_A_SHIFT) |
(TPS68470_PLL_OUTPUT_ENABLE << TPS68470_OUTPUT_B_SHIFT));
regmap_update_bits(clkdata->regmap, TPS68470_REG_PLLCTL,
TPS68470_PLL_EN_MASK, TPS68470_PLL_EN_MASK);
/*
* The PLLCTL reg lock bit is set by the PMIC after approx. 4ms and
* does not indicate a true lock, so just wait 4 ms.
*/
usleep_range(4000, 5000);
return 0;
}
static void tps68470_clk_unprepare(struct clk_hw *hw)
{
struct tps68470_clkdata *clkdata = to_tps68470_clkdata(hw);
/* Disable clock first ... */
regmap_update_bits(clkdata->regmap, TPS68470_REG_PLLCTL, TPS68470_PLL_EN_MASK, 0);
/* ... and then tri-state the clock outputs. */
regmap_write(clkdata->regmap, TPS68470_REG_CLKCFG1, 0);
}
static unsigned long tps68470_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
struct tps68470_clkdata *clkdata = to_tps68470_clkdata(hw);
return clkdata->rate;
}
/*
* This returns the index of the clk_freqs[] cfg with the closest rate for
* use in tps68470_clk_round_rate(). tps68470_clk_set_rate() checks that
* the rate of the returned cfg is an exact match.
*/
static unsigned int tps68470_clk_cfg_lookup(unsigned long rate)
{
long diff, best_diff = LONG_MAX;
unsigned int i, best_idx = 0;
for (i = 0; i < ARRAY_SIZE(clk_freqs); i++) {
diff = clk_freqs[i].freq - rate;
if (diff == 0)
return i;
diff = abs(diff);
if (diff < best_diff) {
best_diff = diff;
best_idx = i;
}
}
return best_idx;
}
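/*
 * For example, tps68470_clk_cfg_lookup(19200000) returns index 0 (an exact
 * match), while tps68470_clk_cfg_lookup(19000000) also returns index 0 as
 * the closest configuration; set_rate() will then reject the inexact rate.
 */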
static long tps68470_clk_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
unsigned int idx = tps68470_clk_cfg_lookup(rate);
return clk_freqs[idx].freq;
}
static int tps68470_clk_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct tps68470_clkdata *clkdata = to_tps68470_clkdata(hw);
unsigned int idx = tps68470_clk_cfg_lookup(rate);
if (rate != clk_freqs[idx].freq)
return -EINVAL;
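/*
 * Program the dividers for the requested rate. CLK_SET_RATE_GATE (set at
 * registration time) guarantees the PLL is not running here, so changing
 * the dividers is allowed.
 */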
regmap_write(clkdata->regmap, TPS68470_REG_BOOSTDIV, clk_freqs[idx].boostdiv);
regmap_write(clkdata->regmap, TPS68470_REG_BUCKDIV, clk_freqs[idx].buckdiv);
regmap_write(clkdata->regmap, TPS68470_REG_PLLSWR, TPS68470_PLLSWR_DEFAULT);
regmap_write(clkdata->regmap, TPS68470_REG_XTALDIV, clk_freqs[idx].xtaldiv);
regmap_write(clkdata->regmap, TPS68470_REG_PLLDIV, clk_freqs[idx].plldiv);
regmap_write(clkdata->regmap, TPS68470_REG_POSTDIV, clk_freqs[idx].postdiv);
regmap_write(clkdata->regmap, TPS68470_REG_POSTDIV2, clk_freqs[idx].postdiv);
regmap_write(clkdata->regmap, TPS68470_REG_CLKCFG2, TPS68470_CLKCFG2_DRV_STR_2MA);
regmap_write(clkdata->regmap, TPS68470_REG_PLLCTL,
TPS68470_OSC_EXT_CAP_DEFAULT << TPS68470_OSC_EXT_CAP_SHIFT |
TPS68470_CLK_SRC_XTAL << TPS68470_CLK_SRC_SHIFT);
clkdata->rate = rate;
return 0;
}
static const struct clk_ops tps68470_clk_ops = {
.is_prepared = tps68470_clk_is_prepared,
.prepare = tps68470_clk_prepare,
.unprepare = tps68470_clk_unprepare,
.recalc_rate = tps68470_clk_recalc_rate,
.round_rate = tps68470_clk_round_rate,
.set_rate = tps68470_clk_set_rate,
};
static int tps68470_clk_probe(struct platform_device *pdev)
{
struct tps68470_clk_platform_data *pdata = pdev->dev.platform_data;
struct clk_init_data tps68470_clk_initdata = {
.name = TPS68470_CLK_NAME,
.ops = &tps68470_clk_ops,
/* Changing the dividers when the PLL is on is not allowed */
.flags = CLK_SET_RATE_GATE,
};
struct tps68470_clkdata *tps68470_clkdata;
struct tps68470_clk_consumer *consumer;
int ret;
int i;
tps68470_clkdata = devm_kzalloc(&pdev->dev, sizeof(*tps68470_clkdata),
GFP_KERNEL);
if (!tps68470_clkdata)
return -ENOMEM;
tps68470_clkdata->regmap = dev_get_drvdata(pdev->dev.parent);
tps68470_clkdata->clkout_hw.init = &tps68470_clk_initdata;
/* Set initial rate */
tps68470_clk_set_rate(&tps68470_clkdata->clkout_hw, clk_freqs[0].freq, 0);
ret = devm_clk_hw_register(&pdev->dev, &tps68470_clkdata->clkout_hw);
if (ret)
return ret;
ret = devm_clk_hw_register_clkdev(&pdev->dev, &tps68470_clkdata->clkout_hw,
TPS68470_CLK_NAME, NULL);
if (ret)
return ret;
if (pdata) {
for (i = 0; i < pdata->n_consumers; i++) {
consumer = &pdata->consumers[i];
ret = devm_clk_hw_register_clkdev(&pdev->dev,
&tps68470_clkdata->clkout_hw,
consumer->consumer_con_id,
consumer->consumer_dev_name);
if (ret)
return ret;
}
}
return 0;
}
static struct platform_driver tps68470_clk_driver = {
.driver = {
.name = TPS68470_CLK_NAME,
},
.probe = tps68470_clk_probe,
};
/*
* The ACPI tps68470 probe-ordering depends on the clk/gpio/regulator drivers
* registering before the drivers for the camera-sensors which use them bind.
* subsys_initcall() ensures this when the drivers are builtin.
*/
static int __init tps68470_clk_init(void)
{
return platform_driver_register(&tps68470_clk_driver);
}
subsys_initcall(tps68470_clk_init);
static void __exit tps68470_clk_exit(void)
{
platform_driver_unregister(&tps68470_clk_driver);
}
module_exit(tps68470_clk_exit);
MODULE_ALIAS("platform:tps68470-clk");
MODULE_DESCRIPTION("clock driver for TPS68470 pmic");
MODULE_LICENSE("GPL");
| linux-master | drivers/clk/clk-tps68470.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014 Oleksij Rempel <[email protected]>.
*/
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/clk-provider.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <dt-bindings/clock/alphascale,asm9260.h>
#define HW_AHBCLKCTRL0 0x0020
#define HW_AHBCLKCTRL1 0x0030
#define HW_SYSPLLCTRL 0x0100
#define HW_MAINCLKSEL 0x0120
#define HW_MAINCLKUEN 0x0124
#define HW_UARTCLKSEL 0x0128
#define HW_UARTCLKUEN 0x012c
#define HW_I2S0CLKSEL 0x0130
#define HW_I2S0CLKUEN 0x0134
#define HW_I2S1CLKSEL 0x0138
#define HW_I2S1CLKUEN 0x013c
#define HW_WDTCLKSEL 0x0160
#define HW_WDTCLKUEN 0x0164
#define HW_CLKOUTCLKSEL 0x0170
#define HW_CLKOUTCLKUEN 0x0174
#define HW_CPUCLKDIV 0x017c
#define HW_SYSAHBCLKDIV 0x0180
#define HW_I2S0MCLKDIV 0x0190
#define HW_I2S0SCLKDIV 0x0194
#define HW_I2S1MCLKDIV 0x0188
#define HW_I2S1SCLKDIV 0x018c
#define HW_UART0CLKDIV 0x0198
#define HW_UART1CLKDIV 0x019c
#define HW_UART2CLKDIV 0x01a0
#define HW_UART3CLKDIV 0x01a4
#define HW_UART4CLKDIV 0x01a8
#define HW_UART5CLKDIV 0x01ac
#define HW_UART6CLKDIV 0x01b0
#define HW_UART7CLKDIV 0x01b4
#define HW_UART8CLKDIV 0x01b8
#define HW_UART9CLKDIV 0x01bc
#define HW_SPI0CLKDIV 0x01c0
#define HW_SPI1CLKDIV 0x01c4
#define HW_QUADSPICLKDIV 0x01c8
#define HW_SSP0CLKDIV 0x01d0
#define HW_NANDCLKDIV 0x01d4
#define HW_TRACECLKDIV 0x01e0
#define HW_CAMMCLKDIV 0x01e8
#define HW_WDTCLKDIV 0x01ec
#define HW_CLKOUTCLKDIV 0x01f4
#define HW_MACCLKDIV 0x01f8
#define HW_LCDCLKDIV 0x01fc
#define HW_ADCANACLKDIV 0x0200
static struct clk_hw_onecell_data *clk_data;
static DEFINE_SPINLOCK(asm9260_clk_lock);
struct asm9260_div_clk {
unsigned int idx;
const char *name;
const char *parent_name;
u32 reg;
};
struct asm9260_gate_data {
unsigned int idx;
const char *name;
const char *parent_name;
u32 reg;
u8 bit_idx;
unsigned long flags;
};
struct asm9260_mux_clock {
u8 mask;
u32 *table;
const char *name;
const struct clk_parent_data *parent_data;
u8 num_parents;
unsigned long offset;
unsigned long flags;
};
static void __iomem *base;
static const struct asm9260_div_clk asm9260_div_clks[] __initconst = {
{ CLKID_SYS_CPU, "cpu_div", "main_gate", HW_CPUCLKDIV },
{ CLKID_SYS_AHB, "ahb_div", "cpu_div", HW_SYSAHBCLKDIV },
/*
 * i2s has two dividers: one for the external mclk only and an
 * internal divider for all clocks.
 */
{ CLKID_SYS_I2S0M, "i2s0m_div", "i2s0_mclk", HW_I2S0MCLKDIV },
{ CLKID_SYS_I2S1M, "i2s1m_div", "i2s1_mclk", HW_I2S1MCLKDIV },
{ CLKID_SYS_I2S0S, "i2s0s_div", "i2s0_gate", HW_I2S0SCLKDIV },
{ CLKID_SYS_I2S1S, "i2s1s_div", "i2s1_gate", HW_I2S1SCLKDIV },
{ CLKID_SYS_UART0, "uart0_div", "uart_gate", HW_UART0CLKDIV },
{ CLKID_SYS_UART1, "uart1_div", "uart_gate", HW_UART1CLKDIV },
{ CLKID_SYS_UART2, "uart2_div", "uart_gate", HW_UART2CLKDIV },
{ CLKID_SYS_UART3, "uart3_div", "uart_gate", HW_UART3CLKDIV },
{ CLKID_SYS_UART4, "uart4_div", "uart_gate", HW_UART4CLKDIV },
{ CLKID_SYS_UART5, "uart5_div", "uart_gate", HW_UART5CLKDIV },
{ CLKID_SYS_UART6, "uart6_div", "uart_gate", HW_UART6CLKDIV },
{ CLKID_SYS_UART7, "uart7_div", "uart_gate", HW_UART7CLKDIV },
{ CLKID_SYS_UART8, "uart8_div", "uart_gate", HW_UART8CLKDIV },
{ CLKID_SYS_UART9, "uart9_div", "uart_gate", HW_UART9CLKDIV },
{ CLKID_SYS_SPI0, "spi0_div", "main_gate", HW_SPI0CLKDIV },
{ CLKID_SYS_SPI1, "spi1_div", "main_gate", HW_SPI1CLKDIV },
{ CLKID_SYS_QUADSPI, "quadspi_div", "main_gate", HW_QUADSPICLKDIV },
{ CLKID_SYS_SSP0, "ssp0_div", "main_gate", HW_SSP0CLKDIV },
{ CLKID_SYS_NAND, "nand_div", "main_gate", HW_NANDCLKDIV },
{ CLKID_SYS_TRACE, "trace_div", "main_gate", HW_TRACECLKDIV },
{ CLKID_SYS_CAMM, "camm_div", "main_gate", HW_CAMMCLKDIV },
{ CLKID_SYS_MAC, "mac_div", "main_gate", HW_MACCLKDIV },
{ CLKID_SYS_LCD, "lcd_div", "main_gate", HW_LCDCLKDIV },
{ CLKID_SYS_ADCANA, "adcana_div", "main_gate", HW_ADCANACLKDIV },
{ CLKID_SYS_WDT, "wdt_div", "wdt_gate", HW_WDTCLKDIV },
{ CLKID_SYS_CLKOUT, "clkout_div", "clkout_gate", HW_CLKOUTCLKDIV },
};
static const struct asm9260_gate_data asm9260_mux_gates[] __initconst = {
{ 0, "main_gate", "main_mux", HW_MAINCLKUEN, 0 },
{ 0, "uart_gate", "uart_mux", HW_UARTCLKUEN, 0 },
{ 0, "i2s0_gate", "i2s0_mux", HW_I2S0CLKUEN, 0 },
{ 0, "i2s1_gate", "i2s1_mux", HW_I2S1CLKUEN, 0 },
{ 0, "wdt_gate", "wdt_mux", HW_WDTCLKUEN, 0 },
{ 0, "clkout_gate", "clkout_mux", HW_CLKOUTCLKUEN, 0 },
};
static const struct asm9260_gate_data asm9260_ahb_gates[] __initconst = {
/* ahb gates */
{ CLKID_AHB_ROM, "rom", "ahb_div",
HW_AHBCLKCTRL0, 1, CLK_IGNORE_UNUSED},
{ CLKID_AHB_RAM, "ram", "ahb_div",
HW_AHBCLKCTRL0, 2, CLK_IGNORE_UNUSED},
{ CLKID_AHB_GPIO, "gpio", "ahb_div",
HW_AHBCLKCTRL0, 4 },
{ CLKID_AHB_MAC, "mac", "ahb_div",
HW_AHBCLKCTRL0, 5 },
{ CLKID_AHB_EMI, "emi", "ahb_div",
HW_AHBCLKCTRL0, 6, CLK_IGNORE_UNUSED},
{ CLKID_AHB_USB0, "usb0", "ahb_div",
HW_AHBCLKCTRL0, 7 },
{ CLKID_AHB_USB1, "usb1", "ahb_div",
HW_AHBCLKCTRL0, 8 },
{ CLKID_AHB_DMA0, "dma0", "ahb_div",
HW_AHBCLKCTRL0, 9 },
{ CLKID_AHB_DMA1, "dma1", "ahb_div",
HW_AHBCLKCTRL0, 10 },
{ CLKID_AHB_UART0, "uart0", "ahb_div",
HW_AHBCLKCTRL0, 11 },
{ CLKID_AHB_UART1, "uart1", "ahb_div",
HW_AHBCLKCTRL0, 12 },
{ CLKID_AHB_UART2, "uart2", "ahb_div",
HW_AHBCLKCTRL0, 13 },
{ CLKID_AHB_UART3, "uart3", "ahb_div",
HW_AHBCLKCTRL0, 14 },
{ CLKID_AHB_UART4, "uart4", "ahb_div",
HW_AHBCLKCTRL0, 15 },
{ CLKID_AHB_UART5, "uart5", "ahb_div",
HW_AHBCLKCTRL0, 16 },
{ CLKID_AHB_UART6, "uart6", "ahb_div",
HW_AHBCLKCTRL0, 17 },
{ CLKID_AHB_UART7, "uart7", "ahb_div",
HW_AHBCLKCTRL0, 18 },
{ CLKID_AHB_UART8, "uart8", "ahb_div",
HW_AHBCLKCTRL0, 19 },
{ CLKID_AHB_UART9, "uart9", "ahb_div",
HW_AHBCLKCTRL0, 20 },
{ CLKID_AHB_I2S0, "i2s0", "ahb_div",
HW_AHBCLKCTRL0, 21 },
{ CLKID_AHB_I2C0, "i2c0", "ahb_div",
HW_AHBCLKCTRL0, 22 },
{ CLKID_AHB_I2C1, "i2c1", "ahb_div",
HW_AHBCLKCTRL0, 23 },
{ CLKID_AHB_SSP0, "ssp0", "ahb_div",
HW_AHBCLKCTRL0, 24 },
{ CLKID_AHB_IOCONFIG, "ioconf", "ahb_div",
HW_AHBCLKCTRL0, 25 },
{ CLKID_AHB_WDT, "wdt", "ahb_div",
HW_AHBCLKCTRL0, 26 },
{ CLKID_AHB_CAN0, "can0", "ahb_div",
HW_AHBCLKCTRL0, 27 },
{ CLKID_AHB_CAN1, "can1", "ahb_div",
HW_AHBCLKCTRL0, 28 },
{ CLKID_AHB_MPWM, "mpwm", "ahb_div",
HW_AHBCLKCTRL0, 29 },
{ CLKID_AHB_SPI0, "spi0", "ahb_div",
HW_AHBCLKCTRL0, 30 },
{ CLKID_AHB_SPI1, "spi1", "ahb_div",
HW_AHBCLKCTRL0, 31 },
{ CLKID_AHB_QEI, "qei", "ahb_div",
HW_AHBCLKCTRL1, 0 },
{ CLKID_AHB_QUADSPI0, "quadspi0", "ahb_div",
HW_AHBCLKCTRL1, 1 },
{ CLKID_AHB_CAMIF, "capmif", "ahb_div",
HW_AHBCLKCTRL1, 2 },
{ CLKID_AHB_LCDIF, "lcdif", "ahb_div",
HW_AHBCLKCTRL1, 3 },
{ CLKID_AHB_TIMER0, "timer0", "ahb_div",
HW_AHBCLKCTRL1, 4 },
{ CLKID_AHB_TIMER1, "timer1", "ahb_div",
HW_AHBCLKCTRL1, 5 },
{ CLKID_AHB_TIMER2, "timer2", "ahb_div",
HW_AHBCLKCTRL1, 6 },
{ CLKID_AHB_TIMER3, "timer3", "ahb_div",
HW_AHBCLKCTRL1, 7 },
{ CLKID_AHB_IRQ, "irq", "ahb_div",
HW_AHBCLKCTRL1, 8, CLK_IGNORE_UNUSED},
{ CLKID_AHB_RTC, "rtc", "ahb_div",
HW_AHBCLKCTRL1, 9 },
{ CLKID_AHB_NAND, "nand", "ahb_div",
HW_AHBCLKCTRL1, 10 },
{ CLKID_AHB_ADC0, "adc0", "ahb_div",
HW_AHBCLKCTRL1, 11 },
{ CLKID_AHB_LED, "led", "ahb_div",
HW_AHBCLKCTRL1, 12 },
{ CLKID_AHB_DAC0, "dac0", "ahb_div",
HW_AHBCLKCTRL1, 13 },
{ CLKID_AHB_LCD, "lcd", "ahb_div",
HW_AHBCLKCTRL1, 14 },
{ CLKID_AHB_I2S1, "i2s1", "ahb_div",
HW_AHBCLKCTRL1, 15 },
{ CLKID_AHB_MAC1, "mac1", "ahb_div",
HW_AHBCLKCTRL1, 16 },
};
static struct clk_parent_data __initdata main_mux_p[] = { { .index = 0, }, { .name = "pll" } };
static struct clk_parent_data __initdata i2s0_mux_p[] = { { .index = 0, }, { .name = "pll" }, { .name = "i2s0m_div"} };
static struct clk_parent_data __initdata i2s1_mux_p[] = { { .index = 0, }, { .name = "pll" }, { .name = "i2s1m_div"} };
static struct clk_parent_data __initdata clkout_mux_p[] = { { .index = 0, }, { .name = "pll" }, { .name = "rtc"} };
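/* Maps parent index to register field value: parents 0/1/2 -> values 0/1/3 */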
static u32 three_mux_table[] = {0, 1, 3};
static struct asm9260_mux_clock asm9260_mux_clks[] __initdata = {
{ 1, three_mux_table, "main_mux", main_mux_p,
ARRAY_SIZE(main_mux_p), HW_MAINCLKSEL, },
{ 1, three_mux_table, "uart_mux", main_mux_p,
ARRAY_SIZE(main_mux_p), HW_UARTCLKSEL, },
{ 1, three_mux_table, "wdt_mux", main_mux_p,
ARRAY_SIZE(main_mux_p), HW_WDTCLKSEL, },
{ 3, three_mux_table, "i2s0_mux", i2s0_mux_p,
ARRAY_SIZE(i2s0_mux_p), HW_I2S0CLKSEL, },
{ 3, three_mux_table, "i2s1_mux", i2s1_mux_p,
ARRAY_SIZE(i2s1_mux_p), HW_I2S1CLKSEL, },
{ 3, three_mux_table, "clkout_mux", clkout_mux_p,
ARRAY_SIZE(clkout_mux_p), HW_CLKOUTCLKSEL, },
};
static void __init asm9260_acc_init(struct device_node *np)
{
struct clk_hw *hw, *pll_hw;
struct clk_hw **hws;
const char *pll_clk = "pll";
struct clk_parent_data pll_parent_data = { .index = 0 };
u32 rate;
int n;
clk_data = kzalloc(struct_size(clk_data, hws, MAX_CLKS), GFP_KERNEL);
if (!clk_data)
return;
clk_data->num = MAX_CLKS;
hws = clk_data->hws;
base = of_io_request_and_map(np, 0, np->name);
if (IS_ERR(base))
panic("%pOFn: unable to map resource", np);
/* register pll */
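/*
 * The low 16 bits of HW_SYSPLLCTRL appear to hold the PLL rate in MHz;
 * this is an assumption inferred from the computation below.
 */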
rate = (ioread32(base + HW_SYSPLLCTRL) & 0xffff) * 1000000;
pll_hw = clk_hw_register_fixed_rate_parent_accuracy(NULL, pll_clk, &pll_parent_data,
0, rate);
if (IS_ERR(pll_hw))
panic("%pOFn: can't register REFCLK. Check DT!", np);
for (n = 0; n < ARRAY_SIZE(asm9260_mux_clks); n++) {
const struct asm9260_mux_clock *mc = &asm9260_mux_clks[n];
hw = clk_hw_register_mux_table_parent_data(NULL, mc->name, mc->parent_data,
mc->num_parents, mc->flags, base + mc->offset,
0, mc->mask, 0, mc->table, &asm9260_clk_lock);
}
/* clock mux gate cells */
for (n = 0; n < ARRAY_SIZE(asm9260_mux_gates); n++) {
const struct asm9260_gate_data *gd = &asm9260_mux_gates[n];
hw = clk_hw_register_gate(NULL, gd->name,
gd->parent_name, gd->flags | CLK_SET_RATE_PARENT,
base + gd->reg, gd->bit_idx, 0, &asm9260_clk_lock);
}
/* clock div cells */
for (n = 0; n < ARRAY_SIZE(asm9260_div_clks); n++) {
const struct asm9260_div_clk *dc = &asm9260_div_clks[n];
hws[dc->idx] = clk_hw_register_divider(NULL, dc->name,
dc->parent_name, CLK_SET_RATE_PARENT,
base + dc->reg, 0, 8, CLK_DIVIDER_ONE_BASED,
&asm9260_clk_lock);
}
/* clock ahb gate cells */
for (n = 0; n < ARRAY_SIZE(asm9260_ahb_gates); n++) {
const struct asm9260_gate_data *gd = &asm9260_ahb_gates[n];
hws[gd->idx] = clk_hw_register_gate(NULL, gd->name,
gd->parent_name, gd->flags, base + gd->reg,
gd->bit_idx, 0, &asm9260_clk_lock);
}
/* check for errors on leaf clocks */
for (n = 0; n < MAX_CLKS; n++) {
if (!IS_ERR(hws[n]))
continue;
pr_err("%pOF: Unable to register leaf clock %d\n",
np, n);
goto fail;
}
/* register clk-provider */
of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
return;
fail:
iounmap(base);
}
CLK_OF_DECLARE(asm9260_acc, "alphascale,asm9260-clock-controller",
asm9260_acc_init);
| linux-master | drivers/clk/clk-asm9260.c |
// SPDX-License-Identifier: GPL-2.0+
//
// clk-s2mps11.c - Clock driver for S2MPS11.
//
// Copyright (C) 2013,2014 Samsung Electronics
#include <linux/module.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/clkdev.h>
#include <linux/regmap.h>
#include <linux/clk-provider.h>
#include <linux/platform_device.h>
#include <linux/mfd/samsung/s2mps11.h>
#include <linux/mfd/samsung/s2mps13.h>
#include <linux/mfd/samsung/s2mps14.h>
#include <linux/mfd/samsung/s5m8767.h>
#include <linux/mfd/samsung/core.h>
#include <dt-bindings/clock/samsung,s2mps11.h>
struct s2mps11_clk {
struct sec_pmic_dev *iodev;
struct device_node *clk_np;
struct clk_hw hw;
struct clk *clk;
struct clk_lookup *lookup;
u32 mask;
unsigned int reg;
};
static struct s2mps11_clk *to_s2mps11_clk(struct clk_hw *hw)
{
return container_of(hw, struct s2mps11_clk, hw);
}
static int s2mps11_clk_prepare(struct clk_hw *hw)
{
struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw);
return regmap_update_bits(s2mps11->iodev->regmap_pmic,
s2mps11->reg,
s2mps11->mask, s2mps11->mask);
}
static void s2mps11_clk_unprepare(struct clk_hw *hw)
{
struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw);
regmap_update_bits(s2mps11->iodev->regmap_pmic, s2mps11->reg,
s2mps11->mask, ~s2mps11->mask);
}
static int s2mps11_clk_is_prepared(struct clk_hw *hw)
{
int ret;
u32 val;
struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw);
ret = regmap_read(s2mps11->iodev->regmap_pmic,
s2mps11->reg, &val);
if (ret < 0)
return -EINVAL;
return val & s2mps11->mask;
}
static unsigned long s2mps11_clk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
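/* All S2MPS11-family clock outputs are fixed 32.768 kHz */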
return 32768;
}
static const struct clk_ops s2mps11_clk_ops = {
.prepare = s2mps11_clk_prepare,
.unprepare = s2mps11_clk_unprepare,
.is_prepared = s2mps11_clk_is_prepared,
.recalc_rate = s2mps11_clk_recalc_rate,
};
/* This s2mps11_clks_init structure is common to s2mps11, s2mps13 and s2mps14 */
static struct clk_init_data s2mps11_clks_init[S2MPS11_CLKS_NUM] = {
[S2MPS11_CLK_AP] = {
.name = "s2mps11_ap",
.ops = &s2mps11_clk_ops,
},
[S2MPS11_CLK_CP] = {
.name = "s2mps11_cp",
.ops = &s2mps11_clk_ops,
},
[S2MPS11_CLK_BT] = {
.name = "s2mps11_bt",
.ops = &s2mps11_clk_ops,
},
};
static struct device_node *s2mps11_clk_parse_dt(struct platform_device *pdev,
struct clk_init_data *clks_init)
{
struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
struct device_node *clk_np;
int i;
if (!iodev->dev->of_node)
return ERR_PTR(-EINVAL);
clk_np = of_get_child_by_name(iodev->dev->of_node, "clocks");
if (!clk_np) {
dev_err(&pdev->dev, "could not find clock sub-node\n");
return ERR_PTR(-EINVAL);
}
for (i = 0; i < S2MPS11_CLKS_NUM; i++)
of_property_read_string_index(clk_np, "clock-output-names", i,
&clks_init[i].name);
return clk_np;
}
static int s2mps11_clk_probe(struct platform_device *pdev)
{
struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
struct s2mps11_clk *s2mps11_clks;
struct clk_hw_onecell_data *clk_data;
unsigned int s2mps11_reg;
int i, ret = 0;
enum sec_device_type hwid = platform_get_device_id(pdev)->driver_data;
s2mps11_clks = devm_kcalloc(&pdev->dev, S2MPS11_CLKS_NUM,
sizeof(*s2mps11_clks), GFP_KERNEL);
if (!s2mps11_clks)
return -ENOMEM;
clk_data = devm_kzalloc(&pdev->dev,
struct_size(clk_data, hws, S2MPS11_CLKS_NUM),
GFP_KERNEL);
if (!clk_data)
return -ENOMEM;
switch (hwid) {
case S2MPS11X:
s2mps11_reg = S2MPS11_REG_RTC_CTRL;
break;
case S2MPS13X:
s2mps11_reg = S2MPS13_REG_RTCCTRL;
break;
case S2MPS14X:
s2mps11_reg = S2MPS14_REG_RTCCTRL;
break;
case S5M8767X:
s2mps11_reg = S5M8767_REG_CTRL1;
break;
default:
dev_err(&pdev->dev, "Invalid device type\n");
return -EINVAL;
}
/* Store clocks of_node in first element of s2mps11_clks array */
s2mps11_clks->clk_np = s2mps11_clk_parse_dt(pdev, s2mps11_clks_init);
if (IS_ERR(s2mps11_clks->clk_np))
return PTR_ERR(s2mps11_clks->clk_np);
for (i = 0; i < S2MPS11_CLKS_NUM; i++) {
if (i == S2MPS11_CLK_CP && hwid == S2MPS14X)
continue; /* Skip clocks not present in some devices */
s2mps11_clks[i].iodev = iodev;
s2mps11_clks[i].hw.init = &s2mps11_clks_init[i];
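/* Each output has its own enable bit in the shared control register */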
s2mps11_clks[i].mask = 1 << i;
s2mps11_clks[i].reg = s2mps11_reg;
s2mps11_clks[i].clk = devm_clk_register(&pdev->dev,
&s2mps11_clks[i].hw);
if (IS_ERR(s2mps11_clks[i].clk)) {
dev_err(&pdev->dev, "Fail to register : %s\n",
s2mps11_clks_init[i].name);
ret = PTR_ERR(s2mps11_clks[i].clk);
goto err_reg;
}
s2mps11_clks[i].lookup = clkdev_hw_create(&s2mps11_clks[i].hw,
s2mps11_clks_init[i].name, NULL);
if (!s2mps11_clks[i].lookup) {
ret = -ENOMEM;
goto err_reg;
}
clk_data->hws[i] = &s2mps11_clks[i].hw;
}
clk_data->num = S2MPS11_CLKS_NUM;
of_clk_add_hw_provider(s2mps11_clks->clk_np, of_clk_hw_onecell_get,
clk_data);
platform_set_drvdata(pdev, s2mps11_clks);
return ret;
err_reg:
of_node_put(s2mps11_clks[0].clk_np);
while (--i >= 0)
clkdev_drop(s2mps11_clks[i].lookup);
return ret;
}
static void s2mps11_clk_remove(struct platform_device *pdev)
{
struct s2mps11_clk *s2mps11_clks = platform_get_drvdata(pdev);
int i;
of_clk_del_provider(s2mps11_clks[0].clk_np);
/* Drop the reference obtained in s2mps11_clk_parse_dt */
of_node_put(s2mps11_clks[0].clk_np);
for (i = 0; i < S2MPS11_CLKS_NUM; i++) {
/* Skip clocks not present on S2MPS14 */
if (!s2mps11_clks[i].lookup)
continue;
clkdev_drop(s2mps11_clks[i].lookup);
}
}
static const struct platform_device_id s2mps11_clk_id[] = {
{ "s2mps11-clk", S2MPS11X},
{ "s2mps13-clk", S2MPS13X},
{ "s2mps14-clk", S2MPS14X},
{ "s5m8767-clk", S5M8767X},
{ },
};
MODULE_DEVICE_TABLE(platform, s2mps11_clk_id);
#ifdef CONFIG_OF
/*
* Device is instantiated through parent MFD device and device matching is done
* through platform_device_id.
*
 * However, if the device's DT node contains a proper clock compatible and the
 * driver is built as a module, then the *module* matching will be done through
 * DT aliases. This requires an of_device_id table. At the same time, this will
 * not change the actual *device* matching, so do not add .of_match_table.
*/
static const struct of_device_id s2mps11_dt_match[] __used = {
{
.compatible = "samsung,s2mps11-clk",
.data = (void *)S2MPS11X,
}, {
.compatible = "samsung,s2mps13-clk",
.data = (void *)S2MPS13X,
}, {
.compatible = "samsung,s2mps14-clk",
.data = (void *)S2MPS14X,
}, {
.compatible = "samsung,s5m8767-clk",
.data = (void *)S5M8767X,
}, {
/* Sentinel */
},
};
MODULE_DEVICE_TABLE(of, s2mps11_dt_match);
#endif
static struct platform_driver s2mps11_clk_driver = {
.driver = {
.name = "s2mps11-clk",
},
.probe = s2mps11_clk_probe,
.remove_new = s2mps11_clk_remove,
.id_table = s2mps11_clk_id,
};
module_platform_driver(s2mps11_clk_driver);
MODULE_DESCRIPTION("S2MPS11 Clock Driver");
MODULE_AUTHOR("Yadwinder Singh Brar <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/clk/clk-s2mps11.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* System Control and Power Interface (SCPI) Protocol based clock driver
*
* Copyright (C) 2015 ARM Ltd.
*/
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/scpi_protocol.h>
struct scpi_clk {
u32 id;
struct clk_hw hw;
struct scpi_dvfs_info *info;
struct scpi_ops *scpi_ops;
};
#define to_scpi_clk(clk) container_of(clk, struct scpi_clk, hw)
static struct platform_device *cpufreq_dev;
static unsigned long scpi_clk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct scpi_clk *clk = to_scpi_clk(hw);
return clk->scpi_ops->clk_get_val(clk->id);
}
static long scpi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
/*
* We can't figure out what rate it will be, so just return the
* rate back to the caller. scpi_clk_recalc_rate() will be called
* after the rate is set and we'll know what rate the clock is
* running at then.
*/
return rate;
}
static int scpi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct scpi_clk *clk = to_scpi_clk(hw);
return clk->scpi_ops->clk_set_val(clk->id, rate);
}
static const struct clk_ops scpi_clk_ops = {
.recalc_rate = scpi_clk_recalc_rate,
.round_rate = scpi_clk_round_rate,
.set_rate = scpi_clk_set_rate,
};
/* find closest match to given frequency in OPP table */
static long __scpi_dvfs_round_rate(struct scpi_clk *clk, unsigned long rate)
{
int idx;
unsigned long fmin = 0, fmax = ~0, ftmp;
const struct scpi_opp *opp = clk->info->opps;
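/*
 * OPPs are kept sorted in ascending frequency order, so the first OPP at
 * or above the requested rate is the closest match from above.
 */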
for (idx = 0; idx < clk->info->count; idx++, opp++) {
ftmp = opp->freq;
if (ftmp >= rate) {
if (ftmp <= fmax)
fmax = ftmp;
break;
} else if (ftmp >= fmin) {
fmin = ftmp;
}
}
return fmax != ~0 ? fmax : fmin;
}
static unsigned long scpi_dvfs_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct scpi_clk *clk = to_scpi_clk(hw);
int idx = clk->scpi_ops->dvfs_get_idx(clk->id);
const struct scpi_opp *opp;
if (idx < 0)
return 0;
opp = clk->info->opps + idx;
return opp->freq;
}
static long scpi_dvfs_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
struct scpi_clk *clk = to_scpi_clk(hw);
return __scpi_dvfs_round_rate(clk, rate);
}
static int __scpi_find_dvfs_index(struct scpi_clk *clk, unsigned long rate)
{
int idx, max_opp = clk->info->count;
const struct scpi_opp *opp = clk->info->opps;
for (idx = 0; idx < max_opp; idx++, opp++)
if (opp->freq == rate)
return idx;
return -EINVAL;
}
static int scpi_dvfs_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct scpi_clk *clk = to_scpi_clk(hw);
int ret = __scpi_find_dvfs_index(clk, rate);
if (ret < 0)
return ret;
return clk->scpi_ops->dvfs_set_idx(clk->id, (u8)ret);
}
static const struct clk_ops scpi_dvfs_ops = {
.recalc_rate = scpi_dvfs_recalc_rate,
.round_rate = scpi_dvfs_round_rate,
.set_rate = scpi_dvfs_set_rate,
};
static const struct of_device_id scpi_clk_match[] __maybe_unused = {
{ .compatible = "arm,scpi-dvfs-clocks", .data = &scpi_dvfs_ops, },
{ .compatible = "arm,scpi-variable-clocks", .data = &scpi_clk_ops, },
{}
};
static int
scpi_clk_ops_init(struct device *dev, const struct of_device_id *match,
struct scpi_clk *sclk, const char *name)
{
struct clk_init_data init;
unsigned long min = 0, max = 0;
int ret;
init.name = name;
init.flags = 0;
init.num_parents = 0;
init.ops = match->data;
sclk->hw.init = &init;
sclk->scpi_ops = get_scpi_ops();
if (init.ops == &scpi_dvfs_ops) {
sclk->info = sclk->scpi_ops->dvfs_get_info(sclk->id);
if (IS_ERR(sclk->info))
return PTR_ERR(sclk->info);
} else if (init.ops == &scpi_clk_ops) {
if (sclk->scpi_ops->clk_get_range(sclk->id, &min, &max) || !max)
return -EINVAL;
} else {
return -EINVAL;
}
ret = devm_clk_hw_register(dev, &sclk->hw);
if (!ret && max)
clk_hw_set_rate_range(&sclk->hw, min, max);
return ret;
}
struct scpi_clk_data {
struct scpi_clk **clk;
unsigned int clk_num;
};
static struct clk_hw *
scpi_of_clk_src_get(struct of_phandle_args *clkspec, void *data)
{
struct scpi_clk *sclk;
struct scpi_clk_data *clk_data = data;
unsigned int idx = clkspec->args[0], count;
for (count = 0; count < clk_data->clk_num; count++) {
sclk = clk_data->clk[count];
if (idx == sclk->id)
return &sclk->hw;
}
return ERR_PTR(-EINVAL);
}
static int scpi_clk_add(struct device *dev, struct device_node *np,
const struct of_device_id *match)
{
int idx, count, err;
struct scpi_clk_data *clk_data;
count = of_property_count_strings(np, "clock-output-names");
if (count < 0) {
dev_err(dev, "%pOFn: invalid clock output count\n", np);
return -EINVAL;
}
clk_data = devm_kmalloc(dev, sizeof(*clk_data), GFP_KERNEL);
if (!clk_data)
return -ENOMEM;
clk_data->clk_num = count;
clk_data->clk = devm_kcalloc(dev, count, sizeof(*clk_data->clk),
GFP_KERNEL);
if (!clk_data->clk)
return -ENOMEM;
for (idx = 0; idx < count; idx++) {
struct scpi_clk *sclk;
const char *name;
u32 val;
sclk = devm_kzalloc(dev, sizeof(*sclk), GFP_KERNEL);
if (!sclk)
return -ENOMEM;
if (of_property_read_string_index(np, "clock-output-names",
idx, &name)) {
dev_err(dev, "invalid clock name @ %pOFn\n", np);
return -EINVAL;
}
if (of_property_read_u32_index(np, "clock-indices",
idx, &val)) {
dev_err(dev, "invalid clock index @ %pOFn\n", np);
return -EINVAL;
}
sclk->id = val;
err = scpi_clk_ops_init(dev, match, sclk, name);
if (err) {
dev_err(dev, "failed to register clock '%s'\n", name);
return err;
}
dev_dbg(dev, "Registered clock '%s'\n", name);
clk_data->clk[idx] = sclk;
}
return of_clk_add_hw_provider(np, scpi_of_clk_src_get, clk_data);
}
static void scpi_clocks_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *child, *np = dev->of_node;
if (cpufreq_dev) {
platform_device_unregister(cpufreq_dev);
cpufreq_dev = NULL;
}
for_each_available_child_of_node(np, child)
of_clk_del_provider(child);
}
static int scpi_clocks_probe(struct platform_device *pdev)
{
int ret;
struct device *dev = &pdev->dev;
struct device_node *child, *np = dev->of_node;
const struct of_device_id *match;
if (!get_scpi_ops())
return -ENXIO;
for_each_available_child_of_node(np, child) {
match = of_match_node(scpi_clk_match, child);
if (!match)
continue;
ret = scpi_clk_add(dev, child, match);
if (ret) {
scpi_clocks_remove(pdev);
of_node_put(child);
return ret;
}
if (match->data != &scpi_dvfs_ops)
continue;
/* Add the virtual cpufreq device if it's a DVFS clock provider */
cpufreq_dev = platform_device_register_simple("scpi-cpufreq",
-1, NULL, 0);
if (IS_ERR(cpufreq_dev)) {
pr_warn("unable to register cpufreq device\n");
cpufreq_dev = NULL;
}
}
return 0;
}
static const struct of_device_id scpi_clocks_ids[] = {
{ .compatible = "arm,scpi-clocks", },
{}
};
MODULE_DEVICE_TABLE(of, scpi_clocks_ids);
static struct platform_driver scpi_clocks_driver = {
.driver = {
.name = "scpi_clocks",
.of_match_table = scpi_clocks_ids,
},
.probe = scpi_clocks_probe,
.remove_new = scpi_clocks_remove,
};
module_platform_driver(scpi_clocks_driver);
MODULE_AUTHOR("Sudeep Holla <[email protected]>");
MODULE_DESCRIPTION("ARM SCPI clock driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/clk/clk-scpi.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Bitmain BM1880 SoC clock driver
*
* Copyright (c) 2019 Linaro Ltd.
* Author: Manivannan Sadhasivam <[email protected]>
*/
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <dt-bindings/clock/bm1880-clock.h>
#define BM1880_CLK_MPLL_CTL 0x00
#define BM1880_CLK_SPLL_CTL 0x04
#define BM1880_CLK_FPLL_CTL 0x08
#define BM1880_CLK_DDRPLL_CTL 0x0c
#define BM1880_CLK_ENABLE0 0x00
#define BM1880_CLK_ENABLE1 0x04
#define BM1880_CLK_SELECT 0x20
#define BM1880_CLK_DIV0 0x40
#define BM1880_CLK_DIV1 0x44
#define BM1880_CLK_DIV2 0x48
#define BM1880_CLK_DIV3 0x4c
#define BM1880_CLK_DIV4 0x50
#define BM1880_CLK_DIV5 0x54
#define BM1880_CLK_DIV6 0x58
#define BM1880_CLK_DIV7 0x5c
#define BM1880_CLK_DIV8 0x60
#define BM1880_CLK_DIV9 0x64
#define BM1880_CLK_DIV10 0x68
#define BM1880_CLK_DIV11 0x6c
#define BM1880_CLK_DIV12 0x70
#define BM1880_CLK_DIV13 0x74
#define BM1880_CLK_DIV14 0x78
#define BM1880_CLK_DIV15 0x7c
#define BM1880_CLK_DIV16 0x80
#define BM1880_CLK_DIV17 0x84
#define BM1880_CLK_DIV18 0x88
#define BM1880_CLK_DIV19 0x8c
#define BM1880_CLK_DIV20 0x90
#define BM1880_CLK_DIV21 0x94
#define BM1880_CLK_DIV22 0x98
#define BM1880_CLK_DIV23 0x9c
#define BM1880_CLK_DIV24 0xa0
#define BM1880_CLK_DIV25 0xa4
#define BM1880_CLK_DIV26 0xa8
#define BM1880_CLK_DIV27 0xac
#define BM1880_CLK_DIV28 0xb0
#define to_bm1880_pll_clk(_hw) container_of(_hw, struct bm1880_pll_hw_clock, hw)
#define to_bm1880_div_clk(_hw) container_of(_hw, struct bm1880_div_hw_clock, hw)
static DEFINE_SPINLOCK(bm1880_clk_lock);
struct bm1880_clock_data {
void __iomem *pll_base;
void __iomem *sys_base;
struct clk_hw_onecell_data hw_data;
};
struct bm1880_gate_clock {
unsigned int id;
const char *name;
const char *parent;
u32 gate_reg;
s8 gate_shift;
unsigned long flags;
};
struct bm1880_mux_clock {
unsigned int id;
const char *name;
const char * const *parents;
s8 num_parents;
u32 reg;
s8 shift;
unsigned long flags;
};
struct bm1880_div_clock {
unsigned int id;
const char *name;
u32 reg;
u8 shift;
u8 width;
u32 initval;
const struct clk_div_table *table;
unsigned long flags;
};
struct bm1880_div_hw_clock {
struct bm1880_div_clock div;
void __iomem *base;
spinlock_t *lock;
struct clk_hw hw;
struct clk_init_data init;
};
struct bm1880_composite_clock {
unsigned int id;
const char *name;
const char *parent;
const char * const *parents;
unsigned int num_parents;
unsigned long flags;
u32 gate_reg;
u32 mux_reg;
u32 div_reg;
s8 gate_shift;
s8 mux_shift;
s8 div_shift;
s8 div_width;
s16 div_initval;
const struct clk_div_table *table;
};
struct bm1880_pll_clock {
unsigned int id;
const char *name;
u32 reg;
unsigned long flags;
};
struct bm1880_pll_hw_clock {
struct bm1880_pll_clock pll;
void __iomem *base;
struct clk_hw hw;
struct clk_init_data init;
};
static const struct clk_ops bm1880_pll_ops;
static const struct clk_ops bm1880_clk_div_ops;
#define GATE_DIV(_id, _name, _parent, _gate_reg, _gate_shift, _div_reg, \
_div_shift, _div_width, _div_initval, _table, \
_flags) { \
.id = _id, \
.parent = _parent, \
.name = _name, \
.gate_reg = _gate_reg, \
.gate_shift = _gate_shift, \
.div_reg = _div_reg, \
.div_shift = _div_shift, \
.div_width = _div_width, \
.div_initval = _div_initval, \
.table = _table, \
.mux_shift = -1, \
.flags = _flags, \
}
#define GATE_MUX(_id, _name, _parents, _gate_reg, _gate_shift, \
_mux_reg, _mux_shift, _flags) { \
.id = _id, \
.parents = _parents, \
.num_parents = ARRAY_SIZE(_parents), \
.name = _name, \
.gate_reg = _gate_reg, \
.gate_shift = _gate_shift, \
.div_shift = -1, \
.mux_reg = _mux_reg, \
.mux_shift = _mux_shift, \
.flags = _flags, \
}
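/*
 * Composite clocks pair a gate with either a divider (GATE_DIV) or a mux
 * (GATE_MUX); the unused element is marked with a -1 shift so it can be
 * skipped when the clock is registered.
 */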
#define CLK_PLL(_id, _name, _parent, _reg, _flags) { \
.pll.id = _id, \
.pll.name = _name, \
.pll.reg = _reg, \
.hw.init = CLK_HW_INIT_PARENTS_DATA(_name, _parent, \
&bm1880_pll_ops, \
_flags), \
}
#define CLK_DIV(_id, _name, _parent, _reg, _shift, _width, _initval, \
_table, _flags) { \
.div.id = _id, \
.div.name = _name, \
.div.reg = _reg, \
.div.shift = _shift, \
.div.width = _width, \
.div.initval = _initval, \
.div.table = _table, \
.hw.init = CLK_HW_INIT_HW(_name, _parent, \
&bm1880_clk_div_ops, \
_flags), \
}
static struct clk_parent_data bm1880_pll_parent[] = {
{ .fw_name = "osc", .name = "osc" },
};
/*
 * All PLL clocks are crucial for the functioning of the SoC and have no
 * gate control, so they are never disabled
*/
static struct bm1880_pll_hw_clock bm1880_pll_clks[] = {
CLK_PLL(BM1880_CLK_MPLL, "clk_mpll", bm1880_pll_parent,
BM1880_CLK_MPLL_CTL, 0),
CLK_PLL(BM1880_CLK_SPLL, "clk_spll", bm1880_pll_parent,
BM1880_CLK_SPLL_CTL, 0),
CLK_PLL(BM1880_CLK_FPLL, "clk_fpll", bm1880_pll_parent,
BM1880_CLK_FPLL_CTL, 0),
CLK_PLL(BM1880_CLK_DDRPLL, "clk_ddrpll", bm1880_pll_parent,
BM1880_CLK_DDRPLL_CTL, 0),
};
/*
* Clocks marked as CRITICAL are needed for the proper functioning
* of the SoC.
*/
static const struct bm1880_gate_clock bm1880_gate_clks[] = {
{ BM1880_CLK_AHB_ROM, "clk_ahb_rom", "clk_mux_axi6",
BM1880_CLK_ENABLE0, 2, 0 },
{ BM1880_CLK_AXI_SRAM, "clk_axi_sram", "clk_axi1",
BM1880_CLK_ENABLE0, 3, 0 },
/*
* Since this clock is sourcing the DDR memory, let's mark it as
* critical to avoid gating.
*/
{ BM1880_CLK_DDR_AXI, "clk_ddr_axi", "clk_mux_axi6",
BM1880_CLK_ENABLE0, 4, CLK_IS_CRITICAL },
{ BM1880_CLK_APB_EFUSE, "clk_apb_efuse", "clk_mux_axi6",
BM1880_CLK_ENABLE0, 6, 0 },
{ BM1880_CLK_AXI5_EMMC, "clk_axi5_emmc", "clk_axi5",
BM1880_CLK_ENABLE0, 7, 0 },
{ BM1880_CLK_AXI5_SD, "clk_axi5_sd", "clk_axi5",
BM1880_CLK_ENABLE0, 10, 0 },
{ BM1880_CLK_AXI4_ETH0, "clk_axi4_eth0", "clk_axi4",
BM1880_CLK_ENABLE0, 14, 0 },
{ BM1880_CLK_AXI4_ETH1, "clk_axi4_eth1", "clk_axi4",
BM1880_CLK_ENABLE0, 16, 0 },
{ BM1880_CLK_AXI1_GDMA, "clk_axi1_gdma", "clk_axi1",
BM1880_CLK_ENABLE0, 17, 0 },
/* Don't gate GPIO clocks as it is not owned by the GPIO driver */
{ BM1880_CLK_APB_GPIO, "clk_apb_gpio", "clk_mux_axi6",
BM1880_CLK_ENABLE0, 18, CLK_IGNORE_UNUSED },
{ BM1880_CLK_APB_GPIO_INTR, "clk_apb_gpio_intr", "clk_mux_axi6",
BM1880_CLK_ENABLE0, 19, CLK_IGNORE_UNUSED },
{ BM1880_CLK_AXI1_MINER, "clk_axi1_miner", "clk_axi1",
BM1880_CLK_ENABLE0, 21, 0 },
{ BM1880_CLK_AHB_SF, "clk_ahb_sf", "clk_mux_axi6",
BM1880_CLK_ENABLE0, 22, 0 },
/*
* Not sure which module this clock is sourcing but gating this clock
* prevents the system from booting. So, let's mark it as critical.
*/
{ BM1880_CLK_SDMA_AXI, "clk_sdma_axi", "clk_axi5",
BM1880_CLK_ENABLE0, 23, CLK_IS_CRITICAL },
{ BM1880_CLK_APB_I2C, "clk_apb_i2c", "clk_mux_axi6",
BM1880_CLK_ENABLE0, 25, 0 },
{ BM1880_CLK_APB_WDT, "clk_apb_wdt", "clk_mux_axi6",
BM1880_CLK_ENABLE0, 26, 0 },
{ BM1880_CLK_APB_JPEG, "clk_apb_jpeg", "clk_axi6",
BM1880_CLK_ENABLE0, 27, 0 },
{ BM1880_CLK_AXI5_NF, "clk_axi5_nf", "clk_axi5",
BM1880_CLK_ENABLE0, 29, 0 },
{ BM1880_CLK_APB_NF, "clk_apb_nf", "clk_axi6",
BM1880_CLK_ENABLE0, 30, 0 },
{ BM1880_CLK_APB_PWM, "clk_apb_pwm", "clk_mux_axi6",
BM1880_CLK_ENABLE1, 0, 0 },
{ BM1880_CLK_RV, "clk_rv", "clk_mux_rv",
BM1880_CLK_ENABLE1, 1, 0 },
{ BM1880_CLK_APB_SPI, "clk_apb_spi", "clk_mux_axi6",
BM1880_CLK_ENABLE1, 2, 0 },
{ BM1880_CLK_UART_500M, "clk_uart_500m", "clk_div_uart_500m",
BM1880_CLK_ENABLE1, 4, 0 },
{ BM1880_CLK_APB_UART, "clk_apb_uart", "clk_axi6",
BM1880_CLK_ENABLE1, 5, 0 },
{ BM1880_CLK_APB_I2S, "clk_apb_i2s", "clk_axi6",
BM1880_CLK_ENABLE1, 6, 0 },
{ BM1880_CLK_AXI4_USB, "clk_axi4_usb", "clk_axi4",
BM1880_CLK_ENABLE1, 7, 0 },
{ BM1880_CLK_APB_USB, "clk_apb_usb", "clk_axi6",
BM1880_CLK_ENABLE1, 8, 0 },
{ BM1880_CLK_12M_USB, "clk_12m_usb", "clk_div_12m_usb",
BM1880_CLK_ENABLE1, 11, 0 },
{ BM1880_CLK_APB_VIDEO, "clk_apb_video", "clk_axi6",
BM1880_CLK_ENABLE1, 12, 0 },
{ BM1880_CLK_APB_VPP, "clk_apb_vpp", "clk_axi6",
BM1880_CLK_ENABLE1, 15, 0 },
{ BM1880_CLK_AXI6, "clk_axi6", "clk_mux_axi6",
BM1880_CLK_ENABLE1, 21, 0 },
};
static const char * const clk_a53_parents[] = { "clk_spll", "clk_mpll" };
static const char * const clk_rv_parents[] = { "clk_div_1_rv", "clk_div_0_rv" };
static const char * const clk_axi1_parents[] = { "clk_div_1_axi1", "clk_div_0_axi1" };
static const char * const clk_axi6_parents[] = { "clk_div_1_axi6", "clk_div_0_axi6" };
static const struct bm1880_mux_clock bm1880_mux_clks[] = {
{ BM1880_CLK_MUX_RV, "clk_mux_rv", clk_rv_parents, 2,
BM1880_CLK_SELECT, 1, 0 },
{ BM1880_CLK_MUX_AXI6, "clk_mux_axi6", clk_axi6_parents, 2,
BM1880_CLK_SELECT, 3, 0 },
};
static const struct clk_div_table bm1880_div_table_0[] = {
{ 0, 1 }, { 1, 2 }, { 2, 3 }, { 3, 4 },
{ 4, 5 }, { 5, 6 }, { 6, 7 }, { 7, 8 },
{ 8, 9 }, { 9, 10 }, { 10, 11 }, { 11, 12 },
{ 12, 13 }, { 13, 14 }, { 14, 15 }, { 15, 16 },
{ 16, 17 }, { 17, 18 }, { 18, 19 }, { 19, 20 },
{ 20, 21 }, { 21, 22 }, { 22, 23 }, { 23, 24 },
{ 24, 25 }, { 25, 26 }, { 26, 27 }, { 27, 28 },
{ 28, 29 }, { 29, 30 }, { 30, 31 }, { 31, 32 },
{ 0, 0 }
};
static const struct clk_div_table bm1880_div_table_1[] = {
{ 0, 1 }, { 1, 2 }, { 2, 3 }, { 3, 4 },
{ 4, 5 }, { 5, 6 }, { 6, 7 }, { 7, 8 },
{ 8, 9 }, { 9, 10 }, { 10, 11 }, { 11, 12 },
{ 12, 13 }, { 13, 14 }, { 14, 15 }, { 15, 16 },
{ 16, 17 }, { 17, 18 }, { 18, 19 }, { 19, 20 },
{ 20, 21 }, { 21, 22 }, { 22, 23 }, { 23, 24 },
{ 24, 25 }, { 25, 26 }, { 26, 27 }, { 27, 28 },
{ 28, 29 }, { 29, 30 }, { 30, 31 }, { 31, 32 },
{ 127, 128 }, { 0, 0 }
};
static const struct clk_div_table bm1880_div_table_2[] = {
{ 0, 1 }, { 1, 2 }, { 2, 3 }, { 3, 4 },
{ 4, 5 }, { 5, 6 }, { 6, 7 }, { 7, 8 },
{ 8, 9 }, { 9, 10 }, { 10, 11 }, { 11, 12 },
{ 12, 13 }, { 13, 14 }, { 14, 15 }, { 15, 16 },
{ 16, 17 }, { 17, 18 }, { 18, 19 }, { 19, 20 },
{ 20, 21 }, { 21, 22 }, { 22, 23 }, { 23, 24 },
{ 24, 25 }, { 25, 26 }, { 26, 27 }, { 27, 28 },
{ 28, 29 }, { 29, 30 }, { 30, 31 }, { 31, 32 },
{ 127, 128 }, { 255, 256 }, { 0, 0 }
};
static const struct clk_div_table bm1880_div_table_3[] = {
{ 0, 1 }, { 1, 2 }, { 2, 3 }, { 3, 4 },
{ 4, 5 }, { 5, 6 }, { 6, 7 }, { 7, 8 },
{ 8, 9 }, { 9, 10 }, { 10, 11 }, { 11, 12 },
{ 12, 13 }, { 13, 14 }, { 14, 15 }, { 15, 16 },
{ 16, 17 }, { 17, 18 }, { 18, 19 }, { 19, 20 },
{ 20, 21 }, { 21, 22 }, { 22, 23 }, { 23, 24 },
{ 24, 25 }, { 25, 26 }, { 26, 27 }, { 27, 28 },
{ 28, 29 }, { 29, 30 }, { 30, 31 }, { 31, 32 },
{ 127, 128 }, { 255, 256 }, { 511, 512 }, { 0, 0 }
};
static const struct clk_div_table bm1880_div_table_4[] = {
{ 0, 1 }, { 1, 2 }, { 2, 3 }, { 3, 4 },
{ 4, 5 }, { 5, 6 }, { 6, 7 }, { 7, 8 },
{ 8, 9 }, { 9, 10 }, { 10, 11 }, { 11, 12 },
{ 12, 13 }, { 13, 14 }, { 14, 15 }, { 15, 16 },
{ 16, 17 }, { 17, 18 }, { 18, 19 }, { 19, 20 },
{ 20, 21 }, { 21, 22 }, { 22, 23 }, { 23, 24 },
{ 24, 25 }, { 25, 26 }, { 26, 27 }, { 27, 28 },
{ 28, 29 }, { 29, 30 }, { 30, 31 }, { 31, 32 },
{ 127, 128 }, { 255, 256 }, { 511, 512 }, { 65535, 65536 },
{ 0, 0 }
};
/*
* Clocks marked as CRITICAL are needed for the proper functioning
* of the SoC.
*/
static struct bm1880_div_hw_clock bm1880_div_clks[] = {
CLK_DIV(BM1880_CLK_DIV_0_RV, "clk_div_0_rv", &bm1880_pll_clks[1].hw,
BM1880_CLK_DIV12, 16, 5, 1, bm1880_div_table_0, 0),
CLK_DIV(BM1880_CLK_DIV_1_RV, "clk_div_1_rv", &bm1880_pll_clks[2].hw,
BM1880_CLK_DIV13, 16, 5, 1, bm1880_div_table_0, 0),
CLK_DIV(BM1880_CLK_DIV_UART_500M, "clk_div_uart_500m", &bm1880_pll_clks[2].hw,
BM1880_CLK_DIV15, 16, 7, 3, bm1880_div_table_1, 0),
CLK_DIV(BM1880_CLK_DIV_0_AXI1, "clk_div_0_axi1", &bm1880_pll_clks[0].hw,
BM1880_CLK_DIV21, 16, 5, 2, bm1880_div_table_0,
0),
CLK_DIV(BM1880_CLK_DIV_1_AXI1, "clk_div_1_axi1", &bm1880_pll_clks[2].hw,
BM1880_CLK_DIV22, 16, 5, 3, bm1880_div_table_0,
0),
CLK_DIV(BM1880_CLK_DIV_0_AXI6, "clk_div_0_axi6", &bm1880_pll_clks[2].hw,
BM1880_CLK_DIV27, 16, 5, 15, bm1880_div_table_0,
0),
CLK_DIV(BM1880_CLK_DIV_1_AXI6, "clk_div_1_axi6", &bm1880_pll_clks[0].hw,
BM1880_CLK_DIV28, 16, 5, 11, bm1880_div_table_0,
0),
CLK_DIV(BM1880_CLK_DIV_12M_USB, "clk_div_12m_usb", &bm1880_pll_clks[2].hw,
BM1880_CLK_DIV18, 16, 7, 125, bm1880_div_table_1, 0),
};
/*
* Clocks marked as CRITICAL are all needed for the proper functioning
* of the SoC.
*/
static struct bm1880_composite_clock bm1880_composite_clks[] = {
/*
* Since clk_a53 and clk_50m_a53 clocks are sourcing the CPU core,
* let's mark them as critical to avoid gating.
*/
GATE_MUX(BM1880_CLK_A53, "clk_a53", clk_a53_parents,
BM1880_CLK_ENABLE0, 0, BM1880_CLK_SELECT, 0,
CLK_IS_CRITICAL),
GATE_DIV(BM1880_CLK_50M_A53, "clk_50m_a53", "clk_fpll",
BM1880_CLK_ENABLE0, 1, BM1880_CLK_DIV0, 16, 5, 30,
bm1880_div_table_0, CLK_IS_CRITICAL),
GATE_DIV(BM1880_CLK_EFUSE, "clk_efuse", "clk_fpll",
BM1880_CLK_ENABLE0, 5, BM1880_CLK_DIV1, 16, 7, 60,
bm1880_div_table_1, 0),
GATE_DIV(BM1880_CLK_EMMC, "clk_emmc", "clk_fpll",
BM1880_CLK_ENABLE0, 8, BM1880_CLK_DIV2, 16, 5, 15,
bm1880_div_table_0, 0),
GATE_DIV(BM1880_CLK_100K_EMMC, "clk_100k_emmc", "clk_div_12m_usb",
BM1880_CLK_ENABLE0, 9, BM1880_CLK_DIV3, 16, 8, 120,
bm1880_div_table_2, 0),
GATE_DIV(BM1880_CLK_SD, "clk_sd", "clk_fpll",
BM1880_CLK_ENABLE0, 11, BM1880_CLK_DIV4, 16, 5, 15,
bm1880_div_table_0, 0),
GATE_DIV(BM1880_CLK_100K_SD, "clk_100k_sd", "clk_div_12m_usb",
BM1880_CLK_ENABLE0, 12, BM1880_CLK_DIV5, 16, 8, 120,
bm1880_div_table_2, 0),
GATE_DIV(BM1880_CLK_500M_ETH0, "clk_500m_eth0", "clk_fpll",
BM1880_CLK_ENABLE0, 13, BM1880_CLK_DIV6, 16, 5, 3,
bm1880_div_table_0, 0),
GATE_DIV(BM1880_CLK_500M_ETH1, "clk_500m_eth1", "clk_fpll",
BM1880_CLK_ENABLE0, 15, BM1880_CLK_DIV7, 16, 5, 3,
bm1880_div_table_0, 0),
/* Don't gate GPIO clocks as it is not owned by the GPIO driver */
GATE_DIV(BM1880_CLK_GPIO_DB, "clk_gpio_db", "clk_div_12m_usb",
BM1880_CLK_ENABLE0, 20, BM1880_CLK_DIV8, 16, 16, 120,
bm1880_div_table_4, CLK_IGNORE_UNUSED),
GATE_DIV(BM1880_CLK_SDMA_AUD, "clk_sdma_aud", "clk_fpll",
BM1880_CLK_ENABLE0, 24, BM1880_CLK_DIV9, 16, 7, 61,
bm1880_div_table_1, 0),
GATE_DIV(BM1880_CLK_JPEG_AXI, "clk_jpeg_axi", "clk_fpll",
BM1880_CLK_ENABLE0, 28, BM1880_CLK_DIV10, 16, 5, 4,
bm1880_div_table_0, 0),
GATE_DIV(BM1880_CLK_NF, "clk_nf", "clk_fpll",
BM1880_CLK_ENABLE0, 31, BM1880_CLK_DIV11, 16, 5, 30,
bm1880_div_table_0, 0),
GATE_DIV(BM1880_CLK_TPU_AXI, "clk_tpu_axi", "clk_spll",
BM1880_CLK_ENABLE1, 3, BM1880_CLK_DIV14, 16, 5, 1,
bm1880_div_table_0, 0),
GATE_DIV(BM1880_CLK_125M_USB, "clk_125m_usb", "clk_fpll",
BM1880_CLK_ENABLE1, 9, BM1880_CLK_DIV16, 16, 5, 12,
bm1880_div_table_0, 0),
GATE_DIV(BM1880_CLK_33K_USB, "clk_33k_usb", "clk_div_12m_usb",
BM1880_CLK_ENABLE1, 10, BM1880_CLK_DIV17, 16, 9, 363,
bm1880_div_table_3, 0),
GATE_DIV(BM1880_CLK_VIDEO_AXI, "clk_video_axi", "clk_fpll",
BM1880_CLK_ENABLE1, 13, BM1880_CLK_DIV19, 16, 5, 4,
bm1880_div_table_0, 0),
GATE_DIV(BM1880_CLK_VPP_AXI, "clk_vpp_axi", "clk_fpll",
BM1880_CLK_ENABLE1, 14, BM1880_CLK_DIV20, 16, 5, 4,
bm1880_div_table_0, 0),
GATE_MUX(BM1880_CLK_AXI1, "clk_axi1", clk_axi1_parents,
BM1880_CLK_ENABLE1, 15, BM1880_CLK_SELECT, 2, 0),
GATE_DIV(BM1880_CLK_AXI2, "clk_axi2", "clk_fpll",
BM1880_CLK_ENABLE1, 17, BM1880_CLK_DIV23, 16, 5, 3,
bm1880_div_table_0, 0),
GATE_DIV(BM1880_CLK_AXI3, "clk_axi3", "clk_mux_rv",
BM1880_CLK_ENABLE1, 18, BM1880_CLK_DIV24, 16, 5, 2,
bm1880_div_table_0, 0),
GATE_DIV(BM1880_CLK_AXI4, "clk_axi4", "clk_fpll",
BM1880_CLK_ENABLE1, 19, BM1880_CLK_DIV25, 16, 5, 6,
bm1880_div_table_0, 0),
GATE_DIV(BM1880_CLK_AXI5, "clk_axi5", "clk_fpll",
BM1880_CLK_ENABLE1, 20, BM1880_CLK_DIV26, 16, 5, 15,
bm1880_div_table_0, 0),
};
static unsigned long bm1880_pll_rate_calc(u32 regval, unsigned long parent_rate)
{
u64 numerator;
u32 fbdiv, refdiv;
u32 postdiv1, postdiv2, denominator;
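/*
 * PLL control word layout, as decoded below:
 * [4:0] refdiv, [10:8] postdiv1, [14:12] postdiv2, [27:16] fbdiv
 * rate = parent_rate * fbdiv / (refdiv * postdiv1 * postdiv2)
 */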
fbdiv = (regval >> 16) & 0xfff;
refdiv = regval & 0x1f;
postdiv1 = (regval >> 8) & 0x7;
postdiv2 = (regval >> 12) & 0x7;
numerator = parent_rate * fbdiv;
denominator = refdiv * postdiv1 * postdiv2;
do_div(numerator, denominator);
return (unsigned long)numerator;
}
static unsigned long bm1880_pll_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct bm1880_pll_hw_clock *pll_hw = to_bm1880_pll_clk(hw);
unsigned long rate;
u32 regval;
regval = readl(pll_hw->base + pll_hw->pll.reg);
rate = bm1880_pll_rate_calc(regval, parent_rate);
return rate;
}
static const struct clk_ops bm1880_pll_ops = {
.recalc_rate = bm1880_pll_recalc_rate,
};
static struct clk_hw *bm1880_clk_register_pll(struct bm1880_pll_hw_clock *pll_clk,
void __iomem *sys_base)
{
struct clk_hw *hw;
int err;
pll_clk->base = sys_base;
hw = &pll_clk->hw;
err = clk_hw_register(NULL, hw);
if (err)
return ERR_PTR(err);
return hw;
}
static int bm1880_clk_register_plls(struct bm1880_pll_hw_clock *clks,
int num_clks,
struct bm1880_clock_data *data)
{
struct clk_hw *hw;
void __iomem *pll_base = data->pll_base;
int i;
for (i = 0; i < num_clks; i++) {
struct bm1880_pll_hw_clock *bm1880_clk = &clks[i];
hw = bm1880_clk_register_pll(bm1880_clk, pll_base);
if (IS_ERR(hw)) {
pr_err("%s: failed to register clock %s\n",
__func__, bm1880_clk->pll.name);
goto err_clk;
}
data->hw_data.hws[clks[i].pll.id] = hw;
}
return 0;
err_clk:
while (i--)
clk_hw_unregister(data->hw_data.hws[clks[i].pll.id]);
return PTR_ERR(hw);
}
static int bm1880_clk_register_mux(const struct bm1880_mux_clock *clks,
int num_clks,
struct bm1880_clock_data *data)
{
struct clk_hw *hw;
void __iomem *sys_base = data->sys_base;
int i;
for (i = 0; i < num_clks; i++) {
hw = clk_hw_register_mux(NULL, clks[i].name,
clks[i].parents,
clks[i].num_parents,
clks[i].flags,
sys_base + clks[i].reg,
clks[i].shift, 1, 0,
&bm1880_clk_lock);
if (IS_ERR(hw)) {
pr_err("%s: failed to register clock %s\n",
__func__, clks[i].name);
goto err_clk;
}
data->hw_data.hws[clks[i].id] = hw;
}
return 0;
err_clk:
while (i--)
clk_hw_unregister_mux(data->hw_data.hws[clks[i].id]);
return PTR_ERR(hw);
}
static unsigned long bm1880_clk_div_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct bm1880_div_hw_clock *div_hw = to_bm1880_div_clk(hw);
struct bm1880_div_clock *div = &div_hw->div;
void __iomem *reg_addr = div_hw->base + div->reg;
unsigned int val;
unsigned long rate;
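/*
 * Bit 3 appears to select whether the programmed divider is in effect;
 * when it is clear the block runs from its reset-default ratio, so
 * report the documented initial value instead.
 */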
if (!(readl(reg_addr) & BIT(3))) {
val = div->initval;
} else {
val = readl(reg_addr) >> div->shift;
val &= clk_div_mask(div->width);
}
rate = divider_recalc_rate(hw, parent_rate, val, div->table,
div->flags, div->width);
return rate;
}
static long bm1880_clk_div_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
struct bm1880_div_hw_clock *div_hw = to_bm1880_div_clk(hw);
struct bm1880_div_clock *div = &div_hw->div;
void __iomem *reg_addr = div_hw->base + div->reg;
if (div->flags & CLK_DIVIDER_READ_ONLY) {
u32 val;
val = readl(reg_addr) >> div->shift;
val &= clk_div_mask(div->width);
return divider_ro_round_rate(hw, rate, prate, div->table,
div->width, div->flags,
val);
}
return divider_round_rate(hw, rate, prate, div->table,
div->width, div->flags);
}
static int bm1880_clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct bm1880_div_hw_clock *div_hw = to_bm1880_div_clk(hw);
struct bm1880_div_clock *div = &div_hw->div;
void __iomem *reg_addr = div_hw->base + div->reg;
unsigned long flags = 0;
int value;
u32 val;
value = divider_get_val(rate, parent_rate, div->table,
div->width, div_hw->div.flags);
if (value < 0)
return value;
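/*
 * The spinlock is optional; the __acquire()/__release() stubs in the
 * else branches keep sparse's lock-context tracking balanced when no
 * lock was supplied.
 */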
if (div_hw->lock)
spin_lock_irqsave(div_hw->lock, flags);
else
__acquire(div_hw->lock);
val = readl(reg_addr);
val &= ~(clk_div_mask(div->width) << div_hw->div.shift);
val |= (u32)value << div->shift;
writel(val, reg_addr);
if (div_hw->lock)
spin_unlock_irqrestore(div_hw->lock, flags);
else
__release(div_hw->lock);
return 0;
}
static const struct clk_ops bm1880_clk_div_ops = {
.recalc_rate = bm1880_clk_div_recalc_rate,
.round_rate = bm1880_clk_div_round_rate,
.set_rate = bm1880_clk_div_set_rate,
};
static struct clk_hw *bm1880_clk_register_div(struct bm1880_div_hw_clock *div_clk,
void __iomem *sys_base)
{
struct clk_hw *hw;
int err;
div_clk->div.flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO;
div_clk->base = sys_base;
div_clk->lock = &bm1880_clk_lock;
hw = &div_clk->hw;
err = clk_hw_register(NULL, hw);
if (err)
return ERR_PTR(err);
return hw;
}
static int bm1880_clk_register_divs(struct bm1880_div_hw_clock *clks,
int num_clks,
struct bm1880_clock_data *data)
{
struct clk_hw *hw;
void __iomem *sys_base = data->sys_base;
unsigned int i, id;
for (i = 0; i < num_clks; i++) {
struct bm1880_div_hw_clock *bm1880_clk = &clks[i];
hw = bm1880_clk_register_div(bm1880_clk, sys_base);
if (IS_ERR(hw)) {
pr_err("%s: failed to register clock %s\n",
__func__, bm1880_clk->div.name);
goto err_clk;
}
id = clks[i].div.id;
data->hw_data.hws[id] = hw;
}
return 0;
err_clk:
while (i--)
clk_hw_unregister(data->hw_data.hws[clks[i].div.id]);
return PTR_ERR(hw);
}
static int bm1880_clk_register_gate(const struct bm1880_gate_clock *clks,
int num_clks,
struct bm1880_clock_data *data)
{
struct clk_hw *hw;
void __iomem *sys_base = data->sys_base;
int i;
for (i = 0; i < num_clks; i++) {
hw = clk_hw_register_gate(NULL, clks[i].name,
clks[i].parent,
clks[i].flags,
sys_base + clks[i].gate_reg,
clks[i].gate_shift, 0,
&bm1880_clk_lock);
if (IS_ERR(hw)) {
pr_err("%s: failed to register clock %s\n",
__func__, clks[i].name);
goto err_clk;
}
data->hw_data.hws[clks[i].id] = hw;
}
return 0;
err_clk:
while (i--)
clk_hw_unregister_gate(data->hw_data.hws[clks[i].id]);
return PTR_ERR(hw);
}
static struct clk_hw *bm1880_clk_register_composite(struct bm1880_composite_clock *clks,
void __iomem *sys_base)
{
struct clk_hw *hw;
struct clk_mux *mux = NULL;
struct clk_gate *gate = NULL;
struct bm1880_div_hw_clock *div_hws = NULL;
struct clk_hw *mux_hw = NULL, *gate_hw = NULL, *div_hw = NULL;
const struct clk_ops *mux_ops = NULL, *gate_ops = NULL, *div_ops = NULL;
const char * const *parent_names;
const char *parent;
int num_parents;
int ret;
if (clks->mux_shift >= 0) {
mux = kzalloc(sizeof(*mux), GFP_KERNEL);
if (!mux)
return ERR_PTR(-ENOMEM);
mux->reg = sys_base + clks->mux_reg;
mux->mask = 1;
mux->shift = clks->mux_shift;
mux_hw = &mux->hw;
mux_ops = &clk_mux_ops;
mux->lock = &bm1880_clk_lock;
parent_names = clks->parents;
num_parents = clks->num_parents;
} else {
parent = clks->parent;
parent_names = &parent;
num_parents = 1;
}
if (clks->gate_shift >= 0) {
gate = kzalloc(sizeof(*gate), GFP_KERNEL);
if (!gate) {
ret = -ENOMEM;
goto err_out;
}
gate->reg = sys_base + clks->gate_reg;
gate->bit_idx = clks->gate_shift;
gate->lock = &bm1880_clk_lock;
gate_hw = &gate->hw;
gate_ops = &clk_gate_ops;
}
if (clks->div_shift >= 0) {
div_hws = kzalloc(sizeof(*div_hws), GFP_KERNEL);
if (!div_hws) {
ret = -ENOMEM;
goto err_out;
}
div_hws->base = sys_base;
div_hws->div.reg = clks->div_reg;
div_hws->div.shift = clks->div_shift;
div_hws->div.width = clks->div_width;
div_hws->div.table = clks->table;
div_hws->div.initval = clks->div_initval;
div_hws->lock = &bm1880_clk_lock;
div_hws->div.flags = CLK_DIVIDER_ONE_BASED |
CLK_DIVIDER_ALLOW_ZERO;
div_hw = &div_hws->hw;
div_ops = &bm1880_clk_div_ops;
}
hw = clk_hw_register_composite(NULL, clks->name, parent_names,
num_parents, mux_hw, mux_ops, div_hw,
div_ops, gate_hw, gate_ops,
clks->flags);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto err_out;
}
return hw;
err_out:
kfree(div_hws);
kfree(gate);
kfree(mux);
return ERR_PTR(ret);
}
static int bm1880_clk_register_composites(struct bm1880_composite_clock *clks,
int num_clks,
struct bm1880_clock_data *data)
{
struct clk_hw *hw;
void __iomem *sys_base = data->sys_base;
int i;
for (i = 0; i < num_clks; i++) {
struct bm1880_composite_clock *bm1880_clk = &clks[i];
hw = bm1880_clk_register_composite(bm1880_clk, sys_base);
if (IS_ERR(hw)) {
pr_err("%s: failed to register clock %s\n",
__func__, bm1880_clk->name);
goto err_clk;
}
data->hw_data.hws[clks[i].id] = hw;
}
return 0;
err_clk:
while (i--)
clk_hw_unregister_composite(data->hw_data.hws[clks[i].id]);
return PTR_ERR(hw);
}
static int bm1880_clk_probe(struct platform_device *pdev)
{
struct bm1880_clock_data *clk_data;
void __iomem *pll_base, *sys_base;
struct device *dev = &pdev->dev;
int num_clks, i;
pll_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pll_base))
return PTR_ERR(pll_base);
sys_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(sys_base))
return PTR_ERR(sys_base);
num_clks = ARRAY_SIZE(bm1880_pll_clks) +
ARRAY_SIZE(bm1880_div_clks) +
ARRAY_SIZE(bm1880_mux_clks) +
ARRAY_SIZE(bm1880_composite_clks) +
ARRAY_SIZE(bm1880_gate_clks);
clk_data = devm_kzalloc(dev, struct_size(clk_data, hw_data.hws,
num_clks), GFP_KERNEL);
if (!clk_data)
return -ENOMEM;
clk_data->pll_base = pll_base;
clk_data->sys_base = sys_base;
for (i = 0; i < num_clks; i++)
clk_data->hw_data.hws[i] = ERR_PTR(-ENOENT);
clk_data->hw_data.num = num_clks;
bm1880_clk_register_plls(bm1880_pll_clks,
ARRAY_SIZE(bm1880_pll_clks),
clk_data);
bm1880_clk_register_divs(bm1880_div_clks,
ARRAY_SIZE(bm1880_div_clks),
clk_data);
bm1880_clk_register_mux(bm1880_mux_clks,
ARRAY_SIZE(bm1880_mux_clks),
clk_data);
bm1880_clk_register_composites(bm1880_composite_clks,
ARRAY_SIZE(bm1880_composite_clks),
clk_data);
bm1880_clk_register_gate(bm1880_gate_clks,
ARRAY_SIZE(bm1880_gate_clks),
clk_data);
return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
&clk_data->hw_data);
}
static const struct of_device_id bm1880_of_match[] = {
{ .compatible = "bitmain,bm1880-clk", },
{}
};
MODULE_DEVICE_TABLE(of, bm1880_of_match);
static struct platform_driver bm1880_clk_driver = {
.driver = {
.name = "bm1880-clk",
.of_match_table = bm1880_of_match,
},
.probe = bm1880_clk_probe,
};
module_platform_driver(bm1880_clk_driver);
MODULE_AUTHOR("Manivannan Sadhasivam <[email protected]>");
MODULE_DESCRIPTION("Clock driver for Bitmain BM1880 SoC");
| linux-master | drivers/clk/clk-bm1880.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2014 Intel Corporation
*
* Adjustable fractional divider clock implementation.
* Uses rational best approximation algorithm.
*
* Output is calculated as
*
* rate = (m / n) * parent_rate (1)
*
* This is useful when we have a prescaler block which asks for
* m (numerator) and n (denominator) values to be provided to satisfy
* (1) as closely as possible.
*
* Since m and n are constrained to a range, e.g.
*
* n >= 1, n < N_width, where N_width = 2^nwidth (2)
*
* in some cases the output may be saturated. Hence, from (1) and (2),
* assuming the worst case when m = 1, the inequality
*
* floor(log2(parent_rate / rate)) <= nwidth (3)
*
* may be derived. Thus, in cases when
*
* (parent_rate / rate) >> N_width (4)
*
* we might scale up the rate by 2^scale (see the description of
* CLK_FRAC_DIVIDER_POWER_OF_TWO_PS for additional information), where
*
* scale = floor(log2(parent_rate / rate)) - nwidth (5)
*
* and assume that the IP, that needs m and n, has also its own
* prescaler, which is capable to divide by 2^scale. In this way
* we get the denominator to satisfy the desired range (2) and
* at the same time a much better result of m and n than simple
* saturated values.
*/
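/*
 * Worked example of the scaling above (illustrative numbers only): for
 * parent_rate = 100 MHz, rate = 1 kHz and nwidth = 8, relation (5) gives
 * scale = floor(log2(100000)) - 8 = 16 - 8 = 8, so the rate fed to the
 * rational approximation is 1 kHz << 8 = 256 kHz and the IP's
 * power-of-two prescaler is expected to divide the output by 2^8.
 */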
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/math.h>
#include <linux/module.h>
#include <linux/rational.h>
#include <linux/slab.h>
#include <linux/clk-provider.h>
#include "clk-fractional-divider.h"
static inline u32 clk_fd_readl(struct clk_fractional_divider *fd)
{
if (fd->flags & CLK_FRAC_DIVIDER_BIG_ENDIAN)
return ioread32be(fd->reg);
return readl(fd->reg);
}
static inline void clk_fd_writel(struct clk_fractional_divider *fd, u32 val)
{
if (fd->flags & CLK_FRAC_DIVIDER_BIG_ENDIAN)
iowrite32be(val, fd->reg);
else
writel(val, fd->reg);
}
static void clk_fd_get_div(struct clk_hw *hw, struct u32_fract *fract)
{
struct clk_fractional_divider *fd = to_clk_fd(hw);
unsigned long flags = 0;
unsigned long m, n;
u32 mmask, nmask;
u32 val;
if (fd->lock)
spin_lock_irqsave(fd->lock, flags);
else
__acquire(fd->lock);
val = clk_fd_readl(fd);
if (fd->lock)
spin_unlock_irqrestore(fd->lock, flags);
else
__release(fd->lock);
mmask = GENMASK(fd->mwidth - 1, 0) << fd->mshift;
nmask = GENMASK(fd->nwidth - 1, 0) << fd->nshift;
m = (val & mmask) >> fd->mshift;
n = (val & nmask) >> fd->nshift;
if (fd->flags & CLK_FRAC_DIVIDER_ZERO_BASED) {
m++;
n++;
}
fract->numerator = m;
fract->denominator = n;
}
static unsigned long clk_fd_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
struct u32_fract fract;
u64 ret;
clk_fd_get_div(hw, &fract);
if (!fract.numerator || !fract.denominator)
return parent_rate;
ret = (u64)parent_rate * fract.numerator;
do_div(ret, fract.denominator);
return ret;
}
void clk_fractional_divider_general_approximation(struct clk_hw *hw,
unsigned long rate,
unsigned long *parent_rate,
unsigned long *m, unsigned long *n)
{
struct clk_fractional_divider *fd = to_clk_fd(hw);
/*
* Get rate closer to *parent_rate to guarantee there is no overflow
* for m and n. The result will be the nearest achievable rate left
* shifted by (scale - fd->nwidth) bits.
*
* For the detailed explanation see the top comment in this file.
*/
if (fd->flags & CLK_FRAC_DIVIDER_POWER_OF_TWO_PS) {
unsigned long scale = fls_long(*parent_rate / rate - 1);
if (scale > fd->nwidth)
rate <<= scale - fd->nwidth;
}
rational_best_approximation(rate, *parent_rate,
GENMASK(fd->mwidth - 1, 0), GENMASK(fd->nwidth - 1, 0),
m, n);
}
static long clk_fd_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
struct clk_fractional_divider *fd = to_clk_fd(hw);
unsigned long m, n;
u64 ret;
if (!rate || (!clk_hw_can_set_rate_parent(hw) && rate >= *parent_rate))
return *parent_rate;
if (fd->approximation)
fd->approximation(hw, rate, parent_rate, &m, &n);
else
clk_fractional_divider_general_approximation(hw, rate, parent_rate, &m, &n);
ret = (u64)*parent_rate * m;
do_div(ret, n);
return ret;
}
static int clk_fd_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_fractional_divider *fd = to_clk_fd(hw);
unsigned long flags = 0;
unsigned long m, n;
u32 mmask, nmask;
u32 val;
rational_best_approximation(rate, parent_rate,
GENMASK(fd->mwidth - 1, 0), GENMASK(fd->nwidth - 1, 0),
&m, &n);
if (fd->flags & CLK_FRAC_DIVIDER_ZERO_BASED) {
m--;
n--;
}
if (fd->lock)
spin_lock_irqsave(fd->lock, flags);
else
__acquire(fd->lock);
mmask = GENMASK(fd->mwidth - 1, 0) << fd->mshift;
nmask = GENMASK(fd->nwidth - 1, 0) << fd->nshift;
val = clk_fd_readl(fd);
val &= ~(mmask | nmask);
val |= (m << fd->mshift) | (n << fd->nshift);
clk_fd_writel(fd, val);
if (fd->lock)
spin_unlock_irqrestore(fd->lock, flags);
else
__release(fd->lock);
return 0;
}
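/*
 * Worked example for clk_fd_set_rate() above (illustrative only,
 * assuming mwidth = nwidth = 8): for parent_rate = 19.2 MHz and
 * rate = 12.288 MHz, rational_best_approximation() yields m/n = 16/25,
 * which hits the target exactly: 19200000 * 16 / 25 = 12288000.
 */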
#ifdef CONFIG_DEBUG_FS
static int clk_fd_numerator_get(void *hw, u64 *val)
{
struct u32_fract fract;
clk_fd_get_div(hw, &fract);
*val = fract.numerator;
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(clk_fd_numerator_fops, clk_fd_numerator_get, NULL, "%llu\n");
static int clk_fd_denominator_get(void *hw, u64 *val)
{
struct u32_fract fract;
clk_fd_get_div(hw, &fract);
*val = fract.denominator;
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(clk_fd_denominator_fops, clk_fd_denominator_get, NULL, "%llu\n");
static void clk_fd_debug_init(struct clk_hw *hw, struct dentry *dentry)
{
debugfs_create_file("numerator", 0444, dentry, hw, &clk_fd_numerator_fops);
debugfs_create_file("denominator", 0444, dentry, hw, &clk_fd_denominator_fops);
}
#endif
const struct clk_ops clk_fractional_divider_ops = {
.recalc_rate = clk_fd_recalc_rate,
.round_rate = clk_fd_round_rate,
.set_rate = clk_fd_set_rate,
#ifdef CONFIG_DEBUG_FS
.debug_init = clk_fd_debug_init,
#endif
};
EXPORT_SYMBOL_GPL(clk_fractional_divider_ops);
struct clk_hw *clk_hw_register_fractional_divider(struct device *dev,
const char *name, const char *parent_name, unsigned long flags,
void __iomem *reg, u8 mshift, u8 mwidth, u8 nshift, u8 nwidth,
u8 clk_divider_flags, spinlock_t *lock)
{
struct clk_fractional_divider *fd;
struct clk_init_data init;
struct clk_hw *hw;
int ret;
fd = kzalloc(sizeof(*fd), GFP_KERNEL);
if (!fd)
return ERR_PTR(-ENOMEM);
init.name = name;
init.ops = &clk_fractional_divider_ops;
init.flags = flags;
init.parent_names = parent_name ? &parent_name : NULL;
init.num_parents = parent_name ? 1 : 0;
fd->reg = reg;
fd->mshift = mshift;
fd->mwidth = mwidth;
fd->nshift = nshift;
fd->nwidth = nwidth;
fd->flags = clk_divider_flags;
fd->lock = lock;
fd->hw.init = &init;
hw = &fd->hw;
ret = clk_hw_register(dev, hw);
if (ret) {
kfree(fd);
hw = ERR_PTR(ret);
}
return hw;
}
EXPORT_SYMBOL_GPL(clk_hw_register_fractional_divider);
struct clk *clk_register_fractional_divider(struct device *dev,
const char *name, const char *parent_name, unsigned long flags,
void __iomem *reg, u8 mshift, u8 mwidth, u8 nshift, u8 nwidth,
u8 clk_divider_flags, spinlock_t *lock)
{
struct clk_hw *hw;
hw = clk_hw_register_fractional_divider(dev, name, parent_name, flags,
reg, mshift, mwidth, nshift, nwidth, clk_divider_flags,
lock);
if (IS_ERR(hw))
return ERR_CAST(hw);
return hw->clk;
}
EXPORT_SYMBOL_GPL(clk_register_fractional_divider);
void clk_hw_unregister_fractional_divider(struct clk_hw *hw)
{
struct clk_fractional_divider *fd;
fd = to_clk_fd(hw);
clk_hw_unregister(hw);
kfree(fd);
}
| linux-master | drivers/clk/clk-fractional-divider.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Renesas Versaclock 3
*
* Copyright (C) 2023 Renesas Electronics Corp.
*/
#include <linux/clk-provider.h>
#include <linux/i2c.h>
#include <linux/limits.h>
#include <linux/module.h>
#include <linux/regmap.h>
#define NUM_CONFIG_REGISTERS 37
#define VC3_GENERAL_CTR 0x0
#define VC3_GENERAL_CTR_DIV1_SRC_SEL BIT(3)
#define VC3_GENERAL_CTR_PLL3_REFIN_SEL BIT(2)
#define VC3_PLL3_M_DIVIDER 0x3
#define VC3_PLL3_M_DIV1 BIT(7)
#define VC3_PLL3_M_DIV2 BIT(6)
#define VC3_PLL3_M_DIV(n) ((n) & GENMASK(5, 0))
#define VC3_PLL3_N_DIVIDER 0x4
#define VC3_PLL3_LOOP_FILTER_N_DIV_MSB 0x5
#define VC3_PLL3_CHARGE_PUMP_CTRL 0x6
#define VC3_PLL3_CHARGE_PUMP_CTRL_OUTDIV3_SRC_SEL BIT(7)
#define VC3_PLL1_CTRL_OUTDIV5 0x7
#define VC3_PLL1_CTRL_OUTDIV5_PLL1_MDIV_DOUBLER BIT(7)
#define VC3_PLL1_M_DIVIDER 0x8
#define VC3_PLL1_M_DIV1 BIT(7)
#define VC3_PLL1_M_DIV2 BIT(6)
#define VC3_PLL1_M_DIV(n) ((n) & GENMASK(5, 0))
#define VC3_PLL1_VCO_N_DIVIDER 0x9
#define VC3_PLL1_LOOP_FILTER_N_DIV_MSB 0x0a
#define VC3_OUT_DIV1_DIV2_CTRL 0xf
#define VC3_PLL2_FB_INT_DIV_MSB 0x10
#define VC3_PLL2_FB_INT_DIV_LSB 0x11
#define VC3_PLL2_FB_FRC_DIV_MSB 0x12
#define VC3_PLL2_FB_FRC_DIV_LSB 0x13
#define VC3_PLL2_M_DIVIDER 0x1a
#define VC3_PLL2_MDIV_DOUBLER BIT(7)
#define VC3_PLL2_M_DIV1 BIT(6)
#define VC3_PLL2_M_DIV2 BIT(5)
#define VC3_PLL2_M_DIV(n) ((n) & GENMASK(4, 0))
#define VC3_OUT_DIV3_DIV4_CTRL 0x1b
#define VC3_PLL_OP_CTRL 0x1c
#define VC3_PLL_OP_CTRL_PLL2_REFIN_SEL 6
#define VC3_OUTPUT_CTR 0x1d
#define VC3_OUTPUT_CTR_DIV4_SRC_SEL BIT(3)
#define VC3_SE2_CTRL_REG0 0x1f
#define VC3_SE2_CTRL_REG0_SE2_CLK_SEL BIT(6)
#define VC3_SE3_DIFF1_CTRL_REG 0x21
#define VC3_SE3_DIFF1_CTRL_REG_SE3_CLK_SEL BIT(6)
#define VC3_DIFF1_CTRL_REG 0x22
#define VC3_DIFF1_CTRL_REG_DIFF1_CLK_SEL BIT(7)
#define VC3_DIFF2_CTRL_REG 0x23
#define VC3_DIFF2_CTRL_REG_DIFF2_CLK_SEL BIT(7)
#define VC3_SE1_DIV4_CTRL 0x24
#define VC3_SE1_DIV4_CTRL_SE1_CLK_SEL BIT(3)
#define VC3_PLL1_VCO_MIN 300000000UL
#define VC3_PLL1_VCO_MAX 600000000UL
#define VC3_PLL2_VCO_MIN 400000000UL
#define VC3_PLL2_VCO_MAX 1200000000UL
#define VC3_PLL3_VCO_MIN 300000000UL
#define VC3_PLL3_VCO_MAX 800000000UL
#define VC3_2_POW_16 (U16_MAX + 1)
#define VC3_DIV_MASK(width) ((1 << (width)) - 1)
enum vc3_pfd_mux {
VC3_PFD2_MUX,
VC3_PFD3_MUX,
};
enum vc3_pfd {
VC3_PFD1,
VC3_PFD2,
VC3_PFD3,
};
enum vc3_pll {
VC3_PLL1,
VC3_PLL2,
VC3_PLL3,
};
enum vc3_div_mux {
VC3_DIV1_MUX,
VC3_DIV3_MUX,
VC3_DIV4_MUX,
};
enum vc3_div {
VC3_DIV1,
VC3_DIV2,
VC3_DIV3,
VC3_DIV4,
VC3_DIV5,
};
enum vc3_clk_mux {
VC3_DIFF2_MUX,
VC3_DIFF1_MUX,
VC3_SE3_MUX,
VC3_SE2_MUX,
VC3_SE1_MUX,
};
enum vc3_clk {
VC3_DIFF2,
VC3_DIFF1,
VC3_SE3,
VC3_SE2,
VC3_SE1,
VC3_REF,
};
struct vc3_clk_data {
u8 offs;
u8 bitmsk;
};
struct vc3_pfd_data {
u8 num;
u8 offs;
u8 mdiv1_bitmsk;
u8 mdiv2_bitmsk;
};
struct vc3_pll_data {
u8 num;
u8 int_div_msb_offs;
u8 int_div_lsb_offs;
unsigned long vco_min;
unsigned long vco_max;
};
struct vc3_div_data {
u8 offs;
const struct clk_div_table *table;
u8 shift;
u8 width;
u8 flags;
};
struct vc3_hw_data {
struct clk_hw hw;
struct regmap *regmap;
const void *data;
u32 div_int;
u32 div_frc;
};
static const struct clk_div_table div1_divs[] = {
{ .val = 0, .div = 1, }, { .val = 1, .div = 4, },
{ .val = 2, .div = 5, }, { .val = 3, .div = 6, },
{ .val = 4, .div = 2, }, { .val = 5, .div = 8, },
{ .val = 6, .div = 10, }, { .val = 7, .div = 12, },
{ .val = 8, .div = 4, }, { .val = 9, .div = 16, },
{ .val = 10, .div = 20, }, { .val = 11, .div = 24, },
{ .val = 12, .div = 8, }, { .val = 13, .div = 32, },
{ .val = 14, .div = 40, }, { .val = 15, .div = 48, },
{}
};
static const struct clk_div_table div245_divs[] = {
{ .val = 0, .div = 1, }, { .val = 1, .div = 3, },
{ .val = 2, .div = 5, }, { .val = 3, .div = 10, },
{ .val = 4, .div = 2, }, { .val = 5, .div = 6, },
{ .val = 6, .div = 10, }, { .val = 7, .div = 20, },
{ .val = 8, .div = 4, }, { .val = 9, .div = 12, },
{ .val = 10, .div = 20, }, { .val = 11, .div = 40, },
{ .val = 12, .div = 5, }, { .val = 13, .div = 15, },
{ .val = 14, .div = 25, }, { .val = 15, .div = 50, },
{}
};
static const struct clk_div_table div3_divs[] = {
{ .val = 0, .div = 1, }, { .val = 1, .div = 3, },
{ .val = 2, .div = 5, }, { .val = 3, .div = 10, },
{ .val = 4, .div = 2, }, { .val = 5, .div = 6, },
{ .val = 6, .div = 10, }, { .val = 7, .div = 20, },
{ .val = 8, .div = 4, }, { .val = 9, .div = 12, },
{ .val = 10, .div = 20, }, { .val = 11, .div = 40, },
{ .val = 12, .div = 8, }, { .val = 13, .div = 24, },
{ .val = 14, .div = 40, }, { .val = 15, .div = 80, },
{}
};
static struct clk_hw *clk_out[6];
static unsigned char vc3_pfd_mux_get_parent(struct clk_hw *hw)
{
struct vc3_hw_data *vc3 = container_of(hw, struct vc3_hw_data, hw);
const struct vc3_clk_data *pfd_mux = vc3->data;
u32 src;
regmap_read(vc3->regmap, pfd_mux->offs, &src);
return !!(src & pfd_mux->bitmsk);
}
static int vc3_pfd_mux_set_parent(struct clk_hw *hw, u8 index)
{
struct vc3_hw_data *vc3 = container_of(hw, struct vc3_hw_data, hw);
const struct vc3_clk_data *pfd_mux = vc3->data;
regmap_update_bits(vc3->regmap, pfd_mux->offs, pfd_mux->bitmsk,
index ? pfd_mux->bitmsk : 0);
return 0;
}
static const struct clk_ops vc3_pfd_mux_ops = {
.determine_rate = clk_hw_determine_rate_no_reparent,
.set_parent = vc3_pfd_mux_set_parent,
.get_parent = vc3_pfd_mux_get_parent,
};
static unsigned long vc3_pfd_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct vc3_hw_data *vc3 = container_of(hw, struct vc3_hw_data, hw);
const struct vc3_pfd_data *pfd = vc3->data;
unsigned int prediv, premul;
unsigned long rate;
u8 mdiv;
regmap_read(vc3->regmap, pfd->offs, &prediv);
if (pfd->num == VC3_PFD1) {
/* If the bypass_prediv bit is set, the PLL is fed from Ref_in directly. */
if (prediv & pfd->mdiv1_bitmsk) {
/* check whether the frequency doubler is enabled */
regmap_read(vc3->regmap, VC3_PLL1_CTRL_OUTDIV5, &premul);
if (premul & VC3_PLL1_CTRL_OUTDIV5_PLL1_MDIV_DOUBLER)
parent_rate *= 2;
return parent_rate;
}
mdiv = VC3_PLL1_M_DIV(prediv);
} else if (pfd->num == VC3_PFD2) {
/* If the bypass_prediv bit is set, the PLL is fed from Ref_in directly. */
if (prediv & pfd->mdiv1_bitmsk) {
regmap_read(vc3->regmap, VC3_PLL2_M_DIVIDER, &premul);
/* check whether the frequency doubler is enabled */
if (premul & VC3_PLL2_MDIV_DOUBLER)
parent_rate *= 2;
return parent_rate;
}
mdiv = VC3_PLL2_M_DIV(prediv);
} else {
/* If the bypass_prediv bit is set, the PLL is fed from Ref_in directly. */
if (prediv & pfd->mdiv1_bitmsk)
return parent_rate;
mdiv = VC3_PLL3_M_DIV(prediv);
}
if (prediv & pfd->mdiv2_bitmsk)
rate = parent_rate / 2;
else
rate = parent_rate / mdiv;
return rate;
}
static long vc3_pfd_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
struct vc3_hw_data *vc3 = container_of(hw, struct vc3_hw_data, hw);
const struct vc3_pfd_data *pfd = vc3->data;
unsigned long idiv;
/* PLL cannot operate with input clock above 50 MHz. */
if (rate > 50000000)
return -EINVAL;
/* CLKIN within range of PLL input, feed directly to PLL. */
if (*parent_rate <= 50000000)
return *parent_rate;
idiv = DIV_ROUND_UP(*parent_rate, rate);
if (pfd->num == VC3_PFD1 || pfd->num == VC3_PFD3) {
if (idiv > 63)
return -EINVAL;
} else {
if (idiv > 31)
return -EINVAL;
}
return *parent_rate / idiv;
}
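/*
 * Worked example for vc3_pfd_round_rate() above (illustrative only):
 * with *parent_rate = 100 MHz and a requested 30 MHz,
 * idiv = DIV_ROUND_UP(100, 30) = 4, so the rounded result is
 * 100 MHz / 4 = 25 MHz, keeping the PLL input below 50 MHz.
 */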
static int vc3_pfd_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct vc3_hw_data *vc3 = container_of(hw, struct vc3_hw_data, hw);
const struct vc3_pfd_data *pfd = vc3->data;
unsigned long idiv;
u8 div;
/* CLKIN within range of PLL input, feed directly to PLL. */
if (parent_rate <= 50000000) {
regmap_update_bits(vc3->regmap, pfd->offs, pfd->mdiv1_bitmsk,
pfd->mdiv1_bitmsk);
regmap_update_bits(vc3->regmap, pfd->offs, pfd->mdiv2_bitmsk, 0);
return 0;
}
idiv = DIV_ROUND_UP(parent_rate, rate);
/* We have a dedicated div-2 predivider. */
if (idiv == 2) {
regmap_update_bits(vc3->regmap, pfd->offs, pfd->mdiv2_bitmsk,
pfd->mdiv2_bitmsk);
regmap_update_bits(vc3->regmap, pfd->offs, pfd->mdiv1_bitmsk, 0);
} else {
if (pfd->num == VC3_PFD1)
div = VC3_PLL1_M_DIV(idiv);
else if (pfd->num == VC3_PFD2)
div = VC3_PLL2_M_DIV(idiv);
else
div = VC3_PLL3_M_DIV(idiv);
regmap_write(vc3->regmap, pfd->offs, div);
}
return 0;
}
static const struct clk_ops vc3_pfd_ops = {
.recalc_rate = vc3_pfd_recalc_rate,
.round_rate = vc3_pfd_round_rate,
.set_rate = vc3_pfd_set_rate,
};
static unsigned long vc3_pll_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct vc3_hw_data *vc3 = container_of(hw, struct vc3_hw_data, hw);
const struct vc3_pll_data *pll = vc3->data;
u32 div_int, div_frc, val;
unsigned long rate;
regmap_read(vc3->regmap, pll->int_div_msb_offs, &val);
div_int = (val & GENMASK(2, 0)) << 8;
regmap_read(vc3->regmap, pll->int_div_lsb_offs, &val);
div_int |= val;
if (pll->num == VC3_PLL2) {
regmap_read(vc3->regmap, VC3_PLL2_FB_FRC_DIV_MSB, &val);
div_frc = val << 8;
regmap_read(vc3->regmap, VC3_PLL2_FB_FRC_DIV_LSB, &val);
div_frc |= val;
rate = (parent_rate *
(div_int * VC3_2_POW_16 + div_frc) / VC3_2_POW_16);
} else {
rate = parent_rate * div_int;
}
return rate;
}
static long vc3_pll_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
struct vc3_hw_data *vc3 = container_of(hw, struct vc3_hw_data, hw);
const struct vc3_pll_data *pll = vc3->data;
u64 div_frc;
if (rate < pll->vco_min)
rate = pll->vco_min;
if (rate > pll->vco_max)
rate = pll->vco_max;
vc3->div_int = rate / *parent_rate;
if (pll->num == VC3_PLL2) {
if (vc3->div_int > 0x7ff)
rate = *parent_rate * 0x7ff;
/* Determine the best fractional part, which is 16 bits wide */
div_frc = rate % *parent_rate;
div_frc *= BIT(16) - 1;
do_div(div_frc, *parent_rate);
vc3->div_frc = (u32)div_frc;
rate = (*parent_rate *
(vc3->div_int * VC3_2_POW_16 + div_frc) / VC3_2_POW_16);
} else {
rate = *parent_rate * vc3->div_int;
}
return rate;
}
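/*
 * Worked example for vc3_pll_round_rate() above (illustrative only) for
 * PLL2: with *parent_rate = 25 MHz and a requested 480 MHz, div_int = 19
 * and the 5 MHz remainder gives div_frc = 5000000 * 65535 / 25000000 =
 * 13107, so the rounded rate is 25 MHz * (19 * 65536 + 13107) / 65536,
 * i.e. just under 480 MHz.
 */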
static int vc3_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct vc3_hw_data *vc3 = container_of(hw, struct vc3_hw_data, hw);
const struct vc3_pll_data *pll = vc3->data;
u32 val;
regmap_read(vc3->regmap, pll->int_div_msb_offs, &val);
val = (val & 0xf8) | ((vc3->div_int >> 8) & 0x7);
regmap_write(vc3->regmap, pll->int_div_msb_offs, val);
regmap_write(vc3->regmap, pll->int_div_lsb_offs, vc3->div_int & 0xff);
if (pll->num == VC3_PLL2) {
regmap_write(vc3->regmap, VC3_PLL2_FB_FRC_DIV_MSB,
vc3->div_frc >> 8);
regmap_write(vc3->regmap, VC3_PLL2_FB_FRC_DIV_LSB,
vc3->div_frc & 0xff);
}
return 0;
}
static const struct clk_ops vc3_pll_ops = {
.recalc_rate = vc3_pll_recalc_rate,
.round_rate = vc3_pll_round_rate,
.set_rate = vc3_pll_set_rate,
};
static unsigned char vc3_div_mux_get_parent(struct clk_hw *hw)
{
struct vc3_hw_data *vc3 = container_of(hw, struct vc3_hw_data, hw);
const struct vc3_clk_data *div_mux = vc3->data;
u32 src;
regmap_read(vc3->regmap, div_mux->offs, &src);
return !!(src & div_mux->bitmsk);
}
static int vc3_div_mux_set_parent(struct clk_hw *hw, u8 index)
{
struct vc3_hw_data *vc3 = container_of(hw, struct vc3_hw_data, hw);
const struct vc3_clk_data *div_mux = vc3->data;
regmap_update_bits(vc3->regmap, div_mux->offs, div_mux->bitmsk,
index ? div_mux->bitmsk : 0);
return 0;
}
static const struct clk_ops vc3_div_mux_ops = {
.determine_rate = clk_hw_determine_rate_no_reparent,
.set_parent = vc3_div_mux_set_parent,
.get_parent = vc3_div_mux_get_parent,
};
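/*
 * Translate a register field value back to a divider via the lookup
 * table; returns 0 if the value has no table entry.
 */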
static unsigned int vc3_get_div(const struct clk_div_table *table,
unsigned int val, unsigned long flag)
{
const struct clk_div_table *clkt;
for (clkt = table; clkt->div; clkt++)
if (clkt->val == val)
return clkt->div;
return 0;
}
static unsigned long vc3_div_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct vc3_hw_data *vc3 = container_of(hw, struct vc3_hw_data, hw);
const struct vc3_div_data *div_data = vc3->data;
unsigned int val;
regmap_read(vc3->regmap, div_data->offs, &val);
val >>= div_data->shift;
val &= VC3_DIV_MASK(div_data->width);
return divider_recalc_rate(hw, parent_rate, val, div_data->table,
div_data->flags, div_data->width);
}
static long vc3_div_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
struct vc3_hw_data *vc3 = container_of(hw, struct vc3_hw_data, hw);
const struct vc3_div_data *div_data = vc3->data;
unsigned int bestdiv;
/* if read only, just return current value */
if (div_data->flags & CLK_DIVIDER_READ_ONLY) {
regmap_read(vc3->regmap, div_data->offs, &bestdiv);
bestdiv >>= div_data->shift;
bestdiv &= VC3_DIV_MASK(div_data->width);
bestdiv = vc3_get_div(div_data->table, bestdiv, div_data->flags);
return DIV_ROUND_UP(*parent_rate, bestdiv);
}
return divider_round_rate(hw, rate, parent_rate, div_data->table,
div_data->width, div_data->flags);
}
static int vc3_div_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct vc3_hw_data *vc3 = container_of(hw, struct vc3_hw_data, hw);
const struct vc3_div_data *div_data = vc3->data;
unsigned int value;
value = divider_get_val(rate, parent_rate, div_data->table,
div_data->width, div_data->flags);
regmap_update_bits(vc3->regmap, div_data->offs,
VC3_DIV_MASK(div_data->width) << div_data->shift,
value << div_data->shift);
return 0;
}
static const struct clk_ops vc3_div_ops = {
.recalc_rate = vc3_div_recalc_rate,
.round_rate = vc3_div_round_rate,
.set_rate = vc3_div_set_rate,
};
static int vc3_clk_mux_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
int ret;
int frc;
ret = clk_mux_determine_rate_flags(hw, req, CLK_SET_RATE_PARENT);
if (ret) {
/*
 * If the parent runs faster than the requested rate, retry
 * asking for the nearest integer multiple of the requested
 * rate so a downstream divider can produce it.
 */
if (req->best_parent_rate >= req->rate) {
frc = DIV_ROUND_CLOSEST_ULL(req->best_parent_rate,
req->rate);
req->rate *= frc;
return clk_mux_determine_rate_flags(hw, req,
CLK_SET_RATE_PARENT);
}
ret = 0;
}
return ret;
}
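/*
 * Worked example for vc3_clk_mux_determine_rate() above (illustrative
 * only): if no parent can hit the rate directly and best_parent_rate =
 * 48 MHz for a requested 12 MHz, frc becomes 4 and the retry asks the
 * parent path for 48 MHz, leaving a downstream divider to produce the
 * final 12 MHz.
 */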
static unsigned char vc3_clk_mux_get_parent(struct clk_hw *hw)
{
struct vc3_hw_data *vc3 = container_of(hw, struct vc3_hw_data, hw);
const struct vc3_clk_data *clk_mux = vc3->data;
u32 val;
regmap_read(vc3->regmap, clk_mux->offs, &val);
return !!(val & clk_mux->bitmsk);
}
static int vc3_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
struct vc3_hw_data *vc3 = container_of(hw, struct vc3_hw_data, hw);
const struct vc3_clk_data *clk_mux = vc3->data;
regmap_update_bits(vc3->regmap, clk_mux->offs,
clk_mux->bitmsk, index ? clk_mux->bitmsk : 0);
return 0;
}
static const struct clk_ops vc3_clk_mux_ops = {
.determine_rate = vc3_clk_mux_determine_rate,
.set_parent = vc3_clk_mux_set_parent,
.get_parent = vc3_clk_mux_get_parent,
};
static bool vc3_regmap_is_writeable(struct device *dev, unsigned int reg)
{
return true;
}
static const struct regmap_config vc3_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.cache_type = REGCACHE_RBTREE,
.max_register = 0x24,
.writeable_reg = vc3_regmap_is_writeable,
};
static struct vc3_hw_data clk_div[5];
static const struct clk_parent_data pfd_mux_parent_data[] = {
{ .index = 0, },
{ .hw = &clk_div[VC3_DIV2].hw }
};
static struct vc3_hw_data clk_pfd_mux[] = {
[VC3_PFD2_MUX] = {
.data = &(struct vc3_clk_data) {
.offs = VC3_PLL_OP_CTRL,
.bitmsk = BIT(VC3_PLL_OP_CTRL_PLL2_REFIN_SEL)
},
.hw.init = &(struct clk_init_data){
.name = "pfd2_mux",
.ops = &vc3_pfd_mux_ops,
.parent_data = pfd_mux_parent_data,
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT
}
},
[VC3_PFD3_MUX] = {
.data = &(struct vc3_clk_data) {
.offs = VC3_GENERAL_CTR,
.bitmsk = BIT(VC3_GENERAL_CTR_PLL3_REFIN_SEL)
},
.hw.init = &(struct clk_init_data){
.name = "pfd3_mux",
.ops = &vc3_pfd_mux_ops,
.parent_data = pfd_mux_parent_data,
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT
}
}
};
static struct vc3_hw_data clk_pfd[] = {
[VC3_PFD1] = {
.data = &(struct vc3_pfd_data) {
.num = VC3_PFD1,
.offs = VC3_PLL1_M_DIVIDER,
.mdiv1_bitmsk = VC3_PLL1_M_DIV1,
.mdiv2_bitmsk = VC3_PLL1_M_DIV2
},
.hw.init = &(struct clk_init_data){
.name = "pfd1",
.ops = &vc3_pfd_ops,
.parent_data = &(const struct clk_parent_data) {
.index = 0
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT
}
},
[VC3_PFD2] = {
.data = &(struct vc3_pfd_data) {
.num = VC3_PFD2,
.offs = VC3_PLL2_M_DIVIDER,
.mdiv1_bitmsk = VC3_PLL2_M_DIV1,
.mdiv2_bitmsk = VC3_PLL2_M_DIV2
},
.hw.init = &(struct clk_init_data){
.name = "pfd2",
.ops = &vc3_pfd_ops,
.parent_hws = (const struct clk_hw *[]) {
&clk_pfd_mux[VC3_PFD2_MUX].hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT
}
},
[VC3_PFD3] = {
.data = &(struct vc3_pfd_data) {
.num = VC3_PFD3,
.offs = VC3_PLL3_M_DIVIDER,
.mdiv1_bitmsk = VC3_PLL3_M_DIV1,
.mdiv2_bitmsk = VC3_PLL3_M_DIV2
},
.hw.init = &(struct clk_init_data){
.name = "pfd3",
.ops = &vc3_pfd_ops,
.parent_hws = (const struct clk_hw *[]) {
&clk_pfd_mux[VC3_PFD3_MUX].hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT
}
}
};
static struct vc3_hw_data clk_pll[] = {
[VC3_PLL1] = {
.data = &(struct vc3_pll_data) {
.num = VC3_PLL1,
.int_div_msb_offs = VC3_PLL1_LOOP_FILTER_N_DIV_MSB,
.int_div_lsb_offs = VC3_PLL1_VCO_N_DIVIDER,
.vco_min = VC3_PLL1_VCO_MIN,
.vco_max = VC3_PLL1_VCO_MAX
},
.hw.init = &(struct clk_init_data){
.name = "pll1",
.ops = &vc3_pll_ops,
.parent_hws = (const struct clk_hw *[]) {
&clk_pfd[VC3_PFD1].hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT
}
},
[VC3_PLL2] = {
.data = &(struct vc3_pll_data) {
.num = VC3_PLL2,
.int_div_msb_offs = VC3_PLL2_FB_INT_DIV_MSB,
.int_div_lsb_offs = VC3_PLL2_FB_INT_DIV_LSB,
.vco_min = VC3_PLL2_VCO_MIN,
.vco_max = VC3_PLL2_VCO_MAX
},
.hw.init = &(struct clk_init_data){
.name = "pll2",
.ops = &vc3_pll_ops,
.parent_hws = (const struct clk_hw *[]) {
&clk_pfd[VC3_PFD2].hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT
}
},
[VC3_PLL3] = {
.data = &(struct vc3_pll_data) {
.num = VC3_PLL3,
.int_div_msb_offs = VC3_PLL3_LOOP_FILTER_N_DIV_MSB,
.int_div_lsb_offs = VC3_PLL3_N_DIVIDER,
.vco_min = VC3_PLL3_VCO_MIN,
.vco_max = VC3_PLL3_VCO_MAX
},
.hw.init = &(struct clk_init_data){
.name = "pll3",
.ops = &vc3_pll_ops,
.parent_hws = (const struct clk_hw *[]) {
&clk_pfd[VC3_PFD3].hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT
}
}
};
static const struct clk_parent_data div_mux_parent_data[][2] = {
[VC3_DIV1_MUX] = {
{ .hw = &clk_pll[VC3_PLL1].hw },
{ .index = 0 }
},
[VC3_DIV3_MUX] = {
{ .hw = &clk_pll[VC3_PLL2].hw },
{ .hw = &clk_pll[VC3_PLL3].hw }
},
[VC3_DIV4_MUX] = {
{ .hw = &clk_pll[VC3_PLL2].hw },
{ .index = 0 }
}
};
static struct vc3_hw_data clk_div_mux[] = {
[VC3_DIV1_MUX] = {
.data = &(struct vc3_clk_data) {
.offs = VC3_GENERAL_CTR,
.bitmsk = VC3_GENERAL_CTR_DIV1_SRC_SEL
},
.hw.init = &(struct clk_init_data){
.name = "div1_mux",
.ops = &vc3_div_mux_ops,
.parent_data = div_mux_parent_data[VC3_DIV1_MUX],
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT
}
},
[VC3_DIV3_MUX] = {
.data = &(struct vc3_clk_data) {
.offs = VC3_PLL3_CHARGE_PUMP_CTRL,
.bitmsk = VC3_PLL3_CHARGE_PUMP_CTRL_OUTDIV3_SRC_SEL
},
.hw.init = &(struct clk_init_data){
.name = "div3_mux",
.ops = &vc3_div_mux_ops,
.parent_data = div_mux_parent_data[VC3_DIV3_MUX],
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT
}
},
[VC3_DIV4_MUX] = {
.data = &(struct vc3_clk_data) {
.offs = VC3_OUTPUT_CTR,
.bitmsk = VC3_OUTPUT_CTR_DIV4_SRC_SEL
},
.hw.init = &(struct clk_init_data){
.name = "div4_mux",
.ops = &vc3_div_mux_ops,
.parent_data = div_mux_parent_data[VC3_DIV4_MUX],
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT
}
}
};
static struct vc3_hw_data clk_div[] = {
[VC3_DIV1] = {
.data = &(struct vc3_div_data) {
.offs = VC3_OUT_DIV1_DIV2_CTRL,
.table = div1_divs,
.shift = 4,
.width = 4,
.flags = CLK_DIVIDER_READ_ONLY
},
.hw.init = &(struct clk_init_data){
.name = "div1",
.ops = &vc3_div_ops,
.parent_hws = (const struct clk_hw *[]) {
&clk_div_mux[VC3_DIV1_MUX].hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT
}
},
[VC3_DIV2] = {
.data = &(struct vc3_div_data) {
.offs = VC3_OUT_DIV1_DIV2_CTRL,
.table = div245_divs,
.shift = 0,
.width = 4,
.flags = CLK_DIVIDER_READ_ONLY
},
.hw.init = &(struct clk_init_data){
.name = "div2",
.ops = &vc3_div_ops,
.parent_hws = (const struct clk_hw *[]) {
&clk_pll[VC3_PLL1].hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT
}
},
[VC3_DIV3] = {
.data = &(struct vc3_div_data) {
.offs = VC3_OUT_DIV3_DIV4_CTRL,
.table = div3_divs,
.shift = 4,
.width = 4,
.flags = CLK_DIVIDER_READ_ONLY
},
.hw.init = &(struct clk_init_data){
.name = "div3",
.ops = &vc3_div_ops,
.parent_hws = (const struct clk_hw *[]) {
&clk_div_mux[VC3_DIV3_MUX].hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT
}
},
[VC3_DIV4] = {
.data = &(struct vc3_div_data) {
.offs = VC3_OUT_DIV3_DIV4_CTRL,
.table = div245_divs,
.shift = 0,
.width = 4,
.flags = CLK_DIVIDER_READ_ONLY
},
.hw.init = &(struct clk_init_data){
.name = "div4",
.ops = &vc3_div_ops,
.parent_hws = (const struct clk_hw *[]) {
&clk_div_mux[VC3_DIV4_MUX].hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT
}
},
[VC3_DIV5] = {
.data = &(struct vc3_div_data) {
.offs = VC3_PLL1_CTRL_OUTDIV5,
.table = div245_divs,
.shift = 0,
.width = 4,
.flags = CLK_DIVIDER_READ_ONLY
},
.hw.init = &(struct clk_init_data){
.name = "div5",
.ops = &vc3_div_ops,
.parent_hws = (const struct clk_hw *[]) {
&clk_pll[VC3_PLL3].hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT
}
}
};
static struct vc3_hw_data clk_mux[] = {
[VC3_DIFF2_MUX] = {
.data = &(struct vc3_clk_data) {
.offs = VC3_DIFF2_CTRL_REG,
.bitmsk = VC3_DIFF2_CTRL_REG_DIFF2_CLK_SEL
},
.hw.init = &(struct clk_init_data){
.name = "diff2_mux",
.ops = &vc3_clk_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
&clk_div[VC3_DIV1].hw,
&clk_div[VC3_DIV3].hw
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT
}
},
[VC3_DIFF1_MUX] = {
.data = &(struct vc3_clk_data) {
.offs = VC3_DIFF1_CTRL_REG,
.bitmsk = VC3_DIFF1_CTRL_REG_DIFF1_CLK_SEL
},
.hw.init = &(struct clk_init_data){
.name = "diff1_mux",
.ops = &vc3_clk_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
&clk_div[VC3_DIV1].hw,
&clk_div[VC3_DIV3].hw
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT
}
},
[VC3_SE3_MUX] = {
.data = &(struct vc3_clk_data) {
.offs = VC3_SE3_DIFF1_CTRL_REG,
.bitmsk = VC3_SE3_DIFF1_CTRL_REG_SE3_CLK_SEL
},
.hw.init = &(struct clk_init_data){
.name = "se3_mux",
.ops = &vc3_clk_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
&clk_div[VC3_DIV2].hw,
&clk_div[VC3_DIV4].hw
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT
}
},
[VC3_SE2_MUX] = {
.data = &(struct vc3_clk_data) {
.offs = VC3_SE2_CTRL_REG0,
.bitmsk = VC3_SE2_CTRL_REG0_SE2_CLK_SEL
},
.hw.init = &(struct clk_init_data){
.name = "se2_mux",
.ops = &vc3_clk_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
&clk_div[VC3_DIV5].hw,
&clk_div[VC3_DIV4].hw
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT
}
},
[VC3_SE1_MUX] = {
.data = &(struct vc3_clk_data) {
.offs = VC3_SE1_DIV4_CTRL,
.bitmsk = VC3_SE1_DIV4_CTRL_SE1_CLK_SEL
},
.hw.init = &(struct clk_init_data){
.name = "se1_mux",
.ops = &vc3_clk_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
&clk_div[VC3_DIV5].hw,
&clk_div[VC3_DIV4].hw
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT
}
}
};
static struct clk_hw *vc3_of_clk_get(struct of_phandle_args *clkspec,
void *data)
{
unsigned int idx = clkspec->args[0];
struct clk_hw **clkout_hw = data;
if (idx >= ARRAY_SIZE(clk_out)) {
pr_err("invalid clk index %u for provider %pOF\n", idx, clkspec->np);
return ERR_PTR(-EINVAL);
}
return clkout_hw[idx];
}
static int vc3_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
u8 settings[NUM_CONFIG_REGISTERS];
struct regmap *regmap;
const char *name;
int ret, i;
regmap = devm_regmap_init_i2c(client, &vc3_regmap_config);
if (IS_ERR(regmap))
return dev_err_probe(dev, PTR_ERR(regmap),
"failed to allocate register map\n");
ret = of_property_read_u8_array(dev->of_node, "renesas,settings",
settings, ARRAY_SIZE(settings));
if (!ret) {
/*
* A raw settings array was specified in the DT. Write the
* settings to the device immediately.
*/
for (i = 0; i < NUM_CONFIG_REGISTERS; i++) {
ret = regmap_write(regmap, i, settings[i]);
if (ret) {
dev_err(dev, "error writing to chip (%i)\n", ret);
return ret;
}
}
} else if (ret == -EOVERFLOW) {
dev_err(&client->dev, "EOVERFLOW reg settings. ARRAY_SIZE: %zu\n",
ARRAY_SIZE(settings));
return ret;
}
/* Register pfd muxes */
for (i = 0; i < ARRAY_SIZE(clk_pfd_mux); i++) {
clk_pfd_mux[i].regmap = regmap;
ret = devm_clk_hw_register(dev, &clk_pfd_mux[i].hw);
if (ret)
return dev_err_probe(dev, ret, "%s failed\n",
clk_pfd_mux[i].hw.init->name);
}
/* Register pfd's */
for (i = 0; i < ARRAY_SIZE(clk_pfd); i++) {
clk_pfd[i].regmap = regmap;
ret = devm_clk_hw_register(dev, &clk_pfd[i].hw);
if (ret)
return dev_err_probe(dev, ret, "%s failed\n",
clk_pfd[i].hw.init->name);
}
/* Register pll's */
for (i = 0; i < ARRAY_SIZE(clk_pll); i++) {
clk_pll[i].regmap = regmap;
ret = devm_clk_hw_register(dev, &clk_pll[i].hw);
if (ret)
return dev_err_probe(dev, ret, "%s failed\n",
clk_pll[i].hw.init->name);
}
/* Register divider muxes */
for (i = 0; i < ARRAY_SIZE(clk_div_mux); i++) {
clk_div_mux[i].regmap = regmap;
ret = devm_clk_hw_register(dev, &clk_div_mux[i].hw);
if (ret)
return dev_err_probe(dev, ret, "%s failed\n",
clk_div_mux[i].hw.init->name);
}
/* Register dividers */
for (i = 0; i < ARRAY_SIZE(clk_div); i++) {
clk_div[i].regmap = regmap;
ret = devm_clk_hw_register(dev, &clk_div[i].hw);
if (ret)
return dev_err_probe(dev, ret, "%s failed\n",
clk_div[i].hw.init->name);
}
/* Register clk muxes */
for (i = 0; i < ARRAY_SIZE(clk_mux); i++) {
clk_mux[i].regmap = regmap;
ret = devm_clk_hw_register(dev, &clk_mux[i].hw);
if (ret)
return dev_err_probe(dev, ret, "%s failed\n",
clk_mux[i].hw.init->name);
}
/* Register clk outputs */
for (i = 0; i < ARRAY_SIZE(clk_out); i++) {
switch (i) {
case VC3_DIFF2:
name = "diff2";
break;
case VC3_DIFF1:
name = "diff1";
break;
case VC3_SE3:
name = "se3";
break;
case VC3_SE2:
name = "se2";
break;
case VC3_SE1:
name = "se1";
break;
case VC3_REF:
name = "ref";
break;
default:
return dev_err_probe(dev, -EINVAL, "invalid clk output %d\n", i);
}
if (i == VC3_REF)
clk_out[i] = devm_clk_hw_register_fixed_factor_index(dev,
name, 0, CLK_SET_RATE_PARENT, 1, 1);
else
clk_out[i] = devm_clk_hw_register_fixed_factor_parent_hw(dev,
name, &clk_mux[i].hw, CLK_SET_RATE_PARENT, 1, 1);
if (IS_ERR(clk_out[i]))
return PTR_ERR(clk_out[i]);
}
ret = devm_of_clk_add_hw_provider(dev, vc3_of_clk_get, clk_out);
if (ret)
return dev_err_probe(dev, ret, "unable to add clk provider\n");
return ret;
}
static const struct of_device_id dev_ids[] = {
{ .compatible = "renesas,5p35023" },
{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, dev_ids);
static struct i2c_driver vc3_driver = {
.driver = {
.name = "vc3",
.of_match_table = of_match_ptr(dev_ids),
},
.probe = vc3_probe,
};
module_i2c_driver(vc3_driver);
MODULE_AUTHOR("Biju Das <[email protected]>");
MODULE_DESCRIPTION("Renesas VersaClock 3 driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/clk/clk-versaclock3.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* TWL6040 clock module driver for OMAP4 McPDM functional clock
*
* Copyright (C) 2012 Texas Instruments Inc.
* Peter Ujfalusi <[email protected]>
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/mfd/twl6040.h>
#include <linux/clk-provider.h>
struct twl6040_pdmclk {
struct twl6040 *twl6040;
struct device *dev;
struct clk_hw pdmclk_hw;
int enabled;
};
static int twl6040_pdmclk_is_prepared(struct clk_hw *hw)
{
struct twl6040_pdmclk *pdmclk = container_of(hw, struct twl6040_pdmclk,
pdmclk_hw);
return pdmclk->enabled;
}
static int twl6040_pdmclk_reset_one_clock(struct twl6040_pdmclk *pdmclk,
unsigned int reg)
{
const u8 reset_mask = TWL6040_HPLLRST; /* Same for HPPLL and LPPLL */
int ret;
ret = twl6040_set_bits(pdmclk->twl6040, reg, reset_mask);
if (ret < 0)
return ret;
ret = twl6040_clear_bits(pdmclk->twl6040, reg, reset_mask);
if (ret < 0)
return ret;
return 0;
}
/*
* TWL6040A2 Phoenix Audio IC erratum #6: "PDM Clock Generation Issue At
* Cold Temperature". This affects cold boot and deeper idle states it
* seems. The workaround consists of resetting HPPLL and LPPLL.
*/
static int twl6040_pdmclk_quirk_reset_clocks(struct twl6040_pdmclk *pdmclk)
{
int ret;
ret = twl6040_pdmclk_reset_one_clock(pdmclk, TWL6040_REG_HPPLLCTL);
if (ret)
return ret;
ret = twl6040_pdmclk_reset_one_clock(pdmclk, TWL6040_REG_LPPLLCTL);
if (ret)
return ret;
return 0;
}
static int twl6040_pdmclk_prepare(struct clk_hw *hw)
{
struct twl6040_pdmclk *pdmclk = container_of(hw, struct twl6040_pdmclk,
pdmclk_hw);
int ret;
ret = twl6040_power(pdmclk->twl6040, 1);
if (ret)
return ret;
ret = twl6040_pdmclk_quirk_reset_clocks(pdmclk);
if (ret)
goto out_err;
pdmclk->enabled = 1;
return 0;
out_err:
dev_err(pdmclk->dev, "%s: error %i\n", __func__, ret);
twl6040_power(pdmclk->twl6040, 0);
return ret;
}
static void twl6040_pdmclk_unprepare(struct clk_hw *hw)
{
struct twl6040_pdmclk *pdmclk = container_of(hw, struct twl6040_pdmclk,
pdmclk_hw);
int ret;
ret = twl6040_power(pdmclk->twl6040, 0);
if (!ret)
pdmclk->enabled = 0;
}
static unsigned long twl6040_pdmclk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct twl6040_pdmclk *pdmclk = container_of(hw, struct twl6040_pdmclk,
pdmclk_hw);
return twl6040_get_sysclk(pdmclk->twl6040);
}
static const struct clk_ops twl6040_pdmclk_ops = {
.is_prepared = twl6040_pdmclk_is_prepared,
.prepare = twl6040_pdmclk_prepare,
.unprepare = twl6040_pdmclk_unprepare,
.recalc_rate = twl6040_pdmclk_recalc_rate,
};
static const struct clk_init_data twl6040_pdmclk_init = {
.name = "pdmclk",
.ops = &twl6040_pdmclk_ops,
.flags = CLK_GET_RATE_NOCACHE,
};
static int twl6040_pdmclk_probe(struct platform_device *pdev)
{
struct twl6040 *twl6040 = dev_get_drvdata(pdev->dev.parent);
struct twl6040_pdmclk *clkdata;
int ret;
clkdata = devm_kzalloc(&pdev->dev, sizeof(*clkdata), GFP_KERNEL);
if (!clkdata)
return -ENOMEM;
clkdata->dev = &pdev->dev;
clkdata->twl6040 = twl6040;
clkdata->pdmclk_hw.init = &twl6040_pdmclk_init;
ret = devm_clk_hw_register(&pdev->dev, &clkdata->pdmclk_hw);
if (ret)
return ret;
platform_set_drvdata(pdev, clkdata);
return devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_simple_get,
&clkdata->pdmclk_hw);
}
static struct platform_driver twl6040_pdmclk_driver = {
.driver = {
.name = "twl6040-pdmclk",
},
.probe = twl6040_pdmclk_probe,
};
module_platform_driver(twl6040_pdmclk_driver);
MODULE_DESCRIPTION("TWL6040 clock driver for McPDM functional clock");
MODULE_AUTHOR("Peter Ujfalusi <[email protected]>");
MODULE_ALIAS("platform:twl6040-pdmclk");
MODULE_LICENSE("GPL");
| linux-master | drivers/clk/clk-twl6040.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2017 NXP
*
* Dong Aisheng <[email protected]>
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/of.h>
#include <linux/slab.h>
static int __must_check of_clk_bulk_get(struct device_node *np, int num_clks,
struct clk_bulk_data *clks)
{
int ret;
int i;
for (i = 0; i < num_clks; i++) {
clks[i].id = NULL;
clks[i].clk = NULL;
}
for (i = 0; i < num_clks; i++) {
of_property_read_string_index(np, "clock-names", i, &clks[i].id);
clks[i].clk = of_clk_get(np, i);
if (IS_ERR(clks[i].clk)) {
ret = PTR_ERR(clks[i].clk);
pr_err("%pOF: Failed to get clk index: %d ret: %d\n",
np, i, ret);
clks[i].clk = NULL;
goto err;
}
}
return 0;
err:
clk_bulk_put(i, clks);
return ret;
}
static int __must_check of_clk_bulk_get_all(struct device_node *np,
struct clk_bulk_data **clks)
{
struct clk_bulk_data *clk_bulk;
int num_clks;
int ret;
num_clks = of_clk_get_parent_count(np);
if (!num_clks)
return 0;
clk_bulk = kmalloc_array(num_clks, sizeof(*clk_bulk), GFP_KERNEL);
if (!clk_bulk)
return -ENOMEM;
ret = of_clk_bulk_get(np, num_clks, clk_bulk);
if (ret) {
kfree(clk_bulk);
return ret;
}
*clks = clk_bulk;
return num_clks;
}
void clk_bulk_put(int num_clks, struct clk_bulk_data *clks)
{
while (--num_clks >= 0) {
clk_put(clks[num_clks].clk);
clks[num_clks].clk = NULL;
}
}
EXPORT_SYMBOL_GPL(clk_bulk_put);
static int __clk_bulk_get(struct device *dev, int num_clks,
struct clk_bulk_data *clks, bool optional)
{
int ret;
int i;
for (i = 0; i < num_clks; i++)
clks[i].clk = NULL;
for (i = 0; i < num_clks; i++) {
clks[i].clk = clk_get(dev, clks[i].id);
if (IS_ERR(clks[i].clk)) {
ret = PTR_ERR(clks[i].clk);
clks[i].clk = NULL;
if (ret == -ENOENT && optional)
continue;
dev_err_probe(dev, ret,
"Failed to get clk '%s'\n",
clks[i].id);
goto err;
}
}
return 0;
err:
clk_bulk_put(i, clks);
return ret;
}
int __must_check clk_bulk_get(struct device *dev, int num_clks,
struct clk_bulk_data *clks)
{
return __clk_bulk_get(dev, num_clks, clks, false);
}
EXPORT_SYMBOL(clk_bulk_get);
int __must_check clk_bulk_get_optional(struct device *dev, int num_clks,
struct clk_bulk_data *clks)
{
return __clk_bulk_get(dev, num_clks, clks, true);
}
EXPORT_SYMBOL_GPL(clk_bulk_get_optional);
void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks)
{
if (IS_ERR_OR_NULL(clks))
return;
clk_bulk_put(num_clks, clks);
kfree(clks);
}
EXPORT_SYMBOL(clk_bulk_put_all);
int __must_check clk_bulk_get_all(struct device *dev,
struct clk_bulk_data **clks)
{
struct device_node *np = dev_of_node(dev);
if (!np)
return 0;
return of_clk_bulk_get_all(np, clks);
}
EXPORT_SYMBOL(clk_bulk_get_all);
#ifdef CONFIG_HAVE_CLK_PREPARE
/**
* clk_bulk_unprepare - undo preparation of a set of clock sources
* @num_clks: the number of clk_bulk_data
* @clks: the clk_bulk_data table being unprepared
*
* clk_bulk_unprepare may sleep, which differentiates it from clk_bulk_disable.
*/
void clk_bulk_unprepare(int num_clks, const struct clk_bulk_data *clks)
{
while (--num_clks >= 0)
clk_unprepare(clks[num_clks].clk);
}
EXPORT_SYMBOL_GPL(clk_bulk_unprepare);
/**
* clk_bulk_prepare - prepare a set of clocks
* @num_clks: the number of clk_bulk_data
* @clks: the clk_bulk_data table being prepared
*
* clk_bulk_prepare may sleep, which differentiates it from clk_bulk_enable.
* Returns 0 on success, a negative error code otherwise.
*/
int __must_check clk_bulk_prepare(int num_clks,
const struct clk_bulk_data *clks)
{
int ret;
int i;
for (i = 0; i < num_clks; i++) {
ret = clk_prepare(clks[i].clk);
if (ret) {
pr_err("Failed to prepare clk '%s': %d\n",
clks[i].id, ret);
goto err;
}
}
return 0;
err:
clk_bulk_unprepare(i, clks);
return ret;
}
EXPORT_SYMBOL_GPL(clk_bulk_prepare);
#endif /* CONFIG_HAVE_CLK_PREPARE */
/**
* clk_bulk_disable - gate a set of clocks
* @num_clks: the number of clk_bulk_data
* @clks: the clk_bulk_data table being gated
*
* clk_bulk_disable must not sleep, which differentiates it from
* clk_bulk_unprepare. clk_bulk_disable must be called before
* clk_bulk_unprepare.
*/
void clk_bulk_disable(int num_clks, const struct clk_bulk_data *clks)
{
while (--num_clks >= 0)
clk_disable(clks[num_clks].clk);
}
EXPORT_SYMBOL_GPL(clk_bulk_disable);
/**
* clk_bulk_enable - ungate a set of clocks
* @num_clks: the number of clk_bulk_data
* @clks: the clk_bulk_data table being ungated
*
* clk_bulk_enable must not sleep.
* Returns 0 on success, a negative error code otherwise.
*/
int __must_check clk_bulk_enable(int num_clks, const struct clk_bulk_data *clks)
{
int ret;
int i;
for (i = 0; i < num_clks; i++) {
ret = clk_enable(clks[i].clk);
if (ret) {
pr_err("Failed to enable clk '%s': %d\n",
clks[i].id, ret);
goto err;
}
}
return 0;
err:
clk_bulk_disable(i, clks);
return ret;
}
EXPORT_SYMBOL_GPL(clk_bulk_enable);
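/*
 * Typical consumer usage (illustrative sketch only; the device and the
 * clock-names entries below are assumptions, not part of this file):
 *
 *	struct clk_bulk_data clks[] = {
 *		{ .id = "bus" },
 *		{ .id = "core" },
 *	};
 *	int ret;
 *
 *	ret = clk_bulk_get(dev, ARRAY_SIZE(clks), clks);
 *	if (ret)
 *		return ret;
 *	ret = clk_bulk_prepare(ARRAY_SIZE(clks), clks);
 *	if (ret)
 *		goto err_put;
 *	ret = clk_bulk_enable(ARRAY_SIZE(clks), clks);
 *	if (ret)
 *		goto err_unprepare;
 */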
| linux-master | drivers/clk/clk-bulk.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* clk-si5351.c: Skyworks / Silicon Labs Si5351A/B/C I2C Clock Generator
*
* Sebastian Hesselbarth <[email protected]>
* Rabeeh Khoury <[email protected]>
*
* References:
* [1] "Si5351A/B/C Data Sheet"
* https://www.skyworksinc.com/-/media/Skyworks/SL/documents/public/data-sheets/Si5351-B.pdf
* [2] "AN619: Manually Generating an Si5351 Register Map"
* https://www.skyworksinc.com/-/media/Skyworks/SL/documents/public/application-notes/AN619.pdf
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/rational.h>
#include <linux/i2c.h>
#include <linux/of.h>
#include <linux/platform_data/si5351.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <asm/div64.h>
#include "clk-si5351.h"
struct si5351_driver_data;
struct si5351_parameters {
unsigned long p1;
unsigned long p2;
unsigned long p3;
int valid;
};
struct si5351_hw_data {
struct clk_hw hw;
struct si5351_driver_data *drvdata;
struct si5351_parameters params;
unsigned char num;
};
struct si5351_driver_data {
enum si5351_variant variant;
struct i2c_client *client;
struct regmap *regmap;
struct clk *pxtal;
const char *pxtal_name;
struct clk_hw xtal;
struct clk *pclkin;
const char *pclkin_name;
struct clk_hw clkin;
struct si5351_hw_data pll[2];
struct si5351_hw_data *msynth;
struct si5351_hw_data *clkout;
size_t num_clkout;
};
static const char * const si5351_input_names[] = {
"xtal", "clkin"
};
static const char * const si5351_pll_names[] = {
"si5351_plla", "si5351_pllb", "si5351_vxco"
};
static const char * const si5351_msynth_names[] = {
"ms0", "ms1", "ms2", "ms3", "ms4", "ms5", "ms6", "ms7"
};
static const char * const si5351_clkout_names[] = {
"clk0", "clk1", "clk2", "clk3", "clk4", "clk5", "clk6", "clk7"
};
/*
* Si5351 i2c regmap
*/
static inline u8 si5351_reg_read(struct si5351_driver_data *drvdata, u8 reg)
{
u32 val;
int ret;
ret = regmap_read(drvdata->regmap, reg, &val);
if (ret) {
dev_err(&drvdata->client->dev,
"unable to read from reg%02x\n", reg);
return 0;
}
return (u8)val;
}
static inline int si5351_bulk_read(struct si5351_driver_data *drvdata,
u8 reg, u8 count, u8 *buf)
{
return regmap_bulk_read(drvdata->regmap, reg, buf, count);
}
static inline int si5351_reg_write(struct si5351_driver_data *drvdata,
u8 reg, u8 val)
{
return regmap_write(drvdata->regmap, reg, val);
}
static inline int si5351_bulk_write(struct si5351_driver_data *drvdata,
u8 reg, u8 count, const u8 *buf)
{
return regmap_raw_write(drvdata->regmap, reg, buf, count);
}
static inline int si5351_set_bits(struct si5351_driver_data *drvdata,
u8 reg, u8 mask, u8 val)
{
return regmap_update_bits(drvdata->regmap, reg, mask, val);
}
static inline u8 si5351_msynth_params_address(int num)
{
if (num > 5)
return SI5351_CLK6_PARAMETERS + (num - 6);
return SI5351_CLK0_PARAMETERS + (SI5351_PARAMETERS_LENGTH * num);
}
static void si5351_read_parameters(struct si5351_driver_data *drvdata,
u8 reg, struct si5351_parameters *params)
{
u8 buf[SI5351_PARAMETERS_LENGTH];
switch (reg) {
case SI5351_CLK6_PARAMETERS:
case SI5351_CLK7_PARAMETERS:
buf[0] = si5351_reg_read(drvdata, reg);
params->p1 = buf[0];
params->p2 = 0;
params->p3 = 1;
break;
default:
si5351_bulk_read(drvdata, reg, SI5351_PARAMETERS_LENGTH, buf);
params->p1 = ((buf[2] & 0x03) << 16) | (buf[3] << 8) | buf[4];
params->p2 = ((buf[5] & 0x0f) << 16) | (buf[6] << 8) | buf[7];
params->p3 = ((buf[5] & 0xf0) << 12) | (buf[0] << 8) | buf[1];
}
params->valid = 1;
}
static void si5351_write_parameters(struct si5351_driver_data *drvdata,
u8 reg, struct si5351_parameters *params)
{
u8 buf[SI5351_PARAMETERS_LENGTH];
switch (reg) {
case SI5351_CLK6_PARAMETERS:
case SI5351_CLK7_PARAMETERS:
buf[0] = params->p1 & 0xff;
si5351_reg_write(drvdata, reg, buf[0]);
break;
default:
buf[0] = ((params->p3 & 0x0ff00) >> 8) & 0xff;
buf[1] = params->p3 & 0xff;
/* save rdiv and divby4 */
buf[2] = si5351_reg_read(drvdata, reg + 2) & ~0x03;
buf[2] |= ((params->p1 & 0x30000) >> 16) & 0x03;
buf[3] = ((params->p1 & 0x0ff00) >> 8) & 0xff;
buf[4] = params->p1 & 0xff;
buf[5] = ((params->p3 & 0xf0000) >> 12) |
((params->p2 & 0xf0000) >> 16);
buf[6] = ((params->p2 & 0x0ff00) >> 8) & 0xff;
buf[7] = params->p2 & 0xff;
si5351_bulk_write(drvdata, reg, SI5351_PARAMETERS_LENGTH, buf);
}
}
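/*
 * Per AN619 the multisynth divider a + b/c is packed into P1..P3 roughly
 * as follows (sketch only, consult [2] for the authoritative definitions):
 *
 *	P1 = 128 * a + floor(128 * b / c) - 512
 *	P2 = 128 * b - c * floor(128 * b / c)
 *	P3 = c
 *
 * CLK6/CLK7 instead take a plain even integer divider directly in P1.
 */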
static bool si5351_regmap_is_volatile(struct device *dev, unsigned int reg)
{
switch (reg) {
case SI5351_DEVICE_STATUS:
case SI5351_INTERRUPT_STATUS:
case SI5351_PLL_RESET:
return true;
}
return false;
}
static bool si5351_regmap_is_writeable(struct device *dev, unsigned int reg)
{
/* reserved registers */
if (reg >= 4 && reg <= 8)
return false;
if (reg >= 10 && reg <= 14)
return false;
if (reg >= 173 && reg <= 176)
return false;
if (reg >= 178 && reg <= 182)
return false;
/* read-only */
if (reg == SI5351_DEVICE_STATUS)
return false;
return true;
}
static const struct regmap_config si5351_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.cache_type = REGCACHE_RBTREE,
.max_register = 187,
.writeable_reg = si5351_regmap_is_writeable,
.volatile_reg = si5351_regmap_is_volatile,
};
/*
* Si5351 xtal clock input
*/
static int si5351_xtal_prepare(struct clk_hw *hw)
{
struct si5351_driver_data *drvdata =
container_of(hw, struct si5351_driver_data, xtal);
si5351_set_bits(drvdata, SI5351_FANOUT_ENABLE,
SI5351_XTAL_ENABLE, SI5351_XTAL_ENABLE);
return 0;
}
static void si5351_xtal_unprepare(struct clk_hw *hw)
{
struct si5351_driver_data *drvdata =
container_of(hw, struct si5351_driver_data, xtal);
si5351_set_bits(drvdata, SI5351_FANOUT_ENABLE,
SI5351_XTAL_ENABLE, 0);
}
static const struct clk_ops si5351_xtal_ops = {
.prepare = si5351_xtal_prepare,
.unprepare = si5351_xtal_unprepare,
};
/*
* Si5351 clkin clock input (Si5351C only)
*/
static int si5351_clkin_prepare(struct clk_hw *hw)
{
struct si5351_driver_data *drvdata =
container_of(hw, struct si5351_driver_data, clkin);
si5351_set_bits(drvdata, SI5351_FANOUT_ENABLE,
SI5351_CLKIN_ENABLE, SI5351_CLKIN_ENABLE);
return 0;
}
static void si5351_clkin_unprepare(struct clk_hw *hw)
{
struct si5351_driver_data *drvdata =
container_of(hw, struct si5351_driver_data, clkin);
si5351_set_bits(drvdata, SI5351_FANOUT_ENABLE,
SI5351_CLKIN_ENABLE, 0);
}
/*
* CMOS clock source constraints:
 * The input frequency range of the PLL is 10 MHz to 40 MHz.
* If CLKIN is >40MHz, the input divider must be used.
*/
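/*
 * Worked example (values chosen for illustration, not from a datasheet):
 * a 100 MHz CLKIN falls in the ">80 MHz" branch below, so CLKIN_DIV_4 is
 * selected and the effective PLL input becomes 100 MHz / 4 = 25 MHz,
 * inside the 10-40 MHz range.
 */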
static unsigned long si5351_clkin_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct si5351_driver_data *drvdata =
container_of(hw, struct si5351_driver_data, clkin);
unsigned long rate;
unsigned char idiv;
rate = parent_rate;
if (parent_rate > 160000000) {
idiv = SI5351_CLKIN_DIV_8;
rate /= 8;
} else if (parent_rate > 80000000) {
idiv = SI5351_CLKIN_DIV_4;
rate /= 4;
} else if (parent_rate > 40000000) {
idiv = SI5351_CLKIN_DIV_2;
rate /= 2;
} else {
idiv = SI5351_CLKIN_DIV_1;
}
si5351_set_bits(drvdata, SI5351_PLL_INPUT_SOURCE,
SI5351_CLKIN_DIV_MASK, idiv);
dev_dbg(&drvdata->client->dev, "%s - clkin div = %d, rate = %lu\n",
__func__, (1 << (idiv >> 6)), rate);
return rate;
}
static const struct clk_ops si5351_clkin_ops = {
.prepare = si5351_clkin_prepare,
.unprepare = si5351_clkin_unprepare,
.recalc_rate = si5351_clkin_recalc_rate,
};
/*
* Si5351 vxco clock input (Si5351B only)
*/
static int si5351_vxco_prepare(struct clk_hw *hw)
{
struct si5351_hw_data *hwdata =
container_of(hw, struct si5351_hw_data, hw);
dev_warn(&hwdata->drvdata->client->dev, "VXCO currently unsupported\n");
return 0;
}
static void si5351_vxco_unprepare(struct clk_hw *hw)
{
}
static unsigned long si5351_vxco_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
return 0;
}
static int si5351_vxco_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent)
{
return 0;
}
static const struct clk_ops si5351_vxco_ops = {
.prepare = si5351_vxco_prepare,
.unprepare = si5351_vxco_unprepare,
.recalc_rate = si5351_vxco_recalc_rate,
.set_rate = si5351_vxco_set_rate,
};
/*
* Si5351 pll a/b
*
* Feedback Multisynth Divider Equations [2]
*
* fVCO = fIN * (a + b/c)
*
* with 15 + 0/1048575 <= (a + b/c) <= 90 + 0/1048575 and
* fIN = fXTAL or fIN = fCLKIN/CLKIN_DIV
*
* Feedback Multisynth Register Equations
*
* (1) MSNx_P1[17:0] = 128 * a + floor(128 * b/c) - 512
* (2) MSNx_P2[19:0] = 128 * b - c * floor(128 * b/c) = (128*b) mod c
* (3) MSNx_P3[19:0] = c
*
 * Transposing (2) yields: (4) floor(128 * b/c) = (128 * b - MSNx_P2)/c
 *
 * Using (4) on (1) yields:
 * MSNx_P1 = 128 * a + (128 * b - MSNx_P2)/c - 512
* MSNx_P1 + 512 + MSNx_P2/c = 128 * a + 128 * b/c
*
* a + b/c = (MSNx_P1 + MSNx_P2/MSNx_P3 + 512)/128
* = (MSNx_P1*MSNx_P3 + MSNx_P2 + 512*MSNx_P3)/(128*MSNx_P3)
*
*/
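/*
 * Numerical sketch (illustrative): fIN = 25 MHz, a = 35, b = 1, c = 3
 * gives fVCO = 25 MHz * (35 + 1/3) = 883.33 MHz and, per (1)-(3),
 * MSNx_P1 = 128 * 35 + floor(128/3) - 512 = 4480 + 42 - 512 = 4010,
 * MSNx_P2 = 128 - 3 * 42 = 2, MSNx_P3 = 3.
 */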
static int _si5351_pll_reparent(struct si5351_driver_data *drvdata,
int num, enum si5351_pll_src parent)
{
u8 mask = (num == 0) ? SI5351_PLLA_SOURCE : SI5351_PLLB_SOURCE;
if (parent == SI5351_PLL_SRC_DEFAULT)
return 0;
if (num > 2)
return -EINVAL;
if (drvdata->variant != SI5351_VARIANT_C &&
parent != SI5351_PLL_SRC_XTAL)
return -EINVAL;
si5351_set_bits(drvdata, SI5351_PLL_INPUT_SOURCE, mask,
(parent == SI5351_PLL_SRC_XTAL) ? 0 : mask);
return 0;
}
static unsigned char si5351_pll_get_parent(struct clk_hw *hw)
{
struct si5351_hw_data *hwdata =
container_of(hw, struct si5351_hw_data, hw);
u8 mask = (hwdata->num == 0) ? SI5351_PLLA_SOURCE : SI5351_PLLB_SOURCE;
u8 val;
val = si5351_reg_read(hwdata->drvdata, SI5351_PLL_INPUT_SOURCE);
return (val & mask) ? 1 : 0;
}
static int si5351_pll_set_parent(struct clk_hw *hw, u8 index)
{
struct si5351_hw_data *hwdata =
container_of(hw, struct si5351_hw_data, hw);
if (hwdata->drvdata->variant != SI5351_VARIANT_C &&
index > 0)
return -EPERM;
if (index > 1)
return -EINVAL;
return _si5351_pll_reparent(hwdata->drvdata, hwdata->num,
(index == 0) ? SI5351_PLL_SRC_XTAL :
SI5351_PLL_SRC_CLKIN);
}
static unsigned long si5351_pll_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct si5351_hw_data *hwdata =
container_of(hw, struct si5351_hw_data, hw);
u8 reg = (hwdata->num == 0) ? SI5351_PLLA_PARAMETERS :
SI5351_PLLB_PARAMETERS;
unsigned long long rate;
if (!hwdata->params.valid)
si5351_read_parameters(hwdata->drvdata, reg, &hwdata->params);
if (hwdata->params.p3 == 0)
return parent_rate;
/* fVCO = fIN * (P1*P3 + 512*P3 + P2)/(128*P3) */
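	/*
	 * E.g. P1 = 3584, P2 = 0, P3 = 1 yields
	 * fVCO = fIN * 4096/128 = fIN * 32 (an integer multiplier of 32).
	 */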
rate = hwdata->params.p1 * hwdata->params.p3;
rate += 512 * hwdata->params.p3;
rate += hwdata->params.p2;
rate *= parent_rate;
do_div(rate, 128 * hwdata->params.p3);
dev_dbg(&hwdata->drvdata->client->dev,
"%s - %s: p1 = %lu, p2 = %lu, p3 = %lu, parent_rate = %lu, rate = %lu\n",
__func__, clk_hw_get_name(hw),
hwdata->params.p1, hwdata->params.p2, hwdata->params.p3,
parent_rate, (unsigned long)rate);
return (unsigned long)rate;
}
static int si5351_pll_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
struct si5351_hw_data *hwdata =
container_of(hw, struct si5351_hw_data, hw);
unsigned long rate = req->rate;
unsigned long rfrac, denom, a, b, c;
unsigned long long lltmp;
if (rate < SI5351_PLL_VCO_MIN)
rate = SI5351_PLL_VCO_MIN;
if (rate > SI5351_PLL_VCO_MAX)
rate = SI5351_PLL_VCO_MAX;
/* determine integer part of feedback equation */
a = rate / req->best_parent_rate;
if (a < SI5351_PLL_A_MIN)
rate = req->best_parent_rate * SI5351_PLL_A_MIN;
if (a > SI5351_PLL_A_MAX)
rate = req->best_parent_rate * SI5351_PLL_A_MAX;
/* find best approximation for b/c = fVCO mod fIN */
denom = 1000 * 1000;
lltmp = rate % (req->best_parent_rate);
lltmp *= denom;
do_div(lltmp, req->best_parent_rate);
rfrac = (unsigned long)lltmp;
b = 0;
c = 1;
if (rfrac)
rational_best_approximation(rfrac, denom,
SI5351_PLL_B_MAX, SI5351_PLL_C_MAX, &b, &c);
/* calculate parameters */
hwdata->params.p3 = c;
hwdata->params.p2 = (128 * b) % c;
hwdata->params.p1 = 128 * a;
hwdata->params.p1 += (128 * b / c);
hwdata->params.p1 -= 512;
/* recalculate rate by fIN * (a + b/c) */
lltmp = req->best_parent_rate;
lltmp *= b;
do_div(lltmp, c);
rate = (unsigned long)lltmp;
rate += req->best_parent_rate * a;
dev_dbg(&hwdata->drvdata->client->dev,
"%s - %s: a = %lu, b = %lu, c = %lu, parent_rate = %lu, rate = %lu\n",
__func__, clk_hw_get_name(hw), a, b, c,
req->best_parent_rate, rate);
req->rate = rate;
return 0;
}
static int si5351_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct si5351_hw_data *hwdata =
container_of(hw, struct si5351_hw_data, hw);
u8 reg = (hwdata->num == 0) ? SI5351_PLLA_PARAMETERS :
SI5351_PLLB_PARAMETERS;
/* write multisynth parameters */
si5351_write_parameters(hwdata->drvdata, reg, &hwdata->params);
/* plla/pllb ctrl is in clk6/clk7 ctrl registers */
si5351_set_bits(hwdata->drvdata, SI5351_CLK6_CTRL + hwdata->num,
SI5351_CLK_INTEGER_MODE,
(hwdata->params.p2 == 0) ? SI5351_CLK_INTEGER_MODE : 0);
/* Do a pll soft reset on the affected pll */
si5351_reg_write(hwdata->drvdata, SI5351_PLL_RESET,
hwdata->num == 0 ? SI5351_PLL_RESET_A :
SI5351_PLL_RESET_B);
dev_dbg(&hwdata->drvdata->client->dev,
"%s - %s: p1 = %lu, p2 = %lu, p3 = %lu, parent_rate = %lu, rate = %lu\n",
__func__, clk_hw_get_name(hw),
hwdata->params.p1, hwdata->params.p2, hwdata->params.p3,
parent_rate, rate);
return 0;
}
static const struct clk_ops si5351_pll_ops = {
.set_parent = si5351_pll_set_parent,
.get_parent = si5351_pll_get_parent,
.recalc_rate = si5351_pll_recalc_rate,
.determine_rate = si5351_pll_determine_rate,
.set_rate = si5351_pll_set_rate,
};
/*
* Si5351 multisync divider
*
* for fOUT <= 150 MHz:
*
* fOUT = (fIN * (a + b/c)) / CLKOUTDIV
*
* with 6 + 0/1048575 <= (a + b/c) <= 1800 + 0/1048575 and
* fIN = fVCO0, fVCO1
*
* Output Clock Multisynth Register Equations
*
* MSx_P1[17:0] = 128 * a + floor(128 * b/c) - 512
* MSx_P2[19:0] = 128 * b - c * floor(128 * b/c) = (128*b) mod c
* MSx_P3[19:0] = c
*
* MS[6,7] are integer (P1) divide only, P1 = divide value,
* P2 and P3 are not applicable
*
* for 150MHz < fOUT <= 160MHz:
*
* MSx_P1 = 0, MSx_P2 = 0, MSx_P3 = 1, MSx_INT = 1, MSx_DIVBY4 = 11b
*/
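/*
 * Worked example (illustrative): dividing a 900 MHz fVCO down to 150 MHz
 * needs a = 6, b = 0, c = 1, i.e. MSx_P1 = 128 * 6 - 512 = 256,
 * MSx_P2 = 0, MSx_P3 = 1.
 */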
static int _si5351_msynth_reparent(struct si5351_driver_data *drvdata,
int num, enum si5351_multisynth_src parent)
{
if (parent == SI5351_MULTISYNTH_SRC_DEFAULT)
return 0;
if (num > 8)
return -EINVAL;
si5351_set_bits(drvdata, SI5351_CLK0_CTRL + num, SI5351_CLK_PLL_SELECT,
(parent == SI5351_MULTISYNTH_SRC_VCO0) ? 0 :
SI5351_CLK_PLL_SELECT);
return 0;
}
static unsigned char si5351_msynth_get_parent(struct clk_hw *hw)
{
struct si5351_hw_data *hwdata =
container_of(hw, struct si5351_hw_data, hw);
u8 val;
val = si5351_reg_read(hwdata->drvdata, SI5351_CLK0_CTRL + hwdata->num);
return (val & SI5351_CLK_PLL_SELECT) ? 1 : 0;
}
static int si5351_msynth_set_parent(struct clk_hw *hw, u8 index)
{
struct si5351_hw_data *hwdata =
container_of(hw, struct si5351_hw_data, hw);
return _si5351_msynth_reparent(hwdata->drvdata, hwdata->num,
(index == 0) ? SI5351_MULTISYNTH_SRC_VCO0 :
SI5351_MULTISYNTH_SRC_VCO1);
}
static unsigned long si5351_msynth_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct si5351_hw_data *hwdata =
container_of(hw, struct si5351_hw_data, hw);
u8 reg = si5351_msynth_params_address(hwdata->num);
unsigned long long rate;
unsigned long m;
if (!hwdata->params.valid)
si5351_read_parameters(hwdata->drvdata, reg, &hwdata->params);
/*
 * multisynth0-5: fOUT = (128 * P3 * fIN) / (P1*P3 + P2 + 512*P3)
 * multisynth6-7: fOUT = fIN / P1
*/
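	/*
	 * E.g. on multisynth0-5, P1 = 256, P2 = 0, P3 = 1 gives
	 * fOUT = 128 * fIN / (256 + 512) = fIN / 6.
	 */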
rate = parent_rate;
if (hwdata->num > 5) {
m = hwdata->params.p1;
} else if (hwdata->params.p3 == 0) {
return parent_rate;
} else if ((si5351_reg_read(hwdata->drvdata, reg + 2) &
SI5351_OUTPUT_CLK_DIVBY4) == SI5351_OUTPUT_CLK_DIVBY4) {
m = 4;
} else {
rate *= 128 * hwdata->params.p3;
m = hwdata->params.p1 * hwdata->params.p3;
m += hwdata->params.p2;
m += 512 * hwdata->params.p3;
}
if (m == 0)
return 0;
do_div(rate, m);
dev_dbg(&hwdata->drvdata->client->dev,
"%s - %s: p1 = %lu, p2 = %lu, p3 = %lu, m = %lu, parent_rate = %lu, rate = %lu\n",
__func__, clk_hw_get_name(hw),
hwdata->params.p1, hwdata->params.p2, hwdata->params.p3,
m, parent_rate, (unsigned long)rate);
return (unsigned long)rate;
}
static int si5351_msynth_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
struct si5351_hw_data *hwdata =
container_of(hw, struct si5351_hw_data, hw);
unsigned long rate = req->rate;
unsigned long long lltmp;
unsigned long a, b, c;
int divby4;
/* multisynth6-7 can only handle frequencies < 150 MHz */
if (hwdata->num >= 6 && rate > SI5351_MULTISYNTH67_MAX_FREQ)
rate = SI5351_MULTISYNTH67_MAX_FREQ;
/* multisynth frequency range is 1 MHz .. 160 MHz */
if (rate > SI5351_MULTISYNTH_MAX_FREQ)
rate = SI5351_MULTISYNTH_MAX_FREQ;
if (rate < SI5351_MULTISYNTH_MIN_FREQ)
rate = SI5351_MULTISYNTH_MIN_FREQ;
divby4 = 0;
if (rate > SI5351_MULTISYNTH_DIVBY4_FREQ)
divby4 = 1;
/* multisynth is allowed to set the pll rate */
if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) {
/*
* find largest integer divider for max
* vco frequency and given target rate
*/
if (divby4 == 0) {
lltmp = SI5351_PLL_VCO_MAX;
do_div(lltmp, rate);
a = (unsigned long)lltmp;
} else
a = 4;
b = 0;
c = 1;
req->best_parent_rate = a * rate;
} else if (hwdata->num >= 6) {
/* determine the closest integer divider */
a = DIV_ROUND_CLOSEST(req->best_parent_rate, rate);
if (a < SI5351_MULTISYNTH_A_MIN)
a = SI5351_MULTISYNTH_A_MIN;
if (a > SI5351_MULTISYNTH67_A_MAX)
a = SI5351_MULTISYNTH67_A_MAX;
b = 0;
c = 1;
} else {
unsigned long rfrac, denom;
/* disable divby4 */
if (divby4) {
rate = SI5351_MULTISYNTH_DIVBY4_FREQ;
divby4 = 0;
}
/* determine integer part of divider equation */
a = req->best_parent_rate / rate;
if (a < SI5351_MULTISYNTH_A_MIN)
a = SI5351_MULTISYNTH_A_MIN;
if (a > SI5351_MULTISYNTH_A_MAX)
a = SI5351_MULTISYNTH_A_MAX;
/* find best approximation for b/c = fVCO mod fOUT */
denom = 1000 * 1000;
lltmp = req->best_parent_rate % rate;
lltmp *= denom;
do_div(lltmp, rate);
rfrac = (unsigned long)lltmp;
b = 0;
c = 1;
if (rfrac)
rational_best_approximation(rfrac, denom,
SI5351_MULTISYNTH_B_MAX, SI5351_MULTISYNTH_C_MAX,
&b, &c);
}
/* recalculate rate by fOUT = fIN / (a + b/c) */
lltmp = req->best_parent_rate;
lltmp *= c;
do_div(lltmp, a * c + b);
rate = (unsigned long)lltmp;
/* calculate parameters */
if (divby4) {
hwdata->params.p3 = 1;
hwdata->params.p2 = 0;
hwdata->params.p1 = 0;
} else if (hwdata->num >= 6) {
hwdata->params.p3 = 0;
hwdata->params.p2 = 0;
hwdata->params.p1 = a;
} else {
hwdata->params.p3 = c;
hwdata->params.p2 = (128 * b) % c;
hwdata->params.p1 = 128 * a;
hwdata->params.p1 += (128 * b / c);
hwdata->params.p1 -= 512;
}
dev_dbg(&hwdata->drvdata->client->dev,
"%s - %s: a = %lu, b = %lu, c = %lu, divby4 = %d, parent_rate = %lu, rate = %lu\n",
__func__, clk_hw_get_name(hw), a, b, c, divby4,
req->best_parent_rate, rate);
req->rate = rate;
return 0;
}
static int si5351_msynth_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct si5351_hw_data *hwdata =
container_of(hw, struct si5351_hw_data, hw);
u8 reg = si5351_msynth_params_address(hwdata->num);
int divby4 = 0;
/* write multisynth parameters */
si5351_write_parameters(hwdata->drvdata, reg, &hwdata->params);
if (rate > SI5351_MULTISYNTH_DIVBY4_FREQ)
divby4 = 1;
/* enable/disable integer mode and divby4 on multisynth0-5 */
if (hwdata->num < 6) {
si5351_set_bits(hwdata->drvdata, reg + 2,
SI5351_OUTPUT_CLK_DIVBY4,
(divby4) ? SI5351_OUTPUT_CLK_DIVBY4 : 0);
si5351_set_bits(hwdata->drvdata, SI5351_CLK0_CTRL + hwdata->num,
SI5351_CLK_INTEGER_MODE,
(hwdata->params.p2 == 0) ? SI5351_CLK_INTEGER_MODE : 0);
}
dev_dbg(&hwdata->drvdata->client->dev,
"%s - %s: p1 = %lu, p2 = %lu, p3 = %lu, divby4 = %d, parent_rate = %lu, rate = %lu\n",
__func__, clk_hw_get_name(hw),
hwdata->params.p1, hwdata->params.p2, hwdata->params.p3,
divby4, parent_rate, rate);
return 0;
}
static const struct clk_ops si5351_msynth_ops = {
.set_parent = si5351_msynth_set_parent,
.get_parent = si5351_msynth_get_parent,
.recalc_rate = si5351_msynth_recalc_rate,
.determine_rate = si5351_msynth_determine_rate,
.set_rate = si5351_msynth_set_rate,
};
/*
* Si5351 clkout divider
*/
static int _si5351_clkout_reparent(struct si5351_driver_data *drvdata,
int num, enum si5351_clkout_src parent)
{
u8 val;
if (num > 8)
return -EINVAL;
switch (parent) {
case SI5351_CLKOUT_SRC_MSYNTH_N:
val = SI5351_CLK_INPUT_MULTISYNTH_N;
break;
case SI5351_CLKOUT_SRC_MSYNTH_0_4:
/* clk0/clk4 can only connect to its own multisynth */
if (num == 0 || num == 4)
val = SI5351_CLK_INPUT_MULTISYNTH_N;
else
val = SI5351_CLK_INPUT_MULTISYNTH_0_4;
break;
case SI5351_CLKOUT_SRC_XTAL:
val = SI5351_CLK_INPUT_XTAL;
break;
case SI5351_CLKOUT_SRC_CLKIN:
if (drvdata->variant != SI5351_VARIANT_C)
return -EINVAL;
val = SI5351_CLK_INPUT_CLKIN;
break;
default:
return 0;
}
si5351_set_bits(drvdata, SI5351_CLK0_CTRL + num,
SI5351_CLK_INPUT_MASK, val);
return 0;
}
static int _si5351_clkout_set_drive_strength(
struct si5351_driver_data *drvdata, int num,
enum si5351_drive_strength drive)
{
u8 mask;
if (num > 8)
return -EINVAL;
switch (drive) {
case SI5351_DRIVE_2MA:
mask = SI5351_CLK_DRIVE_STRENGTH_2MA;
break;
case SI5351_DRIVE_4MA:
mask = SI5351_CLK_DRIVE_STRENGTH_4MA;
break;
case SI5351_DRIVE_6MA:
mask = SI5351_CLK_DRIVE_STRENGTH_6MA;
break;
case SI5351_DRIVE_8MA:
mask = SI5351_CLK_DRIVE_STRENGTH_8MA;
break;
default:
return 0;
}
si5351_set_bits(drvdata, SI5351_CLK0_CTRL + num,
SI5351_CLK_DRIVE_STRENGTH_MASK, mask);
return 0;
}
static int _si5351_clkout_set_disable_state(
struct si5351_driver_data *drvdata, int num,
enum si5351_disable_state state)
{
u8 reg = (num < 4) ? SI5351_CLK3_0_DISABLE_STATE :
SI5351_CLK7_4_DISABLE_STATE;
u8 shift = (num < 4) ? (2 * num) : (2 * (num-4));
u8 mask = SI5351_CLK_DISABLE_STATE_MASK << shift;
u8 val;
if (num > 8)
return -EINVAL;
switch (state) {
case SI5351_DISABLE_LOW:
val = SI5351_CLK_DISABLE_STATE_LOW;
break;
case SI5351_DISABLE_HIGH:
val = SI5351_CLK_DISABLE_STATE_HIGH;
break;
case SI5351_DISABLE_FLOATING:
val = SI5351_CLK_DISABLE_STATE_FLOAT;
break;
case SI5351_DISABLE_NEVER:
val = SI5351_CLK_DISABLE_STATE_NEVER;
break;
default:
return 0;
}
si5351_set_bits(drvdata, reg, mask, val << shift);
return 0;
}
static void _si5351_clkout_reset_pll(struct si5351_driver_data *drvdata, int num)
{
u8 val = si5351_reg_read(drvdata, SI5351_CLK0_CTRL + num);
u8 mask = val & SI5351_CLK_PLL_SELECT ? SI5351_PLL_RESET_B :
SI5351_PLL_RESET_A;
unsigned int v;
int err;
switch (val & SI5351_CLK_INPUT_MASK) {
case SI5351_CLK_INPUT_XTAL:
case SI5351_CLK_INPUT_CLKIN:
return; /* pll not used, no need to reset */
}
si5351_reg_write(drvdata, SI5351_PLL_RESET, mask);
err = regmap_read_poll_timeout(drvdata->regmap, SI5351_PLL_RESET, v,
!(v & mask), 0, 20000);
if (err < 0)
dev_err(&drvdata->client->dev, "Reset bit didn't clear\n");
dev_dbg(&drvdata->client->dev, "%s - %s: pll = %d\n",
__func__, clk_hw_get_name(&drvdata->clkout[num].hw),
(val & SI5351_CLK_PLL_SELECT) ? 1 : 0);
}
static int si5351_clkout_prepare(struct clk_hw *hw)
{
struct si5351_hw_data *hwdata =
container_of(hw, struct si5351_hw_data, hw);
struct si5351_platform_data *pdata =
hwdata->drvdata->client->dev.platform_data;
si5351_set_bits(hwdata->drvdata, SI5351_CLK0_CTRL + hwdata->num,
SI5351_CLK_POWERDOWN, 0);
/*
* Do a pll soft reset on the parent pll -- needed to get a
* deterministic phase relationship between the output clocks.
*/
if (pdata->clkout[hwdata->num].pll_reset)
_si5351_clkout_reset_pll(hwdata->drvdata, hwdata->num);
si5351_set_bits(hwdata->drvdata, SI5351_OUTPUT_ENABLE_CTRL,
(1 << hwdata->num), 0);
return 0;
}
static void si5351_clkout_unprepare(struct clk_hw *hw)
{
struct si5351_hw_data *hwdata =
container_of(hw, struct si5351_hw_data, hw);
si5351_set_bits(hwdata->drvdata, SI5351_CLK0_CTRL + hwdata->num,
SI5351_CLK_POWERDOWN, SI5351_CLK_POWERDOWN);
si5351_set_bits(hwdata->drvdata, SI5351_OUTPUT_ENABLE_CTRL,
(1 << hwdata->num), (1 << hwdata->num));
}
static u8 si5351_clkout_get_parent(struct clk_hw *hw)
{
struct si5351_hw_data *hwdata =
container_of(hw, struct si5351_hw_data, hw);
int index = 0;
unsigned char val;
val = si5351_reg_read(hwdata->drvdata, SI5351_CLK0_CTRL + hwdata->num);
switch (val & SI5351_CLK_INPUT_MASK) {
case SI5351_CLK_INPUT_MULTISYNTH_N:
index = 0;
break;
case SI5351_CLK_INPUT_MULTISYNTH_0_4:
index = 1;
break;
case SI5351_CLK_INPUT_XTAL:
index = 2;
break;
case SI5351_CLK_INPUT_CLKIN:
index = 3;
break;
}
return index;
}
static int si5351_clkout_set_parent(struct clk_hw *hw, u8 index)
{
struct si5351_hw_data *hwdata =
container_of(hw, struct si5351_hw_data, hw);
enum si5351_clkout_src parent = SI5351_CLKOUT_SRC_DEFAULT;
switch (index) {
case 0:
parent = SI5351_CLKOUT_SRC_MSYNTH_N;
break;
case 1:
parent = SI5351_CLKOUT_SRC_MSYNTH_0_4;
break;
case 2:
parent = SI5351_CLKOUT_SRC_XTAL;
break;
case 3:
parent = SI5351_CLKOUT_SRC_CLKIN;
break;
}
return _si5351_clkout_reparent(hwdata->drvdata, hwdata->num, parent);
}
static unsigned long si5351_clkout_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct si5351_hw_data *hwdata =
container_of(hw, struct si5351_hw_data, hw);
unsigned char reg;
unsigned char rdiv;
if (hwdata->num <= 5)
reg = si5351_msynth_params_address(hwdata->num) + 2;
else
reg = SI5351_CLK6_7_OUTPUT_DIVIDER;
rdiv = si5351_reg_read(hwdata->drvdata, reg);
if (hwdata->num == 6) {
rdiv &= SI5351_OUTPUT_CLK6_DIV_MASK;
} else {
rdiv &= SI5351_OUTPUT_CLK_DIV_MASK;
rdiv >>= SI5351_OUTPUT_CLK_DIV_SHIFT;
}
return parent_rate >> rdiv;
}
static int si5351_clkout_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
struct si5351_hw_data *hwdata =
container_of(hw, struct si5351_hw_data, hw);
unsigned long rate = req->rate;
unsigned char rdiv;
/* clkout6/7 can only handle output frequencies < 150 MHz */
if (hwdata->num >= 6 && rate > SI5351_CLKOUT67_MAX_FREQ)
rate = SI5351_CLKOUT67_MAX_FREQ;
/* clkout frequency range is 8 kHz - 160 MHz */
if (rate > SI5351_CLKOUT_MAX_FREQ)
rate = SI5351_CLKOUT_MAX_FREQ;
if (rate < SI5351_CLKOUT_MIN_FREQ)
rate = SI5351_CLKOUT_MIN_FREQ;
/* request frequency if multisync master */
if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) {
/* use r divider for frequencies below 1MHz */
rdiv = SI5351_OUTPUT_CLK_DIV_1;
while (rate < SI5351_MULTISYNTH_MIN_FREQ &&
rdiv < SI5351_OUTPUT_CLK_DIV_128) {
rdiv += 1;
rate *= 2;
}
req->best_parent_rate = rate;
} else {
unsigned long new_rate, new_err, err;
/* round to closest rdiv */
rdiv = SI5351_OUTPUT_CLK_DIV_1;
new_rate = req->best_parent_rate;
err = abs(new_rate - rate);
do {
new_rate >>= 1;
new_err = abs(new_rate - rate);
if (new_err > err || rdiv == SI5351_OUTPUT_CLK_DIV_128)
break;
rdiv++;
err = new_err;
} while (1);
}
rate = req->best_parent_rate >> rdiv;
dev_dbg(&hwdata->drvdata->client->dev,
"%s - %s: rdiv = %u, parent_rate = %lu, rate = %lu\n",
__func__, clk_hw_get_name(hw), (1 << rdiv),
req->best_parent_rate, rate);
req->rate = rate;
return 0;
}
static int si5351_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct si5351_hw_data *hwdata =
container_of(hw, struct si5351_hw_data, hw);
unsigned long new_rate, new_err, err;
unsigned char rdiv;
/* round to closest rdiv */
rdiv = SI5351_OUTPUT_CLK_DIV_1;
new_rate = parent_rate;
err = abs(new_rate - rate);
do {
new_rate >>= 1;
new_err = abs(new_rate - rate);
if (new_err > err || rdiv == SI5351_OUTPUT_CLK_DIV_128)
break;
rdiv++;
err = new_err;
} while (1);
/* write output divider */
switch (hwdata->num) {
case 6:
si5351_set_bits(hwdata->drvdata, SI5351_CLK6_7_OUTPUT_DIVIDER,
SI5351_OUTPUT_CLK6_DIV_MASK, rdiv);
break;
case 7:
si5351_set_bits(hwdata->drvdata, SI5351_CLK6_7_OUTPUT_DIVIDER,
SI5351_OUTPUT_CLK_DIV_MASK,
rdiv << SI5351_OUTPUT_CLK_DIV_SHIFT);
break;
default:
si5351_set_bits(hwdata->drvdata,
si5351_msynth_params_address(hwdata->num) + 2,
SI5351_OUTPUT_CLK_DIV_MASK,
rdiv << SI5351_OUTPUT_CLK_DIV_SHIFT);
}
/* powerup clkout */
si5351_set_bits(hwdata->drvdata, SI5351_CLK0_CTRL + hwdata->num,
SI5351_CLK_POWERDOWN, 0);
dev_dbg(&hwdata->drvdata->client->dev,
"%s - %s: rdiv = %u, parent_rate = %lu, rate = %lu\n",
__func__, clk_hw_get_name(hw), (1 << rdiv),
parent_rate, rate);
return 0;
}
static const struct clk_ops si5351_clkout_ops = {
.prepare = si5351_clkout_prepare,
.unprepare = si5351_clkout_unprepare,
.set_parent = si5351_clkout_set_parent,
.get_parent = si5351_clkout_get_parent,
.recalc_rate = si5351_clkout_recalc_rate,
.determine_rate = si5351_clkout_determine_rate,
.set_rate = si5351_clkout_set_rate,
};
/*
* Si5351 i2c probe and DT
*/
#ifdef CONFIG_OF
static const struct of_device_id si5351_dt_ids[] = {
{ .compatible = "silabs,si5351a", .data = (void *)SI5351_VARIANT_A, },
{ .compatible = "silabs,si5351a-msop",
.data = (void *)SI5351_VARIANT_A3, },
{ .compatible = "silabs,si5351b", .data = (void *)SI5351_VARIANT_B, },
{ .compatible = "silabs,si5351c", .data = (void *)SI5351_VARIANT_C, },
{ }
};
MODULE_DEVICE_TABLE(of, si5351_dt_ids);
static int si5351_dt_parse(struct i2c_client *client,
enum si5351_variant variant)
{
struct device_node *child, *np = client->dev.of_node;
struct si5351_platform_data *pdata;
struct property *prop;
const __be32 *p;
int num = 0;
u32 val;
if (np == NULL)
return 0;
pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
/*
* property silabs,pll-source : <num src>, [<..>]
 * allows selectively setting the pll source
*/
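/*
 * Hypothetical device tree fragment (for illustration only):
 *	silabs,pll-source = <0 0>, <1 1>;
 * would route PLLA to XTAL and PLLB to CLKIN; the CLKIN choice is only
 * accepted for the Si5351C variant, as checked below.
 */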
of_property_for_each_u32(np, "silabs,pll-source", prop, p, num) {
if (num >= 2) {
dev_err(&client->dev,
"invalid pll %d on pll-source prop\n", num);
return -EINVAL;
}
p = of_prop_next_u32(prop, p, &val);
if (!p) {
dev_err(&client->dev,
"missing pll-source for pll %d\n", num);
return -EINVAL;
}
switch (val) {
case 0:
pdata->pll_src[num] = SI5351_PLL_SRC_XTAL;
break;
case 1:
if (variant != SI5351_VARIANT_C) {
dev_err(&client->dev,
"invalid parent %d for pll %d\n",
val, num);
return -EINVAL;
}
pdata->pll_src[num] = SI5351_PLL_SRC_CLKIN;
break;
default:
dev_err(&client->dev,
"invalid parent %d for pll %d\n", val, num);
return -EINVAL;
}
}
/* per clkout properties */
for_each_child_of_node(np, child) {
if (of_property_read_u32(child, "reg", &num)) {
dev_err(&client->dev, "missing reg property of %pOFn\n",
child);
goto put_child;
}
if (num >= 8 ||
(variant == SI5351_VARIANT_A3 && num >= 3)) {
dev_err(&client->dev, "invalid clkout %d\n", num);
goto put_child;
}
if (!of_property_read_u32(child, "silabs,multisynth-source",
&val)) {
switch (val) {
case 0:
pdata->clkout[num].multisynth_src =
SI5351_MULTISYNTH_SRC_VCO0;
break;
case 1:
pdata->clkout[num].multisynth_src =
SI5351_MULTISYNTH_SRC_VCO1;
break;
default:
dev_err(&client->dev,
"invalid parent %d for multisynth %d\n",
val, num);
goto put_child;
}
}
if (!of_property_read_u32(child, "silabs,clock-source", &val)) {
switch (val) {
case 0:
pdata->clkout[num].clkout_src =
SI5351_CLKOUT_SRC_MSYNTH_N;
break;
case 1:
pdata->clkout[num].clkout_src =
SI5351_CLKOUT_SRC_MSYNTH_0_4;
break;
case 2:
pdata->clkout[num].clkout_src =
SI5351_CLKOUT_SRC_XTAL;
break;
case 3:
if (variant != SI5351_VARIANT_C) {
dev_err(&client->dev,
"invalid parent %d for clkout %d\n",
val, num);
goto put_child;
}
pdata->clkout[num].clkout_src =
SI5351_CLKOUT_SRC_CLKIN;
break;
default:
dev_err(&client->dev,
"invalid parent %d for clkout %d\n",
val, num);
goto put_child;
}
}
if (!of_property_read_u32(child, "silabs,drive-strength",
&val)) {
switch (val) {
case SI5351_DRIVE_2MA:
case SI5351_DRIVE_4MA:
case SI5351_DRIVE_6MA:
case SI5351_DRIVE_8MA:
pdata->clkout[num].drive = val;
break;
default:
dev_err(&client->dev,
"invalid drive strength %d for clkout %d\n",
val, num);
goto put_child;
}
}
if (!of_property_read_u32(child, "silabs,disable-state",
&val)) {
switch (val) {
case 0:
pdata->clkout[num].disable_state =
SI5351_DISABLE_LOW;
break;
case 1:
pdata->clkout[num].disable_state =
SI5351_DISABLE_HIGH;
break;
case 2:
pdata->clkout[num].disable_state =
SI5351_DISABLE_FLOATING;
break;
case 3:
pdata->clkout[num].disable_state =
SI5351_DISABLE_NEVER;
break;
default:
dev_err(&client->dev,
"invalid disable state %d for clkout %d\n",
val, num);
goto put_child;
}
}
if (!of_property_read_u32(child, "clock-frequency", &val))
pdata->clkout[num].rate = val;
pdata->clkout[num].pll_master =
of_property_read_bool(child, "silabs,pll-master");
pdata->clkout[num].pll_reset =
of_property_read_bool(child, "silabs,pll-reset");
}
client->dev.platform_data = pdata;
return 0;
put_child:
of_node_put(child);
return -EINVAL;
}
static struct clk_hw *
si5351_of_clk_get(struct of_phandle_args *clkspec, void *data)
{
struct si5351_driver_data *drvdata = data;
unsigned int idx = clkspec->args[0];
if (idx >= drvdata->num_clkout) {
pr_err("%s: invalid index %u\n", __func__, idx);
return ERR_PTR(-EINVAL);
}
return &drvdata->clkout[idx].hw;
}
#else
static int si5351_dt_parse(struct i2c_client *client, enum si5351_variant variant)
{
return 0;
}
static struct clk_hw *
si5351_of_clk_get(struct of_phandle_args *clkspec, void *data)
{
return NULL;
}
#endif /* CONFIG_OF */
static const struct i2c_device_id si5351_i2c_ids[] = {
{ "si5351a", SI5351_VARIANT_A },
{ "si5351a-msop", SI5351_VARIANT_A3 },
{ "si5351b", SI5351_VARIANT_B },
{ "si5351c", SI5351_VARIANT_C },
{ }
};
MODULE_DEVICE_TABLE(i2c, si5351_i2c_ids);
static int si5351_i2c_probe(struct i2c_client *client)
{
const struct i2c_device_id *id = i2c_match_id(si5351_i2c_ids, client);
enum si5351_variant variant = (enum si5351_variant)id->driver_data;
struct si5351_platform_data *pdata;
struct si5351_driver_data *drvdata;
struct clk_init_data init;
const char *parent_names[4];
u8 num_parents, num_clocks;
int ret, n;
ret = si5351_dt_parse(client, variant);
if (ret)
return ret;
pdata = client->dev.platform_data;
if (!pdata)
return -EINVAL;
drvdata = devm_kzalloc(&client->dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
i2c_set_clientdata(client, drvdata);
drvdata->client = client;
drvdata->variant = variant;
drvdata->pxtal = devm_clk_get(&client->dev, "xtal");
drvdata->pclkin = devm_clk_get(&client->dev, "clkin");
if (PTR_ERR(drvdata->pxtal) == -EPROBE_DEFER ||
PTR_ERR(drvdata->pclkin) == -EPROBE_DEFER)
return -EPROBE_DEFER;
/*
* Check for valid parent clock: VARIANT_A and VARIANT_B need XTAL,
* VARIANT_C can have CLKIN instead.
*/
if (IS_ERR(drvdata->pxtal) &&
(drvdata->variant != SI5351_VARIANT_C || IS_ERR(drvdata->pclkin))) {
dev_err(&client->dev, "missing parent clock\n");
return -EINVAL;
}
drvdata->regmap = devm_regmap_init_i2c(client, &si5351_regmap_config);
if (IS_ERR(drvdata->regmap)) {
dev_err(&client->dev, "failed to allocate register map\n");
return PTR_ERR(drvdata->regmap);
}
/* Disable interrupts */
si5351_reg_write(drvdata, SI5351_INTERRUPT_MASK, 0xf0);
/* Ensure pll select is on XTAL for Si5351A/B */
if (drvdata->variant != SI5351_VARIANT_C)
si5351_set_bits(drvdata, SI5351_PLL_INPUT_SOURCE,
SI5351_PLLA_SOURCE | SI5351_PLLB_SOURCE, 0);
/* setup clock configuration */
for (n = 0; n < 2; n++) {
ret = _si5351_pll_reparent(drvdata, n, pdata->pll_src[n]);
if (ret) {
dev_err(&client->dev,
"failed to reparent pll %d to %d\n",
n, pdata->pll_src[n]);
return ret;
}
}
for (n = 0; n < 8; n++) {
ret = _si5351_msynth_reparent(drvdata, n,
pdata->clkout[n].multisynth_src);
if (ret) {
dev_err(&client->dev,
"failed to reparent multisynth %d to %d\n",
n, pdata->clkout[n].multisynth_src);
return ret;
}
ret = _si5351_clkout_reparent(drvdata, n,
pdata->clkout[n].clkout_src);
if (ret) {
dev_err(&client->dev,
"failed to reparent clkout %d to %d\n",
n, pdata->clkout[n].clkout_src);
return ret;
}
ret = _si5351_clkout_set_drive_strength(drvdata, n,
pdata->clkout[n].drive);
if (ret) {
dev_err(&client->dev,
"failed set drive strength of clkout%d to %d\n",
n, pdata->clkout[n].drive);
return ret;
}
ret = _si5351_clkout_set_disable_state(drvdata, n,
pdata->clkout[n].disable_state);
if (ret) {
dev_err(&client->dev,
"failed set disable state of clkout%d to %d\n",
n, pdata->clkout[n].disable_state);
return ret;
}
}
/* register xtal input clock gate */
memset(&init, 0, sizeof(init));
init.name = si5351_input_names[0];
init.ops = &si5351_xtal_ops;
init.flags = 0;
if (!IS_ERR(drvdata->pxtal)) {
drvdata->pxtal_name = __clk_get_name(drvdata->pxtal);
init.parent_names = &drvdata->pxtal_name;
init.num_parents = 1;
}
drvdata->xtal.init = &init;
ret = devm_clk_hw_register(&client->dev, &drvdata->xtal);
if (ret) {
dev_err(&client->dev, "unable to register %s\n", init.name);
return ret;
}
/* register clkin input clock gate */
if (drvdata->variant == SI5351_VARIANT_C) {
memset(&init, 0, sizeof(init));
init.name = si5351_input_names[1];
init.ops = &si5351_clkin_ops;
if (!IS_ERR(drvdata->pclkin)) {
drvdata->pclkin_name = __clk_get_name(drvdata->pclkin);
init.parent_names = &drvdata->pclkin_name;
init.num_parents = 1;
}
drvdata->clkin.init = &init;
ret = devm_clk_hw_register(&client->dev, &drvdata->clkin);
if (ret) {
dev_err(&client->dev, "unable to register %s\n",
init.name);
return ret;
}
}
/* Si5351C allows muxing either xtal or clkin to the PLL input */
num_parents = (drvdata->variant == SI5351_VARIANT_C) ? 2 : 1;
parent_names[0] = si5351_input_names[0];
parent_names[1] = si5351_input_names[1];
/* register PLLA */
drvdata->pll[0].num = 0;
drvdata->pll[0].drvdata = drvdata;
drvdata->pll[0].hw.init = &init;
memset(&init, 0, sizeof(init));
init.name = si5351_pll_names[0];
init.ops = &si5351_pll_ops;
init.flags = 0;
init.parent_names = parent_names;
init.num_parents = num_parents;
ret = devm_clk_hw_register(&client->dev, &drvdata->pll[0].hw);
if (ret) {
dev_err(&client->dev, "unable to register %s\n", init.name);
return ret;
}
/* register PLLB or VXCO (Si5351B) */
drvdata->pll[1].num = 1;
drvdata->pll[1].drvdata = drvdata;
drvdata->pll[1].hw.init = &init;
memset(&init, 0, sizeof(init));
if (drvdata->variant == SI5351_VARIANT_B) {
init.name = si5351_pll_names[2];
init.ops = &si5351_vxco_ops;
init.flags = 0;
init.parent_names = NULL;
init.num_parents = 0;
} else {
init.name = si5351_pll_names[1];
init.ops = &si5351_pll_ops;
init.flags = 0;
init.parent_names = parent_names;
init.num_parents = num_parents;
}
ret = devm_clk_hw_register(&client->dev, &drvdata->pll[1].hw);
if (ret) {
dev_err(&client->dev, "unable to register %s\n", init.name);
return ret;
}
/* register clk multisync and clk out divider */
num_clocks = (drvdata->variant == SI5351_VARIANT_A3) ? 3 : 8;
parent_names[0] = si5351_pll_names[0];
if (drvdata->variant == SI5351_VARIANT_B)
parent_names[1] = si5351_pll_names[2];
else
parent_names[1] = si5351_pll_names[1];
drvdata->msynth = devm_kcalloc(&client->dev, num_clocks,
sizeof(*drvdata->msynth), GFP_KERNEL);
drvdata->clkout = devm_kcalloc(&client->dev, num_clocks,
sizeof(*drvdata->clkout), GFP_KERNEL);
drvdata->num_clkout = num_clocks;
if (WARN_ON(!drvdata->msynth || !drvdata->clkout)) {
ret = -ENOMEM;
return ret;
}
for (n = 0; n < num_clocks; n++) {
drvdata->msynth[n].num = n;
drvdata->msynth[n].drvdata = drvdata;
drvdata->msynth[n].hw.init = &init;
memset(&init, 0, sizeof(init));
init.name = si5351_msynth_names[n];
init.ops = &si5351_msynth_ops;
init.flags = 0;
if (pdata->clkout[n].pll_master)
init.flags |= CLK_SET_RATE_PARENT;
init.parent_names = parent_names;
init.num_parents = 2;
ret = devm_clk_hw_register(&client->dev,
&drvdata->msynth[n].hw);
if (ret) {
dev_err(&client->dev, "unable to register %s\n",
init.name);
return ret;
}
}
num_parents = (drvdata->variant == SI5351_VARIANT_C) ? 4 : 3;
parent_names[2] = si5351_input_names[0];
parent_names[3] = si5351_input_names[1];
for (n = 0; n < num_clocks; n++) {
parent_names[0] = si5351_msynth_names[n];
parent_names[1] = (n < 4) ? si5351_msynth_names[0] :
si5351_msynth_names[4];
drvdata->clkout[n].num = n;
drvdata->clkout[n].drvdata = drvdata;
drvdata->clkout[n].hw.init = &init;
memset(&init, 0, sizeof(init));
init.name = si5351_clkout_names[n];
init.ops = &si5351_clkout_ops;
init.flags = 0;
if (pdata->clkout[n].clkout_src == SI5351_CLKOUT_SRC_MSYNTH_N)
init.flags |= CLK_SET_RATE_PARENT;
init.parent_names = parent_names;
init.num_parents = num_parents;
ret = devm_clk_hw_register(&client->dev,
&drvdata->clkout[n].hw);
if (ret) {
dev_err(&client->dev, "unable to register %s\n",
init.name);
return ret;
}
/* set initial clkout rate */
if (pdata->clkout[n].rate != 0) {
int ret;
ret = clk_set_rate(drvdata->clkout[n].hw.clk,
pdata->clkout[n].rate);
if (ret != 0) {
dev_err(&client->dev, "Cannot set rate : %d\n",
ret);
}
}
}
ret = devm_of_clk_add_hw_provider(&client->dev, si5351_of_clk_get,
drvdata);
if (ret) {
dev_err(&client->dev, "unable to add clk provider\n");
return ret;
}
return 0;
}
static struct i2c_driver si5351_driver = {
.driver = {
.name = "si5351",
.of_match_table = of_match_ptr(si5351_dt_ids),
},
.probe = si5351_i2c_probe,
.id_table = si5351_i2c_ids,
};
module_i2c_driver(si5351_driver);
MODULE_AUTHOR("Sebastian Hesselbarth <[email protected]");
MODULE_DESCRIPTION("Silicon Labs Si5351A/B/C clock generator driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/clk/clk-si5351.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Microchip LAN966x SoC Clock driver.
*
* Copyright (C) 2021 Microchip Technology, Inc. and its subsidiaries
*
* Author: Kavyasree Kotagiri <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <dt-bindings/clock/microchip,lan966x.h>
#define GCK_ENA BIT(0)
#define GCK_SRC_SEL GENMASK(9, 8)
#define GCK_PRESCALER GENMASK(23, 16)
#define DIV_MAX 255
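/*
 * Each generic clock (GCK) is controlled by a single 32-bit register:
 * bit 0 enables the clock, bits 9:8 select the parent and bits 23:16
 * hold the prescaler value (the division ratio minus one).
 */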
static const char *clk_names[N_CLOCKS] = {
"qspi0", "qspi1", "qspi2", "sdmmc0",
"pi", "mcan0", "mcan1", "flexcom0",
"flexcom1", "flexcom2", "flexcom3",
"flexcom4", "timer1", "usb_refclk",
};
struct lan966x_gck {
struct clk_hw hw;
void __iomem *reg;
};
#define to_lan966x_gck(hw) container_of(hw, struct lan966x_gck, hw)
static const struct clk_parent_data lan966x_gck_pdata[] = {
{ .fw_name = "cpu", },
{ .fw_name = "ddr", },
{ .fw_name = "sys", },
};
static struct clk_init_data init = {
.parent_data = lan966x_gck_pdata,
.num_parents = ARRAY_SIZE(lan966x_gck_pdata),
};
struct clk_gate_soc_desc {
const char *name;
int bit_idx;
};
static const struct clk_gate_soc_desc clk_gate_desc[] = {
{ "uhphs", 11 },
{ "udphs", 10 },
{ "mcramc", 9 },
{ "hmatrix", 8 },
{ }
};
static DEFINE_SPINLOCK(clk_gate_lock);
static void __iomem *base;
static int lan966x_gck_enable(struct clk_hw *hw)
{
struct lan966x_gck *gck = to_lan966x_gck(hw);
u32 val = readl(gck->reg);
val |= GCK_ENA;
writel(val, gck->reg);
return 0;
}
static void lan966x_gck_disable(struct clk_hw *hw)
{
struct lan966x_gck *gck = to_lan966x_gck(hw);
u32 val = readl(gck->reg);
val &= ~GCK_ENA;
writel(val, gck->reg);
}
static int lan966x_gck_set_rate(struct clk_hw *hw,
unsigned long rate,
unsigned long parent_rate)
{
struct lan966x_gck *gck = to_lan966x_gck(hw);
u32 div, val = readl(gck->reg);
if (rate == 0 || parent_rate == 0)
return -EINVAL;
/* Set prescaler */
div = parent_rate / rate;
val &= ~GCK_PRESCALER;
val |= FIELD_PREP(GCK_PRESCALER, (div - 1));
writel(val, gck->reg);
return 0;
}
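/*
 * Example (illustrative): with a 600 MHz parent and a requested 100 MHz,
 * div = 6, so GCK_PRESCALER is written as 5 and recalc_rate() below
 * returns 600 MHz / (5 + 1) = 100 MHz.
 */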
static unsigned long lan966x_gck_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct lan966x_gck *gck = to_lan966x_gck(hw);
u32 div, val = readl(gck->reg);
div = FIELD_GET(GCK_PRESCALER, val);
return parent_rate / (div + 1);
}
static int lan966x_gck_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
struct clk_hw *parent;
int i;
for (i = 0; i < clk_hw_get_num_parents(hw); ++i) {
parent = clk_hw_get_parent_by_index(hw, i);
if (!parent)
continue;
/* Allowed prescaler divider range is 0-255 */
if (clk_hw_get_rate(parent) / req->rate <= DIV_MAX) {
req->best_parent_hw = parent;
req->best_parent_rate = clk_hw_get_rate(parent);
return 0;
}
}
return -EINVAL;
}
static u8 lan966x_gck_get_parent(struct clk_hw *hw)
{
struct lan966x_gck *gck = to_lan966x_gck(hw);
u32 val = readl(gck->reg);
return FIELD_GET(GCK_SRC_SEL, val);
}
static int lan966x_gck_set_parent(struct clk_hw *hw, u8 index)
{
struct lan966x_gck *gck = to_lan966x_gck(hw);
u32 val = readl(gck->reg);
val &= ~GCK_SRC_SEL;
val |= FIELD_PREP(GCK_SRC_SEL, index);
writel(val, gck->reg);
return 0;
}
static const struct clk_ops lan966x_gck_ops = {
.enable = lan966x_gck_enable,
.disable = lan966x_gck_disable,
.set_rate = lan966x_gck_set_rate,
.recalc_rate = lan966x_gck_recalc_rate,
.determine_rate = lan966x_gck_determine_rate,
.set_parent = lan966x_gck_set_parent,
.get_parent = lan966x_gck_get_parent,
};
static struct clk_hw *lan966x_gck_clk_register(struct device *dev, int i)
{
struct lan966x_gck *priv;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return ERR_PTR(-ENOMEM);
priv->reg = base + (i * 4);
priv->hw.init = &init;
ret = devm_clk_hw_register(dev, &priv->hw);
if (ret)
return ERR_PTR(ret);
return &priv->hw;
};
static int lan966x_gate_clk_register(struct device *dev,
struct clk_hw_onecell_data *hw_data,
void __iomem *gate_base)
{
int i;
for (i = GCK_GATE_UHPHS; i < N_CLOCKS; ++i) {
int idx = i - GCK_GATE_UHPHS;
hw_data->hws[i] =
devm_clk_hw_register_gate(dev, clk_gate_desc[idx].name,
"lan966x", 0, gate_base,
clk_gate_desc[idx].bit_idx,
0, &clk_gate_lock);
if (IS_ERR(hw_data->hws[i]))
return dev_err_probe(dev, PTR_ERR(hw_data->hws[i]),
"failed to register %s clock\n",
clk_gate_desc[idx].name);
}
return 0;
}
static int lan966x_clk_probe(struct platform_device *pdev)
{
struct clk_hw_onecell_data *hw_data;
struct device *dev = &pdev->dev;
void __iomem *gate_base;
struct resource *res;
int i, ret;
hw_data = devm_kzalloc(dev, struct_size(hw_data, hws, N_CLOCKS),
GFP_KERNEL);
if (!hw_data)
return -ENOMEM;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
init.ops = &lan966x_gck_ops;
hw_data->num = GCK_GATE_UHPHS;
for (i = 0; i < GCK_GATE_UHPHS; i++) {
init.name = clk_names[i];
hw_data->hws[i] = lan966x_gck_clk_register(dev, i);
if (IS_ERR(hw_data->hws[i])) {
dev_err(dev, "failed to register %s clock\n",
init.name);
return PTR_ERR(hw_data->hws[i]);
}
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (res) {
gate_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(gate_base))
return PTR_ERR(gate_base);
hw_data->num = N_CLOCKS;
ret = lan966x_gate_clk_register(dev, hw_data, gate_base);
if (ret)
return ret;
}
return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, hw_data);
}
static const struct of_device_id lan966x_clk_dt_ids[] = {
{ .compatible = "microchip,lan966x-gck", },
{ }
};
MODULE_DEVICE_TABLE(of, lan966x_clk_dt_ids);
static struct platform_driver lan966x_clk_driver = {
.probe = lan966x_clk_probe,
.driver = {
.name = "lan966x-clk",
.of_match_table = lan966x_clk_dt_ids,
},
};
module_platform_driver(lan966x_clk_driver);
MODULE_AUTHOR("Kavyasree Kotagiri <[email protected]>");
MODULE_DESCRIPTION("LAN966X clock driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/clk/clk-lan966x.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2013 Freescale Semiconductor, Inc.
* Copyright 2021 NXP
*
* clock driver for Freescale QorIQ SoCs.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <dt-bindings/clock/fsl,qoriq-clockgen.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/fsl/guts.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#define PLL_DIV1 0
#define PLL_DIV2 1
#define PLL_DIV3 2
#define PLL_DIV4 3
#define PLATFORM_PLL 0
#define CGA_PLL1 1
#define CGA_PLL2 2
#define CGA_PLL3 3
#define CGA_PLL4 4 /* only on clockgen-1.0, which lacks CGB */
#define CGB_PLL1 4
#define CGB_PLL2 5
#define MAX_PLL_DIV 32
struct clockgen_pll_div {
struct clk *clk;
char name[32];
};
struct clockgen_pll {
struct clockgen_pll_div div[MAX_PLL_DIV];
};
#define CLKSEL_VALID 1
#define CLKSEL_80PCT 2 /* Only allowed if PLL <= 80% of max cpu freq */
struct clockgen_sourceinfo {
u32 flags; /* CLKSEL_xxx */
int pll; /* CGx_PLLn */
int div; /* PLL_DIVn */
};
#define NUM_MUX_PARENTS 16
struct clockgen_muxinfo {
struct clockgen_sourceinfo clksel[NUM_MUX_PARENTS];
};
#define NUM_HWACCEL 5
#define NUM_CMUX 8
struct clockgen;
/*
 * If this flag is set, cmux freq must be >= platform pll.
 * If not set, cmux freq must be >= platform pll/2.
*/
#define CG_CMUX_GE_PLAT 1
#define CG_PLL_8BIT 2 /* PLLCnGSR[CFG] is 8 bits, not 6 */
#define CG_VER3 4 /* version 3 cg: reg layout different */
#define CG_LITTLE_ENDIAN 8
struct clockgen_chipinfo {
const char *compat, *guts_compat;
const struct clockgen_muxinfo *cmux_groups[2];
const struct clockgen_muxinfo *hwaccel[NUM_HWACCEL];
void (*init_periph)(struct clockgen *cg);
int cmux_to_group[NUM_CMUX + 1]; /* array should be -1 terminated */
u32 pll_mask; /* 1 << n bit set if PLL n is valid */
u32 flags; /* CG_xxx */
};
struct clockgen {
struct device_node *node;
void __iomem *regs;
struct clockgen_chipinfo info; /* mutable copy */
struct clk *sysclk, *coreclk;
struct clockgen_pll pll[6];
struct clk *cmux[NUM_CMUX];
struct clk *hwaccel[NUM_HWACCEL];
struct clk *fman[2];
struct ccsr_guts __iomem *guts;
};
static struct clockgen clockgen;
static bool add_cpufreq_dev __initdata;
static void cg_out(struct clockgen *cg, u32 val, u32 __iomem *reg)
{
if (cg->info.flags & CG_LITTLE_ENDIAN)
iowrite32(val, reg);
else
iowrite32be(val, reg);
}
static u32 cg_in(struct clockgen *cg, u32 __iomem *reg)
{
u32 val;
if (cg->info.flags & CG_LITTLE_ENDIAN)
val = ioread32(reg);
else
val = ioread32be(reg);
return val;
}
static const struct clockgen_muxinfo p2041_cmux_grp1 = {
{
[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
}
};
static const struct clockgen_muxinfo p2041_cmux_grp2 = {
{
[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
}
};
static const struct clockgen_muxinfo p5020_cmux_grp1 = {
{
[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
[4] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV1 },
}
};
static const struct clockgen_muxinfo p5020_cmux_grp2 = {
{
[0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
}
};
static const struct clockgen_muxinfo p5040_cmux_grp1 = {
{
[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
[4] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV1 },
[5] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV2 },
}
};
static const struct clockgen_muxinfo p5040_cmux_grp2 = {
{
[0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
[1] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV2 },
[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
}
};
static const struct clockgen_muxinfo p4080_cmux_grp1 = {
{
[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
[8] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL3, PLL_DIV1 },
}
};
static const struct clockgen_muxinfo p4080_cmux_grp2 = {
{
[0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
[8] = { CLKSEL_VALID, CGA_PLL3, PLL_DIV1 },
[9] = { CLKSEL_VALID, CGA_PLL3, PLL_DIV2 },
[12] = { CLKSEL_VALID, CGA_PLL4, PLL_DIV1 },
[13] = { CLKSEL_VALID, CGA_PLL4, PLL_DIV2 },
}
};
static const struct clockgen_muxinfo t1023_cmux = {
{
[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
}
};
static const struct clockgen_muxinfo t1040_cmux = {
{
[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
}
};
static const struct clockgen_muxinfo clockgen2_cmux_cga = {
{
{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
{},
{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
{},
{ CLKSEL_VALID, CGA_PLL3, PLL_DIV1 },
{ CLKSEL_VALID, CGA_PLL3, PLL_DIV2 },
{ CLKSEL_VALID, CGA_PLL3, PLL_DIV4 },
},
};
static const struct clockgen_muxinfo clockgen2_cmux_cga12 = {
{
{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
{},
{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
},
};
static const struct clockgen_muxinfo clockgen2_cmux_cgb = {
{
{ CLKSEL_VALID, CGB_PLL1, PLL_DIV1 },
{ CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
{ CLKSEL_VALID, CGB_PLL1, PLL_DIV4 },
{},
{ CLKSEL_VALID, CGB_PLL2, PLL_DIV1 },
{ CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
{ CLKSEL_VALID, CGB_PLL2, PLL_DIV4 },
},
};
static const struct clockgen_muxinfo ls1021a_cmux = {
{
{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
}
};
static const struct clockgen_muxinfo ls1028a_hwa1 = {
{
{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
{},
{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
},
};
static const struct clockgen_muxinfo ls1028a_hwa2 = {
{
{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
{},
{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
},
};
static const struct clockgen_muxinfo ls1028a_hwa3 = {
{
{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
{},
{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
},
};
static const struct clockgen_muxinfo ls1028a_hwa4 = {
{
{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
{},
{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
},
};
static const struct clockgen_muxinfo ls1043a_hwa1 = {
{
{},
{},
{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
{},
{},
{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
},
};
static const struct clockgen_muxinfo ls1043a_hwa2 = {
{
{},
{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
{},
{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
},
};
static const struct clockgen_muxinfo ls1046a_hwa1 = {
{
{},
{},
{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
},
};
static const struct clockgen_muxinfo ls1046a_hwa2 = {
{
{},
{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
{},
{},
{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
},
};
static const struct clockgen_muxinfo ls1088a_hwa1 = {
{
{},
{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
{},
{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
},
};
static const struct clockgen_muxinfo ls1088a_hwa2 = {
{
{},
{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
{},
{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
},
};
static const struct clockgen_muxinfo ls1012a_cmux = {
{
[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
{},
[2] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
}
};
static const struct clockgen_muxinfo t1023_hwa1 = {
{
{},
{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
},
};
static const struct clockgen_muxinfo t1023_hwa2 = {
{
[6] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
},
};
static const struct clockgen_muxinfo t2080_hwa1 = {
{
{},
{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
},
};
static const struct clockgen_muxinfo t2080_hwa2 = {
{
{},
{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
},
};
static const struct clockgen_muxinfo t4240_hwa1 = {
{
{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV2 },
{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
{},
{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
},
};
static const struct clockgen_muxinfo t4240_hwa4 = {
{
[2] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
[3] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV3 },
[4] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV4 },
[5] = { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
[6] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
},
};
static const struct clockgen_muxinfo t4240_hwa5 = {
{
[2] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
[3] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV3 },
[4] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV4 },
[5] = { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
[6] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
[7] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV3 },
},
};
#define RCWSR7_FM1_CLK_SEL 0x40000000
#define RCWSR7_FM2_CLK_SEL 0x20000000
#define RCWSR7_HWA_ASYNC_DIV 0x04000000
static void __init p2041_init_periph(struct clockgen *cg)
{
u32 reg;
reg = ioread32be(&cg->guts->rcwsr[7]);
if (reg & RCWSR7_FM1_CLK_SEL)
cg->fman[0] = cg->pll[CGA_PLL2].div[PLL_DIV2].clk;
else
cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}
static void __init p4080_init_periph(struct clockgen *cg)
{
u32 reg;
reg = ioread32be(&cg->guts->rcwsr[7]);
if (reg & RCWSR7_FM1_CLK_SEL)
cg->fman[0] = cg->pll[CGA_PLL3].div[PLL_DIV2].clk;
else
cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
if (reg & RCWSR7_FM2_CLK_SEL)
cg->fman[1] = cg->pll[CGA_PLL3].div[PLL_DIV2].clk;
else
cg->fman[1] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}
static void __init p5020_init_periph(struct clockgen *cg)
{
u32 reg;
int div = PLL_DIV2;
reg = ioread32be(&cg->guts->rcwsr[7]);
if (reg & RCWSR7_HWA_ASYNC_DIV)
div = PLL_DIV4;
if (reg & RCWSR7_FM1_CLK_SEL)
cg->fman[0] = cg->pll[CGA_PLL2].div[div].clk;
else
cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}
static void __init p5040_init_periph(struct clockgen *cg)
{
u32 reg;
int div = PLL_DIV2;
reg = ioread32be(&cg->guts->rcwsr[7]);
if (reg & RCWSR7_HWA_ASYNC_DIV)
div = PLL_DIV4;
if (reg & RCWSR7_FM1_CLK_SEL)
cg->fman[0] = cg->pll[CGA_PLL3].div[div].clk;
else
cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
if (reg & RCWSR7_FM2_CLK_SEL)
cg->fman[1] = cg->pll[CGA_PLL3].div[div].clk;
else
cg->fman[1] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}
static void __init t1023_init_periph(struct clockgen *cg)
{
cg->fman[0] = cg->hwaccel[1];
}
static void __init t1040_init_periph(struct clockgen *cg)
{
cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk;
}
static void __init t2080_init_periph(struct clockgen *cg)
{
cg->fman[0] = cg->hwaccel[0];
}
static void __init t4240_init_periph(struct clockgen *cg)
{
cg->fman[0] = cg->hwaccel[3];
cg->fman[1] = cg->hwaccel[4];
}
static const struct clockgen_chipinfo chipinfo[] = {
{
.compat = "fsl,b4420-clockgen",
.guts_compat = "fsl,b4860-device-config",
.init_periph = t2080_init_periph,
.cmux_groups = {
&clockgen2_cmux_cga12, &clockgen2_cmux_cgb
},
.hwaccel = {
&t2080_hwa1
},
.cmux_to_group = {
0, 1, 1, 1, -1
},
.pll_mask = BIT(PLATFORM_PLL) |
BIT(CGA_PLL1) | BIT(CGA_PLL2) | BIT(CGA_PLL3) |
BIT(CGB_PLL1) | BIT(CGB_PLL2),
.flags = CG_PLL_8BIT,
},
{
.compat = "fsl,b4860-clockgen",
.guts_compat = "fsl,b4860-device-config",
.init_periph = t2080_init_periph,
.cmux_groups = {
&clockgen2_cmux_cga12, &clockgen2_cmux_cgb
},
.hwaccel = {
&t2080_hwa1
},
.cmux_to_group = {
0, 1, 1, 1, -1
},
.pll_mask = BIT(PLATFORM_PLL) |
BIT(CGA_PLL1) | BIT(CGA_PLL2) | BIT(CGA_PLL3) |
BIT(CGB_PLL1) | BIT(CGB_PLL2),
.flags = CG_PLL_8BIT,
},
{
.compat = "fsl,ls1021a-clockgen",
.cmux_groups = {
&ls1021a_cmux
},
.cmux_to_group = {
0, -1
},
.pll_mask = BIT(PLATFORM_PLL) |
BIT(CGA_PLL1) | BIT(CGA_PLL2),
},
{
.compat = "fsl,ls1028a-clockgen",
.cmux_groups = {
&clockgen2_cmux_cga12
},
.hwaccel = {
&ls1028a_hwa1, &ls1028a_hwa2,
&ls1028a_hwa3, &ls1028a_hwa4
},
.cmux_to_group = {
0, 0, 0, 0, -1
},
.pll_mask = BIT(PLATFORM_PLL) |
BIT(CGA_PLL1) | BIT(CGA_PLL2),
.flags = CG_VER3 | CG_LITTLE_ENDIAN,
},
{
.compat = "fsl,ls1043a-clockgen",
.init_periph = t2080_init_periph,
.cmux_groups = {
&t1040_cmux
},
.hwaccel = {
&ls1043a_hwa1, &ls1043a_hwa2
},
.cmux_to_group = {
0, -1
},
.pll_mask = BIT(PLATFORM_PLL) |
BIT(CGA_PLL1) | BIT(CGA_PLL2),
.flags = CG_PLL_8BIT,
},
{
.compat = "fsl,ls1046a-clockgen",
.init_periph = t2080_init_periph,
.cmux_groups = {
&t1040_cmux
},
.hwaccel = {
&ls1046a_hwa1, &ls1046a_hwa2
},
.cmux_to_group = {
0, -1
},
.pll_mask = BIT(PLATFORM_PLL) |
BIT(CGA_PLL1) | BIT(CGA_PLL2),
.flags = CG_PLL_8BIT,
},
{
.compat = "fsl,ls1088a-clockgen",
.cmux_groups = {
&clockgen2_cmux_cga12
},
.hwaccel = {
&ls1088a_hwa1, &ls1088a_hwa2
},
.cmux_to_group = {
0, 0, -1
},
.pll_mask = BIT(PLATFORM_PLL) |
BIT(CGA_PLL1) | BIT(CGA_PLL2),
.flags = CG_VER3 | CG_LITTLE_ENDIAN,
},
{
.compat = "fsl,ls1012a-clockgen",
.cmux_groups = {
&ls1012a_cmux
},
.cmux_to_group = {
0, -1
},
.pll_mask = BIT(PLATFORM_PLL) | BIT(CGA_PLL1),
},
{
.compat = "fsl,ls2080a-clockgen",
.cmux_groups = {
&clockgen2_cmux_cga12, &clockgen2_cmux_cgb
},
.cmux_to_group = {
0, 0, 1, 1, -1
},
.pll_mask = BIT(PLATFORM_PLL) |
BIT(CGA_PLL1) | BIT(CGA_PLL2) |
BIT(CGB_PLL1) | BIT(CGB_PLL2),
.flags = CG_VER3 | CG_LITTLE_ENDIAN,
},
{
.compat = "fsl,lx2160a-clockgen",
.cmux_groups = {
&clockgen2_cmux_cga12, &clockgen2_cmux_cgb
},
.cmux_to_group = {
0, 0, 0, 0, 1, 1, 1, 1, -1
},
.pll_mask = BIT(PLATFORM_PLL) |
BIT(CGA_PLL1) | BIT(CGA_PLL2) |
BIT(CGB_PLL1) | BIT(CGB_PLL2),
.flags = CG_VER3 | CG_LITTLE_ENDIAN,
},
{
.compat = "fsl,p2041-clockgen",
.guts_compat = "fsl,qoriq-device-config-1.0",
.init_periph = p2041_init_periph,
.cmux_groups = {
&p2041_cmux_grp1, &p2041_cmux_grp2
},
.cmux_to_group = {
0, 0, 1, 1, -1
},
.pll_mask = BIT(PLATFORM_PLL) |
BIT(CGA_PLL1) | BIT(CGA_PLL2),
},
{
.compat = "fsl,p3041-clockgen",
.guts_compat = "fsl,qoriq-device-config-1.0",
.init_periph = p2041_init_periph,
.cmux_groups = {
&p2041_cmux_grp1, &p2041_cmux_grp2
},
.cmux_to_group = {
0, 0, 1, 1, -1
},
.pll_mask = BIT(PLATFORM_PLL) |
BIT(CGA_PLL1) | BIT(CGA_PLL2),
},
{
.compat = "fsl,p4080-clockgen",
.guts_compat = "fsl,qoriq-device-config-1.0",
.init_periph = p4080_init_periph,
.cmux_groups = {
&p4080_cmux_grp1, &p4080_cmux_grp2
},
.cmux_to_group = {
0, 0, 0, 0, 1, 1, 1, 1, -1
},
.pll_mask = BIT(PLATFORM_PLL) |
BIT(CGA_PLL1) | BIT(CGA_PLL2) |
BIT(CGA_PLL3) | BIT(CGA_PLL4),
},
{
.compat = "fsl,p5020-clockgen",
.guts_compat = "fsl,qoriq-device-config-1.0",
.init_periph = p5020_init_periph,
.cmux_groups = {
&p5020_cmux_grp1, &p5020_cmux_grp2
},
.cmux_to_group = {
0, 1, -1
},
.pll_mask = BIT(PLATFORM_PLL) |
BIT(CGA_PLL1) | BIT(CGA_PLL2),
},
{
.compat = "fsl,p5040-clockgen",
.guts_compat = "fsl,p5040-device-config",
.init_periph = p5040_init_periph,
.cmux_groups = {
&p5040_cmux_grp1, &p5040_cmux_grp2
},
.cmux_to_group = {
0, 0, 1, 1, -1
},
.pll_mask = BIT(PLATFORM_PLL) |
BIT(CGA_PLL1) | BIT(CGA_PLL2) | BIT(CGA_PLL3),
},
{
.compat = "fsl,t1023-clockgen",
.guts_compat = "fsl,t1023-device-config",
.init_periph = t1023_init_periph,
.cmux_groups = {
&t1023_cmux
},
.hwaccel = {
&t1023_hwa1, &t1023_hwa2
},
.cmux_to_group = {
0, 0, -1
},
.pll_mask = BIT(PLATFORM_PLL) | BIT(CGA_PLL1),
.flags = CG_PLL_8BIT,
},
{
.compat = "fsl,t1040-clockgen",
.guts_compat = "fsl,t1040-device-config",
.init_periph = t1040_init_periph,
.cmux_groups = {
&t1040_cmux
},
.cmux_to_group = {
0, 0, 0, 0, -1
},
.pll_mask = BIT(PLATFORM_PLL) |
BIT(CGA_PLL1) | BIT(CGA_PLL2),
.flags = CG_PLL_8BIT,
},
{
.compat = "fsl,t2080-clockgen",
.guts_compat = "fsl,t2080-device-config",
.init_periph = t2080_init_periph,
.cmux_groups = {
&clockgen2_cmux_cga12
},
.hwaccel = {
&t2080_hwa1, &t2080_hwa2
},
.cmux_to_group = {
0, -1
},
.pll_mask = BIT(PLATFORM_PLL) |
BIT(CGA_PLL1) | BIT(CGA_PLL2),
.flags = CG_PLL_8BIT,
},
{
.compat = "fsl,t4240-clockgen",
.guts_compat = "fsl,t4240-device-config",
.init_periph = t4240_init_periph,
.cmux_groups = {
&clockgen2_cmux_cga, &clockgen2_cmux_cgb
},
.hwaccel = {
&t4240_hwa1, NULL, NULL, &t4240_hwa4, &t4240_hwa5
},
.cmux_to_group = {
0, 0, 1, -1
},
.pll_mask = BIT(PLATFORM_PLL) |
BIT(CGA_PLL1) | BIT(CGA_PLL2) | BIT(CGA_PLL3) |
BIT(CGB_PLL1) | BIT(CGB_PLL2),
.flags = CG_PLL_8BIT,
},
{},
};
struct mux_hwclock {
struct clk_hw hw;
struct clockgen *cg;
const struct clockgen_muxinfo *info;
u32 __iomem *reg;
u8 parent_to_clksel[NUM_MUX_PARENTS];
s8 clksel_to_parent[NUM_MUX_PARENTS];
int num_parents;
};
#define to_mux_hwclock(p) container_of(p, struct mux_hwclock, hw)
#define CLKSEL_MASK 0x78000000
#define CLKSEL_SHIFT 27
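/*
 * The clock-select field occupies bits 30:27 of each cmux register;
 * each value indexes a PLL/divider pair in the clockgen_muxinfo tables.
 */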
static int mux_set_parent(struct clk_hw *hw, u8 idx)
{
struct mux_hwclock *hwc = to_mux_hwclock(hw);
u32 clksel;
if (idx >= hwc->num_parents)
return -EINVAL;
clksel = hwc->parent_to_clksel[idx];
cg_out(hwc->cg, (clksel << CLKSEL_SHIFT) & CLKSEL_MASK, hwc->reg);
return 0;
}
static u8 mux_get_parent(struct clk_hw *hw)
{
struct mux_hwclock *hwc = to_mux_hwclock(hw);
u32 clksel;
s8 ret;
clksel = (cg_in(hwc->cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT;
ret = hwc->clksel_to_parent[clksel];
if (ret < 0) {
pr_err("%s: mux at %p has bad clksel\n", __func__, hwc->reg);
return 0;
}
return ret;
}
static const struct clk_ops cmux_ops = {
.determine_rate = clk_hw_determine_rate_no_reparent,
.get_parent = mux_get_parent,
.set_parent = mux_set_parent,
};
/*
* Don't allow setting for now, as the clock options haven't been
* sanitized for additional restrictions.
*/
static const struct clk_ops hwaccel_ops = {
.get_parent = mux_get_parent,
};
static const struct clockgen_pll_div *get_pll_div(struct clockgen *cg,
struct mux_hwclock *hwc,
int idx)
{
int pll, div;
if (!(hwc->info->clksel[idx].flags & CLKSEL_VALID))
return NULL;
pll = hwc->info->clksel[idx].pll;
div = hwc->info->clksel[idx].div;
return &cg->pll[pll].div[div];
}
static struct clk * __init create_mux_common(struct clockgen *cg,
struct mux_hwclock *hwc,
const struct clk_ops *ops,
unsigned long min_rate,
unsigned long max_rate,
unsigned long pct80_rate,
const char *fmt, int idx)
{
struct clk_init_data init = {};
struct clk *clk;
const struct clockgen_pll_div *div;
const char *parent_names[NUM_MUX_PARENTS];
char name[32];
int i, j;
snprintf(name, sizeof(name), fmt, idx);
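/*
 * Build the parent list: skip clksel values that are invalid or whose
 * rate falls outside [min_rate, max_rate], and record a two-way mapping
 * between parent index and hardware clksel value.
 */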
for (i = 0, j = 0; i < NUM_MUX_PARENTS; i++) {
unsigned long rate;
hwc->clksel_to_parent[i] = -1;
div = get_pll_div(cg, hwc, i);
if (!div)
continue;
rate = clk_get_rate(div->clk);
if (hwc->info->clksel[i].flags & CLKSEL_80PCT &&
rate > pct80_rate)
continue;
if (rate < min_rate)
continue;
if (rate > max_rate)
continue;
parent_names[j] = div->name;
hwc->parent_to_clksel[j] = i;
hwc->clksel_to_parent[i] = j;
j++;
}
init.name = name;
init.ops = ops;
init.parent_names = parent_names;
init.num_parents = hwc->num_parents = j;
init.flags = 0;
hwc->hw.init = &init;
hwc->cg = cg;
clk = clk_register(NULL, &hwc->hw);
if (IS_ERR(clk)) {
pr_err("%s: Couldn't register %s: %ld\n", __func__, name,
PTR_ERR(clk));
kfree(hwc);
return NULL;
}
return clk;
}
static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
{
struct mux_hwclock *hwc;
const struct clockgen_pll_div *div;
unsigned long plat_rate, min_rate;
u64 max_rate, pct80_rate;
u32 clksel;
hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
if (!hwc)
return NULL;
if (cg->info.flags & CG_VER3)
hwc->reg = cg->regs + 0x70000 + 0x20 * idx;
else
hwc->reg = cg->regs + 0x20 * idx;
hwc->info = cg->info.cmux_groups[cg->info.cmux_to_group[idx]];
/*
* Find the rate for the default clksel, and treat it as the
* maximum rated core frequency. If this is an incorrect
* assumption, certain clock options (possibly including the
* default clksel) may be inappropriately excluded on certain
* chips.
*/
clksel = (cg_in(cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT;
div = get_pll_div(cg, hwc, clksel);
if (!div) {
kfree(hwc);
return NULL;
}
max_rate = clk_get_rate(div->clk);
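/* Compute 80% of the maximum rate for CLKSEL_80PCT-limited options */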
pct80_rate = max_rate * 8;
do_div(pct80_rate, 10);
plat_rate = clk_get_rate(cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk);
if (cg->info.flags & CG_CMUX_GE_PLAT)
min_rate = plat_rate;
else
min_rate = plat_rate / 2;
return create_mux_common(cg, hwc, &cmux_ops, min_rate, max_rate,
pct80_rate, "cg-cmux%d", idx);
}
static struct clk * __init create_one_hwaccel(struct clockgen *cg, int idx)
{
struct mux_hwclock *hwc;
hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
if (!hwc)
return NULL;
hwc->reg = cg->regs + 0x20 * idx + 0x10;
hwc->info = cg->info.hwaccel[idx];
return create_mux_common(cg, hwc, &hwaccel_ops, 0, ULONG_MAX, 0,
"cg-hwaccel%d", idx);
}
static void __init create_muxes(struct clockgen *cg)
{
int i;
for (i = 0; i < ARRAY_SIZE(cg->cmux); i++) {
if (cg->info.cmux_to_group[i] < 0)
break;
if (cg->info.cmux_to_group[i] >=
ARRAY_SIZE(cg->info.cmux_groups)) {
WARN_ON_ONCE(1);
continue;
}
cg->cmux[i] = create_one_cmux(cg, i);
}
for (i = 0; i < ARRAY_SIZE(cg->hwaccel); i++) {
if (!cg->info.hwaccel[i])
continue;
cg->hwaccel[i] = create_one_hwaccel(cg, i);
}
}
static void __init _clockgen_init(struct device_node *np, bool legacy);
/*
* Legacy nodes may get probed before the parent clockgen node.
* It is assumed that device trees with legacy nodes will not
* contain a "clocks" property -- otherwise the input clocks may
* not be initialized at this point.
*/
static void __init legacy_init_clockgen(struct device_node *np)
{
if (!clockgen.node) {
struct device_node *parent_np;
parent_np = of_get_parent(np);
_clockgen_init(parent_np, true);
of_node_put(parent_np);
}
}
/* Legacy node */
static void __init core_mux_init(struct device_node *np)
{
struct clk *clk;
struct resource res;
int idx, rc;
legacy_init_clockgen(np);
if (of_address_to_resource(np, 0, &res))
return;
idx = (res.start & 0xf0) >> 5;
clk = clockgen.cmux[idx];
rc = of_clk_add_provider(np, of_clk_src_simple_get, clk);
if (rc) {
pr_err("%s: Couldn't register clk provider for node %pOFn: %d\n",
__func__, np, rc);
return;
}
}
static struct clk __init
*sysclk_from_fixed(struct device_node *node, const char *name)
{
u32 rate;
if (of_property_read_u32(node, "clock-frequency", &rate))
return ERR_PTR(-ENODEV);
return clk_register_fixed_rate(NULL, name, NULL, 0, rate);
}
static struct clk __init *input_clock(const char *name, struct clk *clk)
{
const char *input_name;
/* Register the input clock under the desired name. */
input_name = __clk_get_name(clk);
clk = clk_register_fixed_factor(NULL, name, input_name,
0, 1, 1);
if (IS_ERR(clk))
pr_err("%s: Couldn't register %s: %ld\n", __func__, name,
PTR_ERR(clk));
return clk;
}
static struct clk __init *input_clock_by_name(const char *name,
const char *dtname)
{
struct clk *clk;
clk = of_clk_get_by_name(clockgen.node, dtname);
if (IS_ERR(clk))
return clk;
return input_clock(name, clk);
}
static struct clk __init *input_clock_by_index(const char *name, int idx)
{
struct clk *clk;
clk = of_clk_get(clockgen.node, idx);
if (IS_ERR(clk))
return clk;
return input_clock(name, clk);
}
static struct clk * __init create_sysclk(const char *name)
{
struct device_node *sysclk;
struct clk *clk;
clk = sysclk_from_fixed(clockgen.node, name);
if (!IS_ERR(clk))
return clk;
clk = input_clock_by_name(name, "sysclk");
if (!IS_ERR(clk))
return clk;
clk = input_clock_by_index(name, 0);
if (!IS_ERR(clk))
return clk;
sysclk = of_get_child_by_name(clockgen.node, "sysclk");
if (sysclk) {
clk = sysclk_from_fixed(sysclk, name);
of_node_put(sysclk);
if (!IS_ERR(clk))
return clk;
}
pr_err("%s: No input sysclk\n", __func__);
return NULL;
}
static struct clk * __init create_coreclk(const char *name)
{
struct clk *clk;
clk = input_clock_by_name(name, "coreclk");
if (!IS_ERR(clk))
return clk;
/*
* This indicates a mix of legacy nodes with the new coreclk
* mechanism, which should never happen. If this error occurs,
* don't use the wrong input clock just because coreclk isn't
* ready yet.
*/
if (WARN_ON(PTR_ERR(clk) == -EPROBE_DEFER))
return clk;
return NULL;
}
/* Legacy node */
static void __init sysclk_init(struct device_node *node)
{
struct clk *clk;
legacy_init_clockgen(node);
clk = clockgen.sysclk;
if (clk)
of_clk_add_provider(node, of_clk_src_simple_get, clk);
}
#define PLL_KILL BIT(31)
static void __init create_one_pll(struct clockgen *cg, int idx)
{
u32 __iomem *reg;
u32 mult;
struct clockgen_pll *pll = &cg->pll[idx];
const char *input = "cg-sysclk";
int i;
if (!(cg->info.pll_mask & (1 << idx)))
return;
if (cg->coreclk && idx != PLATFORM_PLL) {
if (IS_ERR(cg->coreclk))
return;
input = "cg-coreclk";
}
if (cg->info.flags & CG_VER3) {
switch (idx) {
case PLATFORM_PLL:
reg = cg->regs + 0x60080;
break;
case CGA_PLL1:
reg = cg->regs + 0x80;
break;
case CGA_PLL2:
reg = cg->regs + 0xa0;
break;
case CGB_PLL1:
reg = cg->regs + 0x10080;
break;
case CGB_PLL2:
reg = cg->regs + 0x100a0;
break;
default:
WARN_ONCE(1, "index %d\n", idx);
return;
}
} else {
if (idx == PLATFORM_PLL)
reg = cg->regs + 0xc00;
else
reg = cg->regs + 0x800 + 0x20 * (idx - 1);
}
/* Read the PLL multiplier */
mult = cg_in(cg, reg);
/* Check if this PLL is disabled */
if (mult & PLL_KILL) {
pr_debug("%s(): pll %p disabled\n", __func__, reg);
return;
}
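/*
 * Extract the multiplier ratio: VER3 chips and 8-bit-PLL chips keep it
 * in bits 8:1 (except the platform PLL on the latter); older chips use
 * bits 6:1.
 */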
if ((cg->info.flags & CG_VER3) ||
((cg->info.flags & CG_PLL_8BIT) && idx != PLATFORM_PLL))
mult = (mult & GENMASK(8, 1)) >> 1;
else
mult = (mult & GENMASK(6, 1)) >> 1;
for (i = 0; i < ARRAY_SIZE(pll->div); i++) {
struct clk *clk;
int ret;
/*
 * The platform PLL has MAX_PLL_DIV divider clocks.
 * A core PLL has at most 4 divider clocks.
 */
if (idx != PLATFORM_PLL && i >= 4)
break;
snprintf(pll->div[i].name, sizeof(pll->div[i].name),
"cg-pll%d-div%d", idx, i + 1);
clk = clk_register_fixed_factor(NULL,
pll->div[i].name, input, 0, mult, i + 1);
if (IS_ERR(clk)) {
pr_err("%s: %s: register failed %ld\n",
__func__, pll->div[i].name, PTR_ERR(clk));
continue;
}
pll->div[i].clk = clk;
ret = clk_register_clkdev(clk, pll->div[i].name, NULL);
if (ret != 0)
pr_err("%s: %s: register to lookup table failed %d\n",
__func__, pll->div[i].name, ret);
}
}
static void __init create_plls(struct clockgen *cg)
{
int i;
for (i = 0; i < ARRAY_SIZE(cg->pll); i++)
create_one_pll(cg, i);
}
static void __init legacy_pll_init(struct device_node *np, int idx)
{
struct clockgen_pll *pll;
struct clk_onecell_data *onecell_data;
struct clk **subclks;
int count, rc;
legacy_init_clockgen(np);
pll = &clockgen.pll[idx];
count = of_property_count_strings(np, "clock-output-names");
BUILD_BUG_ON(ARRAY_SIZE(pll->div) < 4);
subclks = kcalloc(4, sizeof(struct clk *), GFP_KERNEL);
if (!subclks)
return;
onecell_data = kmalloc(sizeof(*onecell_data), GFP_KERNEL);
if (!onecell_data)
goto err_clks;
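/*
 * Legacy device trees naming at most three output clocks expect
 * div1, div2 and div4 (the divide-by-3 output is skipped).
 */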
if (count <= 3) {
subclks[0] = pll->div[0].clk;
subclks[1] = pll->div[1].clk;
subclks[2] = pll->div[3].clk;
} else {
subclks[0] = pll->div[0].clk;
subclks[1] = pll->div[1].clk;
subclks[2] = pll->div[2].clk;
subclks[3] = pll->div[3].clk;
}
onecell_data->clks = subclks;
onecell_data->clk_num = count;
rc = of_clk_add_provider(np, of_clk_src_onecell_get, onecell_data);
if (rc) {
pr_err("%s: Couldn't register clk provider for node %pOFn: %d\n",
__func__, np, rc);
goto err_cell;
}
return;
err_cell:
kfree(onecell_data);
err_clks:
kfree(subclks);
}
/* Legacy node */
static void __init pltfrm_pll_init(struct device_node *np)
{
legacy_pll_init(np, PLATFORM_PLL);
}
/* Legacy node */
static void __init core_pll_init(struct device_node *np)
{
struct resource res;
int idx;
if (of_address_to_resource(np, 0, &res))
return;
if ((res.start & 0xfff) == 0xc00) {
/*
* ls1021a devtree labels the platform PLL
* with the core PLL compatible
*/
pltfrm_pll_init(np);
} else {
idx = (res.start & 0xf0) >> 5;
legacy_pll_init(np, CGA_PLL1 + idx);
}
}
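/*
 * Clock lookup for the two-cell specifier <type index>. An illustrative
 * consumer binding (macro names taken from the dt-bindings header):
 *
 *	clocks = <&clockgen QORIQ_CLK_CMUX 0>;
 *
 * would resolve to the first core-cluster mux.
 */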
static struct clk *clockgen_clk_get(struct of_phandle_args *clkspec, void *data)
{
struct clockgen *cg = data;
struct clk *clk;
struct clockgen_pll *pll;
u32 type, idx;
if (clkspec->args_count < 2) {
pr_err("%s: insufficient phandle args\n", __func__);
return ERR_PTR(-EINVAL);
}
type = clkspec->args[0];
idx = clkspec->args[1];
switch (type) {
case QORIQ_CLK_SYSCLK:
if (idx != 0)
goto bad_args;
clk = cg->sysclk;
break;
case QORIQ_CLK_CMUX:
if (idx >= ARRAY_SIZE(cg->cmux))
goto bad_args;
clk = cg->cmux[idx];
break;
case QORIQ_CLK_HWACCEL:
if (idx >= ARRAY_SIZE(cg->hwaccel))
goto bad_args;
clk = cg->hwaccel[idx];
break;
case QORIQ_CLK_FMAN:
if (idx >= ARRAY_SIZE(cg->fman))
goto bad_args;
clk = cg->fman[idx];
break;
case QORIQ_CLK_PLATFORM_PLL:
pll = &cg->pll[PLATFORM_PLL];
if (idx >= ARRAY_SIZE(pll->div))
goto bad_args;
clk = pll->div[idx].clk;
break;
case QORIQ_CLK_CORECLK:
if (idx != 0)
goto bad_args;
clk = cg->coreclk;
if (IS_ERR(clk))
clk = NULL;
break;
default:
goto bad_args;
}
if (!clk)
return ERR_PTR(-ENOENT);
return clk;
bad_args:
pr_err("%s: Bad phandle args %u %u\n", __func__, type, idx);
return ERR_PTR(-EINVAL);
}
#ifdef CONFIG_PPC
#include <asm/mpc85xx.h>
static const u32 a4510_svrs[] __initconst = {
(SVR_P2040 << 8) | 0x10, /* P2040 1.0 */
(SVR_P2040 << 8) | 0x11, /* P2040 1.1 */
(SVR_P2041 << 8) | 0x10, /* P2041 1.0 */
(SVR_P2041 << 8) | 0x11, /* P2041 1.1 */
(SVR_P3041 << 8) | 0x10, /* P3041 1.0 */
(SVR_P3041 << 8) | 0x11, /* P3041 1.1 */
(SVR_P4040 << 8) | 0x20, /* P4040 2.0 */
(SVR_P4080 << 8) | 0x20, /* P4080 2.0 */
(SVR_P5010 << 8) | 0x10, /* P5010 1.0 */
(SVR_P5010 << 8) | 0x20, /* P5010 2.0 */
(SVR_P5020 << 8) | 0x10, /* P5020 1.0 */
(SVR_P5021 << 8) | 0x10, /* P5021 1.0 */
(SVR_P5040 << 8) | 0x10, /* P5040 1.0 */
};
#define SVR_SECURITY 0x80000 /* The Security (E) bit */
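/*
 * On SVRs affected by this erratum the clockgen gets CG_CMUX_GE_PLAT,
 * which keeps every core mux at or above the platform clock rate
 * (see create_one_cmux()).
 */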
static bool __init has_erratum_a4510(void)
{
u32 svr = mfspr(SPRN_SVR);
int i;
svr &= ~SVR_SECURITY;
for (i = 0; i < ARRAY_SIZE(a4510_svrs); i++) {
if (svr == a4510_svrs[i])
return true;
}
return false;
}
#else
static bool __init has_erratum_a4510(void)
{
return false;
}
#endif
static void __init _clockgen_init(struct device_node *np, bool legacy)
{
int i, ret;
bool is_old_ls1021a = false;
/* May have already been called by a legacy probe */
if (clockgen.node)
return;
clockgen.node = np;
clockgen.regs = of_iomap(np, 0);
if (!clockgen.regs &&
of_device_is_compatible(of_root, "fsl,ls1021a")) {
/* Compatibility hack for old, broken device trees */
clockgen.regs = ioremap(0x1ee1000, 0x1000);
is_old_ls1021a = true;
}
if (!clockgen.regs) {
pr_err("%s(): %pOFn: of_iomap() failed\n", __func__, np);
return;
}
for (i = 0; i < ARRAY_SIZE(chipinfo); i++) {
if (of_device_is_compatible(np, chipinfo[i].compat))
break;
if (is_old_ls1021a &&
!strcmp(chipinfo[i].compat, "fsl,ls1021a-clockgen"))
break;
}
if (i == ARRAY_SIZE(chipinfo)) {
pr_err("%s: unknown clockgen node %pOF\n", __func__, np);
goto err;
}
clockgen.info = chipinfo[i];
if (clockgen.info.guts_compat) {
struct device_node *guts;
guts = of_find_compatible_node(NULL, NULL,
clockgen.info.guts_compat);
if (guts) {
clockgen.guts = of_iomap(guts, 0);
if (!clockgen.guts) {
pr_err("%s: Couldn't map %pOF regs\n", __func__,
guts);
}
of_node_put(guts);
}
}
if (has_erratum_a4510())
clockgen.info.flags |= CG_CMUX_GE_PLAT;
clockgen.sysclk = create_sysclk("cg-sysclk");
clockgen.coreclk = create_coreclk("cg-coreclk");
create_plls(&clockgen);
create_muxes(&clockgen);
if (clockgen.info.init_periph)
clockgen.info.init_periph(&clockgen);
ret = of_clk_add_provider(np, clockgen_clk_get, &clockgen);
if (ret) {
pr_err("%s: Couldn't register clk provider for node %pOFn: %d\n",
__func__, np, ret);
}
/* Don't create cpufreq device for legacy clockgen blocks */
add_cpufreq_dev = !legacy;
return;
err:
iounmap(clockgen.regs);
clockgen.regs = NULL;
}
static void __init clockgen_init(struct device_node *np)
{
_clockgen_init(np, false);
}
static int __init clockgen_cpufreq_init(void)
{
struct platform_device *pdev;
if (add_cpufreq_dev) {
pdev = platform_device_register_simple("qoriq-cpufreq", -1,
NULL, 0);
if (IS_ERR(pdev))
pr_err("Couldn't register qoriq-cpufreq err=%ld\n",
PTR_ERR(pdev));
}
return 0;
}
device_initcall(clockgen_cpufreq_init);
CLK_OF_DECLARE(qoriq_clockgen_1, "fsl,qoriq-clockgen-1.0", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_2, "fsl,qoriq-clockgen-2.0", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_b4420, "fsl,b4420-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_b4860, "fsl,b4860-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1012a, "fsl,ls1012a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1021a, "fsl,ls1021a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1028a, "fsl,ls1028a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1043a, "fsl,ls1043a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1046a, "fsl,ls1046a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1088a, "fsl,ls1088a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls2080a, "fsl,ls2080a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_lx2160a, "fsl,lx2160a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_p2041, "fsl,p2041-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_p3041, "fsl,p3041-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_p4080, "fsl,p4080-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_p5020, "fsl,p5020-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_p5040, "fsl,p5040-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_t1023, "fsl,t1023-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_t1040, "fsl,t1040-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_t2080, "fsl,t2080-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_t4240, "fsl,t4240-clockgen", clockgen_init);
/* Legacy nodes */
CLK_OF_DECLARE(qoriq_sysclk_1, "fsl,qoriq-sysclk-1.0", sysclk_init);
CLK_OF_DECLARE(qoriq_sysclk_2, "fsl,qoriq-sysclk-2.0", sysclk_init);
CLK_OF_DECLARE(qoriq_core_pll_1, "fsl,qoriq-core-pll-1.0", core_pll_init);
CLK_OF_DECLARE(qoriq_core_pll_2, "fsl,qoriq-core-pll-2.0", core_pll_init);
CLK_OF_DECLARE(qoriq_core_mux_1, "fsl,qoriq-core-mux-1.0", core_mux_init);
CLK_OF_DECLARE(qoriq_core_mux_2, "fsl,qoriq-core-mux-2.0", core_mux_init);
CLK_OF_DECLARE(qoriq_pltfrm_pll_1, "fsl,qoriq-platform-pll-1.0", pltfrm_pll_init);
CLK_OF_DECLARE(qoriq_pltfrm_pll_2, "fsl,qoriq-platform-pll-2.0", pltfrm_pll_init);
| linux-master | drivers/clk/clk-qoriq.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics 2017
* Author: Gabriel Fernandez <[email protected]> for STMicroelectronics.
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/regmap.h>
#include <dt-bindings/clock/stm32h7-clks.h>
/* Reset Clock Control Registers */
#define RCC_CR 0x00
#define RCC_CFGR 0x10
#define RCC_D1CFGR 0x18
#define RCC_D2CFGR 0x1C
#define RCC_D3CFGR 0x20
#define RCC_PLLCKSELR 0x28
#define RCC_PLLCFGR 0x2C
#define RCC_PLL1DIVR 0x30
#define RCC_PLL1FRACR 0x34
#define RCC_PLL2DIVR 0x38
#define RCC_PLL2FRACR 0x3C
#define RCC_PLL3DIVR 0x40
#define RCC_PLL3FRACR 0x44
#define RCC_D1CCIPR 0x4C
#define RCC_D2CCIP1R 0x50
#define RCC_D2CCIP2R 0x54
#define RCC_D3CCIPR 0x58
#define RCC_BDCR 0x70
#define RCC_CSR 0x74
#define RCC_AHB3ENR 0xD4
#define RCC_AHB1ENR 0xD8
#define RCC_AHB2ENR 0xDC
#define RCC_AHB4ENR 0xE0
#define RCC_APB3ENR 0xE4
#define RCC_APB1LENR 0xE8
#define RCC_APB1HENR 0xEC
#define RCC_APB2ENR 0xF0
#define RCC_APB4ENR 0xF4
static DEFINE_SPINLOCK(stm32rcc_lock);
static void __iomem *base;
static struct clk_hw **hws;
/* System clock parent */
static const char * const sys_src[] = {
"hsi_ck", "csi_ck", "hse_ck", "pll1_p" };
static const char * const tracein_src[] = {
"hsi_ck", "csi_ck", "hse_ck", "pll1_r" };
static const char * const per_src[] = {
"hsi_ker", "csi_ker", "hse_ck", "disabled" };
static const char * const pll_src[] = {
"hsi_ck", "csi_ck", "hse_ck", "no clock" };
static const char * const sdmmc_src[] = { "pll1_q", "pll2_r" };
static const char * const dsi_src[] = { "ck_dsi_phy", "pll2_q" };
static const char * const qspi_src[] = {
"hclk", "pll1_q", "pll2_r", "per_ck" };
static const char * const fmc_src[] = {
"hclk", "pll1_q", "pll2_r", "per_ck" };
/* Kernel clock parent */
static const char * const swp_src[] = { "pclk1", "hsi_ker" };
static const char * const fdcan_src[] = { "hse_ck", "pll1_q", "pll2_q" };
static const char * const dfsdm1_src[] = { "pclk2", "sys_ck" };
static const char * const spdifrx_src[] = {
"pll1_q", "pll2_r", "pll3_r", "hsi_ker" };
static const char *spi_src1[5] = {
"pll1_q", "pll2_p", "pll3_p", NULL, "per_ck" };
static const char * const spi_src2[] = {
"pclk2", "pll2_q", "pll3_q", "hsi_ker", "csi_ker", "hse_ck" };
static const char * const spi_src3[] = {
"pclk4", "pll2_q", "pll3_q", "hsi_ker", "csi_ker", "hse_ck" };
static const char * const lptim_src1[] = {
"pclk1", "pll2_p", "pll3_r", "lse_ck", "lsi_ck", "per_ck" };
static const char * const lptim_src2[] = {
"pclk4", "pll2_p", "pll3_r", "lse_ck", "lsi_ck", "per_ck" };
static const char * const cec_src[] = {"lse_ck", "lsi_ck", "csi_ker_div122" };
static const char * const usbotg_src[] = {"pll1_q", "pll3_q", "rc48_ck" };
/* i2c 1,2,3 src */
static const char * const i2c_src1[] = {
"pclk1", "pll3_r", "hsi_ker", "csi_ker" };
static const char * const i2c_src2[] = {
"pclk4", "pll3_r", "hsi_ker", "csi_ker" };
static const char * const rng_src[] = {
"rc48_ck", "pll1_q", "lse_ck", "lsi_ck" };
/* usart 1,6 src */
static const char * const usart_src1[] = {
"pclk2", "pll2_q", "pll3_q", "hsi_ker", "csi_ker", "lse_ck" };
/* usart 2,3,4,5,7,8 src */
static const char * const usart_src2[] = {
"pclk1", "pll2_q", "pll3_q", "hsi_ker", "csi_ker", "lse_ck" };
static const char *sai_src[5] = {
"pll1_q", "pll2_p", "pll3_p", NULL, "per_ck" };
static const char * const adc_src[] = { "pll2_p", "pll3_r", "per_ck" };
/* lptim 2,3,4,5 src */
static const char * const lpuart1_src[] = {
"pclk3", "pll2_q", "pll3_q", "csi_ker", "lse_ck" };
static const char * const hrtim_src[] = { "tim2_ker", "d1cpre" };
/* RTC clock parent */
static const char * const rtc_src[] = { "off", "lse_ck", "lsi_ck", "hse_1M" };
/* Micro-controller output clock parent */
static const char * const mco_src1[] = {
"hsi_ck", "lse_ck", "hse_ck", "pll1_q", "rc48_ck" };
static const char * const mco_src2[] = {
"sys_ck", "pll2_p", "hse_ck", "pll1_p", "csi_ck", "lsi_ck" };
/* LCD clock */
static const char * const ltdc_src[] = {"pll3_r"};
/* Gate clock with ready bit and backup domain management */
struct stm32_ready_gate {
struct clk_gate gate;
u8 bit_rdy;
};
#define to_ready_gate_clk(_rgate) container_of(_rgate, struct stm32_ready_gate,\
gate)
#define RGATE_TIMEOUT 10000
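/* With the 100 us delay per poll below, this allows roughly one second */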
static int ready_gate_clk_enable(struct clk_hw *hw)
{
struct clk_gate *gate = to_clk_gate(hw);
struct stm32_ready_gate *rgate = to_ready_gate_clk(gate);
int bit_status;
unsigned int timeout = RGATE_TIMEOUT;
if (clk_gate_ops.is_enabled(hw))
return 0;
clk_gate_ops.enable(hw);
/* We can't use readl_poll_timeout() because we could be blocked
 * if someone enables this clock before the clocksource changes.
 * Only the jiffies counter is available, but jiffies are
 * incremented by interrupts and the enable op does not allow
 * being interrupted.
 */
do {
bit_status = !(readl(gate->reg) & BIT(rgate->bit_rdy));
if (bit_status)
udelay(100);
} while (bit_status && --timeout);
return bit_status;
}
static void ready_gate_clk_disable(struct clk_hw *hw)
{
struct clk_gate *gate = to_clk_gate(hw);
struct stm32_ready_gate *rgate = to_ready_gate_clk(gate);
int bit_status;
unsigned int timeout = RGATE_TIMEOUT;
if (!clk_gate_ops.is_enabled(hw))
return;
clk_gate_ops.disable(hw);
do {
bit_status = !!(readl(gate->reg) & BIT(rgate->bit_rdy));
if (bit_status)
udelay(100);
} while (bit_status && --timeout);
}
static const struct clk_ops ready_gate_clk_ops = {
.enable = ready_gate_clk_enable,
.disable = ready_gate_clk_disable,
.is_enabled = clk_gate_is_enabled,
};
static struct clk_hw *clk_register_ready_gate(struct device *dev,
const char *name, const char *parent_name,
void __iomem *reg, u8 bit_idx, u8 bit_rdy,
unsigned long flags, spinlock_t *lock)
{
struct stm32_ready_gate *rgate;
struct clk_init_data init = { NULL };
struct clk_hw *hw;
int ret;
rgate = kzalloc(sizeof(*rgate), GFP_KERNEL);
if (!rgate)
return ERR_PTR(-ENOMEM);
init.name = name;
init.ops = &ready_gate_clk_ops;
init.flags = flags;
init.parent_names = &parent_name;
init.num_parents = 1;
rgate->bit_rdy = bit_rdy;
rgate->gate.lock = lock;
rgate->gate.reg = reg;
rgate->gate.bit_idx = bit_idx;
rgate->gate.hw.init = &init;
hw = &rgate->gate.hw;
ret = clk_hw_register(dev, hw);
if (ret) {
kfree(rgate);
hw = ERR_PTR(ret);
}
return hw;
}
struct gate_cfg {
u32 offset;
u8 bit_idx;
};
struct muxdiv_cfg {
u32 offset;
u8 shift;
u8 width;
};
struct composite_clk_cfg {
struct gate_cfg *gate;
struct muxdiv_cfg *mux;
struct muxdiv_cfg *div;
const char *name;
const char * const *parent_name;
int num_parents;
u32 flags;
};
struct composite_clk_gcfg_t {
u8 flags;
const struct clk_ops *ops;
};
/*
 * General config definition of a composite clock (only a clock divider
 * for the rate)
 */
struct composite_clk_gcfg {
struct composite_clk_gcfg_t *mux;
struct composite_clk_gcfg_t *div;
struct composite_clk_gcfg_t *gate;
};
#define M_CFG_MUX(_mux_ops, _mux_flags)\
.mux = &(struct composite_clk_gcfg_t) { _mux_flags, _mux_ops}
#define M_CFG_DIV(_rate_ops, _rate_flags)\
.div = &(struct composite_clk_gcfg_t) {_rate_flags, _rate_ops}
#define M_CFG_GATE(_gate_ops, _gate_flags)\
.gate = &(struct composite_clk_gcfg_t) { _gate_flags, _gate_ops}
static struct clk_mux *_get_cmux(void __iomem *reg, u8 shift, u8 width,
u32 flags, spinlock_t *lock)
{
struct clk_mux *mux;
mux = kzalloc(sizeof(*mux), GFP_KERNEL);
if (!mux)
return ERR_PTR(-ENOMEM);
mux->reg = reg;
mux->shift = shift;
mux->mask = (1 << width) - 1;
mux->flags = flags;
mux->lock = lock;
return mux;
}
static struct clk_divider *_get_cdiv(void __iomem *reg, u8 shift, u8 width,
u32 flags, spinlock_t *lock)
{
struct clk_divider *div;
div = kzalloc(sizeof(*div), GFP_KERNEL);
if (!div)
return ERR_PTR(-ENOMEM);
div->reg = reg;
div->shift = shift;
div->width = width;
div->flags = flags;
div->lock = lock;
return div;
}
static struct clk_gate *_get_cgate(void __iomem *reg, u8 bit_idx, u32 flags,
spinlock_t *lock)
{
struct clk_gate *gate;
gate = kzalloc(sizeof(*gate), GFP_KERNEL);
if (!gate)
return ERR_PTR(-ENOMEM);
gate->reg = reg;
gate->bit_idx = bit_idx;
gate->flags = flags;
gate->lock = lock;
return gate;
}
struct composite_cfg {
struct clk_hw *mux_hw;
struct clk_hw *div_hw;
struct clk_hw *gate_hw;
const struct clk_ops *mux_ops;
const struct clk_ops *div_ops;
const struct clk_ops *gate_ops;
};
static void get_cfg_composite_div(const struct composite_clk_gcfg *gcfg,
const struct composite_clk_cfg *cfg,
struct composite_cfg *composite, spinlock_t *lock)
{
struct clk_mux *mux = NULL;
struct clk_divider *div = NULL;
struct clk_gate *gate = NULL;
const struct clk_ops *mux_ops, *div_ops, *gate_ops;
struct clk_hw *mux_hw;
struct clk_hw *div_hw;
struct clk_hw *gate_hw;
mux_ops = div_ops = gate_ops = NULL;
mux_hw = div_hw = gate_hw = NULL;
if (gcfg->mux && cfg->mux) {
mux = _get_cmux(base + cfg->mux->offset,
cfg->mux->shift,
cfg->mux->width,
gcfg->mux->flags, lock);
if (!IS_ERR(mux)) {
mux_hw = &mux->hw;
mux_ops = gcfg->mux->ops ?
gcfg->mux->ops : &clk_mux_ops;
}
}
if (gcfg->div && cfg->div) {
div = _get_cdiv(base + cfg->div->offset,
cfg->div->shift,
cfg->div->width,
gcfg->div->flags, lock);
if (!IS_ERR(div)) {
div_hw = &div->hw;
div_ops = gcfg->div->ops ?
gcfg->div->ops : &clk_divider_ops;
}
}
if (gcfg->gate && cfg->gate) {
gate = _get_cgate(base + cfg->gate->offset,
cfg->gate->bit_idx,
gcfg->gate->flags, lock);
if (!IS_ERR(gate)) {
gate_hw = &gate->hw;
gate_ops = gcfg->gate->ops ?
gcfg->gate->ops : &clk_gate_ops;
}
}
composite->mux_hw = mux_hw;
composite->mux_ops = mux_ops;
composite->div_hw = div_hw;
composite->div_ops = div_ops;
composite->gate_hw = gate_hw;
composite->gate_ops = gate_ops;
}
/* Kernel Timer */
struct timer_ker {
u8 dppre_shift;
struct clk_hw hw;
spinlock_t *lock;
};
#define to_timer_ker(_hw) container_of(_hw, struct timer_ker, hw)
static unsigned long timer_ker_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct timer_ker *clk_elem = to_timer_ker(hw);
u32 timpre;
u32 dppre_shift = clk_elem->dppre_shift;
u32 prescaler;
u32 mul;
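/*
 * Timer kernel clock rule: with an undivided APB bus (prescaler < 4)
 * the timers run at the bus rate; otherwise they run at twice the APB
 * clock, or at four times it when TIMPRE is set and the APB divider
 * is greater than 2.
 */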
timpre = (readl(base + RCC_CFGR) >> 15) & 0x01;
prescaler = (readl(base + RCC_D2CFGR) >> dppre_shift) & 0x03;
mul = 2;
if (prescaler < 4)
mul = 1;
else if (timpre && prescaler > 4)
mul = 4;
return parent_rate * mul;
}
static const struct clk_ops timer_ker_ops = {
.recalc_rate = timer_ker_recalc_rate,
};
static struct clk_hw *clk_register_stm32_timer_ker(struct device *dev,
const char *name, const char *parent_name,
unsigned long flags,
u8 dppre_shift,
spinlock_t *lock)
{
struct timer_ker *element;
struct clk_init_data init;
struct clk_hw *hw;
int err;
element = kzalloc(sizeof(*element), GFP_KERNEL);
if (!element)
return ERR_PTR(-ENOMEM);
init.name = name;
init.ops = &timer_ker_ops;
init.flags = flags;
init.parent_names = &parent_name;
init.num_parents = 1;
element->hw.init = &init;
element->lock = lock;
element->dppre_shift = dppre_shift;
hw = &element->hw;
err = clk_hw_register(dev, hw);
if (err) {
kfree(element);
return ERR_PTR(err);
}
return hw;
}
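/*
 * HPRE/D1CPRE encoding: values 0-7 leave the clock undivided; values
 * 8-15 select power-of-two dividers from /2 to /512 (/32 is skipped).
 */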
static const struct clk_div_table d1cpre_div_table[] = {
{ 0, 1 }, { 1, 1 }, { 2, 1 }, { 3, 1},
{ 4, 1 }, { 5, 1 }, { 6, 1 }, { 7, 1},
{ 8, 2 }, { 9, 4 }, { 10, 8 }, { 11, 16 },
{ 12, 64 }, { 13, 128 }, { 14, 256 },
{ 15, 512 },
{ 0 },
};
static const struct clk_div_table ppre_div_table[] = {
{ 0, 1 }, { 1, 1 }, { 2, 1 }, { 3, 1},
{ 4, 2 }, { 5, 4 }, { 6, 8 }, { 7, 16 },
{ 0 },
};
static void register_core_and_bus_clocks(void)
{
/* CORE AND BUS */
hws[SYS_D1CPRE] = clk_hw_register_divider_table(NULL, "d1cpre",
"sys_ck", CLK_IGNORE_UNUSED, base + RCC_D1CFGR, 8, 4, 0,
d1cpre_div_table, &stm32rcc_lock);
hws[HCLK] = clk_hw_register_divider_table(NULL, "hclk", "d1cpre",
CLK_IGNORE_UNUSED, base + RCC_D1CFGR, 0, 4, 0,
d1cpre_div_table, &stm32rcc_lock);
/* D1 DOMAIN */
/* * CPU Systick */
hws[CPU_SYSTICK] = clk_hw_register_fixed_factor(NULL, "systick",
"d1cpre", 0, 1, 8);
/* * APB3 peripheral */
hws[PCLK3] = clk_hw_register_divider_table(NULL, "pclk3", "hclk", 0,
base + RCC_D1CFGR, 4, 3, 0,
ppre_div_table, &stm32rcc_lock);
/* D2 DOMAIN */
/* * APB1 peripheral */
hws[PCLK1] = clk_hw_register_divider_table(NULL, "pclk1", "hclk", 0,
base + RCC_D2CFGR, 4, 3, 0,
ppre_div_table, &stm32rcc_lock);
/* Timers prescaler clocks */
clk_register_stm32_timer_ker(NULL, "tim1_ker", "pclk1", 0,
4, &stm32rcc_lock);
/* * APB2 peripheral */
hws[PCLK2] = clk_hw_register_divider_table(NULL, "pclk2", "hclk", 0,
base + RCC_D2CFGR, 8, 3, 0, ppre_div_table,
&stm32rcc_lock);
clk_register_stm32_timer_ker(NULL, "tim2_ker", "pclk2", 0, 8,
&stm32rcc_lock);
/* D3 DOMAIN */
/* * APB4 peripheral */
hws[PCLK4] = clk_hw_register_divider_table(NULL, "pclk4", "hclk", 0,
base + RCC_D3CFGR, 4, 3, 0,
ppre_div_table, &stm32rcc_lock);
}
/* MUX clock configuration */
struct stm32_mux_clk {
const char *name;
const char * const *parents;
u8 num_parents;
u32 offset;
u8 shift;
u8 width;
u32 flags;
};
#define M_MCLOCF(_name, _parents, _mux_offset, _mux_shift, _mux_width, _flags)\
{\
.name = _name,\
.parents = _parents,\
.num_parents = ARRAY_SIZE(_parents),\
.offset = _mux_offset,\
.shift = _mux_shift,\
.width = _mux_width,\
.flags = _flags,\
}
#define M_MCLOC(_name, _parents, _mux_offset, _mux_shift, _mux_width)\
M_MCLOCF(_name, _parents, _mux_offset, _mux_shift, _mux_width, 0)\
static const struct stm32_mux_clk stm32_mclk[] __initconst = {
M_MCLOC("per_ck", per_src, RCC_D1CCIPR, 28, 3),
M_MCLOC("pllsrc", pll_src, RCC_PLLCKSELR, 0, 3),
M_MCLOC("sys_ck", sys_src, RCC_CFGR, 0, 3),
M_MCLOC("tracein_ck", tracein_src, RCC_CFGR, 0, 3),
};
/* Oscillator clock configuration */
struct stm32_osc_clk {
const char *name;
const char *parent;
u32 gate_offset;
u8 bit_idx;
u8 bit_rdy;
u32 flags;
};
#define OSC_CLKF(_name, _parent, _gate_offset, _bit_idx, _bit_rdy, _flags)\
{\
.name = _name,\
.parent = _parent,\
.gate_offset = _gate_offset,\
.bit_idx = _bit_idx,\
.bit_rdy = _bit_rdy,\
.flags = _flags,\
}
#define OSC_CLK(_name, _parent, _gate_offset, _bit_idx, _bit_rdy)\
OSC_CLKF(_name, _parent, _gate_offset, _bit_idx, _bit_rdy, 0)
static const struct stm32_osc_clk stm32_oclk[] __initconst = {
OSC_CLKF("hsi_ck", "hsidiv", RCC_CR, 0, 2, CLK_IGNORE_UNUSED),
OSC_CLKF("hsi_ker", "hsidiv", RCC_CR, 1, 2, CLK_IGNORE_UNUSED),
OSC_CLKF("csi_ck", "clk-csi", RCC_CR, 7, 8, CLK_IGNORE_UNUSED),
OSC_CLKF("csi_ker", "clk-csi", RCC_CR, 9, 8, CLK_IGNORE_UNUSED),
OSC_CLKF("rc48_ck", "clk-rc48", RCC_CR, 12, 13, CLK_IGNORE_UNUSED),
OSC_CLKF("lsi_ck", "clk-lsi", RCC_CSR, 0, 1, CLK_IGNORE_UNUSED),
};
/* PLL configuration */
struct st32h7_pll_cfg {
u8 bit_idx;
u32 offset_divr;
u8 bit_frac_en;
u32 offset_frac;
u8 divm;
};
struct stm32_pll_data {
const char *name;
const char *parent_name;
unsigned long flags;
const struct st32h7_pll_cfg *cfg;
};
static const struct st32h7_pll_cfg stm32h7_pll1 = {
.bit_idx = 24,
.offset_divr = RCC_PLL1DIVR,
.bit_frac_en = 0,
.offset_frac = RCC_PLL1FRACR,
.divm = 4,
};
static const struct st32h7_pll_cfg stm32h7_pll2 = {
.bit_idx = 26,
.offset_divr = RCC_PLL2DIVR,
.bit_frac_en = 4,
.offset_frac = RCC_PLL2FRACR,
.divm = 12,
};
static const struct st32h7_pll_cfg stm32h7_pll3 = {
.bit_idx = 28,
.offset_divr = RCC_PLL3DIVR,
.bit_frac_en = 8,
.offset_frac = RCC_PLL3FRACR,
.divm = 20,
};
static const struct stm32_pll_data stm32_pll[] = {
{ "vco1", "pllsrc", CLK_IGNORE_UNUSED, &stm32h7_pll1 },
{ "vco2", "pllsrc", 0, &stm32h7_pll2 },
{ "vco3", "pllsrc", 0, &stm32h7_pll3 },
};
struct stm32_fractional_divider {
void __iomem *mreg;
u8 mshift;
u8 mwidth;
void __iomem *nreg;
u8 nshift;
u8 nwidth;
void __iomem *freg_status;
u8 freg_bit;
void __iomem *freg_value;
u8 fshift;
u8 fwidth;
u8 flags;
struct clk_hw hw;
spinlock_t *lock;
};
struct stm32_pll_obj {
spinlock_t *lock;
struct stm32_fractional_divider div;
struct stm32_ready_gate rgate;
struct clk_hw hw;
};
#define to_pll(_hw) container_of(_hw, struct stm32_pll_obj, hw)
static int pll_is_enabled(struct clk_hw *hw)
{
struct stm32_pll_obj *clk_elem = to_pll(hw);
struct clk_hw *_hw = &clk_elem->rgate.gate.hw;
__clk_hw_set_clk(_hw, hw);
return ready_gate_clk_ops.is_enabled(_hw);
}
static int pll_enable(struct clk_hw *hw)
{
struct stm32_pll_obj *clk_elem = to_pll(hw);
struct clk_hw *_hw = &clk_elem->rgate.gate.hw;
__clk_hw_set_clk(_hw, hw);
return ready_gate_clk_ops.enable(_hw);
}
static void pll_disable(struct clk_hw *hw)
{
struct stm32_pll_obj *clk_elem = to_pll(hw);
struct clk_hw *_hw = &clk_elem->rgate.gate.hw;
__clk_hw_set_clk(_hw, hw);
ready_gate_clk_ops.disable(_hw);
}
static int pll_frac_is_enabled(struct clk_hw *hw)
{
struct stm32_pll_obj *clk_elem = to_pll(hw);
struct stm32_fractional_divider *fd = &clk_elem->div;
return (readl(fd->freg_status) >> fd->freg_bit) & 0x01;
}
static unsigned long pll_read_frac(struct clk_hw *hw)
{
struct stm32_pll_obj *clk_elem = to_pll(hw);
struct stm32_fractional_divider *fd = &clk_elem->div;
return (readl(fd->freg_value) >> fd->fshift) &
GENMASK(fd->fwidth - 1, 0);
}
static unsigned long pll_fd_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct stm32_pll_obj *clk_elem = to_pll(hw);
struct stm32_fractional_divider *fd = &clk_elem->div;
unsigned long m, n;
u32 val, mask;
u64 rate, rate1 = 0;
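/*
 * VCO rate = parent * N / M (with N = DIVN field + 1), plus a
 * fractional contribution of parent * FRACN / (M * 8191) when the
 * fractional divider is enabled.
 */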
val = readl(fd->mreg);
mask = GENMASK(fd->mwidth - 1, 0) << fd->mshift;
m = (val & mask) >> fd->mshift;
val = readl(fd->nreg);
mask = GENMASK(fd->nwidth - 1, 0) << fd->nshift;
n = ((val & mask) >> fd->nshift) + 1;
if (!n || !m)
return parent_rate;
rate = (u64)parent_rate * n;
do_div(rate, m);
if (pll_frac_is_enabled(hw)) {
val = pll_read_frac(hw);
rate1 = (u64)parent_rate * (u64)val;
do_div(rate1, (m * 8191));
}
return rate + rate1;
}
static const struct clk_ops pll_ops = {
.enable = pll_enable,
.disable = pll_disable,
.is_enabled = pll_is_enabled,
.recalc_rate = pll_fd_recalc_rate,
};
static struct clk_hw *clk_register_stm32_pll(struct device *dev,
const char *name,
const char *parent,
unsigned long flags,
const struct st32h7_pll_cfg *cfg,
spinlock_t *lock)
{
struct stm32_pll_obj *pll;
struct clk_init_data init = { NULL };
struct clk_hw *hw;
int ret;
struct stm32_fractional_divider *div = NULL;
struct stm32_ready_gate *rgate;
pll = kzalloc(sizeof(*pll), GFP_KERNEL);
if (!pll)
return ERR_PTR(-ENOMEM);
init.name = name;
init.ops = &pll_ops;
init.flags = flags;
init.parent_names = &parent;
init.num_parents = 1;
pll->hw.init = &init;
hw = &pll->hw;
rgate = &pll->rgate;
rgate->bit_rdy = cfg->bit_idx + 1;
rgate->gate.lock = lock;
rgate->gate.reg = base + RCC_CR;
rgate->gate.bit_idx = cfg->bit_idx;
div = &pll->div;
div->flags = 0;
div->mreg = base + RCC_PLLCKSELR;
div->mshift = cfg->divm;
div->mwidth = 6;
div->nreg = base + cfg->offset_divr;
div->nshift = 0;
div->nwidth = 9;
div->freg_status = base + RCC_PLLCFGR;
div->freg_bit = cfg->bit_frac_en;
div->freg_value = base + cfg->offset_frac;
div->fshift = 3;
div->fwidth = 13;
div->lock = lock;
ret = clk_hw_register(dev, hw);
if (ret) {
kfree(pll);
hw = ERR_PTR(ret);
}
return hw;
}
/* ODF CLOCKS */
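/*
 * Each PLL exposes three output dividers (P, Q, R). Changing a divider
 * or its gate requires the parent PLL to be temporarily disabled, which
 * the ops below take care of.
 */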
static unsigned long odf_divider_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
return clk_divider_ops.recalc_rate(hw, parent_rate);
}
static int odf_divider_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
return clk_divider_ops.determine_rate(hw, req);
}
static int odf_divider_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_hw *hwp;
int pll_status;
int ret;
hwp = clk_hw_get_parent(hw);
pll_status = pll_is_enabled(hwp);
if (pll_status)
pll_disable(hwp);
ret = clk_divider_ops.set_rate(hw, rate, parent_rate);
if (pll_status)
pll_enable(hwp);
return ret;
}
static const struct clk_ops odf_divider_ops = {
.recalc_rate = odf_divider_recalc_rate,
.determine_rate = odf_divider_determine_rate,
.set_rate = odf_divider_set_rate,
};
static int odf_gate_enable(struct clk_hw *hw)
{
struct clk_hw *hwp;
int pll_status;
int ret;
if (clk_gate_ops.is_enabled(hw))
return 0;
hwp = clk_hw_get_parent(hw);
pll_status = pll_is_enabled(hwp);
if (pll_status)
pll_disable(hwp);
ret = clk_gate_ops.enable(hw);
if (pll_status)
pll_enable(hwp);
return ret;
}
static void odf_gate_disable(struct clk_hw *hw)
{
struct clk_hw *hwp;
int pll_status;
if (!clk_gate_ops.is_enabled(hw))
return;
hwp = clk_hw_get_parent(hw);
pll_status = pll_is_enabled(hwp);
if (pll_status)
pll_disable(hwp);
clk_gate_ops.disable(hw);
if (pll_status)
pll_enable(hwp);
}
static const struct clk_ops odf_gate_ops = {
.enable = odf_gate_enable,
.disable = odf_gate_disable,
.is_enabled = clk_gate_is_enabled,
};
static struct composite_clk_gcfg odf_clk_gcfg = {
M_CFG_DIV(&odf_divider_ops, 0),
M_CFG_GATE(&odf_gate_ops, 0),
};
#define M_ODF_F(_name, _parent, _gate_offset, _bit_idx, _rate_offset,\
_rate_shift, _rate_width, _flags)\
{\
.mux = NULL,\
.div = &(struct muxdiv_cfg) {_rate_offset, _rate_shift, _rate_width},\
.gate = &(struct gate_cfg) {_gate_offset, _bit_idx },\
.name = _name,\
.parent_name = &(const char *) {_parent},\
.num_parents = 1,\
.flags = _flags,\
}
#define M_ODF(_name, _parent, _gate_offset, _bit_idx, _rate_offset,\
_rate_shift, _rate_width)\
M_ODF_F(_name, _parent, _gate_offset, _bit_idx, _rate_offset,\
_rate_shift, _rate_width, 0)\
static const struct composite_clk_cfg stm32_odf[3][3] = {
{
M_ODF_F("pll1_p", "vco1", RCC_PLLCFGR, 16, RCC_PLL1DIVR, 9, 7,
CLK_IGNORE_UNUSED),
M_ODF_F("pll1_q", "vco1", RCC_PLLCFGR, 17, RCC_PLL1DIVR, 16, 7,
CLK_IGNORE_UNUSED),
M_ODF_F("pll1_r", "vco1", RCC_PLLCFGR, 18, RCC_PLL1DIVR, 24, 7,
CLK_IGNORE_UNUSED),
},
{
M_ODF("pll2_p", "vco2", RCC_PLLCFGR, 19, RCC_PLL2DIVR, 9, 7),
M_ODF("pll2_q", "vco2", RCC_PLLCFGR, 20, RCC_PLL2DIVR, 16, 7),
M_ODF("pll2_r", "vco2", RCC_PLLCFGR, 21, RCC_PLL2DIVR, 24, 7),
},
{
M_ODF("pll3_p", "vco3", RCC_PLLCFGR, 22, RCC_PLL3DIVR, 9, 7),
M_ODF("pll3_q", "vco3", RCC_PLLCFGR, 23, RCC_PLL3DIVR, 16, 7),
M_ODF("pll3_r", "vco3", RCC_PLLCFGR, 24, RCC_PLL3DIVR, 24, 7),
}
};
/* PERIF CLOCKS */
struct pclk_t {
u32 gate_offset;
u8 bit_idx;
const char *name;
const char *parent;
u32 flags;
};
#define PER_CLKF(_gate_offset, _bit_idx, _name, _parent, _flags)\
{\
.gate_offset = _gate_offset,\
.bit_idx = _bit_idx,\
.name = _name,\
.parent = _parent,\
.flags = _flags,\
}
#define PER_CLK(_gate_offset, _bit_idx, _name, _parent)\
PER_CLKF(_gate_offset, _bit_idx, _name, _parent, 0)
static const struct pclk_t pclk[] = {
PER_CLK(RCC_AHB3ENR, 31, "d1sram1", "hclk"),
PER_CLK(RCC_AHB3ENR, 30, "itcm", "hclk"),
PER_CLK(RCC_AHB3ENR, 29, "dtcm2", "hclk"),
PER_CLK(RCC_AHB3ENR, 28, "dtcm1", "hclk"),
PER_CLK(RCC_AHB3ENR, 8, "flitf", "hclk"),
PER_CLK(RCC_AHB3ENR, 5, "jpgdec", "hclk"),
PER_CLK(RCC_AHB3ENR, 4, "dma2d", "hclk"),
PER_CLK(RCC_AHB3ENR, 0, "mdma", "hclk"),
PER_CLK(RCC_AHB1ENR, 28, "usb2ulpi", "hclk"),
PER_CLK(RCC_AHB1ENR, 26, "usb1ulpi", "hclk"),
PER_CLK(RCC_AHB1ENR, 17, "eth1rx", "hclk"),
PER_CLK(RCC_AHB1ENR, 16, "eth1tx", "hclk"),
PER_CLK(RCC_AHB1ENR, 15, "eth1mac", "hclk"),
PER_CLK(RCC_AHB1ENR, 14, "art", "hclk"),
PER_CLK(RCC_AHB1ENR, 1, "dma2", "hclk"),
PER_CLK(RCC_AHB1ENR, 0, "dma1", "hclk"),
PER_CLK(RCC_AHB2ENR, 31, "d2sram3", "hclk"),
PER_CLK(RCC_AHB2ENR, 30, "d2sram2", "hclk"),
PER_CLK(RCC_AHB2ENR, 29, "d2sram1", "hclk"),
PER_CLK(RCC_AHB2ENR, 5, "hash", "hclk"),
PER_CLK(RCC_AHB2ENR, 4, "crypt", "hclk"),
PER_CLK(RCC_AHB2ENR, 0, "camitf", "hclk"),
PER_CLK(RCC_AHB4ENR, 28, "bkpram", "hclk"),
PER_CLK(RCC_AHB4ENR, 25, "hsem", "hclk"),
PER_CLK(RCC_AHB4ENR, 21, "bdma", "hclk"),
PER_CLK(RCC_AHB4ENR, 19, "crc", "hclk"),
PER_CLK(RCC_AHB4ENR, 10, "gpiok", "hclk"),
PER_CLK(RCC_AHB4ENR, 9, "gpioj", "hclk"),
PER_CLK(RCC_AHB4ENR, 8, "gpioi", "hclk"),
PER_CLK(RCC_AHB4ENR, 7, "gpioh", "hclk"),
PER_CLK(RCC_AHB4ENR, 6, "gpiog", "hclk"),
PER_CLK(RCC_AHB4ENR, 5, "gpiof", "hclk"),
PER_CLK(RCC_AHB4ENR, 4, "gpioe", "hclk"),
PER_CLK(RCC_AHB4ENR, 3, "gpiod", "hclk"),
PER_CLK(RCC_AHB4ENR, 2, "gpioc", "hclk"),
PER_CLK(RCC_AHB4ENR, 1, "gpiob", "hclk"),
PER_CLK(RCC_AHB4ENR, 0, "gpioa", "hclk"),
PER_CLK(RCC_APB3ENR, 6, "wwdg1", "pclk3"),
PER_CLK(RCC_APB1LENR, 29, "dac12", "pclk1"),
PER_CLK(RCC_APB1LENR, 11, "wwdg2", "pclk1"),
PER_CLK(RCC_APB1LENR, 8, "tim14", "tim1_ker"),
PER_CLK(RCC_APB1LENR, 7, "tim13", "tim1_ker"),
PER_CLK(RCC_APB1LENR, 6, "tim12", "tim1_ker"),
PER_CLK(RCC_APB1LENR, 5, "tim7", "tim1_ker"),
PER_CLK(RCC_APB1LENR, 4, "tim6", "tim1_ker"),
PER_CLK(RCC_APB1LENR, 3, "tim5", "tim1_ker"),
PER_CLK(RCC_APB1LENR, 2, "tim4", "tim1_ker"),
PER_CLK(RCC_APB1LENR, 1, "tim3", "tim1_ker"),
PER_CLK(RCC_APB1LENR, 0, "tim2", "tim1_ker"),
PER_CLK(RCC_APB1HENR, 5, "mdios", "pclk1"),
PER_CLK(RCC_APB1HENR, 4, "opamp", "pclk1"),
PER_CLK(RCC_APB1HENR, 1, "crs", "pclk1"),
PER_CLK(RCC_APB2ENR, 18, "tim17", "tim2_ker"),
PER_CLK(RCC_APB2ENR, 17, "tim16", "tim2_ker"),
PER_CLK(RCC_APB2ENR, 16, "tim15", "tim2_ker"),
PER_CLK(RCC_APB2ENR, 1, "tim8", "tim2_ker"),
PER_CLK(RCC_APB2ENR, 0, "tim1", "tim2_ker"),
PER_CLK(RCC_APB4ENR, 26, "tmpsens", "pclk4"),
PER_CLK(RCC_APB4ENR, 16, "rtcapb", "pclk4"),
PER_CLK(RCC_APB4ENR, 15, "vref", "pclk4"),
PER_CLK(RCC_APB4ENR, 14, "comp12", "pclk4"),
PER_CLK(RCC_APB4ENR, 1, "syscfg", "pclk4"),
};
/* KERNEL CLOCKS */
#define KER_CLKF(_gate_offset, _bit_idx,\
_mux_offset, _mux_shift, _mux_width,\
_name, _parent_name,\
_flags) \
{ \
.gate = &(struct gate_cfg) {_gate_offset, _bit_idx},\
.mux = &(struct muxdiv_cfg) {_mux_offset, _mux_shift, _mux_width },\
.name = _name, \
.parent_name = _parent_name, \
.num_parents = ARRAY_SIZE(_parent_name),\
.flags = _flags,\
}
#define KER_CLK(_gate_offset, _bit_idx, _mux_offset, _mux_shift, _mux_width,\
_name, _parent_name) \
KER_CLKF(_gate_offset, _bit_idx, _mux_offset, _mux_shift, _mux_width,\
_name, _parent_name, 0)\
#define KER_CLKF_NOMUX(_gate_offset, _bit_idx,\
_name, _parent_name,\
_flags) \
{ \
.gate = &(struct gate_cfg) {_gate_offset, _bit_idx},\
.mux = NULL,\
.name = _name, \
.parent_name = _parent_name, \
.num_parents = 1,\
.flags = _flags,\
}
static const struct composite_clk_cfg kclk[] = {
KER_CLK(RCC_AHB3ENR, 16, RCC_D1CCIPR, 16, 1, "sdmmc1", sdmmc_src),
KER_CLKF(RCC_AHB3ENR, 14, RCC_D1CCIPR, 4, 2, "quadspi", qspi_src,
CLK_IGNORE_UNUSED),
KER_CLKF(RCC_AHB3ENR, 12, RCC_D1CCIPR, 0, 2, "fmc", fmc_src,
CLK_IGNORE_UNUSED),
KER_CLK(RCC_AHB1ENR, 27, RCC_D2CCIP2R, 20, 2, "usb2otg", usbotg_src),
KER_CLK(RCC_AHB1ENR, 25, RCC_D2CCIP2R, 20, 2, "usb1otg", usbotg_src),
KER_CLK(RCC_AHB1ENR, 5, RCC_D3CCIPR, 16, 2, "adc12", adc_src),
KER_CLK(RCC_AHB2ENR, 9, RCC_D1CCIPR, 16, 1, "sdmmc2", sdmmc_src),
KER_CLK(RCC_AHB2ENR, 6, RCC_D2CCIP2R, 8, 2, "rng", rng_src),
KER_CLK(RCC_AHB4ENR, 24, RCC_D3CCIPR, 16, 2, "adc3", adc_src),
KER_CLKF(RCC_APB3ENR, 4, RCC_D1CCIPR, 8, 1, "dsi", dsi_src,
CLK_SET_RATE_PARENT),
KER_CLKF_NOMUX(RCC_APB3ENR, 3, "ltdc", ltdc_src, CLK_SET_RATE_PARENT),
KER_CLK(RCC_APB1LENR, 31, RCC_D2CCIP2R, 0, 3, "usart8", usart_src2),
KER_CLK(RCC_APB1LENR, 30, RCC_D2CCIP2R, 0, 3, "usart7", usart_src2),
KER_CLK(RCC_APB1LENR, 27, RCC_D2CCIP2R, 22, 2, "hdmicec", cec_src),
KER_CLK(RCC_APB1LENR, 23, RCC_D2CCIP2R, 12, 2, "i2c3", i2c_src1),
KER_CLK(RCC_APB1LENR, 22, RCC_D2CCIP2R, 12, 2, "i2c2", i2c_src1),
KER_CLK(RCC_APB1LENR, 21, RCC_D2CCIP2R, 12, 2, "i2c1", i2c_src1),
KER_CLK(RCC_APB1LENR, 20, RCC_D2CCIP2R, 0, 3, "uart5", usart_src2),
KER_CLK(RCC_APB1LENR, 19, RCC_D2CCIP2R, 0, 3, "uart4", usart_src2),
KER_CLK(RCC_APB1LENR, 18, RCC_D2CCIP2R, 0, 3, "usart3", usart_src2),
KER_CLK(RCC_APB1LENR, 17, RCC_D2CCIP2R, 0, 3, "usart2", usart_src2),
KER_CLK(RCC_APB1LENR, 16, RCC_D2CCIP1R, 20, 2, "spdifrx", spdifrx_src),
KER_CLK(RCC_APB1LENR, 15, RCC_D2CCIP1R, 16, 3, "spi3", spi_src1),
KER_CLK(RCC_APB1LENR, 14, RCC_D2CCIP1R, 16, 3, "spi2", spi_src1),
KER_CLK(RCC_APB1LENR, 9, RCC_D2CCIP2R, 28, 3, "lptim1", lptim_src1),
KER_CLK(RCC_APB1HENR, 8, RCC_D2CCIP1R, 28, 2, "fdcan", fdcan_src),
KER_CLK(RCC_APB1HENR, 2, RCC_D2CCIP1R, 31, 1, "swp", swp_src),
KER_CLK(RCC_APB2ENR, 29, RCC_CFGR, 14, 1, "hrtim", hrtim_src),
KER_CLK(RCC_APB2ENR, 28, RCC_D2CCIP1R, 24, 1, "dfsdm1", dfsdm1_src),
KER_CLKF(RCC_APB2ENR, 24, RCC_D2CCIP1R, 6, 3, "sai3", sai_src,
CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT),
KER_CLKF(RCC_APB2ENR, 23, RCC_D2CCIP1R, 6, 3, "sai2", sai_src,
CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT),
KER_CLKF(RCC_APB2ENR, 22, RCC_D2CCIP1R, 0, 3, "sai1", sai_src,
CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT),
KER_CLK(RCC_APB2ENR, 20, RCC_D2CCIP1R, 16, 3, "spi5", spi_src2),
KER_CLK(RCC_APB2ENR, 13, RCC_D2CCIP1R, 16, 3, "spi4", spi_src2),
KER_CLK(RCC_APB2ENR, 12, RCC_D2CCIP1R, 16, 3, "spi1", spi_src1),
KER_CLK(RCC_APB2ENR, 5, RCC_D2CCIP2R, 3, 3, "usart6", usart_src1),
KER_CLK(RCC_APB2ENR, 4, RCC_D2CCIP2R, 3, 3, "usart1", usart_src1),
KER_CLK(RCC_APB4ENR, 21, RCC_D3CCIPR, 24, 3, "sai4b", sai_src),
KER_CLK(RCC_APB4ENR, 21, RCC_D3CCIPR, 21, 3, "sai4a", sai_src),
KER_CLK(RCC_APB4ENR, 12, RCC_D3CCIPR, 13, 3, "lptim5", lptim_src2),
KER_CLK(RCC_APB4ENR, 11, RCC_D3CCIPR, 13, 3, "lptim4", lptim_src2),
KER_CLK(RCC_APB4ENR, 10, RCC_D3CCIPR, 13, 3, "lptim3", lptim_src2),
KER_CLK(RCC_APB4ENR, 9, RCC_D3CCIPR, 10, 3, "lptim2", lptim_src2),
KER_CLK(RCC_APB4ENR, 7, RCC_D3CCIPR, 8, 2, "i2c4", i2c_src2),
KER_CLK(RCC_APB4ENR, 5, RCC_D3CCIPR, 28, 3, "spi6", spi_src3),
KER_CLK(RCC_APB4ENR, 3, RCC_D3CCIPR, 0, 3, "lpuart1", lpuart1_src),
};
static struct composite_clk_gcfg kernel_clk_cfg = {
M_CFG_MUX(NULL, 0),
M_CFG_GATE(NULL, 0),
};
/* RTC clock */
/*
* RTC & LSE registers are protected against parasitic write access.
* PWR_CR_DBP bit must be set to enable write access to RTC registers.
*/
/* STM32_PWR_CR */
#define PWR_CR 0x00
/* STM32_PWR_CR bit field */
#define PWR_CR_DBP BIT(8)
static struct composite_clk_gcfg rtc_clk_cfg = {
M_CFG_MUX(NULL, 0),
M_CFG_GATE(NULL, 0),
};
static const struct composite_clk_cfg rtc_clk =
KER_CLK(RCC_BDCR, 15, RCC_BDCR, 8, 2, "rtc_ck", rtc_src);
/* Micro-controller output clock */
static struct composite_clk_gcfg mco_clk_cfg = {
M_CFG_MUX(NULL, 0),
M_CFG_DIV(NULL, CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO),
};
#define M_MCO_F(_name, _parents, _mux_offset, _mux_shift, _mux_width,\
_rate_offset, _rate_shift, _rate_width,\
_flags)\
{\
.mux = &(struct muxdiv_cfg) {_mux_offset, _mux_shift, _mux_width },\
.div = &(struct muxdiv_cfg) {_rate_offset, _rate_shift, _rate_width},\
.gate = NULL,\
.name = _name,\
.parent_name = _parents,\
.num_parents = ARRAY_SIZE(_parents),\
.flags = _flags,\
}
static const struct composite_clk_cfg mco_clk[] = {
M_MCO_F("mco1", mco_src1, RCC_CFGR, 22, 4, RCC_CFGR, 18, 4, 0),
M_MCO_F("mco2", mco_src2, RCC_CFGR, 29, 3, RCC_CFGR, 25, 4, 0),
};
static void __init stm32h7_rcc_init(struct device_node *np)
{
struct clk_hw_onecell_data *clk_data;
struct composite_cfg c_cfg;
int n;
const char *hse_clk, *lse_clk, *i2s_clk;
struct regmap *pdrm;
clk_data = kzalloc(struct_size(clk_data, hws, STM32H7_MAX_CLKS),
GFP_KERNEL);
if (!clk_data)
return;
clk_data->num = STM32H7_MAX_CLKS;
hws = clk_data->hws;
for (n = 0; n < STM32H7_MAX_CLKS; n++)
hws[n] = ERR_PTR(-ENOENT);
/* get the RCC base address from DT */
base = of_iomap(np, 0);
if (!base) {
pr_err("%pOFn: unable to map resource", np);
goto err_free_clks;
}
pdrm = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
if (IS_ERR(pdrm))
pr_warn("%s: Unable to get syscfg\n", __func__);
else
/* Unconditionally disable backup domain write protection;
 * it is never re-enabled. This is needed by the LSE & RTC
 * clocks.
 */
regmap_update_bits(pdrm, PWR_CR, PWR_CR_DBP, PWR_CR_DBP);
/* Get the parent clock names from DT */
hse_clk = of_clk_get_parent_name(np, 0);
lse_clk = of_clk_get_parent_name(np, 1);
i2s_clk = of_clk_get_parent_name(np, 2);
sai_src[3] = i2s_clk;
spi_src1[3] = i2s_clk;
/* Register Internal oscillators */
clk_hw_register_fixed_rate(NULL, "clk-hsi", NULL, 0, 64000000);
clk_hw_register_fixed_rate(NULL, "clk-csi", NULL, 0, 4000000);
clk_hw_register_fixed_rate(NULL, "clk-lsi", NULL, 0, 32000);
clk_hw_register_fixed_rate(NULL, "clk-rc48", NULL, 0, 48000);
/* This clock comes from outside the chip; its frequency is unknown */
hws[CK_DSI_PHY] = clk_hw_register_fixed_rate(NULL, "ck_dsi_phy", NULL,
0, 0);
hws[HSI_DIV] = clk_hw_register_divider(NULL, "hsidiv", "clk-hsi", 0,
base + RCC_CR, 3, 2, CLK_DIVIDER_POWER_OF_TWO,
&stm32rcc_lock);
hws[HSE_1M] = clk_hw_register_divider(NULL, "hse_1M", "hse_ck", 0,
base + RCC_CFGR, 8, 6, CLK_DIVIDER_ONE_BASED |
CLK_DIVIDER_ALLOW_ZERO,
&stm32rcc_lock);
/* Mux system clocks */
for (n = 0; n < ARRAY_SIZE(stm32_mclk); n++)
hws[MCLK_BANK + n] = clk_hw_register_mux(NULL,
stm32_mclk[n].name,
stm32_mclk[n].parents,
stm32_mclk[n].num_parents,
stm32_mclk[n].flags,
stm32_mclk[n].offset + base,
stm32_mclk[n].shift,
stm32_mclk[n].width,
0,
&stm32rcc_lock);
register_core_and_bus_clocks();
/* Oscillator clocks */
for (n = 0; n < ARRAY_SIZE(stm32_oclk); n++)
hws[OSC_BANK + n] = clk_register_ready_gate(NULL,
stm32_oclk[n].name,
stm32_oclk[n].parent,
stm32_oclk[n].gate_offset + base,
stm32_oclk[n].bit_idx,
stm32_oclk[n].bit_rdy,
stm32_oclk[n].flags,
&stm32rcc_lock);
hws[HSE_CK] = clk_register_ready_gate(NULL,
"hse_ck",
hse_clk,
RCC_CR + base,
16, 17,
0,
&stm32rcc_lock);
hws[LSE_CK] = clk_register_ready_gate(NULL,
"lse_ck",
lse_clk,
RCC_BDCR + base,
0, 1,
0,
&stm32rcc_lock);
hws[CSI_KER_DIV122 + n] = clk_hw_register_fixed_factor(NULL,
"csi_ker_div122", "csi_ker", 0, 1, 122);
/* PLLs */
for (n = 0; n < ARRAY_SIZE(stm32_pll); n++) {
int odf;
/* Register the VCO */
clk_register_stm32_pll(NULL, stm32_pll[n].name,
stm32_pll[n].parent_name, stm32_pll[n].flags,
stm32_pll[n].cfg,
&stm32rcc_lock);
/* Register the 3 output dividers */
for (odf = 0; odf < 3; odf++) {
int idx = n * 3 + odf;
get_cfg_composite_div(&odf_clk_gcfg, &stm32_odf[n][odf],
&c_cfg, &stm32rcc_lock);
hws[ODF_BANK + idx] = clk_hw_register_composite(NULL,
stm32_odf[n][odf].name,
stm32_odf[n][odf].parent_name,
stm32_odf[n][odf].num_parents,
c_cfg.mux_hw, c_cfg.mux_ops,
c_cfg.div_hw, c_cfg.div_ops,
c_cfg.gate_hw, c_cfg.gate_ops,
stm32_odf[n][odf].flags);
}
}
/* Peripheral clocks */
for (n = 0; n < ARRAY_SIZE(pclk); n++)
hws[PERIF_BANK + n] = clk_hw_register_gate(NULL, pclk[n].name,
pclk[n].parent,
pclk[n].flags, base + pclk[n].gate_offset,
pclk[n].bit_idx, pclk[n].flags, &stm32rcc_lock);
/* Kernel clocks */
for (n = 0; n < ARRAY_SIZE(kclk); n++) {
get_cfg_composite_div(&kernel_clk_cfg, &kclk[n], &c_cfg,
&stm32rcc_lock);
hws[KERN_BANK + n] = clk_hw_register_composite(NULL,
kclk[n].name,
kclk[n].parent_name,
kclk[n].num_parents,
c_cfg.mux_hw, c_cfg.mux_ops,
c_cfg.div_hw, c_cfg.div_ops,
c_cfg.gate_hw, c_cfg.gate_ops,
kclk[n].flags);
}
/* RTC clock (default state is off) */
clk_hw_register_fixed_rate(NULL, "off", NULL, 0, 0);
get_cfg_composite_div(&rtc_clk_cfg, &rtc_clk, &c_cfg, &stm32rcc_lock);
hws[RTC_CK] = clk_hw_register_composite(NULL,
rtc_clk.name,
rtc_clk.parent_name,
rtc_clk.num_parents,
c_cfg.mux_hw, c_cfg.mux_ops,
c_cfg.div_hw, c_cfg.div_ops,
c_cfg.gate_hw, c_cfg.gate_ops,
rtc_clk.flags);
/* Micro-controller clocks */
for (n = 0; n < ARRAY_SIZE(mco_clk); n++) {
get_cfg_composite_div(&mco_clk_cfg, &mco_clk[n], &c_cfg,
&stm32rcc_lock);
hws[MCO_BANK + n] = clk_hw_register_composite(NULL,
mco_clk[n].name,
mco_clk[n].parent_name,
mco_clk[n].num_parents,
c_cfg.mux_hw, c_cfg.mux_ops,
c_cfg.div_hw, c_cfg.div_ops,
c_cfg.gate_hw, c_cfg.gate_ops,
mco_clk[n].flags);
}
of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
return;
err_free_clks:
kfree(clk_data);
}
/* The RCC node is a clock and reset controller, and these
 * functionalities are supported by different drivers that
 * match the same compatible string.
 */
CLK_OF_DECLARE_DRIVER(stm32h7_rcc, "st,stm32h743-rcc", stm32h7_rcc_init);
| linux-master | drivers/clk/clk-stm32h7.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Author: Daniel Thompson <[email protected]>
*
* Inspired by clk-asm9260.c.
*/
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
/*
 * Include the list of clocks which are not derived from the system clock
 * (SYSCLOCK). The index of these clocks is the secondary index of the DT
 * bindings.
 */
#include <dt-bindings/clock/stm32fx-clock.h>
#define STM32F4_RCC_CR 0x00
#define STM32F4_RCC_PLLCFGR 0x04
#define STM32F4_RCC_CFGR 0x08
#define STM32F4_RCC_AHB1ENR 0x30
#define STM32F4_RCC_AHB2ENR 0x34
#define STM32F4_RCC_AHB3ENR 0x38
#define STM32F4_RCC_APB1ENR 0x40
#define STM32F4_RCC_APB2ENR 0x44
#define STM32F4_RCC_BDCR 0x70
#define STM32F4_RCC_CSR 0x74
#define STM32F4_RCC_PLLI2SCFGR 0x84
#define STM32F4_RCC_PLLSAICFGR 0x88
#define STM32F4_RCC_DCKCFGR 0x8c
#define STM32F7_RCC_DCKCFGR2 0x90
#define NONE -1
#define NO_IDX NONE
#define NO_MUX NONE
#define NO_GATE NONE
struct stm32f4_gate_data {
u8 offset;
u8 bit_idx;
const char *name;
const char *parent_name;
unsigned long flags;
};
static const struct stm32f4_gate_data stm32f429_gates[] __initconst = {
{ STM32F4_RCC_AHB1ENR, 0, "gpioa", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 1, "gpiob", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 2, "gpioc", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 3, "gpiod", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 4, "gpioe", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 5, "gpiof", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 6, "gpiog", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 7, "gpioh", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 8, "gpioi", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 9, "gpioj", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 10, "gpiok", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 12, "crc", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 18, "bkpsra", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 20, "ccmdatam", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 21, "dma1", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 22, "dma2", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 23, "dma2d", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 25, "ethmac", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 26, "ethmactx", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 27, "ethmacrx", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 28, "ethmacptp", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 29, "otghs", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 30, "otghsulpi", "ahb_div" },
{ STM32F4_RCC_AHB2ENR, 0, "dcmi", "ahb_div" },
{ STM32F4_RCC_AHB2ENR, 4, "cryp", "ahb_div" },
{ STM32F4_RCC_AHB2ENR, 5, "hash", "ahb_div" },
{ STM32F4_RCC_AHB2ENR, 6, "rng", "pll48" },
{ STM32F4_RCC_AHB2ENR, 7, "otgfs", "pll48" },
{ STM32F4_RCC_AHB3ENR, 0, "fmc", "ahb_div",
CLK_IGNORE_UNUSED },
{ STM32F4_RCC_APB1ENR, 0, "tim2", "apb1_mul" },
{ STM32F4_RCC_APB1ENR, 1, "tim3", "apb1_mul" },
{ STM32F4_RCC_APB1ENR, 2, "tim4", "apb1_mul" },
{ STM32F4_RCC_APB1ENR, 3, "tim5", "apb1_mul" },
{ STM32F4_RCC_APB1ENR, 4, "tim6", "apb1_mul" },
{ STM32F4_RCC_APB1ENR, 5, "tim7", "apb1_mul" },
{ STM32F4_RCC_APB1ENR, 6, "tim12", "apb1_mul" },
{ STM32F4_RCC_APB1ENR, 7, "tim13", "apb1_mul" },
{ STM32F4_RCC_APB1ENR, 8, "tim14", "apb1_mul" },
{ STM32F4_RCC_APB1ENR, 11, "wwdg", "apb1_div" },
{ STM32F4_RCC_APB1ENR, 14, "spi2", "apb1_div" },
{ STM32F4_RCC_APB1ENR, 15, "spi3", "apb1_div" },
{ STM32F4_RCC_APB1ENR, 17, "uart2", "apb1_div" },
{ STM32F4_RCC_APB1ENR, 18, "uart3", "apb1_div" },
{ STM32F4_RCC_APB1ENR, 19, "uart4", "apb1_div" },
{ STM32F4_RCC_APB1ENR, 20, "uart5", "apb1_div" },
{ STM32F4_RCC_APB1ENR, 21, "i2c1", "apb1_div" },
{ STM32F4_RCC_APB1ENR, 22, "i2c2", "apb1_div" },
{ STM32F4_RCC_APB1ENR, 23, "i2c3", "apb1_div" },
{ STM32F4_RCC_APB1ENR, 25, "can1", "apb1_div" },
{ STM32F4_RCC_APB1ENR, 26, "can2", "apb1_div" },
{ STM32F4_RCC_APB1ENR, 28, "pwr", "apb1_div" },
{ STM32F4_RCC_APB1ENR, 29, "dac", "apb1_div" },
{ STM32F4_RCC_APB1ENR, 30, "uart7", "apb1_div" },
{ STM32F4_RCC_APB1ENR, 31, "uart8", "apb1_div" },
{ STM32F4_RCC_APB2ENR, 0, "tim1", "apb2_mul" },
{ STM32F4_RCC_APB2ENR, 1, "tim8", "apb2_mul" },
{ STM32F4_RCC_APB2ENR, 4, "usart1", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 5, "usart6", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 8, "adc1", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 9, "adc2", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 10, "adc3", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 11, "sdio", "pll48" },
{ STM32F4_RCC_APB2ENR, 12, "spi1", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 13, "spi4", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 14, "syscfg", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 16, "tim9", "apb2_mul" },
{ STM32F4_RCC_APB2ENR, 17, "tim10", "apb2_mul" },
{ STM32F4_RCC_APB2ENR, 18, "tim11", "apb2_mul" },
{ STM32F4_RCC_APB2ENR, 20, "spi5", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 21, "spi6", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 22, "sai1", "apb2_div" },
};
static const struct stm32f4_gate_data stm32f469_gates[] __initconst = {
{ STM32F4_RCC_AHB1ENR, 0, "gpioa", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 1, "gpiob", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 2, "gpioc", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 3, "gpiod", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 4, "gpioe", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 5, "gpiof", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 6, "gpiog", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 7, "gpioh", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 8, "gpioi", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 9, "gpioj", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 10, "gpiok", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 12, "crc", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 18, "bkpsra", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 20, "ccmdatam", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 21, "dma1", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 22, "dma2", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 23, "dma2d", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 25, "ethmac", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 26, "ethmactx", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 27, "ethmacrx", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 28, "ethmacptp", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 29, "otghs", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 30, "otghsulpi", "ahb_div" },
{ STM32F4_RCC_AHB2ENR, 0, "dcmi", "ahb_div" },
{ STM32F4_RCC_AHB2ENR, 4, "cryp", "ahb_div" },
{ STM32F4_RCC_AHB2ENR, 5, "hash", "ahb_div" },
{ STM32F4_RCC_AHB2ENR, 6, "rng", "pll48" },
{ STM32F4_RCC_AHB2ENR, 7, "otgfs", "pll48" },
{ STM32F4_RCC_AHB3ENR, 0, "fmc", "ahb_div",
CLK_IGNORE_UNUSED },
{ STM32F4_RCC_AHB3ENR, 1, "qspi", "ahb_div",
CLK_IGNORE_UNUSED },
{ STM32F4_RCC_APB1ENR, 0, "tim2", "apb1_mul" },
{ STM32F4_RCC_APB1ENR, 1, "tim3", "apb1_mul" },
{ STM32F4_RCC_APB1ENR, 2, "tim4", "apb1_mul" },
{ STM32F4_RCC_APB1ENR, 3, "tim5", "apb1_mul" },
{ STM32F4_RCC_APB1ENR, 4, "tim6", "apb1_mul" },
{ STM32F4_RCC_APB1ENR, 5, "tim7", "apb1_mul" },
{ STM32F4_RCC_APB1ENR, 6, "tim12", "apb1_mul" },
{ STM32F4_RCC_APB1ENR, 7, "tim13", "apb1_mul" },
{ STM32F4_RCC_APB1ENR, 8, "tim14", "apb1_mul" },
{ STM32F4_RCC_APB1ENR, 11, "wwdg", "apb1_div" },
{ STM32F4_RCC_APB1ENR, 14, "spi2", "apb1_div" },
{ STM32F4_RCC_APB1ENR, 15, "spi3", "apb1_div" },
{ STM32F4_RCC_APB1ENR, 17, "uart2", "apb1_div" },
{ STM32F4_RCC_APB1ENR, 18, "uart3", "apb1_div" },
{ STM32F4_RCC_APB1ENR, 19, "uart4", "apb1_div" },
{ STM32F4_RCC_APB1ENR, 20, "uart5", "apb1_div" },
{ STM32F4_RCC_APB1ENR, 21, "i2c1", "apb1_div" },
{ STM32F4_RCC_APB1ENR, 22, "i2c2", "apb1_div" },
{ STM32F4_RCC_APB1ENR, 23, "i2c3", "apb1_div" },
{ STM32F4_RCC_APB1ENR, 25, "can1", "apb1_div" },
{ STM32F4_RCC_APB1ENR, 26, "can2", "apb1_div" },
{ STM32F4_RCC_APB1ENR, 28, "pwr", "apb1_div" },
{ STM32F4_RCC_APB1ENR, 29, "dac", "apb1_div" },
{ STM32F4_RCC_APB1ENR, 30, "uart7", "apb1_div" },
{ STM32F4_RCC_APB1ENR, 31, "uart8", "apb1_div" },
{ STM32F4_RCC_APB2ENR, 0, "tim1", "apb2_mul" },
{ STM32F4_RCC_APB2ENR, 1, "tim8", "apb2_mul" },
{ STM32F4_RCC_APB2ENR, 4, "usart1", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 5, "usart6", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 8, "adc1", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 9, "adc2", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 10, "adc3", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 11, "sdio", "sdmux" },
{ STM32F4_RCC_APB2ENR, 12, "spi1", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 13, "spi4", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 14, "syscfg", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 16, "tim9", "apb2_mul" },
{ STM32F4_RCC_APB2ENR, 17, "tim10", "apb2_mul" },
{ STM32F4_RCC_APB2ENR, 18, "tim11", "apb2_mul" },
{ STM32F4_RCC_APB2ENR, 20, "spi5", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 21, "spi6", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 22, "sai1", "apb2_div" },
};
static const struct stm32f4_gate_data stm32f746_gates[] __initconst = {
{ STM32F4_RCC_AHB1ENR, 0, "gpioa", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 1, "gpiob", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 2, "gpioc", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 3, "gpiod", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 4, "gpioe", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 5, "gpiof", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 6, "gpiog", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 7, "gpioh", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 8, "gpioi", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 9, "gpioj", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 10, "gpiok", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 12, "crc", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 18, "bkpsra", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 20, "dtcmram", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 21, "dma1", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 22, "dma2", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 23, "dma2d", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 25, "ethmac", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 26, "ethmactx", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 27, "ethmacrx", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 28, "ethmacptp", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 29, "otghs", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 30, "otghsulpi", "ahb_div" },
{ STM32F4_RCC_AHB2ENR, 0, "dcmi", "ahb_div" },
{ STM32F4_RCC_AHB2ENR, 4, "cryp", "ahb_div" },
{ STM32F4_RCC_AHB2ENR, 5, "hash", "ahb_div" },
{ STM32F4_RCC_AHB2ENR, 6, "rng", "pll48" },
{ STM32F4_RCC_AHB2ENR, 7, "otgfs", "pll48" },
{ STM32F4_RCC_AHB3ENR, 0, "fmc", "ahb_div",
CLK_IGNORE_UNUSED },
{ STM32F4_RCC_AHB3ENR, 1, "qspi", "ahb_div",
CLK_IGNORE_UNUSED },
{ STM32F4_RCC_APB1ENR, 0, "tim2", "apb1_mul" },
{ STM32F4_RCC_APB1ENR, 1, "tim3", "apb1_mul" },
{ STM32F4_RCC_APB1ENR, 2, "tim4", "apb1_mul" },
{ STM32F4_RCC_APB1ENR, 3, "tim5", "apb1_mul" },
{ STM32F4_RCC_APB1ENR, 4, "tim6", "apb1_mul" },
{ STM32F4_RCC_APB1ENR, 5, "tim7", "apb1_mul" },
{ STM32F4_RCC_APB1ENR, 6, "tim12", "apb1_mul" },
{ STM32F4_RCC_APB1ENR, 7, "tim13", "apb1_mul" },
{ STM32F4_RCC_APB1ENR, 8, "tim14", "apb1_mul" },
{ STM32F4_RCC_APB1ENR, 11, "wwdg", "apb1_div" },
{ STM32F4_RCC_APB1ENR, 14, "spi2", "apb1_div" },
{ STM32F4_RCC_APB1ENR, 15, "spi3", "apb1_div" },
{ STM32F4_RCC_APB1ENR, 16, "spdifrx", "apb1_div" },
{ STM32F4_RCC_APB1ENR, 25, "can1", "apb1_div" },
{ STM32F4_RCC_APB1ENR, 26, "can2", "apb1_div" },
{ STM32F4_RCC_APB1ENR, 27, "cec", "apb1_div" },
{ STM32F4_RCC_APB1ENR, 28, "pwr", "apb1_div" },
{ STM32F4_RCC_APB1ENR, 29, "dac", "apb1_div" },
{ STM32F4_RCC_APB2ENR, 0, "tim1", "apb2_mul" },
{ STM32F4_RCC_APB2ENR, 1, "tim8", "apb2_mul" },
{ STM32F4_RCC_APB2ENR, 7, "sdmmc2", "sdmux" },
{ STM32F4_RCC_APB2ENR, 8, "adc1", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 9, "adc2", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 10, "adc3", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 11, "sdmmc", "sdmux" },
{ STM32F4_RCC_APB2ENR, 12, "spi1", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 13, "spi4", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 14, "syscfg", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 16, "tim9", "apb2_mul" },
{ STM32F4_RCC_APB2ENR, 17, "tim10", "apb2_mul" },
{ STM32F4_RCC_APB2ENR, 18, "tim11", "apb2_mul" },
{ STM32F4_RCC_APB2ENR, 20, "spi5", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 21, "spi6", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 22, "sai1", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 23, "sai2", "apb2_div" },
};
static const struct stm32f4_gate_data stm32f769_gates[] __initconst = {
{ STM32F4_RCC_AHB1ENR, 0, "gpioa", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 1, "gpiob", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 2, "gpioc", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 3, "gpiod", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 4, "gpioe", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 5, "gpiof", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 6, "gpiog", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 7, "gpioh", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 8, "gpioi", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 9, "gpioj", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 10, "gpiok", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 12, "crc", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 18, "bkpsra", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 20, "dtcmram", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 21, "dma1", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 22, "dma2", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 23, "dma2d", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 25, "ethmac", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 26, "ethmactx", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 27, "ethmacrx", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 28, "ethmacptp", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 29, "otghs", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 30, "otghsulpi", "ahb_div" },
{ STM32F4_RCC_AHB2ENR, 0, "dcmi", "ahb_div" },
{ STM32F4_RCC_AHB2ENR, 1, "jpeg", "ahb_div" },
{ STM32F4_RCC_AHB2ENR, 4, "cryp", "ahb_div" },
{ STM32F4_RCC_AHB2ENR, 5, "hash", "ahb_div" },
{ STM32F4_RCC_AHB2ENR, 6, "rng", "pll48" },
{ STM32F4_RCC_AHB2ENR, 7, "otgfs", "pll48" },
{ STM32F4_RCC_AHB3ENR, 0, "fmc", "ahb_div",
CLK_IGNORE_UNUSED },
{ STM32F4_RCC_AHB3ENR, 1, "qspi", "ahb_div",
CLK_IGNORE_UNUSED },
{ STM32F4_RCC_APB1ENR, 0, "tim2", "apb1_mul" },
{ STM32F4_RCC_APB1ENR, 1, "tim3", "apb1_mul" },
{ STM32F4_RCC_APB1ENR, 2, "tim4", "apb1_mul" },
{ STM32F4_RCC_APB1ENR, 3, "tim5", "apb1_mul" },
{ STM32F4_RCC_APB1ENR, 4, "tim6", "apb1_mul" },
{ STM32F4_RCC_APB1ENR, 5, "tim7", "apb1_mul" },
{ STM32F4_RCC_APB1ENR, 6, "tim12", "apb1_mul" },
{ STM32F4_RCC_APB1ENR, 7, "tim13", "apb1_mul" },
{ STM32F4_RCC_APB1ENR, 8, "tim14", "apb1_mul" },
{ STM32F4_RCC_APB1ENR, 10, "rtcapb", "apb1_mul" },
{ STM32F4_RCC_APB1ENR, 11, "wwdg", "apb1_div" },
{ STM32F4_RCC_APB1ENR, 13, "can3", "apb1_div" },
{ STM32F4_RCC_APB1ENR, 14, "spi2", "apb1_div" },
{ STM32F4_RCC_APB1ENR, 15, "spi3", "apb1_div" },
{ STM32F4_RCC_APB1ENR, 16, "spdifrx", "apb1_div" },
{ STM32F4_RCC_APB1ENR, 25, "can1", "apb1_div" },
{ STM32F4_RCC_APB1ENR, 26, "can2", "apb1_div" },
{ STM32F4_RCC_APB1ENR, 27, "cec", "apb1_div" },
{ STM32F4_RCC_APB1ENR, 28, "pwr", "apb1_div" },
{ STM32F4_RCC_APB1ENR, 29, "dac", "apb1_div" },
{ STM32F4_RCC_APB2ENR, 0, "tim1", "apb2_mul" },
{ STM32F4_RCC_APB2ENR, 1, "tim8", "apb2_mul" },
{ STM32F4_RCC_APB2ENR, 7, "sdmmc2", "sdmux2" },
{ STM32F4_RCC_APB2ENR, 8, "adc1", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 9, "adc2", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 10, "adc3", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 11, "sdmmc1", "sdmux1" },
{ STM32F4_RCC_APB2ENR, 12, "spi1", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 13, "spi4", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 14, "syscfg", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 16, "tim9", "apb2_mul" },
{ STM32F4_RCC_APB2ENR, 17, "tim10", "apb2_mul" },
{ STM32F4_RCC_APB2ENR, 18, "tim11", "apb2_mul" },
{ STM32F4_RCC_APB2ENR, 20, "spi5", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 21, "spi6", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 22, "sai1", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 23, "sai2", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 30, "mdio", "apb2_div" },
};
/*
* This bitmask tells us which bit offsets (0..191) on STM32F4[23]xxx
* have gate bits associated with them. Its combined hweight is 71.
*/
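/*
 * Worked example (editor's sketch, not part of the original driver): a
 * gate's bit offset is derived from its enable register and bit position
 * as 8 * (offset - STM32F4_RCC_AHB1ENR) + bit_idx, so "gpiob" (AHB1ENR,
 * bit 1) sits at offset 1 and "spi1" (APB2ENR = 0x44, bit 12) at
 * 8 * (0x44 - 0x30) + 12 = 172. Only offsets whose bits are set in the
 * per-SoC maps below are valid DT indices.
 */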
#define MAX_GATE_MAP 3
static const u64 stm32f42xx_gate_map[MAX_GATE_MAP] = { 0x000000f17ef417ffull,
0x0000000000000001ull,
0x04777f33f6fec9ffull };
static const u64 stm32f46xx_gate_map[MAX_GATE_MAP] = { 0x000000f17ef417ffull,
0x0000000000000003ull,
0x0c777f33f6fec9ffull };
static const u64 stm32f746_gate_map[MAX_GATE_MAP] = { 0x000000f17ef417ffull,
0x0000000000000003ull,
0x04f77f833e01c9ffull };
static const u64 stm32f769_gate_map[MAX_GATE_MAP] = { 0x000000f37ef417ffull,
0x0000000000000003ull,
0x44f77f833e01edffull };
static const u64 *stm32f4_gate_map;
static struct clk_hw **clks;
static DEFINE_SPINLOCK(stm32f4_clk_lock);
static void __iomem *base;
static struct regmap *pdrm;
static int stm32fx_end_primary_clk;
/*
 * "Multiplier" device for APBx clocks.
 *
 * The APBx dividers are power-of-two dividers and, if *not* running in 1:1
 * mode, they also tap out one of the low-order state bits to run the
 * timers. ST datasheets represent this feature as a (conditional) clock
 * multiplier.
 */
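/*
 * Worked example (editor's note, assuming a 168 MHz AHB clock): with the
 * APB1 prescaler at /4, apb1_div runs at 42 MHz; because the divider is
 * not 1:1 the multiplier bit is set, so apb1_mul, and hence the APB1
 * timer kernel clocks, run at 84 MHz. With the prescaler at /1 the bit
 * is clear and apb1_mul equals apb1_div.
 */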
struct clk_apb_mul {
struct clk_hw hw;
u8 bit_idx;
};
#define to_clk_apb_mul(_hw) container_of(_hw, struct clk_apb_mul, hw)
static unsigned long clk_apb_mul_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_apb_mul *am = to_clk_apb_mul(hw);
if (readl(base + STM32F4_RCC_CFGR) & BIT(am->bit_idx))
return parent_rate * 2;
return parent_rate;
}
static long clk_apb_mul_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
struct clk_apb_mul *am = to_clk_apb_mul(hw);
unsigned long mult = 1;
if (readl(base + STM32F4_RCC_CFGR) & BIT(am->bit_idx))
mult = 2;
if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) {
unsigned long best_parent = rate / mult;
*prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
}
return *prate * mult;
}
static int clk_apb_mul_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
/*
 * We must report success; we can do so unconditionally because
 * clk_apb_mul_round_rate only returns values that make this call a
 * nop.
 */
return 0;
}
static const struct clk_ops clk_apb_mul_factor_ops = {
.round_rate = clk_apb_mul_round_rate,
.set_rate = clk_apb_mul_set_rate,
.recalc_rate = clk_apb_mul_recalc_rate,
};
static struct clk *clk_register_apb_mul(struct device *dev, const char *name,
const char *parent_name,
unsigned long flags, u8 bit_idx)
{
struct clk_apb_mul *am;
struct clk_init_data init;
struct clk *clk;
am = kzalloc(sizeof(*am), GFP_KERNEL);
if (!am)
return ERR_PTR(-ENOMEM);
am->bit_idx = bit_idx;
am->hw.init = &init;
init.name = name;
init.ops = &clk_apb_mul_factor_ops;
init.flags = flags;
init.parent_names = &parent_name;
init.num_parents = 1;
clk = clk_register(dev, &am->hw);
if (IS_ERR(clk))
kfree(am);
return clk;
}
enum {
PLL,
PLL_I2S,
PLL_SAI,
};
static const struct clk_div_table pll_divp_table[] = {
{ 0, 2 }, { 1, 4 }, { 2, 6 }, { 3, 8 }, { 0 }
};
static const struct clk_div_table pll_divq_table[] = {
{ 2, 2 }, { 3, 3 }, { 4, 4 }, { 5, 5 }, { 6, 6 }, { 7, 7 },
{ 8, 8 }, { 9, 9 }, { 10, 10 }, { 11, 11 }, { 12, 12 }, { 13, 13 },
{ 14, 14 }, { 15, 15 },
{ 0 }
};
static const struct clk_div_table pll_divr_table[] = {
{ 2, 2 }, { 3, 3 }, { 4, 4 }, { 5, 5 }, { 6, 6 }, { 7, 7 }, { 0 }
};
struct stm32f4_pll {
spinlock_t *lock;
struct clk_gate gate;
u8 offset;
u8 bit_rdy_idx;
u8 status;
u8 n_start;
};
#define to_stm32f4_pll(_gate) container_of(_gate, struct stm32f4_pll, gate)
struct stm32f4_pll_post_div_data {
int idx;
int pll_idx;
const char *name;
const char *parent;
u8 flag;
u8 offset;
u8 shift;
u8 width;
u8 flag_div;
const struct clk_div_table *div_table;
};
struct stm32f4_vco_data {
const char *vco_name;
u8 offset;
u8 bit_idx;
u8 bit_rdy_idx;
};
static const struct stm32f4_vco_data vco_data[] = {
{ "vco", STM32F4_RCC_PLLCFGR, 24, 25 },
{ "vco-i2s", STM32F4_RCC_PLLI2SCFGR, 26, 27 },
{ "vco-sai", STM32F4_RCC_PLLSAICFGR, 28, 29 },
};
static const struct clk_div_table post_divr_table[] = {
{ 0, 2 }, { 1, 4 }, { 2, 8 }, { 3, 16 }, { 0 }
};
#define MAX_POST_DIV 3
static const struct stm32f4_pll_post_div_data post_div_data[MAX_POST_DIV] = {
{ CLK_I2SQ_PDIV, PLL_VCO_I2S, "plli2s-q-div", "plli2s-q",
CLK_SET_RATE_PARENT, STM32F4_RCC_DCKCFGR, 0, 5, 0, NULL},
{ CLK_SAIQ_PDIV, PLL_VCO_SAI, "pllsai-q-div", "pllsai-q",
CLK_SET_RATE_PARENT, STM32F4_RCC_DCKCFGR, 8, 5, 0, NULL },
{ NO_IDX, PLL_VCO_SAI, "pllsai-r-div", "pllsai-r", CLK_SET_RATE_PARENT,
STM32F4_RCC_DCKCFGR, 16, 2, 0, post_divr_table },
};
struct stm32f4_div_data {
u8 shift;
u8 width;
u8 flag_div;
const struct clk_div_table *div_table;
};
#define MAX_PLL_DIV 3
static const struct stm32f4_div_data div_data[MAX_PLL_DIV] = {
{ 16, 2, 0, pll_divp_table },
{ 24, 4, 0, pll_divq_table },
{ 28, 3, 0, pll_divr_table },
};
struct stm32f4_pll_data {
u8 pll_num;
u8 n_start;
const char *div_name[MAX_PLL_DIV];
};
static const struct stm32f4_pll_data stm32f429_pll[MAX_PLL_DIV] = {
{ PLL, 192, { "pll", "pll48", NULL } },
{ PLL_I2S, 192, { NULL, "plli2s-q", "plli2s-r" } },
{ PLL_SAI, 49, { NULL, "pllsai-q", "pllsai-r" } },
};
static const struct stm32f4_pll_data stm32f469_pll[MAX_PLL_DIV] = {
{ PLL, 50, { "pll", "pll-q", "pll-r" } },
{ PLL_I2S, 50, { "plli2s-p", "plli2s-q", "plli2s-r" } },
{ PLL_SAI, 50, { "pllsai-p", "pllsai-q", "pllsai-r" } },
};
static int stm32f4_pll_is_enabled(struct clk_hw *hw)
{
return clk_gate_ops.is_enabled(hw);
}
#define PLL_TIMEOUT 10000
static int stm32f4_pll_enable(struct clk_hw *hw)
{
struct clk_gate *gate = to_clk_gate(hw);
struct stm32f4_pll *pll = to_stm32f4_pll(gate);
int bit_status;
unsigned int timeout = PLL_TIMEOUT;
if (clk_gate_ops.is_enabled(hw))
return 0;
clk_gate_ops.enable(hw);
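/* Busy-wait for the PLLRDY bit; a non-zero return means the PLL never locked */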
do {
bit_status = !(readl(gate->reg) & BIT(pll->bit_rdy_idx));
} while (bit_status && --timeout);
return bit_status;
}
static void stm32f4_pll_disable(struct clk_hw *hw)
{
clk_gate_ops.disable(hw);
}
static unsigned long stm32f4_pll_recalc(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_gate *gate = to_clk_gate(hw);
struct stm32f4_pll *pll = to_stm32f4_pll(gate);
unsigned long n;
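/* The PLLN multiplier occupies bits 14:6 of the PLL configuration register */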
n = (readl(base + pll->offset) >> 6) & 0x1ff;
return parent_rate * n;
}
static long stm32f4_pll_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
struct clk_gate *gate = to_clk_gate(hw);
struct stm32f4_pll *pll = to_stm32f4_pll(gate);
unsigned long n;
n = rate / *prate;
if (n < pll->n_start)
n = pll->n_start;
else if (n > 432)
n = 432;
return *prate * n;
}
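/*
 * Example (editor's sketch, assuming the conventional 1 MHz vco_in): a
 * request for 336 MHz yields n = 336, which is representable exactly;
 * requests outside [n_start, 432] times the input rate are clamped to
 * the PLL's valid multiplier range by the code above.
 */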
static int stm32f4_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_gate *gate = to_clk_gate(hw);
struct stm32f4_pll *pll = to_stm32f4_pll(gate);
unsigned long n;
unsigned long val;
int pll_state;
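/* PLLN may only be changed while the PLL is disabled, so gate it around the update */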
pll_state = stm32f4_pll_is_enabled(hw);
if (pll_state)
stm32f4_pll_disable(hw);
n = rate / parent_rate;
val = readl(base + pll->offset) & ~(0x1ff << 6);
writel(val | ((n & 0x1ff) << 6), base + pll->offset);
if (pll_state)
stm32f4_pll_enable(hw);
return 0;
}
static const struct clk_ops stm32f4_pll_gate_ops = {
.enable = stm32f4_pll_enable,
.disable = stm32f4_pll_disable,
.is_enabled = stm32f4_pll_is_enabled,
.recalc_rate = stm32f4_pll_recalc,
.round_rate = stm32f4_pll_round_rate,
.set_rate = stm32f4_pll_set_rate,
};
struct stm32f4_pll_div {
struct clk_divider div;
struct clk_hw *hw_pll;
};
#define to_pll_div_clk(_div) container_of(_div, struct stm32f4_pll_div, div)
static unsigned long stm32f4_pll_div_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
return clk_divider_ops.recalc_rate(hw, parent_rate);
}
static int stm32f4_pll_div_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
return clk_divider_ops.determine_rate(hw, req);
}
static int stm32f4_pll_div_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
int pll_state, ret;
struct clk_divider *div = to_clk_divider(hw);
struct stm32f4_pll_div *pll_div = to_pll_div_clk(div);
pll_state = stm32f4_pll_is_enabled(pll_div->hw_pll);
if (pll_state)
stm32f4_pll_disable(pll_div->hw_pll);
ret = clk_divider_ops.set_rate(hw, rate, parent_rate);
if (pll_state)
stm32f4_pll_enable(pll_div->hw_pll);
return ret;
}
static const struct clk_ops stm32f4_pll_div_ops = {
.recalc_rate = stm32f4_pll_div_recalc_rate,
.determine_rate = stm32f4_pll_div_determine_rate,
.set_rate = stm32f4_pll_div_set_rate,
};
static struct clk_hw *clk_register_pll_div(const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 shift, u8 width,
u8 clk_divider_flags, const struct clk_div_table *table,
struct clk_hw *pll_hw, spinlock_t *lock)
{
struct stm32f4_pll_div *pll_div;
struct clk_hw *hw;
struct clk_init_data init;
int ret;
/* allocate the divider */
pll_div = kzalloc(sizeof(*pll_div), GFP_KERNEL);
if (!pll_div)
return ERR_PTR(-ENOMEM);
init.name = name;
init.ops = &stm32f4_pll_div_ops;
init.flags = flags;
init.parent_names = (parent_name ? &parent_name : NULL);
init.num_parents = (parent_name ? 1 : 0);
/* struct clk_divider assignments */
pll_div->div.reg = reg;
pll_div->div.shift = shift;
pll_div->div.width = width;
pll_div->div.flags = clk_divider_flags;
pll_div->div.lock = lock;
pll_div->div.table = table;
pll_div->div.hw.init = &init;
pll_div->hw_pll = pll_hw;
/* register the clock */
hw = &pll_div->div.hw;
ret = clk_hw_register(NULL, hw);
if (ret) {
kfree(pll_div);
hw = ERR_PTR(ret);
}
return hw;
}
static struct clk_hw *stm32f4_rcc_register_pll(const char *pllsrc,
const struct stm32f4_pll_data *data, spinlock_t *lock)
{
struct stm32f4_pll *pll;
struct clk_init_data init = { NULL };
void __iomem *reg;
struct clk_hw *pll_hw;
int ret;
int i;
const struct stm32f4_vco_data *vco;
pll = kzalloc(sizeof(*pll), GFP_KERNEL);
if (!pll)
return ERR_PTR(-ENOMEM);
vco = &vco_data[data->pll_num];
init.name = vco->vco_name;
init.ops = &stm32f4_pll_gate_ops;
init.flags = CLK_SET_RATE_GATE;
init.parent_names = &pllsrc;
init.num_parents = 1;
pll->gate.lock = lock;
pll->gate.reg = base + STM32F4_RCC_CR;
pll->gate.bit_idx = vco->bit_idx;
pll->gate.hw.init = &init;
pll->offset = vco->offset;
pll->n_start = data->n_start;
pll->bit_rdy_idx = vco->bit_rdy_idx;
pll->status = (readl(base + STM32F4_RCC_CR) >> vco->bit_idx) & 0x1;
reg = base + pll->offset;
pll_hw = &pll->gate.hw;
ret = clk_hw_register(NULL, pll_hw);
if (ret) {
kfree(pll);
return ERR_PTR(ret);
}
for (i = 0; i < MAX_PLL_DIV; i++)
if (data->div_name[i])
clk_register_pll_div(data->div_name[i],
vco->vco_name,
0,
reg,
div_data[i].shift,
div_data[i].width,
div_data[i].flag_div,
div_data[i].div_table,
pll_hw,
lock);
return pll_hw;
}
/*
* Converts the primary and secondary indices (as they appear in DT) to an
* offset into our struct clock array.
*/
static int stm32f4_rcc_lookup_clk_idx(u8 primary, u8 secondary)
{
u64 table[MAX_GATE_MAP];
if (primary == 1) {
if (WARN_ON(secondary >= stm32fx_end_primary_clk))
return -EINVAL;
return secondary;
}
memcpy(table, stm32f4_gate_map, sizeof(table));
/* only bits set in table can be used as indices */
if (WARN_ON(secondary >= BITS_PER_BYTE * sizeof(table) ||
0 == (table[BIT_ULL_WORD(secondary)] &
BIT_ULL_MASK(secondary))))
return -EINVAL;
/* mask out bits above our current index */
table[BIT_ULL_WORD(secondary)] &=
GENMASK_ULL(secondary % BITS_PER_LONG_LONG, 0);
return stm32fx_end_primary_clk - 1 + hweight64(table[0]) +
(BIT_ULL_WORD(secondary) >= 1 ? hweight64(table[1]) : 0) +
(BIT_ULL_WORD(secondary) >= 2 ? hweight64(table[2]) : 0);
}
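/*
 * Usage sketch (editor's addition, per the st,stm32-rcc DT binding): a
 * consumer node referencing <&rcc 0 172> asks for primary index 0 (the
 * gated leaf clocks) and secondary index 172, which the lookup above
 * maps to the "spi1" gate by counting the gate-map bits set at or below
 * that position. Primary index 1 addresses the fixed clocks enumerated
 * in dt-bindings/clock/stm32fx-clock.h directly.
 */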
static struct clk_hw *
stm32f4_rcc_lookup_clk(struct of_phandle_args *clkspec, void *data)
{
int i = stm32f4_rcc_lookup_clk_idx(clkspec->args[0], clkspec->args[1]);
if (i < 0)
return ERR_PTR(-EINVAL);
return clks[i];
}
#define to_rgclk(_rgate) container_of(_rgate, struct stm32_rgate, gate)
static inline void disable_power_domain_write_protection(void)
{
if (pdrm)
regmap_update_bits(pdrm, 0x00, (1 << 8), (1 << 8));
}
static inline void enable_power_domain_write_protection(void)
{
if (pdrm)
regmap_update_bits(pdrm, 0x00, (1 << 8), (0 << 8));
}
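/*
 * Toggling BDRST (RCC_BDCR bit 16) resets the backup domain; this is
 * needed because the RTC clock source selection bits are write-once
 * until the next backup-domain reset.
 */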
static inline void software_reset_backup_domain(void)
{
unsigned long val;
val = readl(base + STM32F4_RCC_BDCR);
writel(val | BIT(16), base + STM32F4_RCC_BDCR);
writel(val & ~BIT(16), base + STM32F4_RCC_BDCR);
}
struct stm32_rgate {
struct clk_gate gate;
u8 bit_rdy_idx;
};
#define RGATE_TIMEOUT 50000
static int rgclk_enable(struct clk_hw *hw)
{
struct clk_gate *gate = to_clk_gate(hw);
struct stm32_rgate *rgate = to_rgclk(gate);
int bit_status;
unsigned int timeout = RGATE_TIMEOUT;
if (clk_gate_ops.is_enabled(hw))
return 0;
disable_power_domain_write_protection();
clk_gate_ops.enable(hw);
do {
bit_status = !(readl(gate->reg) & BIT(rgate->bit_rdy_idx));
if (bit_status)
udelay(100);
} while (bit_status && --timeout);
enable_power_domain_write_protection();
return bit_status;
}
static void rgclk_disable(struct clk_hw *hw)
{
clk_gate_ops.disable(hw);
}
static int rgclk_is_enabled(struct clk_hw *hw)
{
return clk_gate_ops.is_enabled(hw);
}
static const struct clk_ops rgclk_ops = {
.enable = rgclk_enable,
.disable = rgclk_disable,
.is_enabled = rgclk_is_enabled,
};
static struct clk_hw *clk_register_rgate(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 bit_idx, u8 bit_rdy_idx,
u8 clk_gate_flags, spinlock_t *lock)
{
struct stm32_rgate *rgate;
struct clk_init_data init = { NULL };
struct clk_hw *hw;
int ret;
rgate = kzalloc(sizeof(*rgate), GFP_KERNEL);
if (!rgate)
return ERR_PTR(-ENOMEM);
init.name = name;
init.ops = &rgclk_ops;
init.flags = flags;
init.parent_names = &parent_name;
init.num_parents = 1;
rgate->bit_rdy_idx = bit_rdy_idx;
rgate->gate.lock = lock;
rgate->gate.reg = reg;
rgate->gate.bit_idx = bit_idx;
rgate->gate.hw.init = &init;
hw = &rgate->gate.hw;
ret = clk_hw_register(dev, hw);
if (ret) {
kfree(rgate);
hw = ERR_PTR(ret);
}
return hw;
}
static int cclk_gate_enable(struct clk_hw *hw)
{
int ret;
disable_power_domain_write_protection();
ret = clk_gate_ops.enable(hw);
enable_power_domain_write_protection();
return ret;
}
static void cclk_gate_disable(struct clk_hw *hw)
{
disable_power_domain_write_protection();
clk_gate_ops.disable(hw);
enable_power_domain_write_protection();
}
static int cclk_gate_is_enabled(struct clk_hw *hw)
{
return clk_gate_ops.is_enabled(hw);
}
static const struct clk_ops cclk_gate_ops = {
.enable = cclk_gate_enable,
.disable = cclk_gate_disable,
.is_enabled = cclk_gate_is_enabled,
};
static u8 cclk_mux_get_parent(struct clk_hw *hw)
{
return clk_mux_ops.get_parent(hw);
}
static int cclk_mux_set_parent(struct clk_hw *hw, u8 index)
{
int ret;
disable_power_domain_write_protection();
software_reset_backup_domain();
ret = clk_mux_ops.set_parent(hw, index);
enable_power_domain_write_protection();
return ret;
}
static const struct clk_ops cclk_mux_ops = {
.determine_rate = clk_hw_determine_rate_no_reparent,
.get_parent = cclk_mux_get_parent,
.set_parent = cclk_mux_set_parent,
};
static struct clk_hw *stm32_register_cclk(struct device *dev, const char *name,
const char * const *parent_names, int num_parents,
void __iomem *reg, u8 bit_idx, u8 shift, unsigned long flags,
spinlock_t *lock)
{
struct clk_hw *hw;
struct clk_gate *gate;
struct clk_mux *mux;
gate = kzalloc(sizeof(*gate), GFP_KERNEL);
if (!gate) {
hw = ERR_PTR(-EINVAL);
goto fail;
}
mux = kzalloc(sizeof(*mux), GFP_KERNEL);
if (!mux) {
kfree(gate);
hw = ERR_PTR(-EINVAL);
goto fail;
}
gate->reg = reg;
gate->bit_idx = bit_idx;
gate->flags = 0;
gate->lock = lock;
mux->reg = reg;
mux->shift = shift;
mux->mask = 3;
mux->flags = 0;
hw = clk_hw_register_composite(dev, name, parent_names, num_parents,
&mux->hw, &cclk_mux_ops,
NULL, NULL,
&gate->hw, &cclk_gate_ops,
flags);
if (IS_ERR(hw)) {
kfree(gate);
kfree(mux);
}
fail:
return hw;
}
static const char *sys_parents[] __initdata = { "hsi", NULL, "pll" };
static const struct clk_div_table ahb_div_table[] = {
{ 0x0, 1 }, { 0x1, 1 }, { 0x2, 1 }, { 0x3, 1 },
{ 0x4, 1 }, { 0x5, 1 }, { 0x6, 1 }, { 0x7, 1 },
{ 0x8, 2 }, { 0x9, 4 }, { 0xa, 8 }, { 0xb, 16 },
{ 0xc, 64 }, { 0xd, 128 }, { 0xe, 256 }, { 0xf, 512 },
{ 0 },
};
static const struct clk_div_table apb_div_table[] = {
{ 0, 1 }, { 1, 1 }, { 2, 1 }, { 3, 1 },
{ 4, 2 }, { 5, 4 }, { 6, 8 }, { 7, 16 },
{ 0 },
};
static const char *rtc_parents[4] = {
"no-clock", "lse", "lsi", "hse-rtc"
};
static const char *pll_src = "pll-src";
static const char *pllsrc_parent[2] = { "hsi", NULL };
static const char *dsi_parent[2] = { NULL, "pll-r" };
static const char *lcd_parent[1] = { "pllsai-r-div" };
static const char *i2s_parents[2] = { "plli2s-r", NULL };
static const char *sai_parents[4] = { "pllsai-q-div", "plli2s-q-div", NULL,
"no-clock" };
static const char *pll48_parents[2] = { "pll-q", "pllsai-p" };
static const char *sdmux_parents[2] = { "pll48", "sys" };
static const char *hdmi_parents[2] = { "lse", "hsi_div488" };
static const char *spdif_parent[1] = { "plli2s-p" };
static const char *lptim_parent[4] = { "apb1_mul", "lsi", "hsi", "lse" };
static const char *uart_parents1[4] = { "apb2_div", "sys", "hsi", "lse" };
static const char *uart_parents2[4] = { "apb1_div", "sys", "hsi", "lse" };
static const char *i2c_parents[4] = { "apb1_div", "sys", "hsi", "no-clock" };
static const char * const dfsdm1_src[] = { "apb2_div", "sys" };
static const char * const adfsdm1_parent[] = { "sai1_clk", "sai2_clk" };
struct stm32_aux_clk {
int idx;
const char *name;
const char * const *parent_names;
int num_parents;
int offset_mux;
u8 shift;
u8 mask;
int offset_gate;
u8 bit_idx;
unsigned long flags;
};
struct stm32f4_clk_data {
const struct stm32f4_gate_data *gates_data;
const u64 *gates_map;
int gates_num;
const struct stm32f4_pll_data *pll_data;
const struct stm32_aux_clk *aux_clk;
int aux_clk_num;
int end_primary;
};
static const struct stm32_aux_clk stm32f429_aux_clk[] = {
{
CLK_LCD, "lcd-tft", lcd_parent, ARRAY_SIZE(lcd_parent),
NO_MUX, 0, 0,
STM32F4_RCC_APB2ENR, 26,
CLK_SET_RATE_PARENT
},
{
CLK_I2S, "i2s", i2s_parents, ARRAY_SIZE(i2s_parents),
STM32F4_RCC_CFGR, 23, 1,
NO_GATE, 0,
CLK_SET_RATE_PARENT
},
{
CLK_SAI1, "sai1-a", sai_parents, ARRAY_SIZE(sai_parents),
STM32F4_RCC_DCKCFGR, 20, 3,
STM32F4_RCC_APB2ENR, 22,
CLK_SET_RATE_PARENT
},
{
CLK_SAI2, "sai1-b", sai_parents, ARRAY_SIZE(sai_parents),
STM32F4_RCC_DCKCFGR, 22, 3,
STM32F4_RCC_APB2ENR, 22,
CLK_SET_RATE_PARENT
},
};
static const struct stm32_aux_clk stm32f469_aux_clk[] = {
{
CLK_LCD, "lcd-tft", lcd_parent, ARRAY_SIZE(lcd_parent),
NO_MUX, 0, 0,
STM32F4_RCC_APB2ENR, 26,
CLK_SET_RATE_PARENT
},
{
CLK_I2S, "i2s", i2s_parents, ARRAY_SIZE(i2s_parents),
STM32F4_RCC_CFGR, 23, 1,
NO_GATE, 0,
CLK_SET_RATE_PARENT
},
{
CLK_SAI1, "sai1-a", sai_parents, ARRAY_SIZE(sai_parents),
STM32F4_RCC_DCKCFGR, 20, 3,
STM32F4_RCC_APB2ENR, 22,
CLK_SET_RATE_PARENT
},
{
CLK_SAI2, "sai1-b", sai_parents, ARRAY_SIZE(sai_parents),
STM32F4_RCC_DCKCFGR, 22, 3,
STM32F4_RCC_APB2ENR, 22,
CLK_SET_RATE_PARENT
},
{
NO_IDX, "pll48", pll48_parents, ARRAY_SIZE(pll48_parents),
STM32F4_RCC_DCKCFGR, 27, 1,
NO_GATE, 0,
0
},
{
NO_IDX, "sdmux", sdmux_parents, ARRAY_SIZE(sdmux_parents),
STM32F4_RCC_DCKCFGR, 28, 1,
NO_GATE, 0,
0
},
{
CLK_F469_DSI, "dsi", dsi_parent, ARRAY_SIZE(dsi_parent),
STM32F4_RCC_DCKCFGR, 29, 1,
STM32F4_RCC_APB2ENR, 27,
CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT
},
};
static const struct stm32_aux_clk stm32f746_aux_clk[] = {
{
CLK_LCD, "lcd-tft", lcd_parent, ARRAY_SIZE(lcd_parent),
NO_MUX, 0, 0,
STM32F4_RCC_APB2ENR, 26,
CLK_SET_RATE_PARENT
},
{
CLK_I2S, "i2s", i2s_parents, ARRAY_SIZE(i2s_parents),
STM32F4_RCC_CFGR, 23, 1,
NO_GATE, 0,
CLK_SET_RATE_PARENT
},
{
CLK_SAI1, "sai1_clk", sai_parents, ARRAY_SIZE(sai_parents),
STM32F4_RCC_DCKCFGR, 20, 3,
STM32F4_RCC_APB2ENR, 22,
CLK_SET_RATE_PARENT
},
{
CLK_SAI2, "sai2_clk", sai_parents, ARRAY_SIZE(sai_parents),
STM32F4_RCC_DCKCFGR, 22, 3,
STM32F4_RCC_APB2ENR, 23,
CLK_SET_RATE_PARENT
},
{
NO_IDX, "pll48", pll48_parents, ARRAY_SIZE(pll48_parents),
STM32F7_RCC_DCKCFGR2, 27, 1,
NO_GATE, 0,
0
},
{
NO_IDX, "sdmux", sdmux_parents, ARRAY_SIZE(sdmux_parents),
STM32F7_RCC_DCKCFGR2, 28, 1,
NO_GATE, 0,
0
},
{
CLK_HDMI_CEC, "hdmi-cec",
hdmi_parents, ARRAY_SIZE(hdmi_parents),
STM32F7_RCC_DCKCFGR2, 26, 1,
NO_GATE, 0,
0
},
{
CLK_SPDIF, "spdif-rx",
spdif_parent, ARRAY_SIZE(spdif_parent),
STM32F7_RCC_DCKCFGR2, 22, 3,
STM32F4_RCC_APB2ENR, 23,
CLK_SET_RATE_PARENT
},
{
CLK_USART1, "usart1",
uart_parents1, ARRAY_SIZE(uart_parents1),
STM32F7_RCC_DCKCFGR2, 0, 3,
STM32F4_RCC_APB2ENR, 4,
CLK_SET_RATE_PARENT,
},
{
CLK_USART2, "usart2",
uart_parents2, ARRAY_SIZE(uart_parents1),
STM32F7_RCC_DCKCFGR2, 2, 3,
STM32F4_RCC_APB1ENR, 17,
CLK_SET_RATE_PARENT,
},
{
CLK_USART3, "usart3",
uart_parents2, ARRAY_SIZE(uart_parents1),
STM32F7_RCC_DCKCFGR2, 4, 3,
STM32F4_RCC_APB1ENR, 18,
CLK_SET_RATE_PARENT,
},
{
CLK_UART4, "uart4",
uart_parents2, ARRAY_SIZE(uart_parents1),
STM32F7_RCC_DCKCFGR2, 6, 3,
STM32F4_RCC_APB1ENR, 19,
CLK_SET_RATE_PARENT,
},
{
CLK_UART5, "uart5",
uart_parents2, ARRAY_SIZE(uart_parents1),
STM32F7_RCC_DCKCFGR2, 8, 3,
STM32F4_RCC_APB1ENR, 20,
CLK_SET_RATE_PARENT,
},
{
CLK_USART6, "usart6",
uart_parents1, ARRAY_SIZE(uart_parents1),
STM32F7_RCC_DCKCFGR2, 10, 3,
STM32F4_RCC_APB2ENR, 5,
CLK_SET_RATE_PARENT,
},
{
CLK_UART7, "uart7",
uart_parents2, ARRAY_SIZE(uart_parents1),
STM32F7_RCC_DCKCFGR2, 12, 3,
STM32F4_RCC_APB1ENR, 30,
CLK_SET_RATE_PARENT,
},
{
CLK_UART8, "uart8",
uart_parents2, ARRAY_SIZE(uart_parents1),
STM32F7_RCC_DCKCFGR2, 14, 3,
STM32F4_RCC_APB1ENR, 31,
CLK_SET_RATE_PARENT,
},
{
CLK_I2C1, "i2c1",
i2c_parents, ARRAY_SIZE(i2c_parents),
STM32F7_RCC_DCKCFGR2, 16, 3,
STM32F4_RCC_APB1ENR, 21,
CLK_SET_RATE_PARENT,
},
{
CLK_I2C2, "i2c2",
i2c_parents, ARRAY_SIZE(i2c_parents),
STM32F7_RCC_DCKCFGR2, 18, 3,
STM32F4_RCC_APB1ENR, 22,
CLK_SET_RATE_PARENT,
},
{
CLK_I2C3, "i2c3",
i2c_parents, ARRAY_SIZE(i2c_parents),
STM32F7_RCC_DCKCFGR2, 20, 3,
STM32F4_RCC_APB1ENR, 23,
CLK_SET_RATE_PARENT,
},
{
CLK_I2C4, "i2c4",
i2c_parents, ARRAY_SIZE(i2c_parents),
STM32F7_RCC_DCKCFGR2, 22, 3,
STM32F4_RCC_APB1ENR, 24,
CLK_SET_RATE_PARENT,
},
{
CLK_LPTIMER, "lptim1",
lptim_parent, ARRAY_SIZE(lptim_parent),
STM32F7_RCC_DCKCFGR2, 24, 3,
STM32F4_RCC_APB1ENR, 9,
CLK_SET_RATE_PARENT
},
};
static const struct stm32_aux_clk stm32f769_aux_clk[] = {
{
CLK_LCD, "lcd-tft", lcd_parent, ARRAY_SIZE(lcd_parent),
NO_MUX, 0, 0,
STM32F4_RCC_APB2ENR, 26,
CLK_SET_RATE_PARENT
},
{
CLK_I2S, "i2s", i2s_parents, ARRAY_SIZE(i2s_parents),
STM32F4_RCC_CFGR, 23, 1,
NO_GATE, 0,
CLK_SET_RATE_PARENT
},
{
CLK_SAI1, "sai1_clk", sai_parents, ARRAY_SIZE(sai_parents),
STM32F4_RCC_DCKCFGR, 20, 3,
STM32F4_RCC_APB2ENR, 22,
CLK_SET_RATE_PARENT
},
{
CLK_SAI2, "sai2_clk", sai_parents, ARRAY_SIZE(sai_parents),
STM32F4_RCC_DCKCFGR, 22, 3,
STM32F4_RCC_APB2ENR, 23,
CLK_SET_RATE_PARENT
},
{
NO_IDX, "pll48", pll48_parents, ARRAY_SIZE(pll48_parents),
STM32F7_RCC_DCKCFGR2, 27, 1,
NO_GATE, 0,
0
},
{
NO_IDX, "sdmux1", sdmux_parents, ARRAY_SIZE(sdmux_parents),
STM32F7_RCC_DCKCFGR2, 28, 1,
NO_GATE, 0,
0
},
{
NO_IDX, "sdmux2", sdmux_parents, ARRAY_SIZE(sdmux_parents),
STM32F7_RCC_DCKCFGR2, 29, 1,
NO_GATE, 0,
0
},
{
CLK_HDMI_CEC, "hdmi-cec",
hdmi_parents, ARRAY_SIZE(hdmi_parents),
STM32F7_RCC_DCKCFGR2, 26, 1,
NO_GATE, 0,
0
},
{
CLK_SPDIF, "spdif-rx",
spdif_parent, ARRAY_SIZE(spdif_parent),
STM32F7_RCC_DCKCFGR2, 22, 3,
STM32F4_RCC_APB2ENR, 23,
CLK_SET_RATE_PARENT
},
{
CLK_USART1, "usart1",
uart_parents1, ARRAY_SIZE(uart_parents1),
STM32F7_RCC_DCKCFGR2, 0, 3,
STM32F4_RCC_APB2ENR, 4,
CLK_SET_RATE_PARENT,
},
{
CLK_USART2, "usart2",
uart_parents2, ARRAY_SIZE(uart_parents1),
STM32F7_RCC_DCKCFGR2, 2, 3,
STM32F4_RCC_APB1ENR, 17,
CLK_SET_RATE_PARENT,
},
{
CLK_USART3, "usart3",
uart_parents2, ARRAY_SIZE(uart_parents1),
STM32F7_RCC_DCKCFGR2, 4, 3,
STM32F4_RCC_APB1ENR, 18,
CLK_SET_RATE_PARENT,
},
{
CLK_UART4, "uart4",
uart_parents2, ARRAY_SIZE(uart_parents1),
STM32F7_RCC_DCKCFGR2, 6, 3,
STM32F4_RCC_APB1ENR, 19,
CLK_SET_RATE_PARENT,
},
{
CLK_UART5, "uart5",
uart_parents2, ARRAY_SIZE(uart_parents1),
STM32F7_RCC_DCKCFGR2, 8, 3,
STM32F4_RCC_APB1ENR, 20,
CLK_SET_RATE_PARENT,
},
{
CLK_USART6, "usart6",
uart_parents1, ARRAY_SIZE(uart_parents1),
STM32F7_RCC_DCKCFGR2, 10, 3,
STM32F4_RCC_APB2ENR, 5,
CLK_SET_RATE_PARENT,
},
{
CLK_UART7, "uart7",
uart_parents2, ARRAY_SIZE(uart_parents1),
STM32F7_RCC_DCKCFGR2, 12, 3,
STM32F4_RCC_APB1ENR, 30,
CLK_SET_RATE_PARENT,
},
{
CLK_UART8, "uart8",
uart_parents2, ARRAY_SIZE(uart_parents1),
STM32F7_RCC_DCKCFGR2, 14, 3,
STM32F4_RCC_APB1ENR, 31,
CLK_SET_RATE_PARENT,
},
{
CLK_I2C1, "i2c1",
i2c_parents, ARRAY_SIZE(i2c_parents),
STM32F7_RCC_DCKCFGR2, 16, 3,
STM32F4_RCC_APB1ENR, 21,
CLK_SET_RATE_PARENT,
},
{
CLK_I2C2, "i2c2",
i2c_parents, ARRAY_SIZE(i2c_parents),
STM32F7_RCC_DCKCFGR2, 18, 3,
STM32F4_RCC_APB1ENR, 22,
CLK_SET_RATE_PARENT,
},
{
CLK_I2C3, "i2c3",
i2c_parents, ARRAY_SIZE(i2c_parents),
STM32F7_RCC_DCKCFGR2, 20, 3,
STM32F4_RCC_APB1ENR, 23,
CLK_SET_RATE_PARENT,
},
{
CLK_I2C4, "i2c4",
i2c_parents, ARRAY_SIZE(i2c_parents),
STM32F7_RCC_DCKCFGR2, 22, 3,
STM32F4_RCC_APB1ENR, 24,
CLK_SET_RATE_PARENT,
},
{
CLK_LPTIMER, "lptim1",
lptim_parent, ARRAY_SIZE(lptim_parent),
STM32F7_RCC_DCKCFGR2, 24, 3,
STM32F4_RCC_APB1ENR, 9,
CLK_SET_RATE_PARENT
},
{
CLK_F769_DSI, "dsi",
dsi_parent, ARRAY_SIZE(dsi_parent),
STM32F7_RCC_DCKCFGR2, 0, 1,
STM32F4_RCC_APB2ENR, 27,
CLK_SET_RATE_PARENT
},
{
CLK_DFSDM1, "dfsdm1",
dfsdm1_src, ARRAY_SIZE(dfsdm1_src),
STM32F4_RCC_DCKCFGR, 25, 1,
STM32F4_RCC_APB2ENR, 29,
CLK_SET_RATE_PARENT
},
{
CLK_ADFSDM1, "adfsdm1",
adfsdm1_parent, ARRAY_SIZE(adfsdm1_parent),
STM32F4_RCC_DCKCFGR, 26, 1,
STM32F4_RCC_APB2ENR, 29,
CLK_SET_RATE_PARENT
},
};
static const struct stm32f4_clk_data stm32f429_clk_data = {
.end_primary = END_PRIMARY_CLK,
.gates_data = stm32f429_gates,
.gates_map = stm32f42xx_gate_map,
.gates_num = ARRAY_SIZE(stm32f429_gates),
.pll_data = stm32f429_pll,
.aux_clk = stm32f429_aux_clk,
.aux_clk_num = ARRAY_SIZE(stm32f429_aux_clk),
};
static const struct stm32f4_clk_data stm32f469_clk_data = {
.end_primary = END_PRIMARY_CLK,
.gates_data = stm32f469_gates,
.gates_map = stm32f46xx_gate_map,
.gates_num = ARRAY_SIZE(stm32f469_gates),
.pll_data = stm32f469_pll,
.aux_clk = stm32f469_aux_clk,
.aux_clk_num = ARRAY_SIZE(stm32f469_aux_clk),
};
static const struct stm32f4_clk_data stm32f746_clk_data = {
.end_primary = END_PRIMARY_CLK_F7,
.gates_data = stm32f746_gates,
.gates_map = stm32f746_gate_map,
.gates_num = ARRAY_SIZE(stm32f746_gates),
.pll_data = stm32f469_pll,
.aux_clk = stm32f746_aux_clk,
.aux_clk_num = ARRAY_SIZE(stm32f746_aux_clk),
};
static const struct stm32f4_clk_data stm32f769_clk_data = {
.end_primary = END_PRIMARY_CLK_F7,
.gates_data = stm32f769_gates,
.gates_map = stm32f769_gate_map,
.gates_num = ARRAY_SIZE(stm32f769_gates),
.pll_data = stm32f469_pll,
.aux_clk = stm32f769_aux_clk,
.aux_clk_num = ARRAY_SIZE(stm32f769_aux_clk),
};
static const struct of_device_id stm32f4_of_match[] = {
{
.compatible = "st,stm32f42xx-rcc",
.data = &stm32f429_clk_data
},
{
.compatible = "st,stm32f469-rcc",
.data = &stm32f469_clk_data
},
{
.compatible = "st,stm32f746-rcc",
.data = &stm32f746_clk_data
},
{
.compatible = "st,stm32f769-rcc",
.data = &stm32f769_clk_data
},
{}
};
static struct clk_hw *stm32_register_aux_clk(const char *name,
const char * const *parent_names, int num_parents,
int offset_mux, u8 shift, u8 mask,
int offset_gate, u8 bit_idx,
unsigned long flags, spinlock_t *lock)
{
struct clk_hw *hw;
struct clk_gate *gate = NULL;
struct clk_mux *mux = NULL;
struct clk_hw *mux_hw = NULL, *gate_hw = NULL;
const struct clk_ops *mux_ops = NULL, *gate_ops = NULL;
if (offset_gate != NO_GATE) {
gate = kzalloc(sizeof(*gate), GFP_KERNEL);
if (!gate) {
hw = ERR_PTR(-EINVAL);
goto fail;
}
gate->reg = base + offset_gate;
gate->bit_idx = bit_idx;
gate->flags = 0;
gate->lock = lock;
gate_hw = &gate->hw;
gate_ops = &clk_gate_ops;
}
if (offset_mux != NO_MUX) {
mux = kzalloc(sizeof(*mux), GFP_KERNEL);
if (!mux) {
hw = ERR_PTR(-EINVAL);
goto fail;
}
mux->reg = base + offset_mux;
mux->shift = shift;
mux->mask = mask;
mux->flags = 0;
mux_hw = &mux->hw;
mux_ops = &clk_mux_ops;
}
if (mux_hw == NULL && gate_hw == NULL) {
hw = ERR_PTR(-EINVAL);
goto fail;
}
hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
mux_hw, mux_ops,
NULL, NULL,
gate_hw, gate_ops,
flags);
fail:
if (IS_ERR(hw)) {
kfree(gate);
kfree(mux);
}
return hw;
}
static void __init stm32f4_rcc_init(struct device_node *np)
{
const char *hse_clk, *i2s_in_clk;
int n;
const struct of_device_id *match;
const struct stm32f4_clk_data *data;
unsigned long pllm;
struct clk_hw *pll_src_hw;
base = of_iomap(np, 0);
if (!base) {
pr_err("%pOFn: unable to map resource\n", np);
return;
}
pdrm = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
if (IS_ERR(pdrm)) {
pdrm = NULL;
pr_warn("%s: Unable to get syscfg\n", __func__);
}
match = of_match_node(stm32f4_of_match, np);
if (WARN_ON(!match))
return;
data = match->data;
stm32fx_end_primary_clk = data->end_primary;
clks = kmalloc_array(data->gates_num + stm32fx_end_primary_clk,
sizeof(*clks), GFP_KERNEL);
if (!clks)
goto fail;
stm32f4_gate_map = data->gates_map;
hse_clk = of_clk_get_parent_name(np, 0);
dsi_parent[0] = hse_clk;
pllsrc_parent[1] = hse_clk;
i2s_in_clk = of_clk_get_parent_name(np, 1);
i2s_parents[1] = i2s_in_clk;
sai_parents[2] = i2s_in_clk;
if (of_device_is_compatible(np, "st,stm32f769-rcc")) {
clk_hw_register_gate(NULL, "dfsdm1_apb", "apb2_div", 0,
base + STM32F4_RCC_APB2ENR, 29,
CLK_IGNORE_UNUSED, &stm32f4_clk_lock);
dsi_parent[0] = pll_src;
sai_parents[3] = pll_src;
}
clks[CLK_HSI] = clk_hw_register_fixed_rate_with_accuracy(NULL, "hsi",
NULL, 0, 16000000, 160000);
pll_src_hw = clk_hw_register_mux(NULL, pll_src, pllsrc_parent,
ARRAY_SIZE(pllsrc_parent), 0,
base + STM32F4_RCC_PLLCFGR, 22, 1, 0,
&stm32f4_clk_lock);
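/* PLLM (PLLCFGR bits 5:0) pre-divides the PLL source to produce vco_in */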
pllm = readl(base + STM32F4_RCC_PLLCFGR) & 0x3f;
clk_hw_register_fixed_factor(NULL, "vco_in", pll_src,
0, 1, pllm);
stm32f4_rcc_register_pll("vco_in", &data->pll_data[0],
&stm32f4_clk_lock);
clks[PLL_VCO_I2S] = stm32f4_rcc_register_pll("vco_in",
&data->pll_data[1], &stm32f4_clk_lock);
clks[PLL_VCO_SAI] = stm32f4_rcc_register_pll("vco_in",
&data->pll_data[2], &stm32f4_clk_lock);
for (n = 0; n < MAX_POST_DIV; n++) {
const struct stm32f4_pll_post_div_data *post_div;
struct clk_hw *hw;
post_div = &post_div_data[n];
hw = clk_register_pll_div(post_div->name,
post_div->parent,
post_div->flag,
base + post_div->offset,
post_div->shift,
post_div->width,
post_div->flag_div,
post_div->div_table,
clks[post_div->pll_idx],
&stm32f4_clk_lock);
if (post_div->idx != NO_IDX)
clks[post_div->idx] = hw;
}
sys_parents[1] = hse_clk;
clks[CLK_SYSCLK] = clk_hw_register_mux_table(
NULL, "sys", sys_parents, ARRAY_SIZE(sys_parents), 0,
base + STM32F4_RCC_CFGR, 0, 3, 0, NULL, &stm32f4_clk_lock);
clk_register_divider_table(NULL, "ahb_div", "sys",
CLK_SET_RATE_PARENT, base + STM32F4_RCC_CFGR,
4, 4, 0, ahb_div_table, &stm32f4_clk_lock);
clk_register_divider_table(NULL, "apb1_div", "ahb_div",
CLK_SET_RATE_PARENT, base + STM32F4_RCC_CFGR,
10, 3, 0, apb_div_table, &stm32f4_clk_lock);
clk_register_apb_mul(NULL, "apb1_mul", "apb1_div",
CLK_SET_RATE_PARENT, 12);
clk_register_divider_table(NULL, "apb2_div", "ahb_div",
CLK_SET_RATE_PARENT, base + STM32F4_RCC_CFGR,
13, 3, 0, apb_div_table, &stm32f4_clk_lock);
clk_register_apb_mul(NULL, "apb2_mul", "apb2_div",
CLK_SET_RATE_PARENT, 15);
clks[SYSTICK] = clk_hw_register_fixed_factor(NULL, "systick", "ahb_div",
0, 1, 8);
clks[FCLK] = clk_hw_register_fixed_factor(NULL, "fclk", "ahb_div",
0, 1, 1);
for (n = 0; n < data->gates_num; n++) {
const struct stm32f4_gate_data *gd;
unsigned int secondary;
int idx;
gd = &data->gates_data[n];
secondary = 8 * (gd->offset - STM32F4_RCC_AHB1ENR) +
gd->bit_idx;
idx = stm32f4_rcc_lookup_clk_idx(0, secondary);
if (idx < 0)
goto fail;
clks[idx] = clk_hw_register_gate(
NULL, gd->name, gd->parent_name, gd->flags,
base + gd->offset, gd->bit_idx, 0, &stm32f4_clk_lock);
if (IS_ERR(clks[idx])) {
pr_err("%pOF: Unable to register leaf clock %s\n",
np, gd->name);
goto fail;
}
}
clks[CLK_LSI] = clk_register_rgate(NULL, "lsi", "clk-lsi", 0,
base + STM32F4_RCC_CSR, 0, 1, 0, &stm32f4_clk_lock);
if (IS_ERR(clks[CLK_LSI])) {
pr_err("Unable to register lsi clock\n");
goto fail;
}
clks[CLK_LSE] = clk_register_rgate(NULL, "lse", "clk-lse", 0,
base + STM32F4_RCC_BDCR, 0, 1, 0, &stm32f4_clk_lock);
if (IS_ERR(clks[CLK_LSE])) {
pr_err("Unable to register lse clock\n");
goto fail;
}
clks[CLK_HSE_RTC] = clk_hw_register_divider(NULL, "hse-rtc", "clk-hse",
0, base + STM32F4_RCC_CFGR, 16, 5, 0,
&stm32f4_clk_lock);
if (IS_ERR(clks[CLK_HSE_RTC])) {
pr_err("Unable to register hse-rtc clock\n");
goto fail;
}
clks[CLK_RTC] = stm32_register_cclk(NULL, "rtc", rtc_parents, 4,
base + STM32F4_RCC_BDCR, 15, 8, 0, &stm32f4_clk_lock);
if (IS_ERR(clks[CLK_RTC])) {
pr_err("Unable to register rtc clock\n");
goto fail;
}
for (n = 0; n < data->aux_clk_num; n++) {
const struct stm32_aux_clk *aux_clk;
struct clk_hw *hw;
aux_clk = &data->aux_clk[n];
hw = stm32_register_aux_clk(aux_clk->name,
aux_clk->parent_names, aux_clk->num_parents,
aux_clk->offset_mux, aux_clk->shift,
aux_clk->mask, aux_clk->offset_gate,
aux_clk->bit_idx, aux_clk->flags,
&stm32f4_clk_lock);
if (IS_ERR(hw)) {
pr_warn("Unable to register %s clk\n", aux_clk->name);
continue;
}
if (aux_clk->idx != NO_IDX)
clks[aux_clk->idx] = hw;
}
if (of_device_is_compatible(np, "st,stm32f746-rcc")) {
clk_hw_register_fixed_factor(NULL, "hsi_div488", "hsi", 0,
1, 488);
clks[CLK_PLL_SRC] = pll_src_hw;
}
of_clk_add_hw_provider(np, stm32f4_rcc_lookup_clk, NULL);
return;
fail:
kfree(clks);
iounmap(base);
}
CLK_OF_DECLARE_DRIVER(stm32f42xx_rcc, "st,stm32f42xx-rcc", stm32f4_rcc_init);
CLK_OF_DECLARE_DRIVER(stm32f46xx_rcc, "st,stm32f469-rcc", stm32f4_rcc_init);
CLK_OF_DECLARE_DRIVER(stm32f746_rcc, "st,stm32f746-rcc", stm32f4_rcc_init);
CLK_OF_DECLARE_DRIVER(stm32f769_rcc, "st,stm32f769-rcc", stm32f4_rcc_init);
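/*
 * Consumer sketch (editor's addition, not part of the driver): a device
 * node would reference one of these clocks as, e.g.,
 *
 *	clocks = <&rcc 0 172>;	// spi1 gate, see the lookup helper above
 *
 * and its driver would then use the generic clk API:
 *
 *	struct clk *clk = devm_clk_get(&pdev->dev, NULL);
 *
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *	return clk_prepare_enable(clk);
 */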
| linux-master | drivers/clk/clk-stm32f4.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2010-2011 Canonical Ltd <[email protected]>
* Copyright (C) 2011-2012 Linaro Ltd <[email protected]>
*
* Standard functionality for the common clock API. See Documentation/driver-api/clk.rst
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/clk-conf.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/clkdev.h>
#include "clk.h"
static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);
static struct task_struct *prepare_owner;
static struct task_struct *enable_owner;
static int prepare_refcnt;
static int enable_refcnt;
static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);
static const struct hlist_head *all_lists[] = {
&clk_root_list,
&clk_orphan_list,
NULL,
};
/*** private data structures ***/
struct clk_parent_map {
const struct clk_hw *hw;
struct clk_core *core;
const char *fw_name;
const char *name;
int index;
};
struct clk_core {
const char *name;
const struct clk_ops *ops;
struct clk_hw *hw;
struct module *owner;
struct device *dev;
struct device_node *of_node;
struct clk_core *parent;
struct clk_parent_map *parents;
u8 num_parents;
u8 new_parent_index;
unsigned long rate;
unsigned long req_rate;
unsigned long new_rate;
struct clk_core *new_parent;
struct clk_core *new_child;
unsigned long flags;
bool orphan;
bool rpm_enabled;
unsigned int enable_count;
unsigned int prepare_count;
unsigned int protect_count;
unsigned long min_rate;
unsigned long max_rate;
unsigned long accuracy;
int phase;
struct clk_duty duty;
struct hlist_head children;
struct hlist_node child_node;
struct hlist_head clks;
unsigned int notifier_count;
#ifdef CONFIG_DEBUG_FS
struct dentry *dentry;
struct hlist_node debug_node;
#endif
struct kref ref;
};
#define CREATE_TRACE_POINTS
#include <trace/events/clk.h>
struct clk {
struct clk_core *core;
struct device *dev;
const char *dev_id;
const char *con_id;
unsigned long min_rate;
unsigned long max_rate;
unsigned int exclusive_count;
struct hlist_node clks_node;
};
/*** runtime pm ***/
static int clk_pm_runtime_get(struct clk_core *core)
{
if (!core->rpm_enabled)
return 0;
return pm_runtime_resume_and_get(core->dev);
}
static void clk_pm_runtime_put(struct clk_core *core)
{
if (!core->rpm_enabled)
return;
pm_runtime_put_sync(core->dev);
}
/*** locking ***/
static void clk_prepare_lock(void)
{
if (!mutex_trylock(&prepare_lock)) {
if (prepare_owner == current) {
prepare_refcnt++;
return;
}
mutex_lock(&prepare_lock);
}
WARN_ON_ONCE(prepare_owner != NULL);
WARN_ON_ONCE(prepare_refcnt != 0);
prepare_owner = current;
prepare_refcnt = 1;
}
static void clk_prepare_unlock(void)
{
WARN_ON_ONCE(prepare_owner != current);
WARN_ON_ONCE(prepare_refcnt == 0);
if (--prepare_refcnt)
return;
prepare_owner = NULL;
mutex_unlock(&prepare_lock);
}
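/*
 * Note (editor's addition): both framework locks are re-entrant. Each
 * records its owner task and a recursion count, because clk ops and
 * notifiers may legitimately call back into the clk API while the same
 * task already holds the lock.
 */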
static unsigned long clk_enable_lock(void)
__acquires(enable_lock)
{
unsigned long flags;
/*
* On UP systems, spin_trylock_irqsave() always returns true, even if
* we already hold the lock. So, in that case, we rely only on
* reference counting.
*/
if (!IS_ENABLED(CONFIG_SMP) ||
!spin_trylock_irqsave(&enable_lock, flags)) {
if (enable_owner == current) {
enable_refcnt++;
__acquire(enable_lock);
if (!IS_ENABLED(CONFIG_SMP))
local_save_flags(flags);
return flags;
}
spin_lock_irqsave(&enable_lock, flags);
}
WARN_ON_ONCE(enable_owner != NULL);
WARN_ON_ONCE(enable_refcnt != 0);
enable_owner = current;
enable_refcnt = 1;
return flags;
}
static void clk_enable_unlock(unsigned long flags)
__releases(enable_lock)
{
WARN_ON_ONCE(enable_owner != current);
WARN_ON_ONCE(enable_refcnt == 0);
if (--enable_refcnt) {
__release(enable_lock);
return;
}
enable_owner = NULL;
spin_unlock_irqrestore(&enable_lock, flags);
}
static bool clk_core_rate_is_protected(struct clk_core *core)
{
return core->protect_count;
}
static bool clk_core_is_prepared(struct clk_core *core)
{
bool ret = false;
/*
 * .is_prepared is optional for clocks that can prepare;
 * fall back to the software usage counter if it is missing.
 */
if (!core->ops->is_prepared)
return core->prepare_count;
if (!clk_pm_runtime_get(core)) {
ret = core->ops->is_prepared(core->hw);
clk_pm_runtime_put(core);
}
return ret;
}
static bool clk_core_is_enabled(struct clk_core *core)
{
bool ret = false;
/*
 * .is_enabled is only mandatory for clocks that gate;
 * fall back to the software usage counter if .is_enabled is missing.
 */
if (!core->ops->is_enabled)
return core->enable_count;
/*
* Check if clock controller's device is runtime active before
* calling .is_enabled callback. If not, assume that clock is
* disabled, because we might be called from atomic context, from
* which pm_runtime_get() is not allowed.
* This function is called mainly from clk_disable_unused_subtree,
* which ensures proper runtime pm activation of controller before
* taking enable spinlock, but the below check is needed if one tries
* to call it from other places.
*/
if (core->rpm_enabled) {
pm_runtime_get_noresume(core->dev);
if (!pm_runtime_active(core->dev)) {
ret = false;
goto done;
}
}
/*
* This could be called with the enable lock held, or from atomic
* context. If the parent isn't enabled already, we can't do
* anything here. We can also assume this clock isn't enabled.
*/
if ((core->flags & CLK_OPS_PARENT_ENABLE) && core->parent)
if (!clk_core_is_enabled(core->parent)) {
ret = false;
goto done;
}
ret = core->ops->is_enabled(core->hw);
done:
if (core->rpm_enabled)
pm_runtime_put(core->dev);
return ret;
}
/*** helper functions ***/
const char *__clk_get_name(const struct clk *clk)
{
return !clk ? NULL : clk->core->name;
}
EXPORT_SYMBOL_GPL(__clk_get_name);
const char *clk_hw_get_name(const struct clk_hw *hw)
{
return hw->core->name;
}
EXPORT_SYMBOL_GPL(clk_hw_get_name);
struct clk_hw *__clk_get_hw(struct clk *clk)
{
return !clk ? NULL : clk->core->hw;
}
EXPORT_SYMBOL_GPL(__clk_get_hw);
unsigned int clk_hw_get_num_parents(const struct clk_hw *hw)
{
return hw->core->num_parents;
}
EXPORT_SYMBOL_GPL(clk_hw_get_num_parents);
struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw)
{
return hw->core->parent ? hw->core->parent->hw : NULL;
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent);
static struct clk_core *__clk_lookup_subtree(const char *name,
struct clk_core *core)
{
struct clk_core *child;
struct clk_core *ret;
if (!strcmp(core->name, name))
return core;
hlist_for_each_entry(child, &core->children, child_node) {
ret = __clk_lookup_subtree(name, child);
if (ret)
return ret;
}
return NULL;
}
static struct clk_core *clk_core_lookup(const char *name)
{
struct clk_core *root_clk;
struct clk_core *ret;
if (!name)
return NULL;
/* search the 'proper' clk tree first */
hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
ret = __clk_lookup_subtree(name, root_clk);
if (ret)
return ret;
}
/* if not found, then search the orphan tree */
hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
ret = __clk_lookup_subtree(name, root_clk);
if (ret)
return ret;
}
return NULL;
}
#ifdef CONFIG_OF
static int of_parse_clkspec(const struct device_node *np, int index,
const char *name, struct of_phandle_args *out_args);
static struct clk_hw *
of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec);
#else
static inline int of_parse_clkspec(const struct device_node *np, int index,
const char *name,
struct of_phandle_args *out_args)
{
return -ENOENT;
}
static inline struct clk_hw *
of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
{
return ERR_PTR(-ENOENT);
}
#endif
/**
* clk_core_get - Find the clk_core parent of a clk
* @core: clk to find parent of
* @p_index: parent index to search for
*
* This is the preferred method for clk providers to find the parent of a
* clk when that parent is external to the clk controller. The parent_names
* array is indexed and treated as a local name matching a string in the device
* node's 'clock-names' property or as the 'con_id' matching the device's
* dev_name() in a clk_lookup. This allows clk providers to use their own
* namespace instead of looking for a globally unique parent string.
*
* For example the following DT snippet would allow a clock registered by the
* clock-controller@c001 that has a clk_init_data::parent_data array
* with 'xtal' in the 'name' member to find the clock provided by the
* clock-controller@f00abcd without needing to get the globally unique name of
* the xtal clk.
*
* parent: clock-controller@f00abcd {
* reg = <0xf00abcd 0xabcd>;
* #clock-cells = <0>;
* };
*
* clock-controller@c001 {
* reg = <0xc001 0xf00d>;
* clocks = <&parent>;
* clock-names = "xtal";
* #clock-cells = <1>;
* };
*
* Returns: -ENOENT when the provider can't be found or the clk doesn't
* exist in the provider or the name can't be found in the DT node or
* in a clkdev lookup. NULL when the provider knows about the clk but it
* isn't provided on this system.
* A valid clk_core pointer when the clk can be found in the provider.
*/
static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
{
const char *name = core->parents[p_index].fw_name;
int index = core->parents[p_index].index;
struct clk_hw *hw = ERR_PTR(-ENOENT);
struct device *dev = core->dev;
const char *dev_id = dev ? dev_name(dev) : NULL;
struct device_node *np = core->of_node;
struct of_phandle_args clkspec;
if (np && (name || index >= 0) &&
!of_parse_clkspec(np, index, name, &clkspec)) {
hw = of_clk_get_hw_from_clkspec(&clkspec);
of_node_put(clkspec.np);
} else if (name) {
/*
* If the DT search above couldn't find the provider fallback to
* looking up via clkdev based clk_lookups.
*/
hw = clk_find_hw(dev_id, name);
}
if (IS_ERR(hw))
return ERR_CAST(hw);
return hw->core;
}
static void clk_core_fill_parent_index(struct clk_core *core, u8 index)
{
struct clk_parent_map *entry = &core->parents[index];
struct clk_core *parent;
if (entry->hw) {
parent = entry->hw->core;
} else {
parent = clk_core_get(core, index);
if (PTR_ERR(parent) == -ENOENT && entry->name)
parent = clk_core_lookup(entry->name);
}
/*
* We have a direct reference but it isn't registered yet?
* Orphan it and let clk_reparent() update the orphan status
* when the parent is registered.
*/
if (!parent)
parent = ERR_PTR(-EPROBE_DEFER);
/* Only cache it if it's not an error */
if (!IS_ERR(parent))
entry->core = parent;
}
static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
u8 index)
{
if (!core || index >= core->num_parents || !core->parents)
return NULL;
if (!core->parents[index].core)
clk_core_fill_parent_index(core, index);
return core->parents[index].core;
}
struct clk_hw *
clk_hw_get_parent_by_index(const struct clk_hw *hw, unsigned int index)
{
struct clk_core *parent;
parent = clk_core_get_parent_by_index(hw->core, index);
return !parent ? NULL : parent->hw;
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent_by_index);
unsigned int __clk_get_enable_count(struct clk *clk)
{
return !clk ? 0 : clk->core->enable_count;
}
static unsigned long clk_core_get_rate_nolock(struct clk_core *core)
{
if (!core)
return 0;
if (!core->num_parents || core->parent)
return core->rate;
/*
* Clk must have a parent because num_parents > 0 but the parent isn't
* known yet. Best to return 0 as the rate of this clk until we can
* properly recalc the rate based on the parent's rate.
*/
return 0;
}
unsigned long clk_hw_get_rate(const struct clk_hw *hw)
{
return clk_core_get_rate_nolock(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_get_rate);
static unsigned long clk_core_get_accuracy_no_lock(struct clk_core *core)
{
if (!core)
return 0;
return core->accuracy;
}
unsigned long clk_hw_get_flags(const struct clk_hw *hw)
{
return hw->core->flags;
}
EXPORT_SYMBOL_GPL(clk_hw_get_flags);
bool clk_hw_is_prepared(const struct clk_hw *hw)
{
return clk_core_is_prepared(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_is_prepared);
bool clk_hw_rate_is_protected(const struct clk_hw *hw)
{
return clk_core_rate_is_protected(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_rate_is_protected);
bool clk_hw_is_enabled(const struct clk_hw *hw)
{
return clk_core_is_enabled(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_is_enabled);
bool __clk_is_enabled(struct clk *clk)
{
if (!clk)
return false;
return clk_core_is_enabled(clk->core);
}
EXPORT_SYMBOL_GPL(__clk_is_enabled);
static bool mux_is_better_rate(unsigned long rate, unsigned long now,
unsigned long best, unsigned long flags)
{
if (flags & CLK_MUX_ROUND_CLOSEST)
return abs(now - rate) < abs(best - rate);
return now <= rate && now > best;
}
static void clk_core_init_rate_req(struct clk_core * const core,
struct clk_rate_request *req,
unsigned long rate);
static int clk_core_round_rate_nolock(struct clk_core *core,
struct clk_rate_request *req);
static bool clk_core_has_parent(struct clk_core *core, const struct clk_core *parent)
{
struct clk_core *tmp;
unsigned int i;
/* Optimize for the case where the requested parent is already the current parent. */
if (core->parent == parent)
return true;
for (i = 0; i < core->num_parents; i++) {
tmp = clk_core_get_parent_by_index(core, i);
if (!tmp)
continue;
if (tmp == parent)
return true;
}
return false;
}
static void
clk_core_forward_rate_req(struct clk_core *core,
const struct clk_rate_request *old_req,
struct clk_core *parent,
struct clk_rate_request *req,
unsigned long parent_rate)
{
if (WARN_ON(!clk_core_has_parent(core, parent)))
return;
clk_core_init_rate_req(parent, req, parent_rate);
if (req->min_rate < old_req->min_rate)
req->min_rate = old_req->min_rate;
if (req->max_rate > old_req->max_rate)
req->max_rate = old_req->max_rate;
}
static int
clk_core_determine_rate_no_reparent(struct clk_hw *hw,
struct clk_rate_request *req)
{
struct clk_core *core = hw->core;
struct clk_core *parent = core->parent;
unsigned long best;
int ret;
if (core->flags & CLK_SET_RATE_PARENT) {
struct clk_rate_request parent_req;
if (!parent) {
req->rate = 0;
return 0;
}
clk_core_forward_rate_req(core, req, parent, &parent_req,
req->rate);
trace_clk_rate_request_start(&parent_req);
ret = clk_core_round_rate_nolock(parent, &parent_req);
if (ret)
return ret;
trace_clk_rate_request_done(&parent_req);
best = parent_req.rate;
} else if (parent) {
best = clk_core_get_rate_nolock(parent);
} else {
best = clk_core_get_rate_nolock(core);
}
req->best_parent_rate = best;
req->rate = best;
return 0;
}
int clk_mux_determine_rate_flags(struct clk_hw *hw,
struct clk_rate_request *req,
unsigned long flags)
{
struct clk_core *core = hw->core, *parent, *best_parent = NULL;
int i, num_parents, ret;
unsigned long best = 0;
/* if NO_REPARENT flag set, pass through to current parent */
if (core->flags & CLK_SET_RATE_NO_REPARENT)
return clk_core_determine_rate_no_reparent(hw, req);
/* find the parent that can provide the fastest rate <= rate */
num_parents = core->num_parents;
for (i = 0; i < num_parents; i++) {
unsigned long parent_rate;
parent = clk_core_get_parent_by_index(core, i);
if (!parent)
continue;
if (core->flags & CLK_SET_RATE_PARENT) {
struct clk_rate_request parent_req;
clk_core_forward_rate_req(core, req, parent, &parent_req, req->rate);
trace_clk_rate_request_start(&parent_req);
ret = clk_core_round_rate_nolock(parent, &parent_req);
if (ret)
continue;
trace_clk_rate_request_done(&parent_req);
parent_rate = parent_req.rate;
} else {
parent_rate = clk_core_get_rate_nolock(parent);
}
if (mux_is_better_rate(req->rate, parent_rate,
best, flags)) {
best_parent = parent;
best = parent_rate;
}
}
if (!best_parent)
return -EINVAL;
req->best_parent_hw = best_parent->hw;
req->best_parent_rate = best;
req->rate = best;
return 0;
}
EXPORT_SYMBOL_GPL(clk_mux_determine_rate_flags);
struct clk *__clk_lookup(const char *name)
{
struct clk_core *core = clk_core_lookup(name);
return !core ? NULL : core->hw->clk;
}
static void clk_core_get_boundaries(struct clk_core *core,
unsigned long *min_rate,
unsigned long *max_rate)
{
struct clk *clk_user;
lockdep_assert_held(&prepare_lock);
*min_rate = core->min_rate;
*max_rate = core->max_rate;
hlist_for_each_entry(clk_user, &core->clks, clks_node) {
*min_rate = max(*min_rate, clk_user->min_rate);
*max_rate = min(*max_rate, clk_user->max_rate);
}
}
/*
* clk_hw_get_rate_range() - returns the clock rate range for a hw clk
* @hw: the hw clk we want to get the range from
* @min_rate: pointer to the variable that will hold the minimum
* @max_rate: pointer to the variable that will hold the maximum
*
* Fills the @min_rate and @max_rate variables with the minimum and
* maximum that clock can reach.
*/
void clk_hw_get_rate_range(struct clk_hw *hw, unsigned long *min_rate,
unsigned long *max_rate)
{
clk_core_get_boundaries(hw->core, min_rate, max_rate);
}
EXPORT_SYMBOL_GPL(clk_hw_get_rate_range);
static bool clk_core_check_boundaries(struct clk_core *core,
unsigned long min_rate,
unsigned long max_rate)
{
struct clk *user;
lockdep_assert_held(&prepare_lock);
if (min_rate > core->max_rate || max_rate < core->min_rate)
return false;
hlist_for_each_entry(user, &core->clks, clks_node)
if (min_rate > user->max_rate || max_rate < user->min_rate)
return false;
return true;
}
void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
unsigned long max_rate)
{
hw->core->min_rate = min_rate;
hw->core->max_rate = max_rate;
}
EXPORT_SYMBOL_GPL(clk_hw_set_rate_range);
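/*
 * Usage sketch (editor's illustration, not part of the original source):
 * a provider can publish hardware limits for one of its clocks right
 * after registering it; the limits below are hypothetical:
 *
 *	clk_hw_set_rate_range(hw, 1000000, 500000000);
 *
 * Consumer-side requests made via clk_set_rate_range() are then validated
 * against these provider limits.
 */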
/*
* __clk_mux_determine_rate - clk_ops::determine_rate implementation for a mux type clk
* @hw: mux type clk to determine rate on
* @req: rate request, also used to return preferred parent and frequencies
*
* Helper for finding best parent to provide a given frequency. This can be used
* directly as a determine_rate callback (e.g. for a mux), or from a more
* complex clock that may combine a mux with other operations.
*
* Returns: 0 on success, -EERROR value on error
*/
int __clk_mux_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
return clk_mux_determine_rate_flags(hw, req, 0);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);
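/*
 * Usage sketch (editor's illustration, not part of the original source):
 * a plain mux provider can plug this helper straight into its clk_ops;
 * the other ops names below are hypothetical:
 *
 *	static const struct clk_ops my_mux_ops = {
 *		.get_parent	= my_mux_get_parent,
 *		.set_parent	= my_mux_set_parent,
 *		.determine_rate	= __clk_mux_determine_rate,
 *	};
 */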
int __clk_mux_determine_rate_closest(struct clk_hw *hw,
struct clk_rate_request *req)
{
return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);
/*
* clk_hw_determine_rate_no_reparent - clk_ops::determine_rate implementation for a clk that doesn't reparent
* @hw: mux type clk to determine rate on
* @req: rate request, also used to return preferred frequency
*
* Helper for finding best parent rate to provide a given frequency.
* This can be used directly as a determine_rate callback (e.g. for a
* mux), or from a more complex clock that may combine a mux with other
* operations.
*
* Returns: 0 on success, -EERROR value on error
*/
int clk_hw_determine_rate_no_reparent(struct clk_hw *hw,
struct clk_rate_request *req)
{
return clk_core_determine_rate_no_reparent(hw, req);
}
EXPORT_SYMBOL_GPL(clk_hw_determine_rate_no_reparent);
/*** clk api ***/
static void clk_core_rate_unprotect(struct clk_core *core)
{
lockdep_assert_held(&prepare_lock);
if (!core)
return;
if (WARN(core->protect_count == 0,
"%s already unprotected\n", core->name))
return;
if (--core->protect_count > 0)
return;
clk_core_rate_unprotect(core->parent);
}
static int clk_core_rate_nuke_protect(struct clk_core *core)
{
int ret;
lockdep_assert_held(&prepare_lock);
if (!core)
return -EINVAL;
if (core->protect_count == 0)
return 0;
ret = core->protect_count;
core->protect_count = 1;
clk_core_rate_unprotect(core);
return ret;
}
/**
* clk_rate_exclusive_put - release exclusivity over clock rate control
* @clk: the clk over which the exclusivity is released
*
* clk_rate_exclusive_put() completes a critical section during which a clock
* consumer cannot tolerate any other consumer making any operation on the
* clock which could result in a rate change or rate glitch. Exclusive clocks
* cannot have their rate changed, either directly or indirectly due to changes
* further up the parent chain of clocks. As a result, clocks up the parent
* chain are also placed under the exclusive control of the calling consumer.
*
* If exclusivity is claimed more than once on a clock, even by the same
* consumer, the rate effectively gets locked as exclusivity can't be preempted.
*
* Calls to clk_rate_exclusive_put() must be balanced with calls to
* clk_rate_exclusive_get(). Calls to this function may sleep, and do not return
* error status.
*/
void clk_rate_exclusive_put(struct clk *clk)
{
if (!clk)
return;
clk_prepare_lock();
/*
* if there is something wrong with this consumer protect count, stop
* here before messing with the provider
*/
if (WARN_ON(clk->exclusive_count <= 0))
goto out;
clk_core_rate_unprotect(clk->core);
clk->exclusive_count--;
out:
clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_rate_exclusive_put);
static void clk_core_rate_protect(struct clk_core *core)
{
lockdep_assert_held(&prepare_lock);
if (!core)
return;
if (core->protect_count == 0)
clk_core_rate_protect(core->parent);
core->protect_count++;
}
static void clk_core_rate_restore_protect(struct clk_core *core, int count)
{
lockdep_assert_held(&prepare_lock);
if (!core)
return;
if (count == 0)
return;
clk_core_rate_protect(core);
core->protect_count = count;
}
/**
* clk_rate_exclusive_get - get exclusivity over the clk rate control
* @clk: the clk over which the exclusivity of rate control is requested
*
* clk_rate_exclusive_get() begins a critical section during which a clock
* consumer cannot tolerate any other consumer making any operation on the
* clock which could result in a rate change or rate glitch. Exclusive clocks
* cannot have their rate changed, either directly or indirectly due to changes
* further up the parent chain of clocks. As a result, clocks up the parent
* chain are also placed under the exclusive control of the calling consumer.
*
* If exclusivity is claimed more than once on a clock, even by the same
* consumer, the rate effectively gets locked as exclusivity can't be preempted.
*
* Calls to clk_rate_exclusive_get() should be balanced with calls to
* clk_rate_exclusive_put(). Calls to this function may sleep.
* Returns 0 on success, -EERROR otherwise
*/
int clk_rate_exclusive_get(struct clk *clk)
{
if (!clk)
return 0;
clk_prepare_lock();
clk_core_rate_protect(clk->core);
clk->exclusive_count++;
clk_prepare_unlock();
return 0;
}
EXPORT_SYMBOL_GPL(clk_rate_exclusive_get);
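/*
 * Usage sketch (editor's illustration, not part of the original source):
 * a consumer that cannot tolerate rate glitches during a critical section
 * brackets it with the get/put pair:
 *
 *	ret = clk_rate_exclusive_get(clk);
 *	if (ret)
 *		return ret;
 *	... glitch-sensitive work, rate is locked here ...
 *	clk_rate_exclusive_put(clk);
 */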
static void clk_core_unprepare(struct clk_core *core)
{
lockdep_assert_held(&prepare_lock);
if (!core)
return;
if (WARN(core->prepare_count == 0,
"%s already unprepared\n", core->name))
return;
if (WARN(core->prepare_count == 1 && core->flags & CLK_IS_CRITICAL,
"Unpreparing critical %s\n", core->name))
return;
if (core->flags & CLK_SET_RATE_GATE)
clk_core_rate_unprotect(core);
if (--core->prepare_count > 0)
return;
WARN(core->enable_count > 0, "Unpreparing enabled %s\n", core->name);
trace_clk_unprepare(core);
if (core->ops->unprepare)
core->ops->unprepare(core->hw);
trace_clk_unprepare_complete(core);
clk_core_unprepare(core->parent);
clk_pm_runtime_put(core);
}
static void clk_core_unprepare_lock(struct clk_core *core)
{
clk_prepare_lock();
clk_core_unprepare(core);
clk_prepare_unlock();
}
/**
* clk_unprepare - undo preparation of a clock source
* @clk: the clk being unprepared
*
* clk_unprepare may sleep, which differentiates it from clk_disable. In a
* simple case, clk_unprepare can be used instead of clk_disable to gate a clk
* if the operation may sleep. One example is a clk which is accessed over
* I2C. In the complex case a clk gate operation may require a fast and a slow
* part. It is for this reason that clk_unprepare and clk_disable are not
* mutually exclusive. In fact clk_disable must be called before clk_unprepare.
*/
void clk_unprepare(struct clk *clk)
{
if (IS_ERR_OR_NULL(clk))
return;
clk_core_unprepare_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_unprepare);
static int clk_core_prepare(struct clk_core *core)
{
int ret = 0;
lockdep_assert_held(&prepare_lock);
if (!core)
return 0;
if (core->prepare_count == 0) {
ret = clk_pm_runtime_get(core);
if (ret)
return ret;
ret = clk_core_prepare(core->parent);
if (ret)
goto runtime_put;
trace_clk_prepare(core);
if (core->ops->prepare)
ret = core->ops->prepare(core->hw);
trace_clk_prepare_complete(core);
if (ret)
goto unprepare;
}
core->prepare_count++;
/*
* CLK_SET_RATE_GATE is a special case of clock protection
* Instead of a consumer claiming exclusive rate control, it is
* actually the provider which prevents any consumer from making any
* operation which could result in a rate change or rate glitch while
* the clock is prepared.
*/
if (core->flags & CLK_SET_RATE_GATE)
clk_core_rate_protect(core);
return 0;
unprepare:
clk_core_unprepare(core->parent);
runtime_put:
clk_pm_runtime_put(core);
return ret;
}
static int clk_core_prepare_lock(struct clk_core *core)
{
int ret;
clk_prepare_lock();
ret = clk_core_prepare(core);
clk_prepare_unlock();
return ret;
}
/**
* clk_prepare - prepare a clock source
* @clk: the clk being prepared
*
* clk_prepare may sleep, which differentiates it from clk_enable. In a simple
* case, clk_prepare can be used instead of clk_enable to ungate a clk if the
* operation may sleep. One example is a clk which is accessed over I2C. In
* the complex case a clk ungate operation may require a fast and a slow part.
* It is for this reason that clk_prepare and clk_enable are not mutually
* exclusive. In fact clk_prepare must be called before clk_enable.
* Returns 0 on success, -EERROR otherwise.
*/
int clk_prepare(struct clk *clk)
{
if (!clk)
return 0;
return clk_core_prepare_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_prepare);
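/*
 * Usage sketch (editor's illustration, not part of the original source):
 * the usual consumer pattern pairs the sleepable and the atomic halves,
 * in this order:
 *
 *	ret = clk_prepare(clk);		// may sleep
 *	if (ret)
 *		return ret;
 *	ret = clk_enable(clk);		// must not sleep
 *	if (ret) {
 *		clk_unprepare(clk);
 *		return ret;
 *	}
 *	...
 *	clk_disable(clk);
 *	clk_unprepare(clk);
 *
 * clk_prepare_enable() and clk_disable_unprepare() wrap this pattern for
 * callers that can sleep at both points.
 */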
static void clk_core_disable(struct clk_core *core)
{
lockdep_assert_held(&enable_lock);
if (!core)
return;
if (WARN(core->enable_count == 0, "%s already disabled\n", core->name))
return;
if (WARN(core->enable_count == 1 && core->flags & CLK_IS_CRITICAL,
"Disabling critical %s\n", core->name))
return;
if (--core->enable_count > 0)
return;
trace_clk_disable(core);
if (core->ops->disable)
core->ops->disable(core->hw);
trace_clk_disable_complete(core);
clk_core_disable(core->parent);
}
static void clk_core_disable_lock(struct clk_core *core)
{
unsigned long flags;
flags = clk_enable_lock();
clk_core_disable(core);
clk_enable_unlock(flags);
}
/**
* clk_disable - gate a clock
* @clk: the clk being gated
*
* clk_disable must not sleep, which differentiates it from clk_unprepare. In
* a simple case, clk_disable can be used instead of clk_unprepare to gate a
* clk if the operation is fast and will never sleep. One example is a
* SoC-internal clk which is controlled via simple register writes. In the
* complex case a clk gate operation may require a fast and a slow part. It is
* for this reason that clk_unprepare and clk_disable are not mutually exclusive.
* In fact clk_disable must be called before clk_unprepare.
*/
void clk_disable(struct clk *clk)
{
if (IS_ERR_OR_NULL(clk))
return;
clk_core_disable_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_disable);
static int clk_core_enable(struct clk_core *core)
{
int ret = 0;
lockdep_assert_held(&enable_lock);
if (!core)
return 0;
if (WARN(core->prepare_count == 0,
"Enabling unprepared %s\n", core->name))
return -ESHUTDOWN;
if (core->enable_count == 0) {
ret = clk_core_enable(core->parent);
if (ret)
return ret;
trace_clk_enable(core);
if (core->ops->enable)
ret = core->ops->enable(core->hw);
trace_clk_enable_complete(core);
if (ret) {
clk_core_disable(core->parent);
return ret;
}
}
core->enable_count++;
return 0;
}
static int clk_core_enable_lock(struct clk_core *core)
{
unsigned long flags;
int ret;
flags = clk_enable_lock();
ret = clk_core_enable(core);
clk_enable_unlock(flags);
return ret;
}
/**
* clk_gate_restore_context - restore context for poweroff
* @hw: the clk_hw pointer of clock whose state is to be restored
*
* The clock gate restore context function enables or disables
* the gate clock based on its enable_count. This is done in cases
* where the clock context is lost and, depending on the enable_count,
* the clock either needs to be enabled or disabled. This
* helps restore the state of gate clocks.
*/
void clk_gate_restore_context(struct clk_hw *hw)
{
struct clk_core *core = hw->core;
if (core->enable_count)
core->ops->enable(hw);
else
core->ops->disable(hw);
}
EXPORT_SYMBOL_GPL(clk_gate_restore_context);
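/*
 * Usage sketch (editor's illustration, not part of the original source):
 * a gate provider whose registers are lost in deep sleep can wire this
 * helper in as its restore hook; the other ops names are hypothetical:
 *
 *	static const struct clk_ops my_gate_ops = {
 *		.enable		= my_gate_enable,
 *		.disable	= my_gate_disable,
 *		.restore_context = clk_gate_restore_context,
 *	};
 */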
static int clk_core_save_context(struct clk_core *core)
{
struct clk_core *child;
int ret = 0;
hlist_for_each_entry(child, &core->children, child_node) {
ret = clk_core_save_context(child);
if (ret < 0)
return ret;
}
if (core->ops && core->ops->save_context)
ret = core->ops->save_context(core->hw);
return ret;
}
static void clk_core_restore_context(struct clk_core *core)
{
struct clk_core *child;
if (core->ops && core->ops->restore_context)
core->ops->restore_context(core->hw);
hlist_for_each_entry(child, &core->children, child_node)
clk_core_restore_context(child);
}
/**
* clk_save_context - save clock context for poweroff
*
* Saves the context of the clock registers for power states in which the
* contents of the registers will be lost. Occurs deep within the suspend
* code. Returns 0 on success.
*/
int clk_save_context(void)
{
struct clk_core *clk;
int ret;
hlist_for_each_entry(clk, &clk_root_list, child_node) {
ret = clk_core_save_context(clk);
if (ret < 0)
return ret;
}
hlist_for_each_entry(clk, &clk_orphan_list, child_node) {
ret = clk_core_save_context(clk);
if (ret < 0)
return ret;
}
return 0;
}
EXPORT_SYMBOL_GPL(clk_save_context);
/**
* clk_restore_context - restore clock context after poweroff
*
* Restore the saved clock context upon resume.
*/
void clk_restore_context(void)
{
struct clk_core *core;
hlist_for_each_entry(core, &clk_root_list, child_node)
clk_core_restore_context(core);
hlist_for_each_entry(core, &clk_orphan_list, child_node)
clk_core_restore_context(core);
}
EXPORT_SYMBOL_GPL(clk_restore_context);
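/*
 * Usage sketch (editor's illustration, not part of the original source):
 * platform suspend/resume code calls the pair around the power-off state,
 * while the clock controller registers are still accessible:
 *
 *	ret = clk_save_context();	// suspend path
 *	if (ret)
 *		return ret;
 *	...power down, power up...
 *	clk_restore_context();		// resume path
 */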
/**
* clk_enable - ungate a clock
* @clk: the clk being ungated
*
* clk_enable must not sleep, which differentiates it from clk_prepare. In a
* simple case, clk_enable can be used instead of clk_prepare to ungate a clk
* if the operation will never sleep. One example is a SoC-internal clk which
* is controlled via simple register writes. In the complex case a clk ungate
* operation may require a fast and a slow part. It is for this reason that
* clk_enable and clk_prepare are not mutually exclusive. In fact clk_prepare
* must be called before clk_enable. Returns 0 on success, -EERROR
* otherwise.
*/
int clk_enable(struct clk *clk)
{
if (!clk)
return 0;
return clk_core_enable_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_enable);
/**
* clk_is_enabled_when_prepared - indicate if preparing a clock also enables it.
* @clk: clock source
*
* Returns true if clk_prepare() implicitly enables the clock, effectively
* making clk_enable()/clk_disable() no-ops, false otherwise.
*
* This is of interest mainly to power management code where actually
* disabling the clock also requires unpreparing it to have any material
* effect.
*
* Regardless of the value returned here, the caller must always invoke
* clk_enable() or clk_prepare_enable() and counterparts for usage counts
* to be right.
*/
bool clk_is_enabled_when_prepared(struct clk *clk)
{
return clk && !(clk->core->ops->enable && clk->core->ops->disable);
}
EXPORT_SYMBOL_GPL(clk_is_enabled_when_prepared);
static int clk_core_prepare_enable(struct clk_core *core)
{
int ret;
ret = clk_core_prepare_lock(core);
if (ret)
return ret;
ret = clk_core_enable_lock(core);
if (ret)
clk_core_unprepare_lock(core);
return ret;
}
static void clk_core_disable_unprepare(struct clk_core *core)
{
clk_core_disable_lock(core);
clk_core_unprepare_lock(core);
}
static void __init clk_unprepare_unused_subtree(struct clk_core *core)
{
struct clk_core *child;
lockdep_assert_held(&prepare_lock);
hlist_for_each_entry(child, &core->children, child_node)
clk_unprepare_unused_subtree(child);
if (core->prepare_count)
return;
if (core->flags & CLK_IGNORE_UNUSED)
return;
if (clk_pm_runtime_get(core))
return;
if (clk_core_is_prepared(core)) {
trace_clk_unprepare(core);
if (core->ops->unprepare_unused)
core->ops->unprepare_unused(core->hw);
else if (core->ops->unprepare)
core->ops->unprepare(core->hw);
trace_clk_unprepare_complete(core);
}
clk_pm_runtime_put(core);
}
static void __init clk_disable_unused_subtree(struct clk_core *core)
{
struct clk_core *child;
unsigned long flags;
lockdep_assert_held(&prepare_lock);
hlist_for_each_entry(child, &core->children, child_node)
clk_disable_unused_subtree(child);
if (core->flags & CLK_OPS_PARENT_ENABLE)
clk_core_prepare_enable(core->parent);
if (clk_pm_runtime_get(core))
goto unprepare_out;
flags = clk_enable_lock();
if (core->enable_count)
goto unlock_out;
if (core->flags & CLK_IGNORE_UNUSED)
goto unlock_out;
/*
* some gate clocks have special needs during the disable-unused
* sequence. call .disable_unused if available, otherwise fall
* back to .disable
*/
if (clk_core_is_enabled(core)) {
trace_clk_disable(core);
if (core->ops->disable_unused)
core->ops->disable_unused(core->hw);
else if (core->ops->disable)
core->ops->disable(core->hw);
trace_clk_disable_complete(core);
}
unlock_out:
clk_enable_unlock(flags);
clk_pm_runtime_put(core);
unprepare_out:
if (core->flags & CLK_OPS_PARENT_ENABLE)
clk_core_disable_unprepare(core->parent);
}
static bool clk_ignore_unused __initdata;
static int __init clk_ignore_unused_setup(char *__unused)
{
clk_ignore_unused = true;
return 1;
}
__setup("clk_ignore_unused", clk_ignore_unused_setup);
static int __init clk_disable_unused(void)
{
struct clk_core *core;
if (clk_ignore_unused) {
pr_warn("clk: Not disabling unused clocks\n");
return 0;
}
pr_info("clk: Disabling unused clocks\n");
clk_prepare_lock();
hlist_for_each_entry(core, &clk_root_list, child_node)
clk_disable_unused_subtree(core);
hlist_for_each_entry(core, &clk_orphan_list, child_node)
clk_disable_unused_subtree(core);
hlist_for_each_entry(core, &clk_root_list, child_node)
clk_unprepare_unused_subtree(core);
hlist_for_each_entry(core, &clk_orphan_list, child_node)
clk_unprepare_unused_subtree(core);
clk_prepare_unlock();
return 0;
}
late_initcall_sync(clk_disable_unused);
static int clk_core_determine_round_nolock(struct clk_core *core,
struct clk_rate_request *req)
{
long rate;
lockdep_assert_held(&prepare_lock);
if (!core)
return 0;
/*
* Some clock providers hand-craft their clk_rate_requests and
* might not fill min_rate and max_rate.
*
* If it's the case, clamping the rate is equivalent to setting
* the rate to 0 which is bad. Skip the clamping but complain so
* that it gets fixed, hopefully.
*/
if (!req->min_rate && !req->max_rate)
pr_warn("%s: %s: clk_rate_request has initialized min or max rate.\n",
__func__, core->name);
else
req->rate = clamp(req->rate, req->min_rate, req->max_rate);
/*
* At this point, core protection will be disabled
* - if the provider is not protected at all
* - if the calling consumer is the only one which has exclusivity
* over the provider
*/
if (clk_core_rate_is_protected(core)) {
req->rate = core->rate;
} else if (core->ops->determine_rate) {
return core->ops->determine_rate(core->hw, req);
} else if (core->ops->round_rate) {
rate = core->ops->round_rate(core->hw, req->rate,
&req->best_parent_rate);
if (rate < 0)
return rate;
req->rate = rate;
} else {
return -EINVAL;
}
return 0;
}
static void clk_core_init_rate_req(struct clk_core * const core,
struct clk_rate_request *req,
unsigned long rate)
{
struct clk_core *parent;
if (WARN_ON(!req))
return;
memset(req, 0, sizeof(*req));
req->max_rate = ULONG_MAX;
if (!core)
return;
req->core = core;
req->rate = rate;
clk_core_get_boundaries(core, &req->min_rate, &req->max_rate);
parent = core->parent;
if (parent) {
req->best_parent_hw = parent->hw;
req->best_parent_rate = parent->rate;
} else {
req->best_parent_hw = NULL;
req->best_parent_rate = 0;
}
}
/**
* clk_hw_init_rate_request - Initializes a clk_rate_request
* @hw: the clk for which we want to submit a rate request
* @req: the clk_rate_request structure we want to initialise
* @rate: the rate which is to be requested
*
* Initializes a clk_rate_request structure to submit to
* __clk_determine_rate() or similar functions.
*/
void clk_hw_init_rate_request(const struct clk_hw *hw,
struct clk_rate_request *req,
unsigned long rate)
{
if (WARN_ON(!hw || !req))
return;
clk_core_init_rate_req(hw->core, req, rate);
}
EXPORT_SYMBOL_GPL(clk_hw_init_rate_request);
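/*
 * Usage sketch (editor's illustration, not part of the original source):
 * from within a clk_ops callback (prepare_lock held), a provider can ask
 * what rate a clock would actually resolve to:
 *
 *	struct clk_rate_request req;
 *	int ret;
 *
 *	clk_hw_init_rate_request(hw, &req, rate);
 *	ret = __clk_determine_rate(hw, &req);
 *	if (ret)
 *		return ret;
 *	// req.rate now holds the closest supported rate
 */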
/**
* clk_hw_forward_rate_request - Forwards a clk_rate_request to a clock's parent
* @hw: the original clock that got the rate request
* @old_req: the original clk_rate_request structure we want to forward
* @parent: the clk we want to forward @old_req to
* @req: the clk_rate_request structure we want to initialise
* @parent_rate: The rate which is to be requested to @parent
*
* Initializes a clk_rate_request structure to submit to a clock parent
* in __clk_determine_rate() or similar functions.
*/
void clk_hw_forward_rate_request(const struct clk_hw *hw,
const struct clk_rate_request *old_req,
const struct clk_hw *parent,
struct clk_rate_request *req,
unsigned long parent_rate)
{
if (WARN_ON(!hw || !old_req || !parent || !req))
return;
clk_core_forward_rate_req(hw->core, old_req,
parent->core, req,
parent_rate);
}
EXPORT_SYMBOL_GPL(clk_hw_forward_rate_request);
static bool clk_core_can_round(struct clk_core * const core)
{
return core->ops->determine_rate || core->ops->round_rate;
}
static int clk_core_round_rate_nolock(struct clk_core *core,
struct clk_rate_request *req)
{
int ret;
lockdep_assert_held(&prepare_lock);
if (!core) {
req->rate = 0;
return 0;
}
if (clk_core_can_round(core))
return clk_core_determine_round_nolock(core, req);
if (core->flags & CLK_SET_RATE_PARENT) {
struct clk_rate_request parent_req;
clk_core_forward_rate_req(core, req, core->parent, &parent_req, req->rate);
trace_clk_rate_request_start(&parent_req);
ret = clk_core_round_rate_nolock(core->parent, &parent_req);
if (ret)
return ret;
trace_clk_rate_request_done(&parent_req);
req->best_parent_rate = parent_req.rate;
req->rate = parent_req.rate;
return 0;
}
req->rate = core->rate;
return 0;
}
/**
* __clk_determine_rate - get the closest rate actually supported by a clock
* @hw: determine the rate of this clock
* @req: target rate request
*
* Useful for clk_ops such as .set_rate and .determine_rate.
*/
int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
if (!hw) {
req->rate = 0;
return 0;
}
return clk_core_round_rate_nolock(hw->core, req);
}
EXPORT_SYMBOL_GPL(__clk_determine_rate);
/**
* clk_hw_round_rate() - round the given rate for a hw clk
* @hw: the hw clk for which we are rounding a rate
* @rate: the rate which is to be rounded
*
* Takes in a rate as input and rounds it to a rate that the clk can actually
* use.
*
* Context: prepare_lock must be held.
* For clk providers to call from within clk_ops such as .round_rate,
* .determine_rate.
*
* Return: the rounded rate of the hw clk if the clk supports the round_rate
* operation, else the parent rate.
*/
unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate)
{
int ret;
struct clk_rate_request req;
clk_core_init_rate_req(hw->core, &req, rate);
trace_clk_rate_request_start(&req);
ret = clk_core_round_rate_nolock(hw->core, &req);
if (ret)
return 0;
trace_clk_rate_request_done(&req);
return req.rate;
}
EXPORT_SYMBOL_GPL(clk_hw_round_rate);
/**
* clk_round_rate - round the given rate for a clk
* @clk: the clk for which we are rounding a rate
* @rate: the rate which is to be rounded
*
* Takes in a rate as input and rounds it to a rate that the clk can actually
* use which is then returned. If clk doesn't support round_rate operation
* then the parent rate is returned.
*/
long clk_round_rate(struct clk *clk, unsigned long rate)
{
struct clk_rate_request req;
int ret;
if (!clk)
return 0;
clk_prepare_lock();
if (clk->exclusive_count)
clk_core_rate_unprotect(clk->core);
clk_core_init_rate_req(clk->core, &req, rate);
trace_clk_rate_request_start(&req);
ret = clk_core_round_rate_nolock(clk->core, &req);
trace_clk_rate_request_done(&req);
if (clk->exclusive_count)
clk_core_rate_protect(clk->core);
clk_prepare_unlock();
if (ret)
return ret;
return req.rate;
}
EXPORT_SYMBOL_GPL(clk_round_rate);
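/*
 * Usage sketch (editor's illustration, not part of the original source):
 * consumers often probe what the hardware can deliver before committing;
 * the target rate and the tolerance check are hypothetical:
 *
 *	long rounded = clk_round_rate(clk, 48000000);
 *
 *	if (rounded > 0 && rate_is_close_enough(rounded))
 *		ret = clk_set_rate(clk, rounded);
 */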
/**
* __clk_notify - call clk notifier chain
* @core: clk that is changing rate
* @msg: clk notifier type (see include/linux/clk.h)
* @old_rate: old clk rate
* @new_rate: new clk rate
*
* Triggers a notifier call chain on the clk rate-change notification
* for 'clk'. Passes a pointer to the struct clk and the previous
* and current rates to the notifier callback. Intended to be called by
* internal clock code only. Returns NOTIFY_DONE from the last driver
* called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
* a driver returns that.
*/
static int __clk_notify(struct clk_core *core, unsigned long msg,
unsigned long old_rate, unsigned long new_rate)
{
struct clk_notifier *cn;
struct clk_notifier_data cnd;
int ret = NOTIFY_DONE;
cnd.old_rate = old_rate;
cnd.new_rate = new_rate;
list_for_each_entry(cn, &clk_notifier_list, node) {
if (cn->clk->core == core) {
cnd.clk = cn->clk;
ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
&cnd);
if (ret & NOTIFY_STOP_MASK)
return ret;
}
}
return ret;
}
/**
* __clk_recalc_accuracies
* @core: first clk in the subtree
*
* Walks the subtree of clks starting with clk and recalculates accuracies as
* it goes. Note that if a clk does not implement the .recalc_accuracy
* callback then it is assumed that the clock will take on the accuracy of its
* parent.
*/
static void __clk_recalc_accuracies(struct clk_core *core)
{
unsigned long parent_accuracy = 0;
struct clk_core *child;
lockdep_assert_held(&prepare_lock);
if (core->parent)
parent_accuracy = core->parent->accuracy;
if (core->ops->recalc_accuracy)
core->accuracy = core->ops->recalc_accuracy(core->hw,
parent_accuracy);
else
core->accuracy = parent_accuracy;
hlist_for_each_entry(child, &core->children, child_node)
__clk_recalc_accuracies(child);
}
static long clk_core_get_accuracy_recalc(struct clk_core *core)
{
if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE))
__clk_recalc_accuracies(core);
return clk_core_get_accuracy_no_lock(core);
}
/**
* clk_get_accuracy - return the accuracy of clk
* @clk: the clk whose accuracy is being returned
*
* Simply returns the cached accuracy of the clk, unless
* CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_rate will be
* issued.
* If clk is NULL then returns 0.
*/
long clk_get_accuracy(struct clk *clk)
{
long accuracy;
if (!clk)
return 0;
clk_prepare_lock();
accuracy = clk_core_get_accuracy_recalc(clk->core);
clk_prepare_unlock();
return accuracy;
}
EXPORT_SYMBOL_GPL(clk_get_accuracy);
static unsigned long clk_recalc(struct clk_core *core,
unsigned long parent_rate)
{
unsigned long rate = parent_rate;
if (core->ops->recalc_rate && !clk_pm_runtime_get(core)) {
rate = core->ops->recalc_rate(core->hw, parent_rate);
clk_pm_runtime_put(core);
}
return rate;
}
/**
* __clk_recalc_rates
* @core: first clk in the subtree
* @update_req: Whether req_rate should be updated with the new rate
* @msg: notification type (see include/linux/clk.h)
*
* Walks the subtree of clks starting with clk and recalculates rates as it
* goes. Note that if a clk does not implement the .recalc_rate callback then
* it is assumed that the clock will take on the rate of its parent.
*
* clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
* if necessary.
*/
static void __clk_recalc_rates(struct clk_core *core, bool update_req,
unsigned long msg)
{
unsigned long old_rate;
unsigned long parent_rate = 0;
struct clk_core *child;
lockdep_assert_held(&prepare_lock);
old_rate = core->rate;
if (core->parent)
parent_rate = core->parent->rate;
core->rate = clk_recalc(core, parent_rate);
if (update_req)
core->req_rate = core->rate;
/*
* ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
* & ABORT_RATE_CHANGE notifiers
*/
if (core->notifier_count && msg)
__clk_notify(core, msg, old_rate, core->rate);
hlist_for_each_entry(child, &core->children, child_node)
__clk_recalc_rates(child, update_req, msg);
}
static unsigned long clk_core_get_rate_recalc(struct clk_core *core)
{
if (core && (core->flags & CLK_GET_RATE_NOCACHE))
__clk_recalc_rates(core, false, 0);
return clk_core_get_rate_nolock(core);
}
/**
* clk_get_rate - return the rate of clk
* @clk: the clk whose rate is being returned
*
* Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
* is set, which means a recalc_rate will be issued. Can be called regardless of
* whether the clock is enabled. If clk is NULL, or if an error occurred, then
* returns 0.
*/
unsigned long clk_get_rate(struct clk *clk)
{
unsigned long rate;
if (!clk)
return 0;
clk_prepare_lock();
rate = clk_core_get_rate_recalc(clk->core);
clk_prepare_unlock();
return rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);
static int clk_fetch_parent_index(struct clk_core *core,
struct clk_core *parent)
{
int i;
if (!parent)
return -EINVAL;
for (i = 0; i < core->num_parents; i++) {
/* Found it first try! */
if (core->parents[i].core == parent)
return i;
/* Something else is here, so keep looking */
if (core->parents[i].core)
continue;
/* Maybe core hasn't been cached but the hw is all we know? */
if (core->parents[i].hw) {
if (core->parents[i].hw == parent->hw)
break;
/* Didn't match, but we're expecting a clk_hw */
continue;
}
/* Maybe it hasn't been cached (clk_set_parent() path) */
if (parent == clk_core_get(core, i))
break;
/* Fallback to comparing globally unique names */
if (core->parents[i].name &&
!strcmp(parent->name, core->parents[i].name))
break;
}
if (i == core->num_parents)
return -EINVAL;
core->parents[i].core = parent;
return i;
}
/**
* clk_hw_get_parent_index - return the index of the parent clock
* @hw: clk_hw associated with the clk being consumed
*
* Fetches and returns the index of the parent clock. Returns -EINVAL if the given
* clock does not have a current parent.
*/
int clk_hw_get_parent_index(struct clk_hw *hw)
{
struct clk_hw *parent = clk_hw_get_parent(hw);
if (WARN_ON(parent == NULL))
return -EINVAL;
return clk_fetch_parent_index(hw->core, parent->core);
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent_index);
/*
* Update the orphan status of @core and all its children.
*/
static void clk_core_update_orphan_status(struct clk_core *core, bool is_orphan)
{
struct clk_core *child;
core->orphan = is_orphan;
hlist_for_each_entry(child, &core->children, child_node)
clk_core_update_orphan_status(child, is_orphan);
}
static void clk_reparent(struct clk_core *core, struct clk_core *new_parent)
{
bool was_orphan = core->orphan;
hlist_del(&core->child_node);
if (new_parent) {
bool becomes_orphan = new_parent->orphan;
/* avoid duplicate POST_RATE_CHANGE notifications */
if (new_parent->new_child == core)
new_parent->new_child = NULL;
hlist_add_head(&core->child_node, &new_parent->children);
if (was_orphan != becomes_orphan)
clk_core_update_orphan_status(core, becomes_orphan);
} else {
hlist_add_head(&core->child_node, &clk_orphan_list);
if (!was_orphan)
clk_core_update_orphan_status(core, true);
}
core->parent = new_parent;
}
static struct clk_core *__clk_set_parent_before(struct clk_core *core,
struct clk_core *parent)
{
unsigned long flags;
struct clk_core *old_parent = core->parent;
/*
* 1. enable parents for CLK_OPS_PARENT_ENABLE clock
*
* 2. Migrate prepare state between parents and prevent race with
* clk_enable().
*
* If the clock is not prepared, then a race with
* clk_enable/disable() is impossible since we already have the
* prepare lock (future calls to clk_enable() need to be preceded by
* a clk_prepare()).
*
* If the clock is prepared, migrate the prepared state to the new
* parent and also protect against a race with clk_enable() by
* forcing the clock and the new parent on. This ensures that all
* future calls to clk_enable() are practically NOPs with respect to
* hardware and software states.
*
* See also: Comment for clk_set_parent() below.
*/
/* enable old_parent & parent if CLK_OPS_PARENT_ENABLE is set */
if (core->flags & CLK_OPS_PARENT_ENABLE) {
clk_core_prepare_enable(old_parent);
clk_core_prepare_enable(parent);
}
/* migrate prepare count if > 0 */
if (core->prepare_count) {
clk_core_prepare_enable(parent);
clk_core_enable_lock(core);
}
/* update the clk tree topology */
flags = clk_enable_lock();
clk_reparent(core, parent);
clk_enable_unlock(flags);
return old_parent;
}
static void __clk_set_parent_after(struct clk_core *core,
struct clk_core *parent,
struct clk_core *old_parent)
{
/*
* Finish the migration of prepare state and undo the changes done
* for preventing a race with clk_enable().
*/
if (core->prepare_count) {
clk_core_disable_lock(core);
clk_core_disable_unprepare(old_parent);
}
/* re-balance ref counting if CLK_OPS_PARENT_ENABLE is set */
if (core->flags & CLK_OPS_PARENT_ENABLE) {
clk_core_disable_unprepare(parent);
clk_core_disable_unprepare(old_parent);
}
}
static int __clk_set_parent(struct clk_core *core, struct clk_core *parent,
u8 p_index)
{
unsigned long flags;
int ret = 0;
struct clk_core *old_parent;
old_parent = __clk_set_parent_before(core, parent);
trace_clk_set_parent(core, parent);
/* change clock input source */
if (parent && core->ops->set_parent)
ret = core->ops->set_parent(core->hw, p_index);
trace_clk_set_parent_complete(core, parent);
if (ret) {
flags = clk_enable_lock();
clk_reparent(core, old_parent);
clk_enable_unlock(flags);
__clk_set_parent_after(core, old_parent, parent);
return ret;
}
__clk_set_parent_after(core, parent, old_parent);
return 0;
}
/**
* __clk_speculate_rates
* @core: first clk in the subtree
* @parent_rate: the "future" rate of clk's parent
*
* Walks the subtree of clks starting with clk, speculating rates as it
* goes and firing off PRE_RATE_CHANGE notifications as necessary.
*
* Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
* pre-rate change notifications and returns early if no clks in the
* subtree have subscribed to the notifications. Note that if a clk does not
* implement the .recalc_rate callback then it is assumed that the clock will
* take on the rate of its parent.
*/
static int __clk_speculate_rates(struct clk_core *core,
unsigned long parent_rate)
{
struct clk_core *child;
unsigned long new_rate;
int ret = NOTIFY_DONE;
lockdep_assert_held(&prepare_lock);
new_rate = clk_recalc(core, parent_rate);
/* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
if (core->notifier_count)
ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate);
if (ret & NOTIFY_STOP_MASK) {
pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
__func__, core->name, ret);
goto out;
}
hlist_for_each_entry(child, &core->children, child_node) {
ret = __clk_speculate_rates(child, new_rate);
if (ret & NOTIFY_STOP_MASK)
break;
}
out:
return ret;
}
static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate,
struct clk_core *new_parent, u8 p_index)
{
struct clk_core *child;
core->new_rate = new_rate;
core->new_parent = new_parent;
core->new_parent_index = p_index;
/* include clk in new parent's PRE_RATE_CHANGE notifications */
core->new_child = NULL;
if (new_parent && new_parent != core->parent)
new_parent->new_child = core;
hlist_for_each_entry(child, &core->children, child_node) {
child->new_rate = clk_recalc(child, new_rate);
clk_calc_subtree(child, child->new_rate, NULL, 0);
}
}
/*
* calculate the new rates returning the topmost clock that has to be
* changed.
*/
static struct clk_core *clk_calc_new_rates(struct clk_core *core,
unsigned long rate)
{
struct clk_core *top = core;
struct clk_core *old_parent, *parent;
unsigned long best_parent_rate = 0;
unsigned long new_rate;
unsigned long min_rate;
unsigned long max_rate;
int p_index = 0;
long ret;
/* sanity */
if (IS_ERR_OR_NULL(core))
return NULL;
/* save parent rate, if it exists */
parent = old_parent = core->parent;
if (parent)
best_parent_rate = parent->rate;
clk_core_get_boundaries(core, &min_rate, &max_rate);
/* find the closest rate and parent clk/rate */
if (clk_core_can_round(core)) {
struct clk_rate_request req;
clk_core_init_rate_req(core, &req, rate);
trace_clk_rate_request_start(&req);
ret = clk_core_determine_round_nolock(core, &req);
if (ret < 0)
return NULL;
trace_clk_rate_request_done(&req);
best_parent_rate = req.best_parent_rate;
new_rate = req.rate;
parent = req.best_parent_hw ? req.best_parent_hw->core : NULL;
if (new_rate < min_rate || new_rate > max_rate)
return NULL;
} else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) {
/* pass-through clock without adjustable parent */
core->new_rate = core->rate;
return NULL;
} else {
/* pass-through clock with adjustable parent */
top = clk_calc_new_rates(parent, rate);
new_rate = parent->new_rate;
goto out;
}
/* some clocks must be gated to change parent */
if (parent != old_parent &&
(core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
pr_debug("%s: %s not gated but wants to reparent\n",
__func__, core->name);
return NULL;
}
/* try finding the new parent index */
if (parent && core->num_parents > 1) {
p_index = clk_fetch_parent_index(core, parent);
if (p_index < 0) {
pr_debug("%s: clk %s can not be parent of clk %s\n",
__func__, parent->name, core->name);
return NULL;
}
}
if ((core->flags & CLK_SET_RATE_PARENT) && parent &&
best_parent_rate != parent->rate)
top = clk_calc_new_rates(parent, best_parent_rate);
out:
clk_calc_subtree(core, new_rate, parent, p_index);
return top;
}
/*
* Notify about rate changes in a subtree. Always walk down the whole tree
* so that in case of an error we can walk down the whole tree again and
* abort the change.
*/
static struct clk_core *clk_propagate_rate_change(struct clk_core *core,
unsigned long event)
{
struct clk_core *child, *tmp_clk, *fail_clk = NULL;
int ret = NOTIFY_DONE;
if (core->rate == core->new_rate)
return NULL;
if (core->notifier_count) {
ret = __clk_notify(core, event, core->rate, core->new_rate);
if (ret & NOTIFY_STOP_MASK)
fail_clk = core;
}
hlist_for_each_entry(child, &core->children, child_node) {
/* Skip children who will be reparented to another clock */
if (child->new_parent && child->new_parent != core)
continue;
tmp_clk = clk_propagate_rate_change(child, event);
if (tmp_clk)
fail_clk = tmp_clk;
}
/* handle the new child who might not be in core->children yet */
if (core->new_child) {
tmp_clk = clk_propagate_rate_change(core->new_child, event);
if (tmp_clk)
fail_clk = tmp_clk;
}
return fail_clk;
}
/*
* walk down a subtree and set the new rates notifying the rate
* change on the way
*/
static void clk_change_rate(struct clk_core *core)
{
struct clk_core *child;
struct hlist_node *tmp;
unsigned long old_rate;
unsigned long best_parent_rate = 0;
bool skip_set_rate = false;
struct clk_core *old_parent;
struct clk_core *parent = NULL;
old_rate = core->rate;
if (core->new_parent) {
parent = core->new_parent;
best_parent_rate = core->new_parent->rate;
} else if (core->parent) {
parent = core->parent;
best_parent_rate = core->parent->rate;
}
if (clk_pm_runtime_get(core))
return;
if (core->flags & CLK_SET_RATE_UNGATE) {
clk_core_prepare(core);
clk_core_enable_lock(core);
}
if (core->new_parent && core->new_parent != core->parent) {
old_parent = __clk_set_parent_before(core, core->new_parent);
trace_clk_set_parent(core, core->new_parent);
if (core->ops->set_rate_and_parent) {
skip_set_rate = true;
core->ops->set_rate_and_parent(core->hw, core->new_rate,
best_parent_rate,
core->new_parent_index);
} else if (core->ops->set_parent) {
core->ops->set_parent(core->hw, core->new_parent_index);
}
trace_clk_set_parent_complete(core, core->new_parent);
__clk_set_parent_after(core, core->new_parent, old_parent);
}
if (core->flags & CLK_OPS_PARENT_ENABLE)
clk_core_prepare_enable(parent);
trace_clk_set_rate(core, core->new_rate);
if (!skip_set_rate && core->ops->set_rate)
core->ops->set_rate(core->hw, core->new_rate, best_parent_rate);
trace_clk_set_rate_complete(core, core->new_rate);
core->rate = clk_recalc(core, best_parent_rate);
if (core->flags & CLK_SET_RATE_UNGATE) {
clk_core_disable_lock(core);
clk_core_unprepare(core);
}
if (core->flags & CLK_OPS_PARENT_ENABLE)
clk_core_disable_unprepare(parent);
if (core->notifier_count && old_rate != core->rate)
__clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate);
if (core->flags & CLK_RECALC_NEW_RATES)
(void)clk_calc_new_rates(core, core->new_rate);
/*
* Use safe iteration, as change_rate can actually swap parents
* for certain clock types.
*/
hlist_for_each_entry_safe(child, tmp, &core->children, child_node) {
/* Skip children who will be reparented to another clock */
if (child->new_parent && child->new_parent != core)
continue;
clk_change_rate(child);
}
/* handle the new child who might not be in core->children yet */
if (core->new_child)
clk_change_rate(core->new_child);
clk_pm_runtime_put(core);
}
static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core,
unsigned long req_rate)
{
int ret, cnt;
struct clk_rate_request req;
lockdep_assert_held(&prepare_lock);
if (!core)
return 0;
/* simulate what the rate would be if it could be freely set */
cnt = clk_core_rate_nuke_protect(core);
if (cnt < 0)
return cnt;
clk_core_init_rate_req(core, &req, req_rate);
trace_clk_rate_request_start(&req);
ret = clk_core_round_rate_nolock(core, &req);
trace_clk_rate_request_done(&req);
/* restore the protection */
clk_core_rate_restore_protect(core, cnt);
return ret ? 0 : req.rate;
}
static int clk_core_set_rate_nolock(struct clk_core *core,
unsigned long req_rate)
{
struct clk_core *top, *fail_clk;
unsigned long rate;
int ret;
if (!core)
return 0;
rate = clk_core_req_round_rate_nolock(core, req_rate);
/* bail early if nothing to do */
if (rate == clk_core_get_rate_nolock(core))
return 0;
/* fail on a direct rate set of a protected provider */
if (clk_core_rate_is_protected(core))
return -EBUSY;
/* calculate new rates and get the topmost changed clock */
top = clk_calc_new_rates(core, req_rate);
if (!top)
return -EINVAL;
ret = clk_pm_runtime_get(core);
if (ret)
return ret;
/* notify that we are about to change rates */
fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
if (fail_clk) {
pr_debug("%s: failed to set %s rate\n", __func__,
fail_clk->name);
clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
ret = -EBUSY;
goto err;
}
/* change the rates */
clk_change_rate(top);
core->req_rate = req_rate;
err:
clk_pm_runtime_put(core);
return ret;
}
/**
* clk_set_rate - specify a new rate for clk
* @clk: the clk whose rate is being changed
* @rate: the new rate for clk
*
* In the simplest case clk_set_rate will only adjust the rate of clk.
*
* Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
* propagate up to clk's parent; whether or not this happens depends on the
* outcome of clk's .round_rate implementation. If *parent_rate is unchanged
* after calling .round_rate then upstream parent propagation is ignored. If
* *parent_rate comes back with a new rate for clk's parent then we propagate
* up to clk's parent and set its rate. Upward propagation will continue
* until either a clk does not support the CLK_SET_RATE_PARENT flag or
* .round_rate stops requesting changes to clk's parent_rate.
*
* Rate changes are accomplished via tree traversal that also recalculates the
* rates for the clocks and fires off POST_RATE_CHANGE notifiers.
*
* Returns 0 on success, -EERROR otherwise.
*/
int clk_set_rate(struct clk *clk, unsigned long rate)
{
int ret;
if (!clk)
return 0;
/* prevent racing with updates to the clock topology */
clk_prepare_lock();
if (clk->exclusive_count)
clk_core_rate_unprotect(clk->core);
ret = clk_core_set_rate_nolock(clk->core, rate);
if (clk->exclusive_count)
clk_core_rate_protect(clk->core);
clk_prepare_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);
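/*
 * Usage sketch (editor's illustration, not part of the original source):
 * a straightforward consumer call; the rate and the error message are
 * hypothetical:
 *
 *	ret = clk_set_rate(clk, 74250000);
 *	if (ret)
 *		dev_err(dev, "failed to set pixel clock: %d\n", ret);
 */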
/**
* clk_set_rate_exclusive - specify a new rate and get exclusive control
* @clk: the clk whose rate is being changed
* @rate: the new rate for clk
*
* This is a combination of clk_set_rate() and clk_rate_exclusive_get()
* within a critical section
*
* This can be used initially to ensure that at least 1 consumer is
* satisfied when several consumers are competing for exclusivity over the
* same clock provider.
*
* The exclusivity is not applied if setting the rate failed.
*
* Calls to clk_rate_exclusive_get() should be balanced with calls to
* clk_rate_exclusive_put().
*
* Returns 0 on success, -EERROR otherwise.
*/
int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
{
int ret;
if (!clk)
return 0;
/* prevent racing with updates to the clock topology */
clk_prepare_lock();
/*
* The temporary protection removal is deliberately not done here.
* This function is meant to be used instead of clk_rate_protect,
* so it protects the clock provider before the consumer code path runs.
*/
ret = clk_core_set_rate_nolock(clk->core, rate);
if (!ret) {
clk_core_rate_protect(clk->core);
clk->exclusive_count++;
}
clk_prepare_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_exclusive);
static int clk_set_rate_range_nolock(struct clk *clk,
unsigned long min,
unsigned long max)
{
int ret = 0;
unsigned long old_min, old_max, rate;
lockdep_assert_held(&prepare_lock);
if (!clk)
return 0;
trace_clk_set_rate_range(clk->core, min, max);
if (min > max) {
pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
__func__, clk->core->name, clk->dev_id, clk->con_id,
min, max);
return -EINVAL;
}
if (clk->exclusive_count)
clk_core_rate_unprotect(clk->core);
/* Save the current values in case we need to rollback the change */
old_min = clk->min_rate;
old_max = clk->max_rate;
clk->min_rate = min;
clk->max_rate = max;
if (!clk_core_check_boundaries(clk->core, min, max)) {
ret = -EINVAL;
goto out;
}
rate = clk->core->req_rate;
if (clk->core->flags & CLK_GET_RATE_NOCACHE)
rate = clk_core_get_rate_recalc(clk->core);
/*
* Since the boundaries have been changed, let's give the
* opportunity to the provider to adjust the clock rate based on
* the new boundaries.
*
* We also need to handle the case where the clock is currently
* outside of the boundaries. Clamping the last requested rate
* to the current minimum and maximum will also handle this.
*
* FIXME:
* There is a catch. It may fail for the usual reason (clock
* broken, clock protected, etc) but also because:
* - round_rate() was not favorable and fell on the wrong
* side of the boundary
* - the determine_rate() callback does not really check for
* this corner case when determining the rate
*/
rate = clamp(rate, min, max);
ret = clk_core_set_rate_nolock(clk->core, rate);
if (ret) {
/* rollback the changes */
clk->min_rate = old_min;
clk->max_rate = old_max;
}
out:
if (clk->exclusive_count)
clk_core_rate_protect(clk->core);
return ret;
}
/**
* clk_set_rate_range - set a rate range for a clock source
* @clk: clock source
* @min: desired minimum clock rate in Hz, inclusive
* @max: desired maximum clock rate in Hz, inclusive
*
* Return: 0 for success or negative errno on failure.
*/
int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
{
int ret;
if (!clk)
return 0;
clk_prepare_lock();
ret = clk_set_rate_range_nolock(clk, min, max);
clk_prepare_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_range);
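/*
 * Usage sketch (editor's illustration, not part of the original source):
 * a consumer that works anywhere in a band lets the framework reconcile
 * the exact rate with other consumers; the band is hypothetical:
 *
 *	ret = clk_set_rate_range(clk, 100000000, 200000000);
 *
 * The call fails with -EINVAL if the band cannot be reconciled with the
 * provider limits or with the ranges requested by other consumers.
 */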
/**
* clk_set_min_rate - set a minimum clock rate for a clock source
* @clk: clock source
* @rate: desired minimum clock rate in Hz, inclusive
*
* Returns success (0) or negative errno.
*/
int clk_set_min_rate(struct clk *clk, unsigned long rate)
{
if (!clk)
return 0;
trace_clk_set_min_rate(clk->core, rate);
return clk_set_rate_range(clk, rate, clk->max_rate);
}
EXPORT_SYMBOL_GPL(clk_set_min_rate);
/**
* clk_set_max_rate - set a maximum clock rate for a clock source
* @clk: clock source
* @rate: desired maximum clock rate in Hz, inclusive
*
* Returns success (0) or negative errno.
*/
int clk_set_max_rate(struct clk *clk, unsigned long rate)
{
if (!clk)
return 0;
trace_clk_set_max_rate(clk->core, rate);
return clk_set_rate_range(clk, clk->min_rate, rate);
}
EXPORT_SYMBOL_GPL(clk_set_max_rate);
/**
* clk_get_parent - return the parent of a clk
* @clk: the clk whose parent gets returned
*
* Simply returns clk->parent. Returns NULL if clk is NULL.
*/
struct clk *clk_get_parent(struct clk *clk)
{
struct clk *parent;
if (!clk)
return NULL;
clk_prepare_lock();
/* TODO: Create a per-user clk and change callers to call clk_put */
parent = !clk->core->parent ? NULL : clk->core->parent->hw->clk;
clk_prepare_unlock();
return parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);
static struct clk_core *__clk_init_parent(struct clk_core *core)
{
u8 index = 0;
if (core->num_parents > 1 && core->ops->get_parent)
index = core->ops->get_parent(core->hw);
return clk_core_get_parent_by_index(core, index);
}
static void clk_core_reparent(struct clk_core *core,
struct clk_core *new_parent)
{
clk_reparent(core, new_parent);
__clk_recalc_accuracies(core);
__clk_recalc_rates(core, true, POST_RATE_CHANGE);
}
void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent)
{
if (!hw)
return;
clk_core_reparent(hw->core, !new_parent ? NULL : new_parent->core);
}
/**
* clk_has_parent - check if a clock is a possible parent for another
* @clk: clock source
* @parent: parent clock source
*
* This function can be used in drivers that need to check that a clock can be
* the parent of another without actually changing the parent.
*
* Returns true if @parent is a possible parent for @clk, false otherwise.
*/
bool clk_has_parent(const struct clk *clk, const struct clk *parent)
{
/* NULL clocks should be nops, so return success if either is NULL. */
if (!clk || !parent)
return true;
return clk_core_has_parent(clk->core, parent->core);
}
EXPORT_SYMBOL_GPL(clk_has_parent);
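/*
 * Illustrative sketch ('mux_clk' and 'pll_clk' are hypothetical consumer
 * handles): check a candidate parent before attempting to switch a mux.
 *
 *	if (clk_has_parent(mux_clk, pll_clk))
 *		ret = clk_set_parent(mux_clk, pll_clk);
 */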
static int clk_core_set_parent_nolock(struct clk_core *core,
struct clk_core *parent)
{
int ret = 0;
int p_index = 0;
unsigned long p_rate = 0;
lockdep_assert_held(&prepare_lock);
if (!core)
return 0;
if (core->parent == parent)
return 0;
/* verify ops for multi-parent clks */
if (core->num_parents > 1 && !core->ops->set_parent)
return -EPERM;
/* check that we are allowed to re-parent if the clock is in use */
if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count)
return -EBUSY;
if (clk_core_rate_is_protected(core))
return -EBUSY;
/* try finding the new parent index */
if (parent) {
p_index = clk_fetch_parent_index(core, parent);
if (p_index < 0) {
pr_debug("%s: clk %s can not be parent of clk %s\n",
__func__, parent->name, core->name);
return p_index;
}
p_rate = parent->rate;
}
ret = clk_pm_runtime_get(core);
if (ret)
return ret;
/* propagate PRE_RATE_CHANGE notifications */
ret = __clk_speculate_rates(core, p_rate);
/* abort if a driver objects */
if (ret & NOTIFY_STOP_MASK)
goto runtime_put;
/* do the re-parent */
ret = __clk_set_parent(core, parent, p_index);
/* propagate rate and accuracy recalculation accordingly */
if (ret) {
__clk_recalc_rates(core, true, ABORT_RATE_CHANGE);
} else {
__clk_recalc_rates(core, true, POST_RATE_CHANGE);
__clk_recalc_accuracies(core);
}
runtime_put:
clk_pm_runtime_put(core);
return ret;
}
int clk_hw_set_parent(struct clk_hw *hw, struct clk_hw *parent)
{
return clk_core_set_parent_nolock(hw->core, parent->core);
}
EXPORT_SYMBOL_GPL(clk_hw_set_parent);
/**
* clk_set_parent - switch the parent of a mux clk
* @clk: the mux clk whose input we are switching
* @parent: the new input to clk
*
* Re-parent clk to use parent as its new input source. If clk is in
* prepared state, the clk will get enabled for the duration of this call. If
 * that's not acceptable for a specific clk (e.g. the consumer can't handle
* that, the reparenting is glitchy in hardware, etc), use the
* CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
*
* After successfully changing clk's parent clk_set_parent will update the
* clk topology, sysfs topology and propagate rate recalculation via
* __clk_recalc_rates.
*
 * Returns 0 on success, a negative errno otherwise.
*/
int clk_set_parent(struct clk *clk, struct clk *parent)
{
int ret;
if (!clk)
return 0;
clk_prepare_lock();
if (clk->exclusive_count)
clk_core_rate_unprotect(clk->core);
ret = clk_core_set_parent_nolock(clk->core,
parent ? parent->core : NULL);
if (clk->exclusive_count)
clk_core_rate_protect(clk->core);
clk_prepare_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);
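/*
 * Illustrative sketch ('mux' and 'xo' are hypothetical consumer clks):
 * re-parent a mux and verify the result via clk_get_parent()/clk_is_match().
 *
 *	ret = clk_set_parent(mux, xo);
 *	if (!ret)
 *		WARN_ON(!clk_is_match(clk_get_parent(mux), xo));
 */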
static int clk_core_set_phase_nolock(struct clk_core *core, int degrees)
{
int ret = -EINVAL;
lockdep_assert_held(&prepare_lock);
if (!core)
return 0;
if (clk_core_rate_is_protected(core))
return -EBUSY;
trace_clk_set_phase(core, degrees);
if (core->ops->set_phase) {
ret = core->ops->set_phase(core->hw, degrees);
if (!ret)
core->phase = degrees;
}
trace_clk_set_phase_complete(core, degrees);
return ret;
}
/**
* clk_set_phase - adjust the phase shift of a clock signal
* @clk: clock signal source
* @degrees: number of degrees the signal is shifted
*
 * Shifts the phase of a clock signal by the specified
 * degrees. Returns 0 on success, a negative errno otherwise.
 *
 * This function makes no distinction about the input or reference
 * signal that we adjust the clock signal phase against. For example,
 * with phase-locked-loop clock signal generators we may shift phase
 * with respect to the feedback clock signal input, but in other cases
 * the clock phase may be shifted with respect to some other,
 * unspecified signal.
*
* Additionally the concept of phase shift does not propagate through
* the clock tree hierarchy, which sets it apart from clock rates and
* clock accuracy. A parent clock phase attribute does not have an
* impact on the phase attribute of a child clock.
*/
int clk_set_phase(struct clk *clk, int degrees)
{
int ret;
if (!clk)
return 0;
/* sanity check degrees */
degrees %= 360;
if (degrees < 0)
degrees += 360;
clk_prepare_lock();
if (clk->exclusive_count)
clk_core_rate_unprotect(clk->core);
ret = clk_core_set_phase_nolock(clk->core, degrees);
if (clk->exclusive_count)
clk_core_rate_protect(clk->core);
clk_prepare_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(clk_set_phase);
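/*
 * Illustrative sketch: shift a hypothetical sampling clock by a quarter
 * period; values outside [0, 359] are folded into that range by
 * clk_set_phase() before the provider's .set_phase callback runs.
 *
 *	ret = clk_set_phase(clk, 90);
 *	if (ret)
 *		dev_warn(dev, "cannot shift phase: %d\n", ret);
 */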
static int clk_core_get_phase(struct clk_core *core)
{
int ret;
lockdep_assert_held(&prepare_lock);
if (!core->ops->get_phase)
return 0;
/* Always try to update cached phase if possible */
ret = core->ops->get_phase(core->hw);
if (ret >= 0)
core->phase = ret;
return ret;
}
/**
* clk_get_phase - return the phase shift of a clock signal
* @clk: clock signal source
*
 * Returns the phase shift of a clock node in degrees, otherwise returns
 * a negative errno.
*/
int clk_get_phase(struct clk *clk)
{
int ret;
if (!clk)
return 0;
clk_prepare_lock();
ret = clk_core_get_phase(clk->core);
clk_prepare_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(clk_get_phase);
static void clk_core_reset_duty_cycle_nolock(struct clk_core *core)
{
/* Assume a default value of 50% */
core->duty.num = 1;
core->duty.den = 2;
}
static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core);
static int clk_core_update_duty_cycle_nolock(struct clk_core *core)
{
struct clk_duty *duty = &core->duty;
int ret = 0;
if (!core->ops->get_duty_cycle)
return clk_core_update_duty_cycle_parent_nolock(core);
ret = core->ops->get_duty_cycle(core->hw, duty);
if (ret)
goto reset;
/* Don't trust the clock provider too much */
if (duty->den == 0 || duty->num > duty->den) {
ret = -EINVAL;
goto reset;
}
return 0;
reset:
clk_core_reset_duty_cycle_nolock(core);
return ret;
}
static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core)
{
int ret = 0;
if (core->parent &&
core->flags & CLK_DUTY_CYCLE_PARENT) {
ret = clk_core_update_duty_cycle_nolock(core->parent);
memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
} else {
clk_core_reset_duty_cycle_nolock(core);
}
return ret;
}
static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
struct clk_duty *duty);
static int clk_core_set_duty_cycle_nolock(struct clk_core *core,
struct clk_duty *duty)
{
int ret;
lockdep_assert_held(&prepare_lock);
if (clk_core_rate_is_protected(core))
return -EBUSY;
trace_clk_set_duty_cycle(core, duty);
if (!core->ops->set_duty_cycle)
return clk_core_set_duty_cycle_parent_nolock(core, duty);
ret = core->ops->set_duty_cycle(core->hw, duty);
if (!ret)
memcpy(&core->duty, duty, sizeof(*duty));
trace_clk_set_duty_cycle_complete(core, duty);
return ret;
}
static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
struct clk_duty *duty)
{
int ret = 0;
if (core->parent &&
core->flags & (CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT)) {
ret = clk_core_set_duty_cycle_nolock(core->parent, duty);
memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
}
return ret;
}
/**
* clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
* @clk: clock signal source
* @num: numerator of the duty cycle ratio to be applied
* @den: denominator of the duty cycle ratio to be applied
*
 * Apply the duty cycle ratio if the ratio is valid and the clock can
 * perform this operation.
 *
 * Returns 0 on success, a negative errno otherwise.
*/
int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den)
{
int ret;
struct clk_duty duty;
if (!clk)
return 0;
/* sanity check the ratio */
if (den == 0 || num > den)
return -EINVAL;
duty.num = num;
duty.den = den;
clk_prepare_lock();
if (clk->exclusive_count)
clk_core_rate_unprotect(clk->core);
ret = clk_core_set_duty_cycle_nolock(clk->core, &duty);
if (clk->exclusive_count)
clk_core_rate_protect(clk->core);
clk_prepare_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(clk_set_duty_cycle);
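/*
 * Illustrative sketch: request a 1/3 duty cycle on a hypothetical PWM-like
 * clock; the num/den pair must satisfy den != 0 and num <= den or the call
 * fails with -EINVAL before touching the hardware.
 *
 *	ret = clk_set_duty_cycle(clk, 1, 3);
 */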
static int clk_core_get_scaled_duty_cycle(struct clk_core *core,
unsigned int scale)
{
struct clk_duty *duty = &core->duty;
int ret;
clk_prepare_lock();
ret = clk_core_update_duty_cycle_nolock(core);
if (!ret)
ret = mult_frac(scale, duty->num, duty->den);
clk_prepare_unlock();
return ret;
}
/**
* clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
* @clk: clock signal source
* @scale: scaling factor to be applied to represent the ratio as an integer
*
* Returns the duty cycle ratio of a clock node multiplied by the provided
* scaling factor, or negative errno on error.
*/
int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale)
{
if (!clk)
return 0;
return clk_core_get_scaled_duty_cycle(clk->core, scale);
}
EXPORT_SYMBOL_GPL(clk_get_scaled_duty_cycle);
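/*
 * Illustrative sketch: since the result is mult_frac(scale, num, den), a
 * scale of 100 yields an integer percentage ('clk' assumed valid):
 *
 *	int pct = clk_get_scaled_duty_cycle(clk, 100);	// e.g. 33 for 1/3
 */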
/**
* clk_is_match - check if two clk's point to the same hardware clock
* @p: clk compared against q
* @q: clk compared against p
*
* Returns true if the two struct clk pointers both point to the same hardware
* clock node. Put differently, returns true if struct clk *p and struct clk *q
* share the same struct clk_core object.
*
* Returns false otherwise. Note that two NULL clks are treated as matching.
*/
bool clk_is_match(const struct clk *p, const struct clk *q)
{
/* trivial case: identical struct clk's or both NULL */
if (p == q)
return true;
/* true if clk->core pointers match. Avoid dereferencing garbage */
if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q))
if (p->core == q->core)
return true;
return false;
}
EXPORT_SYMBOL_GPL(clk_is_match);
/*** debugfs support ***/
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
static struct dentry *rootdir;
static int inited = 0;
static DEFINE_MUTEX(clk_debug_lock);
static HLIST_HEAD(clk_debug_list);
static struct hlist_head *orphan_list[] = {
&clk_orphan_list,
NULL,
};
static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
int level)
{
int phase;
seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu ",
level * 3 + 1, "",
30 - level * 3, c->name,
c->enable_count, c->prepare_count, c->protect_count,
clk_core_get_rate_recalc(c),
clk_core_get_accuracy_recalc(c));
phase = clk_core_get_phase(c);
if (phase >= 0)
seq_printf(s, "%5d", phase);
else
seq_puts(s, "-----");
seq_printf(s, " %6d", clk_core_get_scaled_duty_cycle(c, 100000));
if (c->ops->is_enabled)
seq_printf(s, " %9c\n", clk_core_is_enabled(c) ? 'Y' : 'N');
else if (!c->ops->enable)
seq_printf(s, " %9c\n", 'Y');
else
seq_printf(s, " %9c\n", '?');
}
static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
int level)
{
struct clk_core *child;
clk_pm_runtime_get(c);
clk_summary_show_one(s, c, level);
clk_pm_runtime_put(c);
hlist_for_each_entry(child, &c->children, child_node)
clk_summary_show_subtree(s, child, level + 1);
}
static int clk_summary_show(struct seq_file *s, void *data)
{
struct clk_core *c;
struct hlist_head **lists = s->private;
seq_puts(s, " enable prepare protect duty hardware\n");
seq_puts(s, " clock count count count rate accuracy phase cycle enable\n");
seq_puts(s, "-------------------------------------------------------------------------------------------------------\n");
clk_prepare_lock();
for (; *lists; lists++)
hlist_for_each_entry(c, *lists, child_node)
clk_summary_show_subtree(s, c, 0);
clk_prepare_unlock();
return 0;
}
DEFINE_SHOW_ATTRIBUTE(clk_summary);
static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
{
int phase;
unsigned long min_rate, max_rate;
clk_core_get_boundaries(c, &min_rate, &max_rate);
/* This should be JSON format, i.e. elements separated with a comma */
seq_printf(s, "\"%s\": { ", c->name);
seq_printf(s, "\"enable_count\": %d,", c->enable_count);
seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
seq_printf(s, "\"protect_count\": %d,", c->protect_count);
seq_printf(s, "\"rate\": %lu,", clk_core_get_rate_recalc(c));
seq_printf(s, "\"min_rate\": %lu,", min_rate);
seq_printf(s, "\"max_rate\": %lu,", max_rate);
seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy_recalc(c));
phase = clk_core_get_phase(c);
if (phase >= 0)
seq_printf(s, "\"phase\": %d,", phase);
seq_printf(s, "\"duty_cycle\": %u",
clk_core_get_scaled_duty_cycle(c, 100000));
}
static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
{
struct clk_core *child;
clk_dump_one(s, c, level);
hlist_for_each_entry(child, &c->children, child_node) {
seq_putc(s, ',');
clk_dump_subtree(s, child, level + 1);
}
seq_putc(s, '}');
}
static int clk_dump_show(struct seq_file *s, void *data)
{
struct clk_core *c;
bool first_node = true;
struct hlist_head **lists = s->private;
seq_putc(s, '{');
clk_prepare_lock();
for (; *lists; lists++) {
hlist_for_each_entry(c, *lists, child_node) {
if (!first_node)
seq_putc(s, ',');
first_node = false;
clk_dump_subtree(s, c, 0);
}
}
clk_prepare_unlock();
seq_puts(s, "}\n");
return 0;
}
DEFINE_SHOW_ATTRIBUTE(clk_dump);
#undef CLOCK_ALLOW_WRITE_DEBUGFS
#ifdef CLOCK_ALLOW_WRITE_DEBUGFS
/*
* This can be dangerous, therefore don't provide any real compile time
* configuration option for this feature.
* People who want to use this will need to modify the source code directly.
*/
static int clk_rate_set(void *data, u64 val)
{
struct clk_core *core = data;
int ret;
clk_prepare_lock();
ret = clk_core_set_rate_nolock(core, val);
clk_prepare_unlock();
return ret;
}
#define clk_rate_mode 0644
static int clk_prepare_enable_set(void *data, u64 val)
{
struct clk_core *core = data;
int ret = 0;
if (val)
ret = clk_prepare_enable(core->hw->clk);
else
clk_disable_unprepare(core->hw->clk);
return ret;
}
static int clk_prepare_enable_get(void *data, u64 *val)
{
struct clk_core *core = data;
*val = core->enable_count && core->prepare_count;
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(clk_prepare_enable_fops, clk_prepare_enable_get,
clk_prepare_enable_set, "%llu\n");
#else
#define clk_rate_set NULL
#define clk_rate_mode 0444
#endif
static int clk_rate_get(void *data, u64 *val)
{
struct clk_core *core = data;
clk_prepare_lock();
*val = clk_core_get_rate_recalc(core);
clk_prepare_unlock();
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(clk_rate_fops, clk_rate_get, clk_rate_set, "%llu\n");
static const struct {
unsigned long flag;
const char *name;
} clk_flags[] = {
#define ENTRY(f) { f, #f }
ENTRY(CLK_SET_RATE_GATE),
ENTRY(CLK_SET_PARENT_GATE),
ENTRY(CLK_SET_RATE_PARENT),
ENTRY(CLK_IGNORE_UNUSED),
ENTRY(CLK_GET_RATE_NOCACHE),
ENTRY(CLK_SET_RATE_NO_REPARENT),
ENTRY(CLK_GET_ACCURACY_NOCACHE),
ENTRY(CLK_RECALC_NEW_RATES),
ENTRY(CLK_SET_RATE_UNGATE),
ENTRY(CLK_IS_CRITICAL),
ENTRY(CLK_OPS_PARENT_ENABLE),
ENTRY(CLK_DUTY_CYCLE_PARENT),
#undef ENTRY
};
static int clk_flags_show(struct seq_file *s, void *data)
{
struct clk_core *core = s->private;
unsigned long flags = core->flags;
unsigned int i;
for (i = 0; flags && i < ARRAY_SIZE(clk_flags); i++) {
if (flags & clk_flags[i].flag) {
seq_printf(s, "%s\n", clk_flags[i].name);
flags &= ~clk_flags[i].flag;
}
}
if (flags) {
/* Unknown flags */
seq_printf(s, "0x%lx\n", flags);
}
return 0;
}
DEFINE_SHOW_ATTRIBUTE(clk_flags);
static void possible_parent_show(struct seq_file *s, struct clk_core *core,
unsigned int i, char terminator)
{
struct clk_core *parent;
/*
* Go through the following options to fetch a parent's name.
*
* 1. Fetch the registered parent clock and use its name
* 2. Use the global (fallback) name if specified
* 3. Use the local fw_name if provided
* 4. Fetch parent clock's clock-output-name if DT index was set
*
* This may still fail in some cases, such as when the parent is
* specified directly via a struct clk_hw pointer, but it isn't
* registered (yet).
*/
parent = clk_core_get_parent_by_index(core, i);
if (parent)
seq_puts(s, parent->name);
else if (core->parents[i].name)
seq_puts(s, core->parents[i].name);
else if (core->parents[i].fw_name)
seq_printf(s, "<%s>(fw)", core->parents[i].fw_name);
else if (core->parents[i].index >= 0)
seq_puts(s,
of_clk_get_parent_name(core->of_node,
core->parents[i].index));
else
seq_puts(s, "(missing)");
seq_putc(s, terminator);
}
static int possible_parents_show(struct seq_file *s, void *data)
{
struct clk_core *core = s->private;
int i;
for (i = 0; i < core->num_parents - 1; i++)
possible_parent_show(s, core, i, ' ');
possible_parent_show(s, core, i, '\n');
return 0;
}
DEFINE_SHOW_ATTRIBUTE(possible_parents);
static int current_parent_show(struct seq_file *s, void *data)
{
struct clk_core *core = s->private;
if (core->parent)
seq_printf(s, "%s\n", core->parent->name);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(current_parent);
#ifdef CLOCK_ALLOW_WRITE_DEBUGFS
static ssize_t current_parent_write(struct file *file, const char __user *ubuf,
size_t count, loff_t *ppos)
{
struct seq_file *s = file->private_data;
struct clk_core *core = s->private;
struct clk_core *parent;
u8 idx;
int err;
err = kstrtou8_from_user(ubuf, count, 0, &idx);
if (err < 0)
return err;
parent = clk_core_get_parent_by_index(core, idx);
if (!parent)
return -ENOENT;
clk_prepare_lock();
err = clk_core_set_parent_nolock(core, parent);
clk_prepare_unlock();
if (err)
return err;
return count;
}
static const struct file_operations current_parent_rw_fops = {
.open = current_parent_open,
.write = current_parent_write,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
#endif
static int clk_duty_cycle_show(struct seq_file *s, void *data)
{
struct clk_core *core = s->private;
struct clk_duty *duty = &core->duty;
seq_printf(s, "%u/%u\n", duty->num, duty->den);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(clk_duty_cycle);
static int clk_min_rate_show(struct seq_file *s, void *data)
{
struct clk_core *core = s->private;
unsigned long min_rate, max_rate;
clk_prepare_lock();
clk_core_get_boundaries(core, &min_rate, &max_rate);
clk_prepare_unlock();
seq_printf(s, "%lu\n", min_rate);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(clk_min_rate);
static int clk_max_rate_show(struct seq_file *s, void *data)
{
struct clk_core *core = s->private;
unsigned long min_rate, max_rate;
clk_prepare_lock();
clk_core_get_boundaries(core, &min_rate, &max_rate);
clk_prepare_unlock();
seq_printf(s, "%lu\n", max_rate);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(clk_max_rate);
static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
{
struct dentry *root;
if (!core || !pdentry)
return;
root = debugfs_create_dir(core->name, pdentry);
core->dentry = root;
debugfs_create_file("clk_rate", clk_rate_mode, root, core,
&clk_rate_fops);
debugfs_create_file("clk_min_rate", 0444, root, core, &clk_min_rate_fops);
debugfs_create_file("clk_max_rate", 0444, root, core, &clk_max_rate_fops);
debugfs_create_ulong("clk_accuracy", 0444, root, &core->accuracy);
debugfs_create_u32("clk_phase", 0444, root, &core->phase);
debugfs_create_file("clk_flags", 0444, root, core, &clk_flags_fops);
debugfs_create_u32("clk_prepare_count", 0444, root, &core->prepare_count);
debugfs_create_u32("clk_enable_count", 0444, root, &core->enable_count);
debugfs_create_u32("clk_protect_count", 0444, root, &core->protect_count);
debugfs_create_u32("clk_notifier_count", 0444, root, &core->notifier_count);
debugfs_create_file("clk_duty_cycle", 0444, root, core,
&clk_duty_cycle_fops);
#ifdef CLOCK_ALLOW_WRITE_DEBUGFS
debugfs_create_file("clk_prepare_enable", 0644, root, core,
&clk_prepare_enable_fops);
if (core->num_parents > 1)
debugfs_create_file("clk_parent", 0644, root, core,
&current_parent_rw_fops);
else
#endif
if (core->num_parents > 0)
debugfs_create_file("clk_parent", 0444, root, core,
&current_parent_fops);
if (core->num_parents > 1)
debugfs_create_file("clk_possible_parents", 0444, root, core,
&possible_parents_fops);
if (core->ops->debug_init)
core->ops->debug_init(core->hw, core->dentry);
}
/**
* clk_debug_register - add a clk node to the debugfs clk directory
* @core: the clk being added to the debugfs clk directory
*
* Dynamically adds a clk to the debugfs clk directory if debugfs has been
* initialized. Otherwise it bails out early since the debugfs clk directory
* will be created lazily by clk_debug_init as part of a late_initcall.
*/
static void clk_debug_register(struct clk_core *core)
{
mutex_lock(&clk_debug_lock);
hlist_add_head(&core->debug_node, &clk_debug_list);
if (inited)
clk_debug_create_one(core, rootdir);
mutex_unlock(&clk_debug_lock);
}
/**
* clk_debug_unregister - remove a clk node from the debugfs clk directory
* @core: the clk being removed from the debugfs clk directory
*
* Dynamically removes a clk and all its child nodes from the
* debugfs clk directory if clk->dentry points to debugfs created by
* clk_debug_register in __clk_core_init.
*/
static void clk_debug_unregister(struct clk_core *core)
{
mutex_lock(&clk_debug_lock);
hlist_del_init(&core->debug_node);
debugfs_remove_recursive(core->dentry);
core->dentry = NULL;
mutex_unlock(&clk_debug_lock);
}
/**
* clk_debug_init - lazily populate the debugfs clk directory
*
* clks are often initialized very early during boot before memory can be
* dynamically allocated and well before debugfs is setup. This function
* populates the debugfs clk directory once at boot-time when we know that
 * debugfs is setup. It should only be called once at boot-time; all clks
 * added dynamically afterwards are registered with clk_debug_register.
*/
static int __init clk_debug_init(void)
{
struct clk_core *core;
#ifdef CLOCK_ALLOW_WRITE_DEBUGFS
pr_warn("\n");
pr_warn("********************************************************************\n");
pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
pr_warn("** **\n");
pr_warn("** WRITEABLE clk DebugFS SUPPORT HAS BEEN ENABLED IN THIS KERNEL **\n");
pr_warn("** **\n");
pr_warn("** This means that this kernel is built to expose clk operations **\n");
pr_warn("** such as parent or rate setting, enabling, disabling, etc. **\n");
pr_warn("** to userspace, which may compromise security on your system. **\n");
pr_warn("** **\n");
pr_warn("** If you see this message and you are not debugging the **\n");
pr_warn("** kernel, report this immediately to your vendor! **\n");
pr_warn("** **\n");
pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
pr_warn("********************************************************************\n");
#endif
rootdir = debugfs_create_dir("clk", NULL);
debugfs_create_file("clk_summary", 0444, rootdir, &all_lists,
&clk_summary_fops);
debugfs_create_file("clk_dump", 0444, rootdir, &all_lists,
&clk_dump_fops);
debugfs_create_file("clk_orphan_summary", 0444, rootdir, &orphan_list,
&clk_summary_fops);
debugfs_create_file("clk_orphan_dump", 0444, rootdir, &orphan_list,
&clk_dump_fops);
mutex_lock(&clk_debug_lock);
hlist_for_each_entry(core, &clk_debug_list, debug_node)
clk_debug_create_one(core, rootdir);
inited = 1;
mutex_unlock(&clk_debug_lock);
return 0;
}
late_initcall(clk_debug_init);
#else
static inline void clk_debug_register(struct clk_core *core) { }
static inline void clk_debug_unregister(struct clk_core *core)
{
}
#endif
static void clk_core_reparent_orphans_nolock(void)
{
struct clk_core *orphan;
struct hlist_node *tmp2;
/*
 * Walk the list of orphan clocks and reparent any that have newly
 * found a parent.
*/
hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
struct clk_core *parent = __clk_init_parent(orphan);
/*
* We need to use __clk_set_parent_before() and _after() to
* properly migrate any prepare/enable count of the orphan
* clock. This is important for CLK_IS_CRITICAL clocks, which
* are enabled during init but might not have a parent yet.
*/
if (parent) {
/* update the clk tree topology */
__clk_set_parent_before(orphan, parent);
__clk_set_parent_after(orphan, parent, NULL);
__clk_recalc_accuracies(orphan);
__clk_recalc_rates(orphan, true, 0);
/*
* __clk_init_parent() will set the initial req_rate to
* 0 if the clock doesn't have clk_ops::recalc_rate and
* is an orphan when it's registered.
*
* 'req_rate' is used by clk_set_rate_range() and
* clk_put() to trigger a clk_set_rate() call whenever
* the boundaries are modified. Let's make sure
* 'req_rate' is set to something non-zero so that
* clk_set_rate_range() doesn't drop the frequency.
*/
orphan->req_rate = orphan->rate;
}
}
}
/**
* __clk_core_init - initialize the data structures in a struct clk_core
* @core: clk_core being initialized
*
* Initializes the lists in struct clk_core, queries the hardware for the
* parent and rate and sets them both.
*/
static int __clk_core_init(struct clk_core *core)
{
int ret;
struct clk_core *parent;
unsigned long rate;
int phase;
clk_prepare_lock();
/*
* Set hw->core after grabbing the prepare_lock to synchronize with
* callers of clk_core_fill_parent_index() where we treat hw->core
* being NULL as the clk not being registered yet. This is crucial so
* that clks aren't parented until their parent is fully registered.
*/
core->hw->core = core;
ret = clk_pm_runtime_get(core);
if (ret)
goto unlock;
/* check to see if a clock with this name is already registered */
if (clk_core_lookup(core->name)) {
pr_debug("%s: clk %s already initialized\n",
__func__, core->name);
ret = -EEXIST;
goto out;
}
/* check that clk_ops are sane. See Documentation/driver-api/clk.rst */
if (core->ops->set_rate &&
!((core->ops->round_rate || core->ops->determine_rate) &&
core->ops->recalc_rate)) {
pr_err("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
__func__, core->name);
ret = -EINVAL;
goto out;
}
if (core->ops->set_parent && !core->ops->get_parent) {
pr_err("%s: %s must implement .get_parent & .set_parent\n",
__func__, core->name);
ret = -EINVAL;
goto out;
}
if (core->ops->set_parent && !core->ops->determine_rate) {
pr_err("%s: %s must implement .set_parent & .determine_rate\n",
__func__, core->name);
ret = -EINVAL;
goto out;
}
if (core->num_parents > 1 && !core->ops->get_parent) {
pr_err("%s: %s must implement .get_parent as it has multi parents\n",
__func__, core->name);
ret = -EINVAL;
goto out;
}
if (core->ops->set_rate_and_parent &&
!(core->ops->set_parent && core->ops->set_rate)) {
pr_err("%s: %s must implement .set_parent & .set_rate\n",
__func__, core->name);
ret = -EINVAL;
goto out;
}
/*
* optional platform-specific magic
*
 * The .init callback is not used by any of the basic clock types, but
 * exists for weird hardware that must perform initialization magic for
 * CCF to get an accurate view of the clock for any other callbacks. It
 * may also be used when a provider needs to perform dynamic
 * allocations. Such allocations must be freed in the terminate()
 * callback.
 * This callback shall not be used to initialize the parameters state,
 * such as rate, parent, etc ...
 *
 * If it exists, this callback should be called before any other
 * callback of the clock.
*/
if (core->ops->init) {
ret = core->ops->init(core->hw);
if (ret)
goto out;
}
parent = core->parent = __clk_init_parent(core);
/*
* Populate core->parent if parent has already been clk_core_init'd. If
* parent has not yet been clk_core_init'd then place clk in the orphan
* list. If clk doesn't have any parents then place it in the root
* clk list.
*
* Every time a new clk is clk_init'd then we walk the list of orphan
* clocks and re-parent any that are children of the clock currently
* being clk_init'd.
*/
if (parent) {
hlist_add_head(&core->child_node, &parent->children);
core->orphan = parent->orphan;
} else if (!core->num_parents) {
hlist_add_head(&core->child_node, &clk_root_list);
core->orphan = false;
} else {
hlist_add_head(&core->child_node, &clk_orphan_list);
core->orphan = true;
}
/*
* Set clk's accuracy. The preferred method is to use
* .recalc_accuracy. For simple clocks and lazy developers the default
* fallback is to use the parent's accuracy. If a clock doesn't have a
* parent (or is orphaned) then accuracy is set to zero (perfect
* clock).
*/
if (core->ops->recalc_accuracy)
core->accuracy = core->ops->recalc_accuracy(core->hw,
clk_core_get_accuracy_no_lock(parent));
else if (parent)
core->accuracy = parent->accuracy;
else
core->accuracy = 0;
/*
* Set clk's phase by clk_core_get_phase() caching the phase.
* Since a phase is by definition relative to its parent, just
* query the current clock phase, or just assume it's in phase.
*/
phase = clk_core_get_phase(core);
if (phase < 0) {
ret = phase;
pr_warn("%s: Failed to get phase for clk '%s'\n", __func__,
core->name);
goto out;
}
/*
* Set clk's duty cycle.
*/
clk_core_update_duty_cycle_nolock(core);
/*
* Set clk's rate. The preferred method is to use .recalc_rate. For
* simple clocks and lazy developers the default fallback is to use the
* parent's rate. If a clock doesn't have a parent (or is orphaned)
* then rate is set to zero.
*/
if (core->ops->recalc_rate)
rate = core->ops->recalc_rate(core->hw,
clk_core_get_rate_nolock(parent));
else if (parent)
rate = parent->rate;
else
rate = 0;
core->rate = core->req_rate = rate;
/*
* Enable CLK_IS_CRITICAL clocks so newly added critical clocks
* don't get accidentally disabled when walking the orphan tree and
* reparenting clocks
*/
if (core->flags & CLK_IS_CRITICAL) {
ret = clk_core_prepare(core);
if (ret) {
pr_warn("%s: critical clk '%s' failed to prepare\n",
__func__, core->name);
goto out;
}
ret = clk_core_enable_lock(core);
if (ret) {
pr_warn("%s: critical clk '%s' failed to enable\n",
__func__, core->name);
clk_core_unprepare(core);
goto out;
}
}
clk_core_reparent_orphans_nolock();
kref_init(&core->ref);
out:
clk_pm_runtime_put(core);
unlock:
if (ret) {
hlist_del_init(&core->child_node);
core->hw->core = NULL;
}
clk_prepare_unlock();
if (!ret)
clk_debug_register(core);
return ret;
}
/**
* clk_core_link_consumer - Add a clk consumer to the list of consumers in a clk_core
* @core: clk to add consumer to
* @clk: consumer to link to a clk
*/
static void clk_core_link_consumer(struct clk_core *core, struct clk *clk)
{
clk_prepare_lock();
hlist_add_head(&clk->clks_node, &core->clks);
clk_prepare_unlock();
}
/**
* clk_core_unlink_consumer - Remove a clk consumer from the list of consumers in a clk_core
* @clk: consumer to unlink
*/
static void clk_core_unlink_consumer(struct clk *clk)
{
lockdep_assert_held(&prepare_lock);
hlist_del(&clk->clks_node);
}
/**
* alloc_clk - Allocate a clk consumer, but leave it unlinked to the clk_core
* @core: clk to allocate a consumer for
* @dev_id: string describing device name
* @con_id: connection ID string on device
*
* Returns: clk consumer left unlinked from the consumer list
*/
static struct clk *alloc_clk(struct clk_core *core, const char *dev_id,
const char *con_id)
{
struct clk *clk;
clk = kzalloc(sizeof(*clk), GFP_KERNEL);
if (!clk)
return ERR_PTR(-ENOMEM);
clk->core = core;
clk->dev_id = dev_id;
clk->con_id = kstrdup_const(con_id, GFP_KERNEL);
clk->max_rate = ULONG_MAX;
return clk;
}
/**
* free_clk - Free a clk consumer
* @clk: clk consumer to free
*
* Note, this assumes the clk has been unlinked from the clk_core consumer
* list.
*/
static void free_clk(struct clk *clk)
{
kfree_const(clk->con_id);
kfree(clk);
}
/**
* clk_hw_create_clk: Allocate and link a clk consumer to a clk_core given
* a clk_hw
* @dev: clk consumer device
* @hw: clk_hw associated with the clk being consumed
* @dev_id: string describing device name
* @con_id: connection ID string on device
*
* This is the main function used to create a clk pointer for use by clk
* consumers. It connects a consumer to the clk_core and clk_hw structures
* used by the framework and clk provider respectively.
*/
struct clk *clk_hw_create_clk(struct device *dev, struct clk_hw *hw,
const char *dev_id, const char *con_id)
{
struct clk *clk;
struct clk_core *core;
/* This is to allow this function to be chained to others */
if (IS_ERR_OR_NULL(hw))
return ERR_CAST(hw);
core = hw->core;
clk = alloc_clk(core, dev_id, con_id);
if (IS_ERR(clk))
return clk;
clk->dev = dev;
if (!try_module_get(core->owner)) {
free_clk(clk);
return ERR_PTR(-ENOENT);
}
kref_get(&core->ref);
clk_core_link_consumer(core, clk);
return clk;
}
/**
* clk_hw_get_clk - get clk consumer given an clk_hw
* @hw: clk_hw associated with the clk being consumed
* @con_id: connection ID string on device
*
 * Returns: new clk consumer
 *
 * This is the function to be used by providers which need
 * to get a consumer clk and act on the clock element.
 * Calls to this function must be balanced with calls to clk_put().
*/
struct clk *clk_hw_get_clk(struct clk_hw *hw, const char *con_id)
{
struct device *dev = hw->core->dev;
const char *name = dev ? dev_name(dev) : NULL;
return clk_hw_create_clk(dev, hw, name, con_id);
}
EXPORT_SYMBOL(clk_hw_get_clk);
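/*
 * Illustrative sketch: a provider acting as a consumer of its own clock;
 * 'hw' is assumed to be an already registered clk_hw and the handle must
 * be balanced with clk_put().
 *
 *	struct clk *clk = clk_hw_get_clk(hw, NULL);
 *
 *	if (!IS_ERR(clk)) {
 *		ret = clk_prepare_enable(clk);
 *		...
 *		clk_disable_unprepare(clk);
 *		clk_put(clk);
 *	}
 */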
static int clk_cpy_name(const char **dst_p, const char *src, bool must_exist)
{
const char *dst;
if (!src) {
if (must_exist)
return -EINVAL;
return 0;
}
*dst_p = dst = kstrdup_const(src, GFP_KERNEL);
if (!dst)
return -ENOMEM;
return 0;
}
static int clk_core_populate_parent_map(struct clk_core *core,
const struct clk_init_data *init)
{
u8 num_parents = init->num_parents;
const char * const *parent_names = init->parent_names;
const struct clk_hw **parent_hws = init->parent_hws;
const struct clk_parent_data *parent_data = init->parent_data;
int i, ret = 0;
struct clk_parent_map *parents, *parent;
if (!num_parents)
return 0;
/*
* Avoid unnecessary string look-ups of clk_core's possible parents by
* having a cache of names/clk_hw pointers to clk_core pointers.
*/
parents = kcalloc(num_parents, sizeof(*parents), GFP_KERNEL);
core->parents = parents;
if (!parents)
return -ENOMEM;
/* Copy everything over because it might be __initdata */
for (i = 0, parent = parents; i < num_parents; i++, parent++) {
parent->index = -1;
if (parent_names) {
/* throw a WARN if any entries are NULL */
WARN(!parent_names[i],
"%s: invalid NULL in %s's .parent_names\n",
__func__, core->name);
ret = clk_cpy_name(&parent->name, parent_names[i],
true);
} else if (parent_data) {
parent->hw = parent_data[i].hw;
parent->index = parent_data[i].index;
ret = clk_cpy_name(&parent->fw_name,
parent_data[i].fw_name, false);
if (!ret)
ret = clk_cpy_name(&parent->name,
parent_data[i].name,
false);
} else if (parent_hws) {
parent->hw = parent_hws[i];
} else {
ret = -EINVAL;
WARN(1, "Must specify parents if num_parents > 0\n");
}
if (ret) {
do {
kfree_const(parents[i].name);
kfree_const(parents[i].fw_name);
} while (--i >= 0);
kfree(parents);
return ret;
}
}
return 0;
}
static void clk_core_free_parent_map(struct clk_core *core)
{
int i = core->num_parents;
if (!core->num_parents)
return;
while (--i >= 0) {
kfree_const(core->parents[i].name);
kfree_const(core->parents[i].fw_name);
}
kfree(core->parents);
}
static struct clk *
__clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
{
int ret;
struct clk_core *core;
const struct clk_init_data *init = hw->init;
/*
 * The init data is not supposed to be used outside of the registration path.
* Set it to NULL so that provider drivers can't use it either and so that
* we catch use of hw->init early on in the core.
*/
hw->init = NULL;
core = kzalloc(sizeof(*core), GFP_KERNEL);
if (!core) {
ret = -ENOMEM;
goto fail_out;
}
core->name = kstrdup_const(init->name, GFP_KERNEL);
if (!core->name) {
ret = -ENOMEM;
goto fail_name;
}
if (WARN_ON(!init->ops)) {
ret = -EINVAL;
goto fail_ops;
}
core->ops = init->ops;
if (dev && pm_runtime_enabled(dev))
core->rpm_enabled = true;
core->dev = dev;
core->of_node = np;
if (dev && dev->driver)
core->owner = dev->driver->owner;
core->hw = hw;
core->flags = init->flags;
core->num_parents = init->num_parents;
core->min_rate = 0;
core->max_rate = ULONG_MAX;
ret = clk_core_populate_parent_map(core, init);
if (ret)
goto fail_parents;
INIT_HLIST_HEAD(&core->clks);
/*
* Don't call clk_hw_create_clk() here because that would pin the
* provider module to itself and prevent it from ever being removed.
*/
hw->clk = alloc_clk(core, NULL, NULL);
if (IS_ERR(hw->clk)) {
ret = PTR_ERR(hw->clk);
goto fail_create_clk;
}
clk_core_link_consumer(core, hw->clk);
ret = __clk_core_init(core);
if (!ret)
return hw->clk;
clk_prepare_lock();
clk_core_unlink_consumer(hw->clk);
clk_prepare_unlock();
free_clk(hw->clk);
hw->clk = NULL;
fail_create_clk:
clk_core_free_parent_map(core);
fail_parents:
fail_ops:
kfree_const(core->name);
fail_name:
kfree(core);
fail_out:
return ERR_PTR(ret);
}
/**
* dev_or_parent_of_node() - Get device node of @dev or @dev's parent
* @dev: Device to get device node of
*
 * Return: device node pointer of @dev, or the device node pointer of
 * @dev->parent if @dev doesn't have a device node, or NULL if neither
 * @dev nor @dev->parent has a device node.
*/
static struct device_node *dev_or_parent_of_node(struct device *dev)
{
struct device_node *np;
if (!dev)
return NULL;
np = dev_of_node(dev);
if (!np)
np = dev_of_node(dev->parent);
return np;
}
/**
* clk_register - allocate a new clock, register it and return an opaque cookie
* @dev: device that is registering this clock
* @hw: link to hardware-specific clock data
*
* clk_register is the *deprecated* interface for populating the clock tree with
* new clock nodes. Use clk_hw_register() instead.
*
 * Returns: a pointer to the newly allocated struct clk which
 * cannot be dereferenced by driver code but may be used in conjunction with the
 * rest of the clock API. In the event of an error clk_register will return an
 * error pointer; drivers must test for an error with IS_ERR() after calling
 * clk_register.
*/
struct clk *clk_register(struct device *dev, struct clk_hw *hw)
{
return __clk_register(dev, dev_or_parent_of_node(dev), hw);
}
EXPORT_SYMBOL_GPL(clk_register);
/**
* clk_hw_register - register a clk_hw and return an error code
* @dev: device that is registering this clock
* @hw: link to hardware-specific clock data
*
* clk_hw_register is the primary interface for populating the clock tree with
* new clock nodes. It returns an integer equal to zero indicating success or
* less than zero indicating failure. Drivers must test for an error code after
* calling clk_hw_register().
*/
int clk_hw_register(struct device *dev, struct clk_hw *hw)
{
return PTR_ERR_OR_ZERO(__clk_register(dev, dev_or_parent_of_node(dev),
hw));
}
EXPORT_SYMBOL_GPL(clk_hw_register);
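/*
 * Illustrative sketch of a provider registering a clk_hw; 'my_clk_ops' and
 * 'my_hw' are hypothetical. The init data only needs to live for the
 * duration of the call, since __clk_register() copies the strings and
 * clears hw->init.
 *
 *	static const struct clk_init_data init = {
 *		.name = "my_clk",
 *		.ops = &my_clk_ops,
 *		.parent_names = (const char *[]){ "xo" },
 *		.num_parents = 1,
 *	};
 *
 *	my_hw.init = &init;
 *	ret = clk_hw_register(dev, &my_hw);
 */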
/*
* of_clk_hw_register - register a clk_hw and return an error code
* @node: device_node of device that is registering this clock
* @hw: link to hardware-specific clock data
*
* of_clk_hw_register() is the primary interface for populating the clock tree
* with new clock nodes when a struct device is not available, but a struct
* device_node is. It returns an integer equal to zero indicating success or
* less than zero indicating failure. Drivers must test for an error code after
* calling of_clk_hw_register().
*/
int of_clk_hw_register(struct device_node *node, struct clk_hw *hw)
{
return PTR_ERR_OR_ZERO(__clk_register(NULL, node, hw));
}
EXPORT_SYMBOL_GPL(of_clk_hw_register);
/* Free memory allocated for a clock. */
static void __clk_release(struct kref *ref)
{
struct clk_core *core = container_of(ref, struct clk_core, ref);
lockdep_assert_held(&prepare_lock);
clk_core_free_parent_map(core);
kfree_const(core->name);
kfree(core);
}
/*
* Empty clk_ops for unregistered clocks. These are used temporarily
 * after clk_unregister() was called on a clock and until the last clock
 * consumer calls clk_put() and the struct clk object is freed.
*/
static int clk_nodrv_prepare_enable(struct clk_hw *hw)
{
return -ENXIO;
}
static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
{
WARN_ON_ONCE(1);
}
static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
return -ENXIO;
}
static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
{
return -ENXIO;
}
static int clk_nodrv_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
return -ENXIO;
}
static const struct clk_ops clk_nodrv_ops = {
.enable = clk_nodrv_prepare_enable,
.disable = clk_nodrv_disable_unprepare,
.prepare = clk_nodrv_prepare_enable,
.unprepare = clk_nodrv_disable_unprepare,
.determine_rate = clk_nodrv_determine_rate,
.set_rate = clk_nodrv_set_rate,
.set_parent = clk_nodrv_set_parent,
};
static void clk_core_evict_parent_cache_subtree(struct clk_core *root,
const struct clk_core *target)
{
int i;
struct clk_core *child;
for (i = 0; i < root->num_parents; i++)
if (root->parents[i].core == target)
root->parents[i].core = NULL;
hlist_for_each_entry(child, &root->children, child_node)
clk_core_evict_parent_cache_subtree(child, target);
}
/* Remove this clk from all parent caches */
static void clk_core_evict_parent_cache(struct clk_core *core)
{
const struct hlist_head **lists;
struct clk_core *root;
lockdep_assert_held(&prepare_lock);
for (lists = all_lists; *lists; lists++)
hlist_for_each_entry(root, *lists, child_node)
clk_core_evict_parent_cache_subtree(root, core);
}
/**
* clk_unregister - unregister a currently registered clock
* @clk: clock to unregister
*/
void clk_unregister(struct clk *clk)
{
unsigned long flags;
const struct clk_ops *ops;
if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
return;
clk_debug_unregister(clk->core);
clk_prepare_lock();
ops = clk->core->ops;
if (ops == &clk_nodrv_ops) {
pr_err("%s: unregistered clock: %s\n", __func__,
clk->core->name);
goto unlock;
}
/*
* Assign empty clock ops for consumers that might still hold
* a reference to this clock.
*/
flags = clk_enable_lock();
clk->core->ops = &clk_nodrv_ops;
clk_enable_unlock(flags);
if (ops->terminate)
ops->terminate(clk->core->hw);
if (!hlist_empty(&clk->core->children)) {
struct clk_core *child;
struct hlist_node *t;
/* Reparent all children to the orphan list. */
hlist_for_each_entry_safe(child, t, &clk->core->children,
child_node)
clk_core_set_parent_nolock(child, NULL);
}
clk_core_evict_parent_cache(clk->core);
hlist_del_init(&clk->core->child_node);
if (clk->core->prepare_count)
pr_warn("%s: unregistering prepared clock: %s\n",
__func__, clk->core->name);
if (clk->core->protect_count)
pr_warn("%s: unregistering protected clock: %s\n",
__func__, clk->core->name);
kref_put(&clk->core->ref, __clk_release);
free_clk(clk);
unlock:
clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unregister);
/**
* clk_hw_unregister - unregister a currently registered clk_hw
* @hw: hardware-specific clock data to unregister
*/
void clk_hw_unregister(struct clk_hw *hw)
{
clk_unregister(hw->clk);
}
EXPORT_SYMBOL_GPL(clk_hw_unregister);
static void devm_clk_unregister_cb(struct device *dev, void *res)
{
clk_unregister(*(struct clk **)res);
}
static void devm_clk_hw_unregister_cb(struct device *dev, void *res)
{
clk_hw_unregister(*(struct clk_hw **)res);
}
/**
* devm_clk_register - resource managed clk_register()
* @dev: device that is registering this clock
* @hw: link to hardware-specific clock data
*
* Managed clk_register(). This function is *deprecated*, use devm_clk_hw_register() instead.
*
* Clocks returned from this function are automatically clk_unregister()ed on
* driver detach. See clk_register() for more information.
*/
struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
{
struct clk *clk;
struct clk **clkp;
clkp = devres_alloc(devm_clk_unregister_cb, sizeof(*clkp), GFP_KERNEL);
if (!clkp)
return ERR_PTR(-ENOMEM);
clk = clk_register(dev, hw);
if (!IS_ERR(clk)) {
*clkp = clk;
devres_add(dev, clkp);
} else {
devres_free(clkp);
}
return clk;
}
EXPORT_SYMBOL_GPL(devm_clk_register);
/**
* devm_clk_hw_register - resource managed clk_hw_register()
* @dev: device that is registering this clock
* @hw: link to hardware-specific clock data
*
* Managed clk_hw_register(). Clocks registered by this function are
* automatically clk_hw_unregister()ed on driver detach. See clk_hw_register()
* for more information.
*/
int devm_clk_hw_register(struct device *dev, struct clk_hw *hw)
{
struct clk_hw **hwp;
int ret;
hwp = devres_alloc(devm_clk_hw_unregister_cb, sizeof(*hwp), GFP_KERNEL);
if (!hwp)
return -ENOMEM;
ret = clk_hw_register(dev, hw);
if (!ret) {
*hwp = hw;
devres_add(dev, hwp);
} else {
devres_free(hwp);
}
return ret;
}
EXPORT_SYMBOL_GPL(devm_clk_hw_register);
static void devm_clk_release(struct device *dev, void *res)
{
clk_put(*(struct clk **)res);
}
/**
* devm_clk_hw_get_clk - resource managed clk_hw_get_clk()
* @dev: device that is registering this clock
* @hw: clk_hw associated with the clk being consumed
* @con_id: connection ID string on device
*
* Managed clk_hw_get_clk(). Clocks got with this function are
* automatically clk_put() on driver detach. See clk_put()
* for more information.
*/
struct clk *devm_clk_hw_get_clk(struct device *dev, struct clk_hw *hw,
const char *con_id)
{
struct clk *clk;
struct clk **clkp;
/*
 * This should not happen because it would mean we have drivers
 * passing around clk_hw pointers instead of having the caller use
 * proper clk_get() style APIs.
 */
WARN_ON_ONCE(dev != hw->core->dev);
clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
if (!clkp)
return ERR_PTR(-ENOMEM);
clk = clk_hw_get_clk(hw, con_id);
if (!IS_ERR(clk)) {
*clkp = clk;
devres_add(dev, clkp);
} else {
devres_free(clkp);
}
return clk;
}
EXPORT_SYMBOL_GPL(devm_clk_hw_get_clk);
/*
* clkdev helpers
*/
void __clk_put(struct clk *clk)
{
struct module *owner;
if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
return;
clk_prepare_lock();
/*
* Before calling clk_put, all calls to clk_rate_exclusive_get() from a
* given user should be balanced with calls to clk_rate_exclusive_put()
* and by that same consumer
*/
if (WARN_ON(clk->exclusive_count)) {
/* We voiced our concern, let's sanitize the situation */
clk->core->protect_count -= (clk->exclusive_count - 1);
clk_core_rate_unprotect(clk->core);
clk->exclusive_count = 0;
}
hlist_del(&clk->clks_node);
/* If we had any boundaries on that clock, let's drop them. */
if (clk->min_rate > 0 || clk->max_rate < ULONG_MAX)
clk_set_rate_range_nolock(clk, 0, ULONG_MAX);
owner = clk->core->owner;
kref_put(&clk->core->ref, __clk_release);
clk_prepare_unlock();
module_put(owner);
free_clk(clk);
}
/*** clk rate change notifiers ***/
/**
* clk_notifier_register - add a clk rate change notifier
* @clk: struct clk * to watch
* @nb: struct notifier_block * with callback info
*
* Request notification when clk's rate changes. This uses an SRCU
* notifier because we want it to block and notifier unregistrations are
* uncommon. The callbacks associated with the notifier must not
* re-enter into the clk framework by calling any top-level clk APIs;
 * this would cause a nested acquisition of the prepare_lock mutex.
*
* In all notification cases (pre, post and abort rate change) the original
* clock rate is passed to the callback via struct clk_notifier_data.old_rate
* and the new frequency is passed via struct clk_notifier_data.new_rate.
*
* clk_notifier_register() must be called from non-atomic context.
* Returns -EINVAL if called with null arguments, -ENOMEM upon
* allocation failure; otherwise, passes along the return value of
* srcu_notifier_chain_register().
*/
int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
{
struct clk_notifier *cn;
int ret = -ENOMEM;
if (!clk || !nb)
return -EINVAL;
clk_prepare_lock();
/* search the list of notifiers for this clk */
list_for_each_entry(cn, &clk_notifier_list, node)
if (cn->clk == clk)
goto found;
/* if clk wasn't in the notifier list, allocate new clk_notifier */
cn = kzalloc(sizeof(*cn), GFP_KERNEL);
if (!cn)
goto out;
cn->clk = clk;
srcu_init_notifier_head(&cn->notifier_head);
list_add(&cn->node, &clk_notifier_list);
found:
ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
clk->core->notifier_count++;
out:
clk_prepare_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_register);
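/*
 * Illustrative sketch of a rate-change notifier; 'my_clk_cb' and 'my_nb'
 * are hypothetical. Returning NOTIFY_BAD from a PRE_RATE_CHANGE event
 * vetoes the rate change, and the callback must not call back into
 * top-level clk APIs.
 *
 *	static int my_clk_cb(struct notifier_block *nb, unsigned long event,
 *			     void *data)
 *	{
 *		struct clk_notifier_data *ndata = data;
 *
 *		if (event == PRE_RATE_CHANGE && ndata->new_rate > 100000000)
 *			return NOTIFY_BAD;
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_clk_cb };
 *
 *	ret = clk_notifier_register(clk, &my_nb);
 */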
/**
* clk_notifier_unregister - remove a clk rate change notifier
* @clk: struct clk *
* @nb: struct notifier_block * with callback info
*
* Request no further notification for changes to 'clk' and frees memory
* allocated in clk_notifier_register.
*
* Returns -EINVAL if called with null arguments; otherwise, passes
* along the return value of srcu_notifier_chain_unregister().
*/
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
{
struct clk_notifier *cn;
int ret = -ENOENT;
if (!clk || !nb)
return -EINVAL;
clk_prepare_lock();
list_for_each_entry(cn, &clk_notifier_list, node) {
if (cn->clk == clk) {
ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
clk->core->notifier_count--;
/* XXX the notifier code should handle this better */
if (!cn->notifier_head.head) {
srcu_cleanup_notifier_head(&cn->notifier_head);
list_del(&cn->node);
kfree(cn);
}
break;
}
}
clk_prepare_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_unregister);
struct clk_notifier_devres {
struct clk *clk;
struct notifier_block *nb;
};
static void devm_clk_notifier_release(struct device *dev, void *res)
{
struct clk_notifier_devres *devres = res;
clk_notifier_unregister(devres->clk, devres->nb);
}
int devm_clk_notifier_register(struct device *dev, struct clk *clk,
struct notifier_block *nb)
{
struct clk_notifier_devres *devres;
int ret;
devres = devres_alloc(devm_clk_notifier_release,
sizeof(*devres), GFP_KERNEL);
if (!devres)
return -ENOMEM;
ret = clk_notifier_register(clk, nb);
if (!ret) {
devres->clk = clk;
devres->nb = nb;
devres_add(dev, devres);
} else {
devres_free(devres);
}
return ret;
}
EXPORT_SYMBOL_GPL(devm_clk_notifier_register);
#ifdef CONFIG_OF
static void clk_core_reparent_orphans(void)
{
clk_prepare_lock();
clk_core_reparent_orphans_nolock();
clk_prepare_unlock();
}
/**
* struct of_clk_provider - Clock provider registration structure
* @link: Entry in global list of clock providers
* @node: Pointer to device tree node of clock provider
* @get: Get clock callback. Returns NULL or a struct clk for the
* given clock specifier
* @get_hw: Get clk_hw callback. Returns NULL, ERR_PTR or a
* struct clk_hw for the given clock specifier
* @data: context pointer to be passed into @get callback
*/
struct of_clk_provider {
struct list_head link;
struct device_node *node;
struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
struct clk_hw *(*get_hw)(struct of_phandle_args *clkspec, void *data);
void *data;
};
extern struct of_device_id __clk_of_table;
static const struct of_device_id __clk_of_table_sentinel
__used __section("__clk_of_table_end");
static LIST_HEAD(of_clk_providers);
static DEFINE_MUTEX(of_clk_mutex);
struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
void *data)
{
return data;
}
EXPORT_SYMBOL_GPL(of_clk_src_simple_get);
struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec, void *data)
{
return data;
}
EXPORT_SYMBOL_GPL(of_clk_hw_simple_get);
struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
{
struct clk_onecell_data *clk_data = data;
unsigned int idx = clkspec->args[0];
if (idx >= clk_data->clk_num) {
pr_err("%s: invalid clock index %u\n", __func__, idx);
return ERR_PTR(-EINVAL);
}
return clk_data->clks[idx];
}
EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);
struct clk_hw *
of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data)
{
struct clk_hw_onecell_data *hw_data = data;
unsigned int idx = clkspec->args[0];
if (idx >= hw_data->num) {
pr_err("%s: invalid index %u\n", __func__, idx);
return ERR_PTR(-EINVAL);
}
return hw_data->hws[idx];
}
EXPORT_SYMBOL_GPL(of_clk_hw_onecell_get);
/**
* of_clk_add_provider() - Register a clock provider for a node
* @np: Device node pointer associated with clock provider
* @clk_src_get: callback for decoding clock
* @data: context pointer for @clk_src_get callback.
*
* This function is *deprecated*. Use of_clk_add_hw_provider() instead.
*/
int of_clk_add_provider(struct device_node *np,
struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
void *data),
void *data)
{
struct of_clk_provider *cp;
int ret;
if (!np)
return 0;
cp = kzalloc(sizeof(*cp), GFP_KERNEL);
if (!cp)
return -ENOMEM;
cp->node = of_node_get(np);
cp->data = data;
cp->get = clk_src_get;
mutex_lock(&of_clk_mutex);
list_add(&cp->link, &of_clk_providers);
mutex_unlock(&of_clk_mutex);
pr_debug("Added clock from %pOF\n", np);
clk_core_reparent_orphans();
ret = of_clk_set_defaults(np, true);
if (ret < 0)
of_clk_del_provider(np);
fwnode_dev_initialized(&np->fwnode, true);
return ret;
}
EXPORT_SYMBOL_GPL(of_clk_add_provider);
/**
* of_clk_add_hw_provider() - Register a clock provider for a node
* @np: Device node pointer associated with clock provider
* @get: callback for decoding clk_hw
* @data: context pointer for @get callback.
*/
int of_clk_add_hw_provider(struct device_node *np,
struct clk_hw *(*get)(struct of_phandle_args *clkspec,
void *data),
void *data)
{
struct of_clk_provider *cp;
int ret;
if (!np)
return 0;
cp = kzalloc(sizeof(*cp), GFP_KERNEL);
if (!cp)
return -ENOMEM;
cp->node = of_node_get(np);
cp->data = data;
cp->get_hw = get;
mutex_lock(&of_clk_mutex);
list_add(&cp->link, &of_clk_providers);
mutex_unlock(&of_clk_mutex);
pr_debug("Added clk_hw provider from %pOF\n", np);
clk_core_reparent_orphans();
ret = of_clk_set_defaults(np, true);
if (ret < 0)
of_clk_del_provider(np);
fwnode_dev_initialized(&np->fwnode, true);
return ret;
}
EXPORT_SYMBOL_GPL(of_clk_add_hw_provider);
static void devm_of_clk_release_provider(struct device *dev, void *res)
{
of_clk_del_provider(*(struct device_node **)res);
}
/*
* We allow a child device to use its parent device as the clock provider node
* for cases like MFD sub-devices where the child device driver wants to use
* devm_*() APIs but not list the device in DT as a sub-node.
*/
static struct device_node *get_clk_provider_node(struct device *dev)
{
struct device_node *np, *parent_np;
np = dev->of_node;
parent_np = dev->parent ? dev->parent->of_node : NULL;
if (!of_property_present(np, "#clock-cells"))
if (of_property_present(parent_np, "#clock-cells"))
np = parent_np;
return np;
}
/**
* devm_of_clk_add_hw_provider() - Managed clk provider node registration
* @dev: Device acting as the clock provider (used for DT node and lifetime)
* @get: callback for decoding clk_hw
* @data: context pointer for @get callback
*
 * Registers a clock provider for the given device's node. If the device has
 * no DT node or if the device node lacks clock provider information
 * (#clock-cells), then the parent device's node is scanned for this
 * information. If the parent node has #clock-cells, it is used for the
 * registration. The provider is
* automatically released at device exit.
*
* Return: 0 on success or an errno on failure.
*/
int devm_of_clk_add_hw_provider(struct device *dev,
struct clk_hw *(*get)(struct of_phandle_args *clkspec,
void *data),
void *data)
{
struct device_node **ptr, *np;
int ret;
ptr = devres_alloc(devm_of_clk_release_provider, sizeof(*ptr),
GFP_KERNEL);
if (!ptr)
return -ENOMEM;
np = get_clk_provider_node(dev);
ret = of_clk_add_hw_provider(np, get, data);
if (!ret) {
*ptr = np;
devres_add(dev, ptr);
} else {
devres_free(ptr);
}
return ret;
}
EXPORT_SYMBOL_GPL(devm_of_clk_add_hw_provider);
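/*
 * Illustrative sketch: registering a provider that hands out clk_hws by
 * index; 'hw_data' is a hypothetical struct clk_hw_onecell_data that the
 * driver has filled in with ->num and ->hws[] before this call.
 *
 *	ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
 *					  hw_data);
 */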
/**
* of_clk_del_provider() - Remove a previously registered clock provider
* @np: Device node pointer associated with clock provider
*/
void of_clk_del_provider(struct device_node *np)
{
struct of_clk_provider *cp;
if (!np)
return;
mutex_lock(&of_clk_mutex);
list_for_each_entry(cp, &of_clk_providers, link) {
if (cp->node == np) {
list_del(&cp->link);
fwnode_dev_initialized(&np->fwnode, false);
of_node_put(cp->node);
kfree(cp);
break;
}
}
mutex_unlock(&of_clk_mutex);
}
EXPORT_SYMBOL_GPL(of_clk_del_provider);
/**
* of_parse_clkspec() - Parse a DT clock specifier for a given device node
* @np: device node to parse clock specifier from
* @index: index of phandle to parse clock out of. If index < 0, @name is used
* @name: clock name to find and parse. If name is NULL, the index is used
* @out_args: Result of parsing the clock specifier
*
* Parses a device node's "clocks" and "clock-names" properties to find the
* phandle and cells for the index or name that is desired. The resulting clock
* specifier is placed into @out_args, or an errno is returned when there's a
* parsing error. The @index argument is ignored if @name is non-NULL.
*
* Example:
*
* phandle1: clock-controller@1 {
* #clock-cells = <2>;
* }
*
* phandle2: clock-controller@2 {
* #clock-cells = <1>;
* }
*
* clock-consumer@3 {
* clocks = <&phandle1 1 2 &phandle2 3>;
* clock-names = "name1", "name2";
* }
*
* To get a device_node for `clock-controller@2' node you may call this
* function a few different ways:
*
* of_parse_clkspec(clock-consumer@3, -1, "name2", &args);
* of_parse_clkspec(clock-consumer@3, 1, NULL, &args);
* of_parse_clkspec(clock-consumer@3, 1, "name2", &args);
*
* Return: 0 upon successfully parsing the clock specifier. Otherwise, -ENOENT
* if @name is NULL or -EINVAL if @name is non-NULL and it can't be found in
* the "clock-names" property of @np.
*/
static int of_parse_clkspec(const struct device_node *np, int index,
const char *name, struct of_phandle_args *out_args)
{
int ret = -ENOENT;
/* Walk up the tree of devices looking for a clock property that matches */
while (np) {
/*
* For named clocks, first look up the name in the
* "clock-names" property. If it cannot be found, then index
* will be an error code and of_parse_phandle_with_args() will
* return -EINVAL.
*/
if (name)
index = of_property_match_string(np, "clock-names", name);
ret = of_parse_phandle_with_args(np, "clocks", "#clock-cells",
index, out_args);
if (!ret)
break;
if (name && index >= 0)
break;
/*
* No matching clock found on this node. If the parent node
* has a "clock-ranges" property, then we can try one of its
* clocks.
*/
np = np->parent;
if (np && !of_get_property(np, "clock-ranges", NULL))
break;
index = 0;
}
return ret;
}
static struct clk_hw *
__of_clk_get_hw_from_provider(struct of_clk_provider *provider,
struct of_phandle_args *clkspec)
{
struct clk *clk;
if (provider->get_hw)
return provider->get_hw(clkspec, provider->data);
clk = provider->get(clkspec, provider->data);
if (IS_ERR(clk))
return ERR_CAST(clk);
return __clk_get_hw(clk);
}
static struct clk_hw *
of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
{
struct of_clk_provider *provider;
struct clk_hw *hw = ERR_PTR(-EPROBE_DEFER);
if (!clkspec)
return ERR_PTR(-EINVAL);
mutex_lock(&of_clk_mutex);
list_for_each_entry(provider, &of_clk_providers, link) {
if (provider->node == clkspec->np) {
hw = __of_clk_get_hw_from_provider(provider, clkspec);
if (!IS_ERR(hw))
break;
}
}
mutex_unlock(&of_clk_mutex);
return hw;
}
/**
* of_clk_get_from_provider() - Lookup a clock from a clock provider
* @clkspec: pointer to a clock specifier data structure
*
 * This function looks up a struct clk from the registered list of clock
 * providers. The input is a clock specifier data structure as returned
 * by a call to of_parse_phandle_with_args().
*/
struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
struct clk_hw *hw = of_clk_get_hw_from_clkspec(clkspec);
return clk_hw_create_clk(NULL, hw, NULL, __func__);
}
EXPORT_SYMBOL_GPL(of_clk_get_from_provider);
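/*
 * A minimal caller sketch (illustrative only): parse a consumer's first
 * clock phandle and look it up from the registered providers:
 *
 *	struct of_phandle_args clkspec;
 *	struct clk *clk;
 *
 *	if (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", 0,
 *					&clkspec)) {
 *		clk = of_clk_get_from_provider(&clkspec);
 *		of_node_put(clkspec.np);
 *	}
 */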
struct clk_hw *of_clk_get_hw(struct device_node *np, int index,
const char *con_id)
{
int ret;
struct clk_hw *hw;
struct of_phandle_args clkspec;
ret = of_parse_clkspec(np, index, con_id, &clkspec);
if (ret)
return ERR_PTR(ret);
hw = of_clk_get_hw_from_clkspec(&clkspec);
of_node_put(clkspec.np);
return hw;
}
static struct clk *__of_clk_get(struct device_node *np,
int index, const char *dev_id,
const char *con_id)
{
struct clk_hw *hw = of_clk_get_hw(np, index, con_id);
return clk_hw_create_clk(NULL, hw, dev_id, con_id);
}
struct clk *of_clk_get(struct device_node *np, int index)
{
return __of_clk_get(np, index, np->full_name, NULL);
}
EXPORT_SYMBOL(of_clk_get);
/**
* of_clk_get_by_name() - Parse and lookup a clock referenced by a device node
* @np: pointer to clock consumer node
* @name: name of consumer's clock input, or NULL for the first clock reference
*
* This function parses the clocks and clock-names properties,
* and uses them to look up the struct clk from the registered list of clock
* providers.
*/
struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
{
if (!np)
return ERR_PTR(-ENOENT);
return __of_clk_get(np, 0, np->full_name, name);
}
EXPORT_SYMBOL(of_clk_get_by_name);
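/*
 * Typical consumer-side usage (illustrative; "bus" is an assumed
 * clock-names entry, not one defined by this file):
 *
 *	struct clk *clk = of_clk_get_by_name(np, "bus");
 *
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 */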
/**
* of_clk_get_parent_count() - Count the number of clocks a device node has
* @np: device node to count
*
* Returns: The number of clocks that are possible parents of this node
*/
unsigned int of_clk_get_parent_count(const struct device_node *np)
{
int count;
count = of_count_phandle_with_args(np, "clocks", "#clock-cells");
if (count < 0)
return 0;
return count;
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_count);
const char *of_clk_get_parent_name(const struct device_node *np, int index)
{
struct of_phandle_args clkspec;
struct property *prop;
const char *clk_name;
const __be32 *vp;
u32 pv;
int rc;
int count;
struct clk *clk;
rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
&clkspec);
if (rc)
return NULL;
index = clkspec.args_count ? clkspec.args[0] : 0;
count = 0;
	/*
	 * If there is an indices property, use it to transfer the index
	 * specified into an array offset for the clock-output-names property.
	 */
of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
if (index == pv) {
index = count;
break;
}
count++;
}
/* We went off the end of 'clock-indices' without finding it */
if (prop && !vp)
return NULL;
if (of_property_read_string_index(clkspec.np, "clock-output-names",
index,
&clk_name) < 0) {
/*
* Best effort to get the name if the clock has been
* registered with the framework. If the clock isn't
* registered, we return the node name as the name of
* the clock as long as #clock-cells = 0.
*/
clk = of_clk_get_from_provider(&clkspec);
if (IS_ERR(clk)) {
if (clkspec.args_count == 0)
clk_name = clkspec.np->name;
else
clk_name = NULL;
} else {
clk_name = __clk_get_name(clk);
clk_put(clk);
}
}
of_node_put(clkspec.np);
return clk_name;
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
/**
* of_clk_parent_fill() - Fill @parents with names of @np's parents and return
* number of parents
* @np: Device node pointer associated with clock provider
 * @parents: pointer to char array that holds the parents' names
* @size: size of the @parents array
*
* Return: number of parents for the clock node.
*/
int of_clk_parent_fill(struct device_node *np, const char **parents,
unsigned int size)
{
unsigned int i = 0;
while (i < size && (parents[i] = of_clk_get_parent_name(np, i)) != NULL)
i++;
return i;
}
EXPORT_SYMBOL_GPL(of_clk_parent_fill);
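/*
 * The two helpers above are commonly combined when registering a clock
 * whose parents are described in DT. A minimal sketch (illustrative,
 * error handling elided):
 *
 *	unsigned int num = of_clk_get_parent_count(np);
 *	const char **parents = kcalloc(num, sizeof(*parents), GFP_KERNEL);
 *
 *	if (parents)
 *		of_clk_parent_fill(np, parents, num);
 */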
struct clock_provider {
void (*clk_init_cb)(struct device_node *);
struct device_node *np;
struct list_head node;
};
/*
 * This function looks for the parent clocks of a node. For each parent
 * found, it checks that the provider for that parent has already been
 * initialized, in which case the parent clock is ready.
 */
static int parent_ready(struct device_node *np)
{
int i = 0;
while (true) {
struct clk *clk = of_clk_get(np, i);
		/* This parent is ready, we can check the next one */
if (!IS_ERR(clk)) {
clk_put(clk);
i++;
continue;
}
/* at least one parent is not ready, we exit now */
if (PTR_ERR(clk) == -EPROBE_DEFER)
return 0;
		/*
		 * Here we assume that the device tree is written
		 * correctly, so any other error means there are no
		 * more parents. As we didn't exit earlier, all the
		 * previous parents are ready. A clock without any
		 * parent has nothing to wait for, so we consider the
		 * absence of parents as being ready too.
		 */
return 1;
}
}
/**
* of_clk_detect_critical() - set CLK_IS_CRITICAL flag from Device Tree
* @np: Device node pointer associated with clock provider
* @index: clock index
* @flags: pointer to top-level framework flags
*
* Detects if the clock-critical property exists and, if so, sets the
* corresponding CLK_IS_CRITICAL flag.
*
 * Do not use this function. It exists only for legacy Device Tree
 * bindings, such as the outdated one-clock-per-node style. Those
 * bindings typically put all clock data into .dts and the Linux
 * driver has no clock data, thus making it impossible to set this flag
 * correctly from the driver. Only those drivers may call
 * of_clk_detect_critical() from their setup functions.
*
* Return: error code or zero on success
*/
int of_clk_detect_critical(struct device_node *np, int index,
unsigned long *flags)
{
struct property *prop;
const __be32 *cur;
uint32_t idx;
if (!np || !flags)
return -EINVAL;
of_property_for_each_u32(np, "clock-critical", prop, cur, idx)
if (index == idx)
*flags |= CLK_IS_CRITICAL;
return 0;
}
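/*
 * An example DT fragment (illustrative; the compatible string is an
 * assumption) that would make the helper above mark clock index 1 as
 * critical:
 *
 *	clock-controller@0 {
 *		compatible = "vendor,legacy-clock";
 *		#clock-cells = <1>;
 *		clock-critical = <1>;
 *	};
 */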
/**
* of_clk_init() - Scan and init clock providers from the DT
* @matches: array of compatible values and init functions for providers.
*
 * This function scans the device tree for matching clock providers
 * and calls their initialization functions, trying to initialize a
 * provider only once its parent clocks are ready.
*/
void __init of_clk_init(const struct of_device_id *matches)
{
const struct of_device_id *match;
struct device_node *np;
struct clock_provider *clk_provider, *next;
bool is_init_done;
bool force = false;
LIST_HEAD(clk_provider_list);
if (!matches)
matches = &__clk_of_table;
/* First prepare the list of the clocks providers */
for_each_matching_node_and_match(np, matches, &match) {
struct clock_provider *parent;
if (!of_device_is_available(np))
continue;
parent = kzalloc(sizeof(*parent), GFP_KERNEL);
if (!parent) {
list_for_each_entry_safe(clk_provider, next,
&clk_provider_list, node) {
list_del(&clk_provider->node);
of_node_put(clk_provider->np);
kfree(clk_provider);
}
of_node_put(np);
return;
}
parent->clk_init_cb = match->data;
parent->np = of_node_get(np);
list_add_tail(&parent->node, &clk_provider_list);
}
while (!list_empty(&clk_provider_list)) {
is_init_done = false;
list_for_each_entry_safe(clk_provider, next,
&clk_provider_list, node) {
if (force || parent_ready(clk_provider->np)) {
/* Don't populate platform devices */
of_node_set_flag(clk_provider->np,
OF_POPULATED);
clk_provider->clk_init_cb(clk_provider->np);
of_clk_set_defaults(clk_provider->np, true);
list_del(&clk_provider->node);
of_node_put(clk_provider->np);
kfree(clk_provider);
is_init_done = true;
}
}
		/*
		 * We didn't manage to initialize any of the remaining
		 * providers during the last loop, so now we initialize
		 * all the remaining ones unconditionally in case a
		 * missing clock parent turns out not to be mandatory.
		 */
if (!is_init_done)
force = true;
}
}
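/*
 * Providers typically enter the __clk_of_table used above through
 * CLK_OF_DECLARE(). A minimal sketch (hypothetical compatible string and
 * function name):
 *
 *	static void __init foo_clk_init(struct device_node *np)
 *	{
 *		... register clocks, then of_clk_add_hw_provider() ...
 *	}
 *	CLK_OF_DECLARE(foo_clk, "vendor,foo-clock", foo_clk_init);
 */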
#endif
| linux-master | drivers/clk/clk.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2011 Sascha Hauer, Pengutronix <[email protected]>
*/
#include <linux/module.h>
#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/platform_device.h>
/*
* DOC: basic fixed multiplier and divider clock that cannot gate
*
* Traits of this clock:
* prepare - clk_prepare only ensures that parents are prepared
* enable - clk_enable only ensures that parents are enabled
* rate - rate is fixed. clk->rate = parent->rate / div * mult
* parent - fixed parent. No clk_set_parent support
*/
static unsigned long clk_factor_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_fixed_factor *fix = to_clk_fixed_factor(hw);
unsigned long long int rate;
rate = (unsigned long long int)parent_rate * fix->mult;
do_div(rate, fix->div);
return (unsigned long)rate;
}
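/*
 * A worked example of the fixed-factor arithmetic above: with a 24 MHz
 * parent, mult = 2 and div = 3, the recalculated rate is
 * 24000000 * 2 / 3 = 16000000 Hz.
 */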
static long clk_factor_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
struct clk_fixed_factor *fix = to_clk_fixed_factor(hw);
if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) {
unsigned long best_parent;
best_parent = (rate / fix->mult) * fix->div;
*prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
}
return (*prate / fix->div) * fix->mult;
}
static int clk_factor_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
/*
* We must report success but we can do so unconditionally because
* clk_factor_round_rate returns values that ensure this call is a
* nop.
*/
return 0;
}
const struct clk_ops clk_fixed_factor_ops = {
.round_rate = clk_factor_round_rate,
.set_rate = clk_factor_set_rate,
.recalc_rate = clk_factor_recalc_rate,
};
EXPORT_SYMBOL_GPL(clk_fixed_factor_ops);
static void devm_clk_hw_register_fixed_factor_release(struct device *dev, void *res)
{
struct clk_fixed_factor *fix = res;
	/*
	 * We cannot use clk_hw_unregister_fixed_factor(), since it would
	 * kfree() the hw, resulting in a double free. Just unregister the
	 * hw and let the devres code kfree() it.
	 */
clk_hw_unregister(&fix->hw);
}
static struct clk_hw *
__clk_hw_register_fixed_factor(struct device *dev, struct device_node *np,
const char *name, const char *parent_name,
const struct clk_hw *parent_hw, int index,
unsigned long flags, unsigned int mult, unsigned int div,
bool devm)
{
struct clk_fixed_factor *fix;
struct clk_init_data init = { };
struct clk_parent_data pdata = { .index = index };
struct clk_hw *hw;
int ret;
/* You can't use devm without a dev */
if (devm && !dev)
return ERR_PTR(-EINVAL);
if (devm)
fix = devres_alloc(devm_clk_hw_register_fixed_factor_release,
sizeof(*fix), GFP_KERNEL);
else
fix = kmalloc(sizeof(*fix), GFP_KERNEL);
if (!fix)
return ERR_PTR(-ENOMEM);
/* struct clk_fixed_factor assignments */
fix->mult = mult;
fix->div = div;
fix->hw.init = &init;
init.name = name;
init.ops = &clk_fixed_factor_ops;
init.flags = flags;
if (parent_name)
init.parent_names = &parent_name;
else if (parent_hw)
init.parent_hws = &parent_hw;
else
init.parent_data = &pdata;
init.num_parents = 1;
hw = &fix->hw;
if (dev)
ret = clk_hw_register(dev, hw);
else
ret = of_clk_hw_register(np, hw);
if (ret) {
if (devm)
devres_free(fix);
else
kfree(fix);
hw = ERR_PTR(ret);
} else if (devm)
devres_add(dev, fix);
return hw;
}
/**
* devm_clk_hw_register_fixed_factor_index - Register a fixed factor clock with
* parent from DT index
* @dev: device that is registering this clock
* @name: name of this clock
* @index: index of phandle in @dev 'clocks' property
* @flags: fixed factor flags
* @mult: multiplier
* @div: divider
*
* Return: Pointer to fixed factor clk_hw structure that was registered or
* an error pointer.
*/
struct clk_hw *devm_clk_hw_register_fixed_factor_index(struct device *dev,
const char *name, unsigned int index, unsigned long flags,
unsigned int mult, unsigned int div)
{
return __clk_hw_register_fixed_factor(dev, NULL, name, NULL, NULL, index,
flags, mult, div, true);
}
EXPORT_SYMBOL_GPL(devm_clk_hw_register_fixed_factor_index);
/**
* devm_clk_hw_register_fixed_factor_parent_hw - Register a fixed factor clock with
* pointer to parent clock
* @dev: device that is registering this clock
* @name: name of this clock
* @parent_hw: pointer to parent clk
* @flags: fixed factor flags
* @mult: multiplier
* @div: divider
*
* Return: Pointer to fixed factor clk_hw structure that was registered or
* an error pointer.
*/
struct clk_hw *devm_clk_hw_register_fixed_factor_parent_hw(struct device *dev,
const char *name, const struct clk_hw *parent_hw,
unsigned long flags, unsigned int mult, unsigned int div)
{
return __clk_hw_register_fixed_factor(dev, NULL, name, NULL, parent_hw,
-1, flags, mult, div, true);
}
EXPORT_SYMBOL_GPL(devm_clk_hw_register_fixed_factor_parent_hw);
struct clk_hw *clk_hw_register_fixed_factor_parent_hw(struct device *dev,
const char *name, const struct clk_hw *parent_hw,
unsigned long flags, unsigned int mult, unsigned int div)
{
return __clk_hw_register_fixed_factor(dev, NULL, name, NULL,
parent_hw, -1, flags, mult, div,
false);
}
EXPORT_SYMBOL_GPL(clk_hw_register_fixed_factor_parent_hw);
struct clk_hw *clk_hw_register_fixed_factor(struct device *dev,
const char *name, const char *parent_name, unsigned long flags,
unsigned int mult, unsigned int div)
{
return __clk_hw_register_fixed_factor(dev, NULL, name, parent_name, NULL, -1,
flags, mult, div, false);
}
EXPORT_SYMBOL_GPL(clk_hw_register_fixed_factor);
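/*
 * Usage sketch (the clock names are illustrative): register "pll_div2"
 * as a divide-by-two child of "pll":
 *
 *	hw = clk_hw_register_fixed_factor(dev, "pll_div2", "pll", 0, 1, 2);
 *	if (IS_ERR(hw))
 *		return PTR_ERR(hw);
 */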
struct clk *clk_register_fixed_factor(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
unsigned int mult, unsigned int div)
{
struct clk_hw *hw;
hw = clk_hw_register_fixed_factor(dev, name, parent_name, flags, mult,
div);
if (IS_ERR(hw))
return ERR_CAST(hw);
return hw->clk;
}
EXPORT_SYMBOL_GPL(clk_register_fixed_factor);
void clk_unregister_fixed_factor(struct clk *clk)
{
struct clk_hw *hw;
hw = __clk_get_hw(clk);
if (!hw)
return;
clk_unregister(clk);
kfree(to_clk_fixed_factor(hw));
}
EXPORT_SYMBOL_GPL(clk_unregister_fixed_factor);
void clk_hw_unregister_fixed_factor(struct clk_hw *hw)
{
struct clk_fixed_factor *fix;
fix = to_clk_fixed_factor(hw);
clk_hw_unregister(hw);
kfree(fix);
}
EXPORT_SYMBOL_GPL(clk_hw_unregister_fixed_factor);
struct clk_hw *devm_clk_hw_register_fixed_factor(struct device *dev,
const char *name, const char *parent_name, unsigned long flags,
unsigned int mult, unsigned int div)
{
return __clk_hw_register_fixed_factor(dev, NULL, name, parent_name, NULL, -1,
flags, mult, div, true);
}
EXPORT_SYMBOL_GPL(devm_clk_hw_register_fixed_factor);
#ifdef CONFIG_OF
static struct clk_hw *_of_fixed_factor_clk_setup(struct device_node *node)
{
struct clk_hw *hw;
const char *clk_name = node->name;
u32 div, mult;
int ret;
if (of_property_read_u32(node, "clock-div", &div)) {
pr_err("%s Fixed factor clock <%pOFn> must have a clock-div property\n",
__func__, node);
return ERR_PTR(-EIO);
}
if (of_property_read_u32(node, "clock-mult", &mult)) {
pr_err("%s Fixed factor clock <%pOFn> must have a clock-mult property\n",
__func__, node);
return ERR_PTR(-EIO);
}
of_property_read_string(node, "clock-output-names", &clk_name);
hw = __clk_hw_register_fixed_factor(NULL, node, clk_name, NULL, NULL, 0,
0, mult, div, false);
if (IS_ERR(hw)) {
/*
* Clear OF_POPULATED flag so that clock registration can be
* attempted again from probe function.
*/
of_node_clear_flag(node, OF_POPULATED);
return ERR_CAST(hw);
}
ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, hw);
if (ret) {
clk_hw_unregister_fixed_factor(hw);
return ERR_PTR(ret);
}
return hw;
}
/**
* of_fixed_factor_clk_setup() - Setup function for simple fixed factor clock
* @node: device node for the clock
*/
void __init of_fixed_factor_clk_setup(struct device_node *node)
{
_of_fixed_factor_clk_setup(node);
}
CLK_OF_DECLARE(fixed_factor_clk, "fixed-factor-clock",
of_fixed_factor_clk_setup);
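/*
 * An example DT node matched by the declaration above (the parent
 * phandle "osc" is illustrative):
 *
 *	clock {
 *		compatible = "fixed-factor-clock";
 *		clocks = <&osc>;
 *		#clock-cells = <0>;
 *		clock-div = <2>;
 *		clock-mult = <1>;
 *	};
 */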
static void of_fixed_factor_clk_remove(struct platform_device *pdev)
{
struct clk_hw *clk = platform_get_drvdata(pdev);
of_clk_del_provider(pdev->dev.of_node);
clk_hw_unregister_fixed_factor(clk);
}
static int of_fixed_factor_clk_probe(struct platform_device *pdev)
{
struct clk_hw *clk;
	/*
	 * This function is not executed when of_fixed_factor_clk_setup
	 * succeeded: in that case the node keeps its OF_POPULATED flag
	 * and no platform device is created for it.
	 */
clk = _of_fixed_factor_clk_setup(pdev->dev.of_node);
if (IS_ERR(clk))
return PTR_ERR(clk);
platform_set_drvdata(pdev, clk);
return 0;
}
static const struct of_device_id of_fixed_factor_clk_ids[] = {
{ .compatible = "fixed-factor-clock" },
{ }
};
MODULE_DEVICE_TABLE(of, of_fixed_factor_clk_ids);
static struct platform_driver of_fixed_factor_clk_driver = {
.driver = {
.name = "of_fixed_factor_clk",
.of_match_table = of_fixed_factor_clk_ids,
},
.probe = of_fixed_factor_clk_probe,
.remove_new = of_fixed_factor_clk_remove,
};
builtin_platform_driver(of_fixed_factor_clk_driver);
#endif
| linux-master | drivers/clk/clk-fixed-factor.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Kunit test for clk rate management
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
/* Needed for clk_hw_get_clk() */
#include "clk.h"
#include <kunit/test.h>
#define DUMMY_CLOCK_INIT_RATE (42 * 1000 * 1000)
#define DUMMY_CLOCK_RATE_1 (142 * 1000 * 1000)
#define DUMMY_CLOCK_RATE_2 (242 * 1000 * 1000)
struct clk_dummy_context {
struct clk_hw hw;
unsigned long rate;
};
static unsigned long clk_dummy_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_dummy_context *ctx =
container_of(hw, struct clk_dummy_context, hw);
return ctx->rate;
}
static int clk_dummy_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
/* Just return the same rate without modifying it */
return 0;
}
static int clk_dummy_maximize_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
/*
* If there's a maximum set, always run the clock at the maximum
* allowed.
*/
if (req->max_rate < ULONG_MAX)
req->rate = req->max_rate;
return 0;
}
static int clk_dummy_minimize_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
/*
* If there's a minimum set, always run the clock at the minimum
* allowed.
*/
if (req->min_rate > 0)
req->rate = req->min_rate;
return 0;
}
static int clk_dummy_set_rate(struct clk_hw *hw,
unsigned long rate,
unsigned long parent_rate)
{
struct clk_dummy_context *ctx =
container_of(hw, struct clk_dummy_context, hw);
ctx->rate = rate;
return 0;
}
static int clk_dummy_single_set_parent(struct clk_hw *hw, u8 index)
{
if (index >= clk_hw_get_num_parents(hw))
return -EINVAL;
return 0;
}
static u8 clk_dummy_single_get_parent(struct clk_hw *hw)
{
return 0;
}
static const struct clk_ops clk_dummy_rate_ops = {
.recalc_rate = clk_dummy_recalc_rate,
.determine_rate = clk_dummy_determine_rate,
.set_rate = clk_dummy_set_rate,
};
static const struct clk_ops clk_dummy_maximize_rate_ops = {
.recalc_rate = clk_dummy_recalc_rate,
.determine_rate = clk_dummy_maximize_rate,
.set_rate = clk_dummy_set_rate,
};
static const struct clk_ops clk_dummy_minimize_rate_ops = {
.recalc_rate = clk_dummy_recalc_rate,
.determine_rate = clk_dummy_minimize_rate,
.set_rate = clk_dummy_set_rate,
};
static const struct clk_ops clk_dummy_single_parent_ops = {
/*
* FIXME: Even though we should probably be able to use
* __clk_mux_determine_rate() here, if we use it and call
* clk_round_rate() or clk_set_rate() with a rate lower than
* what all the parents can provide, it will return -EINVAL.
*
	 * This is due to the fact that it has the undocumented
	 * behaviour of always picking the closest rate higher than the
	 * requested rate. If we get something lower, it thus considers
* that it's not acceptable and will return an error.
*
* It's somewhat inconsistent and creates a weird threshold
* between rates above the parent rate which would be rounded to
* what the parent can provide, but rates below will simply
* return an error.
*/
.determine_rate = __clk_mux_determine_rate_closest,
.set_parent = clk_dummy_single_set_parent,
.get_parent = clk_dummy_single_get_parent,
};
struct clk_multiple_parent_ctx {
struct clk_dummy_context parents_ctx[2];
struct clk_hw hw;
u8 current_parent;
};
static int clk_multiple_parents_mux_set_parent(struct clk_hw *hw, u8 index)
{
struct clk_multiple_parent_ctx *ctx =
container_of(hw, struct clk_multiple_parent_ctx, hw);
if (index >= clk_hw_get_num_parents(hw))
return -EINVAL;
ctx->current_parent = index;
return 0;
}
static u8 clk_multiple_parents_mux_get_parent(struct clk_hw *hw)
{
struct clk_multiple_parent_ctx *ctx =
container_of(hw, struct clk_multiple_parent_ctx, hw);
return ctx->current_parent;
}
static const struct clk_ops clk_multiple_parents_mux_ops = {
.get_parent = clk_multiple_parents_mux_get_parent,
.set_parent = clk_multiple_parents_mux_set_parent,
.determine_rate = __clk_mux_determine_rate_closest,
};
static const struct clk_ops clk_multiple_parents_no_reparent_mux_ops = {
.determine_rate = clk_hw_determine_rate_no_reparent,
.get_parent = clk_multiple_parents_mux_get_parent,
.set_parent = clk_multiple_parents_mux_set_parent,
};
static int clk_test_init_with_ops(struct kunit *test, const struct clk_ops *ops)
{
struct clk_dummy_context *ctx;
struct clk_init_data init = { };
int ret;
ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctx->rate = DUMMY_CLOCK_INIT_RATE;
test->priv = ctx;
init.name = "test_dummy_rate";
init.ops = ops;
ctx->hw.init = &init;
ret = clk_hw_register(NULL, &ctx->hw);
if (ret)
return ret;
return 0;
}
static int clk_test_init(struct kunit *test)
{
return clk_test_init_with_ops(test, &clk_dummy_rate_ops);
}
static int clk_maximize_test_init(struct kunit *test)
{
return clk_test_init_with_ops(test, &clk_dummy_maximize_rate_ops);
}
static int clk_minimize_test_init(struct kunit *test)
{
return clk_test_init_with_ops(test, &clk_dummy_minimize_rate_ops);
}
static void clk_test_exit(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
clk_hw_unregister(&ctx->hw);
}
/*
* Test that the actual rate matches what is returned by clk_get_rate()
*/
static void clk_test_get_rate(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, ctx->rate);
clk_put(clk);
}
/*
* Test that, after a call to clk_set_rate(), the rate returned by
* clk_get_rate() matches.
*
* This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
* modify the requested rate, which is our case in clk_dummy_rate_ops.
*/
static void clk_test_set_get_rate(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
KUNIT_ASSERT_EQ(test,
clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
0);
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
clk_put(clk);
}
/*
* Test that, after several calls to clk_set_rate(), the rate returned
* by clk_get_rate() matches the last one.
*
* This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
* modify the requested rate, which is our case in clk_dummy_rate_ops.
*/
static void clk_test_set_set_get_rate(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
KUNIT_ASSERT_EQ(test,
clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
0);
KUNIT_ASSERT_EQ(test,
clk_set_rate(clk, DUMMY_CLOCK_RATE_2),
0);
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
clk_put(clk);
}
/*
 * Test that clk_round_rate and clk_set_rate are consistent and will
* return the same frequency.
*/
static void clk_test_round_set_get_rate(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long set_rate;
long rounded_rate;
rounded_rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1);
KUNIT_ASSERT_GT(test, rounded_rate, 0);
KUNIT_EXPECT_EQ(test, rounded_rate, DUMMY_CLOCK_RATE_1);
KUNIT_ASSERT_EQ(test,
clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
0);
set_rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, set_rate, 0);
KUNIT_EXPECT_EQ(test, rounded_rate, set_rate);
clk_put(clk);
}
static struct kunit_case clk_test_cases[] = {
KUNIT_CASE(clk_test_get_rate),
KUNIT_CASE(clk_test_set_get_rate),
KUNIT_CASE(clk_test_set_set_get_rate),
KUNIT_CASE(clk_test_round_set_get_rate),
{}
};
/*
* Test suite for a basic rate clock, without any parent.
*
* These tests exercise the rate API with simple scenarios
*/
static struct kunit_suite clk_test_suite = {
.name = "clk-test",
.init = clk_test_init,
.exit = clk_test_exit,
.test_cases = clk_test_cases,
};
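/*
 * Suites like this one are normally handed to the KUnit core with
 * kunit_test_suites() at the end of the file, e.g. (illustrative):
 *
 *	kunit_test_suites(&clk_test_suite);
 */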
static int clk_uncached_test_init(struct kunit *test)
{
struct clk_dummy_context *ctx;
int ret;
ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
test->priv = ctx;
ctx->rate = DUMMY_CLOCK_INIT_RATE;
ctx->hw.init = CLK_HW_INIT_NO_PARENT("test-clk",
&clk_dummy_rate_ops,
CLK_GET_RATE_NOCACHE);
ret = clk_hw_register(NULL, &ctx->hw);
if (ret)
return ret;
return 0;
}
/*
* Test that for an uncached clock, the clock framework doesn't cache
* the rate and clk_get_rate() will return the underlying clock rate
* even if it changed.
*/
static void clk_test_uncached_get_rate(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
/* We change the rate behind the clock framework's back */
ctx->rate = DUMMY_CLOCK_RATE_1;
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
clk_put(clk);
}
/*
* Test that for an uncached clock, clk_set_rate_range() will work
* properly if the rate hasn't changed.
*/
static void clk_test_uncached_set_range(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
KUNIT_ASSERT_EQ(test,
clk_set_rate_range(clk,
DUMMY_CLOCK_RATE_1,
DUMMY_CLOCK_RATE_2),
0);
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
clk_put(clk);
}
/*
* Test that for an uncached clock, clk_set_rate_range() will work
* properly if the rate has changed in hardware.
*
* In this case, it means that if the rate wasn't initially in the range
* we're trying to set, but got changed at some point into the range
* without the kernel knowing about it, its rate shouldn't be affected.
*/
static void clk_test_uncached_updated_rate_set_range(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
/* We change the rate behind the clock framework's back */
ctx->rate = DUMMY_CLOCK_RATE_1 + 1000;
KUNIT_ASSERT_EQ(test,
clk_set_rate_range(clk,
DUMMY_CLOCK_RATE_1,
DUMMY_CLOCK_RATE_2),
0);
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
clk_put(clk);
}
static struct kunit_case clk_uncached_test_cases[] = {
KUNIT_CASE(clk_test_uncached_get_rate),
KUNIT_CASE(clk_test_uncached_set_range),
KUNIT_CASE(clk_test_uncached_updated_rate_set_range),
{}
};
/*
* Test suite for a basic, uncached, rate clock, without any parent.
*
* These tests exercise the rate API with simple scenarios
*/
static struct kunit_suite clk_uncached_test_suite = {
.name = "clk-uncached-test",
.init = clk_uncached_test_init,
.exit = clk_test_exit,
.test_cases = clk_uncached_test_cases,
};
static int
clk_multiple_parents_mux_test_init(struct kunit *test)
{
struct clk_multiple_parent_ctx *ctx;
const char *parents[2] = { "parent-0", "parent-1"};
int ret;
ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
test->priv = ctx;
ctx->parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
&clk_dummy_rate_ops,
0);
ctx->parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
ret = clk_hw_register(NULL, &ctx->parents_ctx[0].hw);
if (ret)
return ret;
ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
&clk_dummy_rate_ops,
0);
ctx->parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
ret = clk_hw_register(NULL, &ctx->parents_ctx[1].hw);
if (ret)
return ret;
ctx->current_parent = 0;
ctx->hw.init = CLK_HW_INIT_PARENTS("test-mux", parents,
&clk_multiple_parents_mux_ops,
CLK_SET_RATE_PARENT);
ret = clk_hw_register(NULL, &ctx->hw);
if (ret)
return ret;
return 0;
}
static void
clk_multiple_parents_mux_test_exit(struct kunit *test)
{
struct clk_multiple_parent_ctx *ctx = test->priv;
clk_hw_unregister(&ctx->hw);
clk_hw_unregister(&ctx->parents_ctx[0].hw);
clk_hw_unregister(&ctx->parents_ctx[1].hw);
}
/*
* Test that for a clock with multiple parents, clk_get_parent()
* actually returns the current one.
*/
static void
clk_test_multiple_parents_mux_get_parent(struct kunit *test)
{
struct clk_multiple_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *parent = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
KUNIT_EXPECT_TRUE(test, clk_is_match(clk_get_parent(clk), parent));
clk_put(parent);
clk_put(clk);
}
/*
* Test that for a clock with a multiple parents, clk_has_parent()
* actually reports all of them as parents.
*/
static void
clk_test_multiple_parents_mux_has_parent(struct kunit *test)
{
struct clk_multiple_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *parent;
parent = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
clk_put(parent);
parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
clk_put(parent);
clk_put(clk);
}
/*
* Test that for a clock with a multiple parents, if we set a range on
* that clock and the parent is changed, its rate after the reparenting
* is still within the range we asked for.
*
* FIXME: clk_set_parent() only does the reparenting but doesn't
* reevaluate whether the new clock rate is within its boundaries or
* not.
*/
static void
clk_test_multiple_parents_mux_set_range_set_parent_get_rate(struct kunit *test)
{
struct clk_multiple_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *parent1, *parent2;
unsigned long rate;
int ret;
kunit_skip(test, "This needs to be fixed in the core.");
parent1 = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent1);
KUNIT_ASSERT_TRUE(test, clk_is_match(clk_get_parent(clk), parent1));
parent2 = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent2);
ret = clk_set_rate(parent1, DUMMY_CLOCK_RATE_1);
KUNIT_ASSERT_EQ(test, ret, 0);
ret = clk_set_rate(parent2, DUMMY_CLOCK_RATE_2);
KUNIT_ASSERT_EQ(test, ret, 0);
ret = clk_set_rate_range(clk,
DUMMY_CLOCK_RATE_1 - 1000,
DUMMY_CLOCK_RATE_1 + 1000);
KUNIT_ASSERT_EQ(test, ret, 0);
ret = clk_set_parent(clk, parent2);
KUNIT_ASSERT_EQ(test, ret, 0);
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 - 1000);
KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
clk_put(parent2);
clk_put(parent1);
clk_put(clk);
}
static struct kunit_case clk_multiple_parents_mux_test_cases[] = {
KUNIT_CASE(clk_test_multiple_parents_mux_get_parent),
KUNIT_CASE(clk_test_multiple_parents_mux_has_parent),
KUNIT_CASE(clk_test_multiple_parents_mux_set_range_set_parent_get_rate),
{}
};
/*
* Test suite for a basic mux clock with two parents, with
* CLK_SET_RATE_PARENT on the child.
*
* These tests exercise the consumer API and check that the state of the
* child and parents are sane and consistent.
*/
static struct kunit_suite
clk_multiple_parents_mux_test_suite = {
.name = "clk-multiple-parents-mux-test",
.init = clk_multiple_parents_mux_test_init,
.exit = clk_multiple_parents_mux_test_exit,
.test_cases = clk_multiple_parents_mux_test_cases,
};
static int
clk_orphan_transparent_multiple_parent_mux_test_init(struct kunit *test)
{
struct clk_multiple_parent_ctx *ctx;
const char *parents[2] = { "missing-parent", "proper-parent"};
int ret;
ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
test->priv = ctx;
ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("proper-parent",
&clk_dummy_rate_ops,
0);
ctx->parents_ctx[1].rate = DUMMY_CLOCK_INIT_RATE;
ret = clk_hw_register(NULL, &ctx->parents_ctx[1].hw);
if (ret)
return ret;
ctx->hw.init = CLK_HW_INIT_PARENTS("test-orphan-mux", parents,
&clk_multiple_parents_mux_ops,
CLK_SET_RATE_PARENT);
ret = clk_hw_register(NULL, &ctx->hw);
if (ret)
return ret;
return 0;
}
static void
clk_orphan_transparent_multiple_parent_mux_test_exit(struct kunit *test)
{
struct clk_multiple_parent_ctx *ctx = test->priv;
clk_hw_unregister(&ctx->hw);
clk_hw_unregister(&ctx->parents_ctx[1].hw);
}
/*
* Test that, for a mux whose current parent hasn't been registered yet and is
* thus orphan, clk_get_parent() will return NULL.
*/
static void
clk_test_orphan_transparent_multiple_parent_mux_get_parent(struct kunit *test)
{
struct clk_multiple_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
KUNIT_EXPECT_PTR_EQ(test, clk_get_parent(clk), NULL);
clk_put(clk);
}
/*
* Test that, for a mux whose current parent hasn't been registered yet,
* calling clk_set_parent() to a valid parent will properly update the
* mux parent and its orphan status.
*/
static void
clk_test_orphan_transparent_multiple_parent_mux_set_parent(struct kunit *test)
{
struct clk_multiple_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *parent, *new_parent;
int ret;
parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
ret = clk_set_parent(clk, parent);
KUNIT_ASSERT_EQ(test, ret, 0);
new_parent = clk_get_parent(clk);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
KUNIT_EXPECT_TRUE(test, clk_is_match(parent, new_parent));
clk_put(parent);
clk_put(clk);
}
/*
* Test that, for a mux that started orphan but got switched to a valid
* parent, calling clk_drop_range() on the mux won't affect the parent
* rate.
*/
static void
clk_test_orphan_transparent_multiple_parent_mux_set_parent_drop_range(struct kunit *test)
{
struct clk_multiple_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *parent;
unsigned long parent_rate, new_parent_rate;
int ret;
parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
parent_rate = clk_get_rate(parent);
KUNIT_ASSERT_GT(test, parent_rate, 0);
ret = clk_set_parent(clk, parent);
KUNIT_ASSERT_EQ(test, ret, 0);
ret = clk_drop_range(clk);
KUNIT_ASSERT_EQ(test, ret, 0);
new_parent_rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, new_parent_rate, 0);
KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
clk_put(parent);
clk_put(clk);
}
/*
* Test that, for a mux that started orphan but got switched to a valid
* parent, the rate of the mux and its new parent are consistent.
*/
static void
clk_test_orphan_transparent_multiple_parent_mux_set_parent_get_rate(struct kunit *test)
{
struct clk_multiple_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *parent;
unsigned long parent_rate, rate;
int ret;
parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
parent_rate = clk_get_rate(parent);
KUNIT_ASSERT_GT(test, parent_rate, 0);
ret = clk_set_parent(clk, parent);
KUNIT_ASSERT_EQ(test, ret, 0);
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, parent_rate, rate);
clk_put(parent);
clk_put(clk);
}
/*
* Test that, for a mux that started orphan but got switched to a valid
* parent, calling clk_put() on the mux won't affect the parent rate.
*/
static void
clk_test_orphan_transparent_multiple_parent_mux_set_parent_put(struct kunit *test)
{
struct clk_multiple_parent_ctx *ctx = test->priv;
struct clk *clk, *parent;
unsigned long parent_rate, new_parent_rate;
int ret;
parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
clk = clk_hw_get_clk(&ctx->hw, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk);
parent_rate = clk_get_rate(parent);
KUNIT_ASSERT_GT(test, parent_rate, 0);
ret = clk_set_parent(clk, parent);
KUNIT_ASSERT_EQ(test, ret, 0);
clk_put(clk);
new_parent_rate = clk_get_rate(parent);
KUNIT_ASSERT_GT(test, new_parent_rate, 0);
KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
clk_put(parent);
}
/*
* Test that, for a mux that started orphan but got switched to a valid
* parent, calling clk_set_rate_range() will affect the parent state if
* its rate is out of range.
*/
static void
clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_modified(struct kunit *test)
{
struct clk_multiple_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *parent;
unsigned long rate;
int ret;
parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
ret = clk_set_parent(clk, parent);
KUNIT_ASSERT_EQ(test, ret, 0);
ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
KUNIT_ASSERT_EQ(test, ret, 0);
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
clk_put(parent);
clk_put(clk);
}
/*
* Test that, for a mux that started orphan but got switched to a valid
* parent, calling clk_set_rate_range() won't affect the parent state if
* its rate is within range.
*/
static void
clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_untouched(struct kunit *test)
{
struct clk_multiple_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *parent;
unsigned long parent_rate, new_parent_rate;
int ret;
parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
parent_rate = clk_get_rate(parent);
KUNIT_ASSERT_GT(test, parent_rate, 0);
ret = clk_set_parent(clk, parent);
KUNIT_ASSERT_EQ(test, ret, 0);
ret = clk_set_rate_range(clk,
DUMMY_CLOCK_INIT_RATE - 1000,
DUMMY_CLOCK_INIT_RATE + 1000);
KUNIT_ASSERT_EQ(test, ret, 0);
new_parent_rate = clk_get_rate(parent);
KUNIT_ASSERT_GT(test, new_parent_rate, 0);
KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
clk_put(parent);
clk_put(clk);
}
/*
* Test that, for a mux whose current parent hasn't been registered yet,
* calling clk_set_rate_range() will succeed, and will be taken into
* account when rounding a rate.
*/
static void
clk_test_orphan_transparent_multiple_parent_mux_set_range_round_rate(struct kunit *test)
{
struct clk_multiple_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
long rate;
int ret;
ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
KUNIT_ASSERT_EQ(test, ret, 0);
rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
clk_put(clk);
}
/*
 * Test that, for a mux that started orphan, was assigned a rate and
* then got switched to a valid parent, its rate is eventually within
* range.
*
* FIXME: Even though we update the rate as part of clk_set_parent(), we
* don't evaluate whether that new rate is within range and needs to be
* adjusted.
*/
static void
clk_test_orphan_transparent_multiple_parent_mux_set_range_set_parent_get_rate(struct kunit *test)
{
struct clk_multiple_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *parent;
unsigned long rate;
int ret;
kunit_skip(test, "This needs to be fixed in the core.");
clk_hw_set_rate_range(hw, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
ret = clk_set_parent(clk, parent);
KUNIT_ASSERT_EQ(test, ret, 0);
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
clk_put(parent);
clk_put(clk);
}
static struct kunit_case clk_orphan_transparent_multiple_parent_mux_test_cases[] = {
KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_get_parent),
KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent),
KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_drop_range),
KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_get_rate),
KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_put),
KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_modified),
KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_untouched),
KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_range_round_rate),
KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_range_set_parent_get_rate),
{}
};
/*
* Test suite for a basic mux clock with two parents. The default parent
* isn't registered, only the second parent is. By default, the clock
* will thus be orphan.
*
* These tests exercise the behaviour of the consumer API when dealing
* with an orphan clock, and how we deal with the transition to a valid
* parent.
*/
static struct kunit_suite clk_orphan_transparent_multiple_parent_mux_test_suite = {
.name = "clk-orphan-transparent-multiple-parent-mux-test",
.init = clk_orphan_transparent_multiple_parent_mux_test_init,
.exit = clk_orphan_transparent_multiple_parent_mux_test_exit,
.test_cases = clk_orphan_transparent_multiple_parent_mux_test_cases,
};
struct clk_single_parent_ctx {
struct clk_dummy_context parent_ctx;
struct clk_hw hw;
};
static int clk_single_parent_mux_test_init(struct kunit *test)
{
struct clk_single_parent_ctx *ctx;
int ret;
ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
test->priv = ctx;
ctx->parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
ctx->parent_ctx.hw.init =
CLK_HW_INIT_NO_PARENT("parent-clk",
&clk_dummy_rate_ops,
0);
ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
if (ret)
return ret;
ctx->hw.init = CLK_HW_INIT("test-clk", "parent-clk",
&clk_dummy_single_parent_ops,
CLK_SET_RATE_PARENT);
ret = clk_hw_register(NULL, &ctx->hw);
if (ret)
return ret;
return 0;
}
static void
clk_single_parent_mux_test_exit(struct kunit *test)
{
struct clk_single_parent_ctx *ctx = test->priv;
clk_hw_unregister(&ctx->hw);
clk_hw_unregister(&ctx->parent_ctx.hw);
}
/*
* Test that for a clock with a single parent, clk_get_parent() actually
* returns the parent.
*/
static void
clk_test_single_parent_mux_get_parent(struct kunit *test)
{
struct clk_single_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *parent = clk_hw_get_clk(&ctx->parent_ctx.hw, NULL);
KUNIT_EXPECT_TRUE(test, clk_is_match(clk_get_parent(clk), parent));
clk_put(parent);
clk_put(clk);
}
/*
* Test that for a clock with a single parent, clk_has_parent() actually
* reports it as a parent.
*/
static void
clk_test_single_parent_mux_has_parent(struct kunit *test)
{
struct clk_single_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *parent = clk_hw_get_clk(&ctx->parent_ctx.hw, NULL);
KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
clk_put(parent);
clk_put(clk);
}
/*
* Test that for a clock that can't modify its rate and with a single
 * parent, if we set disjoint ranges on the parent and then the child,
 * the second will return an error.
 *
 * FIXME: clk_set_rate_range() only considers the current clock when
 * evaluating whether ranges are disjoint and not the upstream clocks'
 * ranges.
*/
static void
clk_test_single_parent_mux_set_range_disjoint_child_last(struct kunit *test)
{
struct clk_single_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *parent;
int ret;
kunit_skip(test, "This needs to be fixed in the core.");
parent = clk_get_parent(clk);
KUNIT_ASSERT_PTR_NE(test, parent, NULL);
ret = clk_set_rate_range(parent, 1000, 2000);
KUNIT_ASSERT_EQ(test, ret, 0);
ret = clk_set_rate_range(clk, 3000, 4000);
KUNIT_EXPECT_LT(test, ret, 0);
clk_put(clk);
}
/*
* Test that for a clock that can't modify its rate and with a single
 * parent, if we set disjoint ranges on the child and then the parent,
 * the second will return an error.
 *
 * FIXME: clk_set_rate_range() only considers the current clock when
 * evaluating whether ranges are disjoint and not the downstream clocks'
 * ranges.
*/
static void
clk_test_single_parent_mux_set_range_disjoint_parent_last(struct kunit *test)
{
struct clk_single_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *parent;
int ret;
kunit_skip(test, "This needs to be fixed in the core.");
parent = clk_get_parent(clk);
KUNIT_ASSERT_PTR_NE(test, parent, NULL);
ret = clk_set_rate_range(clk, 1000, 2000);
KUNIT_ASSERT_EQ(test, ret, 0);
ret = clk_set_rate_range(parent, 3000, 4000);
KUNIT_EXPECT_LT(test, ret, 0);
clk_put(clk);
}
/*
* Test that for a clock that can't modify its rate and with a single
* parent, if we set a range on the parent and then call
* clk_round_rate(), the boundaries of the parent are taken into
* account.
*/
static void
clk_test_single_parent_mux_set_range_round_rate_parent_only(struct kunit *test)
{
struct clk_single_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *parent;
long rate;
int ret;
parent = clk_get_parent(clk);
KUNIT_ASSERT_PTR_NE(test, parent, NULL);
ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
KUNIT_ASSERT_EQ(test, ret, 0);
rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
clk_put(clk);
}
/*
* Test that for a clock that can't modify its rate and with a single
* parent, if we set a range on the parent and a more restrictive one on
* the child, and then call clk_round_rate(), the boundaries of the
* two clocks are taken into account.
*/
static void
clk_test_single_parent_mux_set_range_round_rate_child_smaller(struct kunit *test)
{
struct clk_single_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *parent;
long rate;
int ret;
parent = clk_get_parent(clk);
KUNIT_ASSERT_PTR_NE(test, parent, NULL);
ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
KUNIT_ASSERT_EQ(test, ret, 0);
ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1 + 1000, DUMMY_CLOCK_RATE_2 - 1000);
KUNIT_ASSERT_EQ(test, ret, 0);
rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
clk_put(clk);
}
/*
* Test that for a clock that can't modify its rate and with a single
* parent, if we set a range on the child and a more restrictive one on
* the parent, and then call clk_round_rate(), the boundaries of the
* two clocks are taken into account.
*/
static void
clk_test_single_parent_mux_set_range_round_rate_parent_smaller(struct kunit *test)
{
struct clk_single_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *parent;
long rate;
int ret;
parent = clk_get_parent(clk);
KUNIT_ASSERT_PTR_NE(test, parent, NULL);
ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1 + 1000, DUMMY_CLOCK_RATE_2 - 1000);
KUNIT_ASSERT_EQ(test, ret, 0);
ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
KUNIT_ASSERT_EQ(test, ret, 0);
rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
clk_put(clk);
}
static struct kunit_case clk_single_parent_mux_test_cases[] = {
KUNIT_CASE(clk_test_single_parent_mux_get_parent),
KUNIT_CASE(clk_test_single_parent_mux_has_parent),
KUNIT_CASE(clk_test_single_parent_mux_set_range_disjoint_child_last),
KUNIT_CASE(clk_test_single_parent_mux_set_range_disjoint_parent_last),
KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_child_smaller),
KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_parent_only),
KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_parent_smaller),
{}
};
/*
* Test suite for a basic mux clock with one parent, with
* CLK_SET_RATE_PARENT on the child.
*
* These tests exercise the consumer API and check that the state of the
* child and parent are sane and consistent.
*/
static struct kunit_suite
clk_single_parent_mux_test_suite = {
.name = "clk-single-parent-mux-test",
.init = clk_single_parent_mux_test_init,
.exit = clk_single_parent_mux_test_exit,
.test_cases = clk_single_parent_mux_test_cases,
};
static int clk_orphan_transparent_single_parent_mux_test_init(struct kunit *test)
{
struct clk_single_parent_ctx *ctx;
struct clk_init_data init = { };
const char * const parents[] = { "orphan_parent" };
int ret;
ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
test->priv = ctx;
init.name = "test_orphan_dummy_parent";
init.ops = &clk_dummy_single_parent_ops;
init.parent_names = parents;
init.num_parents = ARRAY_SIZE(parents);
init.flags = CLK_SET_RATE_PARENT;
ctx->hw.init = &init;
ret = clk_hw_register(NULL, &ctx->hw);
if (ret)
return ret;
memset(&init, 0, sizeof(init));
init.name = "orphan_parent";
init.ops = &clk_dummy_rate_ops;
ctx->parent_ctx.hw.init = &init;
ctx->parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
if (ret)
return ret;
return 0;
}
/*
* Test that a mux-only clock, with an initial rate within a range,
* will still have the same rate after the range has been enforced.
*
* See:
* https://lore.kernel.org/linux-clk/[email protected]/
*/
static void clk_test_orphan_transparent_parent_mux_set_range(struct kunit *test)
{
struct clk_single_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate, new_rate;
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_ASSERT_EQ(test,
clk_set_rate_range(clk,
ctx->parent_ctx.rate - 1000,
ctx->parent_ctx.rate + 1000),
0);
new_rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, new_rate, 0);
KUNIT_EXPECT_EQ(test, rate, new_rate);
clk_put(clk);
}
static struct kunit_case clk_orphan_transparent_single_parent_mux_test_cases[] = {
KUNIT_CASE(clk_test_orphan_transparent_parent_mux_set_range),
{}
};
/*
* Test suite for a basic mux clock with one parent. The parent is
* registered after its child. The clock will thus be an orphan when
* registered, but will no longer be when the tests run.
*
 * These tests make sure a clock that used to be orphan has a sane and
 * consistent behaviour.
*/
static struct kunit_suite clk_orphan_transparent_single_parent_test_suite = {
.name = "clk-orphan-transparent-single-parent-test",
.init = clk_orphan_transparent_single_parent_mux_test_init,
.exit = clk_single_parent_mux_test_exit,
.test_cases = clk_orphan_transparent_single_parent_mux_test_cases,
};
struct clk_single_parent_two_lvl_ctx {
struct clk_dummy_context parent_parent_ctx;
struct clk_dummy_context parent_ctx;
struct clk_hw hw;
};
static int
clk_orphan_two_level_root_last_test_init(struct kunit *test)
{
struct clk_single_parent_two_lvl_ctx *ctx;
int ret;
ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
test->priv = ctx;
ctx->parent_ctx.hw.init =
CLK_HW_INIT("intermediate-parent",
"root-parent",
&clk_dummy_single_parent_ops,
CLK_SET_RATE_PARENT);
ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
if (ret)
return ret;
ctx->hw.init =
CLK_HW_INIT("test-clk", "intermediate-parent",
&clk_dummy_single_parent_ops,
CLK_SET_RATE_PARENT);
ret = clk_hw_register(NULL, &ctx->hw);
if (ret)
return ret;
ctx->parent_parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
ctx->parent_parent_ctx.hw.init =
CLK_HW_INIT_NO_PARENT("root-parent",
&clk_dummy_rate_ops,
0);
ret = clk_hw_register(NULL, &ctx->parent_parent_ctx.hw);
if (ret)
return ret;
return 0;
}
static void
clk_orphan_two_level_root_last_test_exit(struct kunit *test)
{
struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
clk_hw_unregister(&ctx->hw);
clk_hw_unregister(&ctx->parent_ctx.hw);
clk_hw_unregister(&ctx->parent_parent_ctx.hw);
}
/*
* Test that, for a clock whose parent used to be orphan, clk_get_rate()
* will return the proper rate.
*/
static void
clk_orphan_two_level_root_last_test_get_rate(struct kunit *test)
{
struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
rate = clk_get_rate(clk);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
clk_put(clk);
}
/*
* Test that, for a clock whose parent used to be orphan,
* clk_set_rate_range() won't affect its rate if it is already within
* range.
*
* See (for Exynos 4210):
* https://lore.kernel.org/linux-clk/[email protected]/
*/
static void
clk_orphan_two_level_root_last_test_set_range(struct kunit *test)
{
struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
int ret;
ret = clk_set_rate_range(clk,
DUMMY_CLOCK_INIT_RATE - 1000,
DUMMY_CLOCK_INIT_RATE + 1000);
KUNIT_ASSERT_EQ(test, ret, 0);
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
clk_put(clk);
}
static struct kunit_case
clk_orphan_two_level_root_last_test_cases[] = {
KUNIT_CASE(clk_orphan_two_level_root_last_test_get_rate),
KUNIT_CASE(clk_orphan_two_level_root_last_test_set_range),
{}
};
/*
* Test suite for a basic, transparent, clock with a parent that is also
* such a clock. The parent's parent is registered last, while the
* parent and its child are registered in that order. The intermediate
* and leaf clocks will thus be orphan when registered, but the leaf
* clock itself will always have its parent and will never be
* reparented. Indeed, it's only orphan because its parent is.
*
* These tests exercise the behaviour of the consumer API when dealing
* with an orphan clock, and how we deal with the transition to a valid
* parent.
*/
static struct kunit_suite
clk_orphan_two_level_root_last_test_suite = {
.name = "clk-orphan-two-level-root-last-test",
.init = clk_orphan_two_level_root_last_test_init,
.exit = clk_orphan_two_level_root_last_test_exit,
.test_cases = clk_orphan_two_level_root_last_test_cases,
};
/*
* Test that clk_set_rate_range won't return an error for a valid range
* and that it will make sure the rate of the clock is within the
* boundaries.
*/
static void clk_range_test_set_range(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
KUNIT_ASSERT_EQ(test,
clk_set_rate_range(clk,
DUMMY_CLOCK_RATE_1,
DUMMY_CLOCK_RATE_2),
0);
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
clk_put(clk);
}
/*
* Test that calling clk_set_rate_range with a minimum rate higher than
* the maximum rate returns an error.
*/
static void clk_range_test_set_range_invalid(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
KUNIT_EXPECT_LT(test,
clk_set_rate_range(clk,
DUMMY_CLOCK_RATE_1 + 1000,
DUMMY_CLOCK_RATE_1),
0);
clk_put(clk);
}
/*
 * Test that users can't set multiple disjoint ranges that would be
 * impossible to meet.
*/
static void clk_range_test_multiple_disjoints_range(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *user1, *user2;
user1 = clk_hw_get_clk(hw, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
user2 = clk_hw_get_clk(hw, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
KUNIT_ASSERT_EQ(test,
clk_set_rate_range(user1, 1000, 2000),
0);
KUNIT_EXPECT_LT(test,
clk_set_rate_range(user2, 3000, 4000),
0);
clk_put(user2);
clk_put(user1);
}
/*
* Test that if our clock has some boundaries and we try to round a rate
* lower than the minimum, the returned rate will be within range.
*/
static void clk_range_test_set_range_round_rate_lower(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
long rate;
KUNIT_ASSERT_EQ(test,
clk_set_rate_range(clk,
DUMMY_CLOCK_RATE_1,
DUMMY_CLOCK_RATE_2),
0);
rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
clk_put(clk);
}
/*
* Test that if our clock has some boundaries and we try to set a rate
 * lower than the minimum, the new rate will be within range.
*/
static void clk_range_test_set_range_set_rate_lower(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
KUNIT_ASSERT_EQ(test,
clk_set_rate_range(clk,
DUMMY_CLOCK_RATE_1,
DUMMY_CLOCK_RATE_2),
0);
KUNIT_ASSERT_EQ(test,
clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
0);
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
clk_put(clk);
}
/*
* Test that if our clock has some boundaries and we try to round and
* set a rate lower than the minimum, the rate returned by
* clk_round_rate() will be consistent with the new rate set by
* clk_set_rate().
*/
static void clk_range_test_set_range_set_round_rate_consistent_lower(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
long rounded;
KUNIT_ASSERT_EQ(test,
clk_set_rate_range(clk,
DUMMY_CLOCK_RATE_1,
DUMMY_CLOCK_RATE_2),
0);
rounded = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
KUNIT_ASSERT_GT(test, rounded, 0);
KUNIT_ASSERT_EQ(test,
clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
0);
KUNIT_EXPECT_EQ(test, rounded, clk_get_rate(clk));
clk_put(clk);
}
/*
* Test that if our clock has some boundaries and we try to round a rate
* higher than the maximum, the returned rate will be within range.
*/
static void clk_range_test_set_range_round_rate_higher(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
long rate;
KUNIT_ASSERT_EQ(test,
clk_set_rate_range(clk,
DUMMY_CLOCK_RATE_1,
DUMMY_CLOCK_RATE_2),
0);
rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
clk_put(clk);
}
/*
* Test that if our clock has some boundaries and we try to set a rate
* higher than the maximum, the new rate will be within range.
*/
static void clk_range_test_set_range_set_rate_higher(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
KUNIT_ASSERT_EQ(test,
clk_set_rate_range(clk,
DUMMY_CLOCK_RATE_1,
DUMMY_CLOCK_RATE_2),
0);
KUNIT_ASSERT_EQ(test,
clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
0);
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
clk_put(clk);
}
/*
* Test that if our clock has some boundaries and we try to round and
* set a rate higher than the maximum, the rate returned by
* clk_round_rate() will be consistent with the new rate set by
* clk_set_rate().
*/
static void clk_range_test_set_range_set_round_rate_consistent_higher(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
long rounded;
KUNIT_ASSERT_EQ(test,
clk_set_rate_range(clk,
DUMMY_CLOCK_RATE_1,
DUMMY_CLOCK_RATE_2),
0);
rounded = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
KUNIT_ASSERT_GT(test, rounded, 0);
KUNIT_ASSERT_EQ(test,
clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
0);
KUNIT_EXPECT_EQ(test, rounded, clk_get_rate(clk));
clk_put(clk);
}
/*
* Test that if our clock has a rate lower than the minimum set by a
* call to clk_set_rate_range(), the rate will be raised to match the
* new minimum.
*
* This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
 * modify the requested rate, which is the case for clk_dummy_rate_ops.
*/
static void clk_range_test_set_range_get_rate_raised(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
KUNIT_ASSERT_EQ(test,
clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
0);
KUNIT_ASSERT_EQ(test,
clk_set_rate_range(clk,
DUMMY_CLOCK_RATE_1,
DUMMY_CLOCK_RATE_2),
0);
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
clk_put(clk);
}
/*
* Test that if our clock has a rate higher than the maximum set by a
* call to clk_set_rate_range(), the rate will be lowered to match the
* new maximum.
*
* This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
 * modify the requested rate, which is the case for clk_dummy_rate_ops.
*/
static void clk_range_test_set_range_get_rate_lowered(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
KUNIT_ASSERT_EQ(test,
clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
0);
KUNIT_ASSERT_EQ(test,
clk_set_rate_range(clk,
DUMMY_CLOCK_RATE_1,
DUMMY_CLOCK_RATE_2),
0);
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
clk_put(clk);
}
static struct kunit_case clk_range_test_cases[] = {
KUNIT_CASE(clk_range_test_set_range),
KUNIT_CASE(clk_range_test_set_range_invalid),
KUNIT_CASE(clk_range_test_multiple_disjoints_range),
KUNIT_CASE(clk_range_test_set_range_round_rate_lower),
KUNIT_CASE(clk_range_test_set_range_set_rate_lower),
KUNIT_CASE(clk_range_test_set_range_set_round_rate_consistent_lower),
KUNIT_CASE(clk_range_test_set_range_round_rate_higher),
KUNIT_CASE(clk_range_test_set_range_set_rate_higher),
KUNIT_CASE(clk_range_test_set_range_set_round_rate_consistent_higher),
KUNIT_CASE(clk_range_test_set_range_get_rate_raised),
KUNIT_CASE(clk_range_test_set_range_get_rate_lowered),
{}
};
/*
* Test suite for a basic rate clock, without any parent.
*
* These tests exercise the rate range API: clk_set_rate_range(),
* clk_set_min_rate(), clk_set_max_rate(), clk_drop_range().
*/
static struct kunit_suite clk_range_test_suite = {
.name = "clk-range-test",
.init = clk_test_init,
.exit = clk_test_exit,
.test_cases = clk_range_test_cases,
};
/*
* Test that if we have several subsequent calls to
* clk_set_rate_range(), the core will reevaluate whether a new rate is
* needed each and every time.
*
* With clk_dummy_maximize_rate_ops, this means that the rate will
* trail along the maximum as it evolves.
*/
static void clk_range_test_set_range_rate_maximized(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
KUNIT_ASSERT_EQ(test,
clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
0);
KUNIT_ASSERT_EQ(test,
clk_set_rate_range(clk,
DUMMY_CLOCK_RATE_1,
DUMMY_CLOCK_RATE_2),
0);
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
KUNIT_ASSERT_EQ(test,
clk_set_rate_range(clk,
DUMMY_CLOCK_RATE_1,
DUMMY_CLOCK_RATE_2 - 1000),
0);
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
KUNIT_ASSERT_EQ(test,
clk_set_rate_range(clk,
DUMMY_CLOCK_RATE_1,
DUMMY_CLOCK_RATE_2),
0);
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
clk_put(clk);
}
/*
* Test that if we have several subsequent calls to
* clk_set_rate_range(), across multiple users, the core will reevaluate
* whether a new rate is needed each and every time.
*
* With clk_dummy_maximize_rate_ops, this means that the rate will
* trail along the maximum as it evolves.
*/
static void clk_range_test_multiple_set_range_rate_maximized(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *user1, *user2;
unsigned long rate;
user1 = clk_hw_get_clk(hw, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
user2 = clk_hw_get_clk(hw, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
KUNIT_ASSERT_EQ(test,
clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
0);
KUNIT_ASSERT_EQ(test,
clk_set_rate_range(user1,
0,
DUMMY_CLOCK_RATE_2),
0);
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
KUNIT_ASSERT_EQ(test,
clk_set_rate_range(user2,
0,
DUMMY_CLOCK_RATE_1),
0);
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
KUNIT_ASSERT_EQ(test,
clk_drop_range(user2),
0);
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
clk_put(user2);
clk_put(user1);
clk_put(clk);
}
/*
* Test that if we have several subsequent calls to
* clk_set_rate_range(), across multiple users, the core will reevaluate
 * whether a new rate is needed, including when a user drops its clock.
*
* With clk_dummy_maximize_rate_ops, this means that the rate will
* trail along the maximum as it evolves.
*/
static void clk_range_test_multiple_set_range_rate_put_maximized(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *user1, *user2;
unsigned long rate;
user1 = clk_hw_get_clk(hw, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
user2 = clk_hw_get_clk(hw, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
KUNIT_ASSERT_EQ(test,
clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
0);
KUNIT_ASSERT_EQ(test,
clk_set_rate_range(user1,
0,
DUMMY_CLOCK_RATE_2),
0);
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
KUNIT_ASSERT_EQ(test,
clk_set_rate_range(user2,
0,
DUMMY_CLOCK_RATE_1),
0);
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
clk_put(user2);
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
clk_put(user1);
clk_put(clk);
}
static struct kunit_case clk_range_maximize_test_cases[] = {
KUNIT_CASE(clk_range_test_set_range_rate_maximized),
KUNIT_CASE(clk_range_test_multiple_set_range_rate_maximized),
KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_maximized),
{}
};
/*
* Test suite for a basic rate clock, without any parent.
*
* These tests exercise the rate range API: clk_set_rate_range(),
* clk_set_min_rate(), clk_set_max_rate(), clk_drop_range(), with a
* driver that will always try to run at the highest possible rate.
*/
static struct kunit_suite clk_range_maximize_test_suite = {
.name = "clk-range-maximize-test",
.init = clk_maximize_test_init,
.exit = clk_test_exit,
.test_cases = clk_range_maximize_test_cases,
};
/*
* Test that if we have several subsequent calls to
* clk_set_rate_range(), the core will reevaluate whether a new rate is
* needed each and every time.
*
* With clk_dummy_minimize_rate_ops, this means that the rate will
* trail along the minimum as it evolves.
*/
static void clk_range_test_set_range_rate_minimized(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
KUNIT_ASSERT_EQ(test,
clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
0);
KUNIT_ASSERT_EQ(test,
clk_set_rate_range(clk,
DUMMY_CLOCK_RATE_1,
DUMMY_CLOCK_RATE_2),
0);
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
KUNIT_ASSERT_EQ(test,
clk_set_rate_range(clk,
DUMMY_CLOCK_RATE_1 + 1000,
DUMMY_CLOCK_RATE_2),
0);
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
KUNIT_ASSERT_EQ(test,
clk_set_rate_range(clk,
DUMMY_CLOCK_RATE_1,
DUMMY_CLOCK_RATE_2),
0);
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
clk_put(clk);
}
/*
* Test that if we have several subsequent calls to
* clk_set_rate_range(), across multiple users, the core will reevaluate
* whether a new rate is needed each and every time.
*
* With clk_dummy_minimize_rate_ops, this means that the rate will
* trail along the minimum as it evolves.
*/
static void clk_range_test_multiple_set_range_rate_minimized(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *user1, *user2;
unsigned long rate;
user1 = clk_hw_get_clk(hw, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
user2 = clk_hw_get_clk(hw, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
KUNIT_ASSERT_EQ(test,
clk_set_rate_range(user1,
DUMMY_CLOCK_RATE_1,
ULONG_MAX),
0);
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
KUNIT_ASSERT_EQ(test,
clk_set_rate_range(user2,
DUMMY_CLOCK_RATE_2,
ULONG_MAX),
0);
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
KUNIT_ASSERT_EQ(test,
clk_drop_range(user2),
0);
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
clk_put(user2);
clk_put(user1);
clk_put(clk);
}
/*
* Test that if we have several subsequent calls to
* clk_set_rate_range(), across multiple users, the core will reevaluate
 * whether a new rate is needed, including when a user drops its clock.
*
* With clk_dummy_minimize_rate_ops, this means that the rate will
* trail along the minimum as it evolves.
*/
static void clk_range_test_multiple_set_range_rate_put_minimized(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *user1, *user2;
unsigned long rate;
user1 = clk_hw_get_clk(hw, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
user2 = clk_hw_get_clk(hw, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
KUNIT_ASSERT_EQ(test,
clk_set_rate_range(user1,
DUMMY_CLOCK_RATE_1,
ULONG_MAX),
0);
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
KUNIT_ASSERT_EQ(test,
clk_set_rate_range(user2,
DUMMY_CLOCK_RATE_2,
ULONG_MAX),
0);
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
clk_put(user2);
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
clk_put(user1);
clk_put(clk);
}
static struct kunit_case clk_range_minimize_test_cases[] = {
KUNIT_CASE(clk_range_test_set_range_rate_minimized),
KUNIT_CASE(clk_range_test_multiple_set_range_rate_minimized),
KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_minimized),
{}
};
/*
* Test suite for a basic rate clock, without any parent.
*
* These tests exercise the rate range API: clk_set_rate_range(),
* clk_set_min_rate(), clk_set_max_rate(), clk_drop_range(), with a
* driver that will always try to run at the lowest possible rate.
*/
static struct kunit_suite clk_range_minimize_test_suite = {
.name = "clk-range-minimize-test",
.init = clk_minimize_test_init,
.exit = clk_test_exit,
.test_cases = clk_range_minimize_test_cases,
};
struct clk_leaf_mux_ctx {
struct clk_multiple_parent_ctx mux_ctx;
struct clk_hw hw;
};
static int
clk_leaf_mux_set_rate_parent_test_init(struct kunit *test)
{
struct clk_leaf_mux_ctx *ctx;
const char *top_parents[2] = { "parent-0", "parent-1" };
int ret;
ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
test->priv = ctx;
ctx->mux_ctx.parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
&clk_dummy_rate_ops,
0);
ctx->mux_ctx.parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[0].hw);
if (ret)
return ret;
ctx->mux_ctx.parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
&clk_dummy_rate_ops,
0);
ctx->mux_ctx.parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[1].hw);
if (ret)
return ret;
ctx->mux_ctx.current_parent = 0;
ctx->mux_ctx.hw.init = CLK_HW_INIT_PARENTS("test-mux", top_parents,
&clk_multiple_parents_mux_ops,
0);
ret = clk_hw_register(NULL, &ctx->mux_ctx.hw);
if (ret)
return ret;
ctx->hw.init = CLK_HW_INIT_HW("test-clock", &ctx->mux_ctx.hw,
&clk_dummy_single_parent_ops,
CLK_SET_RATE_PARENT);
ret = clk_hw_register(NULL, &ctx->hw);
if (ret)
return ret;
return 0;
}
static void clk_leaf_mux_set_rate_parent_test_exit(struct kunit *test)
{
struct clk_leaf_mux_ctx *ctx = test->priv;
clk_hw_unregister(&ctx->hw);
clk_hw_unregister(&ctx->mux_ctx.hw);
clk_hw_unregister(&ctx->mux_ctx.parents_ctx[0].hw);
clk_hw_unregister(&ctx->mux_ctx.parents_ctx[1].hw);
}
/*
* Test that, for a clock that will forward any rate request to its
* parent, the rate request structure returned by __clk_determine_rate
* is sane and will be what we expect.
*/
static void clk_leaf_mux_set_rate_parent_determine_rate(struct kunit *test)
{
struct clk_leaf_mux_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk_rate_request req;
unsigned long rate;
int ret;
rate = clk_get_rate(clk);
KUNIT_ASSERT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
clk_hw_init_rate_request(hw, &req, DUMMY_CLOCK_RATE_2);
ret = __clk_determine_rate(hw, &req);
KUNIT_ASSERT_EQ(test, ret, 0);
KUNIT_EXPECT_EQ(test, req.rate, DUMMY_CLOCK_RATE_2);
KUNIT_EXPECT_EQ(test, req.best_parent_rate, DUMMY_CLOCK_RATE_2);
KUNIT_EXPECT_PTR_EQ(test, req.best_parent_hw, &ctx->mux_ctx.hw);
clk_put(clk);
}
static struct kunit_case clk_leaf_mux_set_rate_parent_test_cases[] = {
KUNIT_CASE(clk_leaf_mux_set_rate_parent_determine_rate),
{}
};
/*
* Test suite for a clock whose parent is a mux with multiple parents.
* The leaf clock has CLK_SET_RATE_PARENT, and will forward rate
* requests to the mux, which will then select which parent is the best
* fit for a given rate.
*
* These tests exercise the behaviour of muxes, and the proper selection
* of parents.
*/
static struct kunit_suite clk_leaf_mux_set_rate_parent_test_suite = {
.name = "clk-leaf-mux-set-rate-parent",
.init = clk_leaf_mux_set_rate_parent_test_init,
.exit = clk_leaf_mux_set_rate_parent_test_exit,
.test_cases = clk_leaf_mux_set_rate_parent_test_cases,
};
struct clk_mux_notifier_rate_change {
bool done;
unsigned long old_rate;
unsigned long new_rate;
wait_queue_head_t wq;
};
struct clk_mux_notifier_ctx {
struct clk_multiple_parent_ctx mux_ctx;
struct clk *clk;
struct notifier_block clk_nb;
struct clk_mux_notifier_rate_change pre_rate_change;
struct clk_mux_notifier_rate_change post_rate_change;
};
#define NOTIFIER_TIMEOUT_MS 100
static int clk_mux_notifier_callback(struct notifier_block *nb,
unsigned long action, void *data)
{
struct clk_notifier_data *clk_data = data;
struct clk_mux_notifier_ctx *ctx = container_of(nb,
struct clk_mux_notifier_ctx,
clk_nb);
if (action & PRE_RATE_CHANGE) {
ctx->pre_rate_change.old_rate = clk_data->old_rate;
ctx->pre_rate_change.new_rate = clk_data->new_rate;
ctx->pre_rate_change.done = true;
wake_up_interruptible(&ctx->pre_rate_change.wq);
}
if (action & POST_RATE_CHANGE) {
ctx->post_rate_change.old_rate = clk_data->old_rate;
ctx->post_rate_change.new_rate = clk_data->new_rate;
ctx->post_rate_change.done = true;
wake_up_interruptible(&ctx->post_rate_change.wq);
}
return 0;
}
static int clk_mux_notifier_test_init(struct kunit *test)
{
struct clk_mux_notifier_ctx *ctx;
const char *top_parents[2] = { "parent-0", "parent-1" };
int ret;
ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
test->priv = ctx;
ctx->clk_nb.notifier_call = clk_mux_notifier_callback;
init_waitqueue_head(&ctx->pre_rate_change.wq);
init_waitqueue_head(&ctx->post_rate_change.wq);
ctx->mux_ctx.parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
&clk_dummy_rate_ops,
0);
ctx->mux_ctx.parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[0].hw);
if (ret)
return ret;
ctx->mux_ctx.parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
&clk_dummy_rate_ops,
0);
ctx->mux_ctx.parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[1].hw);
if (ret)
return ret;
ctx->mux_ctx.current_parent = 0;
ctx->mux_ctx.hw.init = CLK_HW_INIT_PARENTS("test-mux", top_parents,
&clk_multiple_parents_mux_ops,
0);
ret = clk_hw_register(NULL, &ctx->mux_ctx.hw);
if (ret)
return ret;
ctx->clk = clk_hw_get_clk(&ctx->mux_ctx.hw, NULL);
ret = clk_notifier_register(ctx->clk, &ctx->clk_nb);
if (ret)
return ret;
return 0;
}
static void clk_mux_notifier_test_exit(struct kunit *test)
{
struct clk_mux_notifier_ctx *ctx = test->priv;
struct clk *clk = ctx->clk;
clk_notifier_unregister(clk, &ctx->clk_nb);
clk_put(clk);
clk_hw_unregister(&ctx->mux_ctx.hw);
clk_hw_unregister(&ctx->mux_ctx.parents_ctx[0].hw);
clk_hw_unregister(&ctx->mux_ctx.parents_ctx[1].hw);
}
/*
 * Test that if we have a notifier registered on a mux, the core
* will notify us when we switch to another parent, and with the proper
* old and new rates.
*/
static void clk_mux_notifier_set_parent_test(struct kunit *test)
{
struct clk_mux_notifier_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->mux_ctx.hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *new_parent = clk_hw_get_clk(&ctx->mux_ctx.parents_ctx[1].hw, NULL);
int ret;
ret = clk_set_parent(clk, new_parent);
KUNIT_ASSERT_EQ(test, ret, 0);
ret = wait_event_interruptible_timeout(ctx->pre_rate_change.wq,
ctx->pre_rate_change.done,
msecs_to_jiffies(NOTIFIER_TIMEOUT_MS));
KUNIT_ASSERT_GT(test, ret, 0);
KUNIT_EXPECT_EQ(test, ctx->pre_rate_change.old_rate, DUMMY_CLOCK_RATE_1);
KUNIT_EXPECT_EQ(test, ctx->pre_rate_change.new_rate, DUMMY_CLOCK_RATE_2);
ret = wait_event_interruptible_timeout(ctx->post_rate_change.wq,
ctx->post_rate_change.done,
msecs_to_jiffies(NOTIFIER_TIMEOUT_MS));
KUNIT_ASSERT_GT(test, ret, 0);
KUNIT_EXPECT_EQ(test, ctx->post_rate_change.old_rate, DUMMY_CLOCK_RATE_1);
KUNIT_EXPECT_EQ(test, ctx->post_rate_change.new_rate, DUMMY_CLOCK_RATE_2);
clk_put(new_parent);
clk_put(clk);
}
static struct kunit_case clk_mux_notifier_test_cases[] = {
KUNIT_CASE(clk_mux_notifier_set_parent_test),
{}
};
/*
* Test suite for a mux with multiple parents, and a notifier registered
* on the mux.
*
* These tests exercise the behaviour of notifiers.
*/
static struct kunit_suite clk_mux_notifier_test_suite = {
.name = "clk-mux-notifier",
.init = clk_mux_notifier_test_init,
.exit = clk_mux_notifier_test_exit,
.test_cases = clk_mux_notifier_test_cases,
};
static int
clk_mux_no_reparent_test_init(struct kunit *test)
{
struct clk_multiple_parent_ctx *ctx;
const char *parents[2] = { "parent-0", "parent-1"};
int ret;
ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
test->priv = ctx;
ctx->parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
&clk_dummy_rate_ops,
0);
ctx->parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
ret = clk_hw_register(NULL, &ctx->parents_ctx[0].hw);
if (ret)
return ret;
ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
&clk_dummy_rate_ops,
0);
ctx->parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
ret = clk_hw_register(NULL, &ctx->parents_ctx[1].hw);
if (ret)
return ret;
ctx->current_parent = 0;
ctx->hw.init = CLK_HW_INIT_PARENTS("test-mux", parents,
&clk_multiple_parents_no_reparent_mux_ops,
0);
ret = clk_hw_register(NULL, &ctx->hw);
if (ret)
return ret;
return 0;
}
static void
clk_mux_no_reparent_test_exit(struct kunit *test)
{
struct clk_multiple_parent_ctx *ctx = test->priv;
clk_hw_unregister(&ctx->hw);
clk_hw_unregister(&ctx->parents_ctx[0].hw);
clk_hw_unregister(&ctx->parents_ctx[1].hw);
}
/*
 * Test that if we have a mux that cannot change parent and we call
* clk_round_rate() on it with a rate that should cause it to change
* parent, it won't.
*/
static void clk_mux_no_reparent_round_rate(struct kunit *test)
{
struct clk_multiple_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *other_parent, *parent;
unsigned long other_parent_rate;
unsigned long parent_rate;
long rounded_rate;
parent = clk_get_parent(clk);
KUNIT_ASSERT_PTR_NE(test, parent, NULL);
parent_rate = clk_get_rate(parent);
KUNIT_ASSERT_GT(test, parent_rate, 0);
other_parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, other_parent);
KUNIT_ASSERT_FALSE(test, clk_is_match(parent, other_parent));
other_parent_rate = clk_get_rate(other_parent);
KUNIT_ASSERT_GT(test, other_parent_rate, 0);
clk_put(other_parent);
rounded_rate = clk_round_rate(clk, other_parent_rate);
KUNIT_ASSERT_GT(test, rounded_rate, 0);
KUNIT_EXPECT_EQ(test, rounded_rate, parent_rate);
clk_put(clk);
}
/*
 * Test that if we have a mux that cannot change parent and we call
* clk_set_rate() on it with a rate that should cause it to change
* parent, it won't.
*/
static void clk_mux_no_reparent_set_rate(struct kunit *test)
{
struct clk_multiple_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *other_parent, *parent;
unsigned long other_parent_rate;
unsigned long parent_rate;
unsigned long rate;
int ret;
parent = clk_get_parent(clk);
KUNIT_ASSERT_PTR_NE(test, parent, NULL);
parent_rate = clk_get_rate(parent);
KUNIT_ASSERT_GT(test, parent_rate, 0);
other_parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, other_parent);
KUNIT_ASSERT_FALSE(test, clk_is_match(parent, other_parent));
other_parent_rate = clk_get_rate(other_parent);
KUNIT_ASSERT_GT(test, other_parent_rate, 0);
clk_put(other_parent);
ret = clk_set_rate(clk, other_parent_rate);
KUNIT_ASSERT_EQ(test, ret, 0);
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, parent_rate);
clk_put(clk);
}
static struct kunit_case clk_mux_no_reparent_test_cases[] = {
KUNIT_CASE(clk_mux_no_reparent_round_rate),
KUNIT_CASE(clk_mux_no_reparent_set_rate),
{}
};
/*
* Test suite for a clock mux that isn't allowed to change parent, using
* the clk_hw_determine_rate_no_reparent() helper.
*
* These tests exercise that helper, and the proper selection of
* rates and parents.
*/
static struct kunit_suite clk_mux_no_reparent_test_suite = {
.name = "clk-mux-no-reparent",
.init = clk_mux_no_reparent_test_init,
.exit = clk_mux_no_reparent_test_exit,
.test_cases = clk_mux_no_reparent_test_cases,
};
kunit_test_suites(
&clk_leaf_mux_set_rate_parent_test_suite,
&clk_test_suite,
&clk_multiple_parents_mux_test_suite,
&clk_mux_no_reparent_test_suite,
&clk_mux_notifier_test_suite,
&clk_orphan_transparent_multiple_parent_mux_test_suite,
&clk_orphan_transparent_single_parent_test_suite,
&clk_orphan_two_level_root_last_test_suite,
&clk_range_test_suite,
&clk_range_maximize_test_suite,
&clk_range_minimize_test_suite,
&clk_single_parent_mux_test_suite,
&clk_uncached_test_suite
);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/clk/clk_test.c |
// SPDX-License-Identifier: GPL-2.0-only OR MIT
/*
* Driver for an SoC block (Numerically Controlled Oscillator)
* found on t8103 (M1) and other Apple chips
*
* Copyright (C) The Asahi Linux Contributors
*/
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#define NCO_CHANNEL_STRIDE 0x4000
#define NCO_CHANNEL_REGSIZE 20
#define REG_CTRL 0
#define CTRL_ENABLE BIT(31)
#define REG_DIV 4
#define DIV_FINE GENMASK(1, 0)
#define DIV_COARSE GENMASK(12, 2)
#define REG_INC1 8
#define REG_INC2 12
#define REG_ACCINIT 16
/*
* Theory of operation (postulated)
*
* The REG_DIV register indirectly expresses a base integer divisor, roughly
* corresponding to twice the desired ratio of input to output clock. This
* base divisor is adjusted on a cycle-by-cycle basis based on the state of a
* 32-bit phase accumulator to achieve a desired precise clock ratio over the
* long term.
*
 * Specifically, an output clock cycle is produced after (REG_DIV divisor)/2
 * or (REG_DIV divisor + 1)/2 input cycles, the latter taking effect when the
 * top bit of the 32-bit accumulator is set. The accumulator is incremented on
 * each produced output cycle, by the value from either REG_INC1 or REG_INC2;
 * which of the two is selected depends again on the accumulator's current
 * top bit.
*
* Because the NCO hardware implements counting of input clock cycles in part
 * in a Galois linear-feedback shift register, the higher bits of the divisor
* are programmed into REG_DIV by picking an appropriate LFSR state. See
* applnco_compute_tables/applnco_div_translate for details on this.
*/
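/*
 * Worked example (illustrative values, not taken from a datasheet) of
 * the arithmetic performed in applnco_set_rate() below. For
 * parent_rate = 24576000 Hz and a requested rate = 2822400 Hz:
 *
 *   div  = 2 * 24576000 / 2822400      = 17
 *   inc1 = 2 * 24576000 - 17 * 2822400 = 1171200
 *   inc2 = inc1 - 2822400              = -1651200 (wraps as u32)
 *
 * Note that incbase = inc1 - inc2 = rate, so the recalc expression
 * 2 * parent * incbase / (div * incbase + inc1) collapses back to
 * exactly 2822400 Hz, i.e. set_rate and recalc_rate agree.
 */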
#define LFSR_POLY 0xa01
#define LFSR_INIT 0x7ff
#define LFSR_LEN 11
#define LFSR_PERIOD ((1 << LFSR_LEN) - 1)
#define LFSR_TBLSIZE (1 << LFSR_LEN)
/* The minimal attainable coarse divisor (first value in table) */
#define COARSE_DIV_OFFSET 2
struct applnco_tables {
u16 fwd[LFSR_TBLSIZE];
u16 inv[LFSR_TBLSIZE];
};
struct applnco_channel {
void __iomem *base;
struct applnco_tables *tbl;
struct clk_hw hw;
spinlock_t lock;
};
#define to_applnco_channel(_hw) container_of(_hw, struct applnco_channel, hw)
static void applnco_enable_nolock(struct clk_hw *hw)
{
struct applnco_channel *chan = to_applnco_channel(hw);
u32 val;
val = readl_relaxed(chan->base + REG_CTRL);
writel_relaxed(val | CTRL_ENABLE, chan->base + REG_CTRL);
}
static void applnco_disable_nolock(struct clk_hw *hw)
{
struct applnco_channel *chan = to_applnco_channel(hw);
u32 val;
val = readl_relaxed(chan->base + REG_CTRL);
writel_relaxed(val & ~CTRL_ENABLE, chan->base + REG_CTRL);
}
static int applnco_is_enabled(struct clk_hw *hw)
{
struct applnco_channel *chan = to_applnco_channel(hw);
return (readl_relaxed(chan->base + REG_CTRL) & CTRL_ENABLE) != 0;
}
static void applnco_compute_tables(struct applnco_tables *tbl)
{
int i;
u32 state = LFSR_INIT;
/*
* Go through the states of a Galois LFSR and build
* a coarse divisor translation table.
*/
for (i = LFSR_PERIOD; i > 0; i--) {
if (state & 1)
state = (state >> 1) ^ (LFSR_POLY >> 1);
else
state = (state >> 1);
tbl->fwd[i] = state;
tbl->inv[state] = i;
}
/* Zero value is special-cased */
tbl->fwd[0] = 0;
tbl->inv[0] = 0;
}
static bool applnco_div_out_of_range(unsigned int div)
{
unsigned int coarse = div / 4;
return coarse < COARSE_DIV_OFFSET ||
coarse >= COARSE_DIV_OFFSET + LFSR_TBLSIZE;
}
static u32 applnco_div_translate(struct applnco_tables *tbl, unsigned int div)
{
unsigned int coarse = div / 4;
if (WARN_ON(applnco_div_out_of_range(div)))
return 0;
return FIELD_PREP(DIV_COARSE, tbl->fwd[coarse - COARSE_DIV_OFFSET]) |
FIELD_PREP(DIV_FINE, div % 4);
}
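/*
 * Round-trip note (added for clarity): for any in-range divisor,
 *
 *   applnco_div_translate_inv(tbl, applnco_div_translate(tbl, div)) == div
 *
 * since tbl->inv[] is built in applnco_compute_tables() as the exact
 * inverse of tbl->fwd[]. Only the coarse part (div / 4) goes through
 * the LFSR tables; the fine part (div % 4) is stored verbatim.
 */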
static unsigned int applnco_div_translate_inv(struct applnco_tables *tbl, u32 regval)
{
unsigned int coarse, fine;
coarse = tbl->inv[FIELD_GET(DIV_COARSE, regval)] + COARSE_DIV_OFFSET;
fine = FIELD_GET(DIV_FINE, regval);
return coarse * 4 + fine;
}
static int applnco_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct applnco_channel *chan = to_applnco_channel(hw);
unsigned long flags;
u32 div, inc1, inc2;
bool was_enabled;
div = 2 * parent_rate / rate;
inc1 = 2 * parent_rate - div * rate;
inc2 = inc1 - rate;
if (applnco_div_out_of_range(div))
return -EINVAL;
div = applnco_div_translate(chan->tbl, div);
spin_lock_irqsave(&chan->lock, flags);
was_enabled = applnco_is_enabled(hw);
applnco_disable_nolock(hw);
writel_relaxed(div, chan->base + REG_DIV);
writel_relaxed(inc1, chan->base + REG_INC1);
writel_relaxed(inc2, chan->base + REG_INC2);
/* Presumably a neutral initial value for accumulator */
writel_relaxed(1 << 31, chan->base + REG_ACCINIT);
if (was_enabled)
applnco_enable_nolock(hw);
spin_unlock_irqrestore(&chan->lock, flags);
return 0;
}
static unsigned long applnco_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct applnco_channel *chan = to_applnco_channel(hw);
u32 div, inc1, inc2, incbase;
div = applnco_div_translate_inv(chan->tbl,
readl_relaxed(chan->base + REG_DIV));
inc1 = readl_relaxed(chan->base + REG_INC1);
inc2 = readl_relaxed(chan->base + REG_INC2);
/*
	 * We don't support wraparound of the accumulator,
	 * nor the edge case of both increments being zero
*/
if (inc1 >= (1 << 31) || inc2 < (1 << 31) || (inc1 == 0 && inc2 == 0))
return 0;
/* Scale both sides of division by incbase to maintain precision */
incbase = inc1 - inc2;
return div64_u64(((u64) parent_rate) * 2 * incbase,
((u64) div) * incbase + inc1);
}
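/*
 * Sketch of where the formula above comes from, under the postulated
 * theory of operation. Let p be the long-run fraction of output cycles
 * during which the accumulator's top bit is set. In steady state the
 * accumulator neither overflows nor underflows, so the two increments
 * must cancel on average:
 *
 *   (1 - p) * inc1 + p * inc2 = 0   =>   p = inc1 / (inc1 - inc2)
 *
 * The average number of input cycles per output cycle is (div + p) / 2,
 * hence
 *
 *   f_out = 2 * f_in / (div + p)
 *         = 2 * f_in * incbase / (div * incbase + inc1)
 *
 * with incbase = inc1 - inc2, which is exactly the scaled division
 * computed above to preserve precision in 64-bit arithmetic.
 */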
static long applnco_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
unsigned long lo = *parent_rate / (COARSE_DIV_OFFSET + LFSR_TBLSIZE) + 1;
unsigned long hi = *parent_rate / COARSE_DIV_OFFSET;
return clamp(rate, lo, hi);
}
static int applnco_enable(struct clk_hw *hw)
{
struct applnco_channel *chan = to_applnco_channel(hw);
unsigned long flags;
spin_lock_irqsave(&chan->lock, flags);
applnco_enable_nolock(hw);
spin_unlock_irqrestore(&chan->lock, flags);
return 0;
}
static void applnco_disable(struct clk_hw *hw)
{
struct applnco_channel *chan = to_applnco_channel(hw);
unsigned long flags;
spin_lock_irqsave(&chan->lock, flags);
applnco_disable_nolock(hw);
spin_unlock_irqrestore(&chan->lock, flags);
}
static const struct clk_ops applnco_ops = {
.set_rate = applnco_set_rate,
.recalc_rate = applnco_recalc_rate,
.round_rate = applnco_round_rate,
.enable = applnco_enable,
.disable = applnco_disable,
.is_enabled = applnco_is_enabled,
};
static int applnco_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct clk_parent_data pdata = { .index = 0 };
struct clk_init_data init;
struct clk_hw_onecell_data *onecell_data;
void __iomem *base;
struct resource *res;
struct applnco_tables *tbl;
unsigned int nchannels;
int ret, i;
base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base))
return PTR_ERR(base);
if (resource_size(res) < NCO_CHANNEL_REGSIZE)
return -EINVAL;
nchannels = (resource_size(res) - NCO_CHANNEL_REGSIZE)
/ NCO_CHANNEL_STRIDE + 1;
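	/*
	 * Channel count is inferred from the MMIO window: each channel
	 * occupies NCO_CHANNEL_STRIDE (0x4000) bytes, but only the first
	 * NCO_CHANNEL_REGSIZE (20) bytes of the last stride must fit.
	 * E.g. a hypothetical resource of 0x10014 bytes yields
	 * (0x10014 - 20) / 0x4000 + 1 = 5 channels.
	 */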
onecell_data = devm_kzalloc(&pdev->dev, struct_size(onecell_data, hws,
nchannels), GFP_KERNEL);
if (!onecell_data)
return -ENOMEM;
onecell_data->num = nchannels;
tbl = devm_kzalloc(&pdev->dev, sizeof(*tbl), GFP_KERNEL);
if (!tbl)
return -ENOMEM;
applnco_compute_tables(tbl);
for (i = 0; i < nchannels; i++) {
struct applnco_channel *chan;
chan = devm_kzalloc(&pdev->dev, sizeof(*chan), GFP_KERNEL);
if (!chan)
return -ENOMEM;
chan->base = base + NCO_CHANNEL_STRIDE * i;
chan->tbl = tbl;
spin_lock_init(&chan->lock);
memset(&init, 0, sizeof(init));
init.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
"%s-%d", np->name, i);
init.ops = &applnco_ops;
init.parent_data = &pdata;
init.num_parents = 1;
init.flags = 0;
chan->hw.init = &init;
ret = devm_clk_hw_register(&pdev->dev, &chan->hw);
if (ret)
return ret;
onecell_data->hws[i] = &chan->hw;
}
return devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_onecell_get,
onecell_data);
}
static const struct of_device_id applnco_ids[] = {
{ .compatible = "apple,nco" },
{ }
};
MODULE_DEVICE_TABLE(of, applnco_ids);
static struct platform_driver applnco_driver = {
.driver = {
.name = "apple-nco",
.of_match_table = applnco_ids,
},
.probe = applnco_probe,
};
module_platform_driver(applnco_driver);
MODULE_AUTHOR("Martin Povišer <[email protected]>");
MODULE_DESCRIPTION("Clock driver for NCO blocks on Apple SoCs");
MODULE_LICENSE("GPL");
| linux-master | drivers/clk/clk-apple-nco.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/regulator/consumer.h>
#include <dt-bindings/clock/maxim,max9485.h>
#define MAX9485_NUM_CLKS 4
/* This chip has only one register, 8 bits wide. */
#define MAX9485_FS_12KHZ (0 << 0)
#define MAX9485_FS_32KHZ (1 << 0)
#define MAX9485_FS_44_1KHZ (2 << 0)
#define MAX9485_FS_48KHZ (3 << 0)
#define MAX9485_SCALE_256 (0 << 2)
#define MAX9485_SCALE_384 (1 << 2)
#define MAX9485_SCALE_768 (2 << 2)
#define MAX9485_DOUBLE BIT(4)
#define MAX9485_CLKOUT1_ENABLE BIT(5)
#define MAX9485_CLKOUT2_ENABLE BIT(6)
#define MAX9485_MCLK_ENABLE BIT(7)
#define MAX9485_FREQ_MASK 0x1f
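/*
 * Putting the definitions above together, the register layout is as
 * follows (derived from the bit masks above, not independently checked
 * against a datasheet):
 *
 *   bit 7     MCLK output enable
 *   bit 6     CLKOUT2 enable
 *   bit 5     CLKOUT1 enable
 *   bit 4     frequency doubler
 *   bits 3:2  scaling factor (256/384/768)
 *   bits 1:0  base sample rate (12/32/44.1/48 kHz)
 *
 * MAX9485_FREQ_MASK (0x1f) thus covers the doubler, scale and sample
 * rate fields together, i.e. everything that selects the CLKOUT rate.
 */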
struct max9485_rate {
unsigned long out;
u8 reg_value;
};
/*
 * Ordered by frequency. For frequencies the hardware can generate with
 * multiple settings, the one with the lowest jitter is listed first.
*/
static const struct max9485_rate max9485_rates[] = {
{ 3072000, MAX9485_FS_12KHZ | MAX9485_SCALE_256 },
{ 4608000, MAX9485_FS_12KHZ | MAX9485_SCALE_384 },
{ 8192000, MAX9485_FS_32KHZ | MAX9485_SCALE_256 },
	{ 9216000, MAX9485_FS_12KHZ | MAX9485_SCALE_768 },
{ 11289600, MAX9485_FS_44_1KHZ | MAX9485_SCALE_256 },
{ 12288000, MAX9485_FS_48KHZ | MAX9485_SCALE_256 },
{ 12288000, MAX9485_FS_32KHZ | MAX9485_SCALE_384 },
{ 16384000, MAX9485_FS_32KHZ | MAX9485_SCALE_256 | MAX9485_DOUBLE },
{ 16934400, MAX9485_FS_44_1KHZ | MAX9485_SCALE_384 },
	{ 18432000, MAX9485_FS_48KHZ | MAX9485_SCALE_384 },
{ 22579200, MAX9485_FS_44_1KHZ | MAX9485_SCALE_256 | MAX9485_DOUBLE },
{ 24576000, MAX9485_FS_48KHZ | MAX9485_SCALE_256 | MAX9485_DOUBLE },
{ 24576000, MAX9485_FS_32KHZ | MAX9485_SCALE_384 | MAX9485_DOUBLE },
{ 24576000, MAX9485_FS_32KHZ | MAX9485_SCALE_768 },
{ 33868800, MAX9485_FS_44_1KHZ | MAX9485_SCALE_384 | MAX9485_DOUBLE },
{ 33868800, MAX9485_FS_44_1KHZ | MAX9485_SCALE_768 },
{ 36864000, MAX9485_FS_48KHZ | MAX9485_SCALE_384 | MAX9485_DOUBLE },
{ 36864000, MAX9485_FS_48KHZ | MAX9485_SCALE_768 },
{ 49152000, MAX9485_FS_32KHZ | MAX9485_SCALE_768 | MAX9485_DOUBLE },
{ 67737600, MAX9485_FS_44_1KHZ | MAX9485_SCALE_768 | MAX9485_DOUBLE },
{ 73728000, MAX9485_FS_48KHZ | MAX9485_SCALE_768 | MAX9485_DOUBLE },
{ } /* sentinel */
};
struct max9485_driver_data;
struct max9485_clk_hw {
struct clk_hw hw;
struct clk_init_data init;
u8 enable_bit;
struct max9485_driver_data *drvdata;
};
struct max9485_driver_data {
struct clk *xclk;
struct i2c_client *client;
u8 reg_value;
struct regulator *supply;
struct gpio_desc *reset_gpio;
struct max9485_clk_hw hw[MAX9485_NUM_CLKS];
};
static inline struct max9485_clk_hw *to_max9485_clk(struct clk_hw *hw)
{
return container_of(hw, struct max9485_clk_hw, hw);
}
static int max9485_update_bits(struct max9485_driver_data *drvdata,
u8 mask, u8 value)
{
int ret;
drvdata->reg_value &= ~mask;
drvdata->reg_value |= value;
dev_dbg(&drvdata->client->dev,
"updating mask 0x%02x value 0x%02x -> 0x%02x\n",
mask, value, drvdata->reg_value);
ret = i2c_master_send(drvdata->client,
&drvdata->reg_value,
sizeof(drvdata->reg_value));
return ret < 0 ? ret : 0;
}
static int max9485_clk_prepare(struct clk_hw *hw)
{
struct max9485_clk_hw *clk_hw = to_max9485_clk(hw);
return max9485_update_bits(clk_hw->drvdata,
clk_hw->enable_bit,
clk_hw->enable_bit);
}
static void max9485_clk_unprepare(struct clk_hw *hw)
{
struct max9485_clk_hw *clk_hw = to_max9485_clk(hw);
max9485_update_bits(clk_hw->drvdata, clk_hw->enable_bit, 0);
}
/*
* CLKOUT - configurable clock output
*/
static int max9485_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct max9485_clk_hw *clk_hw = to_max9485_clk(hw);
const struct max9485_rate *entry;
for (entry = max9485_rates; entry->out != 0; entry++)
if (entry->out == rate)
break;
if (entry->out == 0)
return -EINVAL;
return max9485_update_bits(clk_hw->drvdata,
MAX9485_FREQ_MASK,
entry->reg_value);
}
static unsigned long max9485_clkout_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct max9485_clk_hw *clk_hw = to_max9485_clk(hw);
struct max9485_driver_data *drvdata = clk_hw->drvdata;
u8 val = drvdata->reg_value & MAX9485_FREQ_MASK;
const struct max9485_rate *entry;
for (entry = max9485_rates; entry->out != 0; entry++)
if (val == entry->reg_value)
return entry->out;
return 0;
}
static long max9485_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
const struct max9485_rate *curr, *prev = NULL;
for (curr = max9485_rates; curr->out != 0; curr++) {
/* Exact matches */
if (curr->out == rate)
return rate;
/*
* Find the first entry that has a frequency higher than the
* requested one.
*/
if (curr->out > rate) {
unsigned int mid;
/*
* If this is the first entry, clamp the value to the
* lowest possible frequency.
*/
if (!prev)
return curr->out;
/*
* Otherwise, determine whether the previous entry or
* current one is closer.
*/
mid = prev->out + ((curr->out - prev->out) / 2);
return (mid > rate) ? prev->out : curr->out;
}
prev = curr;
}
	/* The requested rate is above the highest entry: clamp to it */
return prev->out;
}
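/*
 * Worked example of the midpoint selection above, using entries from
 * max9485_rates[]: a request for 12000000 Hz falls between 11289600
 * and 12288000, and the midpoint is
 *
 *   11289600 + (12288000 - 11289600) / 2 = 11788800
 *
 * Since 12000000 > 11788800, the higher entry (12288000) is returned;
 * a request for 11500000 would instead round down to 11289600.
 */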
struct max9485_clk {
const char *name;
int parent_index;
const struct clk_ops ops;
u8 enable_bit;
};
static const struct max9485_clk max9485_clks[MAX9485_NUM_CLKS] = {
[MAX9485_MCLKOUT] = {
.name = "mclkout",
.parent_index = -1,
.enable_bit = MAX9485_MCLK_ENABLE,
.ops = {
.prepare = max9485_clk_prepare,
.unprepare = max9485_clk_unprepare,
},
},
[MAX9485_CLKOUT] = {
.name = "clkout",
.parent_index = -1,
.ops = {
.set_rate = max9485_clkout_set_rate,
.round_rate = max9485_clkout_round_rate,
.recalc_rate = max9485_clkout_recalc_rate,
},
},
[MAX9485_CLKOUT1] = {
.name = "clkout1",
.parent_index = MAX9485_CLKOUT,
.enable_bit = MAX9485_CLKOUT1_ENABLE,
.ops = {
.prepare = max9485_clk_prepare,
.unprepare = max9485_clk_unprepare,
},
},
[MAX9485_CLKOUT2] = {
.name = "clkout2",
.parent_index = MAX9485_CLKOUT,
.enable_bit = MAX9485_CLKOUT2_ENABLE,
.ops = {
.prepare = max9485_clk_prepare,
.unprepare = max9485_clk_unprepare,
},
},
};
static struct clk_hw *
max9485_of_clk_get(struct of_phandle_args *clkspec, void *data)
{
struct max9485_driver_data *drvdata = data;
unsigned int idx = clkspec->args[0];
return &drvdata->hw[idx].hw;
}
static int max9485_i2c_probe(struct i2c_client *client)
{
struct max9485_driver_data *drvdata;
struct device *dev = &client->dev;
const char *xclk_name;
int i, ret;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
drvdata->xclk = devm_clk_get(dev, "xclk");
if (IS_ERR(drvdata->xclk))
return PTR_ERR(drvdata->xclk);
xclk_name = __clk_get_name(drvdata->xclk);
drvdata->supply = devm_regulator_get(dev, "vdd");
if (IS_ERR(drvdata->supply))
return PTR_ERR(drvdata->supply);
ret = regulator_enable(drvdata->supply);
if (ret < 0)
return ret;
drvdata->reset_gpio =
devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(drvdata->reset_gpio))
return PTR_ERR(drvdata->reset_gpio);
i2c_set_clientdata(client, drvdata);
drvdata->client = client;
ret = i2c_master_recv(drvdata->client, &drvdata->reg_value,
sizeof(drvdata->reg_value));
if (ret < 0) {
dev_warn(dev, "Unable to read device register: %d\n", ret);
return ret;
}
for (i = 0; i < MAX9485_NUM_CLKS; i++) {
int parent_index = max9485_clks[i].parent_index;
const char *name;
if (of_property_read_string_index(dev->of_node,
"clock-output-names",
i, &name) == 0) {
drvdata->hw[i].init.name = name;
} else {
drvdata->hw[i].init.name = max9485_clks[i].name;
}
drvdata->hw[i].init.ops = &max9485_clks[i].ops;
drvdata->hw[i].init.num_parents = 1;
drvdata->hw[i].init.flags = 0;
if (parent_index > 0) {
drvdata->hw[i].init.parent_names =
&drvdata->hw[parent_index].init.name;
drvdata->hw[i].init.flags |= CLK_SET_RATE_PARENT;
} else {
drvdata->hw[i].init.parent_names = &xclk_name;
}
drvdata->hw[i].enable_bit = max9485_clks[i].enable_bit;
drvdata->hw[i].hw.init = &drvdata->hw[i].init;
drvdata->hw[i].drvdata = drvdata;
ret = devm_clk_hw_register(dev, &drvdata->hw[i].hw);
if (ret < 0)
return ret;
}
return devm_of_clk_add_hw_provider(dev, max9485_of_clk_get, drvdata);
}
static int __maybe_unused max9485_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct max9485_driver_data *drvdata = i2c_get_clientdata(client);
gpiod_set_value_cansleep(drvdata->reset_gpio, 0);
return 0;
}
static int __maybe_unused max9485_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct max9485_driver_data *drvdata = i2c_get_clientdata(client);
int ret;
gpiod_set_value_cansleep(drvdata->reset_gpio, 1);
ret = i2c_master_send(client, &drvdata->reg_value,
sizeof(drvdata->reg_value));
return ret < 0 ? ret : 0;
}
static const struct dev_pm_ops max9485_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(max9485_suspend, max9485_resume)
};
static const struct of_device_id max9485_dt_ids[] = {
{ .compatible = "maxim,max9485", },
{ }
};
MODULE_DEVICE_TABLE(of, max9485_dt_ids);
static const struct i2c_device_id max9485_i2c_ids[] = {
{ .name = "max9485", },
{ }
};
MODULE_DEVICE_TABLE(i2c, max9485_i2c_ids);
static struct i2c_driver max9485_driver = {
.driver = {
.name = "max9485",
.pm = &max9485_pm_ops,
.of_match_table = max9485_dt_ids,
},
.probe = max9485_i2c_probe,
.id_table = max9485_i2c_ids,
};
module_i2c_driver(max9485_driver);
MODULE_AUTHOR("Daniel Mack <[email protected]>");
MODULE_DESCRIPTION("MAX9485 Programmable Audio Clock Generator");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/clk/clk-max9485.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Author: Yinbo Zhu <[email protected]>
* Copyright (C) 2022-2023 Loongson Technology Corporation Limited
*/
#include <linux/err.h>
#include <linux/init.h>
#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <dt-bindings/clock/loongson,ls2k-clk.h>
#define LOONGSON2_PLL_MULT_SHIFT 32
#define LOONGSON2_PLL_MULT_WIDTH 10
#define LOONGSON2_PLL_DIV_SHIFT 26
#define LOONGSON2_PLL_DIV_WIDTH 6
#define LOONGSON2_APB_FREQSCALE_SHIFT 20
#define LOONGSON2_APB_FREQSCALE_WIDTH 3
#define LOONGSON2_USB_FREQSCALE_SHIFT 16
#define LOONGSON2_USB_FREQSCALE_WIDTH 3
#define LOONGSON2_SATA_FREQSCALE_SHIFT 12
#define LOONGSON2_SATA_FREQSCALE_WIDTH 3
#define LOONGSON2_BOOT_FREQSCALE_SHIFT 8
#define LOONGSON2_BOOT_FREQSCALE_WIDTH 3
static void __iomem *loongson2_pll_base;
/*
 * Shared by the divider clocks registered in probe; it must outlive
 * probe, so it lives at file scope rather than on the probe stack.
 */
static DEFINE_SPINLOCK(loongson2_clk_lock);
static const struct clk_parent_data pdata[] = {
{ .fw_name = "ref_100m",},
};
static struct clk_hw *loongson2_clk_register(struct device *dev,
const char *name,
const char *parent_name,
const struct clk_ops *ops,
unsigned long flags)
{
int ret;
struct clk_hw *hw;
struct clk_init_data init = { };
hw = devm_kzalloc(dev, sizeof(*hw), GFP_KERNEL);
if (!hw)
return ERR_PTR(-ENOMEM);
init.name = name;
init.ops = ops;
init.flags = flags;
init.num_parents = 1;
if (!parent_name)
init.parent_data = pdata;
else
init.parent_names = &parent_name;
hw->init = &init;
ret = devm_clk_hw_register(dev, hw);
if (ret)
hw = ERR_PTR(ret);
return hw;
}
static unsigned long loongson2_calc_pll_rate(int offset, unsigned long rate)
{
u64 val;
u32 mult, div;
val = readq(loongson2_pll_base + offset);
mult = (val >> LOONGSON2_PLL_MULT_SHIFT) &
clk_div_mask(LOONGSON2_PLL_MULT_WIDTH);
div = (val >> LOONGSON2_PLL_DIV_SHIFT) &
clk_div_mask(LOONGSON2_PLL_DIV_WIDTH);
return div_u64((u64)rate * mult, div);
}
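/*
 * Illustrative reading of the PLL formula above, with hypothetical
 * register contents (not taken from real hardware): with the 100 MHz
 * reference input, mult = 48 and div = 4 would give
 *
 *   rate = 100000000 * 48 / 4 = 1200000000 Hz (1.2 GHz)
 */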
static unsigned long loongson2_node_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
return loongson2_calc_pll_rate(0x0, parent_rate);
}
static const struct clk_ops loongson2_node_clk_ops = {
.recalc_rate = loongson2_node_recalc_rate,
};
static unsigned long loongson2_ddr_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
return loongson2_calc_pll_rate(0x10, parent_rate);
}
static const struct clk_ops loongson2_ddr_clk_ops = {
.recalc_rate = loongson2_ddr_recalc_rate,
};
static unsigned long loongson2_dc_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
return loongson2_calc_pll_rate(0x20, parent_rate);
}
static const struct clk_ops loongson2_dc_clk_ops = {
.recalc_rate = loongson2_dc_recalc_rate,
};
static unsigned long loongson2_pix0_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
return loongson2_calc_pll_rate(0x30, parent_rate);
}
static const struct clk_ops loongson2_pix0_clk_ops = {
.recalc_rate = loongson2_pix0_recalc_rate,
};
static unsigned long loongson2_pix1_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
return loongson2_calc_pll_rate(0x40, parent_rate);
}
static const struct clk_ops loongson2_pix1_clk_ops = {
.recalc_rate = loongson2_pix1_recalc_rate,
};
static unsigned long loongson2_calc_rate(unsigned long rate,
int shift, int width)
{
u64 val;
u32 mult;
val = readq(loongson2_pll_base + 0x50);
mult = (val >> shift) & clk_div_mask(width);
return div_u64((u64)rate * (mult + 1), 8);
}
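/*
 * The freqscale fields read above are 3 bits wide, so each scaled
 * clock runs at (mult + 1)/8 of its parent rate, i.e. from 1/8 up to
 * 8/8 of it. For example, a hypothetical freqscale value of 3 on a
 * 1 GHz parent yields 1000000000 * 4 / 8 = 500 MHz.
 */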
static unsigned long loongson2_boot_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
return loongson2_calc_rate(parent_rate,
LOONGSON2_BOOT_FREQSCALE_SHIFT,
LOONGSON2_BOOT_FREQSCALE_WIDTH);
}
static const struct clk_ops loongson2_boot_clk_ops = {
.recalc_rate = loongson2_boot_recalc_rate,
};
static unsigned long loongson2_apb_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
return loongson2_calc_rate(parent_rate,
LOONGSON2_APB_FREQSCALE_SHIFT,
LOONGSON2_APB_FREQSCALE_WIDTH);
}
static const struct clk_ops loongson2_apb_clk_ops = {
.recalc_rate = loongson2_apb_recalc_rate,
};
static unsigned long loongson2_usb_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
return loongson2_calc_rate(parent_rate,
LOONGSON2_USB_FREQSCALE_SHIFT,
LOONGSON2_USB_FREQSCALE_WIDTH);
}
static const struct clk_ops loongson2_usb_clk_ops = {
.recalc_rate = loongson2_usb_recalc_rate,
};
static unsigned long loongson2_sata_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
return loongson2_calc_rate(parent_rate,
LOONGSON2_SATA_FREQSCALE_SHIFT,
LOONGSON2_SATA_FREQSCALE_WIDTH);
}
static const struct clk_ops loongson2_sata_clk_ops = {
.recalc_rate = loongson2_sata_recalc_rate,
};
static inline int loongson2_check_clk_hws(struct clk_hw *clks[], unsigned int count)
{
unsigned int i;
for (i = 0; i < count; i++)
if (IS_ERR(clks[i])) {
pr_err("Loongson2 clk %u: register failed with %ld\n",
i, PTR_ERR(clks[i]));
return PTR_ERR(clks[i]);
}
return 0;
}
static int loongson2_clk_probe(struct platform_device *pdev)
{
int ret;
struct clk_hw **hws;
struct clk_hw_onecell_data *clk_hw_data;
struct device *dev = &pdev->dev;
loongson2_pll_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(loongson2_pll_base))
return PTR_ERR(loongson2_pll_base);
clk_hw_data = devm_kzalloc(dev, struct_size(clk_hw_data, hws, LOONGSON2_CLK_END),
GFP_KERNEL);
if (WARN_ON(!clk_hw_data))
return -ENOMEM;
clk_hw_data->num = LOONGSON2_CLK_END;
hws = clk_hw_data->hws;
hws[LOONGSON2_NODE_PLL] = loongson2_clk_register(dev, "node_pll",
NULL,
&loongson2_node_clk_ops, 0);
hws[LOONGSON2_DDR_PLL] = loongson2_clk_register(dev, "ddr_pll",
NULL,
&loongson2_ddr_clk_ops, 0);
hws[LOONGSON2_DC_PLL] = loongson2_clk_register(dev, "dc_pll",
NULL,
&loongson2_dc_clk_ops, 0);
hws[LOONGSON2_PIX0_PLL] = loongson2_clk_register(dev, "pix0_pll",
NULL,
&loongson2_pix0_clk_ops, 0);
hws[LOONGSON2_PIX1_PLL] = loongson2_clk_register(dev, "pix1_pll",
NULL,
&loongson2_pix1_clk_ops, 0);
hws[LOONGSON2_BOOT_CLK] = loongson2_clk_register(dev, "boot",
NULL,
&loongson2_boot_clk_ops, 0);
hws[LOONGSON2_NODE_CLK] = devm_clk_hw_register_divider(dev, "node",
"node_pll", 0,
loongson2_pll_base + 0x8, 0,
6, CLK_DIVIDER_ONE_BASED,
&loongson2_clk_lock);
	/*
	 * The hda clk divisor is in the upper 32 bits, and the clk-provider
	 * layer code doesn't support 64-bit I/O operations. A conversion is
	 * therefore required that subtracts 32 from the shift and adds 4
	 * bytes to the hda address.
	 */
hws[LOONGSON2_HDA_CLK] = devm_clk_hw_register_divider(dev, "hda",
"ddr_pll", 0,
loongson2_pll_base + 0x22, 12,
7, CLK_DIVIDER_ONE_BASED,
&loongson2_clk_lock);
hws[LOONGSON2_GPU_CLK] = devm_clk_hw_register_divider(dev, "gpu",
"ddr_pll", 0,
loongson2_pll_base + 0x18, 22,
6, CLK_DIVIDER_ONE_BASED,
&loongson2_clk_lock);
hws[LOONGSON2_DDR_CLK] = devm_clk_hw_register_divider(dev, "ddr",
"ddr_pll", 0,
loongson2_pll_base + 0x18, 0,
6, CLK_DIVIDER_ONE_BASED,
&loongson2_clk_lock);
hws[LOONGSON2_GMAC_CLK] = devm_clk_hw_register_divider(dev, "gmac",
"dc_pll", 0,
loongson2_pll_base + 0x28, 22,
6, CLK_DIVIDER_ONE_BASED,
&loongson2_clk_lock);
hws[LOONGSON2_DC_CLK] = devm_clk_hw_register_divider(dev, "dc",
"dc_pll", 0,
loongson2_pll_base + 0x28, 0,
6, CLK_DIVIDER_ONE_BASED,
&loongson2_clk_lock);
hws[LOONGSON2_APB_CLK] = loongson2_clk_register(dev, "apb",
"gmac",
&loongson2_apb_clk_ops, 0);
hws[LOONGSON2_USB_CLK] = loongson2_clk_register(dev, "usb",
"gmac",
&loongson2_usb_clk_ops, 0);
hws[LOONGSON2_SATA_CLK] = loongson2_clk_register(dev, "sata",
"gmac",
&loongson2_sata_clk_ops, 0);
hws[LOONGSON2_PIX0_CLK] = clk_hw_register_divider(NULL, "pix0",
"pix0_pll", 0,
loongson2_pll_base + 0x38, 0, 6,
CLK_DIVIDER_ONE_BASED,
&loongson2_clk_lock);
hws[LOONGSON2_PIX1_CLK] = clk_hw_register_divider(NULL, "pix1",
"pix1_pll", 0,
loongson2_pll_base + 0x48, 0, 6,
CLK_DIVIDER_ONE_BASED,
&loongson2_clk_lock);
ret = loongson2_check_clk_hws(hws, LOONGSON2_CLK_END);
if (ret)
return ret;
return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_hw_data);
}
static const struct of_device_id loongson2_clk_match_table[] = {
{ .compatible = "loongson,ls2k-clk" },
{ }
};
MODULE_DEVICE_TABLE(of, loongson2_clk_match_table);
static struct platform_driver loongson2_clk_driver = {
.probe = loongson2_clk_probe,
.driver = {
.name = "loongson2-clk",
.of_match_table = loongson2_clk_match_table,
},
};
module_platform_driver(loongson2_clk_driver);
MODULE_DESCRIPTION("Loongson2 clock driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/clk/clk-loongson2.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Driver for Silicon Labs Si5340, Si5341, Si5342, Si5344 and Si5345
* Copyright (C) 2019 Topic Embedded Products
* Author: Mike Looijmans <[email protected]>
*
* The Si5341 has 10 outputs and 5 synthesizers.
* The Si5340 is a smaller version of the Si5341 with only 4 outputs.
* The Si5345 is similar to the Si5341, with the addition of fractional input
* dividers and automatic input selection.
* The Si5342 and Si5344 are smaller versions of the Si5345.
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/gcd.h>
#include <linux/math64.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
#define SI5341_NUM_INPUTS 4
#define SI5340_MAX_NUM_OUTPUTS 4
#define SI5341_MAX_NUM_OUTPUTS 10
#define SI5342_MAX_NUM_OUTPUTS 2
#define SI5344_MAX_NUM_OUTPUTS 4
#define SI5345_MAX_NUM_OUTPUTS 10
#define SI5340_NUM_SYNTH 4
#define SI5341_NUM_SYNTH 5
#define SI5342_NUM_SYNTH 2
#define SI5344_NUM_SYNTH 4
#define SI5345_NUM_SYNTH 5
/* Range of the synthesizer fractional divider */
#define SI5341_SYNTH_N_MIN 10
#define SI5341_SYNTH_N_MAX 4095
/* The chip can get its input clock from 3 input pins or an XTAL */
/* There is one PLL running at 13500–14256 MHz */
#define SI5341_PLL_VCO_MIN 13500000000ull
#define SI5341_PLL_VCO_MAX 14256000000ull
/* The 5 frequency synthesizers obtain their input from the PLL */
struct clk_si5341_synth {
struct clk_hw hw;
struct clk_si5341 *data;
u8 index;
};
#define to_clk_si5341_synth(_hw) \
container_of(_hw, struct clk_si5341_synth, hw)
/* The output stages can be connected to any synth (full mux) */
struct clk_si5341_output {
struct clk_hw hw;
struct clk_si5341 *data;
struct regulator *vddo_reg;
u8 index;
};
#define to_clk_si5341_output(_hw) \
container_of(_hw, struct clk_si5341_output, hw)
struct clk_si5341 {
struct clk_hw hw;
struct regmap *regmap;
struct i2c_client *i2c_client;
struct clk_si5341_synth synth[SI5341_NUM_SYNTH];
struct clk_si5341_output clk[SI5341_MAX_NUM_OUTPUTS];
struct clk *input_clk[SI5341_NUM_INPUTS];
const char *input_clk_name[SI5341_NUM_INPUTS];
const u16 *reg_output_offset;
const u16 *reg_rdiv_offset;
u64 freq_vco; /* 13500–14256 MHz */
u8 num_outputs;
u8 num_synth;
u16 chip_id;
bool xaxb_ext_clk;
bool iovdd_33;
};
#define to_clk_si5341(_hw) container_of(_hw, struct clk_si5341, hw)
struct clk_si5341_output_config {
u8 out_format_drv_bits;
u8 out_cm_ampl_bits;
u8 vdd_sel_bits;
bool synth_master;
bool always_on;
};
#define SI5341_PAGE 0x0001
#define SI5341_PN_BASE 0x0002
#define SI5341_DEVICE_REV 0x0005
#define SI5341_STATUS 0x000C
#define SI5341_LOS 0x000D
#define SI5341_STATUS_STICKY 0x0011
#define SI5341_LOS_STICKY 0x0012
#define SI5341_SOFT_RST 0x001C
#define SI5341_IN_SEL 0x0021
#define SI5341_DEVICE_READY 0x00FE
#define SI5341_XAXB_CFG 0x090E
#define SI5341_IO_VDD_SEL 0x0943
#define SI5341_IN_EN 0x0949
#define SI5341_INX_TO_PFD_EN 0x094A
/* Status bits */
#define SI5341_STATUS_SYSINCAL BIT(0)
#define SI5341_STATUS_LOSXAXB BIT(1)
#define SI5341_STATUS_LOSREF BIT(2)
#define SI5341_STATUS_LOL BIT(3)
/* Input selection */
#define SI5341_IN_SEL_MASK 0x06
#define SI5341_IN_SEL_SHIFT 1
#define SI5341_IN_SEL_REGCTRL 0x01
#define SI5341_INX_TO_PFD_SHIFT 4
/* XTAL config bits */
#define SI5341_XAXB_CFG_EXTCLK_EN BIT(0)
#define SI5341_XAXB_CFG_PDNB BIT(1)
/* Input dividers (48-bit) */
#define SI5341_IN_PDIV(x) (0x0208 + ((x) * 10))
#define SI5341_IN_PSET(x) (0x020E + ((x) * 10))
#define SI5341_PX_UPD 0x0230
/* PLL configuration */
#define SI5341_PLL_M_NUM 0x0235
#define SI5341_PLL_M_DEN 0x023B
/* Output configuration */
#define SI5341_OUT_CONFIG(output) \
((output)->data->reg_output_offset[(output)->index])
#define SI5341_OUT_FORMAT(output) (SI5341_OUT_CONFIG(output) + 1)
#define SI5341_OUT_CM(output) (SI5341_OUT_CONFIG(output) + 2)
#define SI5341_OUT_MUX_SEL(output) (SI5341_OUT_CONFIG(output) + 3)
#define SI5341_OUT_R_REG(output) \
((output)->data->reg_rdiv_offset[(output)->index])
#define SI5341_OUT_MUX_VDD_SEL_MASK 0x38
/* Synthesize N divider */
#define SI5341_SYNTH_N_NUM(x) (0x0302 + ((x) * 11))
#define SI5341_SYNTH_N_DEN(x) (0x0308 + ((x) * 11))
#define SI5341_SYNTH_N_UPD(x) (0x030C + ((x) * 11))
/* Synthesizer output enable, phase bypass, power mode */
#define SI5341_SYNTH_N_CLK_TO_OUTX_EN 0x0A03
#define SI5341_SYNTH_N_PIBYP 0x0A04
#define SI5341_SYNTH_N_PDNB 0x0A05
#define SI5341_SYNTH_N_CLK_DIS 0x0B4A
#define SI5341_REGISTER_MAX 0xBFF
/* SI5341_OUT_CONFIG bits */
#define SI5341_OUT_CFG_PDN BIT(0)
#define SI5341_OUT_CFG_OE BIT(1)
#define SI5341_OUT_CFG_RDIV_FORCE2 BIT(2)
/* Static configuration (to be moved to firmware) */
struct si5341_reg_default {
u16 address;
u8 value;
};
static const char * const si5341_input_clock_names[] = {
"in0", "in1", "in2", "xtal"
};
/* Output configuration registers 0..9 are not quite logically organized */
/* Also for si5345 */
static const u16 si5341_reg_output_offset[] = {
0x0108,
0x010D,
0x0112,
0x0117,
0x011C,
0x0121,
0x0126,
0x012B,
0x0130,
0x013A,
};
/* for si5340, si5342 and si5344 */
static const u16 si5340_reg_output_offset[] = {
0x0112,
0x0117,
0x0126,
0x012B,
};
/* The location of the R divider registers */
static const u16 si5341_reg_rdiv_offset[] = {
0x024A,
0x024D,
0x0250,
0x0253,
0x0256,
0x0259,
0x025C,
0x025F,
0x0262,
0x0268,
};
static const u16 si5340_reg_rdiv_offset[] = {
0x0250,
0x0253,
0x025C,
0x025F,
};
/*
* Programming sequence from ClockBuilder, settings to initialize the system
* using only the XTAL input, without pre-divider.
* This also contains settings that aren't mentioned anywhere in the datasheet.
* The "known" settings like synth and output configuration are done later.
*/
static const struct si5341_reg_default si5341_reg_defaults[] = {
{ 0x0017, 0x3A }, /* INT mask (disable interrupts) */
{ 0x0018, 0xFF }, /* INT mask */
{ 0x0021, 0x0F }, /* Select XTAL as input */
{ 0x0022, 0x00 }, /* Not in datasheet */
{ 0x002B, 0x02 }, /* SPI config */
{ 0x002C, 0x20 }, /* LOS enable for XTAL */
{ 0x002D, 0x00 }, /* LOS timing */
{ 0x002E, 0x00 },
{ 0x002F, 0x00 },
{ 0x0030, 0x00 },
{ 0x0031, 0x00 },
{ 0x0032, 0x00 },
{ 0x0033, 0x00 },
{ 0x0034, 0x00 },
{ 0x0035, 0x00 },
{ 0x0036, 0x00 },
{ 0x0037, 0x00 },
{ 0x0038, 0x00 }, /* LOS setting (thresholds) */
{ 0x0039, 0x00 },
{ 0x003A, 0x00 },
{ 0x003B, 0x00 },
{ 0x003C, 0x00 },
{ 0x003D, 0x00 }, /* LOS setting (thresholds) end */
{ 0x0041, 0x00 }, /* LOS0_DIV_SEL */
{ 0x0042, 0x00 }, /* LOS1_DIV_SEL */
{ 0x0043, 0x00 }, /* LOS2_DIV_SEL */
{ 0x0044, 0x00 }, /* LOS3_DIV_SEL */
{ 0x009E, 0x00 }, /* Not in datasheet */
{ 0x0102, 0x01 }, /* Enable outputs */
{ 0x013F, 0x00 }, /* Not in datasheet */
{ 0x0140, 0x00 }, /* Not in datasheet */
{ 0x0141, 0x40 }, /* OUT LOS */
	{ 0x0202, 0x00 }, /* XAXB_FREQ_OFFSET (=0) */
{ 0x0203, 0x00 },
{ 0x0204, 0x00 },
{ 0x0205, 0x00 },
{ 0x0206, 0x00 }, /* PXAXB (2^x) */
{ 0x0208, 0x00 }, /* Px divider setting (usually 0) */
{ 0x0209, 0x00 },
{ 0x020A, 0x00 },
{ 0x020B, 0x00 },
{ 0x020C, 0x00 },
{ 0x020D, 0x00 },
{ 0x020E, 0x00 },
{ 0x020F, 0x00 },
{ 0x0210, 0x00 },
{ 0x0211, 0x00 },
{ 0x0212, 0x00 },
{ 0x0213, 0x00 },
{ 0x0214, 0x00 },
{ 0x0215, 0x00 },
{ 0x0216, 0x00 },
{ 0x0217, 0x00 },
{ 0x0218, 0x00 },
{ 0x0219, 0x00 },
{ 0x021A, 0x00 },
{ 0x021B, 0x00 },
{ 0x021C, 0x00 },
{ 0x021D, 0x00 },
{ 0x021E, 0x00 },
{ 0x021F, 0x00 },
{ 0x0220, 0x00 },
{ 0x0221, 0x00 },
{ 0x0222, 0x00 },
{ 0x0223, 0x00 },
{ 0x0224, 0x00 },
{ 0x0225, 0x00 },
{ 0x0226, 0x00 },
{ 0x0227, 0x00 },
{ 0x0228, 0x00 },
{ 0x0229, 0x00 },
{ 0x022A, 0x00 },
{ 0x022B, 0x00 },
{ 0x022C, 0x00 },
{ 0x022D, 0x00 },
{ 0x022E, 0x00 },
{ 0x022F, 0x00 }, /* Px divider setting (usually 0) end */
{ 0x026B, 0x00 }, /* DESIGN_ID (ASCII string) */
{ 0x026C, 0x00 },
{ 0x026D, 0x00 },
{ 0x026E, 0x00 },
{ 0x026F, 0x00 },
{ 0x0270, 0x00 },
{ 0x0271, 0x00 },
{ 0x0272, 0x00 }, /* DESIGN_ID (ASCII string) end */
{ 0x0339, 0x1F }, /* N_FSTEP_MSK */
{ 0x033B, 0x00 }, /* Nx_FSTEPW (Frequency step) */
{ 0x033C, 0x00 },
{ 0x033D, 0x00 },
{ 0x033E, 0x00 },
{ 0x033F, 0x00 },
{ 0x0340, 0x00 },
{ 0x0341, 0x00 },
{ 0x0342, 0x00 },
{ 0x0343, 0x00 },
{ 0x0344, 0x00 },
{ 0x0345, 0x00 },
{ 0x0346, 0x00 },
{ 0x0347, 0x00 },
{ 0x0348, 0x00 },
{ 0x0349, 0x00 },
{ 0x034A, 0x00 },
{ 0x034B, 0x00 },
{ 0x034C, 0x00 },
{ 0x034D, 0x00 },
{ 0x034E, 0x00 },
{ 0x034F, 0x00 },
{ 0x0350, 0x00 },
{ 0x0351, 0x00 },
{ 0x0352, 0x00 },
{ 0x0353, 0x00 },
{ 0x0354, 0x00 },
{ 0x0355, 0x00 },
{ 0x0356, 0x00 },
{ 0x0357, 0x00 },
{ 0x0358, 0x00 }, /* Nx_FSTEPW (Frequency step) end */
{ 0x0359, 0x00 }, /* Nx_DELAY */
{ 0x035A, 0x00 },
{ 0x035B, 0x00 },
{ 0x035C, 0x00 },
{ 0x035D, 0x00 },
{ 0x035E, 0x00 },
{ 0x035F, 0x00 },
{ 0x0360, 0x00 },
{ 0x0361, 0x00 },
{ 0x0362, 0x00 }, /* Nx_DELAY end */
{ 0x0802, 0x00 }, /* Not in datasheet */
{ 0x0803, 0x00 }, /* Not in datasheet */
{ 0x0804, 0x00 }, /* Not in datasheet */
{ 0x090E, 0x02 }, /* XAXB_EXTCLK_EN=0 XAXB_PDNB=1 (use XTAL) */
{ 0x091C, 0x04 }, /* ZDM_EN=4 (Normal mode) */
{ 0x0949, 0x00 }, /* IN_EN (disable input clocks) */
{ 0x094A, 0x00 }, /* INx_TO_PFD_EN (disabled) */
{ 0x0A02, 0x00 }, /* Not in datasheet */
{ 0x0B44, 0x0F }, /* PDIV_ENB (datasheet does not mention what it is) */
{ 0x0B57, 0x10 }, /* VCO_RESET_CALCODE (not described in datasheet) */
{ 0x0B58, 0x05 }, /* VCO_RESET_CALCODE (not described in datasheet) */
};
/* Read and interpret a 44-bit value followed by a 32-bit value from the regmap */
static int si5341_decode_44_32(struct regmap *regmap, unsigned int reg,
u64 *val1, u32 *val2)
{
int err;
u8 r[10];
err = regmap_bulk_read(regmap, reg, r, 10);
if (err < 0)
return err;
*val1 = ((u64)((r[5] & 0x0f) << 8 | r[4]) << 32) |
(get_unaligned_le32(r));
*val2 = get_unaligned_le32(&r[6]);
return 0;
}
static int si5341_encode_44_32(struct regmap *regmap, unsigned int reg,
u64 n_num, u32 n_den)
{
u8 r[10];
/* Shift left as far as possible without overflowing */
while (!(n_num & BIT_ULL(43)) && !(n_den & BIT(31))) {
n_num <<= 1;
n_den <<= 1;
}
/* 44 bits (6 bytes) numerator */
put_unaligned_le32(n_num, r);
r[4] = (n_num >> 32) & 0xff;
r[5] = (n_num >> 40) & 0x0f;
/* 32 bits denominator */
put_unaligned_le32(n_den, &r[6]);
/* Program the fraction */
return regmap_bulk_write(regmap, reg, r, sizeof(r));
}
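/*
 * Worked example of the normalization above (illustrative values): for
 * n_num = 875, n_den = 3 the loop shifts both left 30 times, at which point
 * n_den (3 << 30) has bit 31 set and shifting stops; the ratio is unchanged
 * but now uses the full register precision.
 */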
/* VCO, we assume it runs at a constant frequency */
static unsigned long si5341_clk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_si5341 *data = to_clk_si5341(hw);
int err;
u64 res;
u64 m_num;
u32 m_den;
unsigned int shift;
/* Assume that PDIV is not being used, just read the PLL setting */
err = si5341_decode_44_32(data->regmap, SI5341_PLL_M_NUM,
&m_num, &m_den);
if (err < 0)
return 0;
if (!m_num || !m_den)
return 0;
/*
* Though m_num is 64-bit, only the upper bits are actually used. While
* calculating m_num and m_den, they are shifted as far as possible to
* the left. To avoid 96-bit division here, we just shift them back so
* we can do with just 64 bits.
*/
shift = 0;
res = m_num;
while (res & 0xffff00000000ULL) {
++shift;
res >>= 1;
}
res *= parent_rate;
do_div(res, (m_den >> shift));
/* We cannot return the actual frequency in 32 bit, store it locally */
data->freq_vco = res;
/* Report kHz since the value is out of range */
do_div(res, 1000);
return (unsigned long)res;
}
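/*
 * Illustrative numbers (assumed 48 MHz input, not from the datasheet): with
 * M = 875/3 the PLL runs at 48 MHz * 875 / 3 = 14 GHz, which this callback
 * stores in freq_vco and reports as 14000000 (kHz).
 */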
static int si5341_clk_get_selected_input(struct clk_si5341 *data)
{
int err;
u32 val;
err = regmap_read(data->regmap, SI5341_IN_SEL, &val);
if (err < 0)
return err;
return (val & SI5341_IN_SEL_MASK) >> SI5341_IN_SEL_SHIFT;
}
static u8 si5341_clk_get_parent(struct clk_hw *hw)
{
struct clk_si5341 *data = to_clk_si5341(hw);
int res = si5341_clk_get_selected_input(data);
if (res < 0)
return 0; /* Apparently we cannot report errors */
return res;
}
static int si5341_clk_reparent(struct clk_si5341 *data, u8 index)
{
int err;
u8 val;
val = (index << SI5341_IN_SEL_SHIFT) & SI5341_IN_SEL_MASK;
/* Enable register-based input selection */
val |= SI5341_IN_SEL_REGCTRL;
err = regmap_update_bits(data->regmap,
SI5341_IN_SEL, SI5341_IN_SEL_REGCTRL | SI5341_IN_SEL_MASK, val);
if (err < 0)
return err;
if (index < 3) {
/* Enable input buffer for selected input */
err = regmap_update_bits(data->regmap,
SI5341_IN_EN, 0x07, BIT(index));
if (err < 0)
return err;
/* Enables the input to phase detector */
err = regmap_update_bits(data->regmap, SI5341_INX_TO_PFD_EN,
0x7 << SI5341_INX_TO_PFD_SHIFT,
BIT(index + SI5341_INX_TO_PFD_SHIFT));
if (err < 0)
return err;
/* Power down XTAL oscillator and buffer */
err = regmap_update_bits(data->regmap, SI5341_XAXB_CFG,
SI5341_XAXB_CFG_PDNB, 0);
if (err < 0)
return err;
/*
* Set the P divider to "1". There's no explanation in the
* datasheet of these registers, but the clockbuilder software
* programs a "1" when the input is being used.
*/
err = regmap_write(data->regmap, SI5341_IN_PDIV(index), 1);
if (err < 0)
return err;
err = regmap_write(data->regmap, SI5341_IN_PSET(index), 1);
if (err < 0)
return err;
/* Set update PDIV bit */
err = regmap_write(data->regmap, SI5341_PX_UPD, BIT(index));
if (err < 0)
return err;
} else {
/* Disable all input buffers */
err = regmap_update_bits(data->regmap, SI5341_IN_EN, 0x07, 0);
if (err < 0)
return err;
/* Disable input to phase detector */
err = regmap_update_bits(data->regmap, SI5341_INX_TO_PFD_EN,
0x7 << SI5341_INX_TO_PFD_SHIFT, 0);
if (err < 0)
return err;
/* Power up XTAL oscillator and buffer, select clock mode */
err = regmap_update_bits(data->regmap, SI5341_XAXB_CFG,
SI5341_XAXB_CFG_PDNB | SI5341_XAXB_CFG_EXTCLK_EN,
SI5341_XAXB_CFG_PDNB | (data->xaxb_ext_clk ?
SI5341_XAXB_CFG_EXTCLK_EN : 0));
if (err < 0)
return err;
}
return 0;
}
static int si5341_clk_set_parent(struct clk_hw *hw, u8 index)
{
struct clk_si5341 *data = to_clk_si5341(hw);
return si5341_clk_reparent(data, index);
}
static const struct clk_ops si5341_clk_ops = {
.determine_rate = clk_hw_determine_rate_no_reparent,
.set_parent = si5341_clk_set_parent,
.get_parent = si5341_clk_get_parent,
.recalc_rate = si5341_clk_recalc_rate,
};
/* Synthesizers, there are 5 synthesizers that connect to any of the outputs */
/* The synthesizer is on if all power and enable bits are set */
static int si5341_synth_clk_is_on(struct clk_hw *hw)
{
struct clk_si5341_synth *synth = to_clk_si5341_synth(hw);
int err;
u32 val;
u8 index = synth->index;
err = regmap_read(synth->data->regmap,
SI5341_SYNTH_N_CLK_TO_OUTX_EN, &val);
if (err < 0)
return 0;
if (!(val & BIT(index)))
return 0;
err = regmap_read(synth->data->regmap, SI5341_SYNTH_N_PDNB, &val);
if (err < 0)
return 0;
if (!(val & BIT(index)))
return 0;
/* This bit must be 0 for the synthesizer to receive clock input */
err = regmap_read(synth->data->regmap, SI5341_SYNTH_N_CLK_DIS, &val);
if (err < 0)
return 0;
return !(val & BIT(index));
}
static void si5341_synth_clk_unprepare(struct clk_hw *hw)
{
struct clk_si5341_synth *synth = to_clk_si5341_synth(hw);
u8 index = synth->index; /* In range 0..5 */
u8 mask = BIT(index);
/* Disable output */
regmap_update_bits(synth->data->regmap,
SI5341_SYNTH_N_CLK_TO_OUTX_EN, mask, 0);
/* Power down */
regmap_update_bits(synth->data->regmap,
SI5341_SYNTH_N_PDNB, mask, 0);
/* Disable clock input to synth (set to 1 to disable) */
regmap_update_bits(synth->data->regmap,
SI5341_SYNTH_N_CLK_DIS, mask, mask);
}
static int si5341_synth_clk_prepare(struct clk_hw *hw)
{
struct clk_si5341_synth *synth = to_clk_si5341_synth(hw);
int err;
u8 index = synth->index;
u8 mask = BIT(index);
/* Power up */
err = regmap_update_bits(synth->data->regmap,
SI5341_SYNTH_N_PDNB, mask, mask);
if (err < 0)
return err;
/* Enable clock input to synth (set bit to 0 to enable) */
err = regmap_update_bits(synth->data->regmap,
SI5341_SYNTH_N_CLK_DIS, mask, 0);
if (err < 0)
return err;
/* Enable output */
return regmap_update_bits(synth->data->regmap,
SI5341_SYNTH_N_CLK_TO_OUTX_EN, mask, mask);
}
/* Synth clock frequency: Fvco * n_den / n_num, with Fvco in 13500–14256 MHz */
static unsigned long si5341_synth_clk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_si5341_synth *synth = to_clk_si5341_synth(hw);
u64 f;
u64 n_num;
u32 n_den;
int err;
err = si5341_decode_44_32(synth->data->regmap,
SI5341_SYNTH_N_NUM(synth->index), &n_num, &n_den);
	if (err < 0)
		return 0; /* recalc_rate() returns unsigned long, cannot report errors */
/* Check for bogus/uninitialized settings */
if (!n_num || !n_den)
return 0;
/*
* n_num and n_den are shifted left as much as possible, so to prevent
* overflow in 64-bit math, we shift n_den 4 bits to the right
*/
f = synth->data->freq_vco;
f *= n_den >> 4;
/* Now we need to do 64-bit division: f/n_num */
/* And compensate for the 4 bits we dropped */
f = div64_u64(f, (n_num >> 4));
return f;
}
static long si5341_synth_clk_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
struct clk_si5341_synth *synth = to_clk_si5341_synth(hw);
u64 f;
/* The synthesizer accuracy is such that anything in range will work */
f = synth->data->freq_vco;
do_div(f, SI5341_SYNTH_N_MAX);
if (rate < f)
return f;
f = synth->data->freq_vco;
do_div(f, SI5341_SYNTH_N_MIN);
if (rate > f)
return f;
return rate;
}
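/*
 * Example of the clamping above: with Fvco = 13.5 GHz the synthesizer spans
 * roughly 13.5 GHz / 4095 ~= 3.3 MHz up to 13.5 GHz / 10 = 1.35 GHz;
 * requests outside this window are rounded to the nearest boundary.
 */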
static int si5341_synth_program(struct clk_si5341_synth *synth,
u64 n_num, u32 n_den, bool is_integer)
{
int err;
u8 index = synth->index;
	err = si5341_encode_44_32(synth->data->regmap,
			SI5341_SYNTH_N_NUM(index), n_num, n_den);
	if (err < 0)
		return err;
err = regmap_update_bits(synth->data->regmap,
SI5341_SYNTH_N_PIBYP, BIT(index), is_integer ? BIT(index) : 0);
if (err < 0)
return err;
return regmap_write(synth->data->regmap,
SI5341_SYNTH_N_UPD(index), 0x01);
}
static int si5341_synth_clk_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_si5341_synth *synth = to_clk_si5341_synth(hw);
u64 n_num;
u32 n_den;
u32 r;
u32 g;
bool is_integer;
n_num = synth->data->freq_vco;
/* see if there's an integer solution */
r = do_div(n_num, rate);
is_integer = (r == 0);
if (is_integer) {
/* Integer divider equal to n_num */
n_den = 1;
} else {
/* Calculate a fractional solution */
g = gcd(r, rate);
n_den = rate / g;
n_num *= n_den;
n_num += r / g;
}
dev_dbg(&synth->data->i2c_client->dev,
"%s(%u): n=0x%llx d=0x%x %s\n", __func__,
synth->index, n_num, n_den,
is_integer ? "int" : "frac");
return si5341_synth_program(synth, n_num, n_den, is_integer);
}
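/*
 * Worked example: Fvco = 14 GHz, rate = 148.5 MHz. do_div() leaves the
 * integer part 94 with remainder r = 41 MHz; gcd(41 MHz, 148.5 MHz) is
 * 500 kHz, so n_den = 297 and n_num = 94 * 297 + 82 = 28000, i.e. the
 * synthesizer divides by N = 28000/297 ~= 94.276.
 */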
static const struct clk_ops si5341_synth_clk_ops = {
.is_prepared = si5341_synth_clk_is_on,
.prepare = si5341_synth_clk_prepare,
.unprepare = si5341_synth_clk_unprepare,
.recalc_rate = si5341_synth_clk_recalc_rate,
.round_rate = si5341_synth_clk_round_rate,
.set_rate = si5341_synth_clk_set_rate,
};
static int si5341_output_clk_is_on(struct clk_hw *hw)
{
struct clk_si5341_output *output = to_clk_si5341_output(hw);
int err;
u32 val;
err = regmap_read(output->data->regmap,
SI5341_OUT_CONFIG(output), &val);
if (err < 0)
return err;
/* Bit 0=PDN, 1=OE so only a value of 0x2 enables the output */
return (val & 0x03) == SI5341_OUT_CFG_OE;
}
/* Disables and then powers down the output */
static void si5341_output_clk_unprepare(struct clk_hw *hw)
{
struct clk_si5341_output *output = to_clk_si5341_output(hw);
regmap_update_bits(output->data->regmap,
SI5341_OUT_CONFIG(output),
SI5341_OUT_CFG_OE, 0);
regmap_update_bits(output->data->regmap,
SI5341_OUT_CONFIG(output),
SI5341_OUT_CFG_PDN, SI5341_OUT_CFG_PDN);
}
/* Powers up and then enables the output */
static int si5341_output_clk_prepare(struct clk_hw *hw)
{
struct clk_si5341_output *output = to_clk_si5341_output(hw);
int err;
err = regmap_update_bits(output->data->regmap,
SI5341_OUT_CONFIG(output),
SI5341_OUT_CFG_PDN, 0);
if (err < 0)
return err;
return regmap_update_bits(output->data->regmap,
SI5341_OUT_CONFIG(output),
SI5341_OUT_CFG_OE, SI5341_OUT_CFG_OE);
}
static unsigned long si5341_output_clk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_si5341_output *output = to_clk_si5341_output(hw);
int err;
u32 val;
u32 r_divider;
u8 r[3];
err = regmap_read(output->data->regmap,
SI5341_OUT_CONFIG(output), &val);
	if (err < 0)
		return 0; /* recalc_rate() cannot report errors */
/* If SI5341_OUT_CFG_RDIV_FORCE2 is set, r_divider is 2 */
if (val & SI5341_OUT_CFG_RDIV_FORCE2)
return parent_rate / 2;
err = regmap_bulk_read(output->data->regmap,
SI5341_OUT_R_REG(output), r, 3);
	if (err < 0)
		return 0; /* recalc_rate() cannot report errors */
	/* Calculate the value as a 24-bit integer */
r_divider = r[2] << 16 | r[1] << 8 | r[0];
/* If Rx_REG is zero, the divider is disabled, so return a "0" rate */
if (!r_divider)
return 0;
/* Divider is 2*(Rx_REG+1) */
r_divider += 1;
r_divider <<= 1;
return parent_rate / r_divider;
}
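/*
 * Example decode: a synth parent at 350 MHz with Rx_REG = 6 yields a
 * divider of 2 * (6 + 1) = 14 and therefore 25 MHz; with the FORCE2 bit
 * set the output is simply parent / 2, regardless of Rx_REG.
 */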
static int si5341_output_clk_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
unsigned long rate = req->rate;
unsigned long r;
if (!rate)
return 0;
r = req->best_parent_rate >> 1;
/* If rate is an even divisor, no changes to parent required */
if (r && !(r % rate))
return 0;
if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) {
if (rate > 200000000) {
/* minimum r-divider is 2 */
r = 2;
} else {
/* Take a parent frequency near 400 MHz */
r = (400000000u / rate) & ~1;
}
req->best_parent_rate = r * rate;
} else {
/* We cannot change our parent's rate, report what we can do */
r /= rate;
rate = req->best_parent_rate / (r << 1);
}
req->rate = rate;
return 0;
}
static int si5341_output_clk_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_si5341_output *output = to_clk_si5341_output(hw);
u32 r_div;
int err;
u8 r[3];
if (!rate)
return -EINVAL;
/* Frequency divider is (r_div + 1) * 2 */
r_div = (parent_rate / rate) >> 1;
if (r_div <= 1)
r_div = 0;
else if (r_div >= BIT(24))
r_div = BIT(24) - 1;
else
--r_div;
/* For a value of "2", we set the "OUT0_RDIV_FORCE2" bit */
err = regmap_update_bits(output->data->regmap,
SI5341_OUT_CONFIG(output),
SI5341_OUT_CFG_RDIV_FORCE2,
(r_div == 0) ? SI5341_OUT_CFG_RDIV_FORCE2 : 0);
if (err < 0)
return err;
/* Always write Rx_REG, because a zero value disables the divider */
r[0] = r_div ? (r_div & 0xff) : 1;
r[1] = (r_div >> 8) & 0xff;
r[2] = (r_div >> 16) & 0xff;
err = regmap_bulk_write(output->data->regmap,
SI5341_OUT_R_REG(output), r, 3);
	return err;
}
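/*
 * Example encode, the inverse of the decode above: 25 MHz from a 350 MHz
 * parent gives r_div = (350 / 25) >> 1 = 7, stored as 6 in Rx_REG; a ratio
 * of exactly 2 computes r_div = 0 and sets the FORCE2 bit instead.
 */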
static int si5341_output_reparent(struct clk_si5341_output *output, u8 index)
{
return regmap_update_bits(output->data->regmap,
SI5341_OUT_MUX_SEL(output), 0x07, index);
}
static int si5341_output_set_parent(struct clk_hw *hw, u8 index)
{
struct clk_si5341_output *output = to_clk_si5341_output(hw);
if (index >= output->data->num_synth)
return -EINVAL;
return si5341_output_reparent(output, index);
}
static u8 si5341_output_get_parent(struct clk_hw *hw)
{
struct clk_si5341_output *output = to_clk_si5341_output(hw);
u32 val;
regmap_read(output->data->regmap, SI5341_OUT_MUX_SEL(output), &val);
return val & 0x7;
}
static const struct clk_ops si5341_output_clk_ops = {
.is_prepared = si5341_output_clk_is_on,
.prepare = si5341_output_clk_prepare,
.unprepare = si5341_output_clk_unprepare,
.recalc_rate = si5341_output_clk_recalc_rate,
.determine_rate = si5341_output_clk_determine_rate,
.set_rate = si5341_output_clk_set_rate,
.set_parent = si5341_output_set_parent,
.get_parent = si5341_output_get_parent,
};
/*
* The chip can be bought in a pre-programmed version, or one can program the
* NVM in the chip to boot up in a preset mode. This routine tries to determine
* if that's the case, or if we need to reset and program everything from
* scratch. Returns negative error, or true/false.
*/
static int si5341_is_programmed_already(struct clk_si5341 *data)
{
int err;
u8 r[4];
/* Read the PLL divider value, it must have a non-zero value */
err = regmap_bulk_read(data->regmap, SI5341_PLL_M_DEN,
r, ARRAY_SIZE(r));
if (err < 0)
return err;
return !!get_unaligned_le32(r);
}
static struct clk_hw *
of_clk_si5341_get(struct of_phandle_args *clkspec, void *_data)
{
struct clk_si5341 *data = _data;
unsigned int idx = clkspec->args[1];
unsigned int group = clkspec->args[0];
switch (group) {
case 0:
if (idx >= data->num_outputs) {
dev_err(&data->i2c_client->dev,
"invalid output index %u\n", idx);
return ERR_PTR(-EINVAL);
}
return &data->clk[idx].hw;
case 1:
if (idx >= data->num_synth) {
dev_err(&data->i2c_client->dev,
"invalid synthesizer index %u\n", idx);
return ERR_PTR(-EINVAL);
}
return &data->synth[idx].hw;
case 2:
if (idx > 0) {
dev_err(&data->i2c_client->dev,
"invalid PLL index %u\n", idx);
return ERR_PTR(-EINVAL);
}
return &data->hw;
default:
dev_err(&data->i2c_client->dev, "invalid group %u\n", group);
return ERR_PTR(-EINVAL);
}
}
static int si5341_probe_chip_id(struct clk_si5341 *data)
{
int err;
u8 reg[4];
u16 model;
err = regmap_bulk_read(data->regmap, SI5341_PN_BASE, reg,
ARRAY_SIZE(reg));
if (err < 0) {
dev_err(&data->i2c_client->dev, "Failed to read chip ID\n");
return err;
}
model = get_unaligned_le16(reg);
dev_info(&data->i2c_client->dev, "Chip: %x Grade: %u Rev: %u\n",
model, reg[2], reg[3]);
switch (model) {
case 0x5340:
data->num_outputs = SI5340_MAX_NUM_OUTPUTS;
data->num_synth = SI5340_NUM_SYNTH;
data->reg_output_offset = si5340_reg_output_offset;
data->reg_rdiv_offset = si5340_reg_rdiv_offset;
break;
case 0x5341:
data->num_outputs = SI5341_MAX_NUM_OUTPUTS;
data->num_synth = SI5341_NUM_SYNTH;
data->reg_output_offset = si5341_reg_output_offset;
data->reg_rdiv_offset = si5341_reg_rdiv_offset;
break;
case 0x5342:
data->num_outputs = SI5342_MAX_NUM_OUTPUTS;
data->num_synth = SI5342_NUM_SYNTH;
data->reg_output_offset = si5340_reg_output_offset;
data->reg_rdiv_offset = si5340_reg_rdiv_offset;
break;
case 0x5344:
data->num_outputs = SI5344_MAX_NUM_OUTPUTS;
data->num_synth = SI5344_NUM_SYNTH;
data->reg_output_offset = si5340_reg_output_offset;
data->reg_rdiv_offset = si5340_reg_rdiv_offset;
break;
case 0x5345:
data->num_outputs = SI5345_MAX_NUM_OUTPUTS;
data->num_synth = SI5345_NUM_SYNTH;
data->reg_output_offset = si5341_reg_output_offset;
data->reg_rdiv_offset = si5341_reg_rdiv_offset;
break;
default:
dev_err(&data->i2c_client->dev, "Model '%x' not supported\n",
model);
return -EINVAL;
}
data->chip_id = model;
return 0;
}
/* Read active settings into the regmap cache for later reference */
static int si5341_read_settings(struct clk_si5341 *data)
{
int err;
u8 i;
u8 r[10];
err = regmap_bulk_read(data->regmap, SI5341_PLL_M_NUM, r, 10);
if (err < 0)
return err;
err = regmap_bulk_read(data->regmap,
SI5341_SYNTH_N_CLK_TO_OUTX_EN, r, 3);
if (err < 0)
return err;
err = regmap_bulk_read(data->regmap,
SI5341_SYNTH_N_CLK_DIS, r, 1);
if (err < 0)
return err;
for (i = 0; i < data->num_synth; ++i) {
err = regmap_bulk_read(data->regmap,
SI5341_SYNTH_N_NUM(i), r, 10);
if (err < 0)
return err;
}
for (i = 0; i < data->num_outputs; ++i) {
err = regmap_bulk_read(data->regmap,
data->reg_output_offset[i], r, 4);
if (err < 0)
return err;
err = regmap_bulk_read(data->regmap,
data->reg_rdiv_offset[i], r, 3);
if (err < 0)
return err;
}
return 0;
}
static int si5341_write_multiple(struct clk_si5341 *data,
const struct si5341_reg_default *values, unsigned int num_values)
{
unsigned int i;
int res;
for (i = 0; i < num_values; ++i) {
res = regmap_write(data->regmap,
values[i].address, values[i].value);
if (res < 0) {
dev_err(&data->i2c_client->dev,
"Failed to write %#x:%#x\n",
values[i].address, values[i].value);
return res;
}
}
return 0;
}
static const struct si5341_reg_default si5341_preamble[] = {
{ 0x0B25, 0x00 },
{ 0x0502, 0x01 },
{ 0x0505, 0x03 },
{ 0x0957, 0x17 },
{ 0x0B4E, 0x1A },
};
static const struct si5341_reg_default si5345_preamble[] = {
{ 0x0B25, 0x00 },
{ 0x0540, 0x01 },
};
static int si5341_send_preamble(struct clk_si5341 *data)
{
int res;
u32 revision;
/* For revision 2 and up, the values are slightly different */
res = regmap_read(data->regmap, SI5341_DEVICE_REV, &revision);
if (res < 0)
return res;
/* Write "preamble" as specified by datasheet */
res = regmap_write(data->regmap, 0xB24, revision < 2 ? 0xD8 : 0xC0);
if (res < 0)
return res;
/* The si5342..si5345 require a different preamble */
if (data->chip_id > 0x5341)
res = si5341_write_multiple(data,
si5345_preamble, ARRAY_SIZE(si5345_preamble));
else
res = si5341_write_multiple(data,
si5341_preamble, ARRAY_SIZE(si5341_preamble));
if (res < 0)
return res;
/* Datasheet specifies a 300ms wait after sending the preamble */
msleep(300);
return 0;
}
/* Perform a soft reset and write post-amble */
static int si5341_finalize_defaults(struct clk_si5341 *data)
{
int res;
u32 revision;
res = regmap_write(data->regmap, SI5341_IO_VDD_SEL,
data->iovdd_33 ? 1 : 0);
if (res < 0)
return res;
res = regmap_read(data->regmap, SI5341_DEVICE_REV, &revision);
if (res < 0)
return res;
dev_dbg(&data->i2c_client->dev, "%s rev=%u\n", __func__, revision);
res = regmap_write(data->regmap, SI5341_SOFT_RST, 0x01);
if (res < 0)
return res;
/* The si5342..si5345 have an additional post-amble */
if (data->chip_id > 0x5341) {
res = regmap_write(data->regmap, 0x540, 0x0);
if (res < 0)
return res;
}
/* Datasheet does not explain these nameless registers */
res = regmap_write(data->regmap, 0xB24, revision < 2 ? 0xDB : 0xC3);
if (res < 0)
return res;
res = regmap_write(data->regmap, 0x0B25, 0x02);
if (res < 0)
return res;
return 0;
}
static const struct regmap_range si5341_regmap_volatile_range[] = {
regmap_reg_range(0x000C, 0x0012), /* Status */
regmap_reg_range(0x001C, 0x001E), /* reset, finc/fdec */
regmap_reg_range(0x00E2, 0x00FE), /* NVM, interrupts, device ready */
/* Update bits for P divider and synth config */
regmap_reg_range(SI5341_PX_UPD, SI5341_PX_UPD),
regmap_reg_range(SI5341_SYNTH_N_UPD(0), SI5341_SYNTH_N_UPD(0)),
regmap_reg_range(SI5341_SYNTH_N_UPD(1), SI5341_SYNTH_N_UPD(1)),
regmap_reg_range(SI5341_SYNTH_N_UPD(2), SI5341_SYNTH_N_UPD(2)),
regmap_reg_range(SI5341_SYNTH_N_UPD(3), SI5341_SYNTH_N_UPD(3)),
regmap_reg_range(SI5341_SYNTH_N_UPD(4), SI5341_SYNTH_N_UPD(4)),
};
static const struct regmap_access_table si5341_regmap_volatile = {
.yes_ranges = si5341_regmap_volatile_range,
.n_yes_ranges = ARRAY_SIZE(si5341_regmap_volatile_range),
};
/* Pages 0, 1, 2, 3, 9, A and B are valid, so the map spans 12 pages (0x0000..0x0BFF) */
static const struct regmap_range_cfg si5341_regmap_ranges[] = {
{
.range_min = 0,
.range_max = SI5341_REGISTER_MAX,
.selector_reg = SI5341_PAGE,
.selector_mask = 0xff,
.selector_shift = 0,
.window_start = 0,
.window_len = 256,
},
};
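/*
 * Paging example: accessing SI5341_SYNTH_N_PDNB (0x0A05) makes regmap first
 * write 0x0A to the PAGE register at 0x0001 and then transfer the in-page
 * offset 0x05, so callers can use flat 16-bit addresses throughout.
 */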
static int si5341_wait_device_ready(struct i2c_client *client)
{
int count;
/* Datasheet warns: Any attempt to read or write any register other
* than DEVICE_READY before DEVICE_READY reads as 0x0F may corrupt the
* NVM programming and may corrupt the register contents, as they are
* read from NVM. Note that this includes accesses to the PAGE register.
* Also: DEVICE_READY is available on every register page, so no page
* change is needed to read it.
* Do this outside regmap to avoid automatic PAGE register access.
* May take up to 300ms to complete.
*/
for (count = 0; count < 15; ++count) {
s32 result = i2c_smbus_read_byte_data(client,
SI5341_DEVICE_READY);
if (result < 0)
return result;
if (result == 0x0F)
return 0;
msleep(20);
}
dev_err(&client->dev, "timeout waiting for DEVICE_READY\n");
return -EIO;
}
static const struct regmap_config si5341_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.cache_type = REGCACHE_RBTREE,
.ranges = si5341_regmap_ranges,
.num_ranges = ARRAY_SIZE(si5341_regmap_ranges),
.max_register = SI5341_REGISTER_MAX,
.volatile_table = &si5341_regmap_volatile,
};
static int si5341_dt_parse_dt(struct clk_si5341 *data,
struct clk_si5341_output_config *config)
{
struct device_node *child;
struct device_node *np = data->i2c_client->dev.of_node;
u32 num;
u32 val;
memset(config, 0, sizeof(struct clk_si5341_output_config) *
SI5341_MAX_NUM_OUTPUTS);
for_each_child_of_node(np, child) {
if (of_property_read_u32(child, "reg", &num)) {
dev_err(&data->i2c_client->dev, "missing reg property of %s\n",
child->name);
goto put_child;
}
if (num >= SI5341_MAX_NUM_OUTPUTS) {
dev_err(&data->i2c_client->dev, "invalid clkout %d\n", num);
goto put_child;
}
if (!of_property_read_u32(child, "silabs,format", &val)) {
/* Set cm and ampl conservatively to 3v3 settings */
switch (val) {
case 1: /* normal differential */
config[num].out_cm_ampl_bits = 0x33;
break;
case 2: /* low-power differential */
config[num].out_cm_ampl_bits = 0x13;
break;
case 4: /* LVCMOS */
config[num].out_cm_ampl_bits = 0x33;
/* Set SI recommended impedance for LVCMOS */
config[num].out_format_drv_bits |= 0xc0;
break;
default:
dev_err(&data->i2c_client->dev,
"invalid silabs,format %u for %u\n",
val, num);
goto put_child;
}
config[num].out_format_drv_bits &= ~0x07;
config[num].out_format_drv_bits |= val & 0x07;
/* Always enable the SYNC feature */
config[num].out_format_drv_bits |= 0x08;
}
if (!of_property_read_u32(child, "silabs,common-mode", &val)) {
if (val > 0xf) {
dev_err(&data->i2c_client->dev,
"invalid silabs,common-mode %u\n",
val);
goto put_child;
}
config[num].out_cm_ampl_bits &= 0xf0;
config[num].out_cm_ampl_bits |= val & 0x0f;
}
if (!of_property_read_u32(child, "silabs,amplitude", &val)) {
if (val > 0xf) {
dev_err(&data->i2c_client->dev,
"invalid silabs,amplitude %u\n",
val);
goto put_child;
}
config[num].out_cm_ampl_bits &= 0x0f;
config[num].out_cm_ampl_bits |= (val << 4) & 0xf0;
}
if (of_property_read_bool(child, "silabs,disable-high"))
config[num].out_format_drv_bits |= 0x10;
config[num].synth_master =
of_property_read_bool(child, "silabs,synth-master");
config[num].always_on =
of_property_read_bool(child, "always-on");
config[num].vdd_sel_bits = 0x08;
if (data->clk[num].vddo_reg) {
int vdd = regulator_get_voltage(data->clk[num].vddo_reg);
switch (vdd) {
case 3300000:
config[num].vdd_sel_bits |= 0 << 4;
break;
case 1800000:
config[num].vdd_sel_bits |= 1 << 4;
break;
case 2500000:
config[num].vdd_sel_bits |= 2 << 4;
break;
default:
dev_err(&data->i2c_client->dev,
"unsupported vddo voltage %d for %s\n",
vdd, child->name);
goto put_child;
}
} else {
/* chip seems to default to 2.5V when not set */
dev_warn(&data->i2c_client->dev,
"no regulator set, defaulting vdd_sel to 2.5V for %s\n",
child->name);
config[num].vdd_sel_bits |= 2 << 4;
}
}
return 0;
put_child:
of_node_put(child);
return -EINVAL;
}
/*
* If not pre-configured, calculate and set the PLL configuration manually.
* For low-jitter performance, the PLL should be set such that the synthesizers
* only need integer division.
* Without any user guidance, we'll set the PLL to 14GHz, which still allows
* the chip to generate any frequency on its outputs, but jitter performance
* may be sub-optimal.
*/
static int si5341_initialize_pll(struct clk_si5341 *data)
{
struct device_node *np = data->i2c_client->dev.of_node;
u32 m_num = 0;
u32 m_den = 0;
int sel;
if (of_property_read_u32(np, "silabs,pll-m-num", &m_num)) {
dev_err(&data->i2c_client->dev,
"PLL configuration requires silabs,pll-m-num\n");
}
if (of_property_read_u32(np, "silabs,pll-m-den", &m_den)) {
dev_err(&data->i2c_client->dev,
"PLL configuration requires silabs,pll-m-den\n");
}
if (!m_num || !m_den) {
dev_err(&data->i2c_client->dev,
"PLL configuration invalid, assume 14GHz\n");
sel = si5341_clk_get_selected_input(data);
if (sel < 0)
return sel;
m_den = clk_get_rate(data->input_clk[sel]) / 10;
m_num = 1400000000;
}
return si5341_encode_44_32(data->regmap,
SI5341_PLL_M_NUM, m_num, m_den);
}
static int si5341_clk_select_active_input(struct clk_si5341 *data)
{
int res;
int err;
int i;
res = si5341_clk_get_selected_input(data);
if (res < 0)
return res;
/* If the current register setting is invalid, pick the first input */
if (!data->input_clk[res]) {
dev_dbg(&data->i2c_client->dev,
"Input %d not connected, rerouting\n", res);
res = -ENODEV;
for (i = 0; i < SI5341_NUM_INPUTS; ++i) {
if (data->input_clk[i]) {
res = i;
break;
}
}
if (res < 0) {
dev_err(&data->i2c_client->dev,
"No clock input available\n");
return res;
}
}
/* Make sure the selected clock is also enabled and routed */
err = si5341_clk_reparent(data, res);
if (err < 0)
return err;
err = clk_prepare_enable(data->input_clk[res]);
if (err < 0)
return err;
return res;
}
static ssize_t input_present_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct clk_si5341 *data = dev_get_drvdata(dev);
u32 status;
int res = regmap_read(data->regmap, SI5341_STATUS, &status);
if (res < 0)
return res;
res = !(status & SI5341_STATUS_LOSREF);
return sysfs_emit(buf, "%d\n", res);
}
static DEVICE_ATTR_RO(input_present);
static ssize_t input_present_sticky_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct clk_si5341 *data = dev_get_drvdata(dev);
u32 status;
int res = regmap_read(data->regmap, SI5341_STATUS_STICKY, &status);
if (res < 0)
return res;
res = !(status & SI5341_STATUS_LOSREF);
return sysfs_emit(buf, "%d\n", res);
}
static DEVICE_ATTR_RO(input_present_sticky);
static ssize_t pll_locked_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct clk_si5341 *data = dev_get_drvdata(dev);
u32 status;
int res = regmap_read(data->regmap, SI5341_STATUS, &status);
if (res < 0)
return res;
res = !(status & SI5341_STATUS_LOL);
return sysfs_emit(buf, "%d\n", res);
}
static DEVICE_ATTR_RO(pll_locked);
static ssize_t pll_locked_sticky_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct clk_si5341 *data = dev_get_drvdata(dev);
u32 status;
int res = regmap_read(data->regmap, SI5341_STATUS_STICKY, &status);
if (res < 0)
return res;
res = !(status & SI5341_STATUS_LOL);
return sysfs_emit(buf, "%d\n", res);
}
static DEVICE_ATTR_RO(pll_locked_sticky);
static ssize_t clear_sticky_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct clk_si5341 *data = dev_get_drvdata(dev);
long val;
if (kstrtol(buf, 10, &val))
return -EINVAL;
if (val) {
int res = regmap_write(data->regmap, SI5341_STATUS_STICKY, 0);
if (res < 0)
return res;
}
return count;
}
static DEVICE_ATTR_WO(clear_sticky);
static const struct attribute *si5341_attributes[] = {
&dev_attr_input_present.attr,
&dev_attr_input_present_sticky.attr,
&dev_attr_pll_locked.attr,
&dev_attr_pll_locked_sticky.attr,
&dev_attr_clear_sticky.attr,
NULL
};
static int si5341_probe(struct i2c_client *client)
{
struct clk_si5341 *data;
struct clk_init_data init;
struct clk *input;
const char *root_clock_name;
const char *synth_clock_names[SI5341_NUM_SYNTH] = { NULL };
int err;
unsigned int i;
struct clk_si5341_output_config config[SI5341_MAX_NUM_OUTPUTS];
bool initialization_required;
u32 status;
data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->i2c_client = client;
/* Must be done before otherwise touching hardware */
err = si5341_wait_device_ready(client);
if (err)
return err;
for (i = 0; i < SI5341_NUM_INPUTS; ++i) {
input = devm_clk_get(&client->dev, si5341_input_clock_names[i]);
if (IS_ERR(input)) {
if (PTR_ERR(input) == -EPROBE_DEFER)
return -EPROBE_DEFER;
data->input_clk_name[i] = si5341_input_clock_names[i];
} else {
data->input_clk[i] = input;
data->input_clk_name[i] = __clk_get_name(input);
}
}
for (i = 0; i < SI5341_MAX_NUM_OUTPUTS; ++i) {
char reg_name[10];
snprintf(reg_name, sizeof(reg_name), "vddo%d", i);
data->clk[i].vddo_reg = devm_regulator_get_optional(
&client->dev, reg_name);
if (IS_ERR(data->clk[i].vddo_reg)) {
err = PTR_ERR(data->clk[i].vddo_reg);
data->clk[i].vddo_reg = NULL;
if (err == -ENODEV)
continue;
goto cleanup;
} else {
err = regulator_enable(data->clk[i].vddo_reg);
if (err) {
dev_err(&client->dev,
"failed to enable %s regulator: %d\n",
reg_name, err);
data->clk[i].vddo_reg = NULL;
goto cleanup;
}
}
}
err = si5341_dt_parse_dt(data, config);
if (err)
goto cleanup;
if (of_property_read_string(client->dev.of_node, "clock-output-names",
&init.name))
init.name = client->dev.of_node->name;
root_clock_name = init.name;
data->regmap = devm_regmap_init_i2c(client, &si5341_regmap_config);
if (IS_ERR(data->regmap)) {
err = PTR_ERR(data->regmap);
goto cleanup;
}
i2c_set_clientdata(client, data);
err = si5341_probe_chip_id(data);
if (err < 0)
goto cleanup;
if (of_property_read_bool(client->dev.of_node, "silabs,reprogram")) {
initialization_required = true;
} else {
err = si5341_is_programmed_already(data);
if (err < 0)
goto cleanup;
initialization_required = !err;
}
data->xaxb_ext_clk = of_property_read_bool(client->dev.of_node,
"silabs,xaxb-ext-clk");
data->iovdd_33 = of_property_read_bool(client->dev.of_node,
"silabs,iovdd-33");
if (initialization_required) {
/* Populate the regmap cache in preparation for "cache only" */
err = si5341_read_settings(data);
if (err < 0)
goto cleanup;
err = si5341_send_preamble(data);
if (err < 0)
goto cleanup;
/*
* We intend to send all 'final' register values in a single
* transaction. So cache all register writes until we're done
* configuring.
*/
regcache_cache_only(data->regmap, true);
/* Write the configuration pairs from the firmware blob */
err = si5341_write_multiple(data, si5341_reg_defaults,
ARRAY_SIZE(si5341_reg_defaults));
if (err < 0)
goto cleanup;
}
/* Input must be up and running at this point */
err = si5341_clk_select_active_input(data);
if (err < 0)
goto cleanup;
if (initialization_required) {
/* PLL configuration is required */
err = si5341_initialize_pll(data);
if (err < 0)
goto cleanup;
}
/* Register the PLL */
init.parent_names = data->input_clk_name;
init.num_parents = SI5341_NUM_INPUTS;
init.ops = &si5341_clk_ops;
init.flags = 0;
data->hw.init = &init;
err = devm_clk_hw_register(&client->dev, &data->hw);
if (err) {
dev_err(&client->dev, "clock registration failed\n");
goto cleanup;
}
init.num_parents = 1;
init.parent_names = &root_clock_name;
init.ops = &si5341_synth_clk_ops;
for (i = 0; i < data->num_synth; ++i) {
synth_clock_names[i] = devm_kasprintf(&client->dev, GFP_KERNEL,
"%s.N%u", client->dev.of_node->name, i);
if (!synth_clock_names[i]) {
err = -ENOMEM;
goto free_clk_names;
}
init.name = synth_clock_names[i];
data->synth[i].index = i;
data->synth[i].data = data;
data->synth[i].hw.init = &init;
err = devm_clk_hw_register(&client->dev, &data->synth[i].hw);
if (err) {
dev_err(&client->dev,
"synth N%u registration failed\n", i);
goto free_clk_names;
}
}
init.num_parents = data->num_synth;
init.parent_names = synth_clock_names;
init.ops = &si5341_output_clk_ops;
for (i = 0; i < data->num_outputs; ++i) {
init.name = kasprintf(GFP_KERNEL, "%s.%d",
client->dev.of_node->name, i);
if (!init.name) {
err = -ENOMEM;
goto free_clk_names;
}
init.flags = config[i].synth_master ? CLK_SET_RATE_PARENT : 0;
data->clk[i].index = i;
data->clk[i].data = data;
data->clk[i].hw.init = &init;
if (config[i].out_format_drv_bits & 0x07) {
regmap_write(data->regmap,
SI5341_OUT_FORMAT(&data->clk[i]),
config[i].out_format_drv_bits);
regmap_write(data->regmap,
SI5341_OUT_CM(&data->clk[i]),
config[i].out_cm_ampl_bits);
regmap_update_bits(data->regmap,
SI5341_OUT_MUX_SEL(&data->clk[i]),
SI5341_OUT_MUX_VDD_SEL_MASK,
config[i].vdd_sel_bits);
}
err = devm_clk_hw_register(&client->dev, &data->clk[i].hw);
kfree(init.name); /* clock framework made a copy of the name */
if (err) {
dev_err(&client->dev,
"output %u registration failed\n", i);
goto free_clk_names;
}
if (config[i].always_on)
clk_prepare(data->clk[i].hw.clk);
}
err = devm_of_clk_add_hw_provider(&client->dev, of_clk_si5341_get,
data);
if (err) {
dev_err(&client->dev, "unable to add clk provider\n");
goto free_clk_names;
}
if (initialization_required) {
/* Synchronize */
regcache_cache_only(data->regmap, false);
err = regcache_sync(data->regmap);
if (err < 0)
goto free_clk_names;
err = si5341_finalize_defaults(data);
if (err < 0)
goto free_clk_names;
}
/* wait for device to report input clock present and PLL lock */
err = regmap_read_poll_timeout(data->regmap, SI5341_STATUS, status,
!(status & (SI5341_STATUS_LOSREF | SI5341_STATUS_LOL)),
10000, 250000);
if (err) {
dev_err(&client->dev, "Error waiting for input clock or PLL lock\n");
goto free_clk_names;
}
/* clear sticky alarm bits from initialization */
err = regmap_write(data->regmap, SI5341_STATUS_STICKY, 0);
if (err) {
dev_err(&client->dev, "unable to clear sticky status\n");
goto free_clk_names;
}
err = sysfs_create_files(&client->dev.kobj, si5341_attributes);
if (err)
dev_err(&client->dev, "unable to create sysfs files\n");
free_clk_names:
/* Free the names, clk framework makes copies */
for (i = 0; i < data->num_synth; ++i)
devm_kfree(&client->dev, (void *)synth_clock_names[i]);
cleanup:
if (err) {
for (i = 0; i < SI5341_MAX_NUM_OUTPUTS; ++i) {
if (data->clk[i].vddo_reg)
regulator_disable(data->clk[i].vddo_reg);
}
}
return err;
}
static void si5341_remove(struct i2c_client *client)
{
struct clk_si5341 *data = i2c_get_clientdata(client);
int i;
sysfs_remove_files(&client->dev.kobj, si5341_attributes);
for (i = 0; i < SI5341_MAX_NUM_OUTPUTS; ++i) {
if (data->clk[i].vddo_reg)
regulator_disable(data->clk[i].vddo_reg);
}
}
static const struct i2c_device_id si5341_id[] = {
{ "si5340", 0 },
{ "si5341", 1 },
{ "si5342", 2 },
{ "si5344", 4 },
{ "si5345", 5 },
{ }
};
MODULE_DEVICE_TABLE(i2c, si5341_id);
static const struct of_device_id clk_si5341_of_match[] = {
{ .compatible = "silabs,si5340" },
{ .compatible = "silabs,si5341" },
{ .compatible = "silabs,si5342" },
{ .compatible = "silabs,si5344" },
{ .compatible = "silabs,si5345" },
{ }
};
MODULE_DEVICE_TABLE(of, clk_si5341_of_match);
static struct i2c_driver si5341_driver = {
.driver = {
.name = "si5341",
.of_match_table = clk_si5341_of_match,
},
.probe = si5341_probe,
.remove = si5341_remove,
.id_table = si5341_id,
};
module_i2c_driver(si5341_driver);
MODULE_AUTHOR("Mike Looijmans <[email protected]>");
MODULE_DESCRIPTION("Si5341 driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/clk/clk-si5341.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/*
* Copyright (C) Sunplus Technology Co., Ltd.
* All rights reserved.
*/
#include <linux/module.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/bitfield.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <dt-bindings/clock/sunplus,sp7021-clkc.h>
/* special div_width values for PLLTV/PLLA */
#define DIV_TV 33
#define DIV_A 34
/* PLLTV parameters */
enum {
SEL_FRA,
SDM_MOD,
PH_SEL,
NFRA,
DIVR,
DIVN,
DIVM,
P_MAX
};
#define MASK_SEL_FRA GENMASK(1, 1)
#define MASK_SDM_MOD GENMASK(2, 2)
#define MASK_PH_SEL GENMASK(4, 4)
#define MASK_NFRA GENMASK(12, 6)
#define MASK_DIVR GENMASK(8, 7)
#define MASK_DIVN GENMASK(7, 0)
#define MASK_DIVM GENMASK(14, 8)
/* HIWORD_MASK FIELD_PREP */
#define HWM_FIELD_PREP(mask, value) \
({ \
u64 _m = mask; \
(_m << 16) | FIELD_PREP(_m, value); \
})
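/*
 * Example expansion (illustrative value): HWM_FIELD_PREP(MASK_NFRA, 0x15)
 * with MASK_NFRA = GENMASK(12, 6) = 0x1fc0 evaluates to
 * 0x1fc00000 | (0x15 << 6) = 0x1fc00540: the write-enable bits in the high
 * halfword plus the field value in the low halfword.
 */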
struct sp_pll {
struct clk_hw hw;
void __iomem *reg;
spinlock_t lock; /* lock for reg */
int div_shift;
int div_width;
int pd_bit; /* power down bit idx */
int bp_bit; /* bypass bit idx */
unsigned long brate; /* base rate, TODO: replace brate with muldiv */
u32 p[P_MAX]; /* for hold PLLTV/PLLA parameters */
};
#define to_sp_pll(_hw) container_of(_hw, struct sp_pll, hw)
struct sp_clk_gate_info {
u16 reg; /* reg_index_shift */
u16 ext_parent; /* parent is extclk */
};
static const struct sp_clk_gate_info sp_clk_gates[] = {
{ 0x02 },
{ 0x05 },
{ 0x06 },
{ 0x07 },
{ 0x09 },
{ 0x0b, 1 },
{ 0x0f, 1 },
{ 0x14 },
{ 0x15 },
{ 0x16 },
{ 0x17 },
{ 0x18, 1 },
{ 0x19, 1 },
{ 0x1a, 1 },
{ 0x1b, 1 },
{ 0x1c, 1 },
{ 0x1d, 1 },
{ 0x1e },
{ 0x1f, 1 },
{ 0x20 },
{ 0x21 },
{ 0x22 },
{ 0x23 },
{ 0x24 },
{ 0x25 },
{ 0x26 },
{ 0x2a },
{ 0x2b },
{ 0x2d },
{ 0x2e },
{ 0x30 },
{ 0x31 },
{ 0x32 },
{ 0x33 },
{ 0x3d },
{ 0x3e },
{ 0x3f },
{ 0x42 },
{ 0x44 },
{ 0x4b },
{ 0x4c },
{ 0x4d },
{ 0x4e },
{ 0x4f },
{ 0x50 },
{ 0x55 },
{ 0x60 },
{ 0x61 },
{ 0x6a },
{ 0x73 },
{ 0x86 },
{ 0x8a },
{ 0x8b },
{ 0x8d },
{ 0x8e },
{ 0x8f },
{ 0x90 },
{ 0x92 },
{ 0x93 },
{ 0x95 },
{ 0x96 },
{ 0x97 },
{ 0x98 },
{ 0x99 },
};
#define _M 1000000UL
#define F_27M (27 * _M)
/*********************************** PLL_TV **********************************/
/* TODO: set proper FVCO range */
#define FVCO_MIN (100 * _M)
#define FVCO_MAX (200 * _M)
#define F_MIN (FVCO_MIN / 8)
#define F_MAX (FVCO_MAX)
static long plltv_integer_div(struct sp_pll *clk, unsigned long freq)
{
/* valid m values: 27M must be divisible by m */
static const u32 m_table[] = {
1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24, 25, 27, 30, 32
};
u32 m, n, r;
unsigned long fvco, nf;
long ret;
freq = clamp(freq, F_MIN, F_MAX);
/* DIVR 0~3 */
for (r = 0; r <= 3; r++) {
fvco = freq << r;
if (fvco <= FVCO_MAX)
break;
}
/* DIVM */
for (m = 0; m < ARRAY_SIZE(m_table); m++) {
nf = fvco * m_table[m];
n = nf / F_27M;
if ((n * F_27M) == nf)
break;
}
if (m >= ARRAY_SIZE(m_table)) {
ret = -EINVAL;
goto err_not_found;
}
/* save parameters */
clk->p[SEL_FRA] = 0;
clk->p[DIVR] = r;
clk->p[DIVN] = n;
clk->p[DIVM] = m_table[m];
return freq;
err_not_found:
pr_err("%s: %s freq:%lu not found a valid setting\n",
__func__, clk_hw_get_name(&clk->hw), freq);
return ret;
}
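/*
 * Worked example: freq = 148.5 MHz keeps fvco = 148.5 MHz with r = 0;
 * m = 2 makes fvco * m = 297 MHz an exact multiple of 27 MHz, so n = 11
 * and the PLL produces 27 MHz * 11 / 2 = 148.5 MHz with no error.
 */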
/* parameters for PLLTV fractional divider */
static const u32 pt[][5] = {
/* conventional fractional */
{
1, /* factor */
5, /* 5 * p0 (nint) */
1, /* 1 * p0 */
F_27M, /* F_27M / p0 */
1, /* p0 / p2 */
},
/* phase rotation */
{
10, /* factor */
54, /* 5.4 * p0 (nint) */
2, /* 0.2 * p0 */
F_27M / 10, /* F_27M / p0 */
5, /* p0 / p2 */
},
};
static const u32 sdm_mod_vals[] = { 91, 55 };
static long plltv_fractional_div(struct sp_pll *clk, unsigned long freq)
{
u32 m, r;
u32 nint, nfra;
u32 df_quotient_min = 210000000;
u32 df_remainder_min = 0;
unsigned long fvco, nf, f, fout = 0;
int sdm, ph;
freq = clamp(freq, F_MIN, F_MAX);
/* DIVR 0~3 */
for (r = 0; r <= 3; r++) {
fvco = freq << r;
if (fvco <= FVCO_MAX)
break;
}
f = F_27M >> r;
/* PH_SEL */
for (ph = ARRAY_SIZE(pt) - 1; ph >= 0; ph--) {
const u32 *pp = pt[ph];
/* SDM_MOD */
for (sdm = 0; sdm < ARRAY_SIZE(sdm_mod_vals); sdm++) {
u32 mod = sdm_mod_vals[sdm];
/* DIVM 1~32 */
for (m = 1; m <= 32; m++) {
u32 df; /* diff freq */
u32 df_quotient, df_remainder;
nf = fvco * m;
nint = nf / pp[3];
if (nint < pp[1])
continue;
if (nint > pp[1])
break;
nfra = (((nf % pp[3]) * mod * pp[4]) + (F_27M / 2)) / F_27M;
if (nfra) {
u32 df0 = f * (nint + pp[2]) / pp[0];
u32 df1 = f * (mod - nfra) / mod / pp[4];
df = df0 - df1;
} else {
df = f * (nint) / pp[0];
}
df_quotient = df / m;
df_remainder = ((df % m) * 1000) / m;
if (freq > df_quotient) {
df_quotient = freq - df_quotient - 1;
df_remainder = 1000 - df_remainder;
} else {
df_quotient = df_quotient - freq;
}
if (df_quotient_min > df_quotient ||
(df_quotient_min == df_quotient &&
df_remainder_min > df_remainder)) {
/* found a closer freq, save parameters */
clk->p[SEL_FRA] = 1;
clk->p[SDM_MOD] = sdm;
clk->p[PH_SEL] = ph;
clk->p[NFRA] = nfra;
clk->p[DIVR] = r;
clk->p[DIVM] = m;
fout = df / m;
df_quotient_min = df_quotient;
df_remainder_min = df_remainder;
}
}
}
}
if (!fout) {
pr_err("%s: %s freq:%lu not found a valid setting\n",
__func__, clk_hw_get_name(&clk->hw), freq);
return -EINVAL;
}
return fout;
}
static long plltv_div(struct sp_pll *clk, unsigned long freq)
{
if (freq % 100)
return plltv_fractional_div(clk, freq);
return plltv_integer_div(clk, freq);
}
static int plltv_set_rate(struct sp_pll *clk)
{
unsigned long flags;
u32 r0, r1, r2;
r0 = BIT(clk->bp_bit + 16);
r0 |= HWM_FIELD_PREP(MASK_SEL_FRA, clk->p[SEL_FRA]);
r0 |= HWM_FIELD_PREP(MASK_SDM_MOD, clk->p[SDM_MOD]);
r0 |= HWM_FIELD_PREP(MASK_PH_SEL, clk->p[PH_SEL]);
r0 |= HWM_FIELD_PREP(MASK_NFRA, clk->p[NFRA]);
r1 = HWM_FIELD_PREP(MASK_DIVR, clk->p[DIVR]);
r2 = HWM_FIELD_PREP(MASK_DIVN, clk->p[DIVN] - 1);
r2 |= HWM_FIELD_PREP(MASK_DIVM, clk->p[DIVM] - 1);
spin_lock_irqsave(&clk->lock, flags);
writel(r0, clk->reg);
writel(r1, clk->reg + 4);
writel(r2, clk->reg + 8);
spin_unlock_irqrestore(&clk->lock, flags);
return 0;
}
/*********************************** PLL_A ***********************************/
/* from Q628_PLLs_REG_setting.xlsx */
static const struct {
u32 rate;
u32 regs[5];
} pa[] = {
{
.rate = 135475200,
.regs = {
0x4801,
0x02df,
0x248f,
0x0211,
0x33e9
}
},
{
.rate = 147456000,
.regs = {
0x4801,
0x1adf,
0x2490,
0x0349,
0x33e9
}
},
{
.rate = 196608000,
.regs = {
0x4801,
0x42ef,
0x2495,
0x01c6,
0x33e9
}
},
};
static int plla_set_rate(struct sp_pll *clk)
{
const u32 *pp = pa[clk->p[0]].regs;
unsigned long flags;
int i;
spin_lock_irqsave(&clk->lock, flags);
for (i = 0; i < ARRAY_SIZE(pa->regs); i++)
writel(0xffff0000 | pp[i], clk->reg + (i * 4));
spin_unlock_irqrestore(&clk->lock, flags);
return 0;
}
static long plla_round_rate(struct sp_pll *clk, unsigned long rate)
{
int i = ARRAY_SIZE(pa);
while (--i) {
if (rate >= pa[i].rate)
break;
}
clk->p[0] = i;
return pa[i].rate;
}
/********************************** SP_PLL ***********************************/
static long sp_pll_calc_div(struct sp_pll *clk, unsigned long rate)
{
u32 fbdiv;
u32 max = 1 << clk->div_width;
fbdiv = DIV_ROUND_CLOSEST(rate, clk->brate);
if (fbdiv > max)
fbdiv = max;
return fbdiv;
}
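/*
 * Example: pllsys uses brate = 13.5 MHz with a 4-bit divider field, so a
 * request for 202.5 MHz gives fbdiv = DIV_ROUND_CLOSEST(202500000,
 * 13500000) = 15 and an exact result of 15 * 13.5 MHz = 202.5 MHz.
 */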
static long sp_pll_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
struct sp_pll *clk = to_sp_pll(hw);
long ret;
if (rate == *prate) {
ret = *prate; /* bypass */
} else if (clk->div_width == DIV_A) {
ret = plla_round_rate(clk, rate);
} else if (clk->div_width == DIV_TV) {
ret = plltv_div(clk, rate);
if (ret < 0)
ret = *prate;
} else {
ret = sp_pll_calc_div(clk, rate) * clk->brate;
}
return ret;
}
static unsigned long sp_pll_recalc_rate(struct clk_hw *hw,
unsigned long prate)
{
struct sp_pll *clk = to_sp_pll(hw);
u32 reg = readl(clk->reg);
unsigned long ret;
if (reg & BIT(clk->bp_bit)) {
ret = prate; /* bypass */
} else if (clk->div_width == DIV_A) {
ret = pa[clk->p[0]].rate;
} else if (clk->div_width == DIV_TV) {
u32 m, r, reg2;
r = FIELD_GET(MASK_DIVR, readl(clk->reg + 4));
reg2 = readl(clk->reg + 8);
m = FIELD_GET(MASK_DIVM, reg2) + 1;
if (reg & MASK_SEL_FRA) {
/* fractional divider */
u32 sdm = FIELD_GET(MASK_SDM_MOD, reg);
u32 ph = FIELD_GET(MASK_PH_SEL, reg);
u32 nfra = FIELD_GET(MASK_NFRA, reg);
const u32 *pp = pt[ph];
unsigned long r0, r1;
ret = prate >> r;
r0 = ret * (pp[1] + pp[2]) / pp[0];
r1 = ret * (sdm_mod_vals[sdm] - nfra) / sdm_mod_vals[sdm] / pp[4];
ret = (r0 - r1) / m;
} else {
/* integer divider */
u32 n = FIELD_GET(MASK_DIVN, reg2) + 1;
ret = (prate / m * n) >> r;
}
} else {
u32 fbdiv = ((reg >> clk->div_shift) & ((1 << clk->div_width) - 1)) + 1;
ret = clk->brate * fbdiv;
}
return ret;
}
static int sp_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long prate)
{
struct sp_pll *clk = to_sp_pll(hw);
unsigned long flags;
u32 reg;
reg = BIT(clk->bp_bit + 16); /* HIWORD_MASK */
if (rate == prate) {
reg |= BIT(clk->bp_bit); /* bypass */
} else if (clk->div_width == DIV_A) {
return plla_set_rate(clk);
} else if (clk->div_width == DIV_TV) {
return plltv_set_rate(clk);
} else if (clk->div_width) {
u32 fbdiv = sp_pll_calc_div(clk, rate);
u32 mask = GENMASK(clk->div_shift + clk->div_width - 1, clk->div_shift);
reg |= mask << 16;
reg |= ((fbdiv - 1) << clk->div_shift) & mask;
}
spin_lock_irqsave(&clk->lock, flags);
writel(reg, clk->reg);
spin_unlock_irqrestore(&clk->lock, flags);
return 0;
}
static int sp_pll_enable(struct clk_hw *hw)
{
struct sp_pll *clk = to_sp_pll(hw);
writel(BIT(clk->pd_bit + 16) | BIT(clk->pd_bit), clk->reg);
return 0;
}
static void sp_pll_disable(struct clk_hw *hw)
{
struct sp_pll *clk = to_sp_pll(hw);
writel(BIT(clk->pd_bit + 16), clk->reg);
}
static int sp_pll_is_enabled(struct clk_hw *hw)
{
struct sp_pll *clk = to_sp_pll(hw);
return readl(clk->reg) & BIT(clk->pd_bit);
}
static const struct clk_ops sp_pll_ops = {
.enable = sp_pll_enable,
.disable = sp_pll_disable,
.is_enabled = sp_pll_is_enabled,
.round_rate = sp_pll_round_rate,
.recalc_rate = sp_pll_recalc_rate,
.set_rate = sp_pll_set_rate
};
static const struct clk_ops sp_pll_sub_ops = {
.enable = sp_pll_enable,
.disable = sp_pll_disable,
.is_enabled = sp_pll_is_enabled,
.recalc_rate = sp_pll_recalc_rate,
};
static struct clk_hw *sp_pll_register(struct device *dev, const char *name,
const struct clk_parent_data *parent_data,
void __iomem *reg, int pd_bit, int bp_bit,
unsigned long brate, int shift, int width,
unsigned long flags)
{
struct sp_pll *pll;
struct clk_hw *hw;
struct clk_init_data initd = {
.name = name,
.parent_data = parent_data,
.ops = (bp_bit >= 0) ? &sp_pll_ops : &sp_pll_sub_ops,
.num_parents = 1,
.flags = flags,
};
int ret;
pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL);
if (!pll)
return ERR_PTR(-ENOMEM);
pll->hw.init = &initd;
pll->reg = reg;
pll->pd_bit = pd_bit;
pll->bp_bit = bp_bit;
pll->brate = brate;
pll->div_shift = shift;
pll->div_width = width;
spin_lock_init(&pll->lock);
hw = &pll->hw;
ret = devm_clk_hw_register(dev, hw);
if (ret)
return ERR_PTR(ret);
return hw;
}
#define PLLA_CTL (pll_base + 0x1c)
#define PLLE_CTL (pll_base + 0x30)
#define PLLF_CTL (pll_base + 0x34)
#define PLLTV_CTL (pll_base + 0x38)
static int sp7021_clk_probe(struct platform_device *pdev)
{
static const u32 sp_clken[] = {
0x67ef, 0x03ff, 0xff03, 0xfff0, 0x0004, /* G0.1~5 */
0x0000, 0x8000, 0xffff, 0x0040, 0x0000, /* G0.6~10 */
};
static struct clk_parent_data pd_ext, pd_sys, pd_e;
struct device *dev = &pdev->dev;
void __iomem *clk_base, *pll_base, *sys_base;
struct clk_hw_onecell_data *clk_data;
struct clk_hw **hws;
int i;
	/* devm_platform_ioremap_resource() returns an ERR_PTR, never NULL */
	clk_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(clk_base))
		return PTR_ERR(clk_base);
	pll_base = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(pll_base))
		return PTR_ERR(pll_base);
	sys_base = devm_platform_ioremap_resource(pdev, 2);
	if (IS_ERR(sys_base))
		return PTR_ERR(sys_base);
/* enable default clks */
for (i = 0; i < ARRAY_SIZE(sp_clken); i++)
writel((sp_clken[i] << 16) | sp_clken[i], clk_base + i * 4);
clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, CLK_MAX),
GFP_KERNEL);
if (!clk_data)
return -ENOMEM;
clk_data->num = CLK_MAX;
hws = clk_data->hws;
pd_ext.index = 0;
/* PLLs */
hws[PLL_A] = sp_pll_register(dev, "plla", &pd_ext, PLLA_CTL,
11, 12, 27000000, 0, DIV_A, 0);
if (IS_ERR(hws[PLL_A]))
return PTR_ERR(hws[PLL_A]);
hws[PLL_E] = sp_pll_register(dev, "plle", &pd_ext, PLLE_CTL,
6, 2, 50000000, 0, 0, 0);
if (IS_ERR(hws[PLL_E]))
return PTR_ERR(hws[PLL_E]);
pd_e.hw = hws[PLL_E];
hws[PLL_E_2P5] = sp_pll_register(dev, "plle_2p5", &pd_e, PLLE_CTL,
13, -1, 2500000, 0, 0, 0);
if (IS_ERR(hws[PLL_E_2P5]))
return PTR_ERR(hws[PLL_E_2P5]);
hws[PLL_E_25] = sp_pll_register(dev, "plle_25", &pd_e, PLLE_CTL,
12, -1, 25000000, 0, 0, 0);
if (IS_ERR(hws[PLL_E_25]))
return PTR_ERR(hws[PLL_E_25]);
hws[PLL_E_112P5] = sp_pll_register(dev, "plle_112p5", &pd_e, PLLE_CTL,
11, -1, 112500000, 0, 0, 0);
if (IS_ERR(hws[PLL_E_112P5]))
return PTR_ERR(hws[PLL_E_112P5]);
hws[PLL_F] = sp_pll_register(dev, "pllf", &pd_ext, PLLF_CTL,
0, 10, 13500000, 1, 4, 0);
if (IS_ERR(hws[PLL_F]))
return PTR_ERR(hws[PLL_F]);
hws[PLL_TV] = sp_pll_register(dev, "plltv", &pd_ext, PLLTV_CTL,
0, 15, 27000000, 0, DIV_TV, 0);
if (IS_ERR(hws[PLL_TV]))
return PTR_ERR(hws[PLL_TV]);
hws[PLL_TV_A] = devm_clk_hw_register_divider(dev, "plltv_a", "plltv", 0,
PLLTV_CTL + 4, 5, 1,
CLK_DIVIDER_POWER_OF_TWO,
&to_sp_pll(hws[PLL_TV])->lock);
if (IS_ERR(hws[PLL_TV_A]))
return PTR_ERR(hws[PLL_TV_A]);
/* system clock, should not be disabled */
hws[PLL_SYS] = sp_pll_register(dev, "pllsys", &pd_ext, sys_base,
10, 9, 13500000, 0, 4, CLK_IS_CRITICAL);
if (IS_ERR(hws[PLL_SYS]))
return PTR_ERR(hws[PLL_SYS]);
pd_sys.hw = hws[PLL_SYS];
/* gates */
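	/*
	 * Each sp_clk_gates[] entry packs its location into one byte: the
	 * high nibble selects the 32-bit enable register and the low nibble
	 * the bit within it, matching the (j >> 4) / (j & 0x0f) decode below.
	 */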
for (i = 0; i < ARRAY_SIZE(sp_clk_gates); i++) {
char name[10];
u32 j = sp_clk_gates[i].reg;
struct clk_parent_data *pd = sp_clk_gates[i].ext_parent ? &pd_ext : &pd_sys;
sprintf(name, "%02d_0x%02x", i, j);
hws[i] = devm_clk_hw_register_gate_parent_data(dev, name, pd, 0,
clk_base + (j >> 4) * 4,
j & 0x0f,
CLK_GATE_HIWORD_MASK,
NULL);
if (IS_ERR(hws[i]))
return PTR_ERR(hws[i]);
}
return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_data);
}
static const struct of_device_id sp7021_clk_dt_ids[] = {
{ .compatible = "sunplus,sp7021-clkc" },
{ }
};
MODULE_DEVICE_TABLE(of, sp7021_clk_dt_ids);
static struct platform_driver sp7021_clk_driver = {
.probe = sp7021_clk_probe,
.driver = {
.name = "sp7021-clk",
.of_match_table = sp7021_clk_dt_ids,
},
};
module_platform_driver(sp7021_clk_driver);
MODULE_AUTHOR("Sunplus Technology");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Clock driver for Sunplus SP7021 SoC");
| linux-master | drivers/clk/clk-sp7021.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Layerscape FlexSPI clock driver
*
* Copyright 2020 Michael Walle <[email protected]>
*/
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
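/*
 * Mapping of divider field encodings to division factors. Only the
 * listed encodings are valid; the gaps are presumably reserved. Note
 * that the LX2160A table omits the odd low divisors (1, 3, 5, 7) that
 * the LS1028A supports.
 */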
static const struct clk_div_table ls1028a_flexspi_divs[] = {
{ .val = 0, .div = 1, },
{ .val = 1, .div = 2, },
{ .val = 2, .div = 3, },
{ .val = 3, .div = 4, },
{ .val = 4, .div = 5, },
{ .val = 5, .div = 6, },
{ .val = 6, .div = 7, },
{ .val = 7, .div = 8, },
{ .val = 11, .div = 12, },
{ .val = 15, .div = 16, },
{ .val = 16, .div = 20, },
{ .val = 17, .div = 24, },
{ .val = 18, .div = 28, },
{ .val = 19, .div = 32, },
{ .val = 20, .div = 80, },
{}
};
static const struct clk_div_table lx2160a_flexspi_divs[] = {
{ .val = 1, .div = 2, },
{ .val = 3, .div = 4, },
{ .val = 5, .div = 6, },
{ .val = 7, .div = 8, },
{ .val = 11, .div = 12, },
{ .val = 15, .div = 16, },
{ .val = 16, .div = 20, },
{ .val = 17, .div = 24, },
{ .val = 18, .div = 28, },
{ .val = 19, .div = 32, },
{ .val = 20, .div = 80, },
{}
};
static int fsl_flexspi_clk_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
const char *clk_name = np->name;
const char *clk_parent;
struct resource *res;
void __iomem *reg;
struct clk_hw *hw;
const struct clk_div_table *divs;
divs = device_get_match_data(dev);
if (!divs)
return -ENOENT;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENOENT;
/*
* Can't use devm_ioremap_resource() or devm_of_iomap() because the
* resource might already be taken by the parent device.
*/
reg = devm_ioremap(dev, res->start, resource_size(res));
if (!reg)
return -ENOMEM;
clk_parent = of_clk_get_parent_name(np, 0);
if (!clk_parent)
return -EINVAL;
of_property_read_string(np, "clock-output-names", &clk_name);
hw = devm_clk_hw_register_divider_table(dev, clk_name, clk_parent, 0,
reg, 0, 5, 0, divs, NULL);
if (IS_ERR(hw))
return PTR_ERR(hw);
return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, hw);
}
static const struct of_device_id fsl_flexspi_clk_dt_ids[] = {
{ .compatible = "fsl,ls1028a-flexspi-clk", .data = &ls1028a_flexspi_divs },
{ .compatible = "fsl,lx2160a-flexspi-clk", .data = &lx2160a_flexspi_divs },
{}
};
MODULE_DEVICE_TABLE(of, fsl_flexspi_clk_dt_ids);
static struct platform_driver fsl_flexspi_clk_driver = {
.driver = {
.name = "fsl-flexspi-clk",
.of_match_table = fsl_flexspi_clk_dt_ids,
},
.probe = fsl_flexspi_clk_probe,
};
module_platform_driver(fsl_flexspi_clk_driver);
MODULE_DESCRIPTION("FlexSPI clock driver for Layerscape SoCs");
MODULE_AUTHOR("Michael Walle <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/clk/clk-fsl-flexspi.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Microchip Sparx5 SoC Clock driver.
*
* Copyright (c) 2019 Microchip Inc.
*
* Author: Lars Povlsen <[email protected]>
*/
#include <linux/io.h>
#include <linux/module.h>
#include <linux/clk-provider.h>
#include <linux/bitfield.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <dt-bindings/clock/microchip,sparx5.h>
#define PLL_DIV GENMASK(7, 0)
#define PLL_PRE_DIV GENMASK(10, 8)
#define PLL_ROT_DIR BIT(11)
#define PLL_ROT_SEL GENMASK(13, 12)
#define PLL_ROT_ENA BIT(14)
#define PLL_CLK_ENA BIT(15)
#define MAX_SEL 4
#define MAX_PRE BIT(3)
static const u8 sel_rates[MAX_SEL] = { 0, 2*8, 2*4, 2*2 };
static const char *clk_names[N_CLOCKS] = {
"core", "ddr", "cpu2", "arm2",
"aux1", "aux2", "aux3", "aux4",
"synce",
};
struct s5_hw_clk {
struct clk_hw hw;
void __iomem *reg;
};
struct s5_clk_data {
void __iomem *base;
struct s5_hw_clk s5_hw[N_CLOCKS];
};
struct s5_pll_conf {
unsigned long freq;
u8 div;
bool rot_ena;
u8 rot_sel;
u8 rot_dir;
u8 pre_div;
};
#define to_s5_pll(hw) container_of(hw, struct s5_hw_clk, hw)
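/*
 * The output rate is parent / div, optionally scaled by the fractional
 * "rotation" logic: when enabled, the rate is multiplied by
 * divt / (divt +/- 1), where divt = sel_rates[rot_sel] * (1 + pre_div),
 * and the result is rounded up to a 1 kHz boundary.
 */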
static unsigned long s5_calc_freq(unsigned long parent_rate,
const struct s5_pll_conf *conf)
{
unsigned long rate = parent_rate / conf->div;
if (conf->rot_ena) {
int sign = conf->rot_dir ? -1 : 1;
int divt = sel_rates[conf->rot_sel] * (1 + conf->pre_div);
int divb = divt + sign;
rate = mult_frac(rate, divt, divb);
rate = roundup(rate, 1000);
}
return rate;
}
static void s5_search_fractional(unsigned long rate,
unsigned long parent_rate,
int div,
struct s5_pll_conf *conf)
{
struct s5_pll_conf best;
ulong cur_offset, best_offset = rate;
int d, i, j;
	memset(conf, 0, sizeof(*conf));
	conf->div = div;
	conf->rot_ena = 1; /* Fractional rate */
	best = *conf; /* keep 'best' initialized even if no candidate improves */
for (d = 0; best_offset > 0 && d <= 1 ; d++) {
conf->rot_dir = !!d;
for (i = 0; best_offset > 0 && i < MAX_PRE; i++) {
conf->pre_div = i;
for (j = 1; best_offset > 0 && j < MAX_SEL; j++) {
conf->rot_sel = j;
conf->freq = s5_calc_freq(parent_rate, conf);
cur_offset = abs(rate - conf->freq);
if (cur_offset < best_offset) {
best_offset = cur_offset;
best = *conf;
}
}
}
}
/* Best match */
*conf = best;
}
static unsigned long s5_calc_params(unsigned long rate,
unsigned long parent_rate,
struct s5_pll_conf *conf)
{
if (parent_rate % rate) {
struct s5_pll_conf alt1, alt2;
int div;
div = DIV_ROUND_CLOSEST_ULL(parent_rate, rate);
s5_search_fractional(rate, parent_rate, div, &alt1);
/* Straight match? */
if (alt1.freq == rate) {
*conf = alt1;
} else {
/* Try without rounding divider */
div = parent_rate / rate;
if (div != alt1.div) {
s5_search_fractional(rate, parent_rate, div,
&alt2);
/* Select the better match */
if (abs(rate - alt1.freq) <
abs(rate - alt2.freq))
*conf = alt1;
else
*conf = alt2;
}
}
} else {
/* Straight fit */
memset(conf, 0, sizeof(*conf));
conf->div = parent_rate / rate;
}
return conf->freq;
}
static int s5_pll_enable(struct clk_hw *hw)
{
struct s5_hw_clk *pll = to_s5_pll(hw);
u32 val = readl(pll->reg);
val |= PLL_CLK_ENA;
writel(val, pll->reg);
return 0;
}
static void s5_pll_disable(struct clk_hw *hw)
{
struct s5_hw_clk *pll = to_s5_pll(hw);
u32 val = readl(pll->reg);
val &= ~PLL_CLK_ENA;
writel(val, pll->reg);
}
static int s5_pll_set_rate(struct clk_hw *hw,
unsigned long rate,
unsigned long parent_rate)
{
struct s5_hw_clk *pll = to_s5_pll(hw);
struct s5_pll_conf conf;
unsigned long eff_rate;
u32 val;
eff_rate = s5_calc_params(rate, parent_rate, &conf);
if (eff_rate != rate)
return -EOPNOTSUPP;
val = readl(pll->reg) & PLL_CLK_ENA;
val |= FIELD_PREP(PLL_DIV, conf.div);
if (conf.rot_ena) {
val |= PLL_ROT_ENA;
val |= FIELD_PREP(PLL_ROT_SEL, conf.rot_sel);
val |= FIELD_PREP(PLL_PRE_DIV, conf.pre_div);
if (conf.rot_dir)
val |= PLL_ROT_DIR;
}
writel(val, pll->reg);
return 0;
}
static unsigned long s5_pll_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct s5_hw_clk *pll = to_s5_pll(hw);
struct s5_pll_conf conf;
u32 val;
val = readl(pll->reg);
if (val & PLL_CLK_ENA) {
conf.div = FIELD_GET(PLL_DIV, val);
conf.pre_div = FIELD_GET(PLL_PRE_DIV, val);
conf.rot_ena = FIELD_GET(PLL_ROT_ENA, val);
conf.rot_dir = FIELD_GET(PLL_ROT_DIR, val);
conf.rot_sel = FIELD_GET(PLL_ROT_SEL, val);
conf.freq = s5_calc_freq(parent_rate, &conf);
} else {
conf.freq = 0;
}
return conf.freq;
}
static long s5_pll_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
struct s5_pll_conf conf;
return s5_calc_params(rate, *parent_rate, &conf);
}
static const struct clk_ops s5_pll_ops = {
.enable = s5_pll_enable,
.disable = s5_pll_disable,
.set_rate = s5_pll_set_rate,
.round_rate = s5_pll_round_rate,
.recalc_rate = s5_pll_recalc_rate,
};
static struct clk_hw *s5_clk_hw_get(struct of_phandle_args *clkspec, void *data)
{
struct s5_clk_data *s5_clk = data;
unsigned int idx = clkspec->args[0];
if (idx >= N_CLOCKS) {
pr_err("%s: invalid index %u\n", __func__, idx);
return ERR_PTR(-EINVAL);
}
return &s5_clk->s5_hw[idx].hw;
}
static int s5_clk_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
int i, ret;
struct s5_clk_data *s5_clk;
struct clk_parent_data pdata = { .index = 0 };
struct clk_init_data init = {
.ops = &s5_pll_ops,
.num_parents = 1,
.parent_data = &pdata,
};
s5_clk = devm_kzalloc(dev, sizeof(*s5_clk), GFP_KERNEL);
if (!s5_clk)
return -ENOMEM;
s5_clk->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(s5_clk->base))
return PTR_ERR(s5_clk->base);
for (i = 0; i < N_CLOCKS; i++) {
struct s5_hw_clk *s5_hw = &s5_clk->s5_hw[i];
init.name = clk_names[i];
s5_hw->reg = s5_clk->base + (i * 4);
s5_hw->hw.init = &init;
ret = devm_clk_hw_register(dev, &s5_hw->hw);
if (ret) {
dev_err(dev, "failed to register %s clock\n",
init.name);
return ret;
}
}
return devm_of_clk_add_hw_provider(dev, s5_clk_hw_get, s5_clk);
}
static const struct of_device_id s5_clk_dt_ids[] = {
{ .compatible = "microchip,sparx5-dpll", },
{ }
};
MODULE_DEVICE_TABLE(of, s5_clk_dt_ids);
static struct platform_driver s5_clk_driver = {
.probe = s5_clk_probe,
.driver = {
.name = "sparx5-clk",
.of_match_table = s5_clk_dt_ids,
},
};
builtin_platform_driver(s5_clk_driver);
| linux-master | drivers/clk/clk-sparx5.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Clock implementation for VIA/Wondermedia SoC's
* Copyright (C) 2012 Tony Prisk <[email protected]>
*/
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#define LEGACY_PMC_BASE 0xD8130000
/* All clocks share the same lock as none can be changed concurrently */
static DEFINE_SPINLOCK(_lock);
struct clk_device {
struct clk_hw hw;
void __iomem *div_reg;
unsigned int div_mask;
void __iomem *en_reg;
	u32 en_bit;
spinlock_t *lock;
};
/*
* Add new PLL_TYPE_x definitions here as required. Use the first known model
* to support the new type as the name.
 * Add case statements to vtwm_pll_recalc_rate(), vtwm_pll_round_rate() and
* vtwm_pll_set_rate() to handle the new PLL_TYPE_x
*/
#define PLL_TYPE_VT8500 0
#define PLL_TYPE_WM8650 1
#define PLL_TYPE_WM8750 2
#define PLL_TYPE_WM8850 3
struct clk_pll {
struct clk_hw hw;
void __iomem *reg;
spinlock_t *lock;
int type;
};
static void __iomem *pmc_base;
static __init void vtwm_set_pmc_base(void)
{
struct device_node *np =
of_find_compatible_node(NULL, NULL, "via,vt8500-pmc");
if (np)
pmc_base = of_iomap(np, 0);
else
pmc_base = ioremap(LEGACY_PMC_BASE, 0x1000);
of_node_put(np);
if (!pmc_base)
pr_err("%s:of_iomap(pmc) failed\n", __func__);
}
#define to_clk_device(_hw) container_of(_hw, struct clk_device, hw)
#define VT8500_PMC_BUSY_MASK 0x18
static void vt8500_pmc_wait_busy(void)
{
while (readl(pmc_base) & VT8500_PMC_BUSY_MASK)
cpu_relax();
}
static int vt8500_dclk_enable(struct clk_hw *hw)
{
struct clk_device *cdev = to_clk_device(hw);
u32 en_val;
unsigned long flags = 0;
spin_lock_irqsave(cdev->lock, flags);
en_val = readl(cdev->en_reg);
en_val |= BIT(cdev->en_bit);
writel(en_val, cdev->en_reg);
spin_unlock_irqrestore(cdev->lock, flags);
return 0;
}
static void vt8500_dclk_disable(struct clk_hw *hw)
{
struct clk_device *cdev = to_clk_device(hw);
u32 en_val;
unsigned long flags = 0;
spin_lock_irqsave(cdev->lock, flags);
en_val = readl(cdev->en_reg);
en_val &= ~BIT(cdev->en_bit);
writel(en_val, cdev->en_reg);
spin_unlock_irqrestore(cdev->lock, flags);
}
static int vt8500_dclk_is_enabled(struct clk_hw *hw)
{
struct clk_device *cdev = to_clk_device(hw);
u32 en_val = (readl(cdev->en_reg) & BIT(cdev->en_bit));
return en_val ? 1 : 0;
}
static unsigned long vt8500_dclk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_device *cdev = to_clk_device(hw);
u32 div = readl(cdev->div_reg) & cdev->div_mask;
/* Special case for SDMMC devices */
if ((cdev->div_mask == 0x3F) && (div & BIT(5)))
div = 64 * (div & 0x1f);
/* div == 0 is actually the highest divisor */
if (div == 0)
div = (cdev->div_mask + 1);
return parent_rate / div;
}
static long vt8500_dclk_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
struct clk_device *cdev = to_clk_device(hw);
u32 divisor;
if (rate == 0)
return 0;
divisor = *prate / rate;
/* If prate / rate would be decimal, incr the divisor */
if (rate * divisor < *prate)
divisor++;
/*
* If this is a request for SDMMC we have to adjust the divisor
* when >31 to use the fixed predivisor
*/
if ((cdev->div_mask == 0x3F) && (divisor > 31)) {
divisor = 64 * ((divisor / 64) + 1);
}
return *prate / divisor;
}
static int vt8500_dclk_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_device *cdev = to_clk_device(hw);
u32 divisor;
unsigned long flags = 0;
if (rate == 0)
return 0;
divisor = parent_rate / rate;
if (divisor == cdev->div_mask + 1)
divisor = 0;
	/* SDMMC mask may need to be corrected before testing if it's valid */
if ((cdev->div_mask == 0x3F) && (divisor > 31)) {
/*
* Bit 5 is a fixed /64 predivisor. If the requested divisor
* is >31 then correct for the fixed divisor being required.
*/
divisor = 0x20 + (divisor / 64);
}
if (divisor > cdev->div_mask) {
pr_err("%s: invalid divisor for clock\n", __func__);
return -EINVAL;
}
spin_lock_irqsave(cdev->lock, flags);
vt8500_pmc_wait_busy();
writel(divisor, cdev->div_reg);
vt8500_pmc_wait_busy();
spin_unlock_irqrestore(cdev->lock, flags);
return 0;
}
static const struct clk_ops vt8500_gated_clk_ops = {
.enable = vt8500_dclk_enable,
.disable = vt8500_dclk_disable,
.is_enabled = vt8500_dclk_is_enabled,
};
static const struct clk_ops vt8500_divisor_clk_ops = {
.round_rate = vt8500_dclk_round_rate,
.set_rate = vt8500_dclk_set_rate,
.recalc_rate = vt8500_dclk_recalc_rate,
};
static const struct clk_ops vt8500_gated_divisor_clk_ops = {
.enable = vt8500_dclk_enable,
.disable = vt8500_dclk_disable,
.is_enabled = vt8500_dclk_is_enabled,
.round_rate = vt8500_dclk_round_rate,
.set_rate = vt8500_dclk_set_rate,
.recalc_rate = vt8500_dclk_recalc_rate,
};
#define CLK_INIT_GATED BIT(0)
#define CLK_INIT_DIVISOR BIT(1)
#define CLK_INIT_GATED_DIVISOR (CLK_INIT_DIVISOR | CLK_INIT_GATED)
static __init void vtwm_device_clk_init(struct device_node *node)
{
u32 en_reg, div_reg;
struct clk_hw *hw;
struct clk_device *dev_clk;
const char *clk_name = node->name;
const char *parent_name;
struct clk_init_data init;
int rc;
int clk_init_flags = 0;
if (!pmc_base)
vtwm_set_pmc_base();
dev_clk = kzalloc(sizeof(*dev_clk), GFP_KERNEL);
if (WARN_ON(!dev_clk))
return;
dev_clk->lock = &_lock;
rc = of_property_read_u32(node, "enable-reg", &en_reg);
if (!rc) {
dev_clk->en_reg = pmc_base + en_reg;
rc = of_property_read_u32(node, "enable-bit", &dev_clk->en_bit);
		if (rc) {
			pr_err("%s: enable-bit property required for gated clock\n",
			       __func__);
			kfree(dev_clk);
			return;
		}
clk_init_flags |= CLK_INIT_GATED;
}
rc = of_property_read_u32(node, "divisor-reg", &div_reg);
if (!rc) {
dev_clk->div_reg = pmc_base + div_reg;
/*
* use 0x1f as the default mask since it covers
* almost all the clocks and reduces dts properties
*/
dev_clk->div_mask = 0x1f;
of_property_read_u32(node, "divisor-mask", &dev_clk->div_mask);
clk_init_flags |= CLK_INIT_DIVISOR;
}
of_property_read_string(node, "clock-output-names", &clk_name);
switch (clk_init_flags) {
case CLK_INIT_GATED:
init.ops = &vt8500_gated_clk_ops;
break;
case CLK_INIT_DIVISOR:
init.ops = &vt8500_divisor_clk_ops;
break;
case CLK_INIT_GATED_DIVISOR:
init.ops = &vt8500_gated_divisor_clk_ops;
break;
default:
pr_err("%s: Invalid clock description in device tree\n",
__func__);
kfree(dev_clk);
return;
}
init.name = clk_name;
init.flags = 0;
parent_name = of_clk_get_parent_name(node, 0);
init.parent_names = &parent_name;
init.num_parents = 1;
dev_clk->hw.init = &init;
hw = &dev_clk->hw;
rc = clk_hw_register(NULL, hw);
if (WARN_ON(rc)) {
kfree(dev_clk);
return;
}
rc = of_clk_add_hw_provider(node, of_clk_hw_simple_get, hw);
clk_hw_register_clkdev(hw, clk_name, NULL);
}
CLK_OF_DECLARE(vt8500_device, "via,vt8500-device-clock", vtwm_device_clk_init);
/* PLL clock related functions */
#define to_clk_pll(_hw) container_of(_hw, struct clk_pll, hw)
/* Helper macros for PLL_VT8500 */
#define VT8500_PLL_MUL(x) ((x & 0x1F) << 1)
#define VT8500_PLL_DIV(x) ((x & 0x100) ? 1 : 2)
#define VT8500_BITS_TO_FREQ(r, m, d) \
((r / d) * m)
#define VT8500_BITS_TO_VAL(m, d) \
((d == 2 ? 0 : 0x100) | ((m >> 1) & 0x1F))
/* Helper macros for PLL_WM8650 */
#define WM8650_PLL_MUL(x) (x & 0x3FF)
#define WM8650_PLL_DIV(x) (((x >> 10) & 7) * (1 << ((x >> 13) & 3)))
#define WM8650_BITS_TO_FREQ(r, m, d1, d2) \
(r * m / (d1 * (1 << d2)))
#define WM8650_BITS_TO_VAL(m, d1, d2) \
((d2 << 13) | (d1 << 10) | (m & 0x3FF))
/* Helper macros for PLL_WM8750 */
#define WM8750_PLL_MUL(x) (((x >> 16) & 0xFF) + 1)
#define WM8750_PLL_DIV(x) ((((x >> 8) & 1) + 1) * (1 << (x & 7)))
#define WM8750_BITS_TO_FREQ(r, m, d1, d2) \
(r * (m+1) / ((d1+1) * (1 << d2)))
#define WM8750_BITS_TO_VAL(f, m, d1, d2) \
((f << 24) | ((m - 1) << 16) | ((d1 - 1) << 8) | d2)
/* Helper macros for PLL_WM8850 */
#define WM8850_PLL_MUL(x) ((((x >> 16) & 0x7F) + 1) * 2)
#define WM8850_PLL_DIV(x) ((((x >> 8) & 1) + 1) * (1 << (x & 3)))
#define WM8850_BITS_TO_FREQ(r, m, d1, d2) \
(r * ((m + 1) * 2) / ((d1+1) * (1 << d2)))
#define WM8850_BITS_TO_VAL(m, d1, d2) \
((((m / 2) - 1) << 16) | ((d1 - 1) << 8) | d2)
static int vt8500_find_pll_bits(unsigned long rate, unsigned long parent_rate,
u32 *multiplier, u32 *prediv)
{
unsigned long tclk;
/* sanity check */
if ((rate < parent_rate * 4) || (rate > parent_rate * 62)) {
pr_err("%s: requested rate out of range\n", __func__);
*multiplier = 0;
*prediv = 1;
return -EINVAL;
}
if (rate <= parent_rate * 31)
/* use the prediv to double the resolution */
*prediv = 2;
else
*prediv = 1;
*multiplier = rate / (parent_rate / *prediv);
tclk = (parent_rate / *prediv) * *multiplier;
if (tclk != rate)
pr_warn("%s: requested rate %lu, found rate %lu\n", __func__,
rate, tclk);
return 0;
}
/*
* M * parent [O1] => / P [O2] => / D [O3]
* Where O1 is 900MHz...3GHz;
* O2 is 600MHz >= (M * parent) / P >= 300MHz;
* M is 36...120 [25MHz parent]; D is 1 or 2 or 4 or 8.
* Possible ranges (O3):
 * D = 8: 37.5MHz...75MHz
* D = 4: 75MHz...150MHz
* D = 2: 150MHz...300MHz
* D = 1: 300MHz...600MHz
*/
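/*
 * Worked example (25 MHz parent, 100 MHz requested): divisor2 = 2 is
 * chosen (D = 4, the 75..150 MHz band), and the very first P tried
 * (divisor1 = 5) already gives an exact fit: O1 = 100 MHz * 5 * 4 = 2 GHz
 * is a whole multiple of 25 MHz, so M = 80 and
 * 25 MHz * 80 / (5 * 4) = 100 MHz.
 */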
static int wm8650_find_pll_bits(unsigned long rate,
unsigned long parent_rate, u32 *multiplier, u32 *divisor1,
u32 *divisor2)
{
unsigned long O1, min_err, rate_err;
if (!parent_rate || (rate < 37500000) || (rate > 600000000))
return -EINVAL;
*divisor2 = rate <= 75000000 ? 3 : rate <= 150000000 ? 2 :
rate <= 300000000 ? 1 : 0;
/*
* Divisor P cannot be calculated. Test all divisors and find where M
* will be as close as possible to the requested rate.
*/
min_err = ULONG_MAX;
for (*divisor1 = 5; *divisor1 >= 3; (*divisor1)--) {
O1 = rate * *divisor1 * (1 << (*divisor2));
rate_err = O1 % parent_rate;
if (rate_err < min_err) {
*multiplier = O1 / parent_rate;
if (rate_err == 0)
return 0;
min_err = rate_err;
}
}
if ((*multiplier < 3) || (*multiplier > 1023))
return -EINVAL;
pr_warn("%s: rate error is %lu\n", __func__, min_err);
return 0;
}
static u32 wm8750_get_filter(u32 parent_rate, u32 divisor1)
{
/* calculate frequency (MHz) after pre-divisor */
u32 freq = (parent_rate / 1000000) / (divisor1 + 1);
if ((freq < 10) || (freq > 200))
pr_warn("%s: PLL recommended input frequency 10..200Mhz (requested %d Mhz)\n",
__func__, freq);
if (freq >= 166)
return 7;
else if (freq >= 104)
return 6;
else if (freq >= 65)
return 5;
else if (freq >= 42)
return 4;
else if (freq >= 26)
return 3;
else if (freq >= 16)
return 2;
else if (freq >= 10)
return 1;
return 0;
}
static int wm8750_find_pll_bits(unsigned long rate, unsigned long parent_rate,
u32 *filter, u32 *multiplier, u32 *divisor1, u32 *divisor2)
{
u32 mul;
int div1, div2;
unsigned long tclk, rate_err, best_err;
best_err = (unsigned long)-1;
/* Find the closest match (lower or equal to requested) */
for (div1 = 1; div1 >= 0; div1--)
for (div2 = 7; div2 >= 0; div2--)
for (mul = 0; mul <= 255; mul++) {
tclk = parent_rate * (mul + 1) / ((div1 + 1) * (1 << div2));
if (tclk > rate)
continue;
/* error will always be +ve */
rate_err = rate - tclk;
if (rate_err == 0) {
*filter = wm8750_get_filter(parent_rate, div1);
*multiplier = mul;
*divisor1 = div1;
*divisor2 = div2;
return 0;
}
if (rate_err < best_err) {
best_err = rate_err;
*multiplier = mul;
*divisor1 = div1;
*divisor2 = div2;
}
}
if (best_err == (unsigned long)-1) {
pr_warn("%s: impossible rate %lu\n", __func__, rate);
return -EINVAL;
}
/* if we got here, it wasn't an exact match */
pr_warn("%s: requested rate %lu, found rate %lu\n", __func__, rate,
rate - best_err);
*filter = wm8750_get_filter(parent_rate, *divisor1);
return 0;
}
static int wm8850_find_pll_bits(unsigned long rate, unsigned long parent_rate,
u32 *multiplier, u32 *divisor1, u32 *divisor2)
{
u32 mul;
int div1, div2;
unsigned long tclk, rate_err, best_err;
best_err = (unsigned long)-1;
/* Find the closest match (lower or equal to requested) */
for (div1 = 1; div1 >= 0; div1--)
for (div2 = 3; div2 >= 0; div2--)
for (mul = 0; mul <= 127; mul++) {
tclk = parent_rate * ((mul + 1) * 2) /
((div1 + 1) * (1 << div2));
if (tclk > rate)
continue;
/* error will always be +ve */
rate_err = rate - tclk;
if (rate_err == 0) {
*multiplier = mul;
*divisor1 = div1;
*divisor2 = div2;
return 0;
}
if (rate_err < best_err) {
best_err = rate_err;
*multiplier = mul;
*divisor1 = div1;
*divisor2 = div2;
}
}
if (best_err == (unsigned long)-1) {
pr_warn("%s: impossible rate %lu\n", __func__, rate);
return -EINVAL;
}
/* if we got here, it wasn't an exact match */
pr_warn("%s: requested rate %lu, found rate %lu\n", __func__, rate,
rate - best_err);
return 0;
}
static int vtwm_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_pll *pll = to_clk_pll(hw);
u32 filter, mul, div1, div2;
u32 pll_val;
unsigned long flags = 0;
int ret;
/* sanity check */
switch (pll->type) {
case PLL_TYPE_VT8500:
ret = vt8500_find_pll_bits(rate, parent_rate, &mul, &div1);
if (!ret)
pll_val = VT8500_BITS_TO_VAL(mul, div1);
break;
case PLL_TYPE_WM8650:
ret = wm8650_find_pll_bits(rate, parent_rate, &mul, &div1, &div2);
if (!ret)
pll_val = WM8650_BITS_TO_VAL(mul, div1, div2);
break;
case PLL_TYPE_WM8750:
ret = wm8750_find_pll_bits(rate, parent_rate, &filter, &mul, &div1, &div2);
if (!ret)
pll_val = WM8750_BITS_TO_VAL(filter, mul, div1, div2);
break;
case PLL_TYPE_WM8850:
ret = wm8850_find_pll_bits(rate, parent_rate, &mul, &div1, &div2);
if (!ret)
pll_val = WM8850_BITS_TO_VAL(mul, div1, div2);
break;
default:
pr_err("%s: invalid pll type\n", __func__);
ret = -EINVAL;
}
if (ret)
return ret;
spin_lock_irqsave(pll->lock, flags);
vt8500_pmc_wait_busy();
writel(pll_val, pll->reg);
vt8500_pmc_wait_busy();
spin_unlock_irqrestore(pll->lock, flags);
return 0;
}
static long vtwm_pll_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
struct clk_pll *pll = to_clk_pll(hw);
u32 filter, mul, div1, div2;
long round_rate;
int ret;
switch (pll->type) {
case PLL_TYPE_VT8500:
ret = vt8500_find_pll_bits(rate, *prate, &mul, &div1);
if (!ret)
round_rate = VT8500_BITS_TO_FREQ(*prate, mul, div1);
break;
case PLL_TYPE_WM8650:
ret = wm8650_find_pll_bits(rate, *prate, &mul, &div1, &div2);
if (!ret)
round_rate = WM8650_BITS_TO_FREQ(*prate, mul, div1, div2);
break;
case PLL_TYPE_WM8750:
ret = wm8750_find_pll_bits(rate, *prate, &filter, &mul, &div1, &div2);
if (!ret)
round_rate = WM8750_BITS_TO_FREQ(*prate, mul, div1, div2);
break;
case PLL_TYPE_WM8850:
ret = wm8850_find_pll_bits(rate, *prate, &mul, &div1, &div2);
if (!ret)
round_rate = WM8850_BITS_TO_FREQ(*prate, mul, div1, div2);
break;
default:
ret = -EINVAL;
}
if (ret)
return ret;
return round_rate;
}
static unsigned long vtwm_pll_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_pll *pll = to_clk_pll(hw);
u32 pll_val = readl(pll->reg);
unsigned long pll_freq;
switch (pll->type) {
case PLL_TYPE_VT8500:
pll_freq = parent_rate * VT8500_PLL_MUL(pll_val);
pll_freq /= VT8500_PLL_DIV(pll_val);
break;
case PLL_TYPE_WM8650:
pll_freq = parent_rate * WM8650_PLL_MUL(pll_val);
pll_freq /= WM8650_PLL_DIV(pll_val);
break;
case PLL_TYPE_WM8750:
pll_freq = parent_rate * WM8750_PLL_MUL(pll_val);
pll_freq /= WM8750_PLL_DIV(pll_val);
break;
case PLL_TYPE_WM8850:
pll_freq = parent_rate * WM8850_PLL_MUL(pll_val);
pll_freq /= WM8850_PLL_DIV(pll_val);
break;
default:
pll_freq = 0;
}
return pll_freq;
}
static const struct clk_ops vtwm_pll_ops = {
.round_rate = vtwm_pll_round_rate,
.set_rate = vtwm_pll_set_rate,
.recalc_rate = vtwm_pll_recalc_rate,
};
static __init void vtwm_pll_clk_init(struct device_node *node, int pll_type)
{
u32 reg;
struct clk_hw *hw;
struct clk_pll *pll_clk;
const char *clk_name = node->name;
const char *parent_name;
struct clk_init_data init;
int rc;
if (!pmc_base)
vtwm_set_pmc_base();
	rc = of_property_read_u32(node, "reg", &reg);
if (WARN_ON(rc))
return;
pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL);
if (WARN_ON(!pll_clk))
return;
pll_clk->reg = pmc_base + reg;
pll_clk->lock = &_lock;
pll_clk->type = pll_type;
of_property_read_string(node, "clock-output-names", &clk_name);
init.name = clk_name;
init.ops = &vtwm_pll_ops;
init.flags = 0;
parent_name = of_clk_get_parent_name(node, 0);
init.parent_names = &parent_name;
init.num_parents = 1;
pll_clk->hw.init = &init;
hw = &pll_clk->hw;
rc = clk_hw_register(NULL, &pll_clk->hw);
if (WARN_ON(rc)) {
kfree(pll_clk);
return;
}
rc = of_clk_add_hw_provider(node, of_clk_hw_simple_get, hw);
clk_hw_register_clkdev(hw, clk_name, NULL);
}
/* Wrappers for initialization functions */
static void __init vt8500_pll_init(struct device_node *node)
{
vtwm_pll_clk_init(node, PLL_TYPE_VT8500);
}
CLK_OF_DECLARE(vt8500_pll, "via,vt8500-pll-clock", vt8500_pll_init);
static void __init wm8650_pll_init(struct device_node *node)
{
vtwm_pll_clk_init(node, PLL_TYPE_WM8650);
}
CLK_OF_DECLARE(wm8650_pll, "wm,wm8650-pll-clock", wm8650_pll_init);
static void __init wm8750_pll_init(struct device_node *node)
{
vtwm_pll_clk_init(node, PLL_TYPE_WM8750);
}
CLK_OF_DECLARE(wm8750_pll, "wm,wm8750-pll-clock", wm8750_pll_init);
static void __init wm8850_pll_init(struct device_node *node)
{
vtwm_pll_clk_init(node, PLL_TYPE_WM8850);
}
CLK_OF_DECLARE(wm8850_pll, "wm,wm8850-pll-clock", wm8850_pll_init);
| linux-master | drivers/clk/clk-vt8500.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* AXI clkgen driver
*
* Copyright 2012-2013 Analog Devices Inc.
* Author: Lars-Peter Clausen <[email protected]>
*/
#include <linux/platform_device.h>
#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/err.h>
#define AXI_CLKGEN_V2_REG_RESET 0x40
#define AXI_CLKGEN_V2_REG_CLKSEL 0x44
#define AXI_CLKGEN_V2_REG_DRP_CNTRL 0x70
#define AXI_CLKGEN_V2_REG_DRP_STATUS 0x74
#define AXI_CLKGEN_V2_RESET_MMCM_ENABLE BIT(1)
#define AXI_CLKGEN_V2_RESET_ENABLE BIT(0)
#define AXI_CLKGEN_V2_DRP_CNTRL_SEL BIT(29)
#define AXI_CLKGEN_V2_DRP_CNTRL_READ BIT(28)
#define AXI_CLKGEN_V2_DRP_STATUS_BUSY BIT(16)
#define MMCM_REG_CLKOUT5_2 0x07
#define MMCM_REG_CLKOUT0_1 0x08
#define MMCM_REG_CLKOUT0_2 0x09
#define MMCM_REG_CLKOUT6_2 0x13
#define MMCM_REG_CLK_FB1 0x14
#define MMCM_REG_CLK_FB2 0x15
#define MMCM_REG_CLK_DIV 0x16
#define MMCM_REG_LOCK1 0x18
#define MMCM_REG_LOCK2 0x19
#define MMCM_REG_LOCK3 0x1a
#define MMCM_REG_POWER 0x28
#define MMCM_REG_FILTER1 0x4e
#define MMCM_REG_FILTER2 0x4f
#define MMCM_CLKOUT_NOCOUNT BIT(6)
#define MMCM_CLK_DIV_DIVIDE BIT(11)
#define MMCM_CLK_DIV_NOCOUNT BIT(12)
struct axi_clkgen_limits {
unsigned int fpfd_min;
unsigned int fpfd_max;
unsigned int fvco_min;
unsigned int fvco_max;
};
struct axi_clkgen {
void __iomem *base;
struct clk_hw clk_hw;
struct axi_clkgen_limits limits;
};
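/*
 * The filter and lock constants below are opaque per-multiplier MMCM
 * tuning values, indexed by (m - 1) at set_rate time; they appear to
 * follow the vendor's dynamic-reconfiguration reference tables
 * (cf. Xilinx XAPP888).
 */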
static uint32_t axi_clkgen_lookup_filter(unsigned int m)
{
switch (m) {
case 0:
return 0x01001990;
case 1:
return 0x01001190;
case 2:
return 0x01009890;
case 3:
return 0x01001890;
case 4:
return 0x01008890;
case 5 ... 8:
return 0x01009090;
case 9 ... 11:
return 0x01000890;
case 12:
return 0x08009090;
case 13 ... 22:
return 0x01001090;
case 23 ... 36:
return 0x01008090;
case 37 ... 46:
return 0x08001090;
default:
return 0x08008090;
}
}
static const uint32_t axi_clkgen_lock_table[] = {
0x060603e8, 0x060603e8, 0x080803e8, 0x0b0b03e8,
0x0e0e03e8, 0x111103e8, 0x131303e8, 0x161603e8,
0x191903e8, 0x1c1c03e8, 0x1f1f0384, 0x1f1f0339,
0x1f1f02ee, 0x1f1f02bc, 0x1f1f028a, 0x1f1f0271,
0x1f1f023f, 0x1f1f0226, 0x1f1f020d, 0x1f1f01f4,
0x1f1f01db, 0x1f1f01c2, 0x1f1f01a9, 0x1f1f0190,
0x1f1f0190, 0x1f1f0177, 0x1f1f015e, 0x1f1f015e,
0x1f1f0145, 0x1f1f0145, 0x1f1f012c, 0x1f1f012c,
0x1f1f012c, 0x1f1f0113, 0x1f1f0113, 0x1f1f0113,
};
static uint32_t axi_clkgen_lookup_lock(unsigned int m)
{
if (m < ARRAY_SIZE(axi_clkgen_lock_table))
return axi_clkgen_lock_table[m];
return 0x1f1f00fa;
}
static const struct axi_clkgen_limits axi_clkgen_zynqmp_default_limits = {
.fpfd_min = 10000,
.fpfd_max = 450000,
.fvco_min = 800000,
.fvco_max = 1600000,
};
static const struct axi_clkgen_limits axi_clkgen_zynq_default_limits = {
.fpfd_min = 10000,
.fpfd_max = 300000,
.fvco_min = 600000,
.fvco_max = 1200000,
};
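/*
 * The MMCM produces fout = fin * m / (d * dout), constrained by the PFD
 * (fin / d) and VCO (fin * m / d) limits. The search below first scans
 * integer m/d pairs with the closest dout, then retries with
 * fract_shift = 3 so m and dout can move in 1/8 steps; the results are
 * returned pre-scaled by 8 (hence the << (3 - fract_shift)).
 */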
static void axi_clkgen_calc_params(const struct axi_clkgen_limits *limits,
unsigned long fin, unsigned long fout,
unsigned int *best_d, unsigned int *best_m, unsigned int *best_dout)
{
unsigned long d, d_min, d_max, _d_min, _d_max;
unsigned long m, m_min, m_max;
unsigned long f, dout, best_f, fvco;
unsigned long fract_shift = 0;
unsigned long fvco_min_fract, fvco_max_fract;
fin /= 1000;
fout /= 1000;
best_f = ULONG_MAX;
*best_d = 0;
*best_m = 0;
*best_dout = 0;
d_min = max_t(unsigned long, DIV_ROUND_UP(fin, limits->fpfd_max), 1);
d_max = min_t(unsigned long, fin / limits->fpfd_min, 80);
again:
fvco_min_fract = limits->fvco_min << fract_shift;
fvco_max_fract = limits->fvco_max << fract_shift;
m_min = max_t(unsigned long, DIV_ROUND_UP(fvco_min_fract, fin) * d_min, 1);
m_max = min_t(unsigned long, fvco_max_fract * d_max / fin, 64 << fract_shift);
for (m = m_min; m <= m_max; m++) {
_d_min = max(d_min, DIV_ROUND_UP(fin * m, fvco_max_fract));
_d_max = min(d_max, fin * m / fvco_min_fract);
for (d = _d_min; d <= _d_max; d++) {
fvco = fin * m / d;
dout = DIV_ROUND_CLOSEST(fvco, fout);
dout = clamp_t(unsigned long, dout, 1, 128 << fract_shift);
f = fvco / dout;
if (abs(f - fout) < abs(best_f - fout)) {
best_f = f;
*best_d = d;
*best_m = m << (3 - fract_shift);
*best_dout = dout << (3 - fract_shift);
if (best_f == fout)
return;
}
}
}
	/* Let's see if we find a better setting in fractional mode */
if (fract_shift == 0) {
fract_shift = 3;
goto again;
}
}
struct axi_clkgen_div_params {
unsigned int low;
unsigned int high;
unsigned int edge;
unsigned int nocount;
unsigned int frac_en;
unsigned int frac;
unsigned int frac_wf_f;
unsigned int frac_wf_r;
unsigned int frac_phase;
};
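/*
 * Split an integer divider (plus an optional 1/8-step fractional part)
 * into the MMCM counter fields: high/low phase counts, an edge flag for
 * odd dividers, and a no-count bypass when the divider is 1.
 */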
static void axi_clkgen_calc_clk_params(unsigned int divider,
unsigned int frac_divider, struct axi_clkgen_div_params *params)
{
memset(params, 0x0, sizeof(*params));
if (divider == 1) {
params->nocount = 1;
return;
}
if (frac_divider == 0) {
params->high = divider / 2;
params->edge = divider % 2;
params->low = divider - params->high;
} else {
params->frac_en = 1;
params->frac = frac_divider;
params->high = divider / 2;
params->edge = divider % 2;
params->low = params->high;
if (params->edge == 0) {
params->high--;
params->frac_wf_r = 1;
}
if (params->edge == 0 || frac_divider == 1)
params->low--;
if (((params->edge == 0) ^ (frac_divider == 1)) ||
(divider == 2 && frac_divider == 1))
params->frac_wf_f = 1;
params->frac_phase = params->edge * 4 + frac_divider / 2;
}
}
static void axi_clkgen_write(struct axi_clkgen *axi_clkgen,
unsigned int reg, unsigned int val)
{
writel(val, axi_clkgen->base + reg);
}
static void axi_clkgen_read(struct axi_clkgen *axi_clkgen,
unsigned int reg, unsigned int *val)
{
*val = readl(axi_clkgen->base + reg);
}
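/*
 * Poll the DRP status register until the busy flag clears. On success
 * the low 16 bits of the status word (the DRP read-back data) are
 * returned; on timeout, -EIO.
 */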
static int axi_clkgen_wait_non_busy(struct axi_clkgen *axi_clkgen)
{
unsigned int timeout = 10000;
unsigned int val;
do {
axi_clkgen_read(axi_clkgen, AXI_CLKGEN_V2_REG_DRP_STATUS, &val);
} while ((val & AXI_CLKGEN_V2_DRP_STATUS_BUSY) && --timeout);
if (val & AXI_CLKGEN_V2_DRP_STATUS_BUSY)
return -EIO;
return val & 0xffff;
}
static int axi_clkgen_mmcm_read(struct axi_clkgen *axi_clkgen,
unsigned int reg, unsigned int *val)
{
unsigned int reg_val;
int ret;
ret = axi_clkgen_wait_non_busy(axi_clkgen);
if (ret < 0)
return ret;
reg_val = AXI_CLKGEN_V2_DRP_CNTRL_SEL | AXI_CLKGEN_V2_DRP_CNTRL_READ;
reg_val |= (reg << 16);
axi_clkgen_write(axi_clkgen, AXI_CLKGEN_V2_REG_DRP_CNTRL, reg_val);
ret = axi_clkgen_wait_non_busy(axi_clkgen);
if (ret < 0)
return ret;
*val = ret;
return 0;
}
static int axi_clkgen_mmcm_write(struct axi_clkgen *axi_clkgen,
unsigned int reg, unsigned int val, unsigned int mask)
{
unsigned int reg_val = 0;
int ret;
ret = axi_clkgen_wait_non_busy(axi_clkgen);
if (ret < 0)
return ret;
if (mask != 0xffff) {
		axi_clkgen_mmcm_read(axi_clkgen, reg, &reg_val);
reg_val &= ~mask;
}
reg_val |= AXI_CLKGEN_V2_DRP_CNTRL_SEL | (reg << 16) | (val & mask);
axi_clkgen_write(axi_clkgen, AXI_CLKGEN_V2_REG_DRP_CNTRL, reg_val);
return 0;
}
static void axi_clkgen_mmcm_enable(struct axi_clkgen *axi_clkgen,
bool enable)
{
unsigned int val = AXI_CLKGEN_V2_RESET_ENABLE;
if (enable)
val |= AXI_CLKGEN_V2_RESET_MMCM_ENABLE;
axi_clkgen_write(axi_clkgen, AXI_CLKGEN_V2_REG_RESET, val);
}
static struct axi_clkgen *clk_hw_to_axi_clkgen(struct clk_hw *clk_hw)
{
return container_of(clk_hw, struct axi_clkgen, clk_hw);
}
static void axi_clkgen_set_div(struct axi_clkgen *axi_clkgen,
unsigned int reg1, unsigned int reg2, unsigned int reg3,
struct axi_clkgen_div_params *params)
{
axi_clkgen_mmcm_write(axi_clkgen, reg1,
(params->high << 6) | params->low, 0xefff);
axi_clkgen_mmcm_write(axi_clkgen, reg2,
(params->frac << 12) | (params->frac_en << 11) |
(params->frac_wf_r << 10) | (params->edge << 7) |
(params->nocount << 6), 0x7fff);
if (reg3 != 0) {
axi_clkgen_mmcm_write(axi_clkgen, reg3,
(params->frac_phase << 11) | (params->frac_wf_f << 10), 0x3c00);
}
}
static int axi_clkgen_set_rate(struct clk_hw *clk_hw,
unsigned long rate, unsigned long parent_rate)
{
struct axi_clkgen *axi_clkgen = clk_hw_to_axi_clkgen(clk_hw);
const struct axi_clkgen_limits *limits = &axi_clkgen->limits;
unsigned int d, m, dout;
struct axi_clkgen_div_params params;
uint32_t power = 0;
uint32_t filter;
uint32_t lock;
if (parent_rate == 0 || rate == 0)
return -EINVAL;
axi_clkgen_calc_params(limits, parent_rate, rate, &d, &m, &dout);
if (d == 0 || dout == 0 || m == 0)
return -EINVAL;
if ((dout & 0x7) != 0 || (m & 0x7) != 0)
power |= 0x9800;
axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_POWER, power, 0x9800);
filter = axi_clkgen_lookup_filter(m - 1);
lock = axi_clkgen_lookup_lock(m - 1);
	axi_clkgen_calc_clk_params(dout >> 3, dout & 0x7, &params);
	axi_clkgen_set_div(axi_clkgen, MMCM_REG_CLKOUT0_1, MMCM_REG_CLKOUT0_2,
		MMCM_REG_CLKOUT5_2, &params);
	axi_clkgen_calc_clk_params(d, 0, &params);
	axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_CLK_DIV,
		(params.edge << 13) | (params.nocount << 12) |
		(params.high << 6) | params.low, 0x3fff);
	axi_clkgen_calc_clk_params(m >> 3, m & 0x7, &params);
	axi_clkgen_set_div(axi_clkgen, MMCM_REG_CLK_FB1, MMCM_REG_CLK_FB2,
		MMCM_REG_CLKOUT6_2, &params);
axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_LOCK1, lock & 0x3ff, 0x3ff);
axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_LOCK2,
(((lock >> 16) & 0x1f) << 10) | 0x1, 0x7fff);
axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_LOCK3,
(((lock >> 24) & 0x1f) << 10) | 0x3e9, 0x7fff);
axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_FILTER1, filter >> 16, 0x9900);
axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_FILTER2, filter, 0x9900);
return 0;
}
static int axi_clkgen_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
struct axi_clkgen *axi_clkgen = clk_hw_to_axi_clkgen(hw);
const struct axi_clkgen_limits *limits = &axi_clkgen->limits;
unsigned int d, m, dout;
unsigned long long tmp;
axi_clkgen_calc_params(limits, req->best_parent_rate, req->rate,
&d, &m, &dout);
if (d == 0 || dout == 0 || m == 0)
return -EINVAL;
tmp = (unsigned long long)req->best_parent_rate * m;
tmp = DIV_ROUND_CLOSEST_ULL(tmp, dout * d);
req->rate = min_t(unsigned long long, tmp, LONG_MAX);
return 0;
}
static unsigned int axi_clkgen_get_div(struct axi_clkgen *axi_clkgen,
unsigned int reg1, unsigned int reg2)
{
unsigned int val1, val2;
unsigned int div;
axi_clkgen_mmcm_read(axi_clkgen, reg2, &val2);
if (val2 & MMCM_CLKOUT_NOCOUNT)
return 8;
axi_clkgen_mmcm_read(axi_clkgen, reg1, &val1);
div = (val1 & 0x3f) + ((val1 >> 6) & 0x3f);
div <<= 3;
if (val2 & MMCM_CLK_DIV_DIVIDE) {
if ((val2 & BIT(7)) && (val2 & 0x7000) != 0x1000)
div += 8;
else
div += 16;
div += (val2 >> 12) & 0x7;
}
return div;
}
static unsigned long axi_clkgen_recalc_rate(struct clk_hw *clk_hw,
unsigned long parent_rate)
{
struct axi_clkgen *axi_clkgen = clk_hw_to_axi_clkgen(clk_hw);
unsigned int d, m, dout;
unsigned long long tmp;
unsigned int val;
dout = axi_clkgen_get_div(axi_clkgen, MMCM_REG_CLKOUT0_1,
MMCM_REG_CLKOUT0_2);
m = axi_clkgen_get_div(axi_clkgen, MMCM_REG_CLK_FB1,
MMCM_REG_CLK_FB2);
axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLK_DIV, &val);
if (val & MMCM_CLK_DIV_NOCOUNT)
d = 1;
else
d = (val & 0x3f) + ((val >> 6) & 0x3f);
if (d == 0 || dout == 0)
return 0;
tmp = (unsigned long long)parent_rate * m;
tmp = DIV_ROUND_CLOSEST_ULL(tmp, dout * d);
return min_t(unsigned long long, tmp, ULONG_MAX);
}
static int axi_clkgen_enable(struct clk_hw *clk_hw)
{
struct axi_clkgen *axi_clkgen = clk_hw_to_axi_clkgen(clk_hw);
axi_clkgen_mmcm_enable(axi_clkgen, true);
return 0;
}
static void axi_clkgen_disable(struct clk_hw *clk_hw)
{
struct axi_clkgen *axi_clkgen = clk_hw_to_axi_clkgen(clk_hw);
axi_clkgen_mmcm_enable(axi_clkgen, false);
}
static int axi_clkgen_set_parent(struct clk_hw *clk_hw, u8 index)
{
struct axi_clkgen *axi_clkgen = clk_hw_to_axi_clkgen(clk_hw);
axi_clkgen_write(axi_clkgen, AXI_CLKGEN_V2_REG_CLKSEL, index);
return 0;
}
static u8 axi_clkgen_get_parent(struct clk_hw *clk_hw)
{
struct axi_clkgen *axi_clkgen = clk_hw_to_axi_clkgen(clk_hw);
unsigned int parent;
axi_clkgen_read(axi_clkgen, AXI_CLKGEN_V2_REG_CLKSEL, &parent);
return parent;
}
static const struct clk_ops axi_clkgen_ops = {
.recalc_rate = axi_clkgen_recalc_rate,
.determine_rate = axi_clkgen_determine_rate,
.set_rate = axi_clkgen_set_rate,
.enable = axi_clkgen_enable,
.disable = axi_clkgen_disable,
.set_parent = axi_clkgen_set_parent,
.get_parent = axi_clkgen_get_parent,
};
static int axi_clkgen_probe(struct platform_device *pdev)
{
const struct axi_clkgen_limits *dflt_limits;
struct axi_clkgen *axi_clkgen;
struct clk_init_data init;
const char *parent_names[2];
const char *clk_name;
unsigned int i;
int ret;
dflt_limits = device_get_match_data(&pdev->dev);
if (!dflt_limits)
return -ENODEV;
axi_clkgen = devm_kzalloc(&pdev->dev, sizeof(*axi_clkgen), GFP_KERNEL);
if (!axi_clkgen)
return -ENOMEM;
axi_clkgen->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(axi_clkgen->base))
return PTR_ERR(axi_clkgen->base);
init.num_parents = of_clk_get_parent_count(pdev->dev.of_node);
if (init.num_parents < 1 || init.num_parents > 2)
return -EINVAL;
for (i = 0; i < init.num_parents; i++) {
parent_names[i] = of_clk_get_parent_name(pdev->dev.of_node, i);
if (!parent_names[i])
return -EINVAL;
}
memcpy(&axi_clkgen->limits, dflt_limits, sizeof(axi_clkgen->limits));
clk_name = pdev->dev.of_node->name;
of_property_read_string(pdev->dev.of_node, "clock-output-names",
&clk_name);
init.name = clk_name;
init.ops = &axi_clkgen_ops;
init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
init.parent_names = parent_names;
axi_clkgen_mmcm_enable(axi_clkgen, false);
axi_clkgen->clk_hw.init = &init;
ret = devm_clk_hw_register(&pdev->dev, &axi_clkgen->clk_hw);
if (ret)
return ret;
return devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_simple_get,
&axi_clkgen->clk_hw);
}
static const struct of_device_id axi_clkgen_ids[] = {
{
.compatible = "adi,zynqmp-axi-clkgen-2.00.a",
.data = &axi_clkgen_zynqmp_default_limits,
},
{
.compatible = "adi,axi-clkgen-2.00.a",
.data = &axi_clkgen_zynq_default_limits,
},
{ }
};
MODULE_DEVICE_TABLE(of, axi_clkgen_ids);
static struct platform_driver axi_clkgen_driver = {
.driver = {
.name = "adi-axi-clkgen",
.of_match_table = axi_clkgen_ids,
},
.probe = axi_clkgen_probe,
};
module_platform_driver(axi_clkgen_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Lars-Peter Clausen <[email protected]>");
MODULE_DESCRIPTION("Driver for the Analog Devices' AXI clkgen pcore clock generator");
| linux-master | drivers/clk/clk-axi-clkgen.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2010-2011 Canonical Ltd <[email protected]>
* Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <[email protected]>
*
* Fixed rate clock implementation
*/
#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/platform_device.h>
/*
* DOC: basic fixed-rate clock that cannot gate
*
* Traits of this clock:
* prepare - clk_(un)prepare only ensures parents are prepared
* enable - clk_enable only ensures parents are enabled
* rate - rate is always a fixed value. No clk_set_rate support
* parent - fixed parent. No clk_set_parent support
*/
#define to_clk_fixed_rate(_hw) container_of(_hw, struct clk_fixed_rate, hw)
static unsigned long clk_fixed_rate_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
return to_clk_fixed_rate(hw)->fixed_rate;
}
static unsigned long clk_fixed_rate_recalc_accuracy(struct clk_hw *hw,
unsigned long parent_accuracy)
{
struct clk_fixed_rate *fixed = to_clk_fixed_rate(hw);
if (fixed->flags & CLK_FIXED_RATE_PARENT_ACCURACY)
return parent_accuracy;
return fixed->fixed_accuracy;
}
const struct clk_ops clk_fixed_rate_ops = {
.recalc_rate = clk_fixed_rate_recalc_rate,
.recalc_accuracy = clk_fixed_rate_recalc_accuracy,
};
EXPORT_SYMBOL_GPL(clk_fixed_rate_ops);
static void devm_clk_hw_register_fixed_rate_release(struct device *dev, void *res)
{
struct clk_fixed_rate *fix = res;
/*
* We can not use clk_hw_unregister_fixed_rate, since it will kfree()
* the hw, resulting in double free. Just unregister the hw and let
* devres code kfree() it.
*/
clk_hw_unregister(&fix->hw);
}
struct clk_hw *__clk_hw_register_fixed_rate(struct device *dev,
struct device_node *np, const char *name,
const char *parent_name, const struct clk_hw *parent_hw,
const struct clk_parent_data *parent_data, unsigned long flags,
unsigned long fixed_rate, unsigned long fixed_accuracy,
unsigned long clk_fixed_flags, bool devm)
{
struct clk_fixed_rate *fixed;
struct clk_hw *hw;
struct clk_init_data init = {};
int ret = -EINVAL;
/* allocate fixed-rate clock */
if (devm)
fixed = devres_alloc(devm_clk_hw_register_fixed_rate_release,
sizeof(*fixed), GFP_KERNEL);
else
fixed = kzalloc(sizeof(*fixed), GFP_KERNEL);
if (!fixed)
return ERR_PTR(-ENOMEM);
init.name = name;
init.ops = &clk_fixed_rate_ops;
init.flags = flags;
init.parent_names = parent_name ? &parent_name : NULL;
init.parent_hws = parent_hw ? &parent_hw : NULL;
init.parent_data = parent_data;
if (parent_name || parent_hw || parent_data)
init.num_parents = 1;
else
init.num_parents = 0;
/* struct clk_fixed_rate assignments */
fixed->flags = clk_fixed_flags;
fixed->fixed_rate = fixed_rate;
fixed->fixed_accuracy = fixed_accuracy;
fixed->hw.init = &init;
/* register the clock */
hw = &fixed->hw;
if (dev || !np)
ret = clk_hw_register(dev, hw);
else
ret = of_clk_hw_register(np, hw);
if (ret) {
if (devm)
devres_free(fixed);
else
kfree(fixed);
hw = ERR_PTR(ret);
} else if (devm)
devres_add(dev, fixed);
return hw;
}
EXPORT_SYMBOL_GPL(__clk_hw_register_fixed_rate);
struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
unsigned long fixed_rate)
{
struct clk_hw *hw;
hw = clk_hw_register_fixed_rate_with_accuracy(dev, name, parent_name,
flags, fixed_rate, 0);
if (IS_ERR(hw))
return ERR_CAST(hw);
return hw->clk;
}
EXPORT_SYMBOL_GPL(clk_register_fixed_rate);
void clk_unregister_fixed_rate(struct clk *clk)
{
struct clk_hw *hw;
hw = __clk_get_hw(clk);
if (!hw)
return;
clk_unregister(clk);
kfree(to_clk_fixed_rate(hw));
}
EXPORT_SYMBOL_GPL(clk_unregister_fixed_rate);
void clk_hw_unregister_fixed_rate(struct clk_hw *hw)
{
struct clk_fixed_rate *fixed;
fixed = to_clk_fixed_rate(hw);
clk_hw_unregister(hw);
kfree(fixed);
}
EXPORT_SYMBOL_GPL(clk_hw_unregister_fixed_rate);
#ifdef CONFIG_OF
static struct clk_hw *_of_fixed_clk_setup(struct device_node *node)
{
struct clk_hw *hw;
const char *clk_name = node->name;
u32 rate;
u32 accuracy = 0;
int ret;
if (of_property_read_u32(node, "clock-frequency", &rate))
return ERR_PTR(-EIO);
of_property_read_u32(node, "clock-accuracy", &accuracy);
of_property_read_string(node, "clock-output-names", &clk_name);
hw = clk_hw_register_fixed_rate_with_accuracy(NULL, clk_name, NULL,
0, rate, accuracy);
if (IS_ERR(hw))
return hw;
ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, hw);
if (ret) {
clk_hw_unregister_fixed_rate(hw);
return ERR_PTR(ret);
}
return hw;
}
/**
* of_fixed_clk_setup() - Setup function for simple fixed rate clock
* @node: device node for the clock
*/
void __init of_fixed_clk_setup(struct device_node *node)
{
_of_fixed_clk_setup(node);
}
CLK_OF_DECLARE(fixed_clk, "fixed-clock", of_fixed_clk_setup);
static void of_fixed_clk_remove(struct platform_device *pdev)
{
struct clk_hw *hw = platform_get_drvdata(pdev);
of_clk_del_provider(pdev->dev.of_node);
clk_hw_unregister_fixed_rate(hw);
}
static int of_fixed_clk_probe(struct platform_device *pdev)
{
struct clk_hw *hw;
/*
* This function is not executed when of_fixed_clk_setup
* succeeded.
*/
hw = _of_fixed_clk_setup(pdev->dev.of_node);
if (IS_ERR(hw))
return PTR_ERR(hw);
platform_set_drvdata(pdev, hw);
return 0;
}
static const struct of_device_id of_fixed_clk_ids[] = {
{ .compatible = "fixed-clock" },
{ }
};
static struct platform_driver of_fixed_clk_driver = {
.driver = {
.name = "of_fixed_clk",
.of_match_table = of_fixed_clk_ids,
},
.probe = of_fixed_clk_probe,
.remove_new = of_fixed_clk_remove,
};
builtin_platform_driver(of_fixed_clk_driver);
#endif
| linux-master | drivers/clk/clk-fixed-rate.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* drivers/clk/clkdev.c
*
* Copyright (C) 2008 Russell King.
*
* Helper for the clk API to assist looking up a struct clk.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/mutex.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
#include "clk.h"
static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
/*
* Find the correct struct clk for the device and connection ID.
* We do slightly fuzzy matching here:
* An entry with a NULL ID is assumed to be a wildcard.
* If an entry has a device ID, it must match
* If an entry has a connection ID, it must match
* Then we take the most specific entry - with the following
* order of precedence: dev+con > dev only > con only.
*/
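/*
 * Example: with entries {NULL, "uart"} and {"dev1", NULL} in the list,
 * a lookup for ("dev1", "uart") scores them 1 and 2 respectively and
 * returns the dev-only entry; an exact {"dev1", "uart"} entry would
 * score 3 (best_possible) and terminate the scan immediately.
 */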
static struct clk_lookup *clk_find(const char *dev_id, const char *con_id)
{
struct clk_lookup *p, *cl = NULL;
int match, best_found = 0, best_possible = 0;
if (dev_id)
best_possible += 2;
if (con_id)
best_possible += 1;
lockdep_assert_held(&clocks_mutex);
list_for_each_entry(p, &clocks, node) {
match = 0;
if (p->dev_id) {
if (!dev_id || strcmp(p->dev_id, dev_id))
continue;
match += 2;
}
if (p->con_id) {
if (!con_id || strcmp(p->con_id, con_id))
continue;
match += 1;
}
if (match > best_found) {
cl = p;
if (match != best_possible)
best_found = match;
else
break;
}
}
return cl;
}
struct clk_hw *clk_find_hw(const char *dev_id, const char *con_id)
{
struct clk_lookup *cl;
struct clk_hw *hw = ERR_PTR(-ENOENT);
mutex_lock(&clocks_mutex);
cl = clk_find(dev_id, con_id);
if (cl)
hw = cl->clk_hw;
mutex_unlock(&clocks_mutex);
return hw;
}
static struct clk *__clk_get_sys(struct device *dev, const char *dev_id,
const char *con_id)
{
struct clk_hw *hw = clk_find_hw(dev_id, con_id);
return clk_hw_create_clk(dev, hw, dev_id, con_id);
}
struct clk *clk_get_sys(const char *dev_id, const char *con_id)
{
return __clk_get_sys(NULL, dev_id, con_id);
}
EXPORT_SYMBOL(clk_get_sys);
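/*
 * clk_get() tries a devicetree "clocks" lookup first when the consumer
 * has an OF node, propagating -EPROBE_DEFER if the provider isn't ready
 * yet; any other DT failure (or the absence of an OF node) falls back
 * to the clkdev lookup table.
 */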
struct clk *clk_get(struct device *dev, const char *con_id)
{
const char *dev_id = dev ? dev_name(dev) : NULL;
struct clk_hw *hw;
if (dev && dev->of_node) {
hw = of_clk_get_hw(dev->of_node, 0, con_id);
if (!IS_ERR(hw) || PTR_ERR(hw) == -EPROBE_DEFER)
return clk_hw_create_clk(dev, hw, dev_id, con_id);
}
return __clk_get_sys(dev, dev_id, con_id);
}
EXPORT_SYMBOL(clk_get);
void clk_put(struct clk *clk)
{
__clk_put(clk);
}
EXPORT_SYMBOL(clk_put);
static void __clkdev_add(struct clk_lookup *cl)
{
mutex_lock(&clocks_mutex);
list_add_tail(&cl->node, &clocks);
mutex_unlock(&clocks_mutex);
}
void clkdev_add(struct clk_lookup *cl)
{
if (!cl->clk_hw)
cl->clk_hw = __clk_get_hw(cl->clk);
__clkdev_add(cl);
}
EXPORT_SYMBOL(clkdev_add);
void clkdev_add_table(struct clk_lookup *cl, size_t num)
{
mutex_lock(&clocks_mutex);
while (num--) {
cl->clk_hw = __clk_get_hw(cl->clk);
list_add_tail(&cl->node, &clocks);
cl++;
}
mutex_unlock(&clocks_mutex);
}
#define MAX_DEV_ID 20
#define MAX_CON_ID 16
struct clk_lookup_alloc {
struct clk_lookup cl;
char dev_id[MAX_DEV_ID];
char con_id[MAX_CON_ID];
};
static struct clk_lookup * __ref
vclkdev_alloc(struct clk_hw *hw, const char *con_id, const char *dev_fmt,
va_list ap)
{
struct clk_lookup_alloc *cla;
cla = kzalloc(sizeof(*cla), GFP_KERNEL);
if (!cla)
return NULL;
cla->cl.clk_hw = hw;
if (con_id) {
strscpy(cla->con_id, con_id, sizeof(cla->con_id));
cla->cl.con_id = cla->con_id;
}
if (dev_fmt) {
vscnprintf(cla->dev_id, sizeof(cla->dev_id), dev_fmt, ap);
cla->cl.dev_id = cla->dev_id;
}
return &cla->cl;
}
static struct clk_lookup *
vclkdev_create(struct clk_hw *hw, const char *con_id, const char *dev_fmt,
va_list ap)
{
struct clk_lookup *cl;
cl = vclkdev_alloc(hw, con_id, dev_fmt, ap);
if (cl)
__clkdev_add(cl);
return cl;
}
/**
* clkdev_create - allocate and add a clkdev lookup structure
* @clk: struct clk to associate with all clk_lookups
* @con_id: connection ID string on device
* @dev_fmt: format string describing device name
*
* Returns a clk_lookup structure, which can be later unregistered and
* freed.
*/
struct clk_lookup *clkdev_create(struct clk *clk, const char *con_id,
const char *dev_fmt, ...)
{
struct clk_lookup *cl;
va_list ap;
va_start(ap, dev_fmt);
cl = vclkdev_create(__clk_get_hw(clk), con_id, dev_fmt, ap);
va_end(ap);
return cl;
}
EXPORT_SYMBOL_GPL(clkdev_create);
/**
* clkdev_hw_create - allocate and add a clkdev lookup structure
* @hw: struct clk_hw to associate with all clk_lookups
* @con_id: connection ID string on device
* @dev_fmt: format string describing device name
*
* Returns a clk_lookup structure, which can be later unregistered and
* freed.
*/
struct clk_lookup *clkdev_hw_create(struct clk_hw *hw, const char *con_id,
const char *dev_fmt, ...)
{
struct clk_lookup *cl;
va_list ap;
va_start(ap, dev_fmt);
cl = vclkdev_create(hw, con_id, dev_fmt, ap);
va_end(ap);
return cl;
}
EXPORT_SYMBOL_GPL(clkdev_hw_create);
int clk_add_alias(const char *alias, const char *alias_dev_name,
const char *con_id, struct device *dev)
{
struct clk *r = clk_get(dev, con_id);
struct clk_lookup *l;
if (IS_ERR(r))
return PTR_ERR(r);
l = clkdev_create(r, alias, alias_dev_name ? "%s" : NULL,
alias_dev_name);
clk_put(r);
return l ? 0 : -ENODEV;
}
EXPORT_SYMBOL(clk_add_alias);
/*
* clkdev_drop - remove a clock dynamically allocated
*/
void clkdev_drop(struct clk_lookup *cl)
{
mutex_lock(&clocks_mutex);
list_del(&cl->node);
mutex_unlock(&clocks_mutex);
kfree(cl);
}
EXPORT_SYMBOL(clkdev_drop);
static struct clk_lookup *__clk_register_clkdev(struct clk_hw *hw,
const char *con_id,
const char *dev_id, ...)
{
struct clk_lookup *cl;
va_list ap;
va_start(ap, dev_id);
cl = vclkdev_create(hw, con_id, dev_id, ap);
va_end(ap);
return cl;
}
static int do_clk_register_clkdev(struct clk_hw *hw,
struct clk_lookup **cl, const char *con_id, const char *dev_id)
{
if (IS_ERR(hw))
return PTR_ERR(hw);
/*
* Since dev_id can be NULL, and NULL is handled specially, we must
* pass it as either a NULL format string, or with "%s".
*/
if (dev_id)
*cl = __clk_register_clkdev(hw, con_id, "%s", dev_id);
else
*cl = __clk_register_clkdev(hw, con_id, NULL);
return *cl ? 0 : -ENOMEM;
}
/**
* clk_register_clkdev - register one clock lookup for a struct clk
* @clk: struct clk to associate with all clk_lookups
* @con_id: connection ID string on device
* @dev_id: string describing device name
*
* con_id or dev_id may be NULL as a wildcard, just as in the rest of
* clkdev.
*
* To make things easier for mass registration, we detect error clks
* from a previous clk_register() call, and return the error code for
* those. This is to permit this function to be called immediately
* after clk_register().
*/
int clk_register_clkdev(struct clk *clk, const char *con_id,
const char *dev_id)
{
struct clk_lookup *cl;
if (IS_ERR(clk))
return PTR_ERR(clk);
return do_clk_register_clkdev(__clk_get_hw(clk), &cl, con_id,
dev_id);
}
EXPORT_SYMBOL(clk_register_clkdev);
/**
* clk_hw_register_clkdev - register one clock lookup for a struct clk_hw
* @hw: struct clk_hw to associate with all clk_lookups
* @con_id: connection ID string on device
* @dev_id: format string describing device name
*
* con_id or dev_id may be NULL as a wildcard, just as in the rest of
* clkdev.
*
* To make things easier for mass registration, we detect error clk_hws
* from a previous clk_hw_register_*() call, and return the error code for
* those. This is to permit this function to be called immediately
* after clk_hw_register_*().
*/
int clk_hw_register_clkdev(struct clk_hw *hw, const char *con_id,
const char *dev_id)
{
struct clk_lookup *cl;
return do_clk_register_clkdev(hw, &cl, con_id, dev_id);
}
EXPORT_SYMBOL(clk_hw_register_clkdev);
static void devm_clkdev_release(void *res)
{
clkdev_drop(res);
}
/**
* devm_clk_hw_register_clkdev - managed clk lookup registration for clk_hw
* @dev: device this lookup is bound
* @hw: struct clk_hw to associate with all clk_lookups
* @con_id: connection ID string on device
* @dev_id: format string describing device name
*
* con_id or dev_id may be NULL as a wildcard, just as in the rest of
* clkdev.
*
* To make things easier for mass registration, we detect error clk_hws
* from a previous clk_hw_register_*() call, and return the error code for
* those. This is to permit this function to be called immediately
* after clk_hw_register_*().
*/
int devm_clk_hw_register_clkdev(struct device *dev, struct clk_hw *hw,
const char *con_id, const char *dev_id)
{
struct clk_lookup *cl;
int rval;
rval = do_clk_register_clkdev(hw, &cl, con_id, dev_id);
if (rval)
return rval;
return devm_add_action_or_reset(dev, devm_clkdev_release, cl);
}
EXPORT_SYMBOL(devm_clk_hw_register_clkdev);
| linux-master | drivers/clk/clkdev.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 ROHM Semiconductors
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/mfd/rohm-generic.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/regmap.h>
/* clk control registers */
/* BD71815 */
#define BD71815_REG_OUT32K 0x1d
/* BD71828 */
#define BD71828_REG_OUT32K 0x4B
/* BD71837 and BD71847 */
#define BD718XX_REG_OUT32K 0x2E
/*
 * BD71837, BD71847, and BD71828 all use bit [0] for clk output control
*/
#define CLK_OUT_EN_MASK BIT(0)
struct bd718xx_clk {
struct clk_hw hw;
u8 reg;
u8 mask;
struct platform_device *pdev;
struct regmap *regmap;
};
static int bd71837_clk_set(struct bd718xx_clk *c, unsigned int status)
{
return regmap_update_bits(c->regmap, c->reg, c->mask, status);
}
static void bd71837_clk_disable(struct clk_hw *hw)
{
int rv;
struct bd718xx_clk *c = container_of(hw, struct bd718xx_clk, hw);
rv = bd71837_clk_set(c, 0);
if (rv)
dev_dbg(&c->pdev->dev, "Failed to disable 32K clk (%d)\n", rv);
}
static int bd71837_clk_enable(struct clk_hw *hw)
{
struct bd718xx_clk *c = container_of(hw, struct bd718xx_clk, hw);
return bd71837_clk_set(c, 0xffffffff);
}
static int bd71837_clk_is_enabled(struct clk_hw *hw)
{
	unsigned int enabled;
int rval;
struct bd718xx_clk *c = container_of(hw, struct bd718xx_clk, hw);
rval = regmap_read(c->regmap, c->reg, &enabled);
if (rval)
return rval;
return enabled & c->mask;
}
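/*
 * Register access goes through regmap over I2C and may sleep, so the
 * gate is exposed through the sleepable prepare/unprepare callbacks
 * rather than enable/disable.
 */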
static const struct clk_ops bd71837_clk_ops = {
.prepare = &bd71837_clk_enable,
.unprepare = &bd71837_clk_disable,
.is_prepared = &bd71837_clk_is_enabled,
};
static int bd71837_clk_probe(struct platform_device *pdev)
{
struct bd718xx_clk *c;
	int rval;
const char *parent_clk;
struct device *parent = pdev->dev.parent;
struct clk_init_data init = {
.name = "bd718xx-32k-out",
.ops = &bd71837_clk_ops,
};
enum rohm_chip_type chip = platform_get_device_id(pdev)->driver_data;
c = devm_kzalloc(&pdev->dev, sizeof(*c), GFP_KERNEL);
if (!c)
return -ENOMEM;
c->regmap = dev_get_regmap(pdev->dev.parent, NULL);
if (!c->regmap)
return -ENODEV;
init.num_parents = 1;
parent_clk = of_clk_get_parent_name(parent->of_node, 0);
init.parent_names = &parent_clk;
if (!parent_clk) {
dev_err(&pdev->dev, "No parent clk found\n");
return -EINVAL;
}
switch (chip) {
case ROHM_CHIP_TYPE_BD71837:
case ROHM_CHIP_TYPE_BD71847:
c->reg = BD718XX_REG_OUT32K;
c->mask = CLK_OUT_EN_MASK;
break;
case ROHM_CHIP_TYPE_BD71828:
c->reg = BD71828_REG_OUT32K;
c->mask = CLK_OUT_EN_MASK;
break;
case ROHM_CHIP_TYPE_BD71815:
c->reg = BD71815_REG_OUT32K;
c->mask = CLK_OUT_EN_MASK;
break;
default:
dev_err(&pdev->dev, "Unknown clk chip\n");
return -EINVAL;
}
c->pdev = pdev;
c->hw.init = &init;
of_property_read_string_index(parent->of_node,
"clock-output-names", 0, &init.name);
rval = devm_clk_hw_register(&pdev->dev, &c->hw);
if (rval) {
dev_err(&pdev->dev, "failed to register 32K clk");
return rval;
}
rval = devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_simple_get,
&c->hw);
if (rval)
dev_err(&pdev->dev, "adding clk provider failed\n");
return rval;
}
static const struct platform_device_id bd718x7_clk_id[] = {
{ "bd71837-clk", ROHM_CHIP_TYPE_BD71837 },
{ "bd71847-clk", ROHM_CHIP_TYPE_BD71847 },
{ "bd71828-clk", ROHM_CHIP_TYPE_BD71828 },
{ "bd71815-clk", ROHM_CHIP_TYPE_BD71815 },
{ },
};
MODULE_DEVICE_TABLE(platform, bd718x7_clk_id);
static struct platform_driver bd71837_clk = {
.driver = {
.name = "bd718xx-clk",
},
.probe = bd71837_clk_probe,
.id_table = bd718x7_clk_id,
};
module_platform_driver(bd71837_clk);
MODULE_AUTHOR("Matti Vaittinen <[email protected]>");
MODULE_DESCRIPTION("BD718(15/18/28/37/47/50) and chip clk driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:bd718xx-clk");
| linux-master | drivers/clk/clk-bd718x7.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Driver for Silicon Labs Si514 Programmable Oscillator
*
* Copyright (C) 2015 Topic Embedded Products
*
* Author: Mike Looijmans <[email protected]>
*/
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
#include <linux/slab.h>
/* I2C registers */
#define SI514_REG_LP 0
#define SI514_REG_M_FRAC1 5
#define SI514_REG_M_FRAC2 6
#define SI514_REG_M_FRAC3 7
#define SI514_REG_M_INT_FRAC 8
#define SI514_REG_M_INT 9
#define SI514_REG_HS_DIV 10
#define SI514_REG_LS_HS_DIV 11
#define SI514_REG_OE_STATE 14
#define SI514_REG_RESET 128
#define SI514_REG_CONTROL 132
/* Register values */
#define SI514_RESET_RST BIT(7)
#define SI514_CONTROL_FCAL BIT(0)
#define SI514_CONTROL_OE BIT(2)
#define SI514_MIN_FREQ 100000U
#define SI514_MAX_FREQ 250000000U
#define FXO 31980000U
#define FVCO_MIN 2080000000U
#define FVCO_MAX 2500000000U
#define HS_DIV_MAX 1022
struct clk_si514 {
struct clk_hw hw;
struct regmap *regmap;
struct i2c_client *i2c_client;
};
#define to_clk_si514(_hw) container_of(_hw, struct clk_si514, hw)
/* Multiplier/divider settings */
struct clk_si514_muldiv {
u32 m_frac; /* 29-bit Fractional part of multiplier M */
u8 m_int; /* Integer part of multiplier M, 65..78 */
u8 ls_div_bits; /* 2nd divider, as 2^x */
u16 hs_div; /* 1st divider, must be even and 10<=x<=1022 */
};
/* Enables or disables the output driver */
static int si514_enable_output(struct clk_si514 *data, bool enable)
{
return regmap_update_bits(data->regmap, SI514_REG_CONTROL,
SI514_CONTROL_OE, enable ? SI514_CONTROL_OE : 0);
}
static int si514_prepare(struct clk_hw *hw)
{
struct clk_si514 *data = to_clk_si514(hw);
return si514_enable_output(data, true);
}
static void si514_unprepare(struct clk_hw *hw)
{
struct clk_si514 *data = to_clk_si514(hw);
si514_enable_output(data, false);
}
static int si514_is_prepared(struct clk_hw *hw)
{
struct clk_si514 *data = to_clk_si514(hw);
unsigned int val;
int err;
err = regmap_read(data->regmap, SI514_REG_CONTROL, &val);
if (err < 0)
return err;
return !!(val & SI514_CONTROL_OE);
}
/* Retrieve clock multiplier and dividers from hardware */
static int si514_get_muldiv(struct clk_si514 *data,
struct clk_si514_muldiv *settings)
{
int err;
u8 reg[7];
err = regmap_bulk_read(data->regmap, SI514_REG_M_FRAC1,
reg, ARRAY_SIZE(reg));
if (err)
return err;
settings->m_frac = reg[0] | reg[1] << 8 | reg[2] << 16 |
(reg[3] & 0x1F) << 24;
settings->m_int = (reg[4] & 0x3f) << 3 | reg[3] >> 5;
settings->ls_div_bits = (reg[6] >> 4) & 0x07;
settings->hs_div = (reg[6] & 0x03) << 8 | reg[5];
return 0;
}
static int si514_set_muldiv(struct clk_si514 *data,
struct clk_si514_muldiv *settings)
{
u8 lp;
u8 reg[7];
int err;
/* Calculate LP1/LP2 according to table 13 in the datasheet */
/* 65.259980246 */
if (settings->m_int < 65 ||
(settings->m_int == 65 && settings->m_frac <= 139575831))
lp = 0x22;
/* 67.859763463 */
else if (settings->m_int < 67 ||
(settings->m_int == 67 && settings->m_frac <= 461581994))
lp = 0x23;
/* 72.937624981 */
else if (settings->m_int < 72 ||
(settings->m_int == 72 && settings->m_frac <= 503383578))
lp = 0x33;
/* 75.843265046 */
else if (settings->m_int < 75 ||
(settings->m_int == 75 && settings->m_frac <= 452724474))
lp = 0x34;
else
lp = 0x44;
err = regmap_write(data->regmap, SI514_REG_LP, lp);
if (err < 0)
return err;
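	/*
	 * Pack the 29-bit M fraction into reg[0..3]; bits 28:24 share
	 * reg[3] with the low three bits of the M integer, whose upper
	 * bits go into reg[4]. HS_DIV and LS_DIV land in reg[5..6].
	 */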
reg[0] = settings->m_frac;
reg[1] = settings->m_frac >> 8;
reg[2] = settings->m_frac >> 16;
reg[3] = settings->m_frac >> 24 | settings->m_int << 5;
reg[4] = settings->m_int >> 3;
reg[5] = settings->hs_div;
reg[6] = (settings->hs_div >> 8) | (settings->ls_div_bits << 4);
err = regmap_bulk_write(data->regmap, SI514_REG_HS_DIV, reg + 5, 2);
if (err < 0)
return err;
/*
* Writing to SI514_REG_M_INT_FRAC triggers the clock change, so that
* must be written last
*/
return regmap_bulk_write(data->regmap, SI514_REG_M_FRAC1, reg, 5);
}
/* Calculate divider settings for a given frequency */
static int si514_calc_muldiv(struct clk_si514_muldiv *settings,
unsigned long frequency)
{
u64 m;
u32 ls_freq;
u32 tmp;
u8 res;
if ((frequency < SI514_MIN_FREQ) || (frequency > SI514_MAX_FREQ))
return -EINVAL;
/* Determine the minimum value of LS_DIV and resulting target freq. */
ls_freq = frequency;
if (frequency >= (FVCO_MIN / HS_DIV_MAX))
settings->ls_div_bits = 0;
else {
res = 1;
tmp = 2 * HS_DIV_MAX;
while (tmp <= (HS_DIV_MAX * 32)) {
if ((frequency * tmp) >= FVCO_MIN)
break;
++res;
tmp <<= 1;
}
settings->ls_div_bits = res;
ls_freq = frequency << res;
}
/* Determine minimum HS_DIV, round up to even number */
settings->hs_div = DIV_ROUND_UP(FVCO_MIN >> 1, ls_freq) << 1;
/* M = LS_DIV x HS_DIV x frequency / F_XO (in fixed-point) */
m = ((u64)(ls_freq * settings->hs_div) << 29) + (FXO / 2);
do_div(m, FXO);
settings->m_frac = (u32)m & (BIT(29) - 1);
settings->m_int = (u32)(m >> 29);
return 0;
}
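/*
 * Worked example (illustrative): for rate = 10 MHz, the rate is above
 * FVCO_MIN / HS_DIV_MAX (~2.035 MHz), so ls_div_bits = 0. Then
 * hs_div = DIV_ROUND_UP(1040000000, 10000000) * 2 = 208 and
 * M = 10 MHz * 208 / 31.98 MHz ~= 65.04, within the valid 65..78 range.
 */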
/* Calculate resulting frequency given the register settings */
static unsigned long si514_calc_rate(struct clk_si514_muldiv *settings)
{
u64 m = settings->m_frac | ((u64)settings->m_int << 29);
u32 d = settings->hs_div * BIT(settings->ls_div_bits);
return ((u32)(((m * FXO) + (FXO / 2)) >> 29)) / d;
}
static unsigned long si514_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_si514 *data = to_clk_si514(hw);
struct clk_si514_muldiv settings;
int err;
err = si514_get_muldiv(data, &settings);
if (err) {
dev_err(&data->i2c_client->dev, "unable to retrieve settings\n");
return 0;
}
return si514_calc_rate(&settings);
}
static long si514_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
struct clk_si514_muldiv settings;
int err;
if (!rate)
return 0;
err = si514_calc_muldiv(&settings, rate);
if (err)
return err;
return si514_calc_rate(&settings);
}
/*
* Update output frequency for big frequency changes (> 1000 ppm).
 * The chip supports <1000 ppm changes "on the fly"; that is not
 * implemented here.
*/
static int si514_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_si514 *data = to_clk_si514(hw);
struct clk_si514_muldiv settings;
unsigned int old_oe_state;
int err;
err = si514_calc_muldiv(&settings, rate);
if (err)
return err;
err = regmap_read(data->regmap, SI514_REG_CONTROL, &old_oe_state);
if (err)
return err;
si514_enable_output(data, false);
err = si514_set_muldiv(data, &settings);
if (err < 0)
return err; /* Undefined state now, best to leave disabled */
/* Trigger calibration */
err = regmap_write(data->regmap, SI514_REG_CONTROL, SI514_CONTROL_FCAL);
if (err < 0)
return err;
/* Applying a new frequency can take up to 10ms */
usleep_range(10000, 12000);
if (old_oe_state & SI514_CONTROL_OE)
si514_enable_output(data, true);
return err;
}
static const struct clk_ops si514_clk_ops = {
.prepare = si514_prepare,
.unprepare = si514_unprepare,
.is_prepared = si514_is_prepared,
.recalc_rate = si514_recalc_rate,
.round_rate = si514_round_rate,
.set_rate = si514_set_rate,
};
static bool si514_regmap_is_volatile(struct device *dev, unsigned int reg)
{
switch (reg) {
case SI514_REG_CONTROL:
case SI514_REG_RESET:
return true;
default:
return false;
}
}
static bool si514_regmap_is_writeable(struct device *dev, unsigned int reg)
{
switch (reg) {
case SI514_REG_LP:
case SI514_REG_M_FRAC1 ... SI514_REG_LS_HS_DIV:
case SI514_REG_OE_STATE:
case SI514_REG_RESET:
case SI514_REG_CONTROL:
return true;
default:
return false;
}
}
static const struct regmap_config si514_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.cache_type = REGCACHE_RBTREE,
.max_register = SI514_REG_CONTROL,
.writeable_reg = si514_regmap_is_writeable,
.volatile_reg = si514_regmap_is_volatile,
};
static int si514_probe(struct i2c_client *client)
{
struct clk_si514 *data;
struct clk_init_data init;
int err;
data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
init.ops = &si514_clk_ops;
init.flags = 0;
init.num_parents = 0;
data->hw.init = &init;
data->i2c_client = client;
if (of_property_read_string(client->dev.of_node, "clock-output-names",
&init.name))
init.name = client->dev.of_node->name;
data->regmap = devm_regmap_init_i2c(client, &si514_regmap_config);
if (IS_ERR(data->regmap)) {
dev_err(&client->dev, "failed to allocate register map\n");
return PTR_ERR(data->regmap);
}
i2c_set_clientdata(client, data);
err = devm_clk_hw_register(&client->dev, &data->hw);
if (err) {
dev_err(&client->dev, "clock registration failed\n");
return err;
}
err = devm_of_clk_add_hw_provider(&client->dev, of_clk_hw_simple_get,
&data->hw);
if (err) {
dev_err(&client->dev, "unable to add clk provider\n");
return err;
}
return 0;
}
static const struct i2c_device_id si514_id[] = {
{ "si514", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, si514_id);
static const struct of_device_id clk_si514_of_match[] = {
{ .compatible = "silabs,si514" },
{ },
};
MODULE_DEVICE_TABLE(of, clk_si514_of_match);
static struct i2c_driver si514_driver = {
.driver = {
.name = "si514",
.of_match_table = clk_si514_of_match,
},
.probe = si514_probe,
.id_table = si514_id,
};
module_i2c_driver(si514_driver);
MODULE_AUTHOR("Mike Looijmans <[email protected]>");
MODULE_DESCRIPTION("Si514 driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/clk/clk-si514.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2013 - 2014 Texas Instruments Incorporated - https://www.ti.com
*
* Authors:
* Jyri Sarha <[email protected]>
* Sergej Sawazki <[email protected]>
*
* Gpio controlled clock implementation
*/
#include <linux/clk-provider.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/gpio/consumer.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/platform_device.h>
/**
* DOC: basic gpio gated clock which can be enabled and disabled
* with gpio output
* Traits of this clock:
* prepare - clk_(un)prepare only ensures parent is (un)prepared
* enable - clk_enable and clk_disable are functional & control gpio
* rate - inherits rate from parent. No clk_set_rate support
* parent - fixed parent. No clk_set_parent support
*/
/**
* struct clk_gpio - gpio gated clock
*
* @hw: handle between common and hardware-specific interfaces
* @gpiod: gpio descriptor
*
* Clock with a gpio control for enabling and disabling the parent clock
* or switching between two parents by asserting or deasserting the gpio.
*
* Implements .enable, .disable and .is_enabled or
* .get_parent, .set_parent and .determine_rate depending on which clk_ops
* is used.
*/
struct clk_gpio {
struct clk_hw hw;
struct gpio_desc *gpiod;
};
#define to_clk_gpio(_hw) container_of(_hw, struct clk_gpio, hw)
static int clk_gpio_gate_enable(struct clk_hw *hw)
{
struct clk_gpio *clk = to_clk_gpio(hw);
gpiod_set_value(clk->gpiod, 1);
return 0;
}
static void clk_gpio_gate_disable(struct clk_hw *hw)
{
struct clk_gpio *clk = to_clk_gpio(hw);
gpiod_set_value(clk->gpiod, 0);
}
static int clk_gpio_gate_is_enabled(struct clk_hw *hw)
{
struct clk_gpio *clk = to_clk_gpio(hw);
return gpiod_get_value(clk->gpiod);
}
static const struct clk_ops clk_gpio_gate_ops = {
.enable = clk_gpio_gate_enable,
.disable = clk_gpio_gate_disable,
.is_enabled = clk_gpio_gate_is_enabled,
};
static int clk_sleeping_gpio_gate_prepare(struct clk_hw *hw)
{
struct clk_gpio *clk = to_clk_gpio(hw);
gpiod_set_value_cansleep(clk->gpiod, 1);
return 0;
}
static void clk_sleeping_gpio_gate_unprepare(struct clk_hw *hw)
{
struct clk_gpio *clk = to_clk_gpio(hw);
gpiod_set_value_cansleep(clk->gpiod, 0);
}
static int clk_sleeping_gpio_gate_is_prepared(struct clk_hw *hw)
{
struct clk_gpio *clk = to_clk_gpio(hw);
return gpiod_get_value_cansleep(clk->gpiod);
}
static const struct clk_ops clk_sleeping_gpio_gate_ops = {
.prepare = clk_sleeping_gpio_gate_prepare,
.unprepare = clk_sleeping_gpio_gate_unprepare,
.is_prepared = clk_sleeping_gpio_gate_is_prepared,
};
/**
* DOC: basic clock multiplexer which can be controlled with a gpio output
* Traits of this clock:
* prepare - clk_prepare only ensures that parents are prepared
* rate - rate is only affected by parent switching. No clk_set_rate support
* parent - parent is adjustable through clk_set_parent
*/
static u8 clk_gpio_mux_get_parent(struct clk_hw *hw)
{
struct clk_gpio *clk = to_clk_gpio(hw);
return gpiod_get_value_cansleep(clk->gpiod);
}
static int clk_gpio_mux_set_parent(struct clk_hw *hw, u8 index)
{
struct clk_gpio *clk = to_clk_gpio(hw);
gpiod_set_value_cansleep(clk->gpiod, index);
return 0;
}
static const struct clk_ops clk_gpio_mux_ops = {
.get_parent = clk_gpio_mux_get_parent,
.set_parent = clk_gpio_mux_set_parent,
.determine_rate = __clk_mux_determine_rate,
};
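/*
 * Illustrative devicetree usage (parent clock names are made up):
 *
 *	clk_mux: clock {
 *		compatible = "gpio-mux-clock";
 *		#clock-cells = <0>;
 *		clocks = <&parent_a>, <&parent_b>;
 *		select-gpios = <&gpio 1 GPIO_ACTIVE_HIGH>;
 *	};
 *
 * A "gpio-gate-clock" node looks the same but takes a single parent
 * and an enable-gpios property instead of select-gpios.
 */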
static struct clk_hw *clk_register_gpio(struct device *dev, u8 num_parents,
struct gpio_desc *gpiod,
const struct clk_ops *clk_gpio_ops)
{
struct clk_gpio *clk_gpio;
struct clk_hw *hw;
struct clk_init_data init = {};
int err;
const struct clk_parent_data gpio_parent_data[] = {
{ .index = 0 },
{ .index = 1 },
};
clk_gpio = devm_kzalloc(dev, sizeof(*clk_gpio), GFP_KERNEL);
if (!clk_gpio)
return ERR_PTR(-ENOMEM);
init.name = dev->of_node->name;
init.ops = clk_gpio_ops;
init.parent_data = gpio_parent_data;
init.num_parents = num_parents;
init.flags = CLK_SET_RATE_PARENT;
clk_gpio->gpiod = gpiod;
clk_gpio->hw.init = &init;
hw = &clk_gpio->hw;
err = devm_clk_hw_register(dev, hw);
if (err)
return ERR_PTR(err);
return hw;
}
static struct clk_hw *clk_hw_register_gpio_gate(struct device *dev,
int num_parents,
struct gpio_desc *gpiod)
{
const struct clk_ops *ops;
if (gpiod_cansleep(gpiod))
ops = &clk_sleeping_gpio_gate_ops;
else
ops = &clk_gpio_gate_ops;
return clk_register_gpio(dev, num_parents, gpiod, ops);
}
static struct clk_hw *clk_hw_register_gpio_mux(struct device *dev,
struct gpio_desc *gpiod)
{
return clk_register_gpio(dev, 2, gpiod, &clk_gpio_mux_ops);
}
static int gpio_clk_driver_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
const char *gpio_name;
unsigned int num_parents;
struct gpio_desc *gpiod;
struct clk_hw *hw;
bool is_mux;
int ret;
is_mux = of_device_is_compatible(node, "gpio-mux-clock");
num_parents = of_clk_get_parent_count(node);
if (is_mux && num_parents != 2) {
dev_err(dev, "mux-clock must have 2 parents\n");
return -EINVAL;
}
gpio_name = is_mux ? "select" : "enable";
gpiod = devm_gpiod_get(dev, gpio_name, GPIOD_OUT_LOW);
if (IS_ERR(gpiod)) {
ret = PTR_ERR(gpiod);
if (ret == -EPROBE_DEFER)
pr_debug("%pOFn: %s: GPIOs not yet available, retry later\n",
node, __func__);
else
pr_err("%pOFn: %s: Can't get '%s' named GPIO property\n",
node, __func__,
gpio_name);
return ret;
}
if (is_mux)
hw = clk_hw_register_gpio_mux(dev, gpiod);
else
hw = clk_hw_register_gpio_gate(dev, num_parents, gpiod);
if (IS_ERR(hw))
return PTR_ERR(hw);
return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, hw);
}
static const struct of_device_id gpio_clk_match_table[] = {
{ .compatible = "gpio-mux-clock" },
{ .compatible = "gpio-gate-clock" },
{ }
};
static struct platform_driver gpio_clk_driver = {
.probe = gpio_clk_driver_probe,
.driver = {
.name = "gpio-clk",
.of_match_table = gpio_clk_match_table,
},
};
builtin_platform_driver(gpio_clk_driver);
| linux-master | drivers/clk/clk-gpio.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Kunit test for clk gate basic type
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/platform_device.h>
#include <kunit/test.h>
static void clk_gate_register_test_dev(struct kunit *test)
{
struct clk_hw *ret;
struct platform_device *pdev;
pdev = platform_device_register_simple("test_gate_device", -1, NULL, 0);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
ret = clk_hw_register_gate(&pdev->dev, "test_gate", NULL, 0, NULL,
0, 0, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ret);
KUNIT_EXPECT_STREQ(test, "test_gate", clk_hw_get_name(ret));
KUNIT_EXPECT_EQ(test, 0UL, clk_hw_get_flags(ret));
clk_hw_unregister_gate(ret);
platform_device_put(pdev);
}
static void clk_gate_register_test_parent_names(struct kunit *test)
{
struct clk_hw *parent;
struct clk_hw *ret;
parent = clk_hw_register_fixed_rate(NULL, "test_parent", NULL, 0,
1000000);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
ret = clk_hw_register_gate(NULL, "test_gate", "test_parent", 0, NULL,
0, 0, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ret);
KUNIT_EXPECT_PTR_EQ(test, parent, clk_hw_get_parent(ret));
clk_hw_unregister_gate(ret);
clk_hw_unregister_fixed_rate(parent);
}
static void clk_gate_register_test_parent_data(struct kunit *test)
{
struct clk_hw *parent;
struct clk_hw *ret;
struct clk_parent_data pdata = { };
parent = clk_hw_register_fixed_rate(NULL, "test_parent", NULL, 0,
1000000);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
pdata.hw = parent;
ret = clk_hw_register_gate_parent_data(NULL, "test_gate", &pdata, 0,
NULL, 0, 0, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ret);
KUNIT_EXPECT_PTR_EQ(test, parent, clk_hw_get_parent(ret));
clk_hw_unregister_gate(ret);
clk_hw_unregister_fixed_rate(parent);
}
static void clk_gate_register_test_parent_data_legacy(struct kunit *test)
{
struct clk_hw *parent;
struct clk_hw *ret;
struct clk_parent_data pdata = { };
parent = clk_hw_register_fixed_rate(NULL, "test_parent", NULL, 0,
1000000);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
pdata.name = "test_parent";
ret = clk_hw_register_gate_parent_data(NULL, "test_gate", &pdata, 0,
NULL, 0, 0, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ret);
KUNIT_EXPECT_PTR_EQ(test, parent, clk_hw_get_parent(ret));
clk_hw_unregister_gate(ret);
clk_hw_unregister_fixed_rate(parent);
}
static void clk_gate_register_test_parent_hw(struct kunit *test)
{
struct clk_hw *parent;
struct clk_hw *ret;
parent = clk_hw_register_fixed_rate(NULL, "test_parent", NULL, 0,
1000000);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
ret = clk_hw_register_gate_parent_hw(NULL, "test_gate", parent, 0, NULL,
0, 0, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ret);
KUNIT_EXPECT_PTR_EQ(test, parent, clk_hw_get_parent(ret));
clk_hw_unregister_gate(ret);
clk_hw_unregister_fixed_rate(parent);
}
static void clk_gate_register_test_hiword_invalid(struct kunit *test)
{
struct clk_hw *ret;
ret = clk_hw_register_gate(NULL, "test_gate", NULL, 0, NULL,
20, CLK_GATE_HIWORD_MASK, NULL);
KUNIT_EXPECT_TRUE(test, IS_ERR(ret));
}
static struct kunit_case clk_gate_register_test_cases[] = {
KUNIT_CASE(clk_gate_register_test_dev),
KUNIT_CASE(clk_gate_register_test_parent_names),
KUNIT_CASE(clk_gate_register_test_parent_data),
KUNIT_CASE(clk_gate_register_test_parent_data_legacy),
KUNIT_CASE(clk_gate_register_test_parent_hw),
KUNIT_CASE(clk_gate_register_test_hiword_invalid),
{}
};
static struct kunit_suite clk_gate_register_test_suite = {
.name = "clk-gate-register-test",
.test_cases = clk_gate_register_test_cases,
};
struct clk_gate_test_context {
void __iomem *fake_mem;
struct clk_hw *hw;
struct clk_hw *parent;
	u32 fake_reg; /* Keep at end so KASAN can detect out-of-bounds access */
};
static struct clk_gate_test_context *clk_gate_test_alloc_ctx(struct kunit *test)
{
struct clk_gate_test_context *ctx;
test->priv = ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
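	/*
	 * Point the "MMIO" register at ordinary kernel memory so the
	 * gate's readl()/writel() accesses land in ctx->fake_reg, where
	 * the tests can assert on them directly.
	 */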
ctx->fake_mem = (void __force __iomem *)&ctx->fake_reg;
return ctx;
}
static void clk_gate_test_parent_rate(struct kunit *test)
{
struct clk_gate_test_context *ctx = test->priv;
struct clk_hw *parent = ctx->parent;
struct clk_hw *hw = ctx->hw;
unsigned long prate = clk_hw_get_rate(parent);
unsigned long rate = clk_hw_get_rate(hw);
KUNIT_EXPECT_EQ(test, prate, rate);
}
static void clk_gate_test_enable(struct kunit *test)
{
struct clk_gate_test_context *ctx = test->priv;
struct clk_hw *parent = ctx->parent;
struct clk_hw *hw = ctx->hw;
struct clk *clk = hw->clk;
u32 enable_val = BIT(5);
KUNIT_ASSERT_EQ(test, clk_prepare_enable(clk), 0);
KUNIT_EXPECT_EQ(test, enable_val, ctx->fake_reg);
KUNIT_EXPECT_TRUE(test, clk_hw_is_enabled(hw));
KUNIT_EXPECT_TRUE(test, clk_hw_is_prepared(hw));
KUNIT_EXPECT_TRUE(test, clk_hw_is_enabled(parent));
KUNIT_EXPECT_TRUE(test, clk_hw_is_prepared(parent));
}
static void clk_gate_test_disable(struct kunit *test)
{
struct clk_gate_test_context *ctx = test->priv;
struct clk_hw *parent = ctx->parent;
struct clk_hw *hw = ctx->hw;
struct clk *clk = hw->clk;
u32 enable_val = BIT(5);
u32 disable_val = 0;
KUNIT_ASSERT_EQ(test, clk_prepare_enable(clk), 0);
KUNIT_ASSERT_EQ(test, enable_val, ctx->fake_reg);
clk_disable_unprepare(clk);
KUNIT_EXPECT_EQ(test, disable_val, ctx->fake_reg);
KUNIT_EXPECT_FALSE(test, clk_hw_is_enabled(hw));
KUNIT_EXPECT_FALSE(test, clk_hw_is_prepared(hw));
KUNIT_EXPECT_FALSE(test, clk_hw_is_enabled(parent));
KUNIT_EXPECT_FALSE(test, clk_hw_is_prepared(parent));
}
static struct kunit_case clk_gate_test_cases[] = {
KUNIT_CASE(clk_gate_test_parent_rate),
KUNIT_CASE(clk_gate_test_enable),
KUNIT_CASE(clk_gate_test_disable),
{}
};
static int clk_gate_test_init(struct kunit *test)
{
struct clk_hw *parent;
struct clk_hw *hw;
struct clk_gate_test_context *ctx;
ctx = clk_gate_test_alloc_ctx(test);
parent = clk_hw_register_fixed_rate(NULL, "test_parent", NULL, 0,
2000000);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
hw = clk_hw_register_gate_parent_hw(NULL, "test_gate", parent, 0,
ctx->fake_mem, 5, 0, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
ctx->hw = hw;
ctx->parent = parent;
return 0;
}
static void clk_gate_test_exit(struct kunit *test)
{
struct clk_gate_test_context *ctx = test->priv;
clk_hw_unregister_gate(ctx->hw);
clk_hw_unregister_fixed_rate(ctx->parent);
}
static struct kunit_suite clk_gate_test_suite = {
.name = "clk-gate-test",
.init = clk_gate_test_init,
.exit = clk_gate_test_exit,
.test_cases = clk_gate_test_cases,
};
static void clk_gate_test_invert_enable(struct kunit *test)
{
struct clk_gate_test_context *ctx = test->priv;
struct clk_hw *parent = ctx->parent;
struct clk_hw *hw = ctx->hw;
struct clk *clk = hw->clk;
u32 enable_val = 0;
KUNIT_ASSERT_EQ(test, clk_prepare_enable(clk), 0);
KUNIT_EXPECT_EQ(test, enable_val, ctx->fake_reg);
KUNIT_EXPECT_TRUE(test, clk_hw_is_enabled(hw));
KUNIT_EXPECT_TRUE(test, clk_hw_is_prepared(hw));
KUNIT_EXPECT_TRUE(test, clk_hw_is_enabled(parent));
KUNIT_EXPECT_TRUE(test, clk_hw_is_prepared(parent));
}
static void clk_gate_test_invert_disable(struct kunit *test)
{
struct clk_gate_test_context *ctx = test->priv;
struct clk_hw *parent = ctx->parent;
struct clk_hw *hw = ctx->hw;
struct clk *clk = hw->clk;
u32 enable_val = 0;
u32 disable_val = BIT(15);
KUNIT_ASSERT_EQ(test, clk_prepare_enable(clk), 0);
KUNIT_ASSERT_EQ(test, enable_val, ctx->fake_reg);
clk_disable_unprepare(clk);
KUNIT_EXPECT_EQ(test, disable_val, ctx->fake_reg);
KUNIT_EXPECT_FALSE(test, clk_hw_is_enabled(hw));
KUNIT_EXPECT_FALSE(test, clk_hw_is_prepared(hw));
KUNIT_EXPECT_FALSE(test, clk_hw_is_enabled(parent));
KUNIT_EXPECT_FALSE(test, clk_hw_is_prepared(parent));
}
static struct kunit_case clk_gate_test_invert_cases[] = {
KUNIT_CASE(clk_gate_test_invert_enable),
KUNIT_CASE(clk_gate_test_invert_disable),
{}
};
static int clk_gate_test_invert_init(struct kunit *test)
{
struct clk_hw *parent;
struct clk_hw *hw;
struct clk_gate_test_context *ctx;
ctx = clk_gate_test_alloc_ctx(test);
parent = clk_hw_register_fixed_rate(NULL, "test_parent", NULL, 0,
2000000);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
ctx->fake_reg = BIT(15); /* Default to off */
hw = clk_hw_register_gate_parent_hw(NULL, "test_gate", parent, 0,
ctx->fake_mem, 15,
CLK_GATE_SET_TO_DISABLE, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
ctx->hw = hw;
ctx->parent = parent;
return 0;
}
static struct kunit_suite clk_gate_test_invert_suite = {
.name = "clk-gate-invert-test",
.init = clk_gate_test_invert_init,
.exit = clk_gate_test_exit,
.test_cases = clk_gate_test_invert_cases,
};
static void clk_gate_test_hiword_enable(struct kunit *test)
{
struct clk_gate_test_context *ctx = test->priv;
struct clk_hw *parent = ctx->parent;
struct clk_hw *hw = ctx->hw;
struct clk *clk = hw->clk;
u32 enable_val = BIT(9) | BIT(9 + 16);
KUNIT_ASSERT_EQ(test, clk_prepare_enable(clk), 0);
KUNIT_EXPECT_EQ(test, enable_val, ctx->fake_reg);
KUNIT_EXPECT_TRUE(test, clk_hw_is_enabled(hw));
KUNIT_EXPECT_TRUE(test, clk_hw_is_prepared(hw));
KUNIT_EXPECT_TRUE(test, clk_hw_is_enabled(parent));
KUNIT_EXPECT_TRUE(test, clk_hw_is_prepared(parent));
}
static void clk_gate_test_hiword_disable(struct kunit *test)
{
struct clk_gate_test_context *ctx = test->priv;
struct clk_hw *parent = ctx->parent;
struct clk_hw *hw = ctx->hw;
struct clk *clk = hw->clk;
u32 enable_val = BIT(9) | BIT(9 + 16);
u32 disable_val = BIT(9 + 16);
KUNIT_ASSERT_EQ(test, clk_prepare_enable(clk), 0);
KUNIT_ASSERT_EQ(test, enable_val, ctx->fake_reg);
clk_disable_unprepare(clk);
KUNIT_EXPECT_EQ(test, disable_val, ctx->fake_reg);
KUNIT_EXPECT_FALSE(test, clk_hw_is_enabled(hw));
KUNIT_EXPECT_FALSE(test, clk_hw_is_prepared(hw));
KUNIT_EXPECT_FALSE(test, clk_hw_is_enabled(parent));
KUNIT_EXPECT_FALSE(test, clk_hw_is_prepared(parent));
}
static struct kunit_case clk_gate_test_hiword_cases[] = {
KUNIT_CASE(clk_gate_test_hiword_enable),
KUNIT_CASE(clk_gate_test_hiword_disable),
{}
};
static int clk_gate_test_hiword_init(struct kunit *test)
{
struct clk_hw *parent;
struct clk_hw *hw;
struct clk_gate_test_context *ctx;
ctx = clk_gate_test_alloc_ctx(test);
parent = clk_hw_register_fixed_rate(NULL, "test_parent", NULL, 0,
2000000);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
hw = clk_hw_register_gate_parent_hw(NULL, "test_gate", parent, 0,
ctx->fake_mem, 9,
CLK_GATE_HIWORD_MASK, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
ctx->hw = hw;
ctx->parent = parent;
return 0;
}
static struct kunit_suite clk_gate_test_hiword_suite = {
.name = "clk-gate-hiword-test",
.init = clk_gate_test_hiword_init,
.exit = clk_gate_test_exit,
.test_cases = clk_gate_test_hiword_cases,
};
static void clk_gate_test_is_enabled(struct kunit *test)
{
struct clk_hw *hw;
struct clk_gate_test_context *ctx;
ctx = clk_gate_test_alloc_ctx(test);
ctx->fake_reg = BIT(7);
hw = clk_hw_register_gate(NULL, "test_gate", NULL, 0, ctx->fake_mem, 7,
0, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
KUNIT_ASSERT_TRUE(test, clk_hw_is_enabled(hw));
clk_hw_unregister_gate(hw);
}
static void clk_gate_test_is_disabled(struct kunit *test)
{
struct clk_hw *hw;
struct clk_gate_test_context *ctx;
ctx = clk_gate_test_alloc_ctx(test);
ctx->fake_reg = BIT(4);
hw = clk_hw_register_gate(NULL, "test_gate", NULL, 0, ctx->fake_mem, 7,
0, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
KUNIT_ASSERT_FALSE(test, clk_hw_is_enabled(hw));
clk_hw_unregister_gate(hw);
}
static void clk_gate_test_is_enabled_inverted(struct kunit *test)
{
struct clk_hw *hw;
struct clk_gate_test_context *ctx;
ctx = clk_gate_test_alloc_ctx(test);
ctx->fake_reg = BIT(31);
hw = clk_hw_register_gate(NULL, "test_gate", NULL, 0, ctx->fake_mem, 2,
CLK_GATE_SET_TO_DISABLE, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
KUNIT_ASSERT_TRUE(test, clk_hw_is_enabled(hw));
clk_hw_unregister_gate(hw);
}
static void clk_gate_test_is_disabled_inverted(struct kunit *test)
{
struct clk_hw *hw;
struct clk_gate_test_context *ctx;
ctx = clk_gate_test_alloc_ctx(test);
ctx->fake_reg = BIT(29);
hw = clk_hw_register_gate(NULL, "test_gate", NULL, 0, ctx->fake_mem, 29,
CLK_GATE_SET_TO_DISABLE, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
KUNIT_ASSERT_FALSE(test, clk_hw_is_enabled(hw));
clk_hw_unregister_gate(hw);
}
static struct kunit_case clk_gate_test_enabled_cases[] = {
KUNIT_CASE(clk_gate_test_is_enabled),
KUNIT_CASE(clk_gate_test_is_disabled),
KUNIT_CASE(clk_gate_test_is_enabled_inverted),
KUNIT_CASE(clk_gate_test_is_disabled_inverted),
{}
};
static struct kunit_suite clk_gate_test_enabled_suite = {
.name = "clk-gate-is_enabled-test",
.test_cases = clk_gate_test_enabled_cases,
};
kunit_test_suites(
&clk_gate_register_test_suite,
&clk_gate_test_suite,
&clk_gate_test_invert_suite,
&clk_gate_test_hiword_suite,
&clk_gate_test_enabled_suite
);
MODULE_LICENSE("GPL v2");
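/*
 * These suites can be run with the KUnit wrapper, e.g.
 * ./tools/testing/kunit/kunit.py run; the exact invocation depends on
 * the kernel's kunitconfig.
 */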
| linux-master | drivers/clk/clk-gate_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2011 Sascha Hauer, Pengutronix <[email protected]>
* Copyright (C) 2011 Richard Zhao, Linaro <[email protected]>
* Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <[email protected]>
*
* Adjustable divider clock implementation
*/
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/log2.h>
/*
* DOC: basic adjustable divider clock that cannot gate
*
* Traits of this clock:
* prepare - clk_prepare only ensures that parents are prepared
* enable - clk_enable only ensures that parents are enabled
* rate - rate is adjustable. clk->rate = ceiling(parent->rate / divisor)
* parent - fixed parent. No clk_set_parent support
*/
static inline u32 clk_div_readl(struct clk_divider *divider)
{
if (divider->flags & CLK_DIVIDER_BIG_ENDIAN)
return ioread32be(divider->reg);
return readl(divider->reg);
}
static inline void clk_div_writel(struct clk_divider *divider, u32 val)
{
if (divider->flags & CLK_DIVIDER_BIG_ENDIAN)
iowrite32be(val, divider->reg);
else
writel(val, divider->reg);
}
static unsigned int _get_table_maxdiv(const struct clk_div_table *table,
u8 width)
{
unsigned int maxdiv = 0, mask = clk_div_mask(width);
const struct clk_div_table *clkt;
for (clkt = table; clkt->div; clkt++)
if (clkt->div > maxdiv && clkt->val <= mask)
maxdiv = clkt->div;
return maxdiv;
}
static unsigned int _get_table_mindiv(const struct clk_div_table *table)
{
unsigned int mindiv = UINT_MAX;
const struct clk_div_table *clkt;
for (clkt = table; clkt->div; clkt++)
if (clkt->div < mindiv)
mindiv = clkt->div;
return mindiv;
}
static unsigned int _get_maxdiv(const struct clk_div_table *table, u8 width,
unsigned long flags)
{
if (flags & CLK_DIVIDER_ONE_BASED)
return clk_div_mask(width);
if (flags & CLK_DIVIDER_POWER_OF_TWO)
return 1 << clk_div_mask(width);
if (table)
return _get_table_maxdiv(table, width);
return clk_div_mask(width) + 1;
}
static unsigned int _get_table_div(const struct clk_div_table *table,
unsigned int val)
{
const struct clk_div_table *clkt;
for (clkt = table; clkt->div; clkt++)
if (clkt->val == val)
return clkt->div;
return 0;
}
static unsigned int _get_div(const struct clk_div_table *table,
unsigned int val, unsigned long flags, u8 width)
{
if (flags & CLK_DIVIDER_ONE_BASED)
return val;
if (flags & CLK_DIVIDER_POWER_OF_TWO)
return 1 << val;
if (flags & CLK_DIVIDER_MAX_AT_ZERO)
return val ? val : clk_div_mask(width) + 1;
if (table)
return _get_table_div(table, val);
return val + 1;
}
static unsigned int _get_table_val(const struct clk_div_table *table,
unsigned int div)
{
const struct clk_div_table *clkt;
for (clkt = table; clkt->div; clkt++)
if (clkt->div == div)
return clkt->val;
return 0;
}
static unsigned int _get_val(const struct clk_div_table *table,
unsigned int div, unsigned long flags, u8 width)
{
if (flags & CLK_DIVIDER_ONE_BASED)
return div;
if (flags & CLK_DIVIDER_POWER_OF_TWO)
return __ffs(div);
if (flags & CLK_DIVIDER_MAX_AT_ZERO)
return (div == clk_div_mask(width) + 1) ? 0 : div;
if (table)
return _get_table_val(table, div);
return div - 1;
}
unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate,
unsigned int val,
const struct clk_div_table *table,
unsigned long flags, unsigned long width)
{
unsigned int div;
div = _get_div(table, val, flags, width);
if (!div) {
WARN(!(flags & CLK_DIVIDER_ALLOW_ZERO),
"%s: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n",
clk_hw_get_name(hw));
return parent_rate;
}
return DIV_ROUND_UP_ULL((u64)parent_rate, div);
}
EXPORT_SYMBOL_GPL(divider_recalc_rate);
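/*
 * Example (illustrative): with default flags a register value of 3
 * means a divisor of 4 (val + 1), so a 100 MHz parent yields
 * DIV_ROUND_UP(100000000, 4) = 25 MHz.
 */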
static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_divider *divider = to_clk_divider(hw);
unsigned int val;
val = clk_div_readl(divider) >> divider->shift;
val &= clk_div_mask(divider->width);
return divider_recalc_rate(hw, parent_rate, val, divider->table,
divider->flags, divider->width);
}
static bool _is_valid_table_div(const struct clk_div_table *table,
unsigned int div)
{
const struct clk_div_table *clkt;
for (clkt = table; clkt->div; clkt++)
if (clkt->div == div)
return true;
return false;
}
static bool _is_valid_div(const struct clk_div_table *table, unsigned int div,
unsigned long flags)
{
if (flags & CLK_DIVIDER_POWER_OF_TWO)
return is_power_of_2(div);
if (table)
return _is_valid_table_div(table, div);
return true;
}
static int _round_up_table(const struct clk_div_table *table, int div)
{
const struct clk_div_table *clkt;
int up = INT_MAX;
for (clkt = table; clkt->div; clkt++) {
if (clkt->div == div)
return clkt->div;
else if (clkt->div < div)
continue;
if ((clkt->div - div) < (up - div))
up = clkt->div;
}
return up;
}
static int _round_down_table(const struct clk_div_table *table, int div)
{
const struct clk_div_table *clkt;
int down = _get_table_mindiv(table);
for (clkt = table; clkt->div; clkt++) {
if (clkt->div == div)
return clkt->div;
else if (clkt->div > div)
continue;
if ((div - clkt->div) < (div - down))
down = clkt->div;
}
return down;
}
static int _div_round_up(const struct clk_div_table *table,
unsigned long parent_rate, unsigned long rate,
unsigned long flags)
{
int div = DIV_ROUND_UP_ULL((u64)parent_rate, rate);
if (flags & CLK_DIVIDER_POWER_OF_TWO)
div = __roundup_pow_of_two(div);
if (table)
div = _round_up_table(table, div);
return div;
}
static int _div_round_closest(const struct clk_div_table *table,
unsigned long parent_rate, unsigned long rate,
unsigned long flags)
{
int up, down;
unsigned long up_rate, down_rate;
up = DIV_ROUND_UP_ULL((u64)parent_rate, rate);
down = parent_rate / rate;
if (flags & CLK_DIVIDER_POWER_OF_TWO) {
up = __roundup_pow_of_two(up);
down = __rounddown_pow_of_two(down);
} else if (table) {
up = _round_up_table(table, up);
down = _round_down_table(table, down);
}
up_rate = DIV_ROUND_UP_ULL((u64)parent_rate, up);
down_rate = DIV_ROUND_UP_ULL((u64)parent_rate, down);
return (rate - up_rate) <= (down_rate - rate) ? up : down;
}
static int _div_round(const struct clk_div_table *table,
unsigned long parent_rate, unsigned long rate,
unsigned long flags)
{
if (flags & CLK_DIVIDER_ROUND_CLOSEST)
return _div_round_closest(table, parent_rate, rate, flags);
return _div_round_up(table, parent_rate, rate, flags);
}
static bool _is_best_div(unsigned long rate, unsigned long now,
unsigned long best, unsigned long flags)
{
if (flags & CLK_DIVIDER_ROUND_CLOSEST)
return abs(rate - now) < abs(rate - best);
return now <= rate && now > best;
}
static int _next_div(const struct clk_div_table *table, int div,
unsigned long flags)
{
div++;
if (flags & CLK_DIVIDER_POWER_OF_TWO)
return __roundup_pow_of_two(div);
if (table)
return _round_up_table(table, div);
return div;
}
static int clk_divider_bestdiv(struct clk_hw *hw, struct clk_hw *parent,
unsigned long rate,
unsigned long *best_parent_rate,
const struct clk_div_table *table, u8 width,
unsigned long flags)
{
int i, bestdiv = 0;
unsigned long parent_rate, best = 0, now, maxdiv;
unsigned long parent_rate_saved = *best_parent_rate;
if (!rate)
rate = 1;
maxdiv = _get_maxdiv(table, width, flags);
if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
parent_rate = *best_parent_rate;
bestdiv = _div_round(table, parent_rate, rate, flags);
bestdiv = bestdiv == 0 ? 1 : bestdiv;
bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv;
return bestdiv;
}
/*
* The maximum divider we can use without overflowing
* unsigned long in rate * i below
*/
maxdiv = min(ULONG_MAX / rate, maxdiv);
for (i = _next_div(table, 0, flags); i <= maxdiv;
i = _next_div(table, i, flags)) {
if (rate * i == parent_rate_saved) {
/*
* It's the most ideal case if the requested rate can be
* divided from parent clock without needing to change
* parent rate, so return the divider immediately.
*/
*best_parent_rate = parent_rate_saved;
return i;
}
parent_rate = clk_hw_round_rate(parent, rate * i);
now = DIV_ROUND_UP_ULL((u64)parent_rate, i);
if (_is_best_div(rate, now, best, flags)) {
bestdiv = i;
best = now;
*best_parent_rate = parent_rate;
}
}
if (!bestdiv) {
bestdiv = _get_maxdiv(table, width, flags);
*best_parent_rate = clk_hw_round_rate(parent, 1);
}
return bestdiv;
}
int divider_determine_rate(struct clk_hw *hw, struct clk_rate_request *req,
const struct clk_div_table *table, u8 width,
unsigned long flags)
{
int div;
div = clk_divider_bestdiv(hw, req->best_parent_hw, req->rate,
&req->best_parent_rate, table, width, flags);
req->rate = DIV_ROUND_UP_ULL((u64)req->best_parent_rate, div);
return 0;
}
EXPORT_SYMBOL_GPL(divider_determine_rate);
int divider_ro_determine_rate(struct clk_hw *hw, struct clk_rate_request *req,
const struct clk_div_table *table, u8 width,
unsigned long flags, unsigned int val)
{
int div;
div = _get_div(table, val, flags, width);
/* Even a read-only clock can propagate a rate change */
if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) {
if (!req->best_parent_hw)
return -EINVAL;
req->best_parent_rate = clk_hw_round_rate(req->best_parent_hw,
req->rate * div);
}
req->rate = DIV_ROUND_UP_ULL((u64)req->best_parent_rate, div);
return 0;
}
EXPORT_SYMBOL_GPL(divider_ro_determine_rate);
long divider_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent,
unsigned long rate, unsigned long *prate,
const struct clk_div_table *table,
u8 width, unsigned long flags)
{
struct clk_rate_request req;
int ret;
clk_hw_init_rate_request(hw, &req, rate);
req.best_parent_rate = *prate;
req.best_parent_hw = parent;
ret = divider_determine_rate(hw, &req, table, width, flags);
if (ret)
return ret;
*prate = req.best_parent_rate;
return req.rate;
}
EXPORT_SYMBOL_GPL(divider_round_rate_parent);
long divider_ro_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent,
unsigned long rate, unsigned long *prate,
const struct clk_div_table *table, u8 width,
unsigned long flags, unsigned int val)
{
struct clk_rate_request req;
int ret;
clk_hw_init_rate_request(hw, &req, rate);
req.best_parent_rate = *prate;
req.best_parent_hw = parent;
ret = divider_ro_determine_rate(hw, &req, table, width, flags, val);
if (ret)
return ret;
*prate = req.best_parent_rate;
return req.rate;
}
EXPORT_SYMBOL_GPL(divider_ro_round_rate_parent);
static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
struct clk_divider *divider = to_clk_divider(hw);
/* if read only, just return current value */
if (divider->flags & CLK_DIVIDER_READ_ONLY) {
u32 val;
val = clk_div_readl(divider) >> divider->shift;
val &= clk_div_mask(divider->width);
return divider_ro_round_rate(hw, rate, prate, divider->table,
divider->width, divider->flags,
val);
}
return divider_round_rate(hw, rate, prate, divider->table,
divider->width, divider->flags);
}
static int clk_divider_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
struct clk_divider *divider = to_clk_divider(hw);
/* if read only, just return current value */
if (divider->flags & CLK_DIVIDER_READ_ONLY) {
u32 val;
val = clk_div_readl(divider) >> divider->shift;
val &= clk_div_mask(divider->width);
return divider_ro_determine_rate(hw, req, divider->table,
divider->width,
divider->flags, val);
}
return divider_determine_rate(hw, req, divider->table, divider->width,
divider->flags);
}
int divider_get_val(unsigned long rate, unsigned long parent_rate,
const struct clk_div_table *table, u8 width,
unsigned long flags)
{
unsigned int div, value;
div = DIV_ROUND_UP_ULL((u64)parent_rate, rate);
if (!_is_valid_div(table, div, flags))
return -EINVAL;
value = _get_val(table, div, flags, width);
return min_t(unsigned int, value, clk_div_mask(width));
}
EXPORT_SYMBOL_GPL(divider_get_val);
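/*
 * Example (illustrative): requesting 25 MHz from a 100 MHz parent with
 * default flags gives div = 4, stored as val = 3 (div - 1).
 */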
static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_divider *divider = to_clk_divider(hw);
int value;
unsigned long flags = 0;
u32 val;
value = divider_get_val(rate, parent_rate, divider->table,
divider->width, divider->flags);
if (value < 0)
return value;
if (divider->lock)
spin_lock_irqsave(divider->lock, flags);
else
__acquire(divider->lock);
if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
val = clk_div_mask(divider->width) << (divider->shift + 16);
} else {
val = clk_div_readl(divider);
val &= ~(clk_div_mask(divider->width) << divider->shift);
}
val |= (u32)value << divider->shift;
clk_div_writel(divider, val);
if (divider->lock)
spin_unlock_irqrestore(divider->lock, flags);
else
__release(divider->lock);
return 0;
}
const struct clk_ops clk_divider_ops = {
.recalc_rate = clk_divider_recalc_rate,
.round_rate = clk_divider_round_rate,
.determine_rate = clk_divider_determine_rate,
.set_rate = clk_divider_set_rate,
};
EXPORT_SYMBOL_GPL(clk_divider_ops);
const struct clk_ops clk_divider_ro_ops = {
.recalc_rate = clk_divider_recalc_rate,
.round_rate = clk_divider_round_rate,
.determine_rate = clk_divider_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_divider_ro_ops);
struct clk_hw *__clk_hw_register_divider(struct device *dev,
struct device_node *np, const char *name,
const char *parent_name, const struct clk_hw *parent_hw,
const struct clk_parent_data *parent_data, unsigned long flags,
void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags,
const struct clk_div_table *table, spinlock_t *lock)
{
struct clk_divider *div;
struct clk_hw *hw;
struct clk_init_data init = {};
int ret;
if (clk_divider_flags & CLK_DIVIDER_HIWORD_MASK) {
if (width + shift > 16) {
pr_warn("divider value exceeds LOWORD field\n");
return ERR_PTR(-EINVAL);
}
}
/* allocate the divider */
div = kzalloc(sizeof(*div), GFP_KERNEL);
if (!div)
return ERR_PTR(-ENOMEM);
init.name = name;
if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
init.ops = &clk_divider_ro_ops;
else
init.ops = &clk_divider_ops;
init.flags = flags;
init.parent_names = parent_name ? &parent_name : NULL;
init.parent_hws = parent_hw ? &parent_hw : NULL;
init.parent_data = parent_data;
if (parent_name || parent_hw || parent_data)
init.num_parents = 1;
else
init.num_parents = 0;
/* struct clk_divider assignments */
div->reg = reg;
div->shift = shift;
div->width = width;
div->flags = clk_divider_flags;
div->lock = lock;
div->hw.init = &init;
div->table = table;
/* register the clock */
hw = &div->hw;
ret = clk_hw_register(dev, hw);
if (ret) {
kfree(div);
hw = ERR_PTR(ret);
}
return hw;
}
EXPORT_SYMBOL_GPL(__clk_hw_register_divider);
/**
* clk_register_divider_table - register a table based divider clock with
* the clock framework
* @dev: device registering this clock
* @name: name of this clock
* @parent_name: name of clock's parent
* @flags: framework-specific flags
* @reg: register address to adjust divider
* @shift: number of bits to shift the bitfield
* @width: width of the bitfield
* @clk_divider_flags: divider-specific flags for this clock
* @table: array of divider/value pairs ending with a div set to 0
* @lock: shared register lock for this clock
*/
struct clk *clk_register_divider_table(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 shift, u8 width,
u8 clk_divider_flags, const struct clk_div_table *table,
spinlock_t *lock)
{
struct clk_hw *hw;
hw = __clk_hw_register_divider(dev, NULL, name, parent_name, NULL,
NULL, flags, reg, shift, width, clk_divider_flags,
table, lock);
if (IS_ERR(hw))
return ERR_CAST(hw);
return hw->clk;
}
EXPORT_SYMBOL_GPL(clk_register_divider_table);
void clk_unregister_divider(struct clk *clk)
{
struct clk_divider *div;
struct clk_hw *hw;
hw = __clk_get_hw(clk);
if (!hw)
return;
div = to_clk_divider(hw);
clk_unregister(clk);
kfree(div);
}
EXPORT_SYMBOL_GPL(clk_unregister_divider);
/**
* clk_hw_unregister_divider - unregister a clk divider
* @hw: hardware-specific clock data to unregister
*/
void clk_hw_unregister_divider(struct clk_hw *hw)
{
struct clk_divider *div;
div = to_clk_divider(hw);
clk_hw_unregister(hw);
kfree(div);
}
EXPORT_SYMBOL_GPL(clk_hw_unregister_divider);
static void devm_clk_hw_release_divider(struct device *dev, void *res)
{
clk_hw_unregister_divider(*(struct clk_hw **)res);
}
struct clk_hw *__devm_clk_hw_register_divider(struct device *dev,
struct device_node *np, const char *name,
const char *parent_name, const struct clk_hw *parent_hw,
const struct clk_parent_data *parent_data, unsigned long flags,
void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags,
const struct clk_div_table *table, spinlock_t *lock)
{
struct clk_hw **ptr, *hw;
ptr = devres_alloc(devm_clk_hw_release_divider, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return ERR_PTR(-ENOMEM);
hw = __clk_hw_register_divider(dev, np, name, parent_name, parent_hw,
parent_data, flags, reg, shift, width,
clk_divider_flags, table, lock);
if (!IS_ERR(hw)) {
*ptr = hw;
devres_add(dev, ptr);
} else {
devres_free(ptr);
}
return hw;
}
EXPORT_SYMBOL_GPL(__devm_clk_hw_register_divider);
| linux-master | drivers/clk/clk-divider.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2010-2011 Canonical Ltd <[email protected]>
* Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <[email protected]>
*
* Gated clock implementation
*/
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/string.h>
/**
 * DOC: basic gateable clock which can gate and ungate its output
*
* Traits of this clock:
* prepare - clk_(un)prepare only ensures parent is (un)prepared
* enable - clk_enable and clk_disable are functional & control gating
* rate - inherits rate from parent. No clk_set_rate support
* parent - fixed parent. No clk_set_parent support
*/
static inline u32 clk_gate_readl(struct clk_gate *gate)
{
if (gate->flags & CLK_GATE_BIG_ENDIAN)
return ioread32be(gate->reg);
return readl(gate->reg);
}
static inline void clk_gate_writel(struct clk_gate *gate, u32 val)
{
if (gate->flags & CLK_GATE_BIG_ENDIAN)
iowrite32be(val, gate->reg);
else
writel(val, gate->reg);
}
/*
 * It works on the following logic:
*
* For enabling clock, enable = 1
* set2dis = 1 -> clear bit -> set = 0
* set2dis = 0 -> set bit -> set = 1
*
* For disabling clock, enable = 0
* set2dis = 1 -> set bit -> set = 1
* set2dis = 0 -> clear bit -> set = 0
*
* So, result is always: enable xor set2dis.
*/
static void clk_gate_endisable(struct clk_hw *hw, int enable)
{
struct clk_gate *gate = to_clk_gate(hw);
int set = gate->flags & CLK_GATE_SET_TO_DISABLE ? 1 : 0;
unsigned long flags;
u32 reg;
set ^= enable;
if (gate->lock)
spin_lock_irqsave(gate->lock, flags);
else
__acquire(gate->lock);
if (gate->flags & CLK_GATE_HIWORD_MASK) {
reg = BIT(gate->bit_idx + 16);
if (set)
reg |= BIT(gate->bit_idx);
} else {
reg = clk_gate_readl(gate);
if (set)
reg |= BIT(gate->bit_idx);
else
reg &= ~BIT(gate->bit_idx);
}
clk_gate_writel(gate, reg);
if (gate->lock)
spin_unlock_irqrestore(gate->lock, flags);
else
__release(gate->lock);
}
static int clk_gate_enable(struct clk_hw *hw)
{
clk_gate_endisable(hw, 1);
return 0;
}
static void clk_gate_disable(struct clk_hw *hw)
{
clk_gate_endisable(hw, 0);
}
int clk_gate_is_enabled(struct clk_hw *hw)
{
u32 reg;
struct clk_gate *gate = to_clk_gate(hw);
reg = clk_gate_readl(gate);
/* if a set bit disables this clk, flip it before masking */
if (gate->flags & CLK_GATE_SET_TO_DISABLE)
reg ^= BIT(gate->bit_idx);
reg &= BIT(gate->bit_idx);
return reg ? 1 : 0;
}
EXPORT_SYMBOL_GPL(clk_gate_is_enabled);
const struct clk_ops clk_gate_ops = {
.enable = clk_gate_enable,
.disable = clk_gate_disable,
.is_enabled = clk_gate_is_enabled,
};
EXPORT_SYMBOL_GPL(clk_gate_ops);
struct clk_hw *__clk_hw_register_gate(struct device *dev,
struct device_node *np, const char *name,
const char *parent_name, const struct clk_hw *parent_hw,
const struct clk_parent_data *parent_data,
unsigned long flags,
void __iomem *reg, u8 bit_idx,
u8 clk_gate_flags, spinlock_t *lock)
{
struct clk_gate *gate;
struct clk_hw *hw;
struct clk_init_data init = {};
	int ret;
if (clk_gate_flags & CLK_GATE_HIWORD_MASK) {
if (bit_idx > 15) {
pr_err("gate bit exceeds LOWORD field\n");
return ERR_PTR(-EINVAL);
}
}
/* allocate the gate */
gate = kzalloc(sizeof(*gate), GFP_KERNEL);
if (!gate)
return ERR_PTR(-ENOMEM);
init.name = name;
init.ops = &clk_gate_ops;
init.flags = flags;
init.parent_names = parent_name ? &parent_name : NULL;
init.parent_hws = parent_hw ? &parent_hw : NULL;
init.parent_data = parent_data;
if (parent_name || parent_hw || parent_data)
init.num_parents = 1;
else
init.num_parents = 0;
/* struct clk_gate assignments */
gate->reg = reg;
gate->bit_idx = bit_idx;
gate->flags = clk_gate_flags;
gate->lock = lock;
gate->hw.init = &init;
hw = &gate->hw;
if (dev || !np)
ret = clk_hw_register(dev, hw);
	else
ret = of_clk_hw_register(np, hw);
if (ret) {
kfree(gate);
hw = ERR_PTR(ret);
}
return hw;
}
EXPORT_SYMBOL_GPL(__clk_hw_register_gate);
struct clk *clk_register_gate(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 bit_idx,
u8 clk_gate_flags, spinlock_t *lock)
{
struct clk_hw *hw;
hw = clk_hw_register_gate(dev, name, parent_name, flags, reg,
bit_idx, clk_gate_flags, lock);
if (IS_ERR(hw))
return ERR_CAST(hw);
return hw->clk;
}
EXPORT_SYMBOL_GPL(clk_register_gate);
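/*
 * Usage sketch (illustrative): gate controlled by bit 3 of a register,
 * protected by a driver-private spinlock. All names are made up:
 *
 *	clk = clk_register_gate(dev, "periph_gate", "periph_pll", 0,
 *				base + 0x10, 3, 0, &my_lock);
 */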
void clk_unregister_gate(struct clk *clk)
{
struct clk_gate *gate;
struct clk_hw *hw;
hw = __clk_get_hw(clk);
if (!hw)
return;
gate = to_clk_gate(hw);
clk_unregister(clk);
kfree(gate);
}
EXPORT_SYMBOL_GPL(clk_unregister_gate);
void clk_hw_unregister_gate(struct clk_hw *hw)
{
struct clk_gate *gate;
gate = to_clk_gate(hw);
clk_hw_unregister(hw);
kfree(gate);
}
EXPORT_SYMBOL_GPL(clk_hw_unregister_gate);
static void devm_clk_hw_release_gate(struct device *dev, void *res)
{
clk_hw_unregister_gate(*(struct clk_hw **)res);
}
struct clk_hw *__devm_clk_hw_register_gate(struct device *dev,
struct device_node *np, const char *name,
const char *parent_name, const struct clk_hw *parent_hw,
const struct clk_parent_data *parent_data,
unsigned long flags,
void __iomem *reg, u8 bit_idx,
u8 clk_gate_flags, spinlock_t *lock)
{
struct clk_hw **ptr, *hw;
ptr = devres_alloc(devm_clk_hw_release_gate, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return ERR_PTR(-ENOMEM);
hw = __clk_hw_register_gate(dev, np, name, parent_name, parent_hw,
parent_data, flags, reg, bit_idx,
clk_gate_flags, lock);
if (!IS_ERR(hw)) {
*ptr = hw;
devres_add(dev, ptr);
} else {
devres_free(ptr);
}
return hw;
}
EXPORT_SYMBOL_GPL(__devm_clk_hw_register_gate);
| linux-master | drivers/clk/clk-gate.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Lochnagar clock control
*
* Copyright (c) 2017-2018 Cirrus Logic, Inc. and
* Cirrus Logic International Semiconductor Ltd.
*
* Author: Charles Keepax <[email protected]>
*/
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/mfd/lochnagar1_regs.h>
#include <linux/mfd/lochnagar2_regs.h>
#include <dt-bindings/clock/lochnagar.h>
#define LOCHNAGAR_NUM_CLOCKS (LOCHNAGAR_SPDIF_CLKOUT + 1)
struct lochnagar_clk {
const char * const name;
struct clk_hw hw;
struct lochnagar_clk_priv *priv;
u16 cfg_reg;
u16 ena_mask;
u16 src_reg;
u16 src_mask;
};
struct lochnagar_clk_priv {
struct device *dev;
struct regmap *regmap;
struct lochnagar_clk lclks[LOCHNAGAR_NUM_CLOCKS];
};
#define LN_PARENT(NAME) { .name = NAME, .fw_name = NAME }
static const struct clk_parent_data lochnagar1_clk_parents[] = {
LN_PARENT("ln-none"),
LN_PARENT("ln-spdif-mclk"),
LN_PARENT("ln-psia1-mclk"),
LN_PARENT("ln-psia2-mclk"),
LN_PARENT("ln-cdc-clkout"),
LN_PARENT("ln-dsp-clkout"),
LN_PARENT("ln-pmic-32k"),
LN_PARENT("ln-gf-mclk1"),
LN_PARENT("ln-gf-mclk3"),
LN_PARENT("ln-gf-mclk2"),
LN_PARENT("ln-gf-mclk4"),
};
static const struct clk_parent_data lochnagar2_clk_parents[] = {
LN_PARENT("ln-none"),
LN_PARENT("ln-cdc-clkout"),
LN_PARENT("ln-dsp-clkout"),
LN_PARENT("ln-pmic-32k"),
LN_PARENT("ln-spdif-mclk"),
LN_PARENT("ln-clk-12m"),
LN_PARENT("ln-clk-11m"),
LN_PARENT("ln-clk-24m"),
LN_PARENT("ln-clk-22m"),
LN_PARENT("ln-clk-8m"),
LN_PARENT("ln-usb-clk-24m"),
LN_PARENT("ln-gf-mclk1"),
LN_PARENT("ln-gf-mclk3"),
LN_PARENT("ln-gf-mclk2"),
LN_PARENT("ln-psia1-mclk"),
LN_PARENT("ln-psia2-mclk"),
LN_PARENT("ln-spdif-clkout"),
LN_PARENT("ln-adat-mclk"),
LN_PARENT("ln-usb-clk-12m"),
};
#define LN1_CLK(ID, NAME, REG) \
[LOCHNAGAR_##ID] = { \
.name = NAME, \
.cfg_reg = LOCHNAGAR1_##REG, \
.ena_mask = LOCHNAGAR1_##ID##_ENA_MASK, \
.src_reg = LOCHNAGAR1_##ID##_SEL, \
.src_mask = LOCHNAGAR1_SRC_MASK, \
}
#define LN2_CLK(ID, NAME) \
[LOCHNAGAR_##ID] = { \
.name = NAME, \
.cfg_reg = LOCHNAGAR2_##ID##_CTRL, \
.src_reg = LOCHNAGAR2_##ID##_CTRL, \
.ena_mask = LOCHNAGAR2_CLK_ENA_MASK, \
.src_mask = LOCHNAGAR2_CLK_SRC_MASK, \
}
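/*
 * For example, LN2_CLK(CDC_MCLK1, "ln-cdc-mclk1") expands to an entry
 * whose enable and mux source fields both live in
 * LOCHNAGAR2_CDC_MCLK1_CTRL, masked by LOCHNAGAR2_CLK_ENA_MASK and
 * LOCHNAGAR2_CLK_SRC_MASK respectively.
 */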
static const struct lochnagar_clk lochnagar1_clks[LOCHNAGAR_NUM_CLOCKS] = {
LN1_CLK(CDC_MCLK1, "ln-cdc-mclk1", CDC_AIF_CTRL2),
LN1_CLK(CDC_MCLK2, "ln-cdc-mclk2", CDC_AIF_CTRL2),
LN1_CLK(DSP_CLKIN, "ln-dsp-clkin", DSP_AIF),
LN1_CLK(GF_CLKOUT1, "ln-gf-clkout1", GF_AIF1),
};
static const struct lochnagar_clk lochnagar2_clks[LOCHNAGAR_NUM_CLOCKS] = {
LN2_CLK(CDC_MCLK1, "ln-cdc-mclk1"),
LN2_CLK(CDC_MCLK2, "ln-cdc-mclk2"),
LN2_CLK(DSP_CLKIN, "ln-dsp-clkin"),
LN2_CLK(GF_CLKOUT1, "ln-gf-clkout1"),
LN2_CLK(GF_CLKOUT2, "ln-gf-clkout2"),
LN2_CLK(PSIA1_MCLK, "ln-psia1-mclk"),
LN2_CLK(PSIA2_MCLK, "ln-psia2-mclk"),
LN2_CLK(SPDIF_MCLK, "ln-spdif-mclk"),
LN2_CLK(ADAT_MCLK, "ln-adat-mclk"),
LN2_CLK(SOUNDCARD_MCLK, "ln-soundcard-mclk"),
};
struct lochnagar_config {
const struct clk_parent_data *parents;
int nparents;
const struct lochnagar_clk *clks;
};
static const struct lochnagar_config lochnagar1_conf = {
.parents = lochnagar1_clk_parents,
.nparents = ARRAY_SIZE(lochnagar1_clk_parents),
.clks = lochnagar1_clks,
};
static const struct lochnagar_config lochnagar2_conf = {
.parents = lochnagar2_clk_parents,
.nparents = ARRAY_SIZE(lochnagar2_clk_parents),
.clks = lochnagar2_clks,
};
static inline struct lochnagar_clk *lochnagar_hw_to_lclk(struct clk_hw *hw)
{
return container_of(hw, struct lochnagar_clk, hw);
}
static int lochnagar_clk_prepare(struct clk_hw *hw)
{
struct lochnagar_clk *lclk = lochnagar_hw_to_lclk(hw);
struct lochnagar_clk_priv *priv = lclk->priv;
struct regmap *regmap = priv->regmap;
int ret;
ret = regmap_update_bits(regmap, lclk->cfg_reg,
lclk->ena_mask, lclk->ena_mask);
if (ret < 0)
dev_dbg(priv->dev, "Failed to prepare %s: %d\n",
lclk->name, ret);
return ret;
}
static void lochnagar_clk_unprepare(struct clk_hw *hw)
{
struct lochnagar_clk *lclk = lochnagar_hw_to_lclk(hw);
struct lochnagar_clk_priv *priv = lclk->priv;
struct regmap *regmap = priv->regmap;
int ret;
ret = regmap_update_bits(regmap, lclk->cfg_reg, lclk->ena_mask, 0);
if (ret < 0)
dev_dbg(priv->dev, "Failed to unprepare %s: %d\n",
lclk->name, ret);
}
static int lochnagar_clk_set_parent(struct clk_hw *hw, u8 index)
{
struct lochnagar_clk *lclk = lochnagar_hw_to_lclk(hw);
struct lochnagar_clk_priv *priv = lclk->priv;
struct regmap *regmap = priv->regmap;
int ret;
ret = regmap_update_bits(regmap, lclk->src_reg, lclk->src_mask, index);
if (ret < 0)
dev_dbg(priv->dev, "Failed to reparent %s: %d\n",
lclk->name, ret);
return ret;
}
static u8 lochnagar_clk_get_parent(struct clk_hw *hw)
{
struct lochnagar_clk *lclk = lochnagar_hw_to_lclk(hw);
struct lochnagar_clk_priv *priv = lclk->priv;
struct regmap *regmap = priv->regmap;
unsigned int val;
int ret;
ret = regmap_read(regmap, lclk->src_reg, &val);
if (ret < 0) {
dev_dbg(priv->dev, "Failed to read parent of %s: %d\n",
lclk->name, ret);
return clk_hw_get_num_parents(hw);
}
val &= lclk->src_mask;
return val;
}
static const struct clk_ops lochnagar_clk_ops = {
.prepare = lochnagar_clk_prepare,
.unprepare = lochnagar_clk_unprepare,
.determine_rate = clk_hw_determine_rate_no_reparent,
.set_parent = lochnagar_clk_set_parent,
.get_parent = lochnagar_clk_get_parent,
};
static struct clk_hw *
lochnagar_of_clk_hw_get(struct of_phandle_args *clkspec, void *data)
{
struct lochnagar_clk_priv *priv = data;
unsigned int idx = clkspec->args[0];
if (idx >= ARRAY_SIZE(priv->lclks)) {
dev_err(priv->dev, "Invalid index %u\n", idx);
return ERR_PTR(-EINVAL);
}
return &priv->lclks[idx].hw;
}
static const struct of_device_id lochnagar_of_match[] = {
{ .compatible = "cirrus,lochnagar1-clk", .data = &lochnagar1_conf },
{ .compatible = "cirrus,lochnagar2-clk", .data = &lochnagar2_conf },
{}
};
MODULE_DEVICE_TABLE(of, lochnagar_of_match);
static int lochnagar_clk_probe(struct platform_device *pdev)
{
struct clk_init_data clk_init = {
.ops = &lochnagar_clk_ops,
};
struct device *dev = &pdev->dev;
struct lochnagar_clk_priv *priv;
const struct of_device_id *of_id;
struct lochnagar_clk *lclk;
const struct lochnagar_config *conf;
int ret, i;
of_id = of_match_device(lochnagar_of_match, dev);
if (!of_id)
return -EINVAL;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->dev = dev;
priv->regmap = dev_get_regmap(dev->parent, NULL);
conf = of_id->data;
memcpy(priv->lclks, conf->clks, sizeof(priv->lclks));
clk_init.parent_data = conf->parents;
clk_init.num_parents = conf->nparents;
for (i = 0; i < ARRAY_SIZE(priv->lclks); i++) {
lclk = &priv->lclks[i];
if (!lclk->name)
continue;
clk_init.name = lclk->name;
lclk->priv = priv;
lclk->hw.init = &clk_init;
ret = devm_clk_hw_register(dev, &lclk->hw);
if (ret) {
dev_err(dev, "Failed to register %s: %d\n",
lclk->name, ret);
return ret;
}
}
ret = devm_of_clk_add_hw_provider(dev, lochnagar_of_clk_hw_get, priv);
if (ret < 0)
dev_err(dev, "Failed to register provider: %d\n", ret);
return ret;
}
static struct platform_driver lochnagar_clk_driver = {
.driver = {
.name = "lochnagar-clk",
.of_match_table = lochnagar_of_match,
},
.probe = lochnagar_clk_probe,
};
module_platform_driver(lochnagar_clk_driver);
MODULE_AUTHOR("Charles Keepax <[email protected]>");
MODULE_DESCRIPTION("Clock driver for Cirrus Logic Lochnagar Board");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/clk/clk-lochnagar.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* clk-xgene.c - AppliedMicro X-Gene Clock Interface
*
* Copyright (c) 2013, Applied Micro Circuits Corporation
* Author: Loc Ho <[email protected]>
*/
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/of_address.h>
/* Register SCU_PCPPLL bit fields */
#define N_DIV_RD(src) ((src) & 0x000001ff)
#define SC_N_DIV_RD(src) ((src) & 0x0000007f)
#define SC_OUTDIV2(src) (((src) & 0x00000100) >> 8)
/* Register SCU_SOCPLL bit fields */
#define CLKR_RD(src) (((src) & 0x07000000)>>24)
#define CLKOD_RD(src) (((src) & 0x00300000)>>20)
#define REGSPEC_RESET_F1_MASK 0x00010000
#define CLKF_RD(src) (((src) & 0x000001ff))
#define XGENE_CLK_DRIVER_VER "0.1"
static DEFINE_SPINLOCK(clk_lock);
static inline u32 xgene_clk_read(void __iomem *csr)
{
return readl_relaxed(csr);
}
static inline void xgene_clk_write(u32 data, void __iomem *csr)
{
writel_relaxed(data, csr);
}
/* PLL Clock */
enum xgene_pll_type {
PLL_TYPE_PCP = 0,
PLL_TYPE_SOC = 1,
};
struct xgene_clk_pll {
struct clk_hw hw;
void __iomem *reg;
spinlock_t *lock;
u32 pll_offset;
enum xgene_pll_type type;
int version;
};
#define to_xgene_clk_pll(_hw) container_of(_hw, struct xgene_clk_pll, hw)
static int xgene_clk_pll_is_enabled(struct clk_hw *hw)
{
struct xgene_clk_pll *pllclk = to_xgene_clk_pll(hw);
u32 data;
data = xgene_clk_read(pllclk->reg + pllclk->pll_offset);
pr_debug("%s pll %s\n", clk_hw_get_name(hw),
data & REGSPEC_RESET_F1_MASK ? "disabled" : "enabled");
return data & REGSPEC_RESET_F1_MASK ? 0 : 1;
}
static unsigned long xgene_clk_pll_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct xgene_clk_pll *pllclk = to_xgene_clk_pll(hw);
unsigned long fref;
unsigned long fvco;
u32 pll;
u32 nref;
u32 nout;
u32 nfb;
pll = xgene_clk_read(pllclk->reg + pllclk->pll_offset);
if (pllclk->version <= 1) {
if (pllclk->type == PLL_TYPE_PCP) {
/*
* PLL VCO = Reference clock * NF
* PCP PLL = PLL_VCO / 2
*/
nout = 2;
fvco = parent_rate * (N_DIV_RD(pll) + 4);
} else {
/*
* Fref = Reference Clock / NREF;
* Fvco = Fref * NFB;
* Fout = Fvco / NOUT;
*/
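/*
 * Worked example (illustrative register values, not from
 * real hardware): parent_rate = 100 MHz, CLKR = 0 (NREF = 1),
 * CLKOD = 1 (NOUT = 2), CLKF = 20 (NFB = 20) gives
 * Fref = 100 MHz, Fvco = 2 GHz and Fout = Fvco / NOUT = 1 GHz.
 */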
nref = CLKR_RD(pll) + 1;
nout = CLKOD_RD(pll) + 1;
nfb = CLKF_RD(pll);
fref = parent_rate / nref;
fvco = fref * nfb;
}
} else {
/*
* fvco = Reference clock * FBDIVC
* PLL freq = fvco / NOUT
*/
nout = SC_OUTDIV2(pll) ? 2 : 3;
fvco = parent_rate * SC_N_DIV_RD(pll);
}
pr_debug("%s pll recalc rate %ld parent %ld version %d\n",
clk_hw_get_name(hw), fvco / nout, parent_rate,
pllclk->version);
return fvco / nout;
}
static const struct clk_ops xgene_clk_pll_ops = {
.is_enabled = xgene_clk_pll_is_enabled,
.recalc_rate = xgene_clk_pll_recalc_rate,
};
static struct clk *xgene_register_clk_pll(struct device *dev,
const char *name, const char *parent_name,
unsigned long flags, void __iomem *reg, u32 pll_offset,
u32 type, spinlock_t *lock, int version)
{
struct xgene_clk_pll *apmclk;
struct clk *clk;
struct clk_init_data init;
/* allocate the APM clock structure */
apmclk = kzalloc(sizeof(*apmclk), GFP_KERNEL);
if (!apmclk)
return ERR_PTR(-ENOMEM);
init.name = name;
init.ops = &xgene_clk_pll_ops;
init.flags = flags;
init.parent_names = parent_name ? &parent_name : NULL;
init.num_parents = parent_name ? 1 : 0;
apmclk->version = version;
apmclk->reg = reg;
apmclk->lock = lock;
apmclk->pll_offset = pll_offset;
apmclk->type = type;
apmclk->hw.init = &init;
/* Register the clock */
clk = clk_register(dev, &apmclk->hw);
if (IS_ERR(clk)) {
pr_err("%s: could not register clk %s\n", __func__, name);
kfree(apmclk);
return clk;
}
return clk;
}
static int xgene_pllclk_version(struct device_node *np)
{
if (of_device_is_compatible(np, "apm,xgene-socpll-clock"))
return 1;
if (of_device_is_compatible(np, "apm,xgene-pcppll-clock"))
return 1;
return 2;
}
static void xgene_pllclk_init(struct device_node *np, enum xgene_pll_type pll_type)
{
const char *clk_name = np->full_name;
struct clk *clk;
void __iomem *reg;
int version = xgene_pllclk_version(np);
reg = of_iomap(np, 0);
if (!reg) {
pr_err("Unable to map CSR register for %pOF\n", np);
return;
}
of_property_read_string(np, "clock-output-names", &clk_name);
clk = xgene_register_clk_pll(NULL,
clk_name, of_clk_get_parent_name(np, 0),
0, reg, 0, pll_type, &clk_lock,
version);
if (!IS_ERR(clk)) {
of_clk_add_provider(np, of_clk_src_simple_get, clk);
clk_register_clkdev(clk, clk_name, NULL);
pr_debug("Add %s clock PLL\n", clk_name);
}
}
static void xgene_socpllclk_init(struct device_node *np)
{
xgene_pllclk_init(np, PLL_TYPE_SOC);
}
static void xgene_pcppllclk_init(struct device_node *np)
{
xgene_pllclk_init(np, PLL_TYPE_PCP);
}
/**
* struct xgene_clk_pmd - PMD clock
*
* @hw: handle between common and hardware-specific interfaces
* @reg: register containing the fractional scale multiplier (scaler)
* @shift: shift to the unit bit field
* @mask: mask to the unit bit field
* @denom: 1/denominator unit
* @lock: register lock
* @flags: XGENE_CLK_PMD_SCALE_INVERTED - By default the scaler is the value read
* from the register plus one. For example,
* 0 for (0 + 1) / denom,
* 1 for (1 + 1) / denom, etc.
* If this flag is set, it is
* 0 for (denom - 0) / denom,
* 1 for (denom - 1) / denom, etc.
*/
struct xgene_clk_pmd {
struct clk_hw hw;
void __iomem *reg;
u8 shift;
u32 mask;
u64 denom;
u32 flags;
spinlock_t *lock;
};
#define to_xgene_clk_pmd(_hw) container_of(_hw, struct xgene_clk_pmd, hw)
#define XGENE_CLK_PMD_SCALE_INVERTED BIT(0)
#define XGENE_CLK_PMD_SHIFT 8
#define XGENE_CLK_PMD_WIDTH 3
static unsigned long xgene_clk_pmd_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct xgene_clk_pmd *fd = to_xgene_clk_pmd(hw);
unsigned long flags = 0;
u64 ret, scale;
u32 val;
if (fd->lock)
spin_lock_irqsave(fd->lock, flags);
else
__acquire(fd->lock);
val = readl(fd->reg);
if (fd->lock)
spin_unlock_irqrestore(fd->lock, flags);
else
__release(fd->lock);
ret = (u64)parent_rate;
scale = (val & fd->mask) >> fd->shift;
if (fd->flags & XGENE_CLK_PMD_SCALE_INVERTED)
scale = fd->denom - scale;
else
scale++;
/* freq = parent_rate * scaler / denom */
do_div(ret, fd->denom);
ret *= scale;
if (ret == 0)
ret = (u64)parent_rate;
return ret;
}
static long xgene_clk_pmd_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
struct xgene_clk_pmd *fd = to_xgene_clk_pmd(hw);
u64 ret, scale;
if (!rate || rate >= *parent_rate)
return *parent_rate;
/* freq = parent_rate * scaler / denom */
ret = rate * fd->denom;
scale = DIV_ROUND_UP_ULL(ret, *parent_rate);
ret = (u64)*parent_rate * scale;
do_div(ret, fd->denom);
return ret;
}
static int xgene_clk_pmd_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct xgene_clk_pmd *fd = to_xgene_clk_pmd(hw);
unsigned long flags = 0;
u64 scale, ret;
u32 val;
/*
* Compute the scaler:
*
* freq = parent_rate * scaler / denom, or
* scaler = freq * denom / parent_rate
*/
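/*
 * Worked example (illustrative values only): rate = 75 MHz,
 * parent_rate = 100 MHz, denom = 8 gives scale = 6, i.e. the
 * output runs at 100 MHz * 6 / 8 = 75 MHz; with the inverted
 * encoding used by this chip the register field holds 8 - 6 = 2.
 */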
ret = rate * fd->denom;
scale = DIV_ROUND_UP_ULL(ret, (u64)parent_rate);
/* Check if inverted */
if (fd->flags & XGENE_CLK_PMD_SCALE_INVERTED)
scale = fd->denom - scale;
else
scale--;
if (fd->lock)
spin_lock_irqsave(fd->lock, flags);
else
__acquire(fd->lock);
val = readl(fd->reg);
val &= ~fd->mask;
val |= (scale << fd->shift);
writel(val, fd->reg);
if (fd->lock)
spin_unlock_irqrestore(fd->lock, flags);
else
__release(fd->lock);
return 0;
}
static const struct clk_ops xgene_clk_pmd_ops = {
.recalc_rate = xgene_clk_pmd_recalc_rate,
.round_rate = xgene_clk_pmd_round_rate,
.set_rate = xgene_clk_pmd_set_rate,
};
static struct clk *
xgene_register_clk_pmd(struct device *dev,
const char *name, const char *parent_name,
unsigned long flags, void __iomem *reg, u8 shift,
u8 width, u64 denom, u32 clk_flags, spinlock_t *lock)
{
struct xgene_clk_pmd *fd;
struct clk_init_data init;
struct clk *clk;
fd = kzalloc(sizeof(*fd), GFP_KERNEL);
if (!fd)
return ERR_PTR(-ENOMEM);
init.name = name;
init.ops = &xgene_clk_pmd_ops;
init.flags = flags;
init.parent_names = parent_name ? &parent_name : NULL;
init.num_parents = parent_name ? 1 : 0;
fd->reg = reg;
fd->shift = shift;
fd->mask = (BIT(width) - 1) << shift;
fd->denom = denom;
fd->flags = clk_flags;
fd->lock = lock;
fd->hw.init = &init;
clk = clk_register(dev, &fd->hw);
if (IS_ERR(clk)) {
pr_err("%s: could not register clk %s\n", __func__, name);
kfree(fd);
return clk;
}
return clk;
}
static void xgene_pmdclk_init(struct device_node *np)
{
const char *clk_name = np->full_name;
void __iomem *csr_reg;
struct resource res;
struct clk *clk;
u64 denom;
u32 flags = 0;
int rc;
/* Check if the entry is disabled */
if (!of_device_is_available(np))
return;
/* Parse the DTS register for resource */
rc = of_address_to_resource(np, 0, &res);
if (rc != 0) {
pr_err("no DTS register for %pOF\n", np);
return;
}
csr_reg = of_iomap(np, 0);
if (!csr_reg) {
pr_err("Unable to map resource for %pOF\n", np);
return;
}
of_property_read_string(np, "clock-output-names", &clk_name);
denom = BIT(XGENE_CLK_PMD_WIDTH);
flags |= XGENE_CLK_PMD_SCALE_INVERTED;
clk = xgene_register_clk_pmd(NULL, clk_name,
of_clk_get_parent_name(np, 0), 0,
csr_reg, XGENE_CLK_PMD_SHIFT,
XGENE_CLK_PMD_WIDTH, denom,
flags, &clk_lock);
if (!IS_ERR(clk)) {
of_clk_add_provider(np, of_clk_src_simple_get, clk);
clk_register_clkdev(clk, clk_name, NULL);
pr_debug("Add %s clock\n", clk_name);
} else {
if (csr_reg)
iounmap(csr_reg);
}
}
/* IP Clock */
struct xgene_dev_parameters {
void __iomem *csr_reg; /* CSR for IP clock */
u32 reg_clk_offset; /* Offset to clock enable CSR */
u32 reg_clk_mask; /* Mask bit for clock enable */
u32 reg_csr_offset; /* Offset to CSR reset */
u32 reg_csr_mask; /* Mask bit to disable CSR reset */
void __iomem *divider_reg; /* CSR for divider */
u32 reg_divider_offset; /* Offset to divider register */
u32 reg_divider_shift; /* Bit shift of the divider field */
u32 reg_divider_width; /* Width of the divider field */
};
struct xgene_clk {
struct clk_hw hw;
spinlock_t *lock;
struct xgene_dev_parameters param;
};
#define to_xgene_clk(_hw) container_of(_hw, struct xgene_clk, hw)
static int xgene_clk_enable(struct clk_hw *hw)
{
struct xgene_clk *pclk = to_xgene_clk(hw);
unsigned long flags = 0;
u32 data;
if (pclk->lock)
spin_lock_irqsave(pclk->lock, flags);
if (pclk->param.csr_reg) {
pr_debug("%s clock enabled\n", clk_hw_get_name(hw));
/* First enable the clock */
data = xgene_clk_read(pclk->param.csr_reg +
pclk->param.reg_clk_offset);
data |= pclk->param.reg_clk_mask;
xgene_clk_write(data, pclk->param.csr_reg +
pclk->param.reg_clk_offset);
pr_debug("%s clk offset 0x%08X mask 0x%08X value 0x%08X\n",
clk_hw_get_name(hw),
pclk->param.reg_clk_offset, pclk->param.reg_clk_mask,
data);
/* Second enable the CSR */
data = xgene_clk_read(pclk->param.csr_reg +
pclk->param.reg_csr_offset);
data &= ~pclk->param.reg_csr_mask;
xgene_clk_write(data, pclk->param.csr_reg +
pclk->param.reg_csr_offset);
pr_debug("%s csr offset 0x%08X mask 0x%08X value 0x%08X\n",
clk_hw_get_name(hw),
pclk->param.reg_csr_offset, pclk->param.reg_csr_mask,
data);
}
if (pclk->lock)
spin_unlock_irqrestore(pclk->lock, flags);
return 0;
}
static void xgene_clk_disable(struct clk_hw *hw)
{
struct xgene_clk *pclk = to_xgene_clk(hw);
unsigned long flags = 0;
u32 data;
if (pclk->lock)
spin_lock_irqsave(pclk->lock, flags);
if (pclk->param.csr_reg) {
pr_debug("%s clock disabled\n", clk_hw_get_name(hw));
/* First put the CSR in reset */
data = xgene_clk_read(pclk->param.csr_reg +
pclk->param.reg_csr_offset);
data |= pclk->param.reg_csr_mask;
xgene_clk_write(data, pclk->param.csr_reg +
pclk->param.reg_csr_offset);
/* Second disable the clock */
data = xgene_clk_read(pclk->param.csr_reg +
pclk->param.reg_clk_offset);
data &= ~pclk->param.reg_clk_mask;
xgene_clk_write(data, pclk->param.csr_reg +
pclk->param.reg_clk_offset);
}
if (pclk->lock)
spin_unlock_irqrestore(pclk->lock, flags);
}
static int xgene_clk_is_enabled(struct clk_hw *hw)
{
struct xgene_clk *pclk = to_xgene_clk(hw);
u32 data = 0;
if (pclk->param.csr_reg) {
pr_debug("%s clock checking\n", clk_hw_get_name(hw));
data = xgene_clk_read(pclk->param.csr_reg +
pclk->param.reg_clk_offset);
pr_debug("%s clock is %s\n", clk_hw_get_name(hw),
data & pclk->param.reg_clk_mask ? "enabled" :
"disabled");
} else {
return 1;
}
return data & pclk->param.reg_clk_mask ? 1 : 0;
}
static unsigned long xgene_clk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct xgene_clk *pclk = to_xgene_clk(hw);
u32 data;
if (pclk->param.divider_reg) {
data = xgene_clk_read(pclk->param.divider_reg +
pclk->param.reg_divider_offset);
data >>= pclk->param.reg_divider_shift;
data &= (1 << pclk->param.reg_divider_width) - 1;
pr_debug("%s clock recalc rate %ld parent %ld\n",
clk_hw_get_name(hw),
parent_rate / data, parent_rate);
return parent_rate / data;
} else {
pr_debug("%s clock recalc rate %ld parent %ld\n",
clk_hw_get_name(hw), parent_rate, parent_rate);
return parent_rate;
}
}
static int xgene_clk_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct xgene_clk *pclk = to_xgene_clk(hw);
unsigned long flags = 0;
u32 data;
u32 divider;
u32 divider_save;
if (pclk->lock)
spin_lock_irqsave(pclk->lock, flags);
if (pclk->param.divider_reg) {
/* Let's compute the divider */
if (rate > parent_rate)
rate = parent_rate;
divider_save = divider = parent_rate / rate; /* Rounded down */
divider &= (1 << pclk->param.reg_divider_width) - 1;
divider <<= pclk->param.reg_divider_shift;
/* Set new divider */
data = xgene_clk_read(pclk->param.divider_reg +
pclk->param.reg_divider_offset);
data &= ~(((1 << pclk->param.reg_divider_width) - 1)
<< pclk->param.reg_divider_shift);
data |= divider;
xgene_clk_write(data, pclk->param.divider_reg +
pclk->param.reg_divider_offset);
pr_debug("%s clock set rate %ld\n", clk_hw_get_name(hw),
parent_rate / divider_save);
} else {
divider_save = 1;
}
if (pclk->lock)
spin_unlock_irqrestore(pclk->lock, flags);
return parent_rate / divider_save;
}
static long xgene_clk_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
struct xgene_clk *pclk = to_xgene_clk(hw);
unsigned long parent_rate = *prate;
u32 divider;
if (pclk->param.divider_reg) {
/* Let's compute the divider */
if (rate > parent_rate)
rate = parent_rate;
divider = parent_rate / rate; /* Rounded down */
} else {
divider = 1;
}
return parent_rate / divider;
}
static const struct clk_ops xgene_clk_ops = {
.enable = xgene_clk_enable,
.disable = xgene_clk_disable,
.is_enabled = xgene_clk_is_enabled,
.recalc_rate = xgene_clk_recalc_rate,
.set_rate = xgene_clk_set_rate,
.round_rate = xgene_clk_round_rate,
};
static struct clk *xgene_register_clk(struct device *dev,
const char *name, const char *parent_name,
struct xgene_dev_parameters *parameters, spinlock_t *lock)
{
struct xgene_clk *apmclk;
struct clk *clk;
struct clk_init_data init;
int rc;
/* allocate the APM clock structure */
apmclk = kzalloc(sizeof(*apmclk), GFP_KERNEL);
if (!apmclk)
return ERR_PTR(-ENOMEM);
init.name = name;
init.ops = &xgene_clk_ops;
init.flags = 0;
init.parent_names = parent_name ? &parent_name : NULL;
init.num_parents = parent_name ? 1 : 0;
apmclk->lock = lock;
apmclk->hw.init = &init;
apmclk->param = *parameters;
/* Register the clock */
clk = clk_register(dev, &apmclk->hw);
if (IS_ERR(clk)) {
pr_err("%s: could not register clk %s\n", __func__, name);
kfree(apmclk);
return clk;
}
/* Register the clock for lookup */
rc = clk_register_clkdev(clk, name, NULL);
if (rc != 0) {
pr_err("%s: could not register lookup clk %s\n",
__func__, name);
}
return clk;
}
static void __init xgene_devclk_init(struct device_node *np)
{
const char *clk_name = np->full_name;
struct clk *clk;
struct resource res;
int rc;
struct xgene_dev_parameters parameters;
int i;
/* Check if the entry is disabled */
if (!of_device_is_available(np))
return;
/* Parse the DTS register for resource */
parameters.csr_reg = NULL;
parameters.divider_reg = NULL;
for (i = 0; i < 2; i++) {
void __iomem *map_res;
rc = of_address_to_resource(np, i, &res);
if (rc != 0) {
if (i == 0) {
pr_err("no DTS register for %pOF\n", np);
return;
}
break;
}
map_res = of_iomap(np, i);
if (!map_res) {
pr_err("Unable to map resource %d for %pOF\n", i, np);
goto err;
}
if (strcmp(res.name, "div-reg") == 0)
parameters.divider_reg = map_res;
else /* if (strcmp(res.name, "csr-reg") == 0) */
parameters.csr_reg = map_res;
}
if (of_property_read_u32(np, "csr-offset", ¶meters.reg_csr_offset))
parameters.reg_csr_offset = 0;
if (of_property_read_u32(np, "csr-mask", ¶meters.reg_csr_mask))
parameters.reg_csr_mask = 0xF;
if (of_property_read_u32(np, "enable-offset",
¶meters.reg_clk_offset))
parameters.reg_clk_offset = 0x8;
if (of_property_read_u32(np, "enable-mask", ¶meters.reg_clk_mask))
parameters.reg_clk_mask = 0xF;
if (of_property_read_u32(np, "divider-offset",
¶meters.reg_divider_offset))
parameters.reg_divider_offset = 0;
if (of_property_read_u32(np, "divider-width",
¶meters.reg_divider_width))
parameters.reg_divider_width = 0;
if (of_property_read_u32(np, "divider-shift",
¶meters.reg_divider_shift))
parameters.reg_divider_shift = 0;
of_property_read_string(np, "clock-output-names", &clk_name);
clk = xgene_register_clk(NULL, clk_name,
of_clk_get_parent_name(np, 0), ¶meters, &clk_lock);
if (IS_ERR(clk))
goto err;
pr_debug("Add %s clock\n", clk_name);
rc = of_clk_add_provider(np, of_clk_src_simple_get, clk);
if (rc != 0)
pr_err("%s: could register provider clk %pOF\n", __func__, np);
return;
err:
if (parameters.csr_reg)
iounmap(parameters.csr_reg);
if (parameters.divider_reg)
iounmap(parameters.divider_reg);
}
CLK_OF_DECLARE(xgene_socpll_clock, "apm,xgene-socpll-clock", xgene_socpllclk_init);
CLK_OF_DECLARE(xgene_pcppll_clock, "apm,xgene-pcppll-clock", xgene_pcppllclk_init);
CLK_OF_DECLARE(xgene_pmd_clock, "apm,xgene-pmd-clock", xgene_pmdclk_init);
CLK_OF_DECLARE(xgene_socpll_v2_clock, "apm,xgene-socpll-v2-clock",
xgene_socpllclk_init);
CLK_OF_DECLARE(xgene_pcppll_v2_clock, "apm,xgene-pcppll-v2-clock",
xgene_pcppllclk_init);
CLK_OF_DECLARE(xgene_dev_clock, "apm,xgene-device-clock", xgene_devclk_init);
| linux-master | drivers/clk/clk-xgene.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2011-2012 Calxeda, Inc.
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#define HB_PLL_LOCK_500 0x20000000
#define HB_PLL_LOCK 0x10000000
#define HB_PLL_DIVF_SHIFT 20
#define HB_PLL_DIVF_MASK 0x0ff00000
#define HB_PLL_DIVQ_SHIFT 16
#define HB_PLL_DIVQ_MASK 0x00070000
#define HB_PLL_DIVR_SHIFT 8
#define HB_PLL_DIVR_MASK 0x00001f00
#define HB_PLL_RANGE_SHIFT 4
#define HB_PLL_RANGE_MASK 0x00000070
#define HB_PLL_BYPASS 0x00000008
#define HB_PLL_RESET 0x00000004
#define HB_PLL_EXT_BYPASS 0x00000002
#define HB_PLL_EXT_ENA 0x00000001
#define HB_PLL_VCO_MIN_FREQ 2133000000
#define HB_PLL_MAX_FREQ HB_PLL_VCO_MIN_FREQ
#define HB_PLL_MIN_FREQ (HB_PLL_VCO_MIN_FREQ / 64)
#define HB_A9_BCLK_DIV_MASK 0x00000006
#define HB_A9_BCLK_DIV_SHIFT 1
#define HB_A9_PCLK_DIV 0x00000001
struct hb_clk {
struct clk_hw hw;
void __iomem *reg;
char *parent_name;
};
#define to_hb_clk(p) container_of(p, struct hb_clk, hw)
static int clk_pll_prepare(struct clk_hw *hwclk)
{
struct hb_clk *hbclk = to_hb_clk(hwclk);
u32 reg;
reg = readl(hbclk->reg);
reg &= ~HB_PLL_RESET;
writel(reg, hbclk->reg);
while ((readl(hbclk->reg) & HB_PLL_LOCK) == 0)
;
while ((readl(hbclk->reg) & HB_PLL_LOCK_500) == 0)
;
return 0;
}
static void clk_pll_unprepare(struct clk_hw *hwclk)
{
struct hb_clk *hbclk = to_hb_clk(hwclk);
u32 reg;
reg = readl(hbclk->reg);
reg |= HB_PLL_RESET;
writel(reg, hbclk->reg);
}
static int clk_pll_enable(struct clk_hw *hwclk)
{
struct hb_clk *hbclk = to_hb_clk(hwclk);
u32 reg;
reg = readl(hbclk->reg);
reg |= HB_PLL_EXT_ENA;
writel(reg, hbclk->reg);
return 0;
}
static void clk_pll_disable(struct clk_hw *hwclk)
{
struct hb_clk *hbclk = to_hb_clk(hwclk);
u32 reg;
reg = readl(hbclk->reg);
reg &= ~HB_PLL_EXT_ENA;
writel(reg, hbclk->reg);
}
static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
unsigned long parent_rate)
{
struct hb_clk *hbclk = to_hb_clk(hwclk);
unsigned long divf, divq, vco_freq, reg;
reg = readl(hbclk->reg);
if (reg & HB_PLL_EXT_BYPASS)
return parent_rate;
divf = (reg & HB_PLL_DIVF_MASK) >> HB_PLL_DIVF_SHIFT;
divq = (reg & HB_PLL_DIVQ_MASK) >> HB_PLL_DIVQ_SHIFT;
vco_freq = parent_rate * (divf + 1);
return vco_freq / (1 << divq);
}
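/*
 * Pick the smallest DIVQ that keeps the VCO at or above its minimum
 * frequency, then derive DIVF from the reference. Worked example
 * (illustrative values only): rate = 1 GHz, ref_freq = 33333333 Hz
 * gives divq = 2 (VCO at 4 GHz) and divf = 119, for an output of
 * 33333333 * 120 / 4 ~= 1 GHz.
 */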
static void clk_pll_calc(unsigned long rate, unsigned long ref_freq,
u32 *pdivq, u32 *pdivf)
{
u32 divq, divf;
unsigned long vco_freq;
if (rate < HB_PLL_MIN_FREQ)
rate = HB_PLL_MIN_FREQ;
if (rate > HB_PLL_MAX_FREQ)
rate = HB_PLL_MAX_FREQ;
for (divq = 1; divq <= 6; divq++) {
if ((rate * (1 << divq)) >= HB_PLL_VCO_MIN_FREQ)
break;
}
vco_freq = rate * (1 << divq);
divf = (vco_freq + (ref_freq / 2)) / ref_freq;
divf--;
*pdivq = divq;
*pdivf = divf;
}
static long clk_pll_round_rate(struct clk_hw *hwclk, unsigned long rate,
unsigned long *parent_rate)
{
u32 divq, divf;
unsigned long ref_freq = *parent_rate;
clk_pll_calc(rate, ref_freq, &divq, &divf);
return (ref_freq * (divf + 1)) / (1 << divq);
}
static int clk_pll_set_rate(struct clk_hw *hwclk, unsigned long rate,
unsigned long parent_rate)
{
struct hb_clk *hbclk = to_hb_clk(hwclk);
u32 divq, divf;
u32 reg;
clk_pll_calc(rate, parent_rate, &divq, &divf);
reg = readl(hbclk->reg);
if (divf != ((reg & HB_PLL_DIVF_MASK) >> HB_PLL_DIVF_SHIFT)) {
/* Need to re-lock PLL, so put it into bypass mode */
reg |= HB_PLL_EXT_BYPASS;
writel(reg | HB_PLL_EXT_BYPASS, hbclk->reg);
writel(reg | HB_PLL_RESET, hbclk->reg);
reg &= ~(HB_PLL_DIVF_MASK | HB_PLL_DIVQ_MASK);
reg |= (divf << HB_PLL_DIVF_SHIFT) | (divq << HB_PLL_DIVQ_SHIFT);
writel(reg | HB_PLL_RESET, hbclk->reg);
writel(reg, hbclk->reg);
while ((readl(hbclk->reg) & HB_PLL_LOCK) == 0)
;
while ((readl(hbclk->reg) & HB_PLL_LOCK_500) == 0)
;
reg |= HB_PLL_EXT_ENA;
reg &= ~HB_PLL_EXT_BYPASS;
} else {
writel(reg | HB_PLL_EXT_BYPASS, hbclk->reg);
reg &= ~HB_PLL_DIVQ_MASK;
reg |= divq << HB_PLL_DIVQ_SHIFT;
writel(reg | HB_PLL_EXT_BYPASS, hbclk->reg);
}
writel(reg, hbclk->reg);
return 0;
}
static const struct clk_ops clk_pll_ops = {
.prepare = clk_pll_prepare,
.unprepare = clk_pll_unprepare,
.enable = clk_pll_enable,
.disable = clk_pll_disable,
.recalc_rate = clk_pll_recalc_rate,
.round_rate = clk_pll_round_rate,
.set_rate = clk_pll_set_rate,
};
static unsigned long clk_cpu_periphclk_recalc_rate(struct clk_hw *hwclk,
unsigned long parent_rate)
{
struct hb_clk *hbclk = to_hb_clk(hwclk);
u32 div = (readl(hbclk->reg) & HB_A9_PCLK_DIV) ? 8 : 4;
return parent_rate / div;
}
static const struct clk_ops a9periphclk_ops = {
.recalc_rate = clk_cpu_periphclk_recalc_rate,
};
static unsigned long clk_cpu_a9bclk_recalc_rate(struct clk_hw *hwclk,
unsigned long parent_rate)
{
struct hb_clk *hbclk = to_hb_clk(hwclk);
u32 div = (readl(hbclk->reg) & HB_A9_BCLK_DIV_MASK) >> HB_A9_BCLK_DIV_SHIFT;
return parent_rate / (div + 2);
}
static const struct clk_ops a9bclk_ops = {
.recalc_rate = clk_cpu_a9bclk_recalc_rate,
};
static unsigned long clk_periclk_recalc_rate(struct clk_hw *hwclk,
unsigned long parent_rate)
{
struct hb_clk *hbclk = to_hb_clk(hwclk);
u32 div;
div = readl(hbclk->reg) & 0x1f;
div++;
div *= 2;
return parent_rate / div;
}
static long clk_periclk_round_rate(struct clk_hw *hwclk, unsigned long rate,
unsigned long *parent_rate)
{
u32 div;
div = *parent_rate / rate;
div++;
div &= ~0x1;
return *parent_rate / div;
}
static int clk_periclk_set_rate(struct clk_hw *hwclk, unsigned long rate,
unsigned long parent_rate)
{
struct hb_clk *hbclk = to_hb_clk(hwclk);
u32 div;
div = parent_rate / rate;
if (div & 0x1)
return -EINVAL;
writel(div >> 1, hbclk->reg);
return 0;
}
static const struct clk_ops periclk_ops = {
.recalc_rate = clk_periclk_recalc_rate,
.round_rate = clk_periclk_round_rate,
.set_rate = clk_periclk_set_rate,
};
static void __init hb_clk_init(struct device_node *node, const struct clk_ops *ops, unsigned long clkflags)
{
u32 reg;
struct hb_clk *hb_clk;
const char *clk_name = node->name;
const char *parent_name;
struct clk_init_data init;
struct device_node *srnp;
int rc;
rc = of_property_read_u32(node, "reg", ®);
if (WARN_ON(rc))
return;
hb_clk = kzalloc(sizeof(*hb_clk), GFP_KERNEL);
if (WARN_ON(!hb_clk))
return;
/* Map system registers */
srnp = of_find_compatible_node(NULL, NULL, "calxeda,hb-sregs");
hb_clk->reg = of_iomap(srnp, 0);
of_node_put(srnp);
BUG_ON(!hb_clk->reg);
hb_clk->reg += reg;
of_property_read_string(node, "clock-output-names", &clk_name);
init.name = clk_name;
init.ops = ops;
init.flags = clkflags;
parent_name = of_clk_get_parent_name(node, 0);
init.parent_names = &parent_name;
init.num_parents = 1;
hb_clk->hw.init = &init;
rc = clk_hw_register(NULL, &hb_clk->hw);
if (WARN_ON(rc)) {
kfree(hb_clk);
return;
}
of_clk_add_hw_provider(node, of_clk_hw_simple_get, &hb_clk->hw);
}
static void __init hb_pll_init(struct device_node *node)
{
hb_clk_init(node, &clk_pll_ops, 0);
}
CLK_OF_DECLARE(hb_pll, "calxeda,hb-pll-clock", hb_pll_init);
static void __init hb_a9periph_init(struct device_node *node)
{
hb_clk_init(node, &a9periphclk_ops, 0);
}
CLK_OF_DECLARE(hb_a9periph, "calxeda,hb-a9periph-clock", hb_a9periph_init);
static void __init hb_a9bus_init(struct device_node *node)
{
hb_clk_init(node, &a9bclk_ops, CLK_IS_CRITICAL);
}
CLK_OF_DECLARE(hb_a9bus, "calxeda,hb-a9bus-clock", hb_a9bus_init);
static void __init hb_emmc_init(struct device_node *node)
{
hb_clk_init(node, &periclk_ops, 0);
}
CLK_OF_DECLARE(hb_emmc, "calxeda,hb-emmc-clock", hb_emmc_init);
| linux-master | drivers/clk/clk-highbank.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Clock driver for Loongson-1 SoC
*
* Copyright (C) 2012-2023 Keguang Zhang <[email protected]>
*/
#include <linux/bits.h>
#include <linux/clk-provider.h>
#include <linux/container_of.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/printk.h>
#include <dt-bindings/clock/loongson,ls1x-clk.h>
/* Loongson 1 Clock Register Definitions */
#define CLK_PLL_FREQ 0x0
#define CLK_PLL_DIV 0x4
static DEFINE_SPINLOCK(ls1x_clk_div_lock);
struct ls1x_clk_pll_data {
u32 fixed;
u8 shift;
u8 int_shift;
u8 int_width;
u8 frac_shift;
u8 frac_width;
};
struct ls1x_clk_div_data {
u8 shift;
u8 width;
unsigned long flags;
const struct clk_div_table *table;
u8 bypass_shift;
u8 bypass_inv;
spinlock_t *lock; /* protect access to DIV registers */
};
struct ls1x_clk {
void __iomem *reg;
unsigned int offset;
struct clk_hw hw;
const void *data;
};
#define to_ls1x_clk(_hw) container_of(_hw, struct ls1x_clk, hw)
static inline unsigned long ls1x_pll_rate_part(unsigned int val,
unsigned int shift,
unsigned int width)
{
return (val & GENMASK(shift + width, shift)) >> shift;
}
static unsigned long ls1x_pll_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct ls1x_clk *ls1x_clk = to_ls1x_clk(hw);
const struct ls1x_clk_pll_data *d = ls1x_clk->data;
u32 val, rate;
val = readl(ls1x_clk->reg);
rate = d->fixed;
rate += ls1x_pll_rate_part(val, d->int_shift, d->int_width);
if (d->frac_width)
rate += ls1x_pll_rate_part(val, d->frac_shift, d->frac_width);
rate *= parent_rate;
rate >>= d->shift;
return rate;
}
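/*
 * Worked example (illustrative values only): on the LS1B
 * (fixed = 12, shift = 1, 5-bit fractional field) a 33 MHz
 * xtal with a fractional field of 20 yields a PLL rate of
 * (12 + 20) * 33 MHz >> 1 = 528 MHz.
 */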
static const struct clk_ops ls1x_pll_clk_ops = {
.recalc_rate = ls1x_pll_recalc_rate,
};
static unsigned long ls1x_divider_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct ls1x_clk *ls1x_clk = to_ls1x_clk(hw);
const struct ls1x_clk_div_data *d = ls1x_clk->data;
unsigned int val;
val = readl(ls1x_clk->reg) >> d->shift;
val &= clk_div_mask(d->width);
return divider_recalc_rate(hw, parent_rate, val, d->table,
d->flags, d->width);
}
static long ls1x_divider_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
struct ls1x_clk *ls1x_clk = to_ls1x_clk(hw);
const struct ls1x_clk_div_data *d = ls1x_clk->data;
return divider_round_rate(hw, rate, prate, d->table,
d->width, d->flags);
}
static int ls1x_divider_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct ls1x_clk *ls1x_clk = to_ls1x_clk(hw);
const struct ls1x_clk_div_data *d = ls1x_clk->data;
int val, div_val;
unsigned long flags = 0;
div_val = divider_get_val(rate, parent_rate, d->table,
d->width, d->flags);
if (div_val < 0)
return div_val;
spin_lock_irqsave(d->lock, flags);
/* Bypass the clock */
val = readl(ls1x_clk->reg);
if (d->bypass_inv)
val &= ~BIT(d->bypass_shift);
else
val |= BIT(d->bypass_shift);
writel(val, ls1x_clk->reg);
val = readl(ls1x_clk->reg);
val &= ~(clk_div_mask(d->width) << d->shift);
val |= (u32)div_val << d->shift;
writel(val, ls1x_clk->reg);
/* Restore the clock */
val = readl(ls1x_clk->reg);
if (d->bypass_inv)
val |= BIT(d->bypass_shift);
else
val &= ~BIT(d->bypass_shift);
writel(val, ls1x_clk->reg);
spin_unlock_irqrestore(d->lock, flags);
return 0;
}
static const struct clk_ops ls1x_clk_divider_ops = {
.recalc_rate = ls1x_divider_recalc_rate,
.round_rate = ls1x_divider_round_rate,
.set_rate = ls1x_divider_set_rate,
};
#define LS1X_CLK_PLL(_name, _offset, _fixed, _shift, \
f_shift, f_width, i_shift, i_width) \
struct ls1x_clk _name = { \
.offset = (_offset), \
.data = &(const struct ls1x_clk_pll_data) { \
.fixed = (_fixed), \
.shift = (_shift), \
.int_shift = (i_shift), \
.int_width = (i_width), \
.frac_shift = (f_shift), \
.frac_width = (f_width), \
}, \
.hw.init = &(const struct clk_init_data) { \
.name = #_name, \
.ops = &ls1x_pll_clk_ops, \
.parent_data = &(const struct clk_parent_data) { \
.fw_name = "xtal", \
.name = "xtal", \
.index = -1, \
}, \
.num_parents = 1, \
}, \
}
#define LS1X_CLK_DIV(_name, _pname, _offset, _shift, _width, \
_table, _bypass_shift, _bypass_inv, _flags) \
struct ls1x_clk _name = { \
.offset = (_offset), \
.data = &(const struct ls1x_clk_div_data){ \
.shift = (_shift), \
.width = (_width), \
.table = (_table), \
.flags = (_flags), \
.bypass_shift = (_bypass_shift), \
.bypass_inv = (_bypass_inv), \
.lock = &ls1x_clk_div_lock, \
}, \
.hw.init = &(const struct clk_init_data) { \
.name = #_name, \
.ops = &ls1x_clk_divider_ops, \
.parent_hws = (const struct clk_hw *[]) { _pname }, \
.num_parents = 1, \
.flags = CLK_GET_RATE_NOCACHE, \
}, \
}
static LS1X_CLK_PLL(ls1b_clk_pll, CLK_PLL_FREQ, 12, 1, 0, 5, 0, 0);
static LS1X_CLK_DIV(ls1b_clk_cpu, &ls1b_clk_pll.hw, CLK_PLL_DIV,
20, 4, NULL, 8, 0,
CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ROUND_CLOSEST);
static LS1X_CLK_DIV(ls1b_clk_dc, &ls1b_clk_pll.hw, CLK_PLL_DIV,
26, 4, NULL, 12, 0, CLK_DIVIDER_ONE_BASED);
static LS1X_CLK_DIV(ls1b_clk_ahb, &ls1b_clk_pll.hw, CLK_PLL_DIV,
14, 4, NULL, 10, 0, CLK_DIVIDER_ONE_BASED);
static CLK_FIXED_FACTOR(ls1b_clk_apb, "ls1b_clk_apb", "ls1b_clk_ahb", 2, 1,
CLK_SET_RATE_PARENT);
static struct clk_hw_onecell_data ls1b_clk_hw_data = {
.hws = {
[LS1X_CLKID_PLL] = &ls1b_clk_pll.hw,
[LS1X_CLKID_CPU] = &ls1b_clk_cpu.hw,
[LS1X_CLKID_DC] = &ls1b_clk_dc.hw,
[LS1X_CLKID_AHB] = &ls1b_clk_ahb.hw,
[LS1X_CLKID_APB] = &ls1b_clk_apb.hw,
},
.num = CLK_NR_CLKS,
};
static const struct clk_div_table ls1c_ahb_div_table[] = {
[0] = { .val = 0, .div = 2 },
[1] = { .val = 1, .div = 4 },
[2] = { .val = 2, .div = 3 },
[3] = { .val = 3, .div = 3 },
[4] = { /* sentinel */ }
};
static LS1X_CLK_PLL(ls1c_clk_pll, CLK_PLL_FREQ, 0, 2, 8, 8, 16, 8);
static LS1X_CLK_DIV(ls1c_clk_cpu, &ls1c_clk_pll.hw, CLK_PLL_DIV,
8, 7, NULL, 0, 1,
CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ROUND_CLOSEST);
static LS1X_CLK_DIV(ls1c_clk_dc, &ls1c_clk_pll.hw, CLK_PLL_DIV,
24, 7, NULL, 4, 1, CLK_DIVIDER_ONE_BASED);
static LS1X_CLK_DIV(ls1c_clk_ahb, &ls1c_clk_cpu.hw, CLK_PLL_FREQ,
0, 2, ls1c_ahb_div_table, 0, 0, CLK_DIVIDER_ALLOW_ZERO);
static CLK_FIXED_FACTOR(ls1c_clk_apb, "ls1c_clk_apb", "ls1c_clk_ahb", 1, 1,
CLK_SET_RATE_PARENT);
static struct clk_hw_onecell_data ls1c_clk_hw_data = {
.hws = {
[LS1X_CLKID_PLL] = &ls1c_clk_pll.hw,
[LS1X_CLKID_CPU] = &ls1c_clk_cpu.hw,
[LS1X_CLKID_DC] = &ls1c_clk_dc.hw,
[LS1X_CLKID_AHB] = &ls1c_clk_ahb.hw,
[LS1X_CLKID_APB] = &ls1c_clk_apb.hw,
},
.num = CLK_NR_CLKS,
};
static void __init ls1x_clk_init(struct device_node *np,
struct clk_hw_onecell_data *hw_data)
{
struct ls1x_clk *ls1x_clk;
void __iomem *reg;
int i, ret;
reg = of_iomap(np, 0);
if (!reg) {
pr_err("Unable to map base for %pOF\n", np);
return;
}
for (i = 0; i < hw_data->num; i++) {
/* array might be sparse */
if (!hw_data->hws[i])
continue;
if (i != LS1X_CLKID_APB) {
ls1x_clk = to_ls1x_clk(hw_data->hws[i]);
ls1x_clk->reg = reg + ls1x_clk->offset;
}
ret = of_clk_hw_register(np, hw_data->hws[i]);
if (ret)
goto err;
}
ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, hw_data);
if (!ret)
return;
err:
pr_err("Failed to register %pOF\n", np);
while (--i >= 0)
clk_hw_unregister(hw_data->hws[i]);
iounmap(reg);
}
static void __init ls1b_clk_init(struct device_node *np)
{
return ls1x_clk_init(np, &ls1b_clk_hw_data);
}
static void __init ls1c_clk_init(struct device_node *np)
{
return ls1x_clk_init(np, &ls1c_clk_hw_data);
}
CLK_OF_DECLARE(ls1b_clk, "loongson,ls1b-clk", ls1b_clk_init);
CLK_OF_DECLARE(ls1c_clk, "loongson,ls1c-clk", ls1c_clk_init);
| linux-master | drivers/clk/clk-loongson1.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* MOXA ART SoCs clock driver.
*
* Copyright (C) 2013 Jonas Jensen
*
* Jonas Jensen <[email protected]>
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/clkdev.h>
static void __init moxart_of_pll_clk_init(struct device_node *node)
{
void __iomem *base;
struct clk_hw *hw;
struct clk *ref_clk;
unsigned int mul;
const char *name = node->name;
const char *parent_name;
of_property_read_string(node, "clock-output-names", &name);
parent_name = of_clk_get_parent_name(node, 0);
base = of_iomap(node, 0);
if (!base) {
pr_err("%pOF: of_iomap failed\n", node);
return;
}
mul = readl(base + 0x30) >> 3 & 0x3f;
iounmap(base);
ref_clk = of_clk_get(node, 0);
if (IS_ERR(ref_clk)) {
pr_err("%pOF: of_clk_get failed\n", node);
return;
}
hw = clk_hw_register_fixed_factor(NULL, name, parent_name, 0, mul, 1);
if (IS_ERR(hw)) {
pr_err("%pOF: failed to register clock\n", node);
return;
}
clk_hw_register_clkdev(hw, NULL, name);
of_clk_add_hw_provider(node, of_clk_hw_simple_get, hw);
}
CLK_OF_DECLARE(moxart_pll_clock, "moxa,moxart-pll-clock",
moxart_of_pll_clk_init);
static void __init moxart_of_apb_clk_init(struct device_node *node)
{
void __iomem *base;
struct clk_hw *hw;
struct clk *pll_clk;
unsigned int div, val;
unsigned int div_idx[] = { 2, 3, 4, 6, 8 };
const char *name = node->name;
const char *parent_name;
of_property_read_string(node, "clock-output-names", &name);
parent_name = of_clk_get_parent_name(node, 0);
base = of_iomap(node, 0);
if (!base) {
pr_err("%pOF: of_iomap failed\n", node);
return;
}
val = readl(base + 0xc) >> 4 & 0x7;
iounmap(base);
if (val > 4)
val = 0;
div = div_idx[val] * 2;
pll_clk = of_clk_get(node, 0);
if (IS_ERR(pll_clk)) {
pr_err("%pOF: of_clk_get failed\n", node);
return;
}
hw = clk_hw_register_fixed_factor(NULL, name, parent_name, 0, 1, div);
if (IS_ERR(hw)) {
pr_err("%pOF: failed to register clock\n", node);
return;
}
clk_hw_register_clkdev(hw, NULL, name);
of_clk_add_hw_provider(node, of_clk_hw_simple_get, hw);
}
CLK_OF_DECLARE(moxart_apb_clock, "moxa,moxart-apb-clock",
moxart_of_apb_clk_init);
| linux-master | drivers/clk/clk-moxart.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Driver for IDT Versaclock 5
*
* Copyright (C) 2017 Marek Vasut <[email protected]>
*/
/*
* Possible optimizations:
* - Use spread spectrum
* - Use integer divider in FOD if applicable
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <dt-bindings/clock/versaclock.h>
/* VersaClock5 registers */
#define VC5_OTP_CONTROL 0x00
/* Factory-reserved register block */
#define VC5_RSVD_DEVICE_ID 0x01
#define VC5_RSVD_ADC_GAIN_7_0 0x02
#define VC5_RSVD_ADC_GAIN_15_8 0x03
#define VC5_RSVD_ADC_OFFSET_7_0 0x04
#define VC5_RSVD_ADC_OFFSET_15_8 0x05
#define VC5_RSVD_TEMPY 0x06
#define VC5_RSVD_OFFSET_TBIN 0x07
#define VC5_RSVD_GAIN 0x08
#define VC5_RSVD_TEST_NP 0x09
#define VC5_RSVD_UNUSED 0x0a
#define VC5_RSVD_BANDGAP_TRIM_UP 0x0b
#define VC5_RSVD_BANDGAP_TRIM_DN 0x0c
#define VC5_RSVD_CLK_R_12_CLK_AMP_4 0x0d
#define VC5_RSVD_CLK_R_34_CLK_AMP_4 0x0e
#define VC5_RSVD_CLK_AMP_123 0x0f
/* Configuration register block */
#define VC5_PRIM_SRC_SHDN 0x10
#define VC5_PRIM_SRC_SHDN_EN_XTAL BIT(7)
#define VC5_PRIM_SRC_SHDN_EN_CLKIN BIT(6)
#define VC5_PRIM_SRC_SHDN_EN_DOUBLE_XTAL_FREQ BIT(3)
#define VC5_PRIM_SRC_SHDN_SP BIT(1)
#define VC5_PRIM_SRC_SHDN_EN_GBL_SHDN BIT(0)
#define VC5_VCO_BAND 0x11
#define VC5_XTAL_X1_LOAD_CAP 0x12
#define VC5_XTAL_X2_LOAD_CAP 0x13
#define VC5_REF_DIVIDER 0x15
#define VC5_REF_DIVIDER_SEL_PREDIV2 BIT(7)
#define VC5_REF_DIVIDER_REF_DIV(n) ((n) & 0x3f)
#define VC5_VCO_CTRL_AND_PREDIV 0x16
#define VC5_VCO_CTRL_AND_PREDIV_BYPASS_PREDIV BIT(7)
#define VC5_FEEDBACK_INT_DIV 0x17
#define VC5_FEEDBACK_INT_DIV_BITS 0x18
#define VC5_FEEDBACK_FRAC_DIV(n) (0x19 + (n))
#define VC5_RC_CONTROL0 0x1e
#define VC5_RC_CONTROL1 0x1f
/* These registers are named "Unused Factory Reserved Registers" */
#define VC5_RESERVED_X0(idx) (0x20 + ((idx) * 0x10))
#define VC5_RESERVED_X0_BYPASS_SYNC BIT(7) /* bypass_sync<idx> bit */
/* Output divider control for divider 1,2,3,4 */
#define VC5_OUT_DIV_CONTROL(idx) (0x21 + ((idx) * 0x10))
#define VC5_OUT_DIV_CONTROL_RESET BIT(7)
#define VC5_OUT_DIV_CONTROL_SELB_NORM BIT(3)
#define VC5_OUT_DIV_CONTROL_SEL_EXT BIT(2)
#define VC5_OUT_DIV_CONTROL_INT_MODE BIT(1)
#define VC5_OUT_DIV_CONTROL_EN_FOD BIT(0)
#define VC5_OUT_DIV_FRAC(idx, n) (0x22 + ((idx) * 0x10) + (n))
#define VC5_OUT_DIV_FRAC4_OD_SCEE BIT(1)
#define VC5_OUT_DIV_STEP_SPREAD(idx, n) (0x26 + ((idx) * 0x10) + (n))
#define VC5_OUT_DIV_SPREAD_MOD(idx, n) (0x29 + ((idx) * 0x10) + (n))
#define VC5_OUT_DIV_SKEW_INT(idx, n) (0x2b + ((idx) * 0x10) + (n))
#define VC5_OUT_DIV_INT(idx, n) (0x2d + ((idx) * 0x10) + (n))
#define VC5_OUT_DIV_SKEW_FRAC(idx) (0x2f + ((idx) * 0x10))
/* Clock control register for clock 1,2 */
#define VC5_CLK_OUTPUT_CFG(idx, n) (0x60 + ((idx) * 0x2) + (n))
#define VC5_CLK_OUTPUT_CFG0_CFG_SHIFT 5
#define VC5_CLK_OUTPUT_CFG0_CFG_MASK GENMASK(7, VC5_CLK_OUTPUT_CFG0_CFG_SHIFT)
#define VC5_CLK_OUTPUT_CFG0_CFG_LVPECL (VC5_LVPECL)
#define VC5_CLK_OUTPUT_CFG0_CFG_CMOS (VC5_CMOS)
#define VC5_CLK_OUTPUT_CFG0_CFG_HCSL33 (VC5_HCSL33)
#define VC5_CLK_OUTPUT_CFG0_CFG_LVDS (VC5_LVDS)
#define VC5_CLK_OUTPUT_CFG0_CFG_CMOS2 (VC5_CMOS2)
#define VC5_CLK_OUTPUT_CFG0_CFG_CMOSD (VC5_CMOSD)
#define VC5_CLK_OUTPUT_CFG0_CFG_HCSL25 (VC5_HCSL25)
#define VC5_CLK_OUTPUT_CFG0_PWR_SHIFT 3
#define VC5_CLK_OUTPUT_CFG0_PWR_MASK GENMASK(4, VC5_CLK_OUTPUT_CFG0_PWR_SHIFT)
#define VC5_CLK_OUTPUT_CFG0_PWR_18 (0<<VC5_CLK_OUTPUT_CFG0_PWR_SHIFT)
#define VC5_CLK_OUTPUT_CFG0_PWR_25 (2<<VC5_CLK_OUTPUT_CFG0_PWR_SHIFT)
#define VC5_CLK_OUTPUT_CFG0_PWR_33 (3<<VC5_CLK_OUTPUT_CFG0_PWR_SHIFT)
#define VC5_CLK_OUTPUT_CFG0_SLEW_SHIFT 0
#define VC5_CLK_OUTPUT_CFG0_SLEW_MASK GENMASK(1, VC5_CLK_OUTPUT_CFG0_SLEW_SHIFT)
#define VC5_CLK_OUTPUT_CFG0_SLEW_80 (0<<VC5_CLK_OUTPUT_CFG0_SLEW_SHIFT)
#define VC5_CLK_OUTPUT_CFG0_SLEW_85 (1<<VC5_CLK_OUTPUT_CFG0_SLEW_SHIFT)
#define VC5_CLK_OUTPUT_CFG0_SLEW_90 (2<<VC5_CLK_OUTPUT_CFG0_SLEW_SHIFT)
#define VC5_CLK_OUTPUT_CFG0_SLEW_100 (3<<VC5_CLK_OUTPUT_CFG0_SLEW_SHIFT)
#define VC5_CLK_OUTPUT_CFG1_EN_CLKBUF BIT(0)
#define VC5_CLK_OE_SHDN 0x68
#define VC5_CLK_OS_SHDN 0x69
#define VC5_GLOBAL_REGISTER 0x76
#define VC5_GLOBAL_REGISTER_GLOBAL_RESET BIT(5)
/* The minimum VCO frequency is 2.5 GHz. The maximum is variant specific. */
#define VC5_PLL_VCO_MIN 2500000000UL
/* VC5 Input mux settings */
#define VC5_MUX_IN_XIN BIT(0)
#define VC5_MUX_IN_CLKIN BIT(1)
/* Maximum number of clk_out supported by this driver */
#define VC5_MAX_CLK_OUT_NUM 5
/* Maximum number of FODs supported by this driver */
#define VC5_MAX_FOD_NUM 4
/* flags to describe chip features */
/* chip has built-in oscillator */
#define VC5_HAS_INTERNAL_XTAL BIT(0)
/* chip has PFD frequency doubler */
#define VC5_HAS_PFD_FREQ_DBL BIT(1)
/* chip has bits to disable FOD sync */
#define VC5_HAS_BYPASS_SYNC_BIT BIT(2)
/* Supported IDT VC5 models. */
enum vc5_model {
IDT_VC5_5P49V5923,
IDT_VC5_5P49V5925,
IDT_VC5_5P49V5933,
IDT_VC5_5P49V5935,
IDT_VC6_5P49V60,
IDT_VC6_5P49V6901,
IDT_VC6_5P49V6965,
IDT_VC6_5P49V6975,
};
/* Structure to describe features of a particular VC5 model */
struct vc5_chip_info {
const enum vc5_model model;
const unsigned int clk_fod_cnt;
const unsigned int clk_out_cnt;
const u32 flags;
const unsigned long vco_max;
};
struct vc5_driver_data;
struct vc5_hw_data {
struct clk_hw hw;
struct vc5_driver_data *vc5;
u32 div_int;
u32 div_frc;
unsigned int num;
};
struct vc5_out_data {
struct clk_hw hw;
struct vc5_driver_data *vc5;
unsigned int num;
unsigned int clk_output_cfg0;
unsigned int clk_output_cfg0_mask;
};
struct vc5_driver_data {
struct i2c_client *client;
struct regmap *regmap;
const struct vc5_chip_info *chip_info;
struct clk *pin_xin;
struct clk *pin_clkin;
unsigned char clk_mux_ins;
struct clk_hw clk_mux;
struct clk_hw clk_mul;
struct clk_hw clk_pfd;
struct vc5_hw_data clk_pll;
struct vc5_hw_data clk_fod[VC5_MAX_FOD_NUM];
struct vc5_out_data clk_out[VC5_MAX_CLK_OUT_NUM];
};
/*
* VersaClock5 i2c regmap
*/
static bool vc5_regmap_is_writeable(struct device *dev, unsigned int reg)
{
/* Factory reserved regs, make them read-only */
if (reg <= 0xf)
return false;
/* Factory reserved regs, make them read-only */
if (reg == 0x14 || reg == 0x1c || reg == 0x1d)
return false;
return true;
}
static const struct regmap_config vc5_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.cache_type = REGCACHE_RBTREE,
.max_register = 0x76,
.writeable_reg = vc5_regmap_is_writeable,
};
/*
* VersaClock5 input multiplexer between XTAL and CLKIN divider
*/
static unsigned char vc5_mux_get_parent(struct clk_hw *hw)
{
struct vc5_driver_data *vc5 =
container_of(hw, struct vc5_driver_data, clk_mux);
const u8 mask = VC5_PRIM_SRC_SHDN_EN_XTAL | VC5_PRIM_SRC_SHDN_EN_CLKIN;
unsigned int src;
int ret;
ret = regmap_read(vc5->regmap, VC5_PRIM_SRC_SHDN, &src);
if (ret)
return 0;
src &= mask;
if (src == VC5_PRIM_SRC_SHDN_EN_XTAL)
return 0;
if (src == VC5_PRIM_SRC_SHDN_EN_CLKIN)
return 1;
dev_warn(&vc5->client->dev,
"Invalid clock input configuration (%02x)\n", src);
return 0;
}
static int vc5_mux_set_parent(struct clk_hw *hw, u8 index)
{
struct vc5_driver_data *vc5 =
container_of(hw, struct vc5_driver_data, clk_mux);
const u8 mask = VC5_PRIM_SRC_SHDN_EN_XTAL | VC5_PRIM_SRC_SHDN_EN_CLKIN;
u8 src;
if ((index > 1) || !vc5->clk_mux_ins)
return -EINVAL;
if (vc5->clk_mux_ins == (VC5_MUX_IN_CLKIN | VC5_MUX_IN_XIN)) {
if (index == 0)
src = VC5_PRIM_SRC_SHDN_EN_XTAL;
if (index == 1)
src = VC5_PRIM_SRC_SHDN_EN_CLKIN;
} else {
if (index != 0)
return -EINVAL;
if (vc5->clk_mux_ins == VC5_MUX_IN_XIN)
src = VC5_PRIM_SRC_SHDN_EN_XTAL;
else if (vc5->clk_mux_ins == VC5_MUX_IN_CLKIN)
src = VC5_PRIM_SRC_SHDN_EN_CLKIN;
else /* Invalid; should have been caught by vc5_probe() */
return -EINVAL;
}
return regmap_update_bits(vc5->regmap, VC5_PRIM_SRC_SHDN, mask, src);
}
static const struct clk_ops vc5_mux_ops = {
.determine_rate = clk_hw_determine_rate_no_reparent,
.set_parent = vc5_mux_set_parent,
.get_parent = vc5_mux_get_parent,
};
static unsigned long vc5_dbl_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct vc5_driver_data *vc5 =
container_of(hw, struct vc5_driver_data, clk_mul);
unsigned int premul;
int ret;
ret = regmap_read(vc5->regmap, VC5_PRIM_SRC_SHDN, &premul);
if (ret)
return 0;
if (premul & VC5_PRIM_SRC_SHDN_EN_DOUBLE_XTAL_FREQ)
parent_rate *= 2;
return parent_rate;
}
static long vc5_dbl_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
if ((*parent_rate == rate) || ((*parent_rate * 2) == rate))
return rate;
else
return -EINVAL;
}
static int vc5_dbl_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct vc5_driver_data *vc5 =
container_of(hw, struct vc5_driver_data, clk_mul);
u32 mask;
if ((parent_rate * 2) == rate)
mask = VC5_PRIM_SRC_SHDN_EN_DOUBLE_XTAL_FREQ;
else
mask = 0;
return regmap_update_bits(vc5->regmap, VC5_PRIM_SRC_SHDN,
VC5_PRIM_SRC_SHDN_EN_DOUBLE_XTAL_FREQ,
mask);
}
static const struct clk_ops vc5_dbl_ops = {
.recalc_rate = vc5_dbl_recalc_rate,
.round_rate = vc5_dbl_round_rate,
.set_rate = vc5_dbl_set_rate,
};
static unsigned long vc5_pfd_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct vc5_driver_data *vc5 =
container_of(hw, struct vc5_driver_data, clk_pfd);
unsigned int prediv, div;
int ret;
ret = regmap_read(vc5->regmap, VC5_VCO_CTRL_AND_PREDIV, &prediv);
if (ret)
return 0;
/* The bypass_prediv is set, PLL fed from Ref_in directly. */
if (prediv & VC5_VCO_CTRL_AND_PREDIV_BYPASS_PREDIV)
return parent_rate;
ret = regmap_read(vc5->regmap, VC5_REF_DIVIDER, &div);
if (ret)
return 0;
/* The Sel_prediv2 is set, PLL fed from prediv2 (Ref_in / 2) */
if (div & VC5_REF_DIVIDER_SEL_PREDIV2)
return parent_rate / 2;
else
return parent_rate / VC5_REF_DIVIDER_REF_DIV(div);
}
static long vc5_pfd_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
unsigned long idiv;
/* PLL cannot operate with input clock above 50 MHz. */
if (rate > 50000000)
return -EINVAL;
/* CLKIN within range of PLL input, feed directly to PLL. */
if (*parent_rate <= 50000000)
return *parent_rate;
idiv = DIV_ROUND_UP(*parent_rate, rate);
if (idiv > 127)
return -EINVAL;
return *parent_rate / idiv;
}
static int vc5_pfd_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct vc5_driver_data *vc5 =
container_of(hw, struct vc5_driver_data, clk_pfd);
unsigned long idiv;
int ret;
u8 div;
/* CLKIN within range of PLL input, feed directly to PLL. */
if (parent_rate <= 50000000) {
ret = regmap_set_bits(vc5->regmap, VC5_VCO_CTRL_AND_PREDIV,
VC5_VCO_CTRL_AND_PREDIV_BYPASS_PREDIV);
if (ret)
return ret;
return regmap_update_bits(vc5->regmap, VC5_REF_DIVIDER, 0xff, 0x00);
}
idiv = DIV_ROUND_UP(parent_rate, rate);
/* We have dedicated div-2 predivider. */
if (idiv == 2)
div = VC5_REF_DIVIDER_SEL_PREDIV2;
else
div = VC5_REF_DIVIDER_REF_DIV(idiv);
ret = regmap_update_bits(vc5->regmap, VC5_REF_DIVIDER, 0xff, div);
if (ret)
return ret;
return regmap_clear_bits(vc5->regmap, VC5_VCO_CTRL_AND_PREDIV,
VC5_VCO_CTRL_AND_PREDIV_BYPASS_PREDIV);
}
static const struct clk_ops vc5_pfd_ops = {
.recalc_rate = vc5_pfd_recalc_rate,
.round_rate = vc5_pfd_round_rate,
.set_rate = vc5_pfd_set_rate,
};
/*
* VersaClock5 PLL/VCO
*/
static unsigned long vc5_pll_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw);
struct vc5_driver_data *vc5 = hwdata->vc5;
u32 div_int, div_frc;
u8 fb[5];
regmap_bulk_read(vc5->regmap, VC5_FEEDBACK_INT_DIV, fb, 5);
div_int = (fb[0] << 4) | (fb[1] >> 4);
div_frc = (fb[2] << 16) | (fb[3] << 8) | fb[4];
/* The PLL divider has 12 integer bits and 24 fractional bits */
return (parent_rate * div_int) + ((parent_rate * div_frc) >> 24);
}
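/*
 * Worked example (illustrative numbers only): with a 25 MHz PFD and
 * a 2.8 GHz VCO target, div_int = 112 and div_frc = 0; a 2.81 GHz
 * target keeps div_int = 112 and encodes the remaining
 * 10 MHz / 25 MHz in the 24-bit fractional field (~0x666666).
 */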
static long vc5_pll_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw);
struct vc5_driver_data *vc5 = hwdata->vc5;
u32 div_int;
u64 div_frc;
rate = clamp(rate, VC5_PLL_VCO_MIN, vc5->chip_info->vco_max);
/* Determine integer part, which is 12 bit wide */
div_int = rate / *parent_rate;
if (div_int > 0xfff)
rate = *parent_rate * 0xfff;
/* Determine best fractional part, which is 24 bit wide */
div_frc = rate % *parent_rate;
div_frc *= BIT(24) - 1;
do_div(div_frc, *parent_rate);
hwdata->div_int = div_int;
hwdata->div_frc = (u32)div_frc;
return (*parent_rate * div_int) + ((*parent_rate * div_frc) >> 24);
}
static int vc5_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw);
struct vc5_driver_data *vc5 = hwdata->vc5;
u8 fb[5];
fb[0] = hwdata->div_int >> 4;
fb[1] = hwdata->div_int << 4;
fb[2] = hwdata->div_frc >> 16;
fb[3] = hwdata->div_frc >> 8;
fb[4] = hwdata->div_frc;
return regmap_bulk_write(vc5->regmap, VC5_FEEDBACK_INT_DIV, fb, 5);
}
static const struct clk_ops vc5_pll_ops = {
.recalc_rate = vc5_pll_recalc_rate,
.round_rate = vc5_pll_round_rate,
.set_rate = vc5_pll_set_rate,
};
static unsigned long vc5_fod_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw);
struct vc5_driver_data *vc5 = hwdata->vc5;
/* VCO frequency is divided by two before entering FOD */
u32 f_in = parent_rate / 2;
u32 div_int, div_frc;
u8 od_int[2];
u8 od_frc[4];
regmap_bulk_read(vc5->regmap, VC5_OUT_DIV_INT(hwdata->num, 0),
od_int, 2);
regmap_bulk_read(vc5->regmap, VC5_OUT_DIV_FRAC(hwdata->num, 0),
od_frc, 4);
div_int = (od_int[0] << 4) | (od_int[1] >> 4);
div_frc = (od_frc[0] << 22) | (od_frc[1] << 14) |
(od_frc[2] << 6) | (od_frc[3] >> 2);
/* Avoid division by zero if the output is not configured. */
if (div_int == 0 && div_frc == 0)
return 0;
/* The output divider has 12 integer bits and 30 fractional bits */
return div64_u64((u64)f_in << 24ULL, ((u64)div_int << 24ULL) + div_frc);
}
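/*
 * Worked example (illustrative numbers only): with the VCO at
 * 2.8 GHz, f_in = 1.4 GHz; a 100 MHz target gives div_int = 14
 * and div_frc = 0, so the FOD output is 1.4 GHz / 14 = 100 MHz.
 */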
static long vc5_fod_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw);
/* VCO frequency is divided by two before entering FOD */
u32 f_in = *parent_rate / 2;
u32 div_int;
u64 div_frc;
/* Determine integer part, which is 12 bit wide */
div_int = f_in / rate;
/*
* WARNING: The clock chip does not output a signal if the integer part
* of the divider is 0xfff and the fractional part is non-zero.
* Clamp the divider at 0xffe to keep the code simple.
*/
if (div_int > 0xffe) {
div_int = 0xffe;
rate = f_in / div_int;
}
/* Determine best fractional part, which is 30 bit wide */
div_frc = f_in % rate;
div_frc <<= 24;
do_div(div_frc, rate);
hwdata->div_int = div_int;
hwdata->div_frc = (u32)div_frc;
return div64_u64((u64)f_in << 24ULL, ((u64)div_int << 24ULL) + div_frc);
}
static int vc5_fod_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw);
struct vc5_driver_data *vc5 = hwdata->vc5;
u8 data[14] = {
hwdata->div_frc >> 22, hwdata->div_frc >> 14,
hwdata->div_frc >> 6, hwdata->div_frc << 2,
0, 0, 0, 0, 0,
0, 0,
hwdata->div_int >> 4, hwdata->div_int << 4,
0
};
int ret;
ret = regmap_bulk_write(vc5->regmap, VC5_OUT_DIV_FRAC(hwdata->num, 0),
data, 14);
if (ret)
return ret;
/*
* Toggle magic bit in undocumented register for unknown reason.
* This is what the IDT timing commander tool does and the chip
* datasheet somewhat implies this is needed, but the register
* and the bit is not documented.
*/
ret = regmap_clear_bits(vc5->regmap, VC5_GLOBAL_REGISTER,
VC5_GLOBAL_REGISTER_GLOBAL_RESET);
if (ret)
return ret;
return regmap_set_bits(vc5->regmap, VC5_GLOBAL_REGISTER,
VC5_GLOBAL_REGISTER_GLOBAL_RESET);
}
static const struct clk_ops vc5_fod_ops = {
.recalc_rate = vc5_fod_recalc_rate,
.round_rate = vc5_fod_round_rate,
.set_rate = vc5_fod_set_rate,
};
static int vc5_clk_out_prepare(struct clk_hw *hw)
{
struct vc5_out_data *hwdata = container_of(hw, struct vc5_out_data, hw);
struct vc5_driver_data *vc5 = hwdata->vc5;
const u8 mask = VC5_OUT_DIV_CONTROL_SELB_NORM |
VC5_OUT_DIV_CONTROL_SEL_EXT |
VC5_OUT_DIV_CONTROL_EN_FOD;
unsigned int src;
int ret;
/*
* When enabling a FOD, all currently enabled FODs are briefly
* stopped in order to synchronize all of them. This causes a clock
* disruption to any unrelated chips that might be already using
* other clock outputs. Bypass the sync feature to avoid the issue,
* which is possible on the VersaClock 6E family via reserved
* registers.
*/
if (vc5->chip_info->flags & VC5_HAS_BYPASS_SYNC_BIT) {
ret = regmap_set_bits(vc5->regmap,
VC5_RESERVED_X0(hwdata->num),
VC5_RESERVED_X0_BYPASS_SYNC);
if (ret)
return ret;
}
/*
* If the input mux is disabled, enable it first and
* select source from matching FOD.
*/
ret = regmap_read(vc5->regmap, VC5_OUT_DIV_CONTROL(hwdata->num), &src);
if (ret)
return ret;
if ((src & mask) == 0) {
src = VC5_OUT_DIV_CONTROL_RESET | VC5_OUT_DIV_CONTROL_EN_FOD;
ret = regmap_update_bits(vc5->regmap,
VC5_OUT_DIV_CONTROL(hwdata->num),
mask | VC5_OUT_DIV_CONTROL_RESET, src);
if (ret)
return ret;
}
/* Enable the clock buffer */
ret = regmap_set_bits(vc5->regmap, VC5_CLK_OUTPUT_CFG(hwdata->num, 1),
VC5_CLK_OUTPUT_CFG1_EN_CLKBUF);
if (ret)
return ret;
if (hwdata->clk_output_cfg0_mask) {
dev_dbg(&vc5->client->dev, "Update output %d mask 0x%0X val 0x%0X\n",
hwdata->num, hwdata->clk_output_cfg0_mask,
hwdata->clk_output_cfg0);
ret = regmap_update_bits(vc5->regmap,
VC5_CLK_OUTPUT_CFG(hwdata->num, 0),
hwdata->clk_output_cfg0_mask,
hwdata->clk_output_cfg0);
if (ret)
return ret;
}
return 0;
}
static void vc5_clk_out_unprepare(struct clk_hw *hw)
{
struct vc5_out_data *hwdata = container_of(hw, struct vc5_out_data, hw);
struct vc5_driver_data *vc5 = hwdata->vc5;
/* Disable the clock buffer */
regmap_clear_bits(vc5->regmap, VC5_CLK_OUTPUT_CFG(hwdata->num, 1),
VC5_CLK_OUTPUT_CFG1_EN_CLKBUF);
}
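/*
 * Parent index convention for the output muxes: index 0 selects the
 * FOD path (EN_FOD), index 1 selects the external/bypass input
 * (SELB_NORM | SEL_EXT). get_parent() below decodes the register back
 * into these indices and falls back to 0 for unexpected combinations.
 */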
static unsigned char vc5_clk_out_get_parent(struct clk_hw *hw)
{
struct vc5_out_data *hwdata = container_of(hw, struct vc5_out_data, hw);
struct vc5_driver_data *vc5 = hwdata->vc5;
const u8 mask = VC5_OUT_DIV_CONTROL_SELB_NORM |
VC5_OUT_DIV_CONTROL_SEL_EXT |
VC5_OUT_DIV_CONTROL_EN_FOD;
const u8 fodclkmask = VC5_OUT_DIV_CONTROL_SELB_NORM |
VC5_OUT_DIV_CONTROL_EN_FOD;
const u8 extclk = VC5_OUT_DIV_CONTROL_SELB_NORM |
VC5_OUT_DIV_CONTROL_SEL_EXT;
unsigned int src;
int ret;
ret = regmap_read(vc5->regmap, VC5_OUT_DIV_CONTROL(hwdata->num), &src);
if (ret)
return 0;
src &= mask;
if (src == 0) /* Input mux set to DISABLED */
return 0;
if ((src & fodclkmask) == VC5_OUT_DIV_CONTROL_EN_FOD)
return 0;
if (src == extclk)
return 1;
dev_warn(&vc5->client->dev,
"Invalid clock output configuration (%02x)\n", src);
return 0;
}
static int vc5_clk_out_set_parent(struct clk_hw *hw, u8 index)
{
struct vc5_out_data *hwdata = container_of(hw, struct vc5_out_data, hw);
struct vc5_driver_data *vc5 = hwdata->vc5;
const u8 mask = VC5_OUT_DIV_CONTROL_RESET |
VC5_OUT_DIV_CONTROL_SELB_NORM |
VC5_OUT_DIV_CONTROL_SEL_EXT |
VC5_OUT_DIV_CONTROL_EN_FOD;
const u8 extclk = VC5_OUT_DIV_CONTROL_SELB_NORM |
VC5_OUT_DIV_CONTROL_SEL_EXT;
u8 src = VC5_OUT_DIV_CONTROL_RESET;
if (index == 0)
src |= VC5_OUT_DIV_CONTROL_EN_FOD;
else
src |= extclk;
return regmap_update_bits(vc5->regmap, VC5_OUT_DIV_CONTROL(hwdata->num),
mask, src);
}
static const struct clk_ops vc5_clk_out_ops = {
.prepare = vc5_clk_out_prepare,
.unprepare = vc5_clk_out_unprepare,
.determine_rate = clk_hw_determine_rate_no_reparent,
.set_parent = vc5_clk_out_set_parent,
.get_parent = vc5_clk_out_get_parent,
};
static struct clk_hw *vc5_of_clk_get(struct of_phandle_args *clkspec,
void *data)
{
struct vc5_driver_data *vc5 = data;
unsigned int idx = clkspec->args[0];
if (idx >= vc5->chip_info->clk_out_cnt)
return ERR_PTR(-EINVAL);
return &vc5->clk_out[idx].hw;
}
static int vc5_map_index_to_output(const enum vc5_model model,
const unsigned int n)
{
switch (model) {
case IDT_VC5_5P49V5933:
return (n == 0) ? 0 : 3;
case IDT_VC5_5P49V5923:
case IDT_VC5_5P49V5925:
case IDT_VC5_5P49V5935:
case IDT_VC6_5P49V6901:
case IDT_VC6_5P49V6965:
case IDT_VC6_5P49V6975:
default:
return n;
}
}
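/*
 * The vc5_update_*() helpers below parse optional per-output
 * configuration from OUTn child nodes of the I2C device node, e.g.
 * (illustrative fragment; idt,mode takes one of the
 * VC5_CLK_OUTPUT_CFG0_CFG_* values):
 *
 *	OUT1 {
 *		idt,voltage-microvolt = <1800000>;
 *		idt,slew-percent = <80>;
 *	};
 */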
static int vc5_update_mode(struct device_node *np_output,
struct vc5_out_data *clk_out)
{
u32 value;
if (!of_property_read_u32(np_output, "idt,mode", &value)) {
clk_out->clk_output_cfg0_mask |= VC5_CLK_OUTPUT_CFG0_CFG_MASK;
switch (value) {
case VC5_CLK_OUTPUT_CFG0_CFG_LVPECL:
case VC5_CLK_OUTPUT_CFG0_CFG_CMOS:
case VC5_CLK_OUTPUT_CFG0_CFG_HCSL33:
case VC5_CLK_OUTPUT_CFG0_CFG_LVDS:
case VC5_CLK_OUTPUT_CFG0_CFG_CMOS2:
case VC5_CLK_OUTPUT_CFG0_CFG_CMOSD:
case VC5_CLK_OUTPUT_CFG0_CFG_HCSL25:
clk_out->clk_output_cfg0 |=
value << VC5_CLK_OUTPUT_CFG0_CFG_SHIFT;
break;
default:
return -EINVAL;
}
}
return 0;
}
static int vc5_update_power(struct device_node *np_output,
struct vc5_out_data *clk_out)
{
u32 value;
if (!of_property_read_u32(np_output, "idt,voltage-microvolt",
&value)) {
clk_out->clk_output_cfg0_mask |= VC5_CLK_OUTPUT_CFG0_PWR_MASK;
switch (value) {
case 1800000:
clk_out->clk_output_cfg0 |= VC5_CLK_OUTPUT_CFG0_PWR_18;
break;
case 2500000:
clk_out->clk_output_cfg0 |= VC5_CLK_OUTPUT_CFG0_PWR_25;
break;
case 3300000:
clk_out->clk_output_cfg0 |= VC5_CLK_OUTPUT_CFG0_PWR_33;
break;
default:
return -EINVAL;
}
}
return 0;
}
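/*
 * Worked example (illustrative): idt,xtal-load-femtofarads = <12000>
 * gives DIV_ROUND_CLOSEST(12000 - 9000, 430) = 7, which is below the
 * special case and is therefore shifted left once (skipping XTAL[0]),
 * yielding a mapped value of 14 (0b001110).
 */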
static int vc5_map_cap_value(u32 femtofarads)
{
int mapped_value;
/*
* The datasheet explicitly states 9000 - 25000 with 0.5pF
* steps, but the Programmer's guide shows the steps are 0.430pF.
* After getting feedback from Renesas: the 0.5pF steps were the
* goal, but 0.430pF (430fF) is the actual step size.
* Because of this, the actual range goes to 22760 instead of 25000.
*/
if (femtofarads < 9000 || femtofarads > 22760)
return -EINVAL;
/*
* The Programmer's guide shows XTAL[5:0], but in reality
* XTAL[0] and XTAL[1] both act as the LSB, which makes the math
* awkward. With clarification from Renesas, setting the
* value is simpler if XTAL[0] is ignored.
*/
mapped_value = DIV_ROUND_CLOSEST(femtofarads - 9000, 430);
/*
* Since the calculation ignores XTAL[0], there is one
* special case where mapped_value = 32. In reality, this means
* the real mapped value should be 111111b. In all other cases,
* the mapped_value needs to be shifted one bit to the left.
*/
if (mapped_value > 31)
mapped_value = 0x3f;
else
mapped_value <<= 1;
return mapped_value;
}
static int vc5_update_cap_load(struct device_node *node, struct vc5_driver_data *vc5)
{
u32 value;
int mapped_value;
int ret;
if (of_property_read_u32(node, "idt,xtal-load-femtofarads", &value))
return 0;
mapped_value = vc5_map_cap_value(value);
if (mapped_value < 0)
return mapped_value;
/*
* The mapped_value is really the high 6 bits of
* VC5_XTAL_X1_LOAD_CAP and VC5_XTAL_X2_LOAD_CAP, so
* shift the value two places to the left.
*/
ret = regmap_update_bits(vc5->regmap, VC5_XTAL_X1_LOAD_CAP, ~0x03,
mapped_value << 2);
if (ret)
return ret;
return regmap_update_bits(vc5->regmap, VC5_XTAL_X2_LOAD_CAP, ~0x03,
mapped_value << 2);
}
static int vc5_update_slew(struct device_node *np_output,
struct vc5_out_data *clk_out)
{
u32 value;
if (!of_property_read_u32(np_output, "idt,slew-percent", &value)) {
clk_out->clk_output_cfg0_mask |= VC5_CLK_OUTPUT_CFG0_SLEW_MASK;
switch (value) {
case 80:
clk_out->clk_output_cfg0 |= VC5_CLK_OUTPUT_CFG0_SLEW_80;
break;
case 85:
clk_out->clk_output_cfg0 |= VC5_CLK_OUTPUT_CFG0_SLEW_85;
break;
case 90:
clk_out->clk_output_cfg0 |= VC5_CLK_OUTPUT_CFG0_SLEW_90;
break;
case 100:
clk_out->clk_output_cfg0 |=
VC5_CLK_OUTPUT_CFG0_SLEW_100;
break;
default:
return -EINVAL;
}
}
return 0;
}
static int vc5_get_output_config(struct i2c_client *client,
struct vc5_out_data *clk_out)
{
struct device_node *np_output;
char *child_name;
int ret = 0;
child_name = kasprintf(GFP_KERNEL, "OUT%d", clk_out->num + 1);
if (!child_name)
return -ENOMEM;
np_output = of_get_child_by_name(client->dev.of_node, child_name);
kfree(child_name);
if (!np_output)
return 0;
ret = vc5_update_mode(np_output, clk_out);
if (ret)
goto output_error;
ret = vc5_update_power(np_output, clk_out);
if (ret)
goto output_error;
ret = vc5_update_slew(np_output, clk_out);
output_error:
if (ret) {
dev_err(&client->dev,
"Invalid clock output configuration OUT%d\n",
clk_out->num + 1);
}
of_node_put(np_output);
return ret;
}
static const struct of_device_id clk_vc5_of_match[];
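/*
 * Clock topology registered by vc5_probe() below (the doubler only
 * exists on parts with VC5_HAS_PFD_FREQ_DBL; OUT0_I2C_SELB is fed
 * straight from the input mux):
 *
 *   xin/clkin -> mux -> [dbl] -> pfd -> pll -> fodN -> outN+1
 *                 \-> out0
 */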
static int vc5_probe(struct i2c_client *client)
{
unsigned int oe, sd, src_mask = 0, src_val = 0;
struct vc5_driver_data *vc5;
struct clk_init_data init;
const char *parent_names[2];
unsigned int n, idx = 0;
int ret;
vc5 = devm_kzalloc(&client->dev, sizeof(*vc5), GFP_KERNEL);
if (!vc5)
return -ENOMEM;
i2c_set_clientdata(client, vc5);
vc5->client = client;
vc5->chip_info = i2c_get_match_data(client);
vc5->pin_xin = devm_clk_get(&client->dev, "xin");
if (PTR_ERR(vc5->pin_xin) == -EPROBE_DEFER)
return -EPROBE_DEFER;
vc5->pin_clkin = devm_clk_get(&client->dev, "clkin");
if (PTR_ERR(vc5->pin_clkin) == -EPROBE_DEFER)
return -EPROBE_DEFER;
vc5->regmap = devm_regmap_init_i2c(client, &vc5_regmap_config);
if (IS_ERR(vc5->regmap))
return dev_err_probe(&client->dev, PTR_ERR(vc5->regmap),
"failed to allocate register map\n");
ret = of_property_read_u32(client->dev.of_node, "idt,shutdown", &sd);
if (!ret) {
src_mask |= VC5_PRIM_SRC_SHDN_EN_GBL_SHDN;
if (sd)
src_val |= VC5_PRIM_SRC_SHDN_EN_GBL_SHDN;
} else if (ret != -EINVAL) {
return dev_err_probe(&client->dev, ret,
"could not read idt,shutdown\n");
}
ret = of_property_read_u32(client->dev.of_node,
"idt,output-enable-active", &oe);
if (!ret) {
src_mask |= VC5_PRIM_SRC_SHDN_SP;
if (oe)
src_val |= VC5_PRIM_SRC_SHDN_SP;
} else if (ret != -EINVAL) {
return dev_err_probe(&client->dev, ret,
"could not read idt,output-enable-active\n");
}
ret = regmap_update_bits(vc5->regmap, VC5_PRIM_SRC_SHDN, src_mask,
src_val);
if (ret)
return ret;
/* Register clock input mux */
memset(&init, 0, sizeof(init));
if (!IS_ERR(vc5->pin_xin)) {
vc5->clk_mux_ins |= VC5_MUX_IN_XIN;
parent_names[init.num_parents++] = __clk_get_name(vc5->pin_xin);
} else if (vc5->chip_info->flags & VC5_HAS_INTERNAL_XTAL) {
vc5->pin_xin = clk_register_fixed_rate(&client->dev,
"internal-xtal", NULL,
0, 25000000);
if (IS_ERR(vc5->pin_xin))
return PTR_ERR(vc5->pin_xin);
vc5->clk_mux_ins |= VC5_MUX_IN_XIN;
parent_names[init.num_parents++] = __clk_get_name(vc5->pin_xin);
}
if (!IS_ERR(vc5->pin_clkin)) {
vc5->clk_mux_ins |= VC5_MUX_IN_CLKIN;
parent_names[init.num_parents++] =
__clk_get_name(vc5->pin_clkin);
}
if (!init.num_parents)
return dev_err_probe(&client->dev, -EINVAL,
"no input clock specified!\n");
/* Configure Optional Loading Capacitance for external XTAL */
if (!(vc5->chip_info->flags & VC5_HAS_INTERNAL_XTAL)) {
ret = vc5_update_cap_load(client->dev.of_node, vc5);
if (ret)
goto err_clk_register;
}
init.name = kasprintf(GFP_KERNEL, "%pOFn.mux", client->dev.of_node);
if (!init.name) {
ret = -ENOMEM;
goto err_clk;
}
init.ops = &vc5_mux_ops;
init.flags = 0;
init.parent_names = parent_names;
vc5->clk_mux.init = &init;
ret = devm_clk_hw_register(&client->dev, &vc5->clk_mux);
if (ret)
goto err_clk_register;
kfree(init.name); /* clock framework made a copy of the name */
if (vc5->chip_info->flags & VC5_HAS_PFD_FREQ_DBL) {
/* Register frequency doubler */
memset(&init, 0, sizeof(init));
init.name = kasprintf(GFP_KERNEL, "%pOFn.dbl",
client->dev.of_node);
if (!init.name) {
ret = -ENOMEM;
goto err_clk;
}
init.ops = &vc5_dbl_ops;
init.flags = CLK_SET_RATE_PARENT;
init.parent_names = parent_names;
parent_names[0] = clk_hw_get_name(&vc5->clk_mux);
init.num_parents = 1;
vc5->clk_mul.init = &init;
ret = devm_clk_hw_register(&client->dev, &vc5->clk_mul);
if (ret)
goto err_clk_register;
kfree(init.name); /* clock framework made a copy of the name */
}
/* Register PFD */
memset(&init, 0, sizeof(init));
init.name = kasprintf(GFP_KERNEL, "%pOFn.pfd", client->dev.of_node);
if (!init.name) {
ret = -ENOMEM;
goto err_clk;
}
init.ops = &vc5_pfd_ops;
init.flags = CLK_SET_RATE_PARENT;
init.parent_names = parent_names;
if (vc5->chip_info->flags & VC5_HAS_PFD_FREQ_DBL)
parent_names[0] = clk_hw_get_name(&vc5->clk_mul);
else
parent_names[0] = clk_hw_get_name(&vc5->clk_mux);
init.num_parents = 1;
vc5->clk_pfd.init = &init;
ret = devm_clk_hw_register(&client->dev, &vc5->clk_pfd);
if (ret)
goto err_clk_register;
kfree(init.name); /* clock framework made a copy of the name */
/* Register PLL */
memset(&init, 0, sizeof(init));
init.name = kasprintf(GFP_KERNEL, "%pOFn.pll", client->dev.of_node);
if (!init.name) {
ret = -ENOMEM;
goto err_clk;
}
init.ops = &vc5_pll_ops;
init.flags = CLK_SET_RATE_PARENT;
init.parent_names = parent_names;
parent_names[0] = clk_hw_get_name(&vc5->clk_pfd);
init.num_parents = 1;
vc5->clk_pll.num = 0;
vc5->clk_pll.vc5 = vc5;
vc5->clk_pll.hw.init = &init;
ret = devm_clk_hw_register(&client->dev, &vc5->clk_pll.hw);
if (ret)
goto err_clk_register;
kfree(init.name); /* clock framework made a copy of the name */
/* Register FODs */
for (n = 0; n < vc5->chip_info->clk_fod_cnt; n++) {
idx = vc5_map_index_to_output(vc5->chip_info->model, n);
memset(&init, 0, sizeof(init));
init.name = kasprintf(GFP_KERNEL, "%pOFn.fod%d",
client->dev.of_node, idx);
if (!init.name) {
ret = -ENOMEM;
goto err_clk;
}
init.ops = &vc5_fod_ops;
init.flags = CLK_SET_RATE_PARENT;
init.parent_names = parent_names;
parent_names[0] = clk_hw_get_name(&vc5->clk_pll.hw);
init.num_parents = 1;
vc5->clk_fod[n].num = idx;
vc5->clk_fod[n].vc5 = vc5;
vc5->clk_fod[n].hw.init = &init;
ret = devm_clk_hw_register(&client->dev, &vc5->clk_fod[n].hw);
if (ret)
goto err_clk_register;
kfree(init.name); /* clock framework made a copy of the name */
}
/* Register MUX-connected OUT0_I2C_SELB output */
memset(&init, 0, sizeof(init));
init.name = kasprintf(GFP_KERNEL, "%pOFn.out0_sel_i2cb",
client->dev.of_node);
if (!init.name) {
ret = -ENOMEM;
goto err_clk;
}
init.ops = &vc5_clk_out_ops;
init.flags = CLK_SET_RATE_PARENT;
init.parent_names = parent_names;
parent_names[0] = clk_hw_get_name(&vc5->clk_mux);
init.num_parents = 1;
vc5->clk_out[0].num = idx;
vc5->clk_out[0].vc5 = vc5;
vc5->clk_out[0].hw.init = &init;
ret = devm_clk_hw_register(&client->dev, &vc5->clk_out[0].hw);
if (ret)
goto err_clk_register;
kfree(init.name); /* clock framework made a copy of the name */
/* Register FOD-connected OUTx outputs */
for (n = 1; n < vc5->chip_info->clk_out_cnt; n++) {
idx = vc5_map_index_to_output(vc5->chip_info->model, n - 1);
parent_names[0] = clk_hw_get_name(&vc5->clk_fod[idx].hw);
if (n == 1)
parent_names[1] = clk_hw_get_name(&vc5->clk_mux);
else
parent_names[1] =
clk_hw_get_name(&vc5->clk_out[n - 1].hw);
memset(&init, 0, sizeof(init));
init.name = kasprintf(GFP_KERNEL, "%pOFn.out%d",
client->dev.of_node, idx + 1);
if (!init.name) {
ret = -ENOMEM;
goto err_clk;
}
init.ops = &vc5_clk_out_ops;
init.flags = CLK_SET_RATE_PARENT;
init.parent_names = parent_names;
init.num_parents = 2;
vc5->clk_out[n].num = idx;
vc5->clk_out[n].vc5 = vc5;
vc5->clk_out[n].hw.init = &init;
ret = devm_clk_hw_register(&client->dev, &vc5->clk_out[n].hw);
if (ret)
goto err_clk_register;
kfree(init.name); /* clock framework made a copy of the name */
/* Fetch Clock Output configuration from DT (if specified) */
ret = vc5_get_output_config(client, &vc5->clk_out[n]);
if (ret)
goto err_clk;
}
ret = of_clk_add_hw_provider(client->dev.of_node, vc5_of_clk_get, vc5);
if (ret) {
dev_err_probe(&client->dev, ret,
"unable to add clk provider\n");
goto err_clk;
}
return 0;
err_clk_register:
dev_err_probe(&client->dev, ret,
"unable to register %s\n", init.name);
kfree(init.name); /* clock framework made a copy of the name */
err_clk:
if (vc5->chip_info->flags & VC5_HAS_INTERNAL_XTAL)
clk_unregister_fixed_rate(vc5->pin_xin);
return ret;
}
static void vc5_remove(struct i2c_client *client)
{
struct vc5_driver_data *vc5 = i2c_get_clientdata(client);
of_clk_del_provider(client->dev.of_node);
if (vc5->chip_info->flags & VC5_HAS_INTERNAL_XTAL)
clk_unregister_fixed_rate(vc5->pin_xin);
}
static int __maybe_unused vc5_suspend(struct device *dev)
{
struct vc5_driver_data *vc5 = dev_get_drvdata(dev);
regcache_cache_only(vc5->regmap, true);
regcache_mark_dirty(vc5->regmap);
return 0;
}
static int __maybe_unused vc5_resume(struct device *dev)
{
struct vc5_driver_data *vc5 = dev_get_drvdata(dev);
int ret;
regcache_cache_only(vc5->regmap, false);
ret = regcache_sync(vc5->regmap);
if (ret)
dev_err(dev, "Failed to restore register map: %d\n", ret);
return ret;
}
static const struct vc5_chip_info idt_5p49v5923_info = {
.model = IDT_VC5_5P49V5923,
.clk_fod_cnt = 2,
.clk_out_cnt = 3,
.flags = 0,
.vco_max = 3000000000UL,
};
static const struct vc5_chip_info idt_5p49v5925_info = {
.model = IDT_VC5_5P49V5925,
.clk_fod_cnt = 4,
.clk_out_cnt = 5,
.flags = 0,
.vco_max = 3000000000UL,
};
static const struct vc5_chip_info idt_5p49v5933_info = {
.model = IDT_VC5_5P49V5933,
.clk_fod_cnt = 2,
.clk_out_cnt = 3,
.flags = VC5_HAS_INTERNAL_XTAL,
.vco_max = 3000000000UL,
};
static const struct vc5_chip_info idt_5p49v5935_info = {
.model = IDT_VC5_5P49V5935,
.clk_fod_cnt = 4,
.clk_out_cnt = 5,
.flags = VC5_HAS_INTERNAL_XTAL,
.vco_max = 3000000000UL,
};
static const struct vc5_chip_info idt_5p49v60_info = {
.model = IDT_VC6_5P49V60,
.clk_fod_cnt = 4,
.clk_out_cnt = 5,
.flags = VC5_HAS_PFD_FREQ_DBL | VC5_HAS_BYPASS_SYNC_BIT,
.vco_max = 2700000000UL,
};
static const struct vc5_chip_info idt_5p49v6901_info = {
.model = IDT_VC6_5P49V6901,
.clk_fod_cnt = 4,
.clk_out_cnt = 5,
.flags = VC5_HAS_PFD_FREQ_DBL | VC5_HAS_BYPASS_SYNC_BIT,
.vco_max = 3000000000UL,
};
static const struct vc5_chip_info idt_5p49v6965_info = {
.model = IDT_VC6_5P49V6965,
.clk_fod_cnt = 4,
.clk_out_cnt = 5,
.flags = VC5_HAS_BYPASS_SYNC_BIT,
.vco_max = 3000000000UL,
};
static const struct vc5_chip_info idt_5p49v6975_info = {
.model = IDT_VC6_5P49V6975,
.clk_fod_cnt = 4,
.clk_out_cnt = 5,
.flags = VC5_HAS_BYPASS_SYNC_BIT | VC5_HAS_INTERNAL_XTAL,
.vco_max = 3000000000UL,
};
static const struct i2c_device_id vc5_id[] = {
{ "5p49v5923", .driver_data = (kernel_ulong_t)&idt_5p49v5923_info },
{ "5p49v5925", .driver_data = (kernel_ulong_t)&idt_5p49v5925_info },
{ "5p49v5933", .driver_data = (kernel_ulong_t)&idt_5p49v5933_info },
{ "5p49v5935", .driver_data = (kernel_ulong_t)&idt_5p49v5935_info },
{ "5p49v60", .driver_data = (kernel_ulong_t)&idt_5p49v60_info },
{ "5p49v6901", .driver_data = (kernel_ulong_t)&idt_5p49v6901_info },
{ "5p49v6965", .driver_data = (kernel_ulong_t)&idt_5p49v6965_info },
{ "5p49v6975", .driver_data = (kernel_ulong_t)&idt_5p49v6975_info },
{ }
};
MODULE_DEVICE_TABLE(i2c, vc5_id);
static const struct of_device_id clk_vc5_of_match[] = {
{ .compatible = "idt,5p49v5923", .data = &idt_5p49v5923_info },
{ .compatible = "idt,5p49v5925", .data = &idt_5p49v5925_info },
{ .compatible = "idt,5p49v5933", .data = &idt_5p49v5933_info },
{ .compatible = "idt,5p49v5935", .data = &idt_5p49v5935_info },
{ .compatible = "idt,5p49v60", .data = &idt_5p49v60_info },
{ .compatible = "idt,5p49v6901", .data = &idt_5p49v6901_info },
{ .compatible = "idt,5p49v6965", .data = &idt_5p49v6965_info },
{ .compatible = "idt,5p49v6975", .data = &idt_5p49v6975_info },
{ },
};
MODULE_DEVICE_TABLE(of, clk_vc5_of_match);
static SIMPLE_DEV_PM_OPS(vc5_pm_ops, vc5_suspend, vc5_resume);
static struct i2c_driver vc5_driver = {
.driver = {
.name = "vc5",
.pm = &vc5_pm_ops,
.of_match_table = clk_vc5_of_match,
},
.probe = vc5_probe,
.remove = vc5_remove,
.id_table = vc5_id,
};
module_i2c_driver(vc5_driver);
MODULE_AUTHOR("Marek Vasut <[email protected]>");
MODULE_DESCRIPTION("IDT VersaClock 5 driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/clk/clk-versaclock5.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Freescale SAI BCLK as a generic clock driver
*
* Copyright 2020 Michael Walle <[email protected]>
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#define I2S_CSR 0x00
#define I2S_CR2 0x08
#define CSR_BCE_BIT 28
#define CR2_BCD BIT(24)
#define CR2_DIV_SHIFT 0
#define CR2_DIV_WIDTH 8
struct fsl_sai_clk {
struct clk_divider div;
struct clk_gate gate;
spinlock_t lock;
};
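/*
 * The BCLK is modelled as a composite clock: a clk_divider on the CR2
 * DIV field chained with a clk_gate on the CSR bit-clock-enable bit,
 * combined via devm_clk_hw_register_composite_pdata(). Both share one
 * spinlock since they touch registers of the same SAI instance.
 */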
static int fsl_sai_clk_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct fsl_sai_clk *sai_clk;
struct clk_parent_data pdata = { .index = 0 };
void __iomem *base;
struct clk_hw *hw;
sai_clk = devm_kzalloc(dev, sizeof(*sai_clk), GFP_KERNEL);
if (!sai_clk)
return -ENOMEM;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
spin_lock_init(&sai_clk->lock);
sai_clk->gate.reg = base + I2S_CSR;
sai_clk->gate.bit_idx = CSR_BCE_BIT;
sai_clk->gate.lock = &sai_clk->lock;
sai_clk->div.reg = base + I2S_CR2;
sai_clk->div.shift = CR2_DIV_SHIFT;
sai_clk->div.width = CR2_DIV_WIDTH;
sai_clk->div.lock = &sai_clk->lock;
/* set clock direction, we are the BCLK master */
writel(CR2_BCD, base + I2S_CR2);
hw = devm_clk_hw_register_composite_pdata(dev, dev->of_node->name,
&pdata, 1, NULL, NULL,
&sai_clk->div.hw,
&clk_divider_ops,
&sai_clk->gate.hw,
&clk_gate_ops,
CLK_SET_RATE_GATE);
if (IS_ERR(hw))
return PTR_ERR(hw);
return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, hw);
}
static const struct of_device_id of_fsl_sai_clk_ids[] = {
{ .compatible = "fsl,vf610-sai-clock" },
{ }
};
MODULE_DEVICE_TABLE(of, of_fsl_sai_clk_ids);
static struct platform_driver fsl_sai_clk_driver = {
.probe = fsl_sai_clk_probe,
.driver = {
.name = "fsl-sai-clk",
.of_match_table = of_fsl_sai_clk_ids,
},
};
module_platform_driver(fsl_sai_clk_driver);
MODULE_DESCRIPTION("Freescale SAI bitclock-as-a-clock driver");
MODULE_AUTHOR("Michael Walle <[email protected]>");
MODULE_ALIAS("platform:fsl-sai-clk");
| linux-master | drivers/clk/clk-fsl-sai.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018 Socionext Inc.
* Copyright (C) 2016 Linaro Ltd.
*/
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#define M10V_CLKSEL1 0x0
#define CLKSEL(n) (((n) - 1) * 4 + M10V_CLKSEL1)
#define M10V_PLL1 "pll1"
#define M10V_PLL1DIV2 "pll1-2"
#define M10V_PLL2 "pll2"
#define M10V_PLL2DIV2 "pll2-2"
#define M10V_PLL6 "pll6"
#define M10V_PLL6DIV2 "pll6-2"
#define M10V_PLL6DIV3 "pll6-3"
#define M10V_PLL7 "pll7"
#define M10V_PLL7DIV2 "pll7-2"
#define M10V_PLL7DIV5 "pll7-5"
#define M10V_PLL9 "pll9"
#define M10V_PLL10 "pll10"
#define M10V_PLL10DIV2 "pll10-2"
#define M10V_PLL11 "pll11"
#define M10V_SPI_PARENT0 "spi-parent0"
#define M10V_SPI_PARENT1 "spi-parent1"
#define M10V_SPI_PARENT2 "spi-parent2"
#define M10V_UHS1CLK2_PARENT0 "uhs1clk2-parent0"
#define M10V_UHS1CLK2_PARENT1 "uhs1clk2-parent1"
#define M10V_UHS1CLK2_PARENT2 "uhs1clk2-parent2"
#define M10V_UHS1CLK1_PARENT0 "uhs1clk1-parent0"
#define M10V_UHS1CLK1_PARENT1 "uhs1clk1-parent1"
#define M10V_NFCLK_PARENT0 "nfclk-parent0"
#define M10V_NFCLK_PARENT1 "nfclk-parent1"
#define M10V_NFCLK_PARENT2 "nfclk-parent2"
#define M10V_NFCLK_PARENT3 "nfclk-parent3"
#define M10V_NFCLK_PARENT4 "nfclk-parent4"
#define M10V_NFCLK_PARENT5 "nfclk-parent5"
#define M10V_DCHREQ 1
#define M10V_UPOLL_RATE 1
#define M10V_UTIMEOUT 250
#define M10V_EMMCCLK_ID 0
#define M10V_ACLK_ID 1
#define M10V_HCLK_ID 2
#define M10V_PCLK_ID 3
#define M10V_RCLK_ID 4
#define M10V_SPICLK_ID 5
#define M10V_NFCLK_ID 6
#define M10V_UHS1CLK2_ID 7
#define M10V_NUM_CLKS 8
#define to_m10v_div(_hw) container_of(_hw, struct m10v_clk_divider, hw)
static struct clk_hw_onecell_data *m10v_clk_data;
static DEFINE_SPINLOCK(m10v_crglock);
struct m10v_clk_div_factors {
const char *name;
const char *parent_name;
u32 offset;
u8 shift;
u8 width;
const struct clk_div_table *table;
unsigned long div_flags;
int onecell_idx;
};
struct m10v_clk_div_fixed_data {
const char *name;
const char *parent_name;
u8 div;
u8 mult;
int onecell_idx;
};
struct m10v_clk_mux_factors {
const char *name;
const char * const *parent_names;
u8 num_parents;
u32 offset;
u8 shift;
u8 mask;
u32 *table;
unsigned long mux_flags;
int onecell_idx;
};
static const struct clk_div_table emmcclk_table[] = {
{ .val = 0, .div = 8 },
{ .val = 1, .div = 9 },
{ .val = 2, .div = 10 },
{ .val = 3, .div = 15 },
{ .div = 0 },
};
static const struct clk_div_table mclk400_table[] = {
{ .val = 1, .div = 2 },
{ .val = 3, .div = 4 },
{ .div = 0 },
};
static const struct clk_div_table mclk200_table[] = {
{ .val = 3, .div = 4 },
{ .val = 7, .div = 8 },
{ .div = 0 },
};
static const struct clk_div_table aclk400_table[] = {
{ .val = 1, .div = 2 },
{ .val = 3, .div = 4 },
{ .div = 0 },
};
static const struct clk_div_table aclk300_table[] = {
{ .val = 0, .div = 2 },
{ .val = 1, .div = 3 },
{ .div = 0 },
};
static const struct clk_div_table aclk_table[] = {
{ .val = 3, .div = 4 },
{ .val = 7, .div = 8 },
{ .div = 0 },
};
static const struct clk_div_table aclkexs_table[] = {
{ .val = 3, .div = 4 },
{ .val = 4, .div = 5 },
{ .val = 5, .div = 6 },
{ .val = 7, .div = 8 },
{ .div = 0 },
};
static const struct clk_div_table hclk_table[] = {
{ .val = 7, .div = 8 },
{ .val = 15, .div = 16 },
{ .div = 0 },
};
static const struct clk_div_table hclkbmh_table[] = {
{ .val = 3, .div = 4 },
{ .val = 7, .div = 8 },
{ .div = 0 },
};
static const struct clk_div_table pclk_table[] = {
{ .val = 15, .div = 16 },
{ .val = 31, .div = 32 },
{ .div = 0 },
};
static const struct clk_div_table rclk_table[] = {
{ .val = 0, .div = 8 },
{ .val = 1, .div = 16 },
{ .val = 2, .div = 24 },
{ .val = 3, .div = 32 },
{ .div = 0 },
};
static const struct clk_div_table uhs1clk0_table[] = {
{ .val = 0, .div = 2 },
{ .val = 1, .div = 3 },
{ .val = 2, .div = 4 },
{ .val = 3, .div = 8 },
{ .val = 4, .div = 16 },
{ .div = 0 },
};
static const struct clk_div_table uhs2clk_table[] = {
{ .val = 0, .div = 9 },
{ .val = 1, .div = 10 },
{ .val = 2, .div = 11 },
{ .val = 3, .div = 12 },
{ .val = 4, .div = 13 },
{ .val = 5, .div = 14 },
{ .val = 6, .div = 16 },
{ .val = 7, .div = 18 },
{ .div = 0 },
};
static u32 spi_mux_table[] = {0, 1, 2};
static const char * const spi_mux_names[] = {
M10V_SPI_PARENT0, M10V_SPI_PARENT1, M10V_SPI_PARENT2
};
static u32 uhs1clk2_mux_table[] = {2, 3, 4, 8};
static const char * const uhs1clk2_mux_names[] = {
M10V_UHS1CLK2_PARENT0, M10V_UHS1CLK2_PARENT1,
M10V_UHS1CLK2_PARENT2, M10V_PLL6DIV2
};
static u32 uhs1clk1_mux_table[] = {3, 4, 8};
static const char * const uhs1clk1_mux_names[] = {
M10V_UHS1CLK1_PARENT0, M10V_UHS1CLK1_PARENT1, M10V_PLL6DIV2
};
static u32 nfclk_mux_table[] = {0, 1, 2, 3, 4, 8};
static const char * const nfclk_mux_names[] = {
M10V_NFCLK_PARENT0, M10V_NFCLK_PARENT1, M10V_NFCLK_PARENT2,
M10V_NFCLK_PARENT3, M10V_NFCLK_PARENT4, M10V_NFCLK_PARENT5
};
static const struct m10v_clk_div_fixed_data m10v_pll_fixed_data[] = {
{M10V_PLL1, NULL, 1, 40, -1},
{M10V_PLL2, NULL, 1, 30, -1},
{M10V_PLL6, NULL, 1, 35, -1},
{M10V_PLL7, NULL, 1, 40, -1},
{M10V_PLL9, NULL, 1, 33, -1},
{M10V_PLL10, NULL, 5, 108, -1},
{M10V_PLL10DIV2, M10V_PLL10, 2, 1, -1},
{M10V_PLL11, NULL, 2, 75, -1},
};
static const struct m10v_clk_div_fixed_data m10v_div_fixed_data[] = {
{"usb2", NULL, 2, 1, -1},
{"pcisuppclk", NULL, 20, 1, -1},
{M10V_PLL1DIV2, M10V_PLL1, 2, 1, -1},
{M10V_PLL2DIV2, M10V_PLL2, 2, 1, -1},
{M10V_PLL6DIV2, M10V_PLL6, 2, 1, -1},
{M10V_PLL6DIV3, M10V_PLL6, 3, 1, -1},
{M10V_PLL7DIV2, M10V_PLL7, 2, 1, -1},
{M10V_PLL7DIV5, M10V_PLL7, 5, 1, -1},
{"ca7wd", M10V_PLL2DIV2, 12, 1, -1},
{"pclkca7wd", M10V_PLL1DIV2, 16, 1, -1},
{M10V_SPI_PARENT0, M10V_PLL10DIV2, 2, 1, -1},
{M10V_SPI_PARENT1, M10V_PLL10DIV2, 4, 1, -1},
{M10V_SPI_PARENT2, M10V_PLL7DIV2, 8, 1, -1},
{M10V_UHS1CLK2_PARENT0, M10V_PLL7, 4, 1, -1},
{M10V_UHS1CLK2_PARENT1, M10V_PLL7, 8, 1, -1},
{M10V_UHS1CLK2_PARENT2, M10V_PLL7, 16, 1, -1},
{M10V_UHS1CLK1_PARENT0, M10V_PLL7, 8, 1, -1},
{M10V_UHS1CLK1_PARENT1, M10V_PLL7, 16, 1, -1},
{M10V_NFCLK_PARENT0, M10V_PLL7DIV2, 8, 1, -1},
{M10V_NFCLK_PARENT1, M10V_PLL7DIV2, 10, 1, -1},
{M10V_NFCLK_PARENT2, M10V_PLL7DIV2, 13, 1, -1},
{M10V_NFCLK_PARENT3, M10V_PLL7DIV2, 16, 1, -1},
{M10V_NFCLK_PARENT4, M10V_PLL7DIV2, 40, 1, -1},
{M10V_NFCLK_PARENT5, M10V_PLL7DIV5, 10, 1, -1},
};
static const struct m10v_clk_div_factors m10v_div_factor_data[] = {
{"emmc", M10V_PLL11, CLKSEL(1), 28, 3, emmcclk_table, 0,
M10V_EMMCCLK_ID},
{"mclk400", M10V_PLL1DIV2, CLKSEL(10), 7, 3, mclk400_table, 0, -1},
{"mclk200", M10V_PLL1DIV2, CLKSEL(10), 3, 4, mclk200_table, 0, -1},
{"aclk400", M10V_PLL1DIV2, CLKSEL(10), 0, 3, aclk400_table, 0, -1},
{"aclk300", M10V_PLL2DIV2, CLKSEL(12), 0, 2, aclk300_table, 0, -1},
{"aclk", M10V_PLL1DIV2, CLKSEL(9), 20, 4, aclk_table, 0, M10V_ACLK_ID},
{"aclkexs", M10V_PLL1DIV2, CLKSEL(9), 16, 4, aclkexs_table, 0, -1},
{"hclk", M10V_PLL1DIV2, CLKSEL(9), 7, 5, hclk_table, 0, M10V_HCLK_ID},
{"hclkbmh", M10V_PLL1DIV2, CLKSEL(9), 12, 4, hclkbmh_table, 0, -1},
{"pclk", M10V_PLL1DIV2, CLKSEL(9), 0, 7, pclk_table, 0, M10V_PCLK_ID},
{"uhs1clk0", M10V_PLL7, CLKSEL(1), 3, 5, uhs1clk0_table, 0, -1},
{"uhs2clk", M10V_PLL6DIV3, CLKSEL(1), 18, 4, uhs2clk_table, 0, -1},
};
static const struct m10v_clk_mux_factors m10v_mux_factor_data[] = {
{"spi", spi_mux_names, ARRAY_SIZE(spi_mux_names),
CLKSEL(8), 3, 7, spi_mux_table, 0, M10V_SPICLK_ID},
{"uhs1clk2", uhs1clk2_mux_names, ARRAY_SIZE(uhs1clk2_mux_names),
CLKSEL(1), 13, 31, uhs1clk2_mux_table, 0, M10V_UHS1CLK2_ID},
{"uhs1clk1", uhs1clk1_mux_names, ARRAY_SIZE(uhs1clk1_mux_names),
CLKSEL(1), 8, 31, uhs1clk1_mux_table, 0, -1},
{"nfclk", nfclk_mux_names, ARRAY_SIZE(nfclk_mux_names),
CLKSEL(1), 22, 127, nfclk_mux_table, 0, M10V_NFCLK_ID},
};
static u8 m10v_mux_get_parent(struct clk_hw *hw)
{
struct clk_mux *mux = to_clk_mux(hw);
u32 val;
val = readl(mux->reg) >> mux->shift;
val &= mux->mask;
return clk_mux_val_to_index(hw, mux->table, mux->flags, val);
}
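/*
 * The most significant bit of each mux field appears to act as a
 * write-enable: set_parent() below ORs it into the value so that the
 * hardware latches the new selection (hence mask values like 7, 31 and
 * 127 in m10v_mux_factor_data).
 */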
static int m10v_mux_set_parent(struct clk_hw *hw, u8 index)
{
struct clk_mux *mux = to_clk_mux(hw);
u32 val = clk_mux_index_to_val(mux->table, mux->flags, index);
unsigned long flags = 0;
u32 reg;
u32 write_en = BIT(fls(mux->mask) - 1);
if (mux->lock)
spin_lock_irqsave(mux->lock, flags);
else
__acquire(mux->lock);
reg = readl(mux->reg);
reg &= ~(mux->mask << mux->shift);
val = (val | write_en) << mux->shift;
reg |= val;
writel(reg, mux->reg);
if (mux->lock)
spin_unlock_irqrestore(mux->lock, flags);
else
__release(mux->lock);
return 0;
}
static const struct clk_ops m10v_mux_ops = {
.get_parent = m10v_mux_get_parent,
.set_parent = m10v_mux_set_parent,
.determine_rate = __clk_mux_determine_rate,
};
static struct clk_hw *m10v_clk_hw_register_mux(struct device *dev,
const char *name, const char * const *parent_names,
u8 num_parents, unsigned long flags, void __iomem *reg,
u8 shift, u32 mask, u8 clk_mux_flags, u32 *table,
spinlock_t *lock)
{
struct clk_mux *mux;
struct clk_hw *hw;
struct clk_init_data init;
int ret;
mux = kzalloc(sizeof(*mux), GFP_KERNEL);
if (!mux)
return ERR_PTR(-ENOMEM);
init.name = name;
init.ops = &m10v_mux_ops;
init.flags = flags;
init.parent_names = parent_names;
init.num_parents = num_parents;
mux->reg = reg;
mux->shift = shift;
mux->mask = mask;
mux->flags = clk_mux_flags;
mux->lock = lock;
mux->table = table;
mux->hw.init = &init;
hw = &mux->hw;
ret = clk_hw_register(dev, hw);
if (ret) {
kfree(mux);
hw = ERR_PTR(ret);
}
return hw;
}
struct m10v_clk_divider {
struct clk_hw hw;
void __iomem *reg;
u8 shift;
u8 width;
u8 flags;
const struct clk_div_table *table;
spinlock_t *lock;
void __iomem *write_valid_reg;
};
static unsigned long m10v_clk_divider_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct m10v_clk_divider *divider = to_m10v_div(hw);
unsigned int val;
val = readl(divider->reg) >> divider->shift;
val &= clk_div_mask(divider->width);
return divider_recalc_rate(hw, parent_rate, val, divider->table,
divider->flags, divider->width);
}
static long m10v_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
struct m10v_clk_divider *divider = to_m10v_div(hw);
/* if read only, just return current value */
if (divider->flags & CLK_DIVIDER_READ_ONLY) {
u32 val;
val = readl(divider->reg) >> divider->shift;
val &= clk_div_mask(divider->width);
return divider_ro_round_rate(hw, rate, prate, divider->table,
divider->width, divider->flags,
val);
}
return divider_round_rate(hw, rate, prate, divider->table,
divider->width, divider->flags);
}
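/*
 * As with the muxes, the MSB of a divider field doubles as a
 * write-enable bit. Dividers that sit behind CLKSEL(9)/CLKSEL(10)
 * additionally require a DCHREQ handshake through write_valid_reg
 * before the new value takes effect (see m10v_reg_div_pre()).
 */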
static int m10v_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct m10v_clk_divider *divider = to_m10v_div(hw);
int value;
unsigned long flags = 0;
u32 val;
u32 write_en = BIT(divider->width - 1);
value = divider_get_val(rate, parent_rate, divider->table,
divider->width, divider->flags);
if (value < 0)
return value;
if (divider->lock)
spin_lock_irqsave(divider->lock, flags);
else
__acquire(divider->lock);
val = readl(divider->reg);
val &= ~(clk_div_mask(divider->width) << divider->shift);
val |= ((u32)value | write_en) << divider->shift;
writel(val, divider->reg);
if (divider->write_valid_reg) {
writel(M10V_DCHREQ, divider->write_valid_reg);
if (readl_poll_timeout(divider->write_valid_reg, val,
!val, M10V_UPOLL_RATE, M10V_UTIMEOUT))
pr_err("%s:%s couldn't stabilize\n",
__func__, clk_hw_get_name(hw));
}
if (divider->lock)
spin_unlock_irqrestore(divider->lock, flags);
else
__release(divider->lock);
return 0;
}
static const struct clk_ops m10v_clk_divider_ops = {
.recalc_rate = m10v_clk_divider_recalc_rate,
.round_rate = m10v_clk_divider_round_rate,
.set_rate = m10v_clk_divider_set_rate,
};
static struct clk_hw *m10v_clk_hw_register_divider(struct device *dev,
const char *name, const char *parent_name, unsigned long flags,
void __iomem *reg, u8 shift, u8 width,
u8 clk_divider_flags, const struct clk_div_table *table,
spinlock_t *lock, void __iomem *write_valid_reg)
{
struct m10v_clk_divider *div;
struct clk_hw *hw;
struct clk_init_data init;
int ret;
div = kzalloc(sizeof(*div), GFP_KERNEL);
if (!div)
return ERR_PTR(-ENOMEM);
init.name = name;
init.ops = &m10v_clk_divider_ops;
init.flags = flags;
init.parent_names = &parent_name;
init.num_parents = 1;
div->reg = reg;
div->shift = shift;
div->width = width;
div->flags = clk_divider_flags;
div->lock = lock;
div->hw.init = &init;
div->table = table;
div->write_valid_reg = write_valid_reg;
/* register the clock */
hw = &div->hw;
ret = clk_hw_register(dev, hw);
if (ret) {
kfree(div);
hw = ERR_PTR(ret);
}
return hw;
}
static void m10v_reg_div_pre(const struct m10v_clk_div_factors *factors,
struct clk_hw_onecell_data *clk_data,
void __iomem *base)
{
struct clk_hw *hw;
void __iomem *write_valid_reg;
/*
* The dividers behind CLKSEL(9) and CLKSEL(10) need an additional
* write (the DCHREQ handshake via CLKSEL(11)) to take effect.
*/
if ((factors->offset == CLKSEL(9)) || (factors->offset == CLKSEL(10)))
write_valid_reg = base + CLKSEL(11);
else
write_valid_reg = NULL;
hw = m10v_clk_hw_register_divider(NULL, factors->name,
factors->parent_name,
CLK_SET_RATE_PARENT,
base + factors->offset,
factors->shift,
factors->width, factors->div_flags,
factors->table,
&m10v_crglock, write_valid_reg);
if (factors->onecell_idx >= 0)
clk_data->hws[factors->onecell_idx] = hw;
}
static void m10v_reg_fixed_pre(const struct m10v_clk_div_fixed_data *factors,
struct clk_hw_onecell_data *clk_data,
const char *parent_name)
{
struct clk_hw *hw;
const char *pn = factors->parent_name ?
factors->parent_name : parent_name;
hw = clk_hw_register_fixed_factor(NULL, factors->name, pn, 0,
factors->mult, factors->div);
if (factors->onecell_idx >= 0)
clk_data->hws[factors->onecell_idx] = hw;
}
static void m10v_reg_mux_pre(const struct m10v_clk_mux_factors *factors,
struct clk_hw_onecell_data *clk_data,
void __iomem *base)
{
struct clk_hw *hw;
hw = m10v_clk_hw_register_mux(NULL, factors->name,
factors->parent_names,
factors->num_parents,
CLK_SET_RATE_PARENT,
base + factors->offset, factors->shift,
factors->mask, factors->mux_flags,
factors->table, &m10v_crglock);
if (factors->onecell_idx >= 0)
clk_data->hws[factors->onecell_idx] = hw;
}
static int m10v_clk_probe(struct platform_device *pdev)
{
int id;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
void __iomem *base;
const char *parent_name;
base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(base))
return PTR_ERR(base);
parent_name = of_clk_get_parent_name(np, 0);
for (id = 0; id < ARRAY_SIZE(m10v_div_factor_data); ++id)
m10v_reg_div_pre(&m10v_div_factor_data[id],
m10v_clk_data, base);
for (id = 0; id < ARRAY_SIZE(m10v_div_fixed_data); ++id)
m10v_reg_fixed_pre(&m10v_div_fixed_data[id],
m10v_clk_data, parent_name);
for (id = 0; id < ARRAY_SIZE(m10v_mux_factor_data); ++id)
m10v_reg_mux_pre(&m10v_mux_factor_data[id],
m10v_clk_data, base);
for (id = 0; id < M10V_NUM_CLKS; id++) {
if (IS_ERR(m10v_clk_data->hws[id]))
return PTR_ERR(m10v_clk_data->hws[id]);
}
return 0;
}
static const struct of_device_id m10v_clk_dt_ids[] = {
{ .compatible = "socionext,milbeaut-m10v-ccu", },
{ }
};
static struct platform_driver m10v_clk_driver = {
.probe = m10v_clk_probe,
.driver = {
.name = "m10v-ccu",
.of_match_table = m10v_clk_dt_ids,
},
};
builtin_platform_driver(m10v_clk_driver);
static void __init m10v_cc_init(struct device_node *np)
{
int id;
void __iomem *base;
const char *parent_name;
struct clk_hw *hw;
m10v_clk_data = kzalloc(struct_size(m10v_clk_data, hws,
M10V_NUM_CLKS),
GFP_KERNEL);
if (!m10v_clk_data)
return;
m10v_clk_data->num = M10V_NUM_CLKS;
base = of_iomap(np, 0);
if (!base) {
kfree(m10v_clk_data);
return;
}
parent_name = of_clk_get_parent_name(np, 0);
if (!parent_name) {
kfree(m10v_clk_data);
iounmap(base);
return;
}
/*
* This way all clocks fetched before the platform device probes,
* except those we assign here for early use, will be deferred.
*/
for (id = 0; id < M10V_NUM_CLKS; id++)
m10v_clk_data->hws[id] = ERR_PTR(-EPROBE_DEFER);
/*
* The PLLs are set up by the bootloader, so this driver registers
* them as fixed-factor clocks.
*/
for (id = 0; id < ARRAY_SIZE(m10v_pll_fixed_data); ++id)
m10v_reg_fixed_pre(&m10v_pll_fixed_data[id],
m10v_clk_data, parent_name);
/*
* The timer consumes "rclk", so it must be registered here.
*/
hw = m10v_clk_hw_register_divider(NULL, "rclk", M10V_PLL10DIV2, 0,
base + CLKSEL(1), 0, 3, 0, rclk_table,
&m10v_crglock, NULL);
m10v_clk_data->hws[M10V_RCLK_ID] = hw;
of_clk_add_hw_provider(np, of_clk_hw_onecell_get, m10v_clk_data);
}
CLK_OF_DECLARE_DRIVER(m10v_cc, "socionext,milbeaut-m10v-ccu", m10v_cc_init);
| linux-master | drivers/clk/clk-milbeaut.c |
// SPDX-License-Identifier: GPL-2.0+
// Copyright IBM Corp
#define pr_fmt(fmt) "clk-aspeed: " fmt
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <dt-bindings/clock/aspeed-clock.h>
#include "clk-aspeed.h"
#define ASPEED_NUM_CLKS 38
#define ASPEED_RESET2_OFFSET 32
#define ASPEED_RESET_CTRL 0x04
#define ASPEED_CLK_SELECTION 0x08
#define ASPEED_CLK_STOP_CTRL 0x0c
#define ASPEED_MPLL_PARAM 0x20
#define ASPEED_HPLL_PARAM 0x24
#define AST2500_HPLL_BYPASS_EN BIT(20)
#define AST2400_HPLL_PROGRAMMED BIT(18)
#define AST2400_HPLL_BYPASS_EN BIT(17)
#define ASPEED_MISC_CTRL 0x2c
#define UART_DIV13_EN BIT(12)
#define ASPEED_MAC_CLK_DLY 0x48
#define ASPEED_STRAP 0x70
#define CLKIN_25MHZ_EN BIT(23)
#define AST2400_CLK_SOURCE_SEL BIT(18)
#define ASPEED_CLK_SELECTION_2 0xd8
#define ASPEED_RESET_CTRL2 0xd4
/* Globally visible clocks */
static DEFINE_SPINLOCK(aspeed_clk_lock);
/* Keeps track of all clocks */
static struct clk_hw_onecell_data *aspeed_clk_data;
static void __iomem *scu_base;
/* TODO: ask Aspeed about the actual parent data */
static const struct aspeed_gate_data aspeed_gates[] = {
/* clk rst name parent flags */
[ASPEED_CLK_GATE_ECLK] = { 0, 6, "eclk-gate", "eclk", 0 }, /* Video Engine */
[ASPEED_CLK_GATE_GCLK] = { 1, 7, "gclk-gate", NULL, 0 }, /* 2D engine */
[ASPEED_CLK_GATE_MCLK] = { 2, -1, "mclk-gate", "mpll", CLK_IS_CRITICAL }, /* SDRAM */
[ASPEED_CLK_GATE_VCLK] = { 3, -1, "vclk-gate", NULL, 0 }, /* Video Capture */
[ASPEED_CLK_GATE_BCLK] = { 4, 8, "bclk-gate", "bclk", CLK_IS_CRITICAL }, /* PCIe/PCI */
[ASPEED_CLK_GATE_DCLK] = { 5, -1, "dclk-gate", NULL, CLK_IS_CRITICAL }, /* DAC */
[ASPEED_CLK_GATE_REFCLK] = { 6, -1, "refclk-gate", "clkin", CLK_IS_CRITICAL },
[ASPEED_CLK_GATE_USBPORT2CLK] = { 7, 3, "usb-port2-gate", NULL, 0 }, /* USB2.0 Host port 2 */
[ASPEED_CLK_GATE_LCLK] = { 8, 5, "lclk-gate", NULL, 0 }, /* LPC */
[ASPEED_CLK_GATE_USBUHCICLK] = { 9, 15, "usb-uhci-gate", NULL, 0 }, /* USB1.1 (requires port 2 enabled) */
[ASPEED_CLK_GATE_D1CLK] = { 10, 13, "d1clk-gate", NULL, 0 }, /* GFX CRT */
[ASPEED_CLK_GATE_YCLK] = { 13, 4, "yclk-gate", NULL, 0 }, /* HAC */
[ASPEED_CLK_GATE_USBPORT1CLK] = { 14, 14, "usb-port1-gate", NULL, 0 }, /* USB2 hub/USB2 host port 1/USB1.1 dev */
[ASPEED_CLK_GATE_UART1CLK] = { 15, -1, "uart1clk-gate", "uart", 0 }, /* UART1 */
[ASPEED_CLK_GATE_UART2CLK] = { 16, -1, "uart2clk-gate", "uart", 0 }, /* UART2 */
[ASPEED_CLK_GATE_UART5CLK] = { 17, -1, "uart5clk-gate", "uart", 0 }, /* UART5 */
[ASPEED_CLK_GATE_ESPICLK] = { 19, -1, "espiclk-gate", NULL, 0 }, /* eSPI */
[ASPEED_CLK_GATE_MAC1CLK] = { 20, 11, "mac1clk-gate", "mac", 0 }, /* MAC1 */
[ASPEED_CLK_GATE_MAC2CLK] = { 21, 12, "mac2clk-gate", "mac", 0 }, /* MAC2 */
[ASPEED_CLK_GATE_RSACLK] = { 24, -1, "rsaclk-gate", NULL, 0 }, /* RSA */
[ASPEED_CLK_GATE_UART3CLK] = { 25, -1, "uart3clk-gate", "uart", 0 }, /* UART3 */
[ASPEED_CLK_GATE_UART4CLK] = { 26, -1, "uart4clk-gate", "uart", 0 }, /* UART4 */
[ASPEED_CLK_GATE_SDCLK] = { 27, 16, "sdclk-gate", NULL, 0 }, /* SDIO/SD */
[ASPEED_CLK_GATE_LHCCLK] = { 28, -1, "lhclk-gate", "lhclk", 0 }, /* LPC master/LPC+ */
};
static const char * const eclk_parent_names[] = {
"mpll",
"hpll",
"dpll",
};
static const struct clk_div_table ast2500_eclk_div_table[] = {
{ 0x0, 2 },
{ 0x1, 2 },
{ 0x2, 3 },
{ 0x3, 4 },
{ 0x4, 5 },
{ 0x5, 6 },
{ 0x6, 7 },
{ 0x7, 8 },
{ 0 }
};
static const struct clk_div_table ast2500_mac_div_table[] = {
{ 0x0, 4 }, /* Yep, really. Aspeed confirmed this is correct */
{ 0x1, 4 },
{ 0x2, 6 },
{ 0x3, 8 },
{ 0x4, 10 },
{ 0x5, 12 },
{ 0x6, 14 },
{ 0x7, 16 },
{ 0 }
};
static const struct clk_div_table ast2400_div_table[] = {
{ 0x0, 2 },
{ 0x1, 4 },
{ 0x2, 6 },
{ 0x3, 8 },
{ 0x4, 10 },
{ 0x5, 12 },
{ 0x6, 14 },
{ 0x7, 16 },
{ 0 }
};
static const struct clk_div_table ast2500_div_table[] = {
{ 0x0, 4 },
{ 0x1, 8 },
{ 0x2, 12 },
{ 0x3, 16 },
{ 0x4, 20 },
{ 0x5, 24 },
{ 0x6, 28 },
{ 0x7, 32 },
{ 0 }
};
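/*
 * Worked example of the AST2400 formula below (illustrative register
 * value): n = 48, od = 0, d = 1 gives mult = (2 - 0) * (48 + 2) = 100
 * and div = 2, i.e. 24 MHz * 100 / 2 = 1.2 GHz.
 */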
static struct clk_hw *aspeed_ast2400_calc_pll(const char *name, u32 val)
{
unsigned int mult, div;
if (val & AST2400_HPLL_BYPASS_EN) {
/* Pass through mode */
mult = div = 1;
} else {
/* F = 24Mhz * (2-OD) * [(N + 2) / (D + 1)] */
u32 n = (val >> 5) & 0x3f;
u32 od = (val >> 4) & 0x1;
u32 d = val & 0xf;
mult = (2 - od) * (n + 2);
div = d + 1;
}
return clk_hw_register_fixed_factor(NULL, name, "clkin", 0,
mult, div);
}
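/*
 * Note that mult below is computed with integer division; this is
 * exact for the usual configurations where (n + 1) divides (m + 1)
 * evenly, but an uneven setting would be silently truncated here.
 */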
static struct clk_hw *aspeed_ast2500_calc_pll(const char *name, u32 val)
{
unsigned int mult, div;
if (val & AST2500_HPLL_BYPASS_EN) {
/* Pass through mode */
mult = div = 1;
} else {
/* F = clkin * [(M+1) / (N+1)] / (P + 1) */
u32 p = (val >> 13) & 0x3f;
u32 m = (val >> 5) & 0xff;
u32 n = val & 0x1f;
mult = (m + 1) / (n + 1);
div = p + 1;
}
return clk_hw_register_fixed_factor(NULL, name, "clkin", 0,
mult, div);
}
static const struct aspeed_clk_soc_data ast2500_data = {
.div_table = ast2500_div_table,
.eclk_div_table = ast2500_eclk_div_table,
.mac_div_table = ast2500_mac_div_table,
.calc_pll = aspeed_ast2500_calc_pll,
};
static const struct aspeed_clk_soc_data ast2400_data = {
.div_table = ast2400_div_table,
.eclk_div_table = ast2400_div_table,
.mac_div_table = ast2400_div_table,
.calc_pll = aspeed_ast2400_calc_pll,
};
static int aspeed_clk_is_enabled(struct clk_hw *hw)
{
struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw);
u32 clk = BIT(gate->clock_idx);
u32 rst = BIT(gate->reset_idx);
u32 enval = (gate->flags & CLK_GATE_SET_TO_DISABLE) ? 0 : clk;
u32 reg;
/*
* If the IP is in reset, treat the clock as not enabled; this
* this happens with some clocks such as the USB one when
* coming from cold reset. Without this, aspeed_clk_enable()
* will fail to lift the reset.
*/
if (gate->reset_idx >= 0) {
regmap_read(gate->map, ASPEED_RESET_CTRL, ®);
if (reg & rst)
return 0;
}
regmap_read(gate->map, ASPEED_CLK_STOP_CTRL, ®);
return ((reg & clk) == enval) ? 1 : 0;
}
static int aspeed_clk_enable(struct clk_hw *hw)
{
struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw);
unsigned long flags;
u32 clk = BIT(gate->clock_idx);
u32 rst = BIT(gate->reset_idx);
u32 enval;
spin_lock_irqsave(gate->lock, flags);
if (aspeed_clk_is_enabled(hw)) {
spin_unlock_irqrestore(gate->lock, flags);
return 0;
}
if (gate->reset_idx >= 0) {
/* Put IP in reset */
regmap_update_bits(gate->map, ASPEED_RESET_CTRL, rst, rst);
/* Delay 100us */
udelay(100);
}
/* Enable clock */
enval = (gate->flags & CLK_GATE_SET_TO_DISABLE) ? 0 : clk;
regmap_update_bits(gate->map, ASPEED_CLK_STOP_CTRL, clk, enval);
if (gate->reset_idx >= 0) {
/* A delay of 10ms is specified by the ASPEED docs */
mdelay(10);
/* Take IP out of reset */
regmap_update_bits(gate->map, ASPEED_RESET_CTRL, rst, 0);
}
spin_unlock_irqrestore(gate->lock, flags);
return 0;
}
static void aspeed_clk_disable(struct clk_hw *hw)
{
struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw);
unsigned long flags;
u32 clk = BIT(gate->clock_idx);
u32 enval;
spin_lock_irqsave(gate->lock, flags);
enval = (gate->flags & CLK_GATE_SET_TO_DISABLE) ? clk : 0;
regmap_update_bits(gate->map, ASPEED_CLK_STOP_CTRL, clk, enval);
spin_unlock_irqrestore(gate->lock, flags);
}
static const struct clk_ops aspeed_clk_gate_ops = {
.enable = aspeed_clk_enable,
.disable = aspeed_clk_disable,
.is_enabled = aspeed_clk_is_enabled,
};
static const u8 aspeed_resets[] = {
/* SCU04 resets */
[ASPEED_RESET_XDMA] = 25,
[ASPEED_RESET_MCTP] = 24,
[ASPEED_RESET_ADC] = 23,
[ASPEED_RESET_JTAG_MASTER] = 22,
[ASPEED_RESET_MIC] = 18,
[ASPEED_RESET_PWM] = 9,
[ASPEED_RESET_PECI] = 10,
[ASPEED_RESET_I2C] = 2,
[ASPEED_RESET_AHB] = 1,
/*
* SCUD4 resets start at an offset to separate them from
* the SCU04 resets.
*/
[ASPEED_RESET_CRT1] = ASPEED_RESET2_OFFSET + 5,
};
static int aspeed_reset_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct aspeed_reset *ar = to_aspeed_reset(rcdev);
u32 reg = ASPEED_RESET_CTRL;
u32 bit = aspeed_resets[id];
if (bit >= ASPEED_RESET2_OFFSET) {
bit -= ASPEED_RESET2_OFFSET;
reg = ASPEED_RESET_CTRL2;
}
return regmap_update_bits(ar->map, reg, BIT(bit), 0);
}
static int aspeed_reset_assert(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct aspeed_reset *ar = to_aspeed_reset(rcdev);
u32 reg = ASPEED_RESET_CTRL;
u32 bit = aspeed_resets[id];
if (bit >= ASPEED_RESET2_OFFSET) {
bit -= ASPEED_RESET2_OFFSET;
reg = ASPEED_RESET_CTRL2;
}
return regmap_update_bits(ar->map, reg, BIT(bit), BIT(bit));
}
static int aspeed_reset_status(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct aspeed_reset *ar = to_aspeed_reset(rcdev);
u32 reg = ASPEED_RESET_CTRL;
u32 bit = aspeed_resets[id];
int ret, val;
if (bit >= ASPEED_RESET2_OFFSET) {
bit -= ASPEED_RESET2_OFFSET;
reg = ASPEED_RESET_CTRL2;
}
ret = regmap_read(ar->map, reg, &val);
if (ret)
return ret;
return !!(val & BIT(bit));
}
static const struct reset_control_ops aspeed_reset_ops = {
.assert = aspeed_reset_assert,
.deassert = aspeed_reset_deassert,
.status = aspeed_reset_status,
};
static struct clk_hw *aspeed_clk_hw_register_gate(struct device *dev,
const char *name, const char *parent_name, unsigned long flags,
struct regmap *map, u8 clock_idx, u8 reset_idx,
u8 clk_gate_flags, spinlock_t *lock)
{
struct aspeed_clk_gate *gate;
struct clk_init_data init;
struct clk_hw *hw;
int ret;
gate = kzalloc(sizeof(*gate), GFP_KERNEL);
if (!gate)
return ERR_PTR(-ENOMEM);
init.name = name;
init.ops = &aspeed_clk_gate_ops;
init.flags = flags;
init.parent_names = parent_name ? &parent_name : NULL;
init.num_parents = parent_name ? 1 : 0;
gate->map = map;
gate->clock_idx = clock_idx;
gate->reset_idx = reset_idx;
gate->flags = clk_gate_flags;
gate->lock = lock;
gate->hw.init = &init;
hw = &gate->hw;
ret = clk_hw_register(dev, hw);
if (ret) {
kfree(gate);
hw = ERR_PTR(ret);
}
return hw;
}
static int aspeed_clk_probe(struct platform_device *pdev)
{
const struct aspeed_clk_soc_data *soc_data;
struct device *dev = &pdev->dev;
struct aspeed_reset *ar;
struct regmap *map;
struct clk_hw *hw;
u32 val, rate;
int i, ret;
map = syscon_node_to_regmap(dev->of_node);
if (IS_ERR(map)) {
dev_err(dev, "no syscon regmap\n");
return PTR_ERR(map);
}
ar = devm_kzalloc(dev, sizeof(*ar), GFP_KERNEL);
if (!ar)
return -ENOMEM;
ar->map = map;
ar->rcdev.owner = THIS_MODULE;
ar->rcdev.nr_resets = ARRAY_SIZE(aspeed_resets);
ar->rcdev.ops = &aspeed_reset_ops;
ar->rcdev.of_node = dev->of_node;
ret = devm_reset_controller_register(dev, &ar->rcdev);
if (ret) {
dev_err(dev, "could not register reset controller\n");
return ret;
}
/* SoC generations share common layouts but have different divisors */
soc_data = of_device_get_match_data(dev);
if (!soc_data) {
dev_err(dev, "no match data for platform\n");
return -EINVAL;
}
/* UART clock div13 setting */
regmap_read(map, ASPEED_MISC_CTRL, &val);
if (val & UART_DIV13_EN)
rate = 24000000 / 13;
else
rate = 24000000;
/* TODO: Find the parent data for the uart clock */
hw = clk_hw_register_fixed_rate(dev, "uart", NULL, 0, rate);
if (IS_ERR(hw))
return PTR_ERR(hw);
aspeed_clk_data->hws[ASPEED_CLK_UART] = hw;
/*
* Memory controller (M-PLL) PLL. This clock is configured by the
* bootloader, and is exposed to Linux as a read-only clock rate.
*/
regmap_read(map, ASPEED_MPLL_PARAM, &val);
hw = soc_data->calc_pll("mpll", val);
if (IS_ERR(hw))
return PTR_ERR(hw);
aspeed_clk_data->hws[ASPEED_CLK_MPLL] = hw;
/* SD/SDIO clock divider and gate */
hw = clk_hw_register_gate(dev, "sd_extclk_gate", "hpll", 0,
scu_base + ASPEED_CLK_SELECTION, 15, 0,
&aspeed_clk_lock);
if (IS_ERR(hw))
return PTR_ERR(hw);
hw = clk_hw_register_divider_table(dev, "sd_extclk", "sd_extclk_gate",
0, scu_base + ASPEED_CLK_SELECTION, 12, 3, 0,
soc_data->div_table,
&aspeed_clk_lock);
if (IS_ERR(hw))
return PTR_ERR(hw);
aspeed_clk_data->hws[ASPEED_CLK_SDIO] = hw;
/* MAC AHB bus clock divider */
hw = clk_hw_register_divider_table(dev, "mac", "hpll", 0,
scu_base + ASPEED_CLK_SELECTION, 16, 3, 0,
soc_data->mac_div_table,
&aspeed_clk_lock);
if (IS_ERR(hw))
return PTR_ERR(hw);
aspeed_clk_data->hws[ASPEED_CLK_MAC] = hw;
if (of_device_is_compatible(pdev->dev.of_node, "aspeed,ast2500-scu")) {
/* RMII 50MHz RCLK */
hw = clk_hw_register_fixed_rate(dev, "mac12rclk", "hpll", 0,
50000000);
if (IS_ERR(hw))
return PTR_ERR(hw);
/* RMII1 50MHz (RCLK) output enable */
hw = clk_hw_register_gate(dev, "mac1rclk", "mac12rclk", 0,
scu_base + ASPEED_MAC_CLK_DLY, 29, 0,
&aspeed_clk_lock);
if (IS_ERR(hw))
return PTR_ERR(hw);
aspeed_clk_data->hws[ASPEED_CLK_MAC1RCLK] = hw;
/* RMII2 50MHz (RCLK) output enable */
hw = clk_hw_register_gate(dev, "mac2rclk", "mac12rclk", 0,
scu_base + ASPEED_MAC_CLK_DLY, 30, 0,
&aspeed_clk_lock);
if (IS_ERR(hw))
return PTR_ERR(hw);
aspeed_clk_data->hws[ASPEED_CLK_MAC2RCLK] = hw;
}
/* LPC Host (LHCLK) clock divider */
hw = clk_hw_register_divider_table(dev, "lhclk", "hpll", 0,
scu_base + ASPEED_CLK_SELECTION, 20, 3, 0,
soc_data->div_table,
&aspeed_clk_lock);
if (IS_ERR(hw))
return PTR_ERR(hw);
aspeed_clk_data->hws[ASPEED_CLK_LHCLK] = hw;
/* P-Bus (BCLK) clock divider */
hw = clk_hw_register_divider_table(dev, "bclk", "hpll", 0,
scu_base + ASPEED_CLK_SELECTION_2, 0, 2, 0,
soc_data->div_table,
&aspeed_clk_lock);
if (IS_ERR(hw))
return PTR_ERR(hw);
aspeed_clk_data->hws[ASPEED_CLK_BCLK] = hw;
/* Fixed 24MHz clock */
hw = clk_hw_register_fixed_rate(NULL, "fixed-24m", "clkin",
0, 24000000);
if (IS_ERR(hw))
return PTR_ERR(hw);
aspeed_clk_data->hws[ASPEED_CLK_24M] = hw;
hw = clk_hw_register_mux(dev, "eclk-mux", eclk_parent_names,
ARRAY_SIZE(eclk_parent_names), 0,
scu_base + ASPEED_CLK_SELECTION, 2, 0x3, 0,
&aspeed_clk_lock);
if (IS_ERR(hw))
return PTR_ERR(hw);
aspeed_clk_data->hws[ASPEED_CLK_ECLK_MUX] = hw;
hw = clk_hw_register_divider_table(dev, "eclk", "eclk-mux", 0,
scu_base + ASPEED_CLK_SELECTION, 28,
3, 0, soc_data->eclk_div_table,
&aspeed_clk_lock);
if (IS_ERR(hw))
return PTR_ERR(hw);
aspeed_clk_data->hws[ASPEED_CLK_ECLK] = hw;
/*
* TODO: There are a number of clocks that not included in this driver
* as more information is required:
* D2-PLL
* D-PLL
* YCLK
* RGMII
* RMII
* UART[1..5] clock source mux
*/
for (i = 0; i < ARRAY_SIZE(aspeed_gates); i++) {
const struct aspeed_gate_data *gd = &aspeed_gates[i];
u32 gate_flags;
/* Special case: the USB port 1 clock (bit 14) always works
* the opposite way from the other ones.
*/
gate_flags = (gd->clock_idx == 14) ? 0 : CLK_GATE_SET_TO_DISABLE;
hw = aspeed_clk_hw_register_gate(dev,
gd->name,
gd->parent_name,
gd->flags,
map,
gd->clock_idx,
gd->reset_idx,
gate_flags,
&aspeed_clk_lock);
if (IS_ERR(hw))
return PTR_ERR(hw);
aspeed_clk_data->hws[i] = hw;
}
return 0;
}
static const struct of_device_id aspeed_clk_dt_ids[] = {
{ .compatible = "aspeed,ast2400-scu", .data = &ast2400_data },
{ .compatible = "aspeed,ast2500-scu", .data = &ast2500_data },
{ }
};
static struct platform_driver aspeed_clk_driver = {
.probe = aspeed_clk_probe,
.driver = {
.name = "aspeed-clk",
.of_match_table = aspeed_clk_dt_ids,
.suppress_bind_attrs = true,
},
};
builtin_platform_driver(aspeed_clk_driver);
static void __init aspeed_ast2400_cc(struct regmap *map)
{
struct clk_hw *hw;
u32 val, div, clkin, hpll;
const u16 hpll_rates[][4] = {
{384, 360, 336, 408},
{400, 375, 350, 425},
};
int rate;
/*
* CLKIN is the crystal oscillator: 24, 48 or 25 MHz, selected by
* strapping
*/
regmap_read(map, ASPEED_STRAP, &val);
rate = (val >> 8) & 3;
if (val & CLKIN_25MHZ_EN) {
clkin = 25000000;
hpll = hpll_rates[1][rate];
} else if (val & AST2400_CLK_SOURCE_SEL) {
clkin = 48000000;
hpll = hpll_rates[0][rate];
} else {
clkin = 24000000;
hpll = hpll_rates[0][rate];
}
hw = clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, clkin);
pr_debug("clkin @%u MHz\n", clkin / 1000000);
/*
* High-speed PLL clock derived from the crystal. This is the CPU clock,
* and we assume that it is enabled. It can be configured through the
* HPLL_PARAM register, or set to a specified frequency by strapping.
*/
regmap_read(map, ASPEED_HPLL_PARAM, &val);
if (val & AST2400_HPLL_PROGRAMMED)
hw = aspeed_ast2400_calc_pll("hpll", val);
else
hw = clk_hw_register_fixed_rate(NULL, "hpll", "clkin", 0,
hpll * 1000000);
aspeed_clk_data->hws[ASPEED_CLK_HPLL] = hw;
/*
* Strap bits 11:10 define the CPU/AHB clock frequency ratio (aka HCLK)
* 00: Select CPU:AHB = 1:1
* 01: Select CPU:AHB = 2:1
* 10: Select CPU:AHB = 4:1
* 11: Select CPU:AHB = 3:1
*/
regmap_read(map, ASPEED_STRAP, &val);
val = (val >> 10) & 0x3;
div = val + 1;
if (div == 3)
div = 4;
else if (div == 4)
div = 3;
hw = clk_hw_register_fixed_factor(NULL, "ahb", "hpll", 0, 1, div);
aspeed_clk_data->hws[ASPEED_CLK_AHB] = hw;
/* APB clock (aka PCLK): clock selection register SCU08 */
hw = clk_hw_register_divider_table(NULL, "apb", "hpll", 0,
scu_base + ASPEED_CLK_SELECTION, 23, 3, 0,
ast2400_div_table,
&aspeed_clk_lock);
aspeed_clk_data->hws[ASPEED_CLK_APB] = hw;
}
static void __init aspeed_ast2500_cc(struct regmap *map)
{
struct clk_hw *hw;
u32 val, freq, div;
/* CLKIN is the crystal oscillator: 24 or 25 MHz, selected by strapping */
regmap_read(map, ASPEED_STRAP, &val);
if (val & CLKIN_25MHZ_EN)
freq = 25000000;
else
freq = 24000000;
hw = clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, freq);
pr_debug("clkin @%u MHz\n", freq / 1000000);
/*
* High-speed PLL clock derived from the crystal. This is the CPU clock,
* and we assume that it is enabled
*/
regmap_read(map, ASPEED_HPLL_PARAM, &val);
aspeed_clk_data->hws[ASPEED_CLK_HPLL] = aspeed_ast2500_calc_pll("hpll", val);
/* Strap bits 11:9 define the AXI/AHB clock frequency ratio (aka HCLK) */
regmap_read(map, ASPEED_STRAP, &val);
val = (val >> 9) & 0x7;
WARN(val == 0, "strapping is zero: cannot determine ahb clock");
div = 2 * (val + 1);
hw = clk_hw_register_fixed_factor(NULL, "ahb", "hpll", 0, 1, div);
aspeed_clk_data->hws[ASPEED_CLK_AHB] = hw;
/* APB clock (aka PCLK): clock selection register SCU08 */
regmap_read(map, ASPEED_CLK_SELECTION, &val);
val = (val >> 23) & 0x7;
div = 4 * (val + 1);
hw = clk_hw_register_fixed_factor(NULL, "apb", "hpll", 0, 1, div);
aspeed_clk_data->hws[ASPEED_CLK_APB] = hw;
}
static void __init aspeed_cc_init(struct device_node *np)
{
struct regmap *map;
u32 val;
int ret;
int i;
scu_base = of_iomap(np, 0);
if (!scu_base)
return;
aspeed_clk_data = kzalloc(struct_size(aspeed_clk_data, hws,
ASPEED_NUM_CLKS),
GFP_KERNEL);
if (!aspeed_clk_data)
return;
aspeed_clk_data->num = ASPEED_NUM_CLKS;
/*
* This way all clocks fetched before the platform device probes,
* except those we assign here for early use, will be deferred.
*/
for (i = 0; i < ASPEED_NUM_CLKS; i++)
aspeed_clk_data->hws[i] = ERR_PTR(-EPROBE_DEFER);
map = syscon_node_to_regmap(np);
if (IS_ERR(map)) {
pr_err("no syscon regmap\n");
return;
}
/*
* We check that the regmap works on this very first access,
* but as this is an MMIO-backed regmap, subsequent regmap
* access is not going to fail and we skip error checks from
* this point.
*/
ret = regmap_read(map, ASPEED_STRAP, &val);
if (ret) {
pr_err("failed to read strapping register\n");
return;
}
if (of_device_is_compatible(np, "aspeed,ast2400-scu"))
aspeed_ast2400_cc(map);
else if (of_device_is_compatible(np, "aspeed,ast2500-scu"))
aspeed_ast2500_cc(map);
else
pr_err("unknown platform, failed to add clocks\n");
ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, aspeed_clk_data);
if (ret)
pr_err("failed to add DT provider: %d\n", ret);
}
CLK_OF_DECLARE_DRIVER(aspeed_cc_g5, "aspeed,ast2500-scu", aspeed_cc_init);
CLK_OF_DECLARE_DRIVER(aspeed_cc_g4, "aspeed,ast2400-scu", aspeed_cc_init);
| linux-master | drivers/clk/clk-aspeed.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Driver for Silicon Labs Si544 Programmable Oscillator
* Copyright (C) 2018 Topic Embedded Products
* Author: Mike Looijmans <[email protected]>
*/
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
#include <linux/slab.h>
/* I2C registers (decimal as in datasheet) */
#define SI544_REG_CONTROL 7
#define SI544_REG_OE_STATE 17
#define SI544_REG_HS_DIV 23
#define SI544_REG_LS_HS_DIV 24
#define SI544_REG_FBDIV0 26
#define SI544_REG_FBDIV8 27
#define SI544_REG_FBDIV16 28
#define SI544_REG_FBDIV24 29
#define SI544_REG_FBDIV32 30
#define SI544_REG_FBDIV40 31
#define SI544_REG_FCAL_OVR 69
#define SI544_REG_ADPLL_DELTA_M0 231
#define SI544_REG_ADPLL_DELTA_M8 232
#define SI544_REG_ADPLL_DELTA_M16 233
#define SI544_REG_PAGE_SELECT 255
/* Register values */
#define SI544_CONTROL_RESET BIT(7)
#define SI544_CONTROL_MS_ICAL2 BIT(3)
#define SI544_OE_STATE_ODC_OE BIT(0)
/* Max freq depends on speed grade */
#define SI544_MIN_FREQ 200000U
/* The Si544 internal oscillator runs at 55.05 MHz */
#define FXO 55050000U
/* VCO range is 10.8 .. 12.1 GHz, max depends on speed grade */
#define FVCO_MIN 10800000000ULL
#define HS_DIV_MAX 2046
#define HS_DIV_MAX_ODD 33
/* Lowest frequency synthesizable using only the HS divider */
#define MIN_HSDIV_FREQ (FVCO_MIN / HS_DIV_MAX)
/* Range and interpretation of the adjustment value */
#define DELTA_M_MAX 8161512
#define DELTA_M_FRAC_NUM 19
#define DELTA_M_FRAC_DEN 20000
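/*
 * A quick sanity check of the scaling (derived from the constants above,
 * not taken from the datasheet): DELTA_M_FRAC_NUM / DELTA_M_FRAC_DEN is
 * 19 / 20000 = 950 / 1000000, so a full-scale delta_m of +/-DELTA_M_MAX
 * corresponds to a +/-950 ppm shift of the center frequency.
 */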
enum si544_speed_grade {
si544a,
si544b,
si544c,
};
struct clk_si544 {
struct clk_hw hw;
struct regmap *regmap;
struct i2c_client *i2c_client;
enum si544_speed_grade speed_grade;
};
#define to_clk_si544(_hw) container_of(_hw, struct clk_si544, hw)
/**
* struct clk_si544_muldiv - Multiplier/divider settings
* @fb_div_frac: fractional part of feedback divider (32 bits)
* @fb_div_int: integer part of feedback divider (11 bits)
* @hs_div: 1st divider, 5..2046, must be even when >33
* @ls_div_bits: 2nd divider, as 2^x, range 0..5
* If ls_div_bits is non-zero, hs_div must be even
* @delta_m: Frequency shift for small -950..+950 ppm changes, 24 bit
*/
struct clk_si544_muldiv {
u32 fb_div_frac;
u16 fb_div_int;
u16 hs_div;
u8 ls_div_bits;
s32 delta_m;
};
/* Enables or disables the output driver */
static int si544_enable_output(struct clk_si544 *data, bool enable)
{
return regmap_update_bits(data->regmap, SI544_REG_OE_STATE,
SI544_OE_STATE_ODC_OE, enable ? SI544_OE_STATE_ODC_OE : 0);
}
static int si544_prepare(struct clk_hw *hw)
{
struct clk_si544 *data = to_clk_si544(hw);
return si544_enable_output(data, true);
}
static void si544_unprepare(struct clk_hw *hw)
{
struct clk_si544 *data = to_clk_si544(hw);
si544_enable_output(data, false);
}
static int si544_is_prepared(struct clk_hw *hw)
{
struct clk_si544 *data = to_clk_si544(hw);
unsigned int val;
int err;
err = regmap_read(data->regmap, SI544_REG_OE_STATE, &val);
if (err < 0)
return err;
return !!(val & SI544_OE_STATE_ODC_OE);
}
/* Retrieve clock multiplier and dividers from hardware */
static int si544_get_muldiv(struct clk_si544 *data,
struct clk_si544_muldiv *settings)
{
int err;
u8 reg[6];
err = regmap_bulk_read(data->regmap, SI544_REG_HS_DIV, reg, 2);
if (err)
return err;
settings->ls_div_bits = (reg[1] >> 4) & 0x07;
settings->hs_div = (reg[1] & 0x07) << 8 | reg[0];
err = regmap_bulk_read(data->regmap, SI544_REG_FBDIV0, reg, 6);
if (err)
return err;
settings->fb_div_int = reg[4] | (reg[5] & 0x07) << 8;
settings->fb_div_frac = reg[0] | reg[1] << 8 | reg[2] << 16 |
reg[3] << 24;
err = regmap_bulk_read(data->regmap, SI544_REG_ADPLL_DELTA_M0, reg, 3);
if (err)
return err;
/*
 * Interpret as a 24-bit signed number: the bytes are packed into
 * bits 31:8 and the arithmetic shift right by 8 replicates bit 23
 * into the top byte, sign-extending the two's-complement value.
 */
settings->delta_m = reg[0] << 8 | reg[1] << 16 | reg[2] << 24;
settings->delta_m >>= 8;
return 0;
}
static int si544_set_delta_m(struct clk_si544 *data, s32 delta_m)
{
u8 reg[3];
reg[0] = delta_m;
reg[1] = delta_m >> 8;
reg[2] = delta_m >> 16;
return regmap_bulk_write(data->regmap, SI544_REG_ADPLL_DELTA_M0,
reg, 3);
}
static int si544_set_muldiv(struct clk_si544 *data,
struct clk_si544_muldiv *settings)
{
int err;
u8 reg[6];
reg[0] = settings->hs_div;
reg[1] = settings->hs_div >> 8 | settings->ls_div_bits << 4;
err = regmap_bulk_write(data->regmap, SI544_REG_HS_DIV, reg, 2);
if (err < 0)
return err;
reg[0] = settings->fb_div_frac;
reg[1] = settings->fb_div_frac >> 8;
reg[2] = settings->fb_div_frac >> 16;
reg[3] = settings->fb_div_frac >> 24;
reg[4] = settings->fb_div_int;
reg[5] = settings->fb_div_int >> 8;
/*
* Writing to SI544_REG_FBDIV40 triggers the clock change, so that
* must be written last
*/
return regmap_bulk_write(data->regmap, SI544_REG_FBDIV0, reg, 6);
}
static bool is_valid_frequency(const struct clk_si544 *data,
unsigned long frequency)
{
unsigned long max_freq = 0;
if (frequency < SI544_MIN_FREQ)
return false;
switch (data->speed_grade) {
case si544a:
max_freq = 1500000000;
break;
case si544b:
max_freq = 800000000;
break;
case si544c:
max_freq = 350000000;
break;
}
return frequency <= max_freq;
}
/* Calculate divider settings for a given frequency */
static int si544_calc_muldiv(struct clk_si544_muldiv *settings,
unsigned long frequency)
{
u64 vco;
u32 ls_freq;
u32 tmp;
u8 res;
/* Determine the minimum value of LS_DIV and resulting target freq. */
ls_freq = frequency;
settings->ls_div_bits = 0;
if (frequency >= MIN_HSDIV_FREQ) {
settings->ls_div_bits = 0;
} else {
res = 1;
tmp = 2 * HS_DIV_MAX;
while (tmp <= (HS_DIV_MAX * 32)) {
if (((u64)frequency * tmp) >= FVCO_MIN)
break;
++res;
tmp <<= 1;
}
settings->ls_div_bits = res;
ls_freq = frequency << res;
}
/* Determine minimum HS_DIV by rounding up */
vco = FVCO_MIN + ls_freq - 1;
do_div(vco, ls_freq);
settings->hs_div = vco;
/* round up to even number when required */
if ((settings->hs_div & 1) &&
(settings->hs_div > HS_DIV_MAX_ODD || settings->ls_div_bits))
++settings->hs_div;
/* Calculate VCO frequency (in 10..12GHz range) */
vco = (u64)ls_freq * settings->hs_div;
/* Calculate the integer part of the feedback divider */
tmp = do_div(vco, FXO);
settings->fb_div_int = vco;
/* And the fractional bits using the remainder */
vco = (u64)tmp << 32;
vco += FXO / 2; /* Round to nearest multiple */
do_div(vco, FXO);
settings->fb_div_frac = vco;
/* Reset the frequency adjustment */
settings->delta_m = 0;
return 0;
}
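/*
 * Worked example (hand-computed from the constants above): requesting
 * 200 kHz falls below MIN_HSDIV_FREQ (~5.28 MHz), so the loop selects
 * ls_div_bits = 5 (ls_freq = 200 kHz << 5 = 6.4 MHz). Rounding up gives
 * hs_div = ceil(10.8 GHz / 6.4 MHz) = 1688, which is even as required
 * when ls_div_bits != 0, for a VCO of 6.4 MHz * 1688 = 10.8032 GHz.
 * Dividing by FXO yields fb_div_int = 196 with a 13.4 MHz remainder,
 * which becomes the 32-bit fraction (~0.2434 * 2^32).
 */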
/* Calculate resulting frequency given the register settings */
static unsigned long si544_calc_center_rate(
const struct clk_si544_muldiv *settings)
{
u32 d = settings->hs_div * BIT(settings->ls_div_bits);
u64 vco;
/* Calculate VCO from the fractional part */
vco = (u64)settings->fb_div_frac * FXO;
vco += (FXO / 2);
vco >>= 32;
/* Add the integer part of the VCO frequency */
vco += (u64)settings->fb_div_int * FXO;
/* Apply divider to obtain the generated frequency */
do_div(vco, d);
return vco;
}
static unsigned long si544_calc_rate(const struct clk_si544_muldiv *settings)
{
unsigned long rate = si544_calc_center_rate(settings);
s64 delta = (s64)rate * (DELTA_M_FRAC_NUM * settings->delta_m);
/*
* The clock adjustment is much smaller than 1 Hz, round to the
* nearest multiple. Apparently div64_s64 rounds towards zero, hence
* check the sign and adjust into the proper direction.
*/
if (settings->delta_m < 0)
delta -= ((s64)DELTA_M_MAX * DELTA_M_FRAC_DEN) / 2;
else
delta += ((s64)DELTA_M_MAX * DELTA_M_FRAC_DEN) / 2;
delta = div64_s64(delta, ((s64)DELTA_M_MAX * DELTA_M_FRAC_DEN));
return rate + delta;
}
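/*
 * Equivalently (a restatement of the integer arithmetic above, not a
 * datasheet formula): rate = center * (1 + (delta_m / DELTA_M_MAX) *
 * 950e-6), evaluated in 64-bit math with round-to-nearest, halves
 * rounded away from zero.
 */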
static unsigned long si544_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_si544 *data = to_clk_si544(hw);
struct clk_si544_muldiv settings;
int err;
err = si544_get_muldiv(data, &settings);
if (err)
return 0;
return si544_calc_rate(&settings);
}
static long si544_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
struct clk_si544 *data = to_clk_si544(hw);
if (!is_valid_frequency(data, rate))
return -EINVAL;
/* The accuracy is less than 1 Hz, so any rate is possible */
return rate;
}
/* Calculates the maximum "small" change, 950 * rate / 1000000 */
static unsigned long si544_max_delta(unsigned long rate)
{
u64 num = rate;
num *= DELTA_M_FRAC_NUM;
do_div(num, DELTA_M_FRAC_DEN);
return num;
}
static s32 si544_calc_delta(s32 delta, s32 max_delta)
{
s64 n = (s64)delta * DELTA_M_MAX;
return div_s64(n, max_delta);
}
static int si544_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_si544 *data = to_clk_si544(hw);
struct clk_si544_muldiv settings;
unsigned long center;
long max_delta;
long delta;
unsigned int old_oe_state;
int err;
if (!is_valid_frequency(data, rate))
return -EINVAL;
/* Try using the frequency adjustment feature for a <= 950ppm change */
err = si544_get_muldiv(data, &settings);
if (err)
return err;
center = si544_calc_center_rate(&settings);
max_delta = si544_max_delta(center);
delta = rate - center;
if (abs(delta) <= max_delta)
return si544_set_delta_m(data,
si544_calc_delta(delta, max_delta));
/* Too big for the delta adjustment, need to reprogram */
err = si544_calc_muldiv(&settings, rate);
if (err)
return err;
err = regmap_read(data->regmap, SI544_REG_OE_STATE, &old_oe_state);
if (err)
return err;
si544_enable_output(data, false);
/* Allow FCAL for this frequency update */
err = regmap_write(data->regmap, SI544_REG_FCAL_OVR, 0);
if (err < 0)
return err;
err = si544_set_delta_m(data, settings.delta_m);
if (err < 0)
return err;
err = si544_set_muldiv(data, &settings);
if (err < 0)
return err; /* Undefined state now, best to leave disabled */
/* Trigger calibration */
err = regmap_write(data->regmap, SI544_REG_CONTROL,
SI544_CONTROL_MS_ICAL2);
if (err < 0)
return err;
/* Applying a new frequency can take up to 10ms */
usleep_range(10000, 12000);
if (old_oe_state & SI544_OE_STATE_ODC_OE)
si544_enable_output(data, true);
return err;
}
static const struct clk_ops si544_clk_ops = {
.prepare = si544_prepare,
.unprepare = si544_unprepare,
.is_prepared = si544_is_prepared,
.recalc_rate = si544_recalc_rate,
.round_rate = si544_round_rate,
.set_rate = si544_set_rate,
};
static bool si544_regmap_is_volatile(struct device *dev, unsigned int reg)
{
switch (reg) {
case SI544_REG_CONTROL:
case SI544_REG_FCAL_OVR:
return true;
default:
return false;
}
}
static const struct regmap_config si544_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.cache_type = REGCACHE_RBTREE,
.max_register = SI544_REG_PAGE_SELECT,
.volatile_reg = si544_regmap_is_volatile,
};
static const struct i2c_device_id si544_id[] = {
{ "si544a", si544a },
{ "si544b", si544b },
{ "si544c", si544c },
{ }
};
MODULE_DEVICE_TABLE(i2c, si544_id);
static int si544_probe(struct i2c_client *client)
{
struct clk_si544 *data;
struct clk_init_data init;
const struct i2c_device_id *id = i2c_match_id(si544_id, client);
int err;
data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
init.ops = &si544_clk_ops;
init.flags = 0;
init.num_parents = 0;
data->hw.init = &init;
data->i2c_client = client;
data->speed_grade = id->driver_data;
if (of_property_read_string(client->dev.of_node, "clock-output-names",
&init.name))
init.name = client->dev.of_node->name;
data->regmap = devm_regmap_init_i2c(client, &si544_regmap_config);
if (IS_ERR(data->regmap))
return PTR_ERR(data->regmap);
i2c_set_clientdata(client, data);
/* Select page 0, just to be sure; there appear to be no other pages */
err = regmap_write(data->regmap, SI544_REG_PAGE_SELECT, 0);
if (err < 0)
return err;
err = devm_clk_hw_register(&client->dev, &data->hw);
if (err) {
dev_err(&client->dev, "clock registration failed\n");
return err;
}
err = devm_of_clk_add_hw_provider(&client->dev, of_clk_hw_simple_get,
&data->hw);
if (err) {
dev_err(&client->dev, "unable to add clk provider\n");
return err;
}
return 0;
}
static const struct of_device_id clk_si544_of_match[] = {
{ .compatible = "silabs,si544a" },
{ .compatible = "silabs,si544b" },
{ .compatible = "silabs,si544c" },
{ },
};
MODULE_DEVICE_TABLE(of, clk_si544_of_match);
static struct i2c_driver si544_driver = {
.driver = {
.name = "si544",
.of_match_table = clk_si544_of_match,
},
.probe = si544_probe,
.id_table = si544_id,
};
module_i2c_driver(si544_driver);
MODULE_AUTHOR("Mike Looijmans <[email protected]>");
MODULE_DESCRIPTION("Si544 driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/clk/clk-si544.c |
// SPDX-License-Identifier: GPL-2.0-or-later
// Copyright IBM Corp
// Copyright ASPEED Technology
#define pr_fmt(fmt) "clk-ast2600: " fmt
#include <linux/mfd/syscon.h>
#include <linux/mod_devicetable.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <dt-bindings/clock/ast2600-clock.h>
#include "clk-aspeed.h"
/*
* This includes the gates (configured from aspeed_g6_gates), plus the
* explicitly-configured clocks (ASPEED_CLK_HPLL and up).
*/
#define ASPEED_G6_NUM_CLKS 72
#define ASPEED_G6_SILICON_REV 0x014
#define CHIP_REVISION_ID GENMASK(23, 16)
#define ASPEED_G6_RESET_CTRL 0x040
#define ASPEED_G6_RESET_CTRL2 0x050
#define ASPEED_G6_CLK_STOP_CTRL 0x080
#define ASPEED_G6_CLK_STOP_CTRL2 0x090
#define ASPEED_G6_MISC_CTRL 0x0C0
#define UART_DIV13_EN BIT(12)
#define ASPEED_G6_CLK_SELECTION1 0x300
#define ASPEED_G6_CLK_SELECTION2 0x304
#define ASPEED_G6_CLK_SELECTION4 0x310
#define ASPEED_G6_CLK_SELECTION5 0x314
#define I3C_CLK_SELECTION_SHIFT 31
#define I3C_CLK_SELECTION BIT(31)
#define I3C_CLK_SELECT_HCLK (0 << I3C_CLK_SELECTION_SHIFT)
#define I3C_CLK_SELECT_APLL_DIV (1 << I3C_CLK_SELECTION_SHIFT)
#define APLL_DIV_SELECTION_SHIFT 28
#define APLL_DIV_SELECTION GENMASK(30, 28)
#define APLL_DIV_2 (0b001 << APLL_DIV_SELECTION_SHIFT)
#define APLL_DIV_3 (0b010 << APLL_DIV_SELECTION_SHIFT)
#define APLL_DIV_4 (0b011 << APLL_DIV_SELECTION_SHIFT)
#define APLL_DIV_5 (0b100 << APLL_DIV_SELECTION_SHIFT)
#define APLL_DIV_6 (0b101 << APLL_DIV_SELECTION_SHIFT)
#define APLL_DIV_7 (0b110 << APLL_DIV_SELECTION_SHIFT)
#define APLL_DIV_8 (0b111 << APLL_DIV_SELECTION_SHIFT)
#define ASPEED_HPLL_PARAM 0x200
#define ASPEED_APLL_PARAM 0x210
#define ASPEED_MPLL_PARAM 0x220
#define ASPEED_EPLL_PARAM 0x240
#define ASPEED_DPLL_PARAM 0x260
#define ASPEED_G6_STRAP1 0x500
#define ASPEED_MAC12_CLK_DLY 0x340
#define ASPEED_MAC34_CLK_DLY 0x350
/* Globally visible clocks */
static DEFINE_SPINLOCK(aspeed_g6_clk_lock);
/* Keeps track of all clocks */
static struct clk_hw_onecell_data *aspeed_g6_clk_data;
static void __iomem *scu_g6_base;
/* AST2600 revision: A0, A1, A2, etc */
static u8 soc_rev;
/*
* The majority of the clocks in the system are gates paired with a reset
* controller that holds the IP in reset; this is represented by the @reset_idx
* member of entries here.
*
* This borrows from clk_hw_register_gate, but registers two 'gates', one
* to control the clock enable register and the other to control the reset
* IP. This allows us to enforce the ordering:
*
* 1. Place IP in reset
* 2. Enable clock
* 3. Delay
* 4. Release reset
*
* Consequently, if reset_idx is set, reset control is implicit: the clock
* consumer does not need its own reset handling, as enabling the clock will
* also deassert reset.
*
* There are some gates that do not have an associated reset; these are
* handled by using -1 as the index for the reset, and the consumer must
* explicitly assert/deassert reset lines as required.
*
* Clocks marked with CLK_IS_CRITICAL:
*
* ref0 and ref1 are essential for the SoC to operate
* mpll is required if SDRAM is used
*/
static const struct aspeed_gate_data aspeed_g6_gates[] = {
/* clk rst name parent flags */
[ASPEED_CLK_GATE_MCLK] = { 0, -1, "mclk-gate", "mpll", CLK_IS_CRITICAL }, /* SDRAM */
[ASPEED_CLK_GATE_ECLK] = { 1, 6, "eclk-gate", "eclk", 0 }, /* Video Engine */
[ASPEED_CLK_GATE_GCLK] = { 2, 7, "gclk-gate", NULL, 0 }, /* 2D engine */
/* vclk parent - dclk/d1clk/hclk/mclk */
[ASPEED_CLK_GATE_VCLK] = { 3, -1, "vclk-gate", NULL, 0 }, /* Video Capture */
[ASPEED_CLK_GATE_BCLK] = { 4, 8, "bclk-gate", "bclk", 0 }, /* PCIe/PCI */
/* From dpll */
[ASPEED_CLK_GATE_DCLK] = { 5, -1, "dclk-gate", NULL, CLK_IS_CRITICAL }, /* DAC */
[ASPEED_CLK_GATE_REF0CLK] = { 6, -1, "ref0clk-gate", "clkin", CLK_IS_CRITICAL },
[ASPEED_CLK_GATE_USBPORT2CLK] = { 7, 3, "usb-port2-gate", NULL, 0 }, /* USB2.0 Host port 2 */
/* Reserved 8 */
[ASPEED_CLK_GATE_USBUHCICLK] = { 9, 15, "usb-uhci-gate", NULL, 0 }, /* USB1.1 (requires port 2 enabled) */
/* From dpll/epll/40mhz usb p1 phy/gpioc6/dp phy pll */
[ASPEED_CLK_GATE_D1CLK] = { 10, 13, "d1clk-gate", "d1clk", 0 }, /* GFX CRT */
/* Reserved 11/12 */
[ASPEED_CLK_GATE_YCLK] = { 13, 4, "yclk-gate", NULL, 0 }, /* HAC */
[ASPEED_CLK_GATE_USBPORT1CLK] = { 14, 14, "usb-port1-gate", NULL, 0 }, /* USB2 hub/USB2 host port 1/USB1.1 dev */
[ASPEED_CLK_GATE_UART5CLK] = { 15, -1, "uart5clk-gate", "uart", 0 }, /* UART5 */
/* Reserved 16/19 */
[ASPEED_CLK_GATE_MAC1CLK] = { 20, 11, "mac1clk-gate", "mac12", 0 }, /* MAC1 */
[ASPEED_CLK_GATE_MAC2CLK] = { 21, 12, "mac2clk-gate", "mac12", 0 }, /* MAC2 */
/* Reserved 22/23 */
[ASPEED_CLK_GATE_RSACLK] = { 24, 4, "rsaclk-gate", NULL, 0 }, /* HAC */
[ASPEED_CLK_GATE_RVASCLK] = { 25, 9, "rvasclk-gate", NULL, 0 }, /* RVAS */
/* Reserved 26 */
[ASPEED_CLK_GATE_EMMCCLK] = { 27, 16, "emmcclk-gate", NULL, 0 }, /* For card clk */
/* Reserved 28/29/30 */
[ASPEED_CLK_GATE_LCLK] = { 32, 32, "lclk-gate", NULL, 0 }, /* LPC */
[ASPEED_CLK_GATE_ESPICLK] = { 33, -1, "espiclk-gate", NULL, 0 }, /* eSPI */
[ASPEED_CLK_GATE_REF1CLK] = { 34, -1, "ref1clk-gate", "clkin", CLK_IS_CRITICAL },
/* Reserved 35 */
[ASPEED_CLK_GATE_SDCLK] = { 36, 56, "sdclk-gate", NULL, 0 }, /* SDIO/SD */
[ASPEED_CLK_GATE_LHCCLK] = { 37, -1, "lhclk-gate", "lhclk", 0 }, /* LPC master/LPC+ */
/* Reserved 38 RSA: no longer used */
/* Reserved 39 */
[ASPEED_CLK_GATE_I3C0CLK] = { 40, 40, "i3c0clk-gate", "i3cclk", 0 }, /* I3C0 */
[ASPEED_CLK_GATE_I3C1CLK] = { 41, 41, "i3c1clk-gate", "i3cclk", 0 }, /* I3C1 */
[ASPEED_CLK_GATE_I3C2CLK] = { 42, 42, "i3c2clk-gate", "i3cclk", 0 }, /* I3C2 */
[ASPEED_CLK_GATE_I3C3CLK] = { 43, 43, "i3c3clk-gate", "i3cclk", 0 }, /* I3C3 */
[ASPEED_CLK_GATE_I3C4CLK] = { 44, 44, "i3c4clk-gate", "i3cclk", 0 }, /* I3C4 */
[ASPEED_CLK_GATE_I3C5CLK] = { 45, 45, "i3c5clk-gate", "i3cclk", 0 }, /* I3C5 */
/* Reserved: 46 & 47 */
[ASPEED_CLK_GATE_UART1CLK] = { 48, -1, "uart1clk-gate", "uart", 0 }, /* UART1 */
[ASPEED_CLK_GATE_UART2CLK] = { 49, -1, "uart2clk-gate", "uart", 0 }, /* UART2 */
[ASPEED_CLK_GATE_UART3CLK] = { 50, -1, "uart3clk-gate", "uart", 0 }, /* UART3 */
[ASPEED_CLK_GATE_UART4CLK] = { 51, -1, "uart4clk-gate", "uart", 0 }, /* UART4 */
[ASPEED_CLK_GATE_MAC3CLK] = { 52, 52, "mac3clk-gate", "mac34", 0 }, /* MAC3 */
[ASPEED_CLK_GATE_MAC4CLK] = { 53, 53, "mac4clk-gate", "mac34", 0 }, /* MAC4 */
[ASPEED_CLK_GATE_UART6CLK] = { 54, -1, "uart6clk-gate", "uartx", 0 }, /* UART6 */
[ASPEED_CLK_GATE_UART7CLK] = { 55, -1, "uart7clk-gate", "uartx", 0 }, /* UART7 */
[ASPEED_CLK_GATE_UART8CLK] = { 56, -1, "uart8clk-gate", "uartx", 0 }, /* UART8 */
[ASPEED_CLK_GATE_UART9CLK] = { 57, -1, "uart9clk-gate", "uartx", 0 }, /* UART9 */
[ASPEED_CLK_GATE_UART10CLK] = { 58, -1, "uart10clk-gate", "uartx", 0 }, /* UART10 */
[ASPEED_CLK_GATE_UART11CLK] = { 59, -1, "uart11clk-gate", "uartx", 0 }, /* UART11 */
[ASPEED_CLK_GATE_UART12CLK] = { 60, -1, "uart12clk-gate", "uartx", 0 }, /* UART12 */
[ASPEED_CLK_GATE_UART13CLK] = { 61, -1, "uart13clk-gate", "uartx", 0 }, /* UART13 */
[ASPEED_CLK_GATE_FSICLK] = { 62, 59, "fsiclk-gate", NULL, 0 }, /* FSI */
};
static const struct clk_div_table ast2600_eclk_div_table[] = {
{ 0x0, 2 },
{ 0x1, 2 },
{ 0x2, 3 },
{ 0x3, 4 },
{ 0x4, 5 },
{ 0x5, 6 },
{ 0x6, 7 },
{ 0x7, 8 },
{ 0 }
};
static const struct clk_div_table ast2600_emmc_extclk_div_table[] = {
{ 0x0, 2 },
{ 0x1, 4 },
{ 0x2, 6 },
{ 0x3, 8 },
{ 0x4, 10 },
{ 0x5, 12 },
{ 0x6, 14 },
{ 0x7, 16 },
{ 0 }
};
static const struct clk_div_table ast2600_mac_div_table[] = {
{ 0x0, 4 },
{ 0x1, 4 },
{ 0x2, 6 },
{ 0x3, 8 },
{ 0x4, 10 },
{ 0x5, 12 },
{ 0x6, 14 },
{ 0x7, 16 },
{ 0 }
};
static const struct clk_div_table ast2600_div_table[] = {
{ 0x0, 4 },
{ 0x1, 8 },
{ 0x2, 12 },
{ 0x3, 16 },
{ 0x4, 20 },
{ 0x5, 24 },
{ 0x6, 28 },
{ 0x7, 32 },
{ 0 }
};
/* For hpll/dpll/epll/mpll */
static struct clk_hw *ast2600_calc_pll(const char *name, u32 val)
{
unsigned int mult, div;
if (val & BIT(24)) {
/* Pass through mode */
mult = div = 1;
} else {
/* F = 25 MHz * [(m + 1) / (n + 1)] / (p + 1) */
u32 m = val & 0x1fff;
u32 n = (val >> 13) & 0x3f;
u32 p = (val >> 19) & 0xf;
mult = (m + 1) / (n + 1);
div = (p + 1);
}
return clk_hw_register_fixed_factor(NULL, name, "clkin", 0,
mult, div);
}
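/*
 * Worked example (register field values hypothetical): m = 95, n = 1 and
 * p = 0 give mult = 96 / 2 = 48 and div = 1, i.e. 25 MHz * 48 = 1200 MHz,
 * a plausible HPLL rate for this SoC family.
 */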
static struct clk_hw *ast2600_calc_apll(const char *name, u32 val)
{
unsigned int mult, div;
if (soc_rev >= 2) {
if (val & BIT(24)) {
/* Pass through mode */
mult = div = 1;
} else {
/* F = 25 MHz * [(m + 1) / (n + 1)] / (p + 1) */
u32 m = val & 0x1fff;
u32 n = (val >> 13) & 0x3f;
u32 p = (val >> 19) & 0xf;
mult = (m + 1);
div = (n + 1) * (p + 1);
}
} else {
if (val & BIT(20)) {
/* Pass through mode */
mult = div = 1;
} else {
/* F = 25 MHz * (2 - od) * [(m + 2) / (n + 1)] */
u32 m = (val >> 5) & 0x3f;
u32 od = (val >> 4) & 0x1;
u32 n = val & 0xf;
mult = (2 - od) * (m + 2);
div = n + 1;
}
}
return clk_hw_register_fixed_factor(NULL, name, "clkin", 0,
mult, div);
}
static u32 get_bit(u8 idx)
{
return BIT(idx % 32);
}
static u32 get_reset_reg(struct aspeed_clk_gate *gate)
{
if (gate->reset_idx < 32)
return ASPEED_G6_RESET_CTRL;
return ASPEED_G6_RESET_CTRL2;
}
static u32 get_clock_reg(struct aspeed_clk_gate *gate)
{
if (gate->clock_idx < 32)
return ASPEED_G6_CLK_STOP_CTRL;
return ASPEED_G6_CLK_STOP_CTRL2;
}
static int aspeed_g6_clk_is_enabled(struct clk_hw *hw)
{
struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw);
u32 clk = get_bit(gate->clock_idx);
u32 rst = get_bit(gate->reset_idx);
u32 reg;
u32 enval;
/*
* If the IP is in reset, treat the clock as not enabled,
* this happens with some clocks such as the USB one when
* coming from cold reset. Without this, aspeed_clk_enable()
* will fail to lift the reset.
*/
if (gate->reset_idx >= 0) {
regmap_read(gate->map, get_reset_reg(gate), &reg);
if (reg & rst)
return 0;
}
regmap_read(gate->map, get_clock_reg(gate), &reg);
enval = (gate->flags & CLK_GATE_SET_TO_DISABLE) ? 0 : clk;
return ((reg & clk) == enval) ? 1 : 0;
}
static int aspeed_g6_clk_enable(struct clk_hw *hw)
{
struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw);
unsigned long flags;
u32 clk = get_bit(gate->clock_idx);
u32 rst = get_bit(gate->reset_idx);
spin_lock_irqsave(gate->lock, flags);
if (aspeed_g6_clk_is_enabled(hw)) {
spin_unlock_irqrestore(gate->lock, flags);
return 0;
}
if (gate->reset_idx >= 0) {
/* Put IP in reset */
regmap_write(gate->map, get_reset_reg(gate), rst);
/* Delay 100us */
udelay(100);
}
/* Enable clock */
if (gate->flags & CLK_GATE_SET_TO_DISABLE) {
/* Clock is clear to enable, so use set to clear register */
regmap_write(gate->map, get_clock_reg(gate) + 0x04, clk);
} else {
/* Clock is set to enable, so use write to set register */
regmap_write(gate->map, get_clock_reg(gate), clk);
}
if (gate->reset_idx >= 0) {
/* A delay of 10ms is specified by the ASPEED docs */
mdelay(10);
/* Take IP out of reset */
regmap_write(gate->map, get_reset_reg(gate) + 0x4, rst);
}
spin_unlock_irqrestore(gate->lock, flags);
return 0;
}
static void aspeed_g6_clk_disable(struct clk_hw *hw)
{
struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw);
unsigned long flags;
u32 clk = get_bit(gate->clock_idx);
spin_lock_irqsave(gate->lock, flags);
if (gate->flags & CLK_GATE_SET_TO_DISABLE) {
regmap_write(gate->map, get_clock_reg(gate), clk);
} else {
/* Use set to clear register */
regmap_write(gate->map, get_clock_reg(gate) + 0x4, clk);
}
spin_unlock_irqrestore(gate->lock, flags);
}
static const struct clk_ops aspeed_g6_clk_gate_ops = {
.enable = aspeed_g6_clk_enable,
.disable = aspeed_g6_clk_disable,
.is_enabled = aspeed_g6_clk_is_enabled,
};
static int aspeed_g6_reset_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct aspeed_reset *ar = to_aspeed_reset(rcdev);
u32 rst = get_bit(id);
u32 reg = id >= 32 ? ASPEED_G6_RESET_CTRL2 : ASPEED_G6_RESET_CTRL;
/* Use set to clear register */
return regmap_write(ar->map, reg + 0x04, rst);
}
static int aspeed_g6_reset_assert(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct aspeed_reset *ar = to_aspeed_reset(rcdev);
u32 rst = get_bit(id);
u32 reg = id >= 32 ? ASPEED_G6_RESET_CTRL2 : ASPEED_G6_RESET_CTRL;
return regmap_write(ar->map, reg, rst);
}
static int aspeed_g6_reset_status(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct aspeed_reset *ar = to_aspeed_reset(rcdev);
int ret;
u32 val;
u32 rst = get_bit(id);
u32 reg = id >= 32 ? ASPEED_G6_RESET_CTRL2 : ASPEED_G6_RESET_CTRL;
ret = regmap_read(ar->map, reg, &val);
if (ret)
return ret;
return !!(val & rst);
}
static const struct reset_control_ops aspeed_g6_reset_ops = {
.assert = aspeed_g6_reset_assert,
.deassert = aspeed_g6_reset_deassert,
.status = aspeed_g6_reset_status,
};
static struct clk_hw *aspeed_g6_clk_hw_register_gate(struct device *dev,
const char *name, const char *parent_name, unsigned long flags,
struct regmap *map, u8 clock_idx, u8 reset_idx,
u8 clk_gate_flags, spinlock_t *lock)
{
struct aspeed_clk_gate *gate;
struct clk_init_data init;
struct clk_hw *hw;
int ret;
gate = kzalloc(sizeof(*gate), GFP_KERNEL);
if (!gate)
return ERR_PTR(-ENOMEM);
init.name = name;
init.ops = &aspeed_g6_clk_gate_ops;
init.flags = flags;
init.parent_names = parent_name ? &parent_name : NULL;
init.num_parents = parent_name ? 1 : 0;
gate->map = map;
gate->clock_idx = clock_idx;
gate->reset_idx = reset_idx;
gate->flags = clk_gate_flags;
gate->lock = lock;
gate->hw.init = &init;
hw = &gate->hw;
ret = clk_hw_register(dev, hw);
if (ret) {
kfree(gate);
hw = ERR_PTR(ret);
}
return hw;
}
static const char *const emmc_extclk_parent_names[] = {
"emmc_extclk_hpll_in",
"mpll",
};
static const char * const vclk_parent_names[] = {
"dpll",
"d1pll",
"hclk",
"mclk",
};
static const char * const d1clk_parent_names[] = {
"dpll",
"epll",
"usb-phy-40m",
"gpioc6_clkin",
"dp_phy_pll",
};
static int aspeed_g6_clk_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct aspeed_reset *ar;
struct regmap *map;
struct clk_hw *hw;
u32 val, rate;
int i, ret;
map = syscon_node_to_regmap(dev->of_node);
if (IS_ERR(map)) {
dev_err(dev, "no syscon regmap\n");
return PTR_ERR(map);
}
ar = devm_kzalloc(dev, sizeof(*ar), GFP_KERNEL);
if (!ar)
return -ENOMEM;
ar->map = map;
ar->rcdev.owner = THIS_MODULE;
ar->rcdev.nr_resets = 64;
ar->rcdev.ops = &aspeed_g6_reset_ops;
ar->rcdev.of_node = dev->of_node;
ret = devm_reset_controller_register(dev, &ar->rcdev);
if (ret) {
dev_err(dev, "could not register reset controller\n");
return ret;
}
/* UART clock div13 setting */
regmap_read(map, ASPEED_G6_MISC_CTRL, &val);
if (val & UART_DIV13_EN)
rate = 24000000 / 13;
else
rate = 24000000;
hw = clk_hw_register_fixed_rate(dev, "uart", NULL, 0, rate);
if (IS_ERR(hw))
return PTR_ERR(hw);
aspeed_g6_clk_data->hws[ASPEED_CLK_UART] = hw;
/* UART6~13 clock div13 setting */
regmap_read(map, 0x80, &val);
if (val & BIT(31))
rate = 24000000 / 13;
else
rate = 24000000;
hw = clk_hw_register_fixed_rate(dev, "uartx", NULL, 0, rate);
if (IS_ERR(hw))
return PTR_ERR(hw);
aspeed_g6_clk_data->hws[ASPEED_CLK_UARTX] = hw;
/* EMMC ext clock */
hw = clk_hw_register_fixed_factor(dev, "emmc_extclk_hpll_in", "hpll",
0, 1, 2);
if (IS_ERR(hw))
return PTR_ERR(hw);
hw = clk_hw_register_mux(dev, "emmc_extclk_mux",
emmc_extclk_parent_names,
ARRAY_SIZE(emmc_extclk_parent_names), 0,
scu_g6_base + ASPEED_G6_CLK_SELECTION1, 11, 1,
0, &aspeed_g6_clk_lock);
if (IS_ERR(hw))
return PTR_ERR(hw);
hw = clk_hw_register_gate(dev, "emmc_extclk_gate", "emmc_extclk_mux",
0, scu_g6_base + ASPEED_G6_CLK_SELECTION1,
15, 0, &aspeed_g6_clk_lock);
if (IS_ERR(hw))
return PTR_ERR(hw);
hw = clk_hw_register_divider_table(dev, "emmc_extclk",
"emmc_extclk_gate", 0,
scu_g6_base +
ASPEED_G6_CLK_SELECTION1, 12,
3, 0, ast2600_emmc_extclk_div_table,
&aspeed_g6_clk_lock);
if (IS_ERR(hw))
return PTR_ERR(hw);
aspeed_g6_clk_data->hws[ASPEED_CLK_EMMC] = hw;
/* SD/SDIO clock divider and gate */
hw = clk_hw_register_gate(dev, "sd_extclk_gate", "hpll", 0,
scu_g6_base + ASPEED_G6_CLK_SELECTION4, 31, 0,
&aspeed_g6_clk_lock);
if (IS_ERR(hw))
return PTR_ERR(hw);
hw = clk_hw_register_divider_table(dev, "sd_extclk", "sd_extclk_gate",
0, scu_g6_base + ASPEED_G6_CLK_SELECTION4, 28, 3, 0,
ast2600_div_table,
&aspeed_g6_clk_lock);
if (IS_ERR(hw))
return PTR_ERR(hw);
aspeed_g6_clk_data->hws[ASPEED_CLK_SDIO] = hw;
/* MAC1/2 RMII 50MHz RCLK */
hw = clk_hw_register_fixed_rate(dev, "mac12rclk", "hpll", 0, 50000000);
if (IS_ERR(hw))
return PTR_ERR(hw);
/* MAC1/2 AHB bus clock divider */
hw = clk_hw_register_divider_table(dev, "mac12", "hpll", 0,
scu_g6_base + ASPEED_G6_CLK_SELECTION1, 16, 3, 0,
ast2600_mac_div_table,
&aspeed_g6_clk_lock);
if (IS_ERR(hw))
return PTR_ERR(hw);
aspeed_g6_clk_data->hws[ASPEED_CLK_MAC12] = hw;
/* RMII1 50MHz (RCLK) output enable */
hw = clk_hw_register_gate(dev, "mac1rclk", "mac12rclk", 0,
scu_g6_base + ASPEED_MAC12_CLK_DLY, 29, 0,
&aspeed_g6_clk_lock);
if (IS_ERR(hw))
return PTR_ERR(hw);
aspeed_g6_clk_data->hws[ASPEED_CLK_MAC1RCLK] = hw;
/* RMII2 50MHz (RCLK) output enable */
hw = clk_hw_register_gate(dev, "mac2rclk", "mac12rclk", 0,
scu_g6_base + ASPEED_MAC12_CLK_DLY, 30, 0,
&aspeed_g6_clk_lock);
if (IS_ERR(hw))
return PTR_ERR(hw);
aspeed_g6_clk_data->hws[ASPEED_CLK_MAC2RCLK] = hw;
/* MAC3/4 RMII 50MHz RCLK */
hw = clk_hw_register_fixed_rate(dev, "mac34rclk", "hclk", 0, 50000000);
if (IS_ERR(hw))
return PTR_ERR(hw);
/* MAC3/4 AHB bus clock divider */
hw = clk_hw_register_divider_table(dev, "mac34", "hpll", 0,
scu_g6_base + ASPEED_G6_CLK_SELECTION4, 24, 3, 0,
ast2600_mac_div_table,
&aspeed_g6_clk_lock);
if (IS_ERR(hw))
return PTR_ERR(hw);
aspeed_g6_clk_data->hws[ASPEED_CLK_MAC34] = hw;
/* RMII3 50MHz (RCLK) output enable */
hw = clk_hw_register_gate(dev, "mac3rclk", "mac34rclk", 0,
scu_g6_base + ASPEED_MAC34_CLK_DLY, 29, 0,
&aspeed_g6_clk_lock);
if (IS_ERR(hw))
return PTR_ERR(hw);
aspeed_g6_clk_data->hws[ASPEED_CLK_MAC3RCLK] = hw;
/* RMII4 50MHz (RCLK) output enable */
hw = clk_hw_register_gate(dev, "mac4rclk", "mac34rclk", 0,
scu_g6_base + ASPEED_MAC34_CLK_DLY, 30, 0,
&aspeed_g6_clk_lock);
if (IS_ERR(hw))
return PTR_ERR(hw);
aspeed_g6_clk_data->hws[ASPEED_CLK_MAC4RCLK] = hw;
/* LPC Host (LHCLK) clock divider */
hw = clk_hw_register_divider_table(dev, "lhclk", "hpll", 0,
scu_g6_base + ASPEED_G6_CLK_SELECTION1, 20, 3, 0,
ast2600_div_table,
&aspeed_g6_clk_lock);
if (IS_ERR(hw))
return PTR_ERR(hw);
aspeed_g6_clk_data->hws[ASPEED_CLK_LHCLK] = hw;
/* gfx d1clk : use dp clk */
regmap_update_bits(map, ASPEED_G6_CLK_SELECTION1, GENMASK(10, 8), BIT(10));
/* SoC Display clock selection */
hw = clk_hw_register_mux(dev, "d1clk", d1clk_parent_names,
ARRAY_SIZE(d1clk_parent_names), 0,
scu_g6_base + ASPEED_G6_CLK_SELECTION1, 8, 3, 0,
&aspeed_g6_clk_lock);
if (IS_ERR(hw))
return PTR_ERR(hw);
aspeed_g6_clk_data->hws[ASPEED_CLK_D1CLK] = hw;
/* d1 clk div 0x308[17:15] x [14:12] - 8,7,6,5,4,3,2,1 */
regmap_write(map, 0x308, 0x12000); /* 3x3 = 9 */
/* P-Bus (BCLK) clock divider */
hw = clk_hw_register_divider_table(dev, "bclk", "epll", 0,
scu_g6_base + ASPEED_G6_CLK_SELECTION1, 20, 3, 0,
ast2600_div_table,
&aspeed_g6_clk_lock);
if (IS_ERR(hw))
return PTR_ERR(hw);
aspeed_g6_clk_data->hws[ASPEED_CLK_BCLK] = hw;
/* Video Capture clock selection */
hw = clk_hw_register_mux(dev, "vclk", vclk_parent_names,
ARRAY_SIZE(vclk_parent_names), 0,
scu_g6_base + ASPEED_G6_CLK_SELECTION2, 12, 3, 0,
&aspeed_g6_clk_lock);
if (IS_ERR(hw))
return PTR_ERR(hw);
aspeed_g6_clk_data->hws[ASPEED_CLK_VCLK] = hw;
/* Video Engine clock divider */
hw = clk_hw_register_divider_table(dev, "eclk", NULL, 0,
scu_g6_base + ASPEED_G6_CLK_SELECTION1, 28, 3, 0,
ast2600_eclk_div_table,
&aspeed_g6_clk_lock);
if (IS_ERR(hw))
return PTR_ERR(hw);
aspeed_g6_clk_data->hws[ASPEED_CLK_ECLK] = hw;
for (i = 0; i < ARRAY_SIZE(aspeed_g6_gates); i++) {
const struct aspeed_gate_data *gd = &aspeed_g6_gates[i];
u32 gate_flags;
if (!gd->name)
continue;
/*
* Special case: the USB port 1 clock (bit 14) is always
* working the opposite way from the other ones.
*/
gate_flags = (gd->clock_idx == 14) ? 0 : CLK_GATE_SET_TO_DISABLE;
hw = aspeed_g6_clk_hw_register_gate(dev,
gd->name,
gd->parent_name,
gd->flags,
map,
gd->clock_idx,
gd->reset_idx,
gate_flags,
&aspeed_g6_clk_lock);
if (IS_ERR(hw))
return PTR_ERR(hw);
aspeed_g6_clk_data->hws[i] = hw;
}
return 0;
}
static const struct of_device_id aspeed_g6_clk_dt_ids[] = {
{ .compatible = "aspeed,ast2600-scu" },
{ }
};
static struct platform_driver aspeed_g6_clk_driver = {
.probe = aspeed_g6_clk_probe,
.driver = {
.name = "ast2600-clk",
.of_match_table = aspeed_g6_clk_dt_ids,
.suppress_bind_attrs = true,
},
};
builtin_platform_driver(aspeed_g6_clk_driver);
static const u32 ast2600_a0_axi_ahb_div_table[] = {
2, 2, 3, 5,
};
static const u32 ast2600_a1_axi_ahb_div0_tbl[] = {
3, 2, 3, 4,
};
static const u32 ast2600_a1_axi_ahb_div1_tbl[] = {
3, 4, 6, 8,
};
static const u32 ast2600_a1_axi_ahb200_tbl[] = {
3, 4, 3, 4, 2, 2, 2, 2,
};
static void __init aspeed_g6_cc(struct regmap *map)
{
struct clk_hw *hw;
u32 val, div, divbits, axi_div, ahb_div;
clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, 25000000);
/*
* High-speed PLL clock derived from the crystal. This is the CPU clock,
* and we assume that it is enabled.
*/
regmap_read(map, ASPEED_HPLL_PARAM, &val);
aspeed_g6_clk_data->hws[ASPEED_CLK_HPLL] = ast2600_calc_pll("hpll", val);
regmap_read(map, ASPEED_MPLL_PARAM, &val);
aspeed_g6_clk_data->hws[ASPEED_CLK_MPLL] = ast2600_calc_pll("mpll", val);
regmap_read(map, ASPEED_DPLL_PARAM, &val);
aspeed_g6_clk_data->hws[ASPEED_CLK_DPLL] = ast2600_calc_pll("dpll", val);
regmap_read(map, ASPEED_EPLL_PARAM, &val);
aspeed_g6_clk_data->hws[ASPEED_CLK_EPLL] = ast2600_calc_pll("epll", val);
regmap_read(map, ASPEED_APLL_PARAM, &val);
aspeed_g6_clk_data->hws[ASPEED_CLK_APLL] = ast2600_calc_apll("apll", val);
/* Strap bits 12:11 define the AXI/AHB clock frequency ratio (aka HCLK) */
regmap_read(map, ASPEED_G6_STRAP1, &val);
if (val & BIT(16))
axi_div = 1;
else
axi_div = 2;
divbits = (val >> 11) & 0x3;
if (soc_rev >= 1) {
if (!divbits) {
ahb_div = ast2600_a1_axi_ahb200_tbl[(val >> 8) & 0x3];
if (val & BIT(16))
ahb_div *= 2;
} else {
if (val & BIT(16))
ahb_div = ast2600_a1_axi_ahb_div1_tbl[divbits];
else
ahb_div = ast2600_a1_axi_ahb_div0_tbl[divbits];
}
} else {
ahb_div = ast2600_a0_axi_ahb_div_table[(val >> 11) & 0x3];
}
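/*
 * Worked example (strap values hypothetical): on A1 or later silicon with
 * bit 16 clear and divbits = 1, ahb_div = ast2600_a1_axi_ahb_div0_tbl[1]
 * = 2 and axi_div = 2, so AHB runs at HPLL / 4.
 */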
hw = clk_hw_register_fixed_factor(NULL, "ahb", "hpll", 0, 1, axi_div * ahb_div);
aspeed_g6_clk_data->hws[ASPEED_CLK_AHB] = hw;
regmap_read(map, ASPEED_G6_CLK_SELECTION1, &val);
val = (val >> 23) & 0x7;
div = 4 * (val + 1);
hw = clk_hw_register_fixed_factor(NULL, "apb1", "hpll", 0, 1, div);
aspeed_g6_clk_data->hws[ASPEED_CLK_APB1] = hw;
regmap_read(map, ASPEED_G6_CLK_SELECTION4, &val);
val = (val >> 9) & 0x7;
div = 2 * (val + 1);
hw = clk_hw_register_fixed_factor(NULL, "apb2", "ahb", 0, 1, div);
aspeed_g6_clk_data->hws[ASPEED_CLK_APB2] = hw;
/* USB 2.0 port1 phy 40MHz clock */
hw = clk_hw_register_fixed_rate(NULL, "usb-phy-40m", NULL, 0, 40000000);
aspeed_g6_clk_data->hws[ASPEED_CLK_USBPHY_40M] = hw;
/* i3c clock: source from apll, divide by 8 */
regmap_update_bits(map, ASPEED_G6_CLK_SELECTION5,
I3C_CLK_SELECTION | APLL_DIV_SELECTION,
I3C_CLK_SELECT_APLL_DIV | APLL_DIV_8);
hw = clk_hw_register_fixed_factor(NULL, "i3cclk", "apll", 0, 1, 8);
aspeed_g6_clk_data->hws[ASPEED_CLK_I3C] = hw;
}
static void __init aspeed_g6_cc_init(struct device_node *np)
{
struct regmap *map;
int ret;
int i;
scu_g6_base = of_iomap(np, 0);
if (!scu_g6_base)
return;
soc_rev = (readl(scu_g6_base + ASPEED_G6_SILICON_REV) & CHIP_REVISION_ID) >> 16;
aspeed_g6_clk_data = kzalloc(struct_size(aspeed_g6_clk_data, hws,
ASPEED_G6_NUM_CLKS), GFP_KERNEL);
if (!aspeed_g6_clk_data)
return;
aspeed_g6_clk_data->num = ASPEED_G6_NUM_CLKS;
/*
* This way all clocks fetched before the platform device probes,
* except those we assign here for early use, will be deferred.
*/
for (i = 0; i < ASPEED_G6_NUM_CLKS; i++)
aspeed_g6_clk_data->hws[i] = ERR_PTR(-EPROBE_DEFER);
/*
* We check that the regmap works on this very first access,
* but as this is an MMIO-backed regmap, subsequent regmap
* access is not going to fail and we skip error checks from
* this point.
*/
map = syscon_node_to_regmap(np);
if (IS_ERR(map)) {
pr_err("no syscon regmap\n");
return;
}
aspeed_g6_cc(map);
ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, aspeed_g6_clk_data);
if (ret)
pr_err("failed to add DT provider: %d\n", ret);
}
CLK_OF_DECLARE_DRIVER(aspeed_cc_g6, "aspeed,ast2600-scu", aspeed_g6_cc_init);
| linux-master | drivers/clk/clk-ast2600.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Skyworks Si521xx PCIe clock generator driver
*
* The following series can be supported:
* - Si52144 - 4x DIFF
* - Si52146 - 6x DIFF
* - Si52147 - 9x DIFF
* Currently tested:
* - Si52144
*
* Copyright (C) 2022 Marek Vasut <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/bitrev.h>
#include <linux/clk-provider.h>
#include <linux/i2c.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regmap.h>
/* OE1 and OE2 register */
#define SI521XX_REG_OE(n) (((n) & 0x1) + 1)
#define SI521XX_REG_ID 0x3
#define SI521XX_REG_ID_PROG GENMASK(7, 4)
#define SI521XX_REG_ID_VENDOR GENMASK(3, 0)
#define SI521XX_REG_BC 0x4
#define SI521XX_REG_DA 0x5
#define SI521XX_REG_DA_AMP_SEL BIT(7)
#define SI521XX_REG_DA_AMP_MASK GENMASK(6, 4)
#define SI521XX_REG_DA_AMP_MIN 300000
#define SI521XX_REG_DA_AMP_DEFAULT 800000
#define SI521XX_REG_DA_AMP_MAX 1000000
#define SI521XX_REG_DA_AMP_STEP 100000
#define SI521XX_REG_DA_AMP(UV) \
FIELD_PREP(SI521XX_REG_DA_AMP_MASK, \
((UV) - SI521XX_REG_DA_AMP_MIN) / SI521XX_REG_DA_AMP_STEP)
#define SI521XX_REG_DA_UNKNOWN BIT(3) /* Always set */
/* Map of populated OE bits in output-enable control registers 1 and 2 */
#define SI521XX_OE_MAP(cr1, cr2) (((cr2) << 8) | (cr1))
#define SI521XX_OE_MAP_GET_OE(oe, map) (((map) >> (((oe) - 1) * 8)) & 0xff)
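/*
 * For example, SI521XX_OE_MAP(0x5, 0xc0) packs to 0xc005;
 * SI521XX_OE_MAP_GET_OE(1, ...) returns 0x05 and
 * SI521XX_OE_MAP_GET_OE(2, ...) returns 0xc0, and hweight16() of the
 * whole map gives the output count (2 + 2 = 4 for the Si52144).
 */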
#define SI521XX_DIFF_MULT 4
#define SI521XX_DIFF_DIV 1
/* Supported Skyworks Si521xx models. */
enum si521xx_model {
SI52144 = 0x44,
SI52146 = 0x46,
SI52147 = 0x47,
};
struct si521xx;
struct si_clk {
struct clk_hw hw;
struct si521xx *si;
u8 reg;
u8 bit;
};
struct si521xx {
struct i2c_client *client;
struct regmap *regmap;
struct si_clk clk_dif[9];
u16 chip_info;
u8 pll_amplitude;
};
/*
* Si521xx i2c regmap
*/
static const struct regmap_range si521xx_readable_ranges[] = {
regmap_reg_range(SI521XX_REG_OE(0), SI521XX_REG_DA),
};
static const struct regmap_access_table si521xx_readable_table = {
.yes_ranges = si521xx_readable_ranges,
.n_yes_ranges = ARRAY_SIZE(si521xx_readable_ranges),
};
static const struct regmap_range si521xx_writeable_ranges[] = {
regmap_reg_range(SI521XX_REG_OE(0), SI521XX_REG_OE(1)),
regmap_reg_range(SI521XX_REG_BC, SI521XX_REG_DA),
};
static const struct regmap_access_table si521xx_writeable_table = {
.yes_ranges = si521xx_writeable_ranges,
.n_yes_ranges = ARRAY_SIZE(si521xx_writeable_ranges),
};
static int si521xx_regmap_i2c_write(void *context, unsigned int reg,
unsigned int val)
{
struct i2c_client *i2c = context;
const u8 data[3] = { reg, 1, val };
const int count = ARRAY_SIZE(data);
int ret;
ret = i2c_master_send(i2c, data, count);
if (ret == count)
return 0;
else if (ret < 0)
return ret;
else
return -EIO;
}
static int si521xx_regmap_i2c_read(void *context, unsigned int reg,
unsigned int *val)
{
struct i2c_client *i2c = context;
struct i2c_msg xfer[2];
u8 txdata = reg;
u8 rxdata[2];
int ret;
xfer[0].addr = i2c->addr;
xfer[0].flags = 0;
xfer[0].len = 1;
xfer[0].buf = (void *)&txdata;
xfer[1].addr = i2c->addr;
xfer[1].flags = I2C_M_RD;
xfer[1].len = 2;
xfer[1].buf = (void *)rxdata;
ret = i2c_transfer(i2c->adapter, xfer, 2);
if (ret < 0)
return ret;
if (ret != 2)
return -EIO;
/*
* Byte 0 is the transfer length, which is always 1 because the BC
* (byte count) register is programmed to 1 in si521xx_probe();
* ignore it and use the data from Byte 1.
*/
*val = rxdata[1];
return 0;
}
static const struct regmap_config si521xx_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
/* Cache register values so regcache_sync() can restore them at resume */
.cache_type = REGCACHE_FLAT,
.max_register = SI521XX_REG_DA,
.rd_table = &si521xx_readable_table,
.wr_table = &si521xx_writeable_table,
.reg_write = si521xx_regmap_i2c_write,
.reg_read = si521xx_regmap_i2c_read,
};
static unsigned long si521xx_diff_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
unsigned long long rate;
rate = (unsigned long long)parent_rate * SI521XX_DIFF_MULT;
do_div(rate, SI521XX_DIFF_DIV);
return (unsigned long)rate;
}
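/*
 * With SI521XX_DIFF_MULT/SI521XX_DIFF_DIV fixed at 4/1, every DIFF output
 * runs at four times the reference input; e.g. a 25 MHz reference (a
 * typical value, not mandated by this driver) yields 100 MHz PCIe clocks.
 */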
static long si521xx_diff_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
unsigned long best_parent;
best_parent = (rate / SI521XX_DIFF_MULT) * SI521XX_DIFF_DIV;
*prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
return (*prate / SI521XX_DIFF_DIV) * SI521XX_DIFF_MULT;
}
static int si521xx_diff_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
/*
* We must report success but we can do so unconditionally because
* si521xx_diff_round_rate returns values that ensure this call is a
* nop.
*/
return 0;
}
#define to_si521xx_clk(_hw) container_of(_hw, struct si_clk, hw)
static int si521xx_diff_prepare(struct clk_hw *hw)
{
struct si_clk *si_clk = to_si521xx_clk(hw);
struct si521xx *si = si_clk->si;
regmap_set_bits(si->regmap, SI521XX_REG_OE(si_clk->reg), si_clk->bit);
return 0;
}
static void si521xx_diff_unprepare(struct clk_hw *hw)
{
struct si_clk *si_clk = to_si521xx_clk(hw);
struct si521xx *si = si_clk->si;
regmap_clear_bits(si->regmap, SI521XX_REG_OE(si_clk->reg), si_clk->bit);
}
static const struct clk_ops si521xx_diff_clk_ops = {
.round_rate = si521xx_diff_round_rate,
.set_rate = si521xx_diff_set_rate,
.recalc_rate = si521xx_diff_recalc_rate,
.prepare = si521xx_diff_prepare,
.unprepare = si521xx_diff_unprepare,
};
static int si521xx_get_common_config(struct si521xx *si)
{
struct i2c_client *client = si->client;
struct device_node *np = client->dev.of_node;
unsigned int amp;
int ret;
/* Set defaults */
si->pll_amplitude = SI521XX_REG_DA_AMP(SI521XX_REG_DA_AMP_DEFAULT);
/* Output clock amplitude */
ret = of_property_read_u32(np, "skyworks,out-amplitude-microvolt",
&amp);
if (!ret) {
if (amp < SI521XX_REG_DA_AMP_MIN || amp > SI521XX_REG_DA_AMP_MAX ||
amp % SI521XX_REG_DA_AMP_STEP) {
return dev_err_probe(&client->dev, -EINVAL,
"Invalid skyworks,out-amplitude-microvolt value\n");
}
si->pll_amplitude = SI521XX_REG_DA_AMP(amp);
}
return 0;
}
static void si521xx_update_config(struct si521xx *si)
{
/* If amplitude is non-default, update it. */
if (si->pll_amplitude == SI521XX_REG_DA_AMP(SI521XX_REG_DA_AMP_DEFAULT))
return;
regmap_update_bits(si->regmap, SI521XX_REG_DA,
SI521XX_REG_DA_AMP_MASK, si->pll_amplitude);
}
static void si521xx_diff_idx_to_reg_bit(const u16 chip_info, const int idx,
struct si_clk *clk)
{
unsigned long mask;
int oe, b, ctr = 0;
for (oe = 1; oe <= 2; oe++) {
mask = bitrev8(SI521XX_OE_MAP_GET_OE(oe, chip_info));
for_each_set_bit(b, &mask, 8) {
if (ctr++ != idx)
continue;
clk->reg = SI521XX_REG_OE(oe);
clk->bit = 7 - b;
return;
}
}
}
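/*
 * Worked example (Si52144 map 0xc005, computed by hand): bitrev8(0x05) is
 * 0xa0, so the OE1 pass visits b = 5 then b = 7, mapping idx 0 and 1 to
 * bits 2 and 0 of SI521XX_REG_OE(1); bitrev8(0xc0) is 0x03, mapping idx 2
 * and 3 to bits 7 and 6 of SI521XX_REG_OE(2). The bit reversal makes the
 * iteration walk each register from its MSB downwards.
 */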
static struct clk_hw *
si521xx_of_clk_get(struct of_phandle_args *clkspec, void *data)
{
struct si521xx *si = data;
unsigned int idx = clkspec->args[0];
return &si->clk_dif[idx].hw;
}
static int si521xx_probe(struct i2c_client *client)
{
const u16 chip_info = (u16)(uintptr_t)device_get_match_data(&client->dev);
const struct clk_parent_data clk_parent_data = { .index = 0 };
struct si521xx *si;
unsigned char name[6] = "DIFF0";
struct clk_init_data init = {};
int i, ret;
if (!chip_info)
return -EINVAL;
si = devm_kzalloc(&client->dev, sizeof(*si), GFP_KERNEL);
if (!si)
return -ENOMEM;
i2c_set_clientdata(client, si);
si->client = client;
/* Fetch common configuration from DT (if specified) */
ret = si521xx_get_common_config(si);
if (ret)
return ret;
si->regmap = devm_regmap_init(&client->dev, NULL, client,
&si521xx_regmap_config);
if (IS_ERR(si->regmap))
return dev_err_probe(&client->dev, PTR_ERR(si->regmap),
"Failed to allocate register map\n");
/* Always read back 1 Byte via I2C */
ret = regmap_write(si->regmap, SI521XX_REG_BC, 1);
if (ret < 0)
return ret;
/* Register clocks */
for (i = 0; i < hweight16(chip_info); i++) {
memset(&init, 0, sizeof(init));
snprintf(name, 6, "DIFF%d", i);
init.name = name;
init.ops = &si521xx_diff_clk_ops;
init.parent_data = &clk_parent_data;
init.num_parents = 1;
init.flags = CLK_SET_RATE_PARENT;
si->clk_dif[i].hw.init = &init;
si->clk_dif[i].si = si;
si521xx_diff_idx_to_reg_bit(chip_info, i, &si->clk_dif[i]);
ret = devm_clk_hw_register(&client->dev, &si->clk_dif[i].hw);
if (ret)
return ret;
}
ret = devm_of_clk_add_hw_provider(&client->dev, si521xx_of_clk_get, si);
if (!ret)
si521xx_update_config(si);
return ret;
}
static int __maybe_unused si521xx_suspend(struct device *dev)
{
struct si521xx *si = dev_get_drvdata(dev);
regcache_cache_only(si->regmap, true);
regcache_mark_dirty(si->regmap);
return 0;
}
static int __maybe_unused si521xx_resume(struct device *dev)
{
struct si521xx *si = dev_get_drvdata(dev);
int ret;
regcache_cache_only(si->regmap, false);
ret = regcache_sync(si->regmap);
if (ret)
dev_err(dev, "Failed to restore register map: %d\n", ret);
return ret;
}
static const struct i2c_device_id si521xx_id[] = {
{ "si52144", .driver_data = SI521XX_OE_MAP(0x5, 0xc0) },
{ "si52146", .driver_data = SI521XX_OE_MAP(0x15, 0xe0) },
{ "si52147", .driver_data = SI521XX_OE_MAP(0x17, 0xf8) },
{ }
};
MODULE_DEVICE_TABLE(i2c, si521xx_id);
static const struct of_device_id clk_si521xx_of_match[] = {
{ .compatible = "skyworks,si52144", .data = (void *)SI521XX_OE_MAP(0x5, 0xc0) },
{ .compatible = "skyworks,si52146", .data = (void *)SI521XX_OE_MAP(0x15, 0xe0) },
{ .compatible = "skyworks,si52147", .data = (void *)SI521XX_OE_MAP(0x15, 0xf8) },
{ }
};
MODULE_DEVICE_TABLE(of, clk_si521xx_of_match);
static SIMPLE_DEV_PM_OPS(si521xx_pm_ops, si521xx_suspend, si521xx_resume);
static struct i2c_driver si521xx_driver = {
.driver = {
.name = "clk-si521xx",
.pm = &si521xx_pm_ops,
.of_match_table = clk_si521xx_of_match,
},
.probe = si521xx_probe,
.id_table = si521xx_id,
};
module_i2c_driver(si521xx_driver);
MODULE_AUTHOR("Marek Vasut <[email protected]>");
MODULE_DESCRIPTION("Skyworks Si521xx PCIe clock generator driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/clk/clk-si521xx.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* drivers/clk/clk-axm5516.c
*
* Provides clock implementations for three different types of clock devices on
* the Axxia device: PLL clock, a clock divider and a clock mux.
*
* Copyright (C) 2014 LSI Corporation
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/clk-provider.h>
#include <linux/regmap.h>
#include <dt-bindings/clock/lsi,axm5516-clks.h>
/**
* struct axxia_clk - Common struct to all Axxia clocks.
* @hw: clk_hw for the common clk framework
* @regmap: Regmap for the clock control registers
*/
struct axxia_clk {
struct clk_hw hw;
struct regmap *regmap;
};
#define to_axxia_clk(_hw) container_of(_hw, struct axxia_clk, hw)
/**
* struct axxia_pllclk - Axxia PLL generated clock.
* @aclk: Common struct
* @reg: Offset into regmap for PLL control register
*/
struct axxia_pllclk {
struct axxia_clk aclk;
u32 reg;
};
#define to_axxia_pllclk(_aclk) container_of(_aclk, struct axxia_pllclk, aclk)
/**
* axxia_pllclk_recalc - Calculate the PLL generated clock rate given the
* parent clock rate.
*/
static unsigned long
axxia_pllclk_recalc(struct clk_hw *hw, unsigned long parent_rate)
{
struct axxia_clk *aclk = to_axxia_clk(hw);
struct axxia_pllclk *pll = to_axxia_pllclk(aclk);
unsigned long rate, fbdiv, refdiv, postdiv;
u32 control;
regmap_read(aclk->regmap, pll->reg, &control);
postdiv = ((control >> 0) & 0xf) + 1;
fbdiv = ((control >> 4) & 0xfff) + 3;
refdiv = ((control >> 16) & 0x1f) + 1;
rate = (parent_rate / (refdiv * postdiv)) * fbdiv;
return rate;
}
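/*
 * Worked example (control value hypothetical): raw fields postdiv = 0,
 * fbdiv = 13 and refdiv = 0 decode to postdiv = 1, fbdiv = 16 and
 * refdiv = 1, so a 125 MHz reference produces (125 MHz / 1) * 16 = 2 GHz
 * before any downstream dividers.
 */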
static const struct clk_ops axxia_pllclk_ops = {
.recalc_rate = axxia_pllclk_recalc,
};
/**
* struct axxia_divclk - Axxia clock divider
* @aclk: Common struct
* @reg: Offset into regmap for PLL control register
* @shift: Bit position for divider value
* @width: Number of bits in divider value
*/
struct axxia_divclk {
struct axxia_clk aclk;
u32 reg;
u32 shift;
u32 width;
};
#define to_axxia_divclk(_aclk) container_of(_aclk, struct axxia_divclk, aclk)
/**
* axxia_divclk_recalc_rate - Calculate clock divider output rate
*/
static unsigned long
axxia_divclk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
struct axxia_clk *aclk = to_axxia_clk(hw);
struct axxia_divclk *divclk = to_axxia_divclk(aclk);
u32 ctrl, div;
regmap_read(aclk->regmap, divclk->reg, &ctrl);
div = 1 + ((ctrl >> divclk->shift) & ((1 << divclk->width)-1));
return parent_rate / div;
}
static const struct clk_ops axxia_divclk_ops = {
.recalc_rate = axxia_divclk_recalc_rate,
};
/**
* struct axxia_clkmux - Axxia clock mux
* @aclk: Common struct
* @reg: Offset into regmap for PLL control register
* @shift: Bit position for selection value
* @width: Number of bits in selection value
*/
struct axxia_clkmux {
struct axxia_clk aclk;
u32 reg;
u32 shift;
u32 width;
};
#define to_axxia_clkmux(_aclk) container_of(_aclk, struct axxia_clkmux, aclk)
/**
* axxia_clkmux_get_parent - Return the index of selected parent clock
*/
static u8 axxia_clkmux_get_parent(struct clk_hw *hw)
{
struct axxia_clk *aclk = to_axxia_clk(hw);
struct axxia_clkmux *mux = to_axxia_clkmux(aclk);
u32 ctrl, parent;
regmap_read(aclk->regmap, mux->reg, &ctrl);
parent = (ctrl >> mux->shift) & ((1 << mux->width) - 1);
return (u8) parent;
}
static const struct clk_ops axxia_clkmux_ops = {
.get_parent = axxia_clkmux_get_parent,
};
/*
* PLLs
*/
static struct axxia_pllclk clk_fab_pll = {
.aclk.hw.init = &(struct clk_init_data){
.name = "clk_fab_pll",
.parent_names = (const char *[]){
"clk_ref0"
},
.num_parents = 1,
.ops = &axxia_pllclk_ops,
},
.reg = 0x01800,
};
static struct axxia_pllclk clk_cpu_pll = {
.aclk.hw.init = &(struct clk_init_data){
.name = "clk_cpu_pll",
.parent_names = (const char *[]){
"clk_ref0"
},
.num_parents = 1,
.ops = &axxia_pllclk_ops,
},
.reg = 0x02000,
};
static struct axxia_pllclk clk_sys_pll = {
.aclk.hw.init = &(struct clk_init_data){
.name = "clk_sys_pll",
.parent_names = (const char *[]){
"clk_ref0"
},
.num_parents = 1,
.ops = &axxia_pllclk_ops,
},
.reg = 0x02800,
};
static struct axxia_pllclk clk_sm0_pll = {
.aclk.hw.init = &(struct clk_init_data){
.name = "clk_sm0_pll",
.parent_names = (const char *[]){
"clk_ref2"
},
.num_parents = 1,
.ops = &axxia_pllclk_ops,
},
.reg = 0x03000,
};
static struct axxia_pllclk clk_sm1_pll = {
.aclk.hw.init = &(struct clk_init_data){
.name = "clk_sm1_pll",
.parent_names = (const char *[]){
"clk_ref1"
},
.num_parents = 1,
.ops = &axxia_pllclk_ops,
},
.reg = 0x03800,
};
/*
* Clock dividers
*/
static struct axxia_divclk clk_cpu0_div = {
.aclk.hw.init = &(struct clk_init_data){
.name = "clk_cpu0_div",
.parent_names = (const char *[]){
"clk_cpu_pll"
},
.num_parents = 1,
.ops = &axxia_divclk_ops,
},
.reg = 0x10008,
.shift = 0,
.width = 4,
};
static struct axxia_divclk clk_cpu1_div = {
.aclk.hw.init = &(struct clk_init_data){
.name = "clk_cpu1_div",
.parent_names = (const char *[]){
"clk_cpu_pll"
},
.num_parents = 1,
.ops = &axxia_divclk_ops,
},
.reg = 0x10008,
.shift = 4,
.width = 4,
};
static struct axxia_divclk clk_cpu2_div = {
.aclk.hw.init = &(struct clk_init_data){
.name = "clk_cpu2_div",
.parent_names = (const char *[]){
"clk_cpu_pll"
},
.num_parents = 1,
.ops = &axxia_divclk_ops,
},
.reg = 0x10008,
.shift = 8,
.width = 4,
};
static struct axxia_divclk clk_cpu3_div = {
.aclk.hw.init = &(struct clk_init_data){
.name = "clk_cpu3_div",
.parent_names = (const char *[]){
"clk_cpu_pll"
},
.num_parents = 1,
.ops = &axxia_divclk_ops,
},
.reg = 0x10008,
.shift = 12,
.width = 4,
};
static struct axxia_divclk clk_nrcp_div = {
.aclk.hw.init = &(struct clk_init_data){
.name = "clk_nrcp_div",
.parent_names = (const char *[]){
"clk_sys_pll"
},
.num_parents = 1,
.ops = &axxia_divclk_ops,
},
.reg = 0x1000c,
.shift = 0,
.width = 4,
};
static struct axxia_divclk clk_sys_div = {
.aclk.hw.init = &(struct clk_init_data){
.name = "clk_sys_div",
.parent_names = (const char *[]){
"clk_sys_pll"
},
.num_parents = 1,
.ops = &axxia_divclk_ops,
},
.reg = 0x1000c,
.shift = 4,
.width = 4,
};
static struct axxia_divclk clk_fab_div = {
.aclk.hw.init = &(struct clk_init_data){
.name = "clk_fab_div",
.parent_names = (const char *[]){
"clk_fab_pll"
},
.num_parents = 1,
.ops = &axxia_divclk_ops,
},
.reg = 0x1000c,
.shift = 8,
.width = 4,
};
static struct axxia_divclk clk_per_div = {
.aclk.hw.init = &(struct clk_init_data){
.name = "clk_per_div",
.parent_names = (const char *[]){
"clk_sm1_pll"
},
.num_parents = 1,
.ops = &axxia_divclk_ops,
},
.reg = 0x1000c,
.shift = 12,
.width = 4,
};
static struct axxia_divclk clk_mmc_div = {
.aclk.hw.init = &(struct clk_init_data){
.name = "clk_mmc_div",
.parent_names = (const char *[]){
"clk_sm1_pll"
},
.num_parents = 1,
.ops = &axxia_divclk_ops,
},
.reg = 0x1000c,
.shift = 16,
.width = 4,
};
/*
* Clock MUXes
*/
static struct axxia_clkmux clk_cpu0_mux = {
.aclk.hw.init = &(struct clk_init_data){
.name = "clk_cpu0",
.parent_names = (const char *[]){
"clk_ref0",
"clk_cpu_pll",
"clk_cpu0_div",
"clk_cpu0_div"
},
.num_parents = 4,
.ops = &axxia_clkmux_ops,
},
.reg = 0x10000,
.shift = 0,
.width = 2,
};
static struct axxia_clkmux clk_cpu1_mux = {
.aclk.hw.init = &(struct clk_init_data){
.name = "clk_cpu1",
.parent_names = (const char *[]){
"clk_ref0",
"clk_cpu_pll",
"clk_cpu1_div",
"clk_cpu1_div"
},
.num_parents = 4,
.ops = &axxia_clkmux_ops,
},
.reg = 0x10000,
.shift = 2,
.width = 2,
};
static struct axxia_clkmux clk_cpu2_mux = {
.aclk.hw.init = &(struct clk_init_data){
.name = "clk_cpu2",
.parent_names = (const char *[]){
"clk_ref0",
"clk_cpu_pll",
"clk_cpu2_div",
"clk_cpu2_div"
},
.num_parents = 4,
.ops = &axxia_clkmux_ops,
},
.reg = 0x10000,
.shift = 4,
.width = 2,
};
static struct axxia_clkmux clk_cpu3_mux = {
.aclk.hw.init = &(struct clk_init_data){
.name = "clk_cpu3",
.parent_names = (const char *[]){
"clk_ref0",
"clk_cpu_pll",
"clk_cpu3_div",
"clk_cpu3_div"
},
.num_parents = 4,
.ops = &axxia_clkmux_ops,
},
.reg = 0x10000,
.shift = 6,
.width = 2,
};
static struct axxia_clkmux clk_nrcp_mux = {
.aclk.hw.init = &(struct clk_init_data){
.name = "clk_nrcp",
.parent_names = (const char *[]){
"clk_ref0",
"clk_sys_pll",
"clk_nrcp_div",
"clk_nrcp_div"
},
.num_parents = 4,
.ops = &axxia_clkmux_ops,
},
.reg = 0x10004,
.shift = 0,
.width = 2,
};
static struct axxia_clkmux clk_sys_mux = {
.aclk.hw.init = &(struct clk_init_data){
.name = "clk_sys",
.parent_names = (const char *[]){
"clk_ref0",
"clk_sys_pll",
"clk_sys_div",
"clk_sys_div"
},
.num_parents = 4,
.ops = &axxia_clkmux_ops,
},
.reg = 0x10004,
.shift = 2,
.width = 2,
};
static struct axxia_clkmux clk_fab_mux = {
.aclk.hw.init = &(struct clk_init_data){
.name = "clk_fab",
.parent_names = (const char *[]){
"clk_ref0",
"clk_fab_pll",
"clk_fab_div",
"clk_fab_div"
},
.num_parents = 4,
.ops = &axxia_clkmux_ops,
},
.reg = 0x10004,
.shift = 4,
.width = 2,
};
static struct axxia_clkmux clk_per_mux = {
.aclk.hw.init = &(struct clk_init_data){
.name = "clk_per",
.parent_names = (const char *[]){
"clk_ref1",
"clk_per_div"
},
.num_parents = 2,
.ops = &axxia_clkmux_ops,
},
.reg = 0x10004,
.shift = 6,
.width = 1,
};
static struct axxia_clkmux clk_mmc_mux = {
.aclk.hw.init = &(struct clk_init_data){
.name = "clk_mmc",
.parent_names = (const char *[]){
"clk_ref1",
"clk_mmc_div"
},
.num_parents = 2,
.ops = &axxia_clkmux_ops,
},
.reg = 0x10004,
.shift = 9,
.width = 1,
};
/* Table of all supported clocks indexed by the clock identifiers from the
* device tree binding
*/
static struct axxia_clk *axmclk_clocks[] = {
[AXXIA_CLK_FAB_PLL] = &clk_fab_pll.aclk,
[AXXIA_CLK_CPU_PLL] = &clk_cpu_pll.aclk,
[AXXIA_CLK_SYS_PLL] = &clk_sys_pll.aclk,
[AXXIA_CLK_SM0_PLL] = &clk_sm0_pll.aclk,
[AXXIA_CLK_SM1_PLL] = &clk_sm1_pll.aclk,
[AXXIA_CLK_FAB_DIV] = &clk_fab_div.aclk,
[AXXIA_CLK_SYS_DIV] = &clk_sys_div.aclk,
[AXXIA_CLK_NRCP_DIV] = &clk_nrcp_div.aclk,
[AXXIA_CLK_CPU0_DIV] = &clk_cpu0_div.aclk,
[AXXIA_CLK_CPU1_DIV] = &clk_cpu1_div.aclk,
[AXXIA_CLK_CPU2_DIV] = &clk_cpu2_div.aclk,
[AXXIA_CLK_CPU3_DIV] = &clk_cpu3_div.aclk,
[AXXIA_CLK_PER_DIV] = &clk_per_div.aclk,
[AXXIA_CLK_MMC_DIV] = &clk_mmc_div.aclk,
[AXXIA_CLK_FAB] = &clk_fab_mux.aclk,
[AXXIA_CLK_SYS] = &clk_sys_mux.aclk,
[AXXIA_CLK_NRCP] = &clk_nrcp_mux.aclk,
[AXXIA_CLK_CPU0] = &clk_cpu0_mux.aclk,
[AXXIA_CLK_CPU1] = &clk_cpu1_mux.aclk,
[AXXIA_CLK_CPU2] = &clk_cpu2_mux.aclk,
[AXXIA_CLK_CPU3] = &clk_cpu3_mux.aclk,
[AXXIA_CLK_PER] = &clk_per_mux.aclk,
[AXXIA_CLK_MMC] = &clk_mmc_mux.aclk,
};
static struct clk_hw *
of_clk_axmclk_get(struct of_phandle_args *clkspec, void *unused)
{
unsigned int idx = clkspec->args[0];
if (idx >= ARRAY_SIZE(axmclk_clocks)) {
pr_err("%s: invalid index %u\n", __func__, idx);
return ERR_PTR(-EINVAL);
}
return &axmclk_clocks[idx]->hw;
}
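/*
 * A consumer references these clocks by the same identifiers in its
 * device tree node, e.g. (hypothetical consumer):
 *
 *	clocks = <&clks AXXIA_CLK_MMC>;
 *
 * of_clk_axmclk_get() then translates the cell value into the matching
 * clk_hw from axmclk_clocks[].
 */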
static const struct regmap_config axmclk_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x1fffc,
.fast_io = true,
};
static const struct of_device_id axmclk_match_table[] = {
{ .compatible = "lsi,axm5516-clks" },
{ }
};
MODULE_DEVICE_TABLE(of, axmclk_match_table);
static int axmclk_probe(struct platform_device *pdev)
{
void __iomem *base;
int i, ret;
struct device *dev = &pdev->dev;
struct regmap *regmap;
size_t num_clks;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
regmap = devm_regmap_init_mmio(dev, base, &axmclk_regmap_config);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
num_clks = ARRAY_SIZE(axmclk_clocks);
pr_info("axmclk: supporting %zu clocks\n", num_clks);
	/*
	 * Update each entry with the allocated regmap and register the clock
	 * with the common clock framework.
	 */
for (i = 0; i < num_clks; i++) {
axmclk_clocks[i]->regmap = regmap;
ret = devm_clk_hw_register(dev, &axmclk_clocks[i]->hw);
if (ret)
return ret;
}
return devm_of_clk_add_hw_provider(dev, of_clk_axmclk_get, NULL);
}
static struct platform_driver axmclk_driver = {
.probe = axmclk_probe,
.driver = {
.name = "clk-axm5516",
.of_match_table = axmclk_match_table,
},
};
static int __init axmclk_init(void)
{
return platform_driver_register(&axmclk_driver);
}
core_initcall(axmclk_init);
static void __exit axmclk_exit(void)
{
platform_driver_unregister(&axmclk_driver);
}
module_exit(axmclk_exit);
MODULE_DESCRIPTION("AXM5516 clock driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:clk-axm5516");
| linux-master | drivers/clk/clk-axm5516.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2022 NXP
*
* Peng Fan <[email protected]>
*/
#include <linux/clk-provider.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
#include "clk.h"
#define DIRECT_OFFSET 0x0
/*
* 0b000 - LPCG will be OFF in any CPU mode.
* 0b100 - LPCG will be ON in any CPU mode.
*/
#define LPM_SETTING_OFF 0x0
#define LPM_SETTING_ON 0x4
#define LPM_CUR_OFFSET 0x1c
#define AUTHEN_OFFSET 0x30
#define CPULPM_EN BIT(2)
#define TZ_NS_SHIFT 9
#define TZ_NS_MASK BIT(9)
#define WHITE_LIST_SHIFT 16
struct imx93_clk_gate {
struct clk_hw hw;
void __iomem *reg;
u32 bit_idx;
u32 val;
u32 mask;
spinlock_t *lock;
unsigned int *share_count;
};
#define to_imx93_clk_gate(_hw) container_of(_hw, struct imx93_clk_gate, hw)
static void imx93_clk_gate_do_hardware(struct clk_hw *hw, bool enable)
{
struct imx93_clk_gate *gate = to_imx93_clk_gate(hw);
u32 val;
val = readl(gate->reg + AUTHEN_OFFSET);
if (val & CPULPM_EN) {
val = enable ? LPM_SETTING_ON : LPM_SETTING_OFF;
writel(val, gate->reg + LPM_CUR_OFFSET);
} else {
val = readl(gate->reg + DIRECT_OFFSET);
val &= ~(gate->mask << gate->bit_idx);
if (enable)
val |= (gate->val & gate->mask) << gate->bit_idx;
writel(val, gate->reg + DIRECT_OFFSET);
}
}
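/*
 * Example: for a gate with bit_idx == 0, mask == 0x3 and val == 0x3 and
 * CPULPM_EN clear, enabling writes 0x3 into bits [1:0] of the DIRECT
 * register and disabling clears them; with CPULPM_EN set, the LPM_CUR
 * register is written with LPM_SETTING_ON/OFF instead.
 */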
static int imx93_clk_gate_enable(struct clk_hw *hw)
{
struct imx93_clk_gate *gate = to_imx93_clk_gate(hw);
unsigned long flags;
spin_lock_irqsave(gate->lock, flags);
if (gate->share_count && (*gate->share_count)++ > 0)
goto out;
imx93_clk_gate_do_hardware(hw, true);
out:
spin_unlock_irqrestore(gate->lock, flags);
return 0;
}
static void imx93_clk_gate_disable(struct clk_hw *hw)
{
struct imx93_clk_gate *gate = to_imx93_clk_gate(hw);
unsigned long flags;
spin_lock_irqsave(gate->lock, flags);
if (gate->share_count) {
if (WARN_ON(*gate->share_count == 0))
goto out;
else if (--(*gate->share_count) > 0)
goto out;
}
imx93_clk_gate_do_hardware(hw, false);
out:
spin_unlock_irqrestore(gate->lock, flags);
}
static int imx93_clk_gate_reg_is_enabled(struct imx93_clk_gate *gate)
{
u32 val = readl(gate->reg + AUTHEN_OFFSET);
if (val & CPULPM_EN) {
val = readl(gate->reg + LPM_CUR_OFFSET);
if (val == LPM_SETTING_ON)
return 1;
} else {
val = readl(gate->reg);
if (((val >> gate->bit_idx) & gate->mask) == gate->val)
return 1;
}
return 0;
}
static int imx93_clk_gate_is_enabled(struct clk_hw *hw)
{
struct imx93_clk_gate *gate = to_imx93_clk_gate(hw);
unsigned long flags;
int ret;
spin_lock_irqsave(gate->lock, flags);
ret = imx93_clk_gate_reg_is_enabled(gate);
spin_unlock_irqrestore(gate->lock, flags);
return ret;
}
static void imx93_clk_gate_disable_unused(struct clk_hw *hw)
{
struct imx93_clk_gate *gate = to_imx93_clk_gate(hw);
unsigned long flags;
spin_lock_irqsave(gate->lock, flags);
if (!gate->share_count || *gate->share_count == 0)
imx93_clk_gate_do_hardware(hw, false);
spin_unlock_irqrestore(gate->lock, flags);
}
static const struct clk_ops imx93_clk_gate_ops = {
.enable = imx93_clk_gate_enable,
.disable = imx93_clk_gate_disable,
.disable_unused = imx93_clk_gate_disable_unused,
.is_enabled = imx93_clk_gate_is_enabled,
};
static const struct clk_ops imx93_clk_gate_ro_ops = {
.is_enabled = imx93_clk_gate_is_enabled,
};
struct clk_hw *imx93_clk_gate(struct device *dev, const char *name, const char *parent_name,
unsigned long flags, void __iomem *reg, u32 bit_idx, u32 val,
u32 mask, u32 domain_id, unsigned int *share_count)
{
struct imx93_clk_gate *gate;
struct clk_hw *hw;
struct clk_init_data init;
int ret;
u32 authen;
	gate = kzalloc(sizeof(*gate), GFP_KERNEL);
if (!gate)
return ERR_PTR(-ENOMEM);
gate->reg = reg;
gate->lock = &imx_ccm_lock;
gate->bit_idx = bit_idx;
gate->val = val;
gate->mask = mask;
gate->share_count = share_count;
init.name = name;
init.ops = &imx93_clk_gate_ops;
init.flags = flags | CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE;
init.parent_names = parent_name ? &parent_name : NULL;
init.num_parents = parent_name ? 1 : 0;
gate->hw.init = &init;
hw = &gate->hw;
authen = readl(reg + AUTHEN_OFFSET);
if (!(authen & TZ_NS_MASK) || !(authen & BIT(WHITE_LIST_SHIFT + domain_id)))
init.ops = &imx93_clk_gate_ro_ops;
ret = clk_hw_register(dev, hw);
if (ret) {
kfree(gate);
return ERR_PTR(ret);
}
return hw;
}
EXPORT_SYMBOL_GPL(imx93_clk_gate);
| linux-master | drivers/clk/imx/clk-gate-93.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2012 Freescale Semiconductor, Inc.
* Copyright 2012 Linaro Ltd.
*/
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/err.h>
#include "clk.h"
#define PLL_NUM_OFFSET 0x10
#define PLL_DENOM_OFFSET 0x20
#define PLL_IMX7_NUM_OFFSET 0x20
#define PLL_IMX7_DENOM_OFFSET 0x30
#define PLL_VF610_NUM_OFFSET 0x20
#define PLL_VF610_DENOM_OFFSET 0x30
#define BM_PLL_POWER (0x1 << 12)
#define BM_PLL_LOCK (0x1 << 31)
#define IMX7_ENET_PLL_POWER (0x1 << 5)
#define IMX7_DDR_PLL_POWER (0x1 << 20)
#define PLL_LOCK_TIMEOUT 10000
/**
* struct clk_pllv3 - IMX PLL clock version 3
* @hw: clock source
* @base: base address of PLL registers
* @power_bit: pll power bit mask
* @powerup_set: set power_bit to power up the PLL
* @div_mask: mask of divider bits
* @div_shift: shift of divider bits
* @ref_clock: reference clock rate
* @num_offset: num register offset
* @denom_offset: denom register offset
*
* IMX PLL clock version 3, found on i.MX6 series. Divider for pllv3
* is actually a multiplier, and always sits at bit 0.
*/
struct clk_pllv3 {
struct clk_hw hw;
void __iomem *base;
u32 power_bit;
bool powerup_set;
u32 div_mask;
u32 div_shift;
unsigned long ref_clock;
u32 num_offset;
u32 denom_offset;
};
#define to_clk_pllv3(_hw) container_of(_hw, struct clk_pllv3, hw)
static int clk_pllv3_wait_lock(struct clk_pllv3 *pll)
{
u32 val = readl_relaxed(pll->base) & pll->power_bit;
/* No need to wait for lock when pll is not powered up */
if ((pll->powerup_set && !val) || (!pll->powerup_set && val))
return 0;
return readl_relaxed_poll_timeout(pll->base, val, val & BM_PLL_LOCK,
500, PLL_LOCK_TIMEOUT);
}
static int clk_pllv3_prepare(struct clk_hw *hw)
{
struct clk_pllv3 *pll = to_clk_pllv3(hw);
u32 val;
val = readl_relaxed(pll->base);
if (pll->powerup_set)
val |= pll->power_bit;
else
val &= ~pll->power_bit;
writel_relaxed(val, pll->base);
return clk_pllv3_wait_lock(pll);
}
static void clk_pllv3_unprepare(struct clk_hw *hw)
{
struct clk_pllv3 *pll = to_clk_pllv3(hw);
u32 val;
val = readl_relaxed(pll->base);
if (pll->powerup_set)
val &= ~pll->power_bit;
else
val |= pll->power_bit;
writel_relaxed(val, pll->base);
}
static int clk_pllv3_is_prepared(struct clk_hw *hw)
{
struct clk_pllv3 *pll = to_clk_pllv3(hw);
if (readl_relaxed(pll->base) & BM_PLL_LOCK)
return 1;
return 0;
}
static unsigned long clk_pllv3_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_pllv3 *pll = to_clk_pllv3(hw);
u32 div = (readl_relaxed(pll->base) >> pll->div_shift) & pll->div_mask;
return (div == 1) ? parent_rate * 22 : parent_rate * 20;
}
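/*
 * Example: assuming the usual 24 MHz oscillator as parent, div == 1
 * selects 24 MHz * 22 = 528 MHz and div == 0 selects 24 MHz * 20 = 480 MHz.
 */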
static long clk_pllv3_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
unsigned long parent_rate = *prate;
return (rate >= parent_rate * 22) ? parent_rate * 22 :
parent_rate * 20;
}
static int clk_pllv3_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_pllv3 *pll = to_clk_pllv3(hw);
u32 val, div;
if (rate == parent_rate * 22)
div = 1;
else if (rate == parent_rate * 20)
div = 0;
else
return -EINVAL;
val = readl_relaxed(pll->base);
val &= ~(pll->div_mask << pll->div_shift);
val |= (div << pll->div_shift);
writel_relaxed(val, pll->base);
return clk_pllv3_wait_lock(pll);
}
static const struct clk_ops clk_pllv3_ops = {
.prepare = clk_pllv3_prepare,
.unprepare = clk_pllv3_unprepare,
.is_prepared = clk_pllv3_is_prepared,
.recalc_rate = clk_pllv3_recalc_rate,
.round_rate = clk_pllv3_round_rate,
.set_rate = clk_pllv3_set_rate,
};
static unsigned long clk_pllv3_sys_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_pllv3 *pll = to_clk_pllv3(hw);
u32 div = readl_relaxed(pll->base) & pll->div_mask;
return parent_rate * div / 2;
}
static long clk_pllv3_sys_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
unsigned long parent_rate = *prate;
unsigned long min_rate = parent_rate * 54 / 2;
unsigned long max_rate = parent_rate * 108 / 2;
u32 div;
if (rate > max_rate)
rate = max_rate;
else if (rate < min_rate)
rate = min_rate;
div = rate * 2 / parent_rate;
return parent_rate * div / 2;
}
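/*
 * Example: assuming a 24 MHz parent, the achievable range is 648 MHz
 * (div == 54) to 1296 MHz (div == 108) in 12 MHz steps; a 792 MHz request
 * rounds to div == 66, i.e. 24 MHz * 66 / 2 = 792 MHz.
 */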
static int clk_pllv3_sys_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_pllv3 *pll = to_clk_pllv3(hw);
unsigned long min_rate = parent_rate * 54 / 2;
unsigned long max_rate = parent_rate * 108 / 2;
u32 val, div;
if (rate < min_rate || rate > max_rate)
return -EINVAL;
div = rate * 2 / parent_rate;
val = readl_relaxed(pll->base);
val &= ~pll->div_mask;
val |= div;
writel_relaxed(val, pll->base);
return clk_pllv3_wait_lock(pll);
}
static const struct clk_ops clk_pllv3_sys_ops = {
.prepare = clk_pllv3_prepare,
.unprepare = clk_pllv3_unprepare,
.is_prepared = clk_pllv3_is_prepared,
.recalc_rate = clk_pllv3_sys_recalc_rate,
.round_rate = clk_pllv3_sys_round_rate,
.set_rate = clk_pllv3_sys_set_rate,
};
static unsigned long clk_pllv3_av_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_pllv3 *pll = to_clk_pllv3(hw);
u32 mfn = readl_relaxed(pll->base + pll->num_offset);
u32 mfd = readl_relaxed(pll->base + pll->denom_offset);
u32 div = readl_relaxed(pll->base) & pll->div_mask;
u64 temp64 = (u64)parent_rate;
temp64 *= mfn;
do_div(temp64, mfd);
return parent_rate * div + (unsigned long)temp64;
}
static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
unsigned long parent_rate = *prate;
unsigned long min_rate = parent_rate * 27;
unsigned long max_rate = parent_rate * 54;
u32 div;
u32 mfn, mfd = 1000000;
u32 max_mfd = 0x3FFFFFFF;
u64 temp64;
if (rate > max_rate)
rate = max_rate;
else if (rate < min_rate)
rate = min_rate;
if (parent_rate <= max_mfd)
mfd = parent_rate;
div = rate / parent_rate;
temp64 = (u64) (rate - div * parent_rate);
temp64 *= mfd;
temp64 = div64_ul(temp64, parent_rate);
mfn = temp64;
temp64 = (u64)parent_rate;
temp64 *= mfn;
do_div(temp64, mfd);
return parent_rate * div + (unsigned long)temp64;
}
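/*
 * Example: assuming a 24 MHz parent and a 786432000 Hz request (a common
 * audio rate), div == 32 and the 18432000 Hz remainder is encoded as
 * mfn/mfd == 18432000/24000000 (mfd is set to the parent rate whenever
 * that fits), so 24 MHz * (32 + 18432000/24000000) reproduces the request
 * exactly.
 */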
static int clk_pllv3_av_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_pllv3 *pll = to_clk_pllv3(hw);
unsigned long min_rate = parent_rate * 27;
unsigned long max_rate = parent_rate * 54;
u32 val, div;
u32 mfn, mfd = 1000000;
u32 max_mfd = 0x3FFFFFFF;
u64 temp64;
if (rate < min_rate || rate > max_rate)
return -EINVAL;
if (parent_rate <= max_mfd)
mfd = parent_rate;
div = rate / parent_rate;
temp64 = (u64) (rate - div * parent_rate);
temp64 *= mfd;
temp64 = div64_ul(temp64, parent_rate);
mfn = temp64;
val = readl_relaxed(pll->base);
val &= ~pll->div_mask;
val |= div;
writel_relaxed(val, pll->base);
writel_relaxed(mfn, pll->base + pll->num_offset);
writel_relaxed(mfd, pll->base + pll->denom_offset);
return clk_pllv3_wait_lock(pll);
}
static const struct clk_ops clk_pllv3_av_ops = {
.prepare = clk_pllv3_prepare,
.unprepare = clk_pllv3_unprepare,
.is_prepared = clk_pllv3_is_prepared,
.recalc_rate = clk_pllv3_av_recalc_rate,
.round_rate = clk_pllv3_av_round_rate,
.set_rate = clk_pllv3_av_set_rate,
};
struct clk_pllv3_vf610_mf {
u32 mfi; /* integer part, can be 20 or 22 */
u32 mfn; /* numerator, 30-bit value */
	u32 mfd;	/* denominator, 30-bit value, must be greater than mfn */
};
static unsigned long clk_pllv3_vf610_mf_to_rate(unsigned long parent_rate,
struct clk_pllv3_vf610_mf mf)
{
u64 temp64;
temp64 = parent_rate;
temp64 *= mf.mfn;
do_div(temp64, mf.mfd);
return (parent_rate * mf.mfi) + temp64;
}
static struct clk_pllv3_vf610_mf clk_pllv3_vf610_rate_to_mf(
unsigned long parent_rate, unsigned long rate)
{
struct clk_pllv3_vf610_mf mf;
u64 temp64;
mf.mfi = (rate >= 22 * parent_rate) ? 22 : 20;
mf.mfd = 0x3fffffff; /* use max supported value for best accuracy */
if (rate <= parent_rate * mf.mfi)
mf.mfn = 0;
else if (rate >= parent_rate * (mf.mfi + 1))
mf.mfn = mf.mfd - 1;
else {
/* rate = parent_rate * (mfi + mfn/mfd) */
temp64 = rate - parent_rate * mf.mfi;
temp64 *= mf.mfd;
temp64 = div64_ul(temp64, parent_rate);
mf.mfn = temp64;
}
return mf;
}
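/*
 * Example: assuming a 24 MHz parent and a 500 MHz request, mfi == 20
 * (500 MHz < 22 * 24 MHz) and the remaining 20 MHz is encoded against
 * mfd == 0x3fffffff as mfn == 20000000 * 0x3fffffff / 24000000.
 */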
static unsigned long clk_pllv3_vf610_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_pllv3 *pll = to_clk_pllv3(hw);
struct clk_pllv3_vf610_mf mf;
mf.mfn = readl_relaxed(pll->base + pll->num_offset);
mf.mfd = readl_relaxed(pll->base + pll->denom_offset);
mf.mfi = (readl_relaxed(pll->base) & pll->div_mask) ? 22 : 20;
return clk_pllv3_vf610_mf_to_rate(parent_rate, mf);
}
static long clk_pllv3_vf610_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
struct clk_pllv3_vf610_mf mf = clk_pllv3_vf610_rate_to_mf(*prate, rate);
return clk_pllv3_vf610_mf_to_rate(*prate, mf);
}
static int clk_pllv3_vf610_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_pllv3 *pll = to_clk_pllv3(hw);
struct clk_pllv3_vf610_mf mf =
clk_pllv3_vf610_rate_to_mf(parent_rate, rate);
u32 val;
val = readl_relaxed(pll->base);
if (mf.mfi == 20)
val &= ~pll->div_mask; /* clear bit for mfi=20 */
else
val |= pll->div_mask; /* set bit for mfi=22 */
writel_relaxed(val, pll->base);
writel_relaxed(mf.mfn, pll->base + pll->num_offset);
writel_relaxed(mf.mfd, pll->base + pll->denom_offset);
return clk_pllv3_wait_lock(pll);
}
static const struct clk_ops clk_pllv3_vf610_ops = {
.prepare = clk_pllv3_prepare,
.unprepare = clk_pllv3_unprepare,
.is_prepared = clk_pllv3_is_prepared,
.recalc_rate = clk_pllv3_vf610_recalc_rate,
.round_rate = clk_pllv3_vf610_round_rate,
.set_rate = clk_pllv3_vf610_set_rate,
};
static unsigned long clk_pllv3_enet_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_pllv3 *pll = to_clk_pllv3(hw);
return pll->ref_clock;
}
static const struct clk_ops clk_pllv3_enet_ops = {
.prepare = clk_pllv3_prepare,
.unprepare = clk_pllv3_unprepare,
.is_prepared = clk_pllv3_is_prepared,
.recalc_rate = clk_pllv3_enet_recalc_rate,
};
struct clk_hw *imx_clk_hw_pllv3(enum imx_pllv3_type type, const char *name,
const char *parent_name, void __iomem *base,
u32 div_mask)
{
struct clk_pllv3 *pll;
const struct clk_ops *ops;
struct clk_hw *hw;
struct clk_init_data init;
int ret;
pll = kzalloc(sizeof(*pll), GFP_KERNEL);
if (!pll)
return ERR_PTR(-ENOMEM);
pll->power_bit = BM_PLL_POWER;
pll->num_offset = PLL_NUM_OFFSET;
pll->denom_offset = PLL_DENOM_OFFSET;
switch (type) {
case IMX_PLLV3_SYS:
ops = &clk_pllv3_sys_ops;
break;
case IMX_PLLV3_SYS_VF610:
ops = &clk_pllv3_vf610_ops;
pll->num_offset = PLL_VF610_NUM_OFFSET;
pll->denom_offset = PLL_VF610_DENOM_OFFSET;
break;
case IMX_PLLV3_USB_VF610:
pll->div_shift = 1;
fallthrough;
case IMX_PLLV3_USB:
ops = &clk_pllv3_ops;
pll->powerup_set = true;
break;
case IMX_PLLV3_AV_IMX7:
pll->num_offset = PLL_IMX7_NUM_OFFSET;
pll->denom_offset = PLL_IMX7_DENOM_OFFSET;
fallthrough;
case IMX_PLLV3_AV:
ops = &clk_pllv3_av_ops;
break;
case IMX_PLLV3_ENET_IMX7:
pll->power_bit = IMX7_ENET_PLL_POWER;
pll->ref_clock = 1000000000;
ops = &clk_pllv3_enet_ops;
break;
case IMX_PLLV3_ENET:
pll->ref_clock = 500000000;
ops = &clk_pllv3_enet_ops;
break;
case IMX_PLLV3_DDR_IMX7:
pll->power_bit = IMX7_DDR_PLL_POWER;
pll->num_offset = PLL_IMX7_NUM_OFFSET;
pll->denom_offset = PLL_IMX7_DENOM_OFFSET;
ops = &clk_pllv3_av_ops;
break;
default:
ops = &clk_pllv3_ops;
}
pll->base = base;
pll->div_mask = div_mask;
init.name = name;
init.ops = ops;
init.flags = 0;
init.parent_names = &parent_name;
init.num_parents = 1;
pll->hw.init = &init;
hw = &pll->hw;
ret = clk_hw_register(NULL, hw);
if (ret) {
kfree(pll);
return ERR_PTR(ret);
}
return hw;
}
EXPORT_SYMBOL_GPL(imx_clk_hw_pllv3);
| linux-master | drivers/clk/imx/clk-pllv3.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014 Freescale Semiconductor, Inc.
*/
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/slab.h>
#include "clk.h"
/**
* struct clk_gate_exclusive - i.MX specific gate clock which is mutually
* exclusive with other gate clocks
*
* @gate: the parent class
* @exclusive_mask: mask of gate bits which are mutually exclusive to this
* gate clock
*
 * The imx exclusive gate clock is a subclass of the basic clk_gate
 * with an additional mask to indicate which other gate bits in the same
 * register are mutually exclusive to this gate clock.
*/
struct clk_gate_exclusive {
struct clk_gate gate;
u32 exclusive_mask;
};
static int clk_gate_exclusive_enable(struct clk_hw *hw)
{
struct clk_gate *gate = to_clk_gate(hw);
struct clk_gate_exclusive *exgate = container_of(gate,
struct clk_gate_exclusive, gate);
u32 val = readl(gate->reg);
if (val & exgate->exclusive_mask)
return -EBUSY;
return clk_gate_ops.enable(hw);
}
static void clk_gate_exclusive_disable(struct clk_hw *hw)
{
clk_gate_ops.disable(hw);
}
static int clk_gate_exclusive_is_enabled(struct clk_hw *hw)
{
return clk_gate_ops.is_enabled(hw);
}
static const struct clk_ops clk_gate_exclusive_ops = {
.enable = clk_gate_exclusive_enable,
.disable = clk_gate_exclusive_disable,
.is_enabled = clk_gate_exclusive_is_enabled,
};
struct clk_hw *imx_clk_hw_gate_exclusive(const char *name, const char *parent,
void __iomem *reg, u8 shift, u32 exclusive_mask)
{
struct clk_gate_exclusive *exgate;
struct clk_gate *gate;
struct clk_hw *hw;
struct clk_init_data init;
int ret;
if (exclusive_mask == 0)
return ERR_PTR(-EINVAL);
exgate = kzalloc(sizeof(*exgate), GFP_KERNEL);
if (!exgate)
return ERR_PTR(-ENOMEM);
gate = &exgate->gate;
init.name = name;
init.ops = &clk_gate_exclusive_ops;
init.flags = CLK_SET_RATE_PARENT;
init.parent_names = parent ? &parent : NULL;
init.num_parents = parent ? 1 : 0;
gate->reg = reg;
gate->bit_idx = shift;
gate->lock = &imx_ccm_lock;
gate->hw.init = &init;
exgate->exclusive_mask = exclusive_mask;
hw = &gate->hw;
ret = clk_hw_register(NULL, hw);
if (ret) {
kfree(gate);
return ERR_PTR(ret);
}
return hw;
}
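/*
 * A minimal usage sketch (register offset, bit and mask are illustrative):
 *
 *	hw = imx_clk_hw_gate_exclusive("lvds1_in", "anaclk1",
 *				       base + 0x160, 12, BIT(10));
 *
 * Enabling this gate fails with -EBUSY while the gate behind BIT(10) of
 * the same register is on.
 */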
| linux-master | drivers/clk/imx/clk-gate-exclusive.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Driver for i.MX8M Plus Audio BLK_CTRL
*
* Copyright (C) 2022 Marek Vasut <[email protected]>
*/
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <dt-bindings/clock/imx8mp-clock.h>
#include "clk.h"
#define CLKEN0 0x000
#define CLKEN1 0x004
#define SAI_MCLK_SEL(n) (0x300 + 4 * (n)) /* n in 0..5 */
#define PDM_SEL 0x318
#define SAI_PLL_GNRL_CTL 0x400
#define SAIn_MCLK1_PARENT(n) \
static const struct clk_parent_data \
clk_imx8mp_audiomix_sai##n##_mclk1_parents[] = { \
{ \
.fw_name = "sai"__stringify(n), \
.name = "sai"__stringify(n) \
}, { \
.fw_name = "sai"__stringify(n)"_mclk", \
.name = "sai"__stringify(n)"_mclk" \
}, \
}
SAIn_MCLK1_PARENT(1);
SAIn_MCLK1_PARENT(2);
SAIn_MCLK1_PARENT(3);
SAIn_MCLK1_PARENT(5);
SAIn_MCLK1_PARENT(6);
SAIn_MCLK1_PARENT(7);
static const struct clk_parent_data clk_imx8mp_audiomix_sai_mclk2_parents[] = {
{ .fw_name = "sai1", .name = "sai1" },
{ .fw_name = "sai2", .name = "sai2" },
{ .fw_name = "sai3", .name = "sai3" },
{ .name = "dummy" },
{ .fw_name = "sai5", .name = "sai5" },
{ .fw_name = "sai6", .name = "sai6" },
{ .fw_name = "sai7", .name = "sai7" },
{ .fw_name = "sai1_mclk", .name = "sai1_mclk" },
{ .fw_name = "sai2_mclk", .name = "sai2_mclk" },
{ .fw_name = "sai3_mclk", .name = "sai3_mclk" },
{ .name = "dummy" },
{ .fw_name = "sai5_mclk", .name = "sai5_mclk" },
{ .fw_name = "sai6_mclk", .name = "sai6_mclk" },
{ .fw_name = "sai7_mclk", .name = "sai7_mclk" },
{ .fw_name = "spdif_extclk", .name = "spdif_extclk" },
{ .name = "dummy" },
};
static const struct clk_parent_data clk_imx8mp_audiomix_pdm_parents[] = {
{ .fw_name = "pdm", .name = "pdm" },
{ .name = "sai_pll_out_div2" },
{ .fw_name = "sai1_mclk", .name = "sai1_mclk" },
{ .name = "dummy" },
};
static const struct clk_parent_data clk_imx8mp_audiomix_pll_parents[] = {
{ .fw_name = "osc_24m", .name = "osc_24m" },
{ .name = "dummy" },
{ .name = "dummy" },
{ .name = "dummy" },
};
static const struct clk_parent_data clk_imx8mp_audiomix_pll_bypass_sels[] = {
{ .fw_name = "sai_pll", .name = "sai_pll" },
{ .fw_name = "sai_pll_ref_sel", .name = "sai_pll_ref_sel" },
};
#define CLK_GATE(gname, cname) \
{ \
gname"_cg", \
IMX8MP_CLK_AUDIOMIX_##cname, \
{ .fw_name = "ahb", .name = "ahb" }, NULL, 1, \
CLKEN0 + 4 * !!(IMX8MP_CLK_AUDIOMIX_##cname / 32), \
1, IMX8MP_CLK_AUDIOMIX_##cname % 32 \
}
#define CLK_SAIn(n) \
{ \
"sai"__stringify(n)"_mclk1_sel", \
IMX8MP_CLK_AUDIOMIX_SAI##n##_MCLK1_SEL, {}, \
clk_imx8mp_audiomix_sai##n##_mclk1_parents, \
ARRAY_SIZE(clk_imx8mp_audiomix_sai##n##_mclk1_parents), \
SAI_MCLK_SEL(n), 1, 0 \
}, { \
"sai"__stringify(n)"_mclk2_sel", \
IMX8MP_CLK_AUDIOMIX_SAI##n##_MCLK2_SEL, {}, \
clk_imx8mp_audiomix_sai_mclk2_parents, \
ARRAY_SIZE(clk_imx8mp_audiomix_sai_mclk2_parents), \
SAI_MCLK_SEL(n), 4, 1 \
}, { \
"sai"__stringify(n)"_ipg_cg", \
IMX8MP_CLK_AUDIOMIX_SAI##n##_IPG, \
{ .fw_name = "ahb", .name = "ahb" }, NULL, 1, \
CLKEN0, 1, IMX8MP_CLK_AUDIOMIX_SAI##n##_IPG \
}, { \
"sai"__stringify(n)"_mclk1_cg", \
IMX8MP_CLK_AUDIOMIX_SAI##n##_MCLK1, \
{ \
.fw_name = "sai"__stringify(n)"_mclk1_sel", \
.name = "sai"__stringify(n)"_mclk1_sel" \
}, NULL, 1, \
CLKEN0, 1, IMX8MP_CLK_AUDIOMIX_SAI##n##_MCLK1 \
}, { \
"sai"__stringify(n)"_mclk2_cg", \
IMX8MP_CLK_AUDIOMIX_SAI##n##_MCLK2, \
{ \
.fw_name = "sai"__stringify(n)"_mclk2_sel", \
.name = "sai"__stringify(n)"_mclk2_sel" \
}, NULL, 1, \
CLKEN0, 1, IMX8MP_CLK_AUDIOMIX_SAI##n##_MCLK2 \
}, { \
"sai"__stringify(n)"_mclk3_cg", \
IMX8MP_CLK_AUDIOMIX_SAI##n##_MCLK3, \
{ \
.fw_name = "sai_pll_out_div2", \
.name = "sai_pll_out_div2" \
}, NULL, 1, \
CLKEN0, 1, IMX8MP_CLK_AUDIOMIX_SAI##n##_MCLK3 \
}
#define CLK_PDM \
{ \
"pdm_sel", IMX8MP_CLK_AUDIOMIX_PDM_SEL, {}, \
clk_imx8mp_audiomix_pdm_parents, \
ARRAY_SIZE(clk_imx8mp_audiomix_pdm_parents), \
PDM_SEL, 2, 0 \
}
struct clk_imx8mp_audiomix_sel {
const char *name;
int clkid;
const struct clk_parent_data parent; /* For gate */
const struct clk_parent_data *parents; /* For mux */
int num_parents;
u16 reg;
u8 width;
u8 shift;
};
static struct clk_imx8mp_audiomix_sel sels[] = {
CLK_GATE("asrc", ASRC_IPG),
CLK_GATE("pdm", PDM_IPG),
CLK_GATE("earc", EARC_IPG),
CLK_GATE("ocrama", OCRAMA_IPG),
CLK_GATE("aud2htx", AUD2HTX_IPG),
CLK_GATE("earc_phy", EARC_PHY),
CLK_GATE("sdma2", SDMA2_ROOT),
CLK_GATE("sdma3", SDMA3_ROOT),
CLK_GATE("spba2", SPBA2_ROOT),
CLK_GATE("dsp", DSP_ROOT),
CLK_GATE("dspdbg", DSPDBG_ROOT),
CLK_GATE("edma", EDMA_ROOT),
CLK_GATE("audpll", AUDPLL_ROOT),
CLK_GATE("mu2", MU2_ROOT),
CLK_GATE("mu3", MU3_ROOT),
CLK_PDM,
CLK_SAIn(1),
CLK_SAIn(2),
CLK_SAIn(3),
CLK_SAIn(5),
CLK_SAIn(6),
CLK_SAIn(7)
};
static int clk_imx8mp_audiomix_probe(struct platform_device *pdev)
{
struct clk_hw_onecell_data *priv;
struct device *dev = &pdev->dev;
void __iomem *base;
struct clk_hw *hw;
int i;
priv = devm_kzalloc(dev,
struct_size(priv, hws, IMX8MP_CLK_AUDIOMIX_END),
GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->num = IMX8MP_CLK_AUDIOMIX_END;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
for (i = 0; i < ARRAY_SIZE(sels); i++) {
if (sels[i].num_parents == 1) {
hw = devm_clk_hw_register_gate_parent_data(dev,
sels[i].name, &sels[i].parent, 0,
base + sels[i].reg, sels[i].shift, 0, NULL);
} else {
hw = devm_clk_hw_register_mux_parent_data_table(dev,
sels[i].name, sels[i].parents,
sels[i].num_parents, 0,
base + sels[i].reg,
sels[i].shift, sels[i].width,
0, NULL, NULL);
}
if (IS_ERR(hw))
return PTR_ERR(hw);
priv->hws[sels[i].clkid] = hw;
}
/* SAI PLL */
	hw = devm_clk_hw_register_mux_parent_data_table(dev,
		"sai_pll_ref_sel", clk_imx8mp_audiomix_pll_parents,
		ARRAY_SIZE(clk_imx8mp_audiomix_pll_parents),
		CLK_SET_RATE_NO_REPARENT, base + SAI_PLL_GNRL_CTL,
		0, 2, 0, NULL, NULL);
	if (IS_ERR(hw))
		return PTR_ERR(hw);
	priv->hws[IMX8MP_CLK_AUDIOMIX_SAI_PLL_REF_SEL] = hw;
	hw = imx_dev_clk_hw_pll14xx(dev, "sai_pll", "sai_pll_ref_sel",
				    base + SAI_PLL_GNRL_CTL, &imx_1443x_pll);
if (IS_ERR(hw))
return PTR_ERR(hw);
priv->hws[IMX8MP_CLK_AUDIOMIX_SAI_PLL] = hw;
hw = devm_clk_hw_register_mux_parent_data_table(dev,
"sai_pll_bypass", clk_imx8mp_audiomix_pll_bypass_sels,
ARRAY_SIZE(clk_imx8mp_audiomix_pll_bypass_sels),
CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT,
base + SAI_PLL_GNRL_CTL, 16, 1, 0, NULL, NULL);
if (IS_ERR(hw))
return PTR_ERR(hw);
priv->hws[IMX8MP_CLK_AUDIOMIX_SAI_PLL_BYPASS] = hw;
hw = devm_clk_hw_register_gate(dev, "sai_pll_out", "sai_pll_bypass",
0, base + SAI_PLL_GNRL_CTL, 13,
0, NULL);
if (IS_ERR(hw))
return PTR_ERR(hw);
priv->hws[IMX8MP_CLK_AUDIOMIX_SAI_PLL_OUT] = hw;
hw = devm_clk_hw_register_fixed_factor(dev, "sai_pll_out_div2",
"sai_pll_out", 0, 1, 2);
if (IS_ERR(hw))
return PTR_ERR(hw);
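	/*
	 * The resulting SAI PLL tree is sai_pll_ref_sel -> sai_pll ->
	 * sai_pll_bypass -> sai_pll_out -> sai_pll_out_div2, the latter
	 * feeding the pdm mux and the per-SAI MCLK3 gates above.
	 */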
return devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_onecell_get,
priv);
}
static const struct of_device_id clk_imx8mp_audiomix_of_match[] = {
{ .compatible = "fsl,imx8mp-audio-blk-ctrl" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, clk_imx8mp_audiomix_of_match);
static struct platform_driver clk_imx8mp_audiomix_driver = {
.probe = clk_imx8mp_audiomix_probe,
.driver = {
.name = "imx8mp-audio-blk-ctrl",
.of_match_table = clk_imx8mp_audiomix_of_match,
},
};
module_platform_driver(clk_imx8mp_audiomix_driver);
MODULE_AUTHOR("Marek Vasut <[email protected]>");
MODULE_DESCRIPTION("Freescale i.MX8MP Audio Block Controller driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/clk/imx/clk-imx8mp-audiomix.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2014-2015 Freescale Semiconductor, Inc.
*/
#include <dt-bindings/clock/imx7d-clock.h>
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/types.h>
#include "clk.h"
static u32 share_count_sai1;
static u32 share_count_sai2;
static u32 share_count_sai3;
static u32 share_count_nand;
static u32 share_count_enet1;
static u32 share_count_enet2;
static const struct clk_div_table test_div_table[] = {
{ .val = 3, .div = 1, },
{ .val = 2, .div = 1, },
{ .val = 1, .div = 2, },
{ .val = 0, .div = 4, },
{ }
};
static const struct clk_div_table post_div_table[] = {
{ .val = 3, .div = 4, },
{ .val = 2, .div = 1, },
{ .val = 1, .div = 2, },
{ .val = 0, .div = 1, },
{ }
};
static const char *arm_a7_sel[] = { "osc", "pll_arm_main_clk",
"pll_enet_500m_clk", "pll_dram_main_clk",
"pll_sys_main_clk", "pll_sys_pfd0_392m_clk", "pll_audio_post_div",
"pll_usb_main_clk", };
static const char *arm_m4_sel[] = { "osc", "pll_sys_main_240m_clk",
"pll_enet_250m_clk", "pll_sys_pfd2_270m_clk",
"pll_dram_533m_clk", "pll_audio_post_div", "pll_video_post_div",
"pll_usb_main_clk", };
static const char *axi_sel[] = { "osc", "pll_sys_pfd1_332m_clk",
"pll_dram_533m_clk", "pll_enet_250m_clk", "pll_sys_pfd5_clk",
"pll_audio_post_div", "pll_video_post_div", "pll_sys_pfd7_clk", };
static const char *disp_axi_sel[] = { "osc", "pll_sys_pfd1_332m_clk",
"pll_dram_533m_clk", "pll_enet_250m_clk", "pll_sys_pfd6_clk",
"pll_sys_pfd7_clk", "pll_audio_post_div", "pll_video_post_div", };
static const char *enet_axi_sel[] = { "osc", "pll_sys_pfd2_270m_clk",
"pll_dram_533m_clk", "pll_enet_250m_clk",
"pll_sys_main_240m_clk", "pll_audio_post_div", "pll_video_post_div",
"pll_sys_pfd4_clk", };
static const char *nand_usdhc_bus_sel[] = { "osc", "pll_sys_pfd2_270m_clk",
"pll_dram_533m_clk", "pll_sys_main_240m_clk",
"pll_sys_pfd2_135m_clk", "pll_sys_pfd6_clk", "pll_enet_250m_clk",
"pll_audio_post_div", };
static const char *ahb_channel_sel[] = { "osc", "pll_sys_pfd2_270m_clk",
"pll_dram_533m_clk", "pll_sys_pfd0_392m_clk",
"pll_enet_250m_clk", "pll_usb_main_clk", "pll_audio_post_div",
"pll_video_post_div", };
static const char *dram_phym_sel[] = { "pll_dram_main_clk",
"dram_phym_alt_clk", };
static const char *dram_sel[] = { "pll_dram_main_clk",
"dram_alt_root_clk", };
static const char *dram_phym_alt_sel[] = { "osc", "pll_dram_533m_clk",
"pll_sys_main_clk", "pll_enet_500m_clk",
"pll_usb_main_clk", "pll_sys_pfd7_clk", "pll_audio_post_div",
"pll_video_post_div", };
static const char *dram_alt_sel[] = { "osc", "pll_dram_533m_clk",
"pll_sys_main_clk", "pll_enet_500m_clk",
"pll_enet_250m_clk", "pll_sys_pfd0_392m_clk",
"pll_audio_post_div", "pll_sys_pfd2_270m_clk", };
static const char *usb_hsic_sel[] = { "osc", "pll_sys_main_clk",
"pll_usb_main_clk", "pll_sys_pfd3_clk", "pll_sys_pfd4_clk",
"pll_sys_pfd5_clk", "pll_sys_pfd6_clk", "pll_sys_pfd7_clk", };
static const char *pcie_ctrl_sel[] = { "osc", "pll_enet_250m_clk",
"pll_sys_main_240m_clk", "pll_sys_pfd2_270m_clk",
"pll_dram_533m_clk", "pll_enet_500m_clk",
"pll_sys_pfd1_332m_clk", "pll_sys_pfd6_clk", };
static const char *pcie_phy_sel[] = { "osc", "pll_enet_100m_clk",
"pll_enet_500m_clk", "ext_clk_1", "ext_clk_2", "ext_clk_3",
"ext_clk_4", "pll_sys_pfd0_392m_clk", };
static const char *epdc_pixel_sel[] = { "osc", "pll_sys_pfd1_332m_clk",
"pll_dram_533m_clk", "pll_sys_main_clk", "pll_sys_pfd5_clk",
"pll_sys_pfd6_clk", "pll_sys_pfd7_clk", "pll_video_post_div", };
static const char *lcdif_pixel_sel[] = { "osc", "pll_sys_pfd5_clk",
"pll_dram_533m_clk", "ext_clk_3", "pll_sys_pfd4_clk",
"pll_sys_pfd2_270m_clk", "pll_video_post_div",
"pll_usb_main_clk", };
static const char *mipi_dsi_sel[] = { "osc", "pll_sys_pfd5_clk",
"pll_sys_pfd3_clk", "pll_sys_main_clk", "pll_sys_pfd0_196m_clk",
"pll_dram_533m_clk", "pll_video_post_div", "pll_audio_post_div", };
static const char *mipi_csi_sel[] = { "osc", "pll_sys_pfd4_clk",
"pll_sys_pfd3_clk", "pll_sys_main_clk", "pll_sys_pfd0_196m_clk",
"pll_dram_533m_clk", "pll_video_post_div", "pll_audio_post_div", };
static const char *mipi_dphy_sel[] = { "osc", "pll_sys_main_120m_clk",
"pll_dram_533m_clk", "pll_sys_pfd5_clk", "ref_1m_clk", "ext_clk_2",
"pll_video_post_div", "ext_clk_3", };
static const char *sai1_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
"pll_audio_post_div", "pll_dram_533m_clk", "pll_video_post_div",
"pll_sys_pfd4_clk", "pll_enet_125m_clk", "ext_clk_2", };
static const char *sai2_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
"pll_audio_post_div", "pll_dram_533m_clk", "pll_video_post_div",
"pll_sys_pfd4_clk", "pll_enet_125m_clk", "ext_clk_2", };
static const char *sai3_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
"pll_audio_post_div", "pll_dram_533m_clk", "pll_video_post_div",
"pll_sys_pfd4_clk", "pll_enet_125m_clk", "ext_clk_3", };
static const char *spdif_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
"pll_audio_post_div", "pll_dram_533m_clk", "pll_video_post_div",
"pll_sys_pfd4_clk", "pll_enet_125m_clk", "ext_3_clk", };
static const char *enet1_ref_sel[] = { "osc", "pll_enet_125m_clk",
"pll_enet_50m_clk", "pll_enet_25m_clk",
"pll_sys_main_120m_clk", "pll_audio_post_div", "pll_video_post_div",
"ext_clk_4", };
static const char *enet1_time_sel[] = { "osc", "pll_enet_100m_clk",
"pll_audio_post_div", "ext_clk_1", "ext_clk_2", "ext_clk_3",
"ext_clk_4", "pll_video_post_div", };
static const char *enet2_ref_sel[] = { "osc", "pll_enet_125m_clk",
"pll_enet_50m_clk", "pll_enet_25m_clk",
"pll_sys_main_120m_clk", "pll_audio_post_div", "pll_video_post_div",
"ext_clk_4", };
static const char *enet2_time_sel[] = { "osc", "pll_enet_100m_clk",
"pll_audio_post_div", "ext_clk_1", "ext_clk_2", "ext_clk_3",
"ext_clk_4", "pll_video_post_div", };
static const char *enet_phy_ref_sel[] = { "osc", "pll_enet_25m_clk",
"pll_enet_50m_clk", "pll_enet_125m_clk",
"pll_dram_533m_clk", "pll_audio_post_div", "pll_video_post_div",
"pll_sys_pfd3_clk", };
static const char *eim_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
"pll_sys_main_120m_clk", "pll_dram_533m_clk",
"pll_sys_pfd2_270m_clk", "pll_sys_pfd3_clk", "pll_enet_125m_clk",
"pll_usb_main_clk", };
static const char *nand_sel[] = { "osc", "pll_sys_main_clk",
"pll_dram_533m_clk", "pll_sys_pfd0_392m_clk", "pll_sys_pfd3_clk",
"pll_enet_500m_clk", "pll_enet_250m_clk",
"pll_video_post_div", };
static const char *qspi_sel[] = { "osc", "pll_sys_pfd4_clk",
"pll_dram_533m_clk", "pll_enet_500m_clk", "pll_sys_pfd3_clk",
"pll_sys_pfd2_270m_clk", "pll_sys_pfd6_clk", "pll_sys_pfd7_clk", };
static const char *usdhc1_sel[] = { "osc", "pll_sys_pfd0_392m_clk",
"pll_dram_533m_clk", "pll_enet_500m_clk", "pll_sys_pfd4_clk",
"pll_sys_pfd2_270m_clk", "pll_sys_pfd6_clk", "pll_sys_pfd7_clk", };
static const char *usdhc2_sel[] = { "osc", "pll_sys_pfd0_392m_clk",
"pll_dram_533m_clk", "pll_enet_500m_clk", "pll_sys_pfd4_clk",
"pll_sys_pfd2_270m_clk", "pll_sys_pfd6_clk", "pll_sys_pfd7_clk", };
static const char *usdhc3_sel[] = { "osc", "pll_sys_pfd0_392m_clk",
"pll_dram_533m_clk", "pll_enet_500m_clk", "pll_sys_pfd4_clk",
"pll_sys_pfd2_270m_clk", "pll_sys_pfd6_clk", "pll_sys_pfd7_clk", };
static const char *can1_sel[] = { "osc", "pll_sys_main_120m_clk",
"pll_dram_533m_clk", "pll_sys_main_clk",
"pll_enet_40m_clk", "pll_usb_main_clk", "ext_clk_1",
"ext_clk_4", };
static const char *can2_sel[] = { "osc", "pll_sys_main_120m_clk",
"pll_dram_533m_clk", "pll_sys_main_clk",
"pll_enet_40m_clk", "pll_usb_main_clk", "ext_clk_1",
"ext_clk_3", };
static const char *i2c1_sel[] = { "osc", "pll_sys_main_120m_clk",
"pll_enet_50m_clk", "pll_dram_533m_clk",
"pll_audio_post_div", "pll_video_post_div", "pll_usb_main_clk",
"pll_sys_pfd2_135m_clk", };
static const char *i2c2_sel[] = { "osc", "pll_sys_main_120m_clk",
"pll_enet_50m_clk", "pll_dram_533m_clk",
"pll_audio_post_div", "pll_video_post_div", "pll_usb_main_clk",
"pll_sys_pfd2_135m_clk", };
static const char *i2c3_sel[] = { "osc", "pll_sys_main_120m_clk",
"pll_enet_50m_clk", "pll_dram_533m_clk",
"pll_audio_post_div", "pll_video_post_div", "pll_usb_main_clk",
"pll_sys_pfd2_135m_clk", };
static const char *i2c4_sel[] = { "osc", "pll_sys_main_120m_clk",
"pll_enet_50m_clk", "pll_dram_533m_clk",
"pll_audio_post_div", "pll_video_post_div", "pll_usb_main_clk",
"pll_sys_pfd2_135m_clk", };
static const char *uart1_sel[] = { "osc", "pll_sys_main_240m_clk",
"pll_enet_40m_clk", "pll_enet_100m_clk",
"pll_sys_main_clk", "ext_clk_2", "ext_clk_4",
"pll_usb_main_clk", };
static const char *uart2_sel[] = { "osc", "pll_sys_main_240m_clk",
"pll_enet_40m_clk", "pll_enet_100m_clk",
"pll_sys_main_clk", "ext_clk_2", "ext_clk_3",
"pll_usb_main_clk", };
static const char *uart3_sel[] = { "osc", "pll_sys_main_240m_clk",
"pll_enet_40m_clk", "pll_enet_100m_clk",
"pll_sys_main_clk", "ext_clk_2", "ext_clk_4",
"pll_usb_main_clk", };
static const char *uart4_sel[] = { "osc", "pll_sys_main_240m_clk",
"pll_enet_40m_clk", "pll_enet_100m_clk",
"pll_sys_main_clk", "ext_clk_2", "ext_clk_3",
"pll_usb_main_clk", };
static const char *uart5_sel[] = { "osc", "pll_sys_main_240m_clk",
"pll_enet_40m_clk", "pll_enet_100m_clk",
"pll_sys_main_clk", "ext_clk_2", "ext_clk_4",
"pll_usb_main_clk", };
static const char *uart6_sel[] = { "osc", "pll_sys_main_240m_clk",
"pll_enet_40m_clk", "pll_enet_100m_clk",
"pll_sys_main_clk", "ext_clk_2", "ext_clk_3",
"pll_usb_main_clk", };
static const char *uart7_sel[] = { "osc", "pll_sys_main_240m_clk",
"pll_enet_40m_clk", "pll_enet_100m_clk",
"pll_sys_main_clk", "ext_clk_2", "ext_clk_4",
"pll_usb_main_clk", };
static const char *ecspi1_sel[] = { "osc", "pll_sys_main_240m_clk",
"pll_enet_40m_clk", "pll_sys_main_120m_clk",
"pll_sys_main_clk", "pll_sys_pfd4_clk", "pll_enet_250m_clk",
"pll_usb_main_clk", };
static const char *ecspi2_sel[] = { "osc", "pll_sys_main_240m_clk",
"pll_enet_40m_clk", "pll_sys_main_120m_clk",
"pll_sys_main_clk", "pll_sys_pfd4_clk", "pll_enet_250m_clk",
"pll_usb_main_clk", };
static const char *ecspi3_sel[] = { "osc", "pll_sys_main_240m_clk",
"pll_enet_40m_clk", "pll_sys_main_120m_clk",
"pll_sys_main_clk", "pll_sys_pfd4_clk", "pll_enet_250m_clk",
"pll_usb_main_clk", };
static const char *ecspi4_sel[] = { "osc", "pll_sys_main_240m_clk",
"pll_enet_40m_clk", "pll_sys_main_120m_clk",
"pll_sys_main_clk", "pll_sys_pfd4_clk", "pll_enet_250m_clk",
"pll_usb_main_clk", };
static const char *pwm1_sel[] = { "osc", "pll_enet_100m_clk",
"pll_sys_main_120m_clk", "pll_enet_40m_clk", "pll_audio_post_div",
"ext_clk_1", "ref_1m_clk", "pll_video_post_div", };
static const char *pwm2_sel[] = { "osc", "pll_enet_100m_clk",
"pll_sys_main_120m_clk", "pll_enet_40m_clk", "pll_audio_post_div",
"ext_clk_1", "ref_1m_clk", "pll_video_post_div", };
static const char *pwm3_sel[] = { "osc", "pll_enet_100m_clk",
"pll_sys_main_120m_clk", "pll_enet_40m_clk", "pll_audio_post_div",
"ext_clk_2", "ref_1m_clk", "pll_video_post_div", };
static const char *pwm4_sel[] = { "osc", "pll_enet_100m_clk",
"pll_sys_main_120m_clk", "pll_enet_40m_clk", "pll_audio_post_div",
"ext_clk_2", "ref_1m_clk", "pll_video_post_div", };
static const char *flextimer1_sel[] = { "osc", "pll_enet_100m_clk",
"pll_sys_main_120m_clk", "pll_enet_40m_clk", "pll_audio_post_div",
"ext_clk_3", "ref_1m_clk", "pll_video_post_div", };
static const char *flextimer2_sel[] = { "osc", "pll_enet_100m_clk",
"pll_sys_main_120m_clk", "pll_enet_40m_clk", "pll_audio_post_div",
"ext_clk_3", "ref_1m_clk", "pll_video_post_div", };
static const char *sim1_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
"pll_sys_main_120m_clk", "pll_dram_533m_clk",
"pll_usb_main_clk", "pll_audio_post_div", "pll_enet_125m_clk",
"pll_sys_pfd7_clk", };
static const char *sim2_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
"pll_sys_main_120m_clk", "pll_dram_533m_clk",
"pll_usb_main_clk", "pll_video_post_div", "pll_enet_125m_clk",
"pll_sys_pfd7_clk", };
static const char *gpt1_sel[] = { "osc", "pll_enet_100m_clk",
"pll_sys_pfd0_392m_clk", "pll_enet_40m_clk", "pll_video_post_div",
"ref_1m_clk", "pll_audio_post_div", "ext_clk_1", };
static const char *gpt2_sel[] = { "osc", "pll_enet_100m_clk",
"pll_sys_pfd0_392m_clk", "pll_enet_40m_clk", "pll_video_post_div",
"ref_1m_clk", "pll_audio_post_div", "ext_clk_2", };
static const char *gpt3_sel[] = { "osc", "pll_enet_100m_clk",
"pll_sys_pfd0_392m_clk", "pll_enet_40m_clk", "pll_video_post_div",
"ref_1m_clk", "pll_audio_post_div", "ext_clk_3", };
static const char *gpt4_sel[] = { "osc", "pll_enet_100m_clk",
"pll_sys_pfd0_392m_clk", "pll_enet_40m_clk", "pll_video_post_div",
"ref_1m_clk", "pll_audio_post_div", "ext_clk_4", };
static const char *trace_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
"pll_sys_main_120m_clk", "pll_dram_533m_clk",
"pll_enet_125m_clk", "pll_usb_main_clk", "ext_clk_2",
"ext_clk_3", };
static const char *wdog_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
"pll_sys_main_120m_clk", "pll_dram_533m_clk",
"pll_enet_125m_clk", "pll_usb_main_clk", "ref_1m_clk",
"pll_sys_pfd1_166m_clk", };
static const char *csi_mclk_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
"pll_sys_main_120m_clk", "pll_dram_533m_clk",
"pll_enet_125m_clk", "pll_audio_post_div", "pll_video_post_div",
"pll_usb_main_clk", };
static const char *audio_mclk_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
"pll_sys_main_120m_clk", "pll_dram_533m_clk",
"pll_enet_125m_clk", "pll_audio_post_div", "pll_video_post_div",
"pll_usb_main_clk", };
static const char *wrclk_sel[] = { "osc", "pll_enet_40m_clk",
"pll_dram_533m_clk", "pll_usb_main_clk",
"pll_sys_main_240m_clk", "pll_sys_pfd2_270m_clk",
"pll_enet_500m_clk", "pll_sys_pfd7_clk", };
static const char *clko1_sel[] = { "osc", "pll_sys_main_clk",
"pll_sys_main_240m_clk", "pll_sys_pfd0_196m_clk", "pll_sys_pfd3_clk",
"pll_enet_500m_clk", "pll_dram_533m_clk", "ref_1m_clk", };
static const char *clko2_sel[] = { "osc", "pll_sys_main_240m_clk",
"pll_sys_pfd0_392m_clk", "pll_sys_pfd1_166m_clk", "pll_sys_pfd4_clk",
"pll_audio_post_div", "pll_video_post_div", "ckil", };
static const char *lvds1_sel[] = { "pll_arm_main_clk",
"pll_sys_main_clk", "pll_sys_pfd0_392m_clk", "pll_sys_pfd1_332m_clk",
"pll_sys_pfd2_270m_clk", "pll_sys_pfd3_clk", "pll_sys_pfd4_clk",
"pll_sys_pfd5_clk", "pll_sys_pfd6_clk", "pll_sys_pfd7_clk",
"pll_audio_post_div", "pll_video_post_div", "pll_enet_500m_clk",
"pll_enet_250m_clk", "pll_enet_125m_clk", "pll_enet_100m_clk",
"pll_enet_50m_clk", "pll_enet_40m_clk", "pll_enet_25m_clk",
"pll_dram_main_clk", };
static const char *pll_bypass_src_sel[] = { "osc", "dummy", };
static const char *pll_arm_bypass_sel[] = { "pll_arm_main", "pll_arm_main_src", };
static const char *pll_dram_bypass_sel[] = { "pll_dram_main", "pll_dram_main_src", };
static const char *pll_sys_bypass_sel[] = { "pll_sys_main", "pll_sys_main_src", };
static const char *pll_enet_bypass_sel[] = { "pll_enet_main", "pll_enet_main_src", };
static const char *pll_audio_bypass_sel[] = { "pll_audio_main", "pll_audio_main_src", };
static const char *pll_video_bypass_sel[] = { "pll_video_main", "pll_video_main_src", };
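/*
 * Each "_src" mux registered below selects among the parents listed above
 * via the MUX field at bits [26:24] of its CCM root slice control register
 * (width 1 for the two DRAM selects); consecutive slices sit 0x80 apart.
 */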
static struct clk_hw **hws;
static struct clk_hw_onecell_data *clk_hw_data;
static void __init imx7d_clocks_init(struct device_node *ccm_node)
{
struct device_node *np;
void __iomem *base;
clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
IMX7D_CLK_END), GFP_KERNEL);
if (WARN_ON(!clk_hw_data))
return;
clk_hw_data->num = IMX7D_CLK_END;
hws = clk_hw_data->hws;
hws[IMX7D_CLK_DUMMY] = imx_clk_hw_fixed("dummy", 0);
hws[IMX7D_OSC_24M_CLK] = imx_get_clk_hw_by_name(ccm_node, "osc");
hws[IMX7D_CKIL] = imx_get_clk_hw_by_name(ccm_node, "ckil");
np = of_find_compatible_node(NULL, NULL, "fsl,imx7d-anatop");
base = of_iomap(np, 0);
WARN_ON(!base);
of_node_put(np);
hws[IMX7D_PLL_ARM_MAIN_SRC] = imx_clk_hw_mux("pll_arm_main_src", base + 0x60, 14, 2, pll_bypass_src_sel, ARRAY_SIZE(pll_bypass_src_sel));
hws[IMX7D_PLL_DRAM_MAIN_SRC] = imx_clk_hw_mux("pll_dram_main_src", base + 0x70, 14, 2, pll_bypass_src_sel, ARRAY_SIZE(pll_bypass_src_sel));
hws[IMX7D_PLL_SYS_MAIN_SRC] = imx_clk_hw_mux("pll_sys_main_src", base + 0xb0, 14, 2, pll_bypass_src_sel, ARRAY_SIZE(pll_bypass_src_sel));
hws[IMX7D_PLL_ENET_MAIN_SRC] = imx_clk_hw_mux("pll_enet_main_src", base + 0xe0, 14, 2, pll_bypass_src_sel, ARRAY_SIZE(pll_bypass_src_sel));
hws[IMX7D_PLL_AUDIO_MAIN_SRC] = imx_clk_hw_mux("pll_audio_main_src", base + 0xf0, 14, 2, pll_bypass_src_sel, ARRAY_SIZE(pll_bypass_src_sel));
hws[IMX7D_PLL_VIDEO_MAIN_SRC] = imx_clk_hw_mux("pll_video_main_src", base + 0x130, 14, 2, pll_bypass_src_sel, ARRAY_SIZE(pll_bypass_src_sel));
hws[IMX7D_PLL_ARM_MAIN] = imx_clk_hw_pllv3(IMX_PLLV3_SYS, "pll_arm_main", "osc", base + 0x60, 0x7f);
hws[IMX7D_PLL_DRAM_MAIN] = imx_clk_hw_pllv3(IMX_PLLV3_DDR_IMX7, "pll_dram_main", "osc", base + 0x70, 0x7f);
hws[IMX7D_PLL_SYS_MAIN] = imx_clk_hw_pllv3(IMX_PLLV3_GENERIC, "pll_sys_main", "osc", base + 0xb0, 0x1);
hws[IMX7D_PLL_ENET_MAIN] = imx_clk_hw_pllv3(IMX_PLLV3_ENET_IMX7, "pll_enet_main", "osc", base + 0xe0, 0x0);
hws[IMX7D_PLL_AUDIO_MAIN] = imx_clk_hw_pllv3(IMX_PLLV3_AV_IMX7, "pll_audio_main", "osc", base + 0xf0, 0x7f);
hws[IMX7D_PLL_VIDEO_MAIN] = imx_clk_hw_pllv3(IMX_PLLV3_AV_IMX7, "pll_video_main", "osc", base + 0x130, 0x7f);
hws[IMX7D_PLL_ARM_MAIN_BYPASS] = imx_clk_hw_mux_flags("pll_arm_main_bypass", base + 0x60, 16, 1, pll_arm_bypass_sel, ARRAY_SIZE(pll_arm_bypass_sel), CLK_SET_RATE_PARENT);
hws[IMX7D_PLL_DRAM_MAIN_BYPASS] = imx_clk_hw_mux_flags("pll_dram_main_bypass", base + 0x70, 16, 1, pll_dram_bypass_sel, ARRAY_SIZE(pll_dram_bypass_sel), CLK_SET_RATE_PARENT);
hws[IMX7D_PLL_SYS_MAIN_BYPASS] = imx_clk_hw_mux_flags("pll_sys_main_bypass", base + 0xb0, 16, 1, pll_sys_bypass_sel, ARRAY_SIZE(pll_sys_bypass_sel), CLK_SET_RATE_PARENT);
hws[IMX7D_PLL_ENET_MAIN_BYPASS] = imx_clk_hw_mux_flags("pll_enet_main_bypass", base + 0xe0, 16, 1, pll_enet_bypass_sel, ARRAY_SIZE(pll_enet_bypass_sel), CLK_SET_RATE_PARENT);
hws[IMX7D_PLL_AUDIO_MAIN_BYPASS] = imx_clk_hw_mux_flags("pll_audio_main_bypass", base + 0xf0, 16, 1, pll_audio_bypass_sel, ARRAY_SIZE(pll_audio_bypass_sel), CLK_SET_RATE_PARENT);
hws[IMX7D_PLL_VIDEO_MAIN_BYPASS] = imx_clk_hw_mux_flags("pll_video_main_bypass", base + 0x130, 16, 1, pll_video_bypass_sel, ARRAY_SIZE(pll_video_bypass_sel), CLK_SET_RATE_PARENT);
hws[IMX7D_PLL_ARM_MAIN_CLK] = imx_clk_hw_gate("pll_arm_main_clk", "pll_arm_main_bypass", base + 0x60, 13);
hws[IMX7D_PLL_DRAM_MAIN_CLK] = imx_clk_hw_gate("pll_dram_main_clk", "pll_dram_test_div", base + 0x70, 13);
hws[IMX7D_PLL_SYS_MAIN_CLK] = imx_clk_hw_gate("pll_sys_main_clk", "pll_sys_main_bypass", base + 0xb0, 13);
hws[IMX7D_PLL_AUDIO_MAIN_CLK] = imx_clk_hw_gate("pll_audio_main_clk", "pll_audio_main_bypass", base + 0xf0, 13);
hws[IMX7D_PLL_VIDEO_MAIN_CLK] = imx_clk_hw_gate("pll_video_main_clk", "pll_video_main_bypass", base + 0x130, 13);
hws[IMX7D_PLL_DRAM_TEST_DIV] = clk_hw_register_divider_table(NULL, "pll_dram_test_div", "pll_dram_main_bypass",
CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0x70, 21, 2, 0, test_div_table, &imx_ccm_lock);
hws[IMX7D_PLL_AUDIO_TEST_DIV] = clk_hw_register_divider_table(NULL, "pll_audio_test_div", "pll_audio_main_clk",
CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0xf0, 19, 2, 0, test_div_table, &imx_ccm_lock);
hws[IMX7D_PLL_AUDIO_POST_DIV] = clk_hw_register_divider_table(NULL, "pll_audio_post_div", "pll_audio_test_div",
CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0xf0, 22, 2, 0, post_div_table, &imx_ccm_lock);
hws[IMX7D_PLL_VIDEO_TEST_DIV] = clk_hw_register_divider_table(NULL, "pll_video_test_div", "pll_video_main_clk",
CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0x130, 19, 2, 0, test_div_table, &imx_ccm_lock);
hws[IMX7D_PLL_VIDEO_POST_DIV] = clk_hw_register_divider_table(NULL, "pll_video_post_div", "pll_video_test_div",
CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0x130, 22, 2, 0, post_div_table, &imx_ccm_lock);
hws[IMX7D_PLL_SYS_PFD0_392M_CLK] = imx_clk_hw_pfd("pll_sys_pfd0_392m_clk", "pll_sys_main_clk", base + 0xc0, 0);
hws[IMX7D_PLL_SYS_PFD1_332M_CLK] = imx_clk_hw_pfd("pll_sys_pfd1_332m_clk", "pll_sys_main_clk", base + 0xc0, 1);
hws[IMX7D_PLL_SYS_PFD2_270M_CLK] = imx_clk_hw_pfd("pll_sys_pfd2_270m_clk", "pll_sys_main_clk", base + 0xc0, 2);
hws[IMX7D_PLL_SYS_PFD3_CLK] = imx_clk_hw_pfd("pll_sys_pfd3_clk", "pll_sys_main_clk", base + 0xc0, 3);
hws[IMX7D_PLL_SYS_PFD4_CLK] = imx_clk_hw_pfd("pll_sys_pfd4_clk", "pll_sys_main_clk", base + 0xd0, 0);
hws[IMX7D_PLL_SYS_PFD5_CLK] = imx_clk_hw_pfd("pll_sys_pfd5_clk", "pll_sys_main_clk", base + 0xd0, 1);
hws[IMX7D_PLL_SYS_PFD6_CLK] = imx_clk_hw_pfd("pll_sys_pfd6_clk", "pll_sys_main_clk", base + 0xd0, 2);
hws[IMX7D_PLL_SYS_PFD7_CLK] = imx_clk_hw_pfd("pll_sys_pfd7_clk", "pll_sys_main_clk", base + 0xd0, 3);
hws[IMX7D_PLL_SYS_MAIN_480M] = imx_clk_hw_fixed_factor("pll_sys_main_480m", "pll_sys_main_clk", 1, 1);
hws[IMX7D_PLL_SYS_MAIN_240M] = imx_clk_hw_fixed_factor("pll_sys_main_240m", "pll_sys_main_clk", 1, 2);
hws[IMX7D_PLL_SYS_MAIN_120M] = imx_clk_hw_fixed_factor("pll_sys_main_120m", "pll_sys_main_clk", 1, 4);
hws[IMX7D_PLL_DRAM_MAIN_533M] = imx_clk_hw_fixed_factor("pll_dram_533m", "pll_dram_main_clk", 1, 2);
hws[IMX7D_PLL_SYS_MAIN_480M_CLK] = imx_clk_hw_gate_dis_flags("pll_sys_main_480m_clk", "pll_sys_main_480m", base + 0xb0, 4, CLK_IS_CRITICAL);
hws[IMX7D_PLL_SYS_MAIN_240M_CLK] = imx_clk_hw_gate_dis("pll_sys_main_240m_clk", "pll_sys_main_240m", base + 0xb0, 5);
hws[IMX7D_PLL_SYS_MAIN_120M_CLK] = imx_clk_hw_gate_dis("pll_sys_main_120m_clk", "pll_sys_main_120m", base + 0xb0, 6);
hws[IMX7D_PLL_DRAM_MAIN_533M_CLK] = imx_clk_hw_gate("pll_dram_533m_clk", "pll_dram_533m", base + 0x70, 12);
hws[IMX7D_PLL_SYS_PFD0_196M] = imx_clk_hw_fixed_factor("pll_sys_pfd0_196m", "pll_sys_pfd0_392m_clk", 1, 2);
hws[IMX7D_PLL_SYS_PFD1_166M] = imx_clk_hw_fixed_factor("pll_sys_pfd1_166m", "pll_sys_pfd1_332m_clk", 1, 2);
hws[IMX7D_PLL_SYS_PFD2_135M] = imx_clk_hw_fixed_factor("pll_sys_pfd2_135m", "pll_sys_pfd2_270m_clk", 1, 2);
hws[IMX7D_PLL_SYS_PFD0_196M_CLK] = imx_clk_hw_gate_dis("pll_sys_pfd0_196m_clk", "pll_sys_pfd0_196m", base + 0xb0, 26);
hws[IMX7D_PLL_SYS_PFD1_166M_CLK] = imx_clk_hw_gate_dis("pll_sys_pfd1_166m_clk", "pll_sys_pfd1_166m", base + 0xb0, 27);
hws[IMX7D_PLL_SYS_PFD2_135M_CLK] = imx_clk_hw_gate_dis("pll_sys_pfd2_135m_clk", "pll_sys_pfd2_135m", base + 0xb0, 28);
hws[IMX7D_PLL_ENET_MAIN_CLK] = imx_clk_hw_fixed_factor("pll_enet_main_clk", "pll_enet_main_bypass", 1, 1);
hws[IMX7D_PLL_ENET_MAIN_500M] = imx_clk_hw_fixed_factor("pll_enet_500m", "pll_enet_main_clk", 1, 2);
hws[IMX7D_PLL_ENET_MAIN_250M] = imx_clk_hw_fixed_factor("pll_enet_250m", "pll_enet_main_clk", 1, 4);
hws[IMX7D_PLL_ENET_MAIN_125M] = imx_clk_hw_fixed_factor("pll_enet_125m", "pll_enet_main_clk", 1, 8);
hws[IMX7D_PLL_ENET_MAIN_100M] = imx_clk_hw_fixed_factor("pll_enet_100m", "pll_enet_main_clk", 1, 10);
hws[IMX7D_PLL_ENET_MAIN_50M] = imx_clk_hw_fixed_factor("pll_enet_50m", "pll_enet_main_clk", 1, 20);
hws[IMX7D_PLL_ENET_MAIN_40M] = imx_clk_hw_fixed_factor("pll_enet_40m", "pll_enet_main_clk", 1, 25);
hws[IMX7D_PLL_ENET_MAIN_25M] = imx_clk_hw_fixed_factor("pll_enet_25m", "pll_enet_main_clk", 1, 40);
hws[IMX7D_PLL_ENET_MAIN_500M_CLK] = imx_clk_hw_gate("pll_enet_500m_clk", "pll_enet_500m", base + 0xe0, 12);
hws[IMX7D_PLL_ENET_MAIN_250M_CLK] = imx_clk_hw_gate("pll_enet_250m_clk", "pll_enet_250m", base + 0xe0, 11);
hws[IMX7D_PLL_ENET_MAIN_125M_CLK] = imx_clk_hw_gate("pll_enet_125m_clk", "pll_enet_125m", base + 0xe0, 10);
hws[IMX7D_PLL_ENET_MAIN_100M_CLK] = imx_clk_hw_gate("pll_enet_100m_clk", "pll_enet_100m", base + 0xe0, 9);
hws[IMX7D_PLL_ENET_MAIN_50M_CLK] = imx_clk_hw_gate("pll_enet_50m_clk", "pll_enet_50m", base + 0xe0, 8);
hws[IMX7D_PLL_ENET_MAIN_40M_CLK] = imx_clk_hw_gate("pll_enet_40m_clk", "pll_enet_40m", base + 0xe0, 7);
hws[IMX7D_PLL_ENET_MAIN_25M_CLK] = imx_clk_hw_gate("pll_enet_25m_clk", "pll_enet_25m", base + 0xe0, 6);
hws[IMX7D_LVDS1_OUT_SEL] = imx_clk_hw_mux("lvds1_sel", base + 0x170, 0, 5, lvds1_sel, ARRAY_SIZE(lvds1_sel));
hws[IMX7D_LVDS1_OUT_CLK] = imx_clk_hw_gate_exclusive("lvds1_out", "lvds1_sel", base + 0x170, 5, BIT(6));
np = ccm_node;
base = of_iomap(np, 0);
WARN_ON(!base);
hws[IMX7D_ARM_A7_ROOT_SRC] = imx_clk_hw_mux2("arm_a7_src", base + 0x8000, 24, 3, arm_a7_sel, ARRAY_SIZE(arm_a7_sel));
hws[IMX7D_ARM_M4_ROOT_SRC] = imx_clk_hw_mux2("arm_m4_src", base + 0x8080, 24, 3, arm_m4_sel, ARRAY_SIZE(arm_m4_sel));
hws[IMX7D_MAIN_AXI_ROOT_SRC] = imx_clk_hw_mux2("axi_src", base + 0x8800, 24, 3, axi_sel, ARRAY_SIZE(axi_sel));
hws[IMX7D_DISP_AXI_ROOT_SRC] = imx_clk_hw_mux2("disp_axi_src", base + 0x8880, 24, 3, disp_axi_sel, ARRAY_SIZE(disp_axi_sel));
hws[IMX7D_AHB_CHANNEL_ROOT_SRC] = imx_clk_hw_mux2("ahb_src", base + 0x9000, 24, 3, ahb_channel_sel, ARRAY_SIZE(ahb_channel_sel));
hws[IMX7D_ENET_AXI_ROOT_SRC] = imx_clk_hw_mux2_flags("enet_axi_src", base + 0x8900, 24, 3, enet_axi_sel, ARRAY_SIZE(enet_axi_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_NAND_USDHC_BUS_ROOT_SRC] = imx_clk_hw_mux2_flags("nand_usdhc_src", base + 0x8980, 24, 3, nand_usdhc_bus_sel, ARRAY_SIZE(nand_usdhc_bus_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_DRAM_PHYM_ROOT_SRC] = imx_clk_hw_mux2_flags("dram_phym_src", base + 0x9800, 24, 1, dram_phym_sel, ARRAY_SIZE(dram_phym_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_DRAM_ROOT_SRC] = imx_clk_hw_mux2_flags("dram_src", base + 0x9880, 24, 1, dram_sel, ARRAY_SIZE(dram_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_DRAM_PHYM_ALT_ROOT_SRC] = imx_clk_hw_mux2_flags("dram_phym_alt_src", base + 0xa000, 24, 3, dram_phym_alt_sel, ARRAY_SIZE(dram_phym_alt_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_DRAM_ALT_ROOT_SRC] = imx_clk_hw_mux2_flags("dram_alt_src", base + 0xa080, 24, 3, dram_alt_sel, ARRAY_SIZE(dram_alt_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_USB_HSIC_ROOT_SRC] = imx_clk_hw_mux2_flags("usb_hsic_src", base + 0xa100, 24, 3, usb_hsic_sel, ARRAY_SIZE(usb_hsic_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_PCIE_CTRL_ROOT_SRC] = imx_clk_hw_mux2_flags("pcie_ctrl_src", base + 0xa180, 24, 3, pcie_ctrl_sel, ARRAY_SIZE(pcie_ctrl_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_PCIE_PHY_ROOT_SRC] = imx_clk_hw_mux2_flags("pcie_phy_src", base + 0xa200, 24, 3, pcie_phy_sel, ARRAY_SIZE(pcie_phy_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_EPDC_PIXEL_ROOT_SRC] = imx_clk_hw_mux2_flags("epdc_pixel_src", base + 0xa280, 24, 3, epdc_pixel_sel, ARRAY_SIZE(epdc_pixel_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_LCDIF_PIXEL_ROOT_SRC] = imx_clk_hw_mux2_flags("lcdif_pixel_src", base + 0xa300, 24, 3, lcdif_pixel_sel, ARRAY_SIZE(lcdif_pixel_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_MIPI_DSI_ROOT_SRC] = imx_clk_hw_mux2_flags("mipi_dsi_src", base + 0xa380, 24, 3, mipi_dsi_sel, ARRAY_SIZE(mipi_dsi_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_MIPI_CSI_ROOT_SRC] = imx_clk_hw_mux2_flags("mipi_csi_src", base + 0xa400, 24, 3, mipi_csi_sel, ARRAY_SIZE(mipi_csi_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_MIPI_DPHY_ROOT_SRC] = imx_clk_hw_mux2_flags("mipi_dphy_src", base + 0xa480, 24, 3, mipi_dphy_sel, ARRAY_SIZE(mipi_dphy_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_SAI1_ROOT_SRC] = imx_clk_hw_mux2_flags("sai1_src", base + 0xa500, 24, 3, sai1_sel, ARRAY_SIZE(sai1_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_SAI2_ROOT_SRC] = imx_clk_hw_mux2_flags("sai2_src", base + 0xa580, 24, 3, sai2_sel, ARRAY_SIZE(sai2_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_SAI3_ROOT_SRC] = imx_clk_hw_mux2_flags("sai3_src", base + 0xa600, 24, 3, sai3_sel, ARRAY_SIZE(sai3_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_SPDIF_ROOT_SRC] = imx_clk_hw_mux2_flags("spdif_src", base + 0xa680, 24, 3, spdif_sel, ARRAY_SIZE(spdif_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_ENET1_REF_ROOT_SRC] = imx_clk_hw_mux2_flags("enet1_ref_src", base + 0xa700, 24, 3, enet1_ref_sel, ARRAY_SIZE(enet1_ref_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_ENET1_TIME_ROOT_SRC] = imx_clk_hw_mux2_flags("enet1_time_src", base + 0xa780, 24, 3, enet1_time_sel, ARRAY_SIZE(enet1_time_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_ENET2_REF_ROOT_SRC] = imx_clk_hw_mux2_flags("enet2_ref_src", base + 0xa800, 24, 3, enet2_ref_sel, ARRAY_SIZE(enet2_ref_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_ENET2_TIME_ROOT_SRC] = imx_clk_hw_mux2_flags("enet2_time_src", base + 0xa880, 24, 3, enet2_time_sel, ARRAY_SIZE(enet2_time_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_ENET_PHY_REF_ROOT_SRC] = imx_clk_hw_mux2_flags("enet_phy_ref_src", base + 0xa900, 24, 3, enet_phy_ref_sel, ARRAY_SIZE(enet_phy_ref_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_EIM_ROOT_SRC] = imx_clk_hw_mux2_flags("eim_src", base + 0xa980, 24, 3, eim_sel, ARRAY_SIZE(eim_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_NAND_ROOT_SRC] = imx_clk_hw_mux2_flags("nand_src", base + 0xaa00, 24, 3, nand_sel, ARRAY_SIZE(nand_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_QSPI_ROOT_SRC] = imx_clk_hw_mux2_flags("qspi_src", base + 0xaa80, 24, 3, qspi_sel, ARRAY_SIZE(qspi_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_USDHC1_ROOT_SRC] = imx_clk_hw_mux2_flags("usdhc1_src", base + 0xab00, 24, 3, usdhc1_sel, ARRAY_SIZE(usdhc1_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_USDHC2_ROOT_SRC] = imx_clk_hw_mux2_flags("usdhc2_src", base + 0xab80, 24, 3, usdhc2_sel, ARRAY_SIZE(usdhc2_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_USDHC3_ROOT_SRC] = imx_clk_hw_mux2_flags("usdhc3_src", base + 0xac00, 24, 3, usdhc3_sel, ARRAY_SIZE(usdhc3_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_CAN1_ROOT_SRC] = imx_clk_hw_mux2_flags("can1_src", base + 0xac80, 24, 3, can1_sel, ARRAY_SIZE(can1_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_CAN2_ROOT_SRC] = imx_clk_hw_mux2_flags("can2_src", base + 0xad00, 24, 3, can2_sel, ARRAY_SIZE(can2_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_I2C1_ROOT_SRC] = imx_clk_hw_mux2_flags("i2c1_src", base + 0xad80, 24, 3, i2c1_sel, ARRAY_SIZE(i2c1_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_I2C2_ROOT_SRC] = imx_clk_hw_mux2_flags("i2c2_src", base + 0xae00, 24, 3, i2c2_sel, ARRAY_SIZE(i2c2_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_I2C3_ROOT_SRC] = imx_clk_hw_mux2_flags("i2c3_src", base + 0xae80, 24, 3, i2c3_sel, ARRAY_SIZE(i2c3_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_I2C4_ROOT_SRC] = imx_clk_hw_mux2_flags("i2c4_src", base + 0xaf00, 24, 3, i2c4_sel, ARRAY_SIZE(i2c4_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_UART1_ROOT_SRC] = imx_clk_hw_mux2_flags("uart1_src", base + 0xaf80, 24, 3, uart1_sel, ARRAY_SIZE(uart1_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_UART2_ROOT_SRC] = imx_clk_hw_mux2_flags("uart2_src", base + 0xb000, 24, 3, uart2_sel, ARRAY_SIZE(uart2_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_UART3_ROOT_SRC] = imx_clk_hw_mux2_flags("uart3_src", base + 0xb080, 24, 3, uart3_sel, ARRAY_SIZE(uart3_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_UART4_ROOT_SRC] = imx_clk_hw_mux2_flags("uart4_src", base + 0xb100, 24, 3, uart4_sel, ARRAY_SIZE(uart4_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_UART5_ROOT_SRC] = imx_clk_hw_mux2_flags("uart5_src", base + 0xb180, 24, 3, uart5_sel, ARRAY_SIZE(uart5_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_UART6_ROOT_SRC] = imx_clk_hw_mux2_flags("uart6_src", base + 0xb200, 24, 3, uart6_sel, ARRAY_SIZE(uart6_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_UART7_ROOT_SRC] = imx_clk_hw_mux2_flags("uart7_src", base + 0xb280, 24, 3, uart7_sel, ARRAY_SIZE(uart7_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_ECSPI1_ROOT_SRC] = imx_clk_hw_mux2_flags("ecspi1_src", base + 0xb300, 24, 3, ecspi1_sel, ARRAY_SIZE(ecspi1_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_ECSPI2_ROOT_SRC] = imx_clk_hw_mux2_flags("ecspi2_src", base + 0xb380, 24, 3, ecspi2_sel, ARRAY_SIZE(ecspi2_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_ECSPI3_ROOT_SRC] = imx_clk_hw_mux2_flags("ecspi3_src", base + 0xb400, 24, 3, ecspi3_sel, ARRAY_SIZE(ecspi3_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_ECSPI4_ROOT_SRC] = imx_clk_hw_mux2_flags("ecspi4_src", base + 0xb480, 24, 3, ecspi4_sel, ARRAY_SIZE(ecspi4_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_PWM1_ROOT_SRC] = imx_clk_hw_mux2_flags("pwm1_src", base + 0xb500, 24, 3, pwm1_sel, ARRAY_SIZE(pwm1_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_PWM2_ROOT_SRC] = imx_clk_hw_mux2_flags("pwm2_src", base + 0xb580, 24, 3, pwm2_sel, ARRAY_SIZE(pwm2_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_PWM3_ROOT_SRC] = imx_clk_hw_mux2_flags("pwm3_src", base + 0xb600, 24, 3, pwm3_sel, ARRAY_SIZE(pwm3_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_PWM4_ROOT_SRC] = imx_clk_hw_mux2_flags("pwm4_src", base + 0xb680, 24, 3, pwm4_sel, ARRAY_SIZE(pwm4_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_FLEXTIMER1_ROOT_SRC] = imx_clk_hw_mux2_flags("flextimer1_src", base + 0xb700, 24, 3, flextimer1_sel, ARRAY_SIZE(flextimer1_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_FLEXTIMER2_ROOT_SRC] = imx_clk_hw_mux2_flags("flextimer2_src", base + 0xb780, 24, 3, flextimer2_sel, ARRAY_SIZE(flextimer2_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_SIM1_ROOT_SRC] = imx_clk_hw_mux2_flags("sim1_src", base + 0xb800, 24, 3, sim1_sel, ARRAY_SIZE(sim1_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_SIM2_ROOT_SRC] = imx_clk_hw_mux2_flags("sim2_src", base + 0xb880, 24, 3, sim2_sel, ARRAY_SIZE(sim2_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_GPT1_ROOT_SRC] = imx_clk_hw_mux2_flags("gpt1_src", base + 0xb900, 24, 3, gpt1_sel, ARRAY_SIZE(gpt1_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_GPT2_ROOT_SRC] = imx_clk_hw_mux2_flags("gpt2_src", base + 0xb980, 24, 3, gpt2_sel, ARRAY_SIZE(gpt2_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_GPT3_ROOT_SRC] = imx_clk_hw_mux2_flags("gpt3_src", base + 0xba00, 24, 3, gpt3_sel, ARRAY_SIZE(gpt3_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_GPT4_ROOT_SRC] = imx_clk_hw_mux2_flags("gpt4_src", base + 0xba80, 24, 3, gpt4_sel, ARRAY_SIZE(gpt4_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_TRACE_ROOT_SRC] = imx_clk_hw_mux2_flags("trace_src", base + 0xbb00, 24, 3, trace_sel, ARRAY_SIZE(trace_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_WDOG_ROOT_SRC] = imx_clk_hw_mux2_flags("wdog_src", base + 0xbb80, 24, 3, wdog_sel, ARRAY_SIZE(wdog_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_CSI_MCLK_ROOT_SRC] = imx_clk_hw_mux2_flags("csi_mclk_src", base + 0xbc00, 24, 3, csi_mclk_sel, ARRAY_SIZE(csi_mclk_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_AUDIO_MCLK_ROOT_SRC] = imx_clk_hw_mux2_flags("audio_mclk_src", base + 0xbc80, 24, 3, audio_mclk_sel, ARRAY_SIZE(audio_mclk_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_WRCLK_ROOT_SRC] = imx_clk_hw_mux2_flags("wrclk_src", base + 0xbd00, 24, 3, wrclk_sel, ARRAY_SIZE(wrclk_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_CLKO1_ROOT_SRC] = imx_clk_hw_mux2_flags("clko1_src", base + 0xbd80, 24, 3, clko1_sel, ARRAY_SIZE(clko1_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_CLKO2_ROOT_SRC] = imx_clk_hw_mux2_flags("clko2_src", base + 0xbe00, 24, 3, clko2_sel, ARRAY_SIZE(clko2_sel), CLK_SET_PARENT_GATE);
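/*
 * Each clock root above and below is one slice of a 0x80-aligned CCM
 * TARGET_ROOT register: mux at bits 26-24 (registered above), gate enable
 * at bit 28, pre-divider at bits 18-16 and final divider at bits 5-0.
 */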
hws[IMX7D_ARM_A7_ROOT_CG] = imx_clk_hw_gate3("arm_a7_cg", "arm_a7_src", base + 0x8000, 28);
hws[IMX7D_ARM_M4_ROOT_CG] = imx_clk_hw_gate3("arm_m4_cg", "arm_m4_src", base + 0x8080, 28);
hws[IMX7D_MAIN_AXI_ROOT_CG] = imx_clk_hw_gate3("axi_cg", "axi_src", base + 0x8800, 28);
hws[IMX7D_DISP_AXI_ROOT_CG] = imx_clk_hw_gate3("disp_axi_cg", "disp_axi_src", base + 0x8880, 28);
hws[IMX7D_ENET_AXI_ROOT_CG] = imx_clk_hw_gate3("enet_axi_cg", "enet_axi_src", base + 0x8900, 28);
hws[IMX7D_NAND_USDHC_BUS_ROOT_CG] = imx_clk_hw_gate3("nand_usdhc_cg", "nand_usdhc_src", base + 0x8980, 28);
hws[IMX7D_AHB_CHANNEL_ROOT_CG] = imx_clk_hw_gate3("ahb_cg", "ahb_src", base + 0x9000, 28);
hws[IMX7D_DRAM_PHYM_ROOT_CG] = imx_clk_hw_gate3("dram_phym_cg", "dram_phym_src", base + 0x9800, 28);
hws[IMX7D_DRAM_ROOT_CG] = imx_clk_hw_gate3("dram_cg", "dram_src", base + 0x9880, 28);
hws[IMX7D_DRAM_PHYM_ALT_ROOT_CG] = imx_clk_hw_gate3("dram_phym_alt_cg", "dram_phym_alt_src", base + 0xa000, 28);
hws[IMX7D_DRAM_ALT_ROOT_CG] = imx_clk_hw_gate3("dram_alt_cg", "dram_alt_src", base + 0xa080, 28);
hws[IMX7D_USB_HSIC_ROOT_CG] = imx_clk_hw_gate3("usb_hsic_cg", "usb_hsic_src", base + 0xa100, 28);
hws[IMX7D_PCIE_CTRL_ROOT_CG] = imx_clk_hw_gate3("pcie_ctrl_cg", "pcie_ctrl_src", base + 0xa180, 28);
hws[IMX7D_PCIE_PHY_ROOT_CG] = imx_clk_hw_gate3("pcie_phy_cg", "pcie_phy_src", base + 0xa200, 28);
hws[IMX7D_EPDC_PIXEL_ROOT_CG] = imx_clk_hw_gate3("epdc_pixel_cg", "epdc_pixel_src", base + 0xa280, 28);
hws[IMX7D_LCDIF_PIXEL_ROOT_CG] = imx_clk_hw_gate3("lcdif_pixel_cg", "lcdif_pixel_src", base + 0xa300, 28);
hws[IMX7D_MIPI_DSI_ROOT_CG] = imx_clk_hw_gate3("mipi_dsi_cg", "mipi_dsi_src", base + 0xa380, 28);
hws[IMX7D_MIPI_CSI_ROOT_CG] = imx_clk_hw_gate3("mipi_csi_cg", "mipi_csi_src", base + 0xa400, 28);
hws[IMX7D_MIPI_DPHY_ROOT_CG] = imx_clk_hw_gate3("mipi_dphy_cg", "mipi_dphy_src", base + 0xa480, 28);
hws[IMX7D_SAI1_ROOT_CG] = imx_clk_hw_gate3("sai1_cg", "sai1_src", base + 0xa500, 28);
hws[IMX7D_SAI2_ROOT_CG] = imx_clk_hw_gate3("sai2_cg", "sai2_src", base + 0xa580, 28);
hws[IMX7D_SAI3_ROOT_CG] = imx_clk_hw_gate3("sai3_cg", "sai3_src", base + 0xa600, 28);
hws[IMX7D_SPDIF_ROOT_CG] = imx_clk_hw_gate3("spdif_cg", "spdif_src", base + 0xa680, 28);
hws[IMX7D_ENET1_REF_ROOT_CG] = imx_clk_hw_gate3("enet1_ref_cg", "enet1_ref_src", base + 0xa700, 28);
hws[IMX7D_ENET1_TIME_ROOT_CG] = imx_clk_hw_gate3("enet1_time_cg", "enet1_time_src", base + 0xa780, 28);
hws[IMX7D_ENET2_REF_ROOT_CG] = imx_clk_hw_gate3("enet2_ref_cg", "enet2_ref_src", base + 0xa800, 28);
hws[IMX7D_ENET2_TIME_ROOT_CG] = imx_clk_hw_gate3("enet2_time_cg", "enet2_time_src", base + 0xa880, 28);
hws[IMX7D_ENET_PHY_REF_ROOT_CG] = imx_clk_hw_gate3("enet_phy_ref_cg", "enet_phy_ref_src", base + 0xa900, 28);
hws[IMX7D_EIM_ROOT_CG] = imx_clk_hw_gate3("eim_cg", "eim_src", base + 0xa980, 28);
hws[IMX7D_NAND_ROOT_CG] = imx_clk_hw_gate3("nand_cg", "nand_src", base + 0xaa00, 28);
hws[IMX7D_QSPI_ROOT_CG] = imx_clk_hw_gate3("qspi_cg", "qspi_src", base + 0xaa80, 28);
hws[IMX7D_USDHC1_ROOT_CG] = imx_clk_hw_gate3("usdhc1_cg", "usdhc1_src", base + 0xab00, 28);
hws[IMX7D_USDHC2_ROOT_CG] = imx_clk_hw_gate3("usdhc2_cg", "usdhc2_src", base + 0xab80, 28);
hws[IMX7D_USDHC3_ROOT_CG] = imx_clk_hw_gate3("usdhc3_cg", "usdhc3_src", base + 0xac00, 28);
hws[IMX7D_CAN1_ROOT_CG] = imx_clk_hw_gate3("can1_cg", "can1_src", base + 0xac80, 28);
hws[IMX7D_CAN2_ROOT_CG] = imx_clk_hw_gate3("can2_cg", "can2_src", base + 0xad00, 28);
hws[IMX7D_I2C1_ROOT_CG] = imx_clk_hw_gate3("i2c1_cg", "i2c1_src", base + 0xad80, 28);
hws[IMX7D_I2C2_ROOT_CG] = imx_clk_hw_gate3("i2c2_cg", "i2c2_src", base + 0xae00, 28);
hws[IMX7D_I2C3_ROOT_CG] = imx_clk_hw_gate3("i2c3_cg", "i2c3_src", base + 0xae80, 28);
hws[IMX7D_I2C4_ROOT_CG] = imx_clk_hw_gate3("i2c4_cg", "i2c4_src", base + 0xaf00, 28);
hws[IMX7D_UART1_ROOT_CG] = imx_clk_hw_gate3("uart1_cg", "uart1_src", base + 0xaf80, 28);
hws[IMX7D_UART2_ROOT_CG] = imx_clk_hw_gate3("uart2_cg", "uart2_src", base + 0xb000, 28);
hws[IMX7D_UART3_ROOT_CG] = imx_clk_hw_gate3("uart3_cg", "uart3_src", base + 0xb080, 28);
hws[IMX7D_UART4_ROOT_CG] = imx_clk_hw_gate3("uart4_cg", "uart4_src", base + 0xb100, 28);
hws[IMX7D_UART5_ROOT_CG] = imx_clk_hw_gate3("uart5_cg", "uart5_src", base + 0xb180, 28);
hws[IMX7D_UART6_ROOT_CG] = imx_clk_hw_gate3("uart6_cg", "uart6_src", base + 0xb200, 28);
hws[IMX7D_UART7_ROOT_CG] = imx_clk_hw_gate3("uart7_cg", "uart7_src", base + 0xb280, 28);
hws[IMX7D_ECSPI1_ROOT_CG] = imx_clk_hw_gate3("ecspi1_cg", "ecspi1_src", base + 0xb300, 28);
hws[IMX7D_ECSPI2_ROOT_CG] = imx_clk_hw_gate3("ecspi2_cg", "ecspi2_src", base + 0xb380, 28);
hws[IMX7D_ECSPI3_ROOT_CG] = imx_clk_hw_gate3("ecspi3_cg", "ecspi3_src", base + 0xb400, 28);
hws[IMX7D_ECSPI4_ROOT_CG] = imx_clk_hw_gate3("ecspi4_cg", "ecspi4_src", base + 0xb480, 28);
hws[IMX7D_PWM1_ROOT_CG] = imx_clk_hw_gate3("pwm1_cg", "pwm1_src", base + 0xb500, 28);
hws[IMX7D_PWM2_ROOT_CG] = imx_clk_hw_gate3("pwm2_cg", "pwm2_src", base + 0xb580, 28);
hws[IMX7D_PWM3_ROOT_CG] = imx_clk_hw_gate3("pwm3_cg", "pwm3_src", base + 0xb600, 28);
hws[IMX7D_PWM4_ROOT_CG] = imx_clk_hw_gate3("pwm4_cg", "pwm4_src", base + 0xb680, 28);
hws[IMX7D_FLEXTIMER1_ROOT_CG] = imx_clk_hw_gate3("flextimer1_cg", "flextimer1_src", base + 0xb700, 28);
hws[IMX7D_FLEXTIMER2_ROOT_CG] = imx_clk_hw_gate3("flextimer2_cg", "flextimer2_src", base + 0xb780, 28);
hws[IMX7D_SIM1_ROOT_CG] = imx_clk_hw_gate3("sim1_cg", "sim1_src", base + 0xb800, 28);
hws[IMX7D_SIM2_ROOT_CG] = imx_clk_hw_gate3("sim2_cg", "sim2_src", base + 0xb880, 28);
hws[IMX7D_GPT1_ROOT_CG] = imx_clk_hw_gate3("gpt1_cg", "gpt1_src", base + 0xb900, 28);
hws[IMX7D_GPT2_ROOT_CG] = imx_clk_hw_gate3("gpt2_cg", "gpt2_src", base + 0xb980, 28);
hws[IMX7D_GPT3_ROOT_CG] = imx_clk_hw_gate3("gpt3_cg", "gpt3_src", base + 0xba00, 28);
hws[IMX7D_GPT4_ROOT_CG] = imx_clk_hw_gate3("gpt4_cg", "gpt4_src", base + 0xba80, 28);
hws[IMX7D_TRACE_ROOT_CG] = imx_clk_hw_gate3("trace_cg", "trace_src", base + 0xbb00, 28);
hws[IMX7D_WDOG_ROOT_CG] = imx_clk_hw_gate3("wdog_cg", "wdog_src", base + 0xbb80, 28);
hws[IMX7D_CSI_MCLK_ROOT_CG] = imx_clk_hw_gate3("csi_mclk_cg", "csi_mclk_src", base + 0xbc00, 28);
hws[IMX7D_AUDIO_MCLK_ROOT_CG] = imx_clk_hw_gate3("audio_mclk_cg", "audio_mclk_src", base + 0xbc80, 28);
hws[IMX7D_WRCLK_ROOT_CG] = imx_clk_hw_gate3("wrclk_cg", "wrclk_src", base + 0xbd00, 28);
hws[IMX7D_CLKO1_ROOT_CG] = imx_clk_hw_gate3("clko1_cg", "clko1_src", base + 0xbd80, 28);
hws[IMX7D_CLKO2_ROOT_CG] = imx_clk_hw_gate3("clko2_cg", "clko2_src", base + 0xbe00, 28);
hws[IMX7D_MAIN_AXI_ROOT_PRE_DIV] = imx_clk_hw_divider2("axi_pre_div", "axi_cg", base + 0x8800, 16, 3);
hws[IMX7D_DISP_AXI_ROOT_PRE_DIV] = imx_clk_hw_divider2("disp_axi_pre_div", "disp_axi_cg", base + 0x8880, 16, 3);
hws[IMX7D_ENET_AXI_ROOT_PRE_DIV] = imx_clk_hw_divider2("enet_axi_pre_div", "enet_axi_cg", base + 0x8900, 16, 3);
hws[IMX7D_NAND_USDHC_BUS_ROOT_PRE_DIV] = imx_clk_hw_divider2("nand_usdhc_pre_div", "nand_usdhc_cg", base + 0x8980, 16, 3);
hws[IMX7D_AHB_CHANNEL_ROOT_PRE_DIV] = imx_clk_hw_divider2("ahb_pre_div", "ahb_cg", base + 0x9000, 16, 3);
hws[IMX7D_DRAM_PHYM_ALT_ROOT_PRE_DIV] = imx_clk_hw_divider2("dram_phym_alt_pre_div", "dram_phym_alt_cg", base + 0xa000, 16, 3);
hws[IMX7D_DRAM_ALT_ROOT_PRE_DIV] = imx_clk_hw_divider2("dram_alt_pre_div", "dram_alt_cg", base + 0xa080, 16, 3);
hws[IMX7D_USB_HSIC_ROOT_PRE_DIV] = imx_clk_hw_divider2("usb_hsic_pre_div", "usb_hsic_cg", base + 0xa100, 16, 3);
hws[IMX7D_PCIE_CTRL_ROOT_PRE_DIV] = imx_clk_hw_divider2("pcie_ctrl_pre_div", "pcie_ctrl_cg", base + 0xa180, 16, 3);
hws[IMX7D_PCIE_PHY_ROOT_PRE_DIV] = imx_clk_hw_divider2("pcie_phy_pre_div", "pcie_phy_cg", base + 0xa200, 16, 3);
hws[IMX7D_EPDC_PIXEL_ROOT_PRE_DIV] = imx_clk_hw_divider2("epdc_pixel_pre_div", "epdc_pixel_cg", base + 0xa280, 16, 3);
hws[IMX7D_LCDIF_PIXEL_ROOT_PRE_DIV] = imx_clk_hw_divider2("lcdif_pixel_pre_div", "lcdif_pixel_cg", base + 0xa300, 16, 3);
hws[IMX7D_MIPI_DSI_ROOT_PRE_DIV] = imx_clk_hw_divider2("mipi_dsi_pre_div", "mipi_dsi_cg", base + 0xa380, 16, 3);
hws[IMX7D_MIPI_CSI_ROOT_PRE_DIV] = imx_clk_hw_divider2("mipi_csi_pre_div", "mipi_csi_cg", base + 0xa400, 16, 3);
hws[IMX7D_MIPI_DPHY_ROOT_PRE_DIV] = imx_clk_hw_divider2("mipi_dphy_pre_div", "mipi_dphy_cg", base + 0xa480, 16, 3);
hws[IMX7D_SAI1_ROOT_PRE_DIV] = imx_clk_hw_divider2("sai1_pre_div", "sai1_cg", base + 0xa500, 16, 3);
hws[IMX7D_SAI2_ROOT_PRE_DIV] = imx_clk_hw_divider2("sai2_pre_div", "sai2_cg", base + 0xa580, 16, 3);
hws[IMX7D_SAI3_ROOT_PRE_DIV] = imx_clk_hw_divider2("sai3_pre_div", "sai3_cg", base + 0xa600, 16, 3);
hws[IMX7D_SPDIF_ROOT_PRE_DIV] = imx_clk_hw_divider2("spdif_pre_div", "spdif_cg", base + 0xa680, 16, 3);
hws[IMX7D_ENET1_REF_ROOT_PRE_DIV] = imx_clk_hw_divider2("enet1_ref_pre_div", "enet1_ref_cg", base + 0xa700, 16, 3);
hws[IMX7D_ENET1_TIME_ROOT_PRE_DIV] = imx_clk_hw_divider2("enet1_time_pre_div", "enet1_time_cg", base + 0xa780, 16, 3);
hws[IMX7D_ENET2_REF_ROOT_PRE_DIV] = imx_clk_hw_divider2("enet2_ref_pre_div", "enet2_ref_cg", base + 0xa800, 16, 3);
hws[IMX7D_ENET2_TIME_ROOT_PRE_DIV] = imx_clk_hw_divider2("enet2_time_pre_div", "enet2_time_cg", base + 0xa880, 16, 3);
hws[IMX7D_ENET_PHY_REF_ROOT_PRE_DIV] = imx_clk_hw_divider2("enet_phy_ref_pre_div", "enet_phy_ref_cg", base + 0xa900, 16, 3);
hws[IMX7D_EIM_ROOT_PRE_DIV] = imx_clk_hw_divider2("eim_pre_div", "eim_cg", base + 0xa980, 16, 3);
hws[IMX7D_NAND_ROOT_PRE_DIV] = imx_clk_hw_divider2("nand_pre_div", "nand_cg", base + 0xaa00, 16, 3);
hws[IMX7D_QSPI_ROOT_PRE_DIV] = imx_clk_hw_divider2("qspi_pre_div", "qspi_cg", base + 0xaa80, 16, 3);
hws[IMX7D_USDHC1_ROOT_PRE_DIV] = imx_clk_hw_divider2("usdhc1_pre_div", "usdhc1_cg", base + 0xab00, 16, 3);
hws[IMX7D_USDHC2_ROOT_PRE_DIV] = imx_clk_hw_divider2("usdhc2_pre_div", "usdhc2_cg", base + 0xab80, 16, 3);
hws[IMX7D_USDHC3_ROOT_PRE_DIV] = imx_clk_hw_divider2("usdhc3_pre_div", "usdhc3_cg", base + 0xac00, 16, 3);
hws[IMX7D_CAN1_ROOT_PRE_DIV] = imx_clk_hw_divider2("can1_pre_div", "can1_cg", base + 0xac80, 16, 3);
hws[IMX7D_CAN2_ROOT_PRE_DIV] = imx_clk_hw_divider2("can2_pre_div", "can2_cg", base + 0xad00, 16, 3);
hws[IMX7D_I2C1_ROOT_PRE_DIV] = imx_clk_hw_divider2("i2c1_pre_div", "i2c1_cg", base + 0xad80, 16, 3);
hws[IMX7D_I2C2_ROOT_PRE_DIV] = imx_clk_hw_divider2("i2c2_pre_div", "i2c2_cg", base + 0xae00, 16, 3);
hws[IMX7D_I2C3_ROOT_PRE_DIV] = imx_clk_hw_divider2("i2c3_pre_div", "i2c3_cg", base + 0xae80, 16, 3);
hws[IMX7D_I2C4_ROOT_PRE_DIV] = imx_clk_hw_divider2("i2c4_pre_div", "i2c4_cg", base + 0xaf00, 16, 3);
hws[IMX7D_UART1_ROOT_PRE_DIV] = imx_clk_hw_divider2("uart1_pre_div", "uart1_cg", base + 0xaf80, 16, 3);
hws[IMX7D_UART2_ROOT_PRE_DIV] = imx_clk_hw_divider2("uart2_pre_div", "uart2_cg", base + 0xb000, 16, 3);
hws[IMX7D_UART3_ROOT_PRE_DIV] = imx_clk_hw_divider2("uart3_pre_div", "uart3_cg", base + 0xb080, 16, 3);
hws[IMX7D_UART4_ROOT_PRE_DIV] = imx_clk_hw_divider2("uart4_pre_div", "uart4_cg", base + 0xb100, 16, 3);
hws[IMX7D_UART5_ROOT_PRE_DIV] = imx_clk_hw_divider2("uart5_pre_div", "uart5_cg", base + 0xb180, 16, 3);
hws[IMX7D_UART6_ROOT_PRE_DIV] = imx_clk_hw_divider2("uart6_pre_div", "uart6_cg", base + 0xb200, 16, 3);
hws[IMX7D_UART7_ROOT_PRE_DIV] = imx_clk_hw_divider2("uart7_pre_div", "uart7_cg", base + 0xb280, 16, 3);
hws[IMX7D_ECSPI1_ROOT_PRE_DIV] = imx_clk_hw_divider2("ecspi1_pre_div", "ecspi1_cg", base + 0xb300, 16, 3);
hws[IMX7D_ECSPI2_ROOT_PRE_DIV] = imx_clk_hw_divider2("ecspi2_pre_div", "ecspi2_cg", base + 0xb380, 16, 3);
hws[IMX7D_ECSPI3_ROOT_PRE_DIV] = imx_clk_hw_divider2("ecspi3_pre_div", "ecspi3_cg", base + 0xb400, 16, 3);
hws[IMX7D_ECSPI4_ROOT_PRE_DIV] = imx_clk_hw_divider2("ecspi4_pre_div", "ecspi4_cg", base + 0xb480, 16, 3);
hws[IMX7D_PWM1_ROOT_PRE_DIV] = imx_clk_hw_divider2("pwm1_pre_div", "pwm1_cg", base + 0xb500, 16, 3);
hws[IMX7D_PWM2_ROOT_PRE_DIV] = imx_clk_hw_divider2("pwm2_pre_div", "pwm2_cg", base + 0xb580, 16, 3);
hws[IMX7D_PWM3_ROOT_PRE_DIV] = imx_clk_hw_divider2("pwm3_pre_div", "pwm3_cg", base + 0xb600, 16, 3);
hws[IMX7D_PWM4_ROOT_PRE_DIV] = imx_clk_hw_divider2("pwm4_pre_div", "pwm4_cg", base + 0xb680, 16, 3);
hws[IMX7D_FLEXTIMER1_ROOT_PRE_DIV] = imx_clk_hw_divider2("flextimer1_pre_div", "flextimer1_cg", base + 0xb700, 16, 3);
hws[IMX7D_FLEXTIMER2_ROOT_PRE_DIV] = imx_clk_hw_divider2("flextimer2_pre_div", "flextimer2_cg", base + 0xb780, 16, 3);
hws[IMX7D_SIM1_ROOT_PRE_DIV] = imx_clk_hw_divider2("sim1_pre_div", "sim1_cg", base + 0xb800, 16, 3);
hws[IMX7D_SIM2_ROOT_PRE_DIV] = imx_clk_hw_divider2("sim2_pre_div", "sim2_cg", base + 0xb880, 16, 3);
hws[IMX7D_GPT1_ROOT_PRE_DIV] = imx_clk_hw_divider2("gpt1_pre_div", "gpt1_cg", base + 0xb900, 16, 3);
hws[IMX7D_GPT2_ROOT_PRE_DIV] = imx_clk_hw_divider2("gpt2_pre_div", "gpt2_cg", base + 0xb980, 16, 3);
hws[IMX7D_GPT3_ROOT_PRE_DIV] = imx_clk_hw_divider2("gpt3_pre_div", "gpt3_cg", base + 0xba00, 16, 3);
hws[IMX7D_GPT4_ROOT_PRE_DIV] = imx_clk_hw_divider2("gpt4_pre_div", "gpt4_cg", base + 0xba80, 16, 3);
hws[IMX7D_TRACE_ROOT_PRE_DIV] = imx_clk_hw_divider2("trace_pre_div", "trace_cg", base + 0xbb00, 16, 3);
hws[IMX7D_WDOG_ROOT_PRE_DIV] = imx_clk_hw_divider2("wdog_pre_div", "wdog_cg", base + 0xbb80, 16, 3);
hws[IMX7D_CSI_MCLK_ROOT_PRE_DIV] = imx_clk_hw_divider2("csi_mclk_pre_div", "csi_mclk_cg", base + 0xbc00, 16, 3);
hws[IMX7D_AUDIO_MCLK_ROOT_PRE_DIV] = imx_clk_hw_divider2("audio_mclk_pre_div", "audio_mclk_cg", base + 0xbc80, 16, 3);
hws[IMX7D_WRCLK_ROOT_PRE_DIV] = imx_clk_hw_divider2("wrclk_pre_div", "wrclk_cg", base + 0xbd00, 16, 3);
hws[IMX7D_CLKO1_ROOT_PRE_DIV] = imx_clk_hw_divider2("clko1_pre_div", "clko1_cg", base + 0xbd80, 16, 3);
hws[IMX7D_CLKO2_ROOT_PRE_DIV] = imx_clk_hw_divider2("clko2_pre_div", "clko2_cg", base + 0xbe00, 16, 3);
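/* Final dividers: bits 5-0 of each TARGET_ROOT register (bits 2-0 for the ARM cores). */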
hws[IMX7D_ARM_A7_ROOT_DIV] = imx_clk_hw_divider2("arm_a7_div", "arm_a7_cg", base + 0x8000, 0, 3);
hws[IMX7D_ARM_M4_ROOT_DIV] = imx_clk_hw_divider2("arm_m4_div", "arm_m4_cg", base + 0x8080, 0, 3);
hws[IMX7D_MAIN_AXI_ROOT_DIV] = imx_clk_hw_divider2("axi_post_div", "axi_pre_div", base + 0x8800, 0, 6);
hws[IMX7D_DISP_AXI_ROOT_DIV] = imx_clk_hw_divider2("disp_axi_post_div", "disp_axi_pre_div", base + 0x8880, 0, 6);
hws[IMX7D_ENET_AXI_ROOT_DIV] = imx_clk_hw_divider2("enet_axi_post_div", "enet_axi_pre_div", base + 0x8900, 0, 6);
hws[IMX7D_NAND_USDHC_BUS_ROOT_CLK] = imx_clk_hw_divider2("nand_usdhc_root_clk", "nand_usdhc_pre_div", base + 0x8980, 0, 6);
hws[IMX7D_AHB_CHANNEL_ROOT_DIV] = imx_clk_hw_divider2("ahb_root_clk", "ahb_pre_div", base + 0x9000, 0, 6);
hws[IMX7D_IPG_ROOT_CLK] = imx_clk_hw_divider_flags("ipg_root_clk", "ahb_root_clk", base + 0x9080, 0, 2, CLK_IS_CRITICAL | CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_PARENT);
hws[IMX7D_DRAM_ROOT_DIV] = imx_clk_hw_divider2("dram_post_div", "dram_cg", base + 0x9880, 0, 3);
hws[IMX7D_DRAM_PHYM_ALT_ROOT_DIV] = imx_clk_hw_divider2("dram_phym_alt_post_div", "dram_phym_alt_pre_div", base + 0xa000, 0, 3);
hws[IMX7D_DRAM_ALT_ROOT_DIV] = imx_clk_hw_divider2("dram_alt_post_div", "dram_alt_pre_div", base + 0xa080, 0, 3);
hws[IMX7D_USB_HSIC_ROOT_DIV] = imx_clk_hw_divider2("usb_hsic_post_div", "usb_hsic_pre_div", base + 0xa100, 0, 6);
hws[IMX7D_PCIE_CTRL_ROOT_DIV] = imx_clk_hw_divider2("pcie_ctrl_post_div", "pcie_ctrl_pre_div", base + 0xa180, 0, 6);
hws[IMX7D_PCIE_PHY_ROOT_DIV] = imx_clk_hw_divider2("pcie_phy_post_div", "pcie_phy_pre_div", base + 0xa200, 0, 6);
hws[IMX7D_EPDC_PIXEL_ROOT_DIV] = imx_clk_hw_divider2("epdc_pixel_post_div", "epdc_pixel_pre_div", base + 0xa280, 0, 6);
hws[IMX7D_LCDIF_PIXEL_ROOT_DIV] = imx_clk_hw_divider2("lcdif_pixel_post_div", "lcdif_pixel_pre_div", base + 0xa300, 0, 6);
hws[IMX7D_MIPI_DSI_ROOT_DIV] = imx_clk_hw_divider2("mipi_dsi_post_div", "mipi_dsi_pre_div", base + 0xa380, 0, 6);
hws[IMX7D_MIPI_CSI_ROOT_DIV] = imx_clk_hw_divider2("mipi_csi_post_div", "mipi_csi_pre_div", base + 0xa400, 0, 6);
hws[IMX7D_MIPI_DPHY_ROOT_DIV] = imx_clk_hw_divider2("mipi_dphy_post_div", "mipi_dphy_pre_div", base + 0xa480, 0, 6);
hws[IMX7D_SAI1_ROOT_DIV] = imx_clk_hw_divider2("sai1_post_div", "sai1_pre_div", base + 0xa500, 0, 6);
hws[IMX7D_SAI2_ROOT_DIV] = imx_clk_hw_divider2("sai2_post_div", "sai2_pre_div", base + 0xa580, 0, 6);
hws[IMX7D_SAI3_ROOT_DIV] = imx_clk_hw_divider2("sai3_post_div", "sai3_pre_div", base + 0xa600, 0, 6);
hws[IMX7D_SPDIF_ROOT_DIV] = imx_clk_hw_divider2("spdif_post_div", "spdif_pre_div", base + 0xa680, 0, 6);
hws[IMX7D_ENET1_REF_ROOT_DIV] = imx_clk_hw_divider2("enet1_ref_post_div", "enet1_ref_pre_div", base + 0xa700, 0, 6);
hws[IMX7D_ENET1_TIME_ROOT_DIV] = imx_clk_hw_divider2("enet1_time_post_div", "enet1_time_pre_div", base + 0xa780, 0, 6);
hws[IMX7D_ENET2_REF_ROOT_DIV] = imx_clk_hw_divider2("enet2_ref_post_div", "enet2_ref_pre_div", base + 0xa800, 0, 6);
hws[IMX7D_ENET2_TIME_ROOT_DIV] = imx_clk_hw_divider2("enet2_time_post_div", "enet2_time_pre_div", base + 0xa880, 0, 6);
hws[IMX7D_ENET_PHY_REF_ROOT_CLK] = imx_clk_hw_divider2("enet_phy_ref_root_clk", "enet_phy_ref_pre_div", base + 0xa900, 0, 6);
hws[IMX7D_EIM_ROOT_DIV] = imx_clk_hw_divider2("eim_post_div", "eim_pre_div", base + 0xa980, 0, 6);
hws[IMX7D_NAND_ROOT_CLK] = imx_clk_hw_divider2("nand_root_clk", "nand_pre_div", base + 0xaa00, 0, 6);
hws[IMX7D_QSPI_ROOT_DIV] = imx_clk_hw_divider2("qspi_post_div", "qspi_pre_div", base + 0xaa80, 0, 6);
hws[IMX7D_USDHC1_ROOT_DIV] = imx_clk_hw_divider2("usdhc1_post_div", "usdhc1_pre_div", base + 0xab00, 0, 6);
hws[IMX7D_USDHC2_ROOT_DIV] = imx_clk_hw_divider2("usdhc2_post_div", "usdhc2_pre_div", base + 0xab80, 0, 6);
hws[IMX7D_USDHC3_ROOT_DIV] = imx_clk_hw_divider2("usdhc3_post_div", "usdhc3_pre_div", base + 0xac00, 0, 6);
hws[IMX7D_CAN1_ROOT_DIV] = imx_clk_hw_divider2("can1_post_div", "can1_pre_div", base + 0xac80, 0, 6);
hws[IMX7D_CAN2_ROOT_DIV] = imx_clk_hw_divider2("can2_post_div", "can2_pre_div", base + 0xad00, 0, 6);
hws[IMX7D_I2C1_ROOT_DIV] = imx_clk_hw_divider2("i2c1_post_div", "i2c1_pre_div", base + 0xad80, 0, 6);
hws[IMX7D_I2C2_ROOT_DIV] = imx_clk_hw_divider2("i2c2_post_div", "i2c2_pre_div", base + 0xae00, 0, 6);
hws[IMX7D_I2C3_ROOT_DIV] = imx_clk_hw_divider2("i2c3_post_div", "i2c3_pre_div", base + 0xae80, 0, 6);
hws[IMX7D_I2C4_ROOT_DIV] = imx_clk_hw_divider2("i2c4_post_div", "i2c4_pre_div", base + 0xaf00, 0, 6);
hws[IMX7D_UART1_ROOT_DIV] = imx_clk_hw_divider2("uart1_post_div", "uart1_pre_div", base + 0xaf80, 0, 6);
hws[IMX7D_UART2_ROOT_DIV] = imx_clk_hw_divider2("uart2_post_div", "uart2_pre_div", base + 0xb000, 0, 6);
hws[IMX7D_UART3_ROOT_DIV] = imx_clk_hw_divider2("uart3_post_div", "uart3_pre_div", base + 0xb080, 0, 6);
hws[IMX7D_UART4_ROOT_DIV] = imx_clk_hw_divider2("uart4_post_div", "uart4_pre_div", base + 0xb100, 0, 6);
hws[IMX7D_UART5_ROOT_DIV] = imx_clk_hw_divider2("uart5_post_div", "uart5_pre_div", base + 0xb180, 0, 6);
hws[IMX7D_UART6_ROOT_DIV] = imx_clk_hw_divider2("uart6_post_div", "uart6_pre_div", base + 0xb200, 0, 6);
hws[IMX7D_UART7_ROOT_DIV] = imx_clk_hw_divider2("uart7_post_div", "uart7_pre_div", base + 0xb280, 0, 6);
hws[IMX7D_ECSPI1_ROOT_DIV] = imx_clk_hw_divider2("ecspi1_post_div", "ecspi1_pre_div", base + 0xb300, 0, 6);
hws[IMX7D_ECSPI2_ROOT_DIV] = imx_clk_hw_divider2("ecspi2_post_div", "ecspi2_pre_div", base + 0xb380, 0, 6);
hws[IMX7D_ECSPI3_ROOT_DIV] = imx_clk_hw_divider2("ecspi3_post_div", "ecspi3_pre_div", base + 0xb400, 0, 6);
hws[IMX7D_ECSPI4_ROOT_DIV] = imx_clk_hw_divider2("ecspi4_post_div", "ecspi4_pre_div", base + 0xb480, 0, 6);
hws[IMX7D_PWM1_ROOT_DIV] = imx_clk_hw_divider2("pwm1_post_div", "pwm1_pre_div", base + 0xb500, 0, 6);
hws[IMX7D_PWM2_ROOT_DIV] = imx_clk_hw_divider2("pwm2_post_div", "pwm2_pre_div", base + 0xb580, 0, 6);
hws[IMX7D_PWM3_ROOT_DIV] = imx_clk_hw_divider2("pwm3_post_div", "pwm3_pre_div", base + 0xb600, 0, 6);
hws[IMX7D_PWM4_ROOT_DIV] = imx_clk_hw_divider2("pwm4_post_div", "pwm4_pre_div", base + 0xb680, 0, 6);
hws[IMX7D_FLEXTIMER1_ROOT_DIV] = imx_clk_hw_divider2("flextimer1_post_div", "flextimer1_pre_div", base + 0xb700, 0, 6);
hws[IMX7D_FLEXTIMER2_ROOT_DIV] = imx_clk_hw_divider2("flextimer2_post_div", "flextimer2_pre_div", base + 0xb780, 0, 6);
hws[IMX7D_SIM1_ROOT_DIV] = imx_clk_hw_divider2("sim1_post_div", "sim1_pre_div", base + 0xb800, 0, 6);
hws[IMX7D_SIM2_ROOT_DIV] = imx_clk_hw_divider2("sim2_post_div", "sim2_pre_div", base + 0xb880, 0, 6);
hws[IMX7D_GPT1_ROOT_DIV] = imx_clk_hw_divider2("gpt1_post_div", "gpt1_pre_div", base + 0xb900, 0, 6);
hws[IMX7D_GPT2_ROOT_DIV] = imx_clk_hw_divider2("gpt2_post_div", "gpt2_pre_div", base + 0xb980, 0, 6);
hws[IMX7D_GPT3_ROOT_DIV] = imx_clk_hw_divider2("gpt3_post_div", "gpt3_pre_div", base + 0xba00, 0, 6);
hws[IMX7D_GPT4_ROOT_DIV] = imx_clk_hw_divider2("gpt4_post_div", "gpt4_pre_div", base + 0xba80, 0, 6);
hws[IMX7D_TRACE_ROOT_DIV] = imx_clk_hw_divider2("trace_post_div", "trace_pre_div", base + 0xbb00, 0, 6);
hws[IMX7D_WDOG_ROOT_DIV] = imx_clk_hw_divider2("wdog_post_div", "wdog_pre_div", base + 0xbb80, 0, 6);
hws[IMX7D_CSI_MCLK_ROOT_DIV] = imx_clk_hw_divider2("csi_mclk_post_div", "csi_mclk_pre_div", base + 0xbc00, 0, 6);
hws[IMX7D_AUDIO_MCLK_ROOT_DIV] = imx_clk_hw_divider2("audio_mclk_post_div", "audio_mclk_pre_div", base + 0xbc80, 0, 6);
hws[IMX7D_WRCLK_ROOT_DIV] = imx_clk_hw_divider2("wrclk_post_div", "wrclk_pre_div", base + 0xbd00, 0, 6);
hws[IMX7D_CLKO1_ROOT_DIV] = imx_clk_hw_divider2("clko1_post_div", "clko1_pre_div", base + 0xbd80, 0, 6);
hws[IMX7D_CLKO2_ROOT_DIV] = imx_clk_hw_divider2("clko2_post_div", "clko2_pre_div", base + 0xbe00, 0, 6);
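/*
 * Leaf gates in the CCM CCGR space (0x4000 + n * 0x10). Gates passed a common
 * share_count refcount back a single CCGR field used by several consumers.
 */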
hws[IMX7D_ARM_A7_ROOT_CLK] = imx_clk_hw_gate2_flags("arm_a7_root_clk", "arm_a7_div", base + 0x4000, 0, CLK_OPS_PARENT_ENABLE);
hws[IMX7D_ARM_M4_ROOT_CLK] = imx_clk_hw_gate4("arm_m4_root_clk", "arm_m4_div", base + 0x4010, 0);
hws[IMX7D_MAIN_AXI_ROOT_CLK] = imx_clk_hw_gate2_flags("main_axi_root_clk", "axi_post_div", base + 0x4040, 0, CLK_IS_CRITICAL | CLK_OPS_PARENT_ENABLE);
hws[IMX7D_DISP_AXI_ROOT_CLK] = imx_clk_hw_gate4("disp_axi_root_clk", "disp_axi_post_div", base + 0x4050, 0);
hws[IMX7D_ENET_AXI_ROOT_CLK] = imx_clk_hw_gate4("enet_axi_root_clk", "enet_axi_post_div", base + 0x4060, 0);
hws[IMX7D_OCRAM_CLK] = imx_clk_hw_gate4("ocram_clk", "main_axi_root_clk", base + 0x4110, 0);
hws[IMX7D_OCRAM_S_CLK] = imx_clk_hw_gate4("ocram_s_clk", "ahb_root_clk", base + 0x4120, 0);
hws[IMX7D_DRAM_ROOT_CLK] = imx_clk_hw_gate2_flags("dram_root_clk", "dram_post_div", base + 0x4130, 0, CLK_IS_CRITICAL | CLK_OPS_PARENT_ENABLE);
hws[IMX7D_DRAM_PHYM_ROOT_CLK] = imx_clk_hw_gate2_flags("dram_phym_root_clk", "dram_phym_cg", base + 0x4130, 0, CLK_IS_CRITICAL | CLK_OPS_PARENT_ENABLE);
hws[IMX7D_DRAM_PHYM_ALT_ROOT_CLK] = imx_clk_hw_gate2_flags("dram_phym_alt_root_clk", "dram_phym_alt_post_div", base + 0x4130, 0, CLK_IS_CRITICAL | CLK_OPS_PARENT_ENABLE);
hws[IMX7D_DRAM_ALT_ROOT_CLK] = imx_clk_hw_gate2_flags("dram_alt_root_clk", "dram_alt_post_div", base + 0x4130, 0, CLK_IS_CRITICAL | CLK_OPS_PARENT_ENABLE);
hws[IMX7D_OCOTP_CLK] = imx_clk_hw_gate4("ocotp_clk", "ipg_root_clk", base + 0x4230, 0);
hws[IMX7D_MU_ROOT_CLK] = imx_clk_hw_gate4("mu_root_clk", "ipg_root_clk", base + 0x4270, 0);
hws[IMX7D_CAAM_CLK] = imx_clk_hw_gate4("caam_clk", "ipg_root_clk", base + 0x4240, 0);
hws[IMX7D_USB_HSIC_ROOT_CLK] = imx_clk_hw_gate4("usb_hsic_root_clk", "usb_hsic_post_div", base + 0x4690, 0);
hws[IMX7D_SDMA_CORE_CLK] = imx_clk_hw_gate4("sdma_root_clk", "ahb_root_clk", base + 0x4480, 0);
hws[IMX7D_PCIE_CTRL_ROOT_CLK] = imx_clk_hw_gate4("pcie_ctrl_root_clk", "pcie_ctrl_post_div", base + 0x4600, 0);
hws[IMX7D_PCIE_PHY_ROOT_CLK] = imx_clk_hw_gate4("pcie_phy_root_clk", "pcie_phy_post_div", base + 0x4600, 0);
hws[IMX7D_EPDC_PIXEL_ROOT_CLK] = imx_clk_hw_gate4("epdc_pixel_root_clk", "epdc_pixel_post_div", base + 0x44a0, 0);
hws[IMX7D_LCDIF_PIXEL_ROOT_CLK] = imx_clk_hw_gate4("lcdif_pixel_root_clk", "lcdif_pixel_post_div", base + 0x44b0, 0);
hws[IMX7D_PXP_CLK] = imx_clk_hw_gate4("pxp_clk", "main_axi_root_clk", base + 0x44c0, 0);
hws[IMX7D_MIPI_DSI_ROOT_CLK] = imx_clk_hw_gate4("mipi_dsi_root_clk", "mipi_dsi_post_div", base + 0x4650, 0);
hws[IMX7D_MIPI_CSI_ROOT_CLK] = imx_clk_hw_gate4("mipi_csi_root_clk", "mipi_csi_post_div", base + 0x4640, 0);
hws[IMX7D_MIPI_DPHY_ROOT_CLK] = imx_clk_hw_gate4("mipi_dphy_root_clk", "mipi_dphy_post_div", base + 0x4660, 0);
hws[IMX7D_ENET1_IPG_ROOT_CLK] = imx_clk_hw_gate2_shared2("enet1_ipg_root_clk", "enet_axi_post_div", base + 0x4700, 0, &share_count_enet1);
hws[IMX7D_ENET1_TIME_ROOT_CLK] = imx_clk_hw_gate2_shared2("enet1_time_root_clk", "enet1_time_post_div", base + 0x4700, 0, &share_count_enet1);
hws[IMX7D_ENET2_IPG_ROOT_CLK] = imx_clk_hw_gate2_shared2("enet2_ipg_root_clk", "enet_axi_post_div", base + 0x4710, 0, &share_count_enet2);
hws[IMX7D_ENET2_TIME_ROOT_CLK] = imx_clk_hw_gate2_shared2("enet2_time_root_clk", "enet2_time_post_div", base + 0x4710, 0, &share_count_enet2);
hws[IMX7D_SAI1_ROOT_CLK] = imx_clk_hw_gate2_shared2("sai1_root_clk", "sai1_post_div", base + 0x48c0, 0, &share_count_sai1);
hws[IMX7D_SAI1_IPG_CLK] = imx_clk_hw_gate2_shared2("sai1_ipg_clk", "ipg_root_clk", base + 0x48c0, 0, &share_count_sai1);
hws[IMX7D_SAI2_ROOT_CLK] = imx_clk_hw_gate2_shared2("sai2_root_clk", "sai2_post_div", base + 0x48d0, 0, &share_count_sai2);
hws[IMX7D_SAI2_IPG_CLK] = imx_clk_hw_gate2_shared2("sai2_ipg_clk", "ipg_root_clk", base + 0x48d0, 0, &share_count_sai2);
hws[IMX7D_SAI3_ROOT_CLK] = imx_clk_hw_gate2_shared2("sai3_root_clk", "sai3_post_div", base + 0x48e0, 0, &share_count_sai3);
hws[IMX7D_SAI3_IPG_CLK] = imx_clk_hw_gate2_shared2("sai3_ipg_clk", "ipg_root_clk", base + 0x48e0, 0, &share_count_sai3);
hws[IMX7D_SPDIF_ROOT_CLK] = imx_clk_hw_gate4("spdif_root_clk", "spdif_post_div", base + 0x44d0, 0);
hws[IMX7D_EIM_ROOT_CLK] = imx_clk_hw_gate4("eim_root_clk", "eim_post_div", base + 0x4160, 0);
hws[IMX7D_NAND_RAWNAND_CLK] = imx_clk_hw_gate2_shared2("nand_rawnand_clk", "nand_root_clk", base + 0x4140, 0, &share_count_nand);
hws[IMX7D_NAND_USDHC_BUS_RAWNAND_CLK] = imx_clk_hw_gate2_shared2("nand_usdhc_rawnand_clk", "nand_usdhc_root_clk", base + 0x4140, 0, &share_count_nand);
hws[IMX7D_QSPI_ROOT_CLK] = imx_clk_hw_gate4("qspi_root_clk", "qspi_post_div", base + 0x4150, 0);
hws[IMX7D_USDHC1_ROOT_CLK] = imx_clk_hw_gate4("usdhc1_root_clk", "usdhc1_post_div", base + 0x46c0, 0);
hws[IMX7D_USDHC2_ROOT_CLK] = imx_clk_hw_gate4("usdhc2_root_clk", "usdhc2_post_div", base + 0x46d0, 0);
hws[IMX7D_USDHC3_ROOT_CLK] = imx_clk_hw_gate4("usdhc3_root_clk", "usdhc3_post_div", base + 0x46e0, 0);
hws[IMX7D_CAN1_ROOT_CLK] = imx_clk_hw_gate4("can1_root_clk", "can1_post_div", base + 0x4740, 0);
hws[IMX7D_CAN2_ROOT_CLK] = imx_clk_hw_gate4("can2_root_clk", "can2_post_div", base + 0x4750, 0);
hws[IMX7D_I2C1_ROOT_CLK] = imx_clk_hw_gate4("i2c1_root_clk", "i2c1_post_div", base + 0x4880, 0);
hws[IMX7D_I2C2_ROOT_CLK] = imx_clk_hw_gate4("i2c2_root_clk", "i2c2_post_div", base + 0x4890, 0);
hws[IMX7D_I2C3_ROOT_CLK] = imx_clk_hw_gate4("i2c3_root_clk", "i2c3_post_div", base + 0x48a0, 0);
hws[IMX7D_I2C4_ROOT_CLK] = imx_clk_hw_gate4("i2c4_root_clk", "i2c4_post_div", base + 0x48b0, 0);
hws[IMX7D_UART1_ROOT_CLK] = imx_clk_hw_gate4("uart1_root_clk", "uart1_post_div", base + 0x4940, 0);
hws[IMX7D_UART2_ROOT_CLK] = imx_clk_hw_gate4("uart2_root_clk", "uart2_post_div", base + 0x4950, 0);
hws[IMX7D_UART3_ROOT_CLK] = imx_clk_hw_gate4("uart3_root_clk", "uart3_post_div", base + 0x4960, 0);
hws[IMX7D_UART4_ROOT_CLK] = imx_clk_hw_gate4("uart4_root_clk", "uart4_post_div", base + 0x4970, 0);
hws[IMX7D_UART5_ROOT_CLK] = imx_clk_hw_gate4("uart5_root_clk", "uart5_post_div", base + 0x4980, 0);
hws[IMX7D_UART6_ROOT_CLK] = imx_clk_hw_gate4("uart6_root_clk", "uart6_post_div", base + 0x4990, 0);
hws[IMX7D_UART7_ROOT_CLK] = imx_clk_hw_gate4("uart7_root_clk", "uart7_post_div", base + 0x49a0, 0);
hws[IMX7D_ECSPI1_ROOT_CLK] = imx_clk_hw_gate4("ecspi1_root_clk", "ecspi1_post_div", base + 0x4780, 0);
hws[IMX7D_ECSPI2_ROOT_CLK] = imx_clk_hw_gate4("ecspi2_root_clk", "ecspi2_post_div", base + 0x4790, 0);
hws[IMX7D_ECSPI3_ROOT_CLK] = imx_clk_hw_gate4("ecspi3_root_clk", "ecspi3_post_div", base + 0x47a0, 0);
hws[IMX7D_ECSPI4_ROOT_CLK] = imx_clk_hw_gate4("ecspi4_root_clk", "ecspi4_post_div", base + 0x47b0, 0);
hws[IMX7D_PWM1_ROOT_CLK] = imx_clk_hw_gate4("pwm1_root_clk", "pwm1_post_div", base + 0x4840, 0);
hws[IMX7D_PWM2_ROOT_CLK] = imx_clk_hw_gate4("pwm2_root_clk", "pwm2_post_div", base + 0x4850, 0);
hws[IMX7D_PWM3_ROOT_CLK] = imx_clk_hw_gate4("pwm3_root_clk", "pwm3_post_div", base + 0x4860, 0);
hws[IMX7D_PWM4_ROOT_CLK] = imx_clk_hw_gate4("pwm4_root_clk", "pwm4_post_div", base + 0x4870, 0);
hws[IMX7D_FLEXTIMER1_ROOT_CLK] = imx_clk_hw_gate4("flextimer1_root_clk", "flextimer1_post_div", base + 0x4800, 0);
hws[IMX7D_FLEXTIMER2_ROOT_CLK] = imx_clk_hw_gate4("flextimer2_root_clk", "flextimer2_post_div", base + 0x4810, 0);
hws[IMX7D_SIM1_ROOT_CLK] = imx_clk_hw_gate4("sim1_root_clk", "sim1_post_div", base + 0x4900, 0);
hws[IMX7D_SIM2_ROOT_CLK] = imx_clk_hw_gate4("sim2_root_clk", "sim2_post_div", base + 0x4910, 0);
hws[IMX7D_GPT1_ROOT_CLK] = imx_clk_hw_gate4("gpt1_root_clk", "gpt1_post_div", base + 0x47c0, 0);
hws[IMX7D_GPT2_ROOT_CLK] = imx_clk_hw_gate4("gpt2_root_clk", "gpt2_post_div", base + 0x47d0, 0);
hws[IMX7D_GPT3_ROOT_CLK] = imx_clk_hw_gate4("gpt3_root_clk", "gpt3_post_div", base + 0x47e0, 0);
hws[IMX7D_GPT4_ROOT_CLK] = imx_clk_hw_gate4("gpt4_root_clk", "gpt4_post_div", base + 0x47f0, 0);
hws[IMX7D_TRACE_ROOT_CLK] = imx_clk_hw_gate4("trace_root_clk", "trace_post_div", base + 0x4300, 0);
hws[IMX7D_WDOG1_ROOT_CLK] = imx_clk_hw_gate4("wdog1_root_clk", "wdog_post_div", base + 0x49c0, 0);
hws[IMX7D_WDOG2_ROOT_CLK] = imx_clk_hw_gate4("wdog2_root_clk", "wdog_post_div", base + 0x49d0, 0);
hws[IMX7D_WDOG3_ROOT_CLK] = imx_clk_hw_gate4("wdog3_root_clk", "wdog_post_div", base + 0x49e0, 0);
hws[IMX7D_WDOG4_ROOT_CLK] = imx_clk_hw_gate4("wdog4_root_clk", "wdog_post_div", base + 0x49f0, 0);
hws[IMX7D_KPP_ROOT_CLK] = imx_clk_hw_gate4("kpp_root_clk", "ipg_root_clk", base + 0x4aa0, 0);
hws[IMX7D_CSI_MCLK_ROOT_CLK] = imx_clk_hw_gate4("csi_mclk_root_clk", "csi_mclk_post_div", base + 0x4490, 0);
hws[IMX7D_WRCLK_ROOT_CLK] = imx_clk_hw_gate4("wrclk_root_clk", "wrclk_post_div", base + 0x47a0, 0);
hws[IMX7D_USB_CTRL_CLK] = imx_clk_hw_gate4("usb_ctrl_clk", "ahb_root_clk", base + 0x4680, 0);
hws[IMX7D_USB_PHY1_CLK] = imx_clk_hw_gate4("usb_phy1_clk", "pll_usb1_main_clk", base + 0x46a0, 0);
hws[IMX7D_USB_PHY2_CLK] = imx_clk_hw_gate4("usb_phy2_clk", "pll_usb_main_clk", base + 0x46b0, 0);
hws[IMX7D_ADC_ROOT_CLK] = imx_clk_hw_gate4("adc_root_clk", "ipg_root_clk", base + 0x4200, 0);
hws[IMX7D_GPT_3M_CLK] = imx_clk_hw_fixed_factor("gpt_3m", "osc", 1, 8);
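/*
 * Virtual "arm" cpu clock: bundles the A7 root clock, its mux and both PLLs
 * so cpufreq can temporarily run the core from pll_sys while pll_arm relocks.
 */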
hws[IMX7D_CLK_ARM] = imx_clk_hw_cpu("arm", "arm_a7_root_clk",
hws[IMX7D_ARM_A7_ROOT_CLK]->clk,
hws[IMX7D_ARM_A7_ROOT_SRC]->clk,
hws[IMX7D_PLL_ARM_MAIN_CLK]->clk,
hws[IMX7D_PLL_SYS_MAIN_CLK]->clk);
imx_check_clk_hws(hws, IMX7D_CLK_END);
of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
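/*
 * Consumers pick these clocks up by dt-binding index via the onecell
 * provider, e.g. a hypothetical node: clocks = <&clks IMX7D_UART1_ROOT_CLK>;
 */
/* Take the PLLs out of bypass: point each bypass mux at its PLL output. */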
clk_set_parent(hws[IMX7D_PLL_ARM_MAIN_BYPASS]->clk, hws[IMX7D_PLL_ARM_MAIN]->clk);
clk_set_parent(hws[IMX7D_PLL_DRAM_MAIN_BYPASS]->clk, hws[IMX7D_PLL_DRAM_MAIN]->clk);
clk_set_parent(hws[IMX7D_PLL_SYS_MAIN_BYPASS]->clk, hws[IMX7D_PLL_SYS_MAIN]->clk);
clk_set_parent(hws[IMX7D_PLL_ENET_MAIN_BYPASS]->clk, hws[IMX7D_PLL_ENET_MAIN]->clk);
clk_set_parent(hws[IMX7D_PLL_AUDIO_MAIN_BYPASS]->clk, hws[IMX7D_PLL_AUDIO_MAIN]->clk);
clk_set_parent(hws[IMX7D_PLL_VIDEO_MAIN_BYPASS]->clk, hws[IMX7D_PLL_VIDEO_MAIN]->clk);
clk_set_parent(hws[IMX7D_MIPI_CSI_ROOT_SRC]->clk, hws[IMX7D_PLL_SYS_PFD3_CLK]->clk);
/* Keep the legacy GPT clock setting: the GPT1 root clock must run at twice the GPT counter frequency. */
clk_set_parent(hws[IMX7D_GPT1_ROOT_SRC]->clk, hws[IMX7D_OSC_24M_CLK]->clk);
/* Set the USBPHY clock rate; the USB_PLL in the CCM is sourced from USBOTG2. */
hws[IMX7D_USB1_MAIN_480M_CLK] = imx_clk_hw_fixed_factor("pll_usb1_main_clk", "osc", 20, 1);
hws[IMX7D_USB_MAIN_480M_CLK] = imx_clk_hw_fixed_factor("pll_usb_main_clk", "osc", 20, 1);
imx_register_uart_clocks();
}
CLK_OF_DECLARE(imx7d, "fsl,imx7d-ccm", imx7d_clocks_init);
| linux-master | drivers/clk/imx/clk-imx7d.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2016 Freescale Semiconductor, Inc.
* Copyright 2017-2018 NXP.
*/
#include <dt-bindings/clock/imx6sll-clock.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include "clk.h"
#define CCM_ANALOG_PLL_BYPASS (0x1 << 16)
#define xPLL_CLR(offset) ((offset) + 0x8)
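/*
 * Mux parent-name tables; array order follows the CCM mux field encoding
 * (value 0 selects the first entry).
 */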
static const char *pll_bypass_src_sels[] = { "osc", "dummy", };
static const char *pll1_bypass_sels[] = { "pll1", "pll1_bypass_src", };
static const char *pll2_bypass_sels[] = { "pll2", "pll2_bypass_src", };
static const char *pll3_bypass_sels[] = { "pll3", "pll3_bypass_src", };
static const char *pll4_bypass_sels[] = { "pll4", "pll4_bypass_src", };
static const char *pll5_bypass_sels[] = { "pll5", "pll5_bypass_src", };
static const char *pll6_bypass_sels[] = { "pll6", "pll6_bypass_src", };
static const char *pll7_bypass_sels[] = { "pll7", "pll7_bypass_src", };
static const char *step_sels[] = { "osc", "pll2_pfd2_396m", };
static const char *pll1_sw_sels[] = { "pll1_sys", "step", };
static const char *axi_alt_sels[] = { "pll2_pfd2_396m", "pll3_pfd1_540m", };
static const char *axi_sels[] = { "periph", "axi_alt_sel", };
static const char *periph_pre_sels[] = { "pll2_bus", "pll2_pfd2_396m", "pll2_pfd0_352m", "pll2_198m", };
static const char *periph2_pre_sels[] = { "pll2_bus", "pll2_pfd2_396m", "pll2_pfd0_352m", "pll4_audio_div", };
static const char *periph_clk2_sels[] = { "pll3_usb_otg", "osc", "osc", };
static const char *periph2_clk2_sels[] = { "pll3_usb_otg", "osc", };
static const char *periph_sels[] = { "periph_pre", "periph_clk2", };
static const char *periph2_sels[] = { "periph2_pre", "periph2_clk2", };
static const char *usdhc_sels[] = { "pll2_pfd2_396m", "pll2_pfd0_352m", };
static const char *ssi_sels[] = { "pll3_pfd2_508m", "pll3_pfd3_454m", "pll4_audio_div", "dummy", };
static const char *spdif_sels[] = { "pll4_audio_div", "pll3_pfd2_508m", "pll5_video_div", "pll3_usb_otg", };
static const char *ldb_di0_div_sels[] = { "ldb_di0_div_3_5", "ldb_di0_div_7", };
static const char *ldb_di1_div_sels[] = { "ldb_di1_div_3_5", "ldb_di1_div_7", };
static const char *ldb_di0_sels[] = { "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll2_pfd3_594m", "pll2_pfd1_594m", "pll3_pfd3_454m", };
static const char *ldb_di1_sels[] = { "pll3_usb_otg", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll2_bus", "pll3_pfd3_454m", "pll3_pfd2_508m", };
static const char *lcdif_pre_sels[] = { "pll2_bus", "pll3_pfd3_454m", "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd1_594m", "pll3_pfd1_540m", };
static const char *ecspi_sels[] = { "pll3_60m", "osc", };
static const char *uart_sels[] = { "pll3_80m", "osc", };
static const char *perclk_sels[] = { "ipg", "osc", };
static const char *lcdif_sels[] = { "lcdif_podf", "ipp_di0", "ipp_di1", "ldb_di0", "ldb_di1", };
static const char *epdc_pre_sels[] = { "pll2_bus", "pll3_usb_otg", "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd2_508m", };
static const char *epdc_sels[] = { "epdc_podf", "ipp_di0", "ipp_di1", "ldb_di0", "ldb_di1", };
static struct clk_hw **hws;
static struct clk_hw_onecell_data *clk_hw_data;
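/* Register-value to divide-ratio mappings for the PLL post/video dividers. */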
static const struct clk_div_table post_div_table[] = {
{ .val = 2, .div = 1, },
{ .val = 1, .div = 2, },
{ .val = 0, .div = 4, },
{ }
};
static const struct clk_div_table video_div_table[] = {
{ .val = 0, .div = 1, },
{ .val = 1, .div = 2, },
{ .val = 2, .div = 1, },
{ .val = 3, .div = 4, },
{ }
};
static u32 share_count_audio;
static u32 share_count_ssi1;
static u32 share_count_ssi2;
static u32 share_count_ssi3;
static void __init imx6sll_clocks_init(struct device_node *ccm_node)
{
struct device_node *np;
void __iomem *base;
clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
IMX6SLL_CLK_END), GFP_KERNEL);
if (WARN_ON(!clk_hw_data))
return;
clk_hw_data->num = IMX6SLL_CLK_END;
hws = clk_hw_data->hws;
hws[IMX6SLL_CLK_DUMMY] = imx_clk_hw_fixed("dummy", 0);
hws[IMX6SLL_CLK_CKIL] = imx_get_clk_hw_by_name(ccm_node, "ckil");
hws[IMX6SLL_CLK_OSC] = imx_get_clk_hw_by_name(ccm_node, "osc");
/* The ipp_di clocks are external inputs. */
hws[IMX6SLL_CLK_IPP_DI0] = imx_get_clk_hw_by_name(ccm_node, "ipp_di0");
hws[IMX6SLL_CLK_IPP_DI1] = imx_get_clk_hw_by_name(ccm_node, "ipp_di1");
np = of_find_compatible_node(NULL, NULL, "fsl,imx6sll-anatop");
base = of_iomap(np, 0);
of_node_put(np);
WARN_ON(!base);
/* Do not bypass PLLs initially */
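/*
 * Each write goes to the PLL's CLR shadow register (control offset + 0x8 in
 * the anatop SET/CLR/TOG scheme), so only the BYPASS bit is cleared.
 */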
writel_relaxed(CCM_ANALOG_PLL_BYPASS, base + xPLL_CLR(0x0));
writel_relaxed(CCM_ANALOG_PLL_BYPASS, base + xPLL_CLR(0x10));
writel_relaxed(CCM_ANALOG_PLL_BYPASS, base + xPLL_CLR(0x20));
writel_relaxed(CCM_ANALOG_PLL_BYPASS, base + xPLL_CLR(0x30));
writel_relaxed(CCM_ANALOG_PLL_BYPASS, base + xPLL_CLR(0x70));
writel_relaxed(CCM_ANALOG_PLL_BYPASS, base + xPLL_CLR(0xa0));
writel_relaxed(CCM_ANALOG_PLL_BYPASS, base + xPLL_CLR(0xe0));
hws[IMX6SLL_PLL1_BYPASS_SRC] = imx_clk_hw_mux("pll1_bypass_src", base + 0x00, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
hws[IMX6SLL_PLL2_BYPASS_SRC] = imx_clk_hw_mux("pll2_bypass_src", base + 0x30, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
hws[IMX6SLL_PLL3_BYPASS_SRC] = imx_clk_hw_mux("pll3_bypass_src", base + 0x10, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
hws[IMX6SLL_PLL4_BYPASS_SRC] = imx_clk_hw_mux("pll4_bypass_src", base + 0x70, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
hws[IMX6SLL_PLL5_BYPASS_SRC] = imx_clk_hw_mux("pll5_bypass_src", base + 0xa0, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
hws[IMX6SLL_PLL6_BYPASS_SRC] = imx_clk_hw_mux("pll6_bypass_src", base + 0xe0, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
hws[IMX6SLL_PLL7_BYPASS_SRC] = imx_clk_hw_mux("pll7_bypass_src", base + 0x20, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
hws[IMX6SLL_CLK_PLL1] = imx_clk_hw_pllv3(IMX_PLLV3_SYS, "pll1", "pll1_bypass_src", base + 0x00, 0x7f);
hws[IMX6SLL_CLK_PLL2] = imx_clk_hw_pllv3(IMX_PLLV3_GENERIC, "pll2", "pll2_bypass_src", base + 0x30, 0x1);
hws[IMX6SLL_CLK_PLL3] = imx_clk_hw_pllv3(IMX_PLLV3_USB, "pll3", "pll3_bypass_src", base + 0x10, 0x3);
hws[IMX6SLL_CLK_PLL4] = imx_clk_hw_pllv3(IMX_PLLV3_AV, "pll4", "pll4_bypass_src", base + 0x70, 0x7f);
hws[IMX6SLL_CLK_PLL5] = imx_clk_hw_pllv3(IMX_PLLV3_AV, "pll5", "pll5_bypass_src", base + 0xa0, 0x7f);
hws[IMX6SLL_CLK_PLL6] = imx_clk_hw_pllv3(IMX_PLLV3_ENET, "pll6", "pll6_bypass_src", base + 0xe0, 0x3);
hws[IMX6SLL_CLK_PLL7] = imx_clk_hw_pllv3(IMX_PLLV3_USB, "pll7", "pll7_bypass_src", base + 0x20, 0x3);
hws[IMX6SLL_PLL1_BYPASS] = imx_clk_hw_mux_flags("pll1_bypass", base + 0x00, 16, 1, pll1_bypass_sels, ARRAY_SIZE(pll1_bypass_sels), CLK_SET_RATE_PARENT);
hws[IMX6SLL_PLL2_BYPASS] = imx_clk_hw_mux_flags("pll2_bypass", base + 0x30, 16, 1, pll2_bypass_sels, ARRAY_SIZE(pll2_bypass_sels), CLK_SET_RATE_PARENT);
hws[IMX6SLL_PLL3_BYPASS] = imx_clk_hw_mux_flags("pll3_bypass", base + 0x10, 16, 1, pll3_bypass_sels, ARRAY_SIZE(pll3_bypass_sels), CLK_SET_RATE_PARENT);
hws[IMX6SLL_PLL4_BYPASS] = imx_clk_hw_mux_flags("pll4_bypass", base + 0x70, 16, 1, pll4_bypass_sels, ARRAY_SIZE(pll4_bypass_sels), CLK_SET_RATE_PARENT);
hws[IMX6SLL_PLL5_BYPASS] = imx_clk_hw_mux_flags("pll5_bypass", base + 0xa0, 16, 1, pll5_bypass_sels, ARRAY_SIZE(pll5_bypass_sels), CLK_SET_RATE_PARENT);
hws[IMX6SLL_PLL6_BYPASS] = imx_clk_hw_mux_flags("pll6_bypass", base + 0xe0, 16, 1, pll6_bypass_sels, ARRAY_SIZE(pll6_bypass_sels), CLK_SET_RATE_PARENT);
hws[IMX6SLL_PLL7_BYPASS] = imx_clk_hw_mux_flags("pll7_bypass", base + 0x20, 16, 1, pll7_bypass_sels, ARRAY_SIZE(pll7_bypass_sels), CLK_SET_RATE_PARENT);
hws[IMX6SLL_CLK_PLL1_SYS] = imx_clk_hw_fixed_factor("pll1_sys", "pll1_bypass", 1, 1);
hws[IMX6SLL_CLK_PLL2_BUS] = imx_clk_hw_gate("pll2_bus", "pll2_bypass", base + 0x30, 13);
hws[IMX6SLL_CLK_PLL3_USB_OTG] = imx_clk_hw_gate("pll3_usb_otg", "pll3_bypass", base + 0x10, 13);
hws[IMX6SLL_CLK_PLL4_AUDIO] = imx_clk_hw_gate("pll4_audio", "pll4_bypass", base + 0x70, 13);
hws[IMX6SLL_CLK_PLL5_VIDEO] = imx_clk_hw_gate("pll5_video", "pll5_bypass", base + 0xa0, 13);
hws[IMX6SLL_CLK_PLL6_ENET] = imx_clk_hw_gate("pll6_enet", "pll6_bypass", base + 0xe0, 13);
hws[IMX6SLL_CLK_PLL7_USB_HOST] = imx_clk_hw_gate("pll7_usb_host", "pll7_bypass", base + 0x20, 13);
/*
 * Bit 20 is reserved and read-only; we register a gate on it only to:
 * - make usbphy clk_enable/disable a no-op in hardware
 * - keep a refcount across usbphy clk_enable/disable, since the clk
 *   framework may then need to enable/disable the usbphy's parent
 */
hws[IMX6SLL_CLK_USBPHY1] = imx_clk_hw_gate("usbphy1", "pll3_usb_otg", base + 0x10, 20);
hws[IMX6SLL_CLK_USBPHY2] = imx_clk_hw_gate("usbphy2", "pll7_usb_host", base + 0x20, 20);
/*
 * The usbphy*_gate clocks must stay on once the system has booted;
 * software never needs to control them again.
 */
if (IS_ENABLED(CONFIG_USB_MXS_PHY)) {
hws[IMX6SLL_CLK_USBPHY1_GATE] = imx_clk_hw_gate_flags("usbphy1_gate", "dummy", base + 0x10, 6, CLK_IS_CRITICAL);
hws[IMX6SLL_CLK_USBPHY2_GATE] = imx_clk_hw_gate_flags("usbphy2_gate", "dummy", base + 0x20, 6, CLK_IS_CRITICAL);
}
/* name parent_name reg idx */
hws[IMX6SLL_CLK_PLL2_PFD0] = imx_clk_hw_pfd("pll2_pfd0_352m", "pll2_bus", base + 0x100, 0);
hws[IMX6SLL_CLK_PLL2_PFD1] = imx_clk_hw_pfd("pll2_pfd1_594m", "pll2_bus", base + 0x100, 1);
hws[IMX6SLL_CLK_PLL2_PFD2] = imx_clk_hw_pfd("pll2_pfd2_396m", "pll2_bus", base + 0x100, 2);
hws[IMX6SLL_CLK_PLL2_PFD3] = imx_clk_hw_pfd("pll2_pfd3_594m", "pll2_bus", base + 0x100, 3);
hws[IMX6SLL_CLK_PLL3_PFD0] = imx_clk_hw_pfd("pll3_pfd0_720m", "pll3_usb_otg", base + 0xf0, 0);
hws[IMX6SLL_CLK_PLL3_PFD1] = imx_clk_hw_pfd("pll3_pfd1_540m", "pll3_usb_otg", base + 0xf0, 1);
hws[IMX6SLL_CLK_PLL3_PFD2] = imx_clk_hw_pfd("pll3_pfd2_508m", "pll3_usb_otg", base + 0xf0, 2);
hws[IMX6SLL_CLK_PLL3_PFD3] = imx_clk_hw_pfd("pll3_pfd3_454m", "pll3_usb_otg", base + 0xf0, 3);
hws[IMX6SLL_CLK_PLL4_POST_DIV] = clk_hw_register_divider_table(NULL, "pll4_post_div", "pll4_audio",
CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0x70, 19, 2, 0, post_div_table, &imx_ccm_lock);
hws[IMX6SLL_CLK_PLL4_AUDIO_DIV] = clk_hw_register_divider(NULL, "pll4_audio_div", "pll4_post_div",
CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0x170, 15, 1, 0, &imx_ccm_lock);
hws[IMX6SLL_CLK_PLL5_POST_DIV] = clk_hw_register_divider_table(NULL, "pll5_post_div", "pll5_video",
CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0xa0, 19, 2, 0, post_div_table, &imx_ccm_lock);
hws[IMX6SLL_CLK_PLL5_VIDEO_DIV] = clk_hw_register_divider_table(NULL, "pll5_video_div", "pll5_post_div",
CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0x170, 30, 2, 0, video_div_table, &imx_ccm_lock);
/* name parent_name mult div */
hws[IMX6SLL_CLK_PLL2_198M] = imx_clk_hw_fixed_factor("pll2_198m", "pll2_pfd2_396m", 1, 2);
hws[IMX6SLL_CLK_PLL3_120M] = imx_clk_hw_fixed_factor("pll3_120m", "pll3_usb_otg", 1, 4);
hws[IMX6SLL_CLK_PLL3_80M] = imx_clk_hw_fixed_factor("pll3_80m", "pll3_usb_otg", 1, 6);
hws[IMX6SLL_CLK_PLL3_60M] = imx_clk_hw_fixed_factor("pll3_60m", "pll3_usb_otg", 1, 8);
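/* Switch from the anatop to the CCM register space for the muxes, dividers and gates below. */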
np = ccm_node;
base = of_iomap(np, 0);
WARN_ON(!base);
hws[IMX6SLL_CLK_STEP] = imx_clk_hw_mux("step", base + 0x0c, 8, 1, step_sels, ARRAY_SIZE(step_sels));
hws[IMX6SLL_CLK_PLL1_SW] = imx_clk_hw_mux_flags("pll1_sw", base + 0x0c, 2, 1, pll1_sw_sels, ARRAY_SIZE(pll1_sw_sels), 0);
hws[IMX6SLL_CLK_AXI_ALT_SEL] = imx_clk_hw_mux("axi_alt_sel", base + 0x14, 7, 1, axi_alt_sels, ARRAY_SIZE(axi_alt_sels));
hws[IMX6SLL_CLK_AXI_SEL] = imx_clk_hw_mux_flags("axi_sel", base + 0x14, 6, 1, axi_sels, ARRAY_SIZE(axi_sels), 0);
hws[IMX6SLL_CLK_PERIPH_PRE] = imx_clk_hw_mux("periph_pre", base + 0x18, 18, 2, periph_pre_sels, ARRAY_SIZE(periph_pre_sels));
hws[IMX6SLL_CLK_PERIPH2_PRE] = imx_clk_hw_mux("periph2_pre", base + 0x18, 21, 2, periph2_pre_sels, ARRAY_SIZE(periph2_pre_sels));
hws[IMX6SLL_CLK_PERIPH_CLK2_SEL] = imx_clk_hw_mux("periph_clk2_sel", base + 0x18, 12, 2, periph_clk2_sels, ARRAY_SIZE(periph_clk2_sels));
hws[IMX6SLL_CLK_PERIPH2_CLK2_SEL] = imx_clk_hw_mux("periph2_clk2_sel", base + 0x18, 20, 1, periph2_clk2_sels, ARRAY_SIZE(periph2_clk2_sels));
hws[IMX6SLL_CLK_USDHC1_SEL] = imx_clk_hw_mux("usdhc1_sel", base + 0x1c, 16, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels));
hws[IMX6SLL_CLK_USDHC2_SEL] = imx_clk_hw_mux("usdhc2_sel", base + 0x1c, 17, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels));
hws[IMX6SLL_CLK_USDHC3_SEL] = imx_clk_hw_mux("usdhc3_sel", base + 0x1c, 18, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels));
hws[IMX6SLL_CLK_SSI1_SEL] = imx_clk_hw_mux("ssi1_sel", base + 0x1c, 10, 2, ssi_sels, ARRAY_SIZE(ssi_sels));
hws[IMX6SLL_CLK_SSI2_SEL] = imx_clk_hw_mux("ssi2_sel", base + 0x1c, 12, 2, ssi_sels, ARRAY_SIZE(ssi_sels));
hws[IMX6SLL_CLK_SSI3_SEL] = imx_clk_hw_mux("ssi3_sel", base + 0x1c, 14, 2, ssi_sels, ARRAY_SIZE(ssi_sels));
hws[IMX6SLL_CLK_PERCLK_SEL] = imx_clk_hw_mux("perclk_sel", base + 0x1c, 6, 1, perclk_sels, ARRAY_SIZE(perclk_sels));
hws[IMX6SLL_CLK_UART_SEL] = imx_clk_hw_mux("uart_sel", base + 0x24, 6, 1, uart_sels, ARRAY_SIZE(uart_sels));
hws[IMX6SLL_CLK_SPDIF_SEL] = imx_clk_hw_mux("spdif_sel", base + 0x30, 20, 2, spdif_sels, ARRAY_SIZE(spdif_sels));
hws[IMX6SLL_CLK_EXTERN_AUDIO_SEL] = imx_clk_hw_mux("extern_audio_sel", base + 0x30, 7, 2, spdif_sels, ARRAY_SIZE(spdif_sels));
hws[IMX6SLL_CLK_EPDC_PRE_SEL] = imx_clk_hw_mux("epdc_pre_sel", base + 0x34, 15, 3, epdc_pre_sels, ARRAY_SIZE(epdc_pre_sels));
hws[IMX6SLL_CLK_EPDC_SEL] = imx_clk_hw_mux("epdc_sel", base + 0x34, 9, 3, epdc_sels, ARRAY_SIZE(epdc_sels));
hws[IMX6SLL_CLK_ECSPI_SEL] = imx_clk_hw_mux("ecspi_sel", base + 0x38, 18, 1, ecspi_sels, ARRAY_SIZE(ecspi_sels));
hws[IMX6SLL_CLK_LCDIF_PRE_SEL] = imx_clk_hw_mux("lcdif_pre_sel", base + 0x38, 15, 3, lcdif_pre_sels, ARRAY_SIZE(lcdif_pre_sels));
hws[IMX6SLL_CLK_LCDIF_SEL] = imx_clk_hw_mux("lcdif_sel", base + 0x38, 9, 3, lcdif_sels, ARRAY_SIZE(lcdif_sels));
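/*
 * The busy muxes and busy dividers below also poll a handshake bit in
 * CCM_CDHIPR (base + 0x48) until the parent or rate change completes.
 */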
hws[IMX6SLL_CLK_PERIPH] = imx_clk_hw_busy_mux("periph", base + 0x14, 25, 1, base + 0x48, 5, periph_sels, ARRAY_SIZE(periph_sels));
hws[IMX6SLL_CLK_PERIPH2] = imx_clk_hw_busy_mux("periph2", base + 0x14, 26, 1, base + 0x48, 3, periph2_sels, ARRAY_SIZE(periph2_sels));
hws[IMX6SLL_CLK_PERIPH_CLK2] = imx_clk_hw_divider("periph_clk2", "periph_clk2_sel", base + 0x14, 27, 3);
hws[IMX6SLL_CLK_PERIPH2_CLK2] = imx_clk_hw_divider("periph2_clk2", "periph2_clk2_sel", base + 0x14, 0, 3);
hws[IMX6SLL_CLK_IPG] = imx_clk_hw_divider("ipg", "ahb", base + 0x14, 8, 2);
hws[IMX6SLL_CLK_LCDIF_PODF] = imx_clk_hw_divider("lcdif_podf", "lcdif_pred", base + 0x18, 23, 3);
hws[IMX6SLL_CLK_PERCLK] = imx_clk_hw_divider("perclk", "perclk_sel", base + 0x1c, 0, 6);
hws[IMX6SLL_CLK_USDHC3_PODF] = imx_clk_hw_divider("usdhc3_podf", "usdhc3_sel", base + 0x24, 19, 3);
hws[IMX6SLL_CLK_USDHC2_PODF] = imx_clk_hw_divider("usdhc2_podf", "usdhc2_sel", base + 0x24, 16, 3);
hws[IMX6SLL_CLK_USDHC1_PODF] = imx_clk_hw_divider("usdhc1_podf", "usdhc1_sel", base + 0x24, 11, 3);
hws[IMX6SLL_CLK_UART_PODF] = imx_clk_hw_divider("uart_podf", "uart_sel", base + 0x24, 0, 6);
hws[IMX6SLL_CLK_SSI3_PRED] = imx_clk_hw_divider("ssi3_pred", "ssi3_sel", base + 0x28, 22, 3);
hws[IMX6SLL_CLK_SSI3_PODF] = imx_clk_hw_divider("ssi3_podf", "ssi3_pred", base + 0x28, 16, 6);
hws[IMX6SLL_CLK_SSI1_PRED] = imx_clk_hw_divider("ssi1_pred", "ssi1_sel", base + 0x28, 6, 3);
hws[IMX6SLL_CLK_SSI1_PODF] = imx_clk_hw_divider("ssi1_podf", "ssi1_pred", base + 0x28, 0, 6);
hws[IMX6SLL_CLK_SSI2_PRED] = imx_clk_hw_divider("ssi2_pred", "ssi2_sel", base + 0x2c, 6, 3);
hws[IMX6SLL_CLK_SSI2_PODF] = imx_clk_hw_divider("ssi2_podf", "ssi2_pred", base + 0x2c, 0, 6);
hws[IMX6SLL_CLK_SPDIF_PRED] = imx_clk_hw_divider("spdif_pred", "spdif_sel", base + 0x30, 25, 3);
hws[IMX6SLL_CLK_SPDIF_PODF] = imx_clk_hw_divider("spdif_podf", "spdif_pred", base + 0x30, 22, 3);
hws[IMX6SLL_CLK_EXTERN_AUDIO_PRED] = imx_clk_hw_divider("extern_audio_pred", "extern_audio_sel", base + 0x30, 12, 3);
hws[IMX6SLL_CLK_EXTERN_AUDIO_PODF] = imx_clk_hw_divider("extern_audio_podf", "extern_audio_pred", base + 0x30, 9, 3);
hws[IMX6SLL_CLK_EPDC_PODF] = imx_clk_hw_divider("epdc_podf", "epdc_pre_sel", base + 0x34, 12, 3);
hws[IMX6SLL_CLK_ECSPI_PODF] = imx_clk_hw_divider("ecspi_podf", "ecspi_sel", base + 0x38, 19, 6);
hws[IMX6SLL_CLK_LCDIF_PRED] = imx_clk_hw_divider("lcdif_pred", "lcdif_pre_sel", base + 0x38, 12, 3);
hws[IMX6SLL_CLK_ARM] = imx_clk_hw_busy_divider("arm", "pll1_sw", base + 0x10, 0, 3, base + 0x48, 16);
hws[IMX6SLL_CLK_MMDC_PODF] = imx_clk_hw_busy_divider("mmdc_podf", "periph2", base + 0x14, 3, 3, base + 0x48, 2);
hws[IMX6SLL_CLK_AXI_PODF] = imx_clk_hw_busy_divider("axi", "axi_sel", base + 0x14, 16, 3, base + 0x48, 0);
hws[IMX6SLL_CLK_AHB] = imx_clk_hw_busy_divider("ahb", "periph", base + 0x14, 10, 3, base + 0x48, 1);
hws[IMX6SLL_CLK_LDB_DI0_DIV_3_5] = imx_clk_hw_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7);
hws[IMX6SLL_CLK_LDB_DI0_DIV_7] = imx_clk_hw_fixed_factor("ldb_di0_div_7", "ldb_di0_sel", 1, 7);
hws[IMX6SLL_CLK_LDB_DI1_DIV_3_5] = imx_clk_hw_fixed_factor("ldb_di1_div_3_5", "ldb_di1_sel", 2, 7);
hws[IMX6SLL_CLK_LDB_DI1_DIV_7] = imx_clk_hw_fixed_factor("ldb_di1_div_7", "ldb_di1_sel", 1, 7);
hws[IMX6SLL_CLK_LDB_DI0_SEL] = imx_clk_hw_mux("ldb_di0_sel", base + 0x2c, 9, 3, ldb_di0_sels, ARRAY_SIZE(ldb_di0_sels));
hws[IMX6SLL_CLK_LDB_DI1_SEL] = imx_clk_hw_mux("ldb_di1_sel", base + 0x1c, 7, 3, ldb_di1_sels, ARRAY_SIZE(ldb_di1_sels));
hws[IMX6SLL_CLK_LDB_DI0_DIV_SEL] = imx_clk_hw_mux("ldb_di0_div_sel", base + 0x20, 10, 1, ldb_di0_div_sels, ARRAY_SIZE(ldb_di0_div_sels));
hws[IMX6SLL_CLK_LDB_DI1_DIV_SEL] = imx_clk_hw_mux("ldb_di1_div_sel", base + 0x20, 10, 1, ldb_di1_div_sels, ARRAY_SIZE(ldb_di1_div_sels));
/* CCGR0 */
hws[IMX6SLL_CLK_AIPSTZ1] = imx_clk_hw_gate2_flags("aips_tz1", "ahb", base + 0x68, 0, CLK_IS_CRITICAL);
hws[IMX6SLL_CLK_AIPSTZ2] = imx_clk_hw_gate2_flags("aips_tz2", "ahb", base + 0x68, 2, CLK_IS_CRITICAL);
hws[IMX6SLL_CLK_DCP] = imx_clk_hw_gate2("dcp", "ahb", base + 0x68, 10);
hws[IMX6SLL_CLK_UART2_IPG] = imx_clk_hw_gate2("uart2_ipg", "ipg", base + 0x68, 28);
hws[IMX6SLL_CLK_UART2_SERIAL] = imx_clk_hw_gate2("uart2_serial", "uart_podf", base + 0x68, 28);
hws[IMX6SLL_CLK_GPIO2] = imx_clk_hw_gate2("gpio2", "ipg", base + 0x68, 30);
/* CCGR1 */
hws[IMX6SLL_CLK_ECSPI1] = imx_clk_hw_gate2("ecspi1", "ecspi_podf", base + 0x6c, 0);
hws[IMX6SLL_CLK_ECSPI2] = imx_clk_hw_gate2("ecspi2", "ecspi_podf", base + 0x6c, 2);
hws[IMX6SLL_CLK_ECSPI3] = imx_clk_hw_gate2("ecspi3", "ecspi_podf", base + 0x6c, 4);
hws[IMX6SLL_CLK_ECSPI4] = imx_clk_hw_gate2("ecspi4", "ecspi_podf", base + 0x6c, 6);
hws[IMX6SLL_CLK_UART3_IPG] = imx_clk_hw_gate2("uart3_ipg", "ipg", base + 0x6c, 10);
hws[IMX6SLL_CLK_UART3_SERIAL] = imx_clk_hw_gate2("uart3_serial", "uart_podf", base + 0x6c, 10);
hws[IMX6SLL_CLK_EPIT1] = imx_clk_hw_gate2("epit1", "perclk", base + 0x6c, 12);
hws[IMX6SLL_CLK_EPIT2] = imx_clk_hw_gate2("epit2", "perclk", base + 0x6c, 14);
hws[IMX6SLL_CLK_GPT_BUS] = imx_clk_hw_gate2("gpt1_bus", "perclk", base + 0x6c, 20);
hws[IMX6SLL_CLK_GPT_SERIAL] = imx_clk_hw_gate2("gpt1_serial", "perclk", base + 0x6c, 22);
hws[IMX6SLL_CLK_UART4_IPG] = imx_clk_hw_gate2("uart4_ipg", "ipg", base + 0x6c, 24);
hws[IMX6SLL_CLK_UART4_SERIAL] = imx_clk_hw_gate2("uart4_serial", "uart_podf", base + 0x6c, 24);
hws[IMX6SLL_CLK_GPIO1] = imx_clk_hw_gate2("gpio1", "ipg", base + 0x6c, 26);
hws[IMX6SLL_CLK_GPIO5] = imx_clk_hw_gate2("gpio5", "ipg", base + 0x6c, 30);
/* CCGR2 */
hws[IMX6SLL_CLK_GPIO6] = imx_clk_hw_gate2("gpio6", "ipg", base + 0x70, 0);
hws[IMX6SLL_CLK_CSI] = imx_clk_hw_gate2("csi", "axi", base + 0x70, 2);
hws[IMX6SLL_CLK_I2C1] = imx_clk_hw_gate2("i2c1", "perclk", base + 0x70, 6);
hws[IMX6SLL_CLK_I2C2] = imx_clk_hw_gate2("i2c2", "perclk", base + 0x70, 8);
hws[IMX6SLL_CLK_I2C3] = imx_clk_hw_gate2("i2c3", "perclk", base + 0x70, 10);
hws[IMX6SLL_CLK_OCOTP] = imx_clk_hw_gate2("ocotp", "ipg", base + 0x70, 12);
hws[IMX6SLL_CLK_GPIO3] = imx_clk_hw_gate2("gpio3", "ipg", base + 0x70, 26);
hws[IMX6SLL_CLK_LCDIF_APB] = imx_clk_hw_gate2("lcdif_apb", "axi", base + 0x70, 28);
hws[IMX6SLL_CLK_PXP] = imx_clk_hw_gate2("pxp", "axi", base + 0x70, 30);
/* CCGR3 */
hws[IMX6SLL_CLK_UART5_IPG] = imx_clk_hw_gate2("uart5_ipg", "ipg", base + 0x74, 2);
hws[IMX6SLL_CLK_UART5_SERIAL] = imx_clk_hw_gate2("uart5_serial", "uart_podf", base + 0x74, 2);
hws[IMX6SLL_CLK_EPDC_AXI] = imx_clk_hw_gate2("epdc_aclk", "axi", base + 0x74, 4);
hws[IMX6SLL_CLK_EPDC_PIX] = imx_clk_hw_gate2("epdc_pix", "epdc_podf", base + 0x74, 4);
hws[IMX6SLL_CLK_LCDIF_PIX] = imx_clk_hw_gate2("lcdif_pix", "lcdif_podf", base + 0x74, 10);
hws[IMX6SLL_CLK_GPIO4] = imx_clk_hw_gate2("gpio4", "ipg", base + 0x74, 12);
hws[IMX6SLL_CLK_WDOG1] = imx_clk_hw_gate2("wdog1", "ipg", base + 0x74, 16);
hws[IMX6SLL_CLK_MMDC_P0_FAST] = imx_clk_hw_gate_flags("mmdc_p0_fast", "mmdc_podf", base + 0x74, 20, CLK_IS_CRITICAL);
hws[IMX6SLL_CLK_MMDC_P0_IPG] = imx_clk_hw_gate2_flags("mmdc_p0_ipg", "ipg", base + 0x74, 24, CLK_IS_CRITICAL);
hws[IMX6SLL_CLK_MMDC_P1_IPG] = imx_clk_hw_gate2_flags("mmdc_p1_ipg", "ipg", base + 0x74, 26, CLK_IS_CRITICAL);
hws[IMX6SLL_CLK_OCRAM] = imx_clk_hw_gate_flags("ocram", "ahb", base + 0x74, 28, CLK_IS_CRITICAL);
/* CCGR4 */
hws[IMX6SLL_CLK_PWM1] = imx_clk_hw_gate2("pwm1", "perclk", base + 0x78, 16);
hws[IMX6SLL_CLK_PWM2] = imx_clk_hw_gate2("pwm2", "perclk", base + 0x78, 18);
hws[IMX6SLL_CLK_PWM3] = imx_clk_hw_gate2("pwm3", "perclk", base + 0x78, 20);
hws[IMX6SLL_CLK_PWM4] = imx_clk_hw_gate2("pwm4", "perclk", base + 0x78, 22);
/* CCGR5 */
hws[IMX6SLL_CLK_ROM] = imx_clk_hw_gate2_flags("rom", "ahb", base + 0x7c, 0, CLK_IS_CRITICAL);
hws[IMX6SLL_CLK_SDMA] = imx_clk_hw_gate2("sdma", "ahb", base + 0x7c, 6);
hws[IMX6SLL_CLK_WDOG2] = imx_clk_hw_gate2("wdog2", "ipg", base + 0x7c, 10);
hws[IMX6SLL_CLK_SPBA] = imx_clk_hw_gate2("spba", "ipg", base + 0x7c, 12);
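/*
 * The gates below share a single CCGR field per peripheral block; the
 * share_count_* counters reference-count them so the hardware bit
 * stays set while any of the sharing clocks is still enabled.
 */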
hws[IMX6SLL_CLK_EXTERN_AUDIO] = imx_clk_hw_gate2_shared("extern_audio", "extern_audio_podf", base + 0x7c, 14, &share_count_audio);
hws[IMX6SLL_CLK_SPDIF] = imx_clk_hw_gate2_shared("spdif", "spdif_podf", base + 0x7c, 14, &share_count_audio);
hws[IMX6SLL_CLK_SPDIF_GCLK] = imx_clk_hw_gate2_shared("spdif_gclk", "ipg", base + 0x7c, 14, &share_count_audio);
hws[IMX6SLL_CLK_SSI1] = imx_clk_hw_gate2_shared("ssi1", "ssi1_podf", base + 0x7c, 18, &share_count_ssi1);
hws[IMX6SLL_CLK_SSI1_IPG] = imx_clk_hw_gate2_shared("ssi1_ipg", "ipg", base + 0x7c, 18, &share_count_ssi1);
hws[IMX6SLL_CLK_SSI2] = imx_clk_hw_gate2_shared("ssi2", "ssi2_podf", base + 0x7c, 20, &share_count_ssi2);
hws[IMX6SLL_CLK_SSI2_IPG] = imx_clk_hw_gate2_shared("ssi2_ipg", "ipg", base + 0x7c, 20, &share_count_ssi2);
hws[IMX6SLL_CLK_SSI3] = imx_clk_hw_gate2_shared("ssi3", "ssi3_podf", base + 0x7c, 22, &share_count_ssi3);
hws[IMX6SLL_CLK_SSI3_IPG] = imx_clk_hw_gate2_shared("ssi3_ipg", "ipg", base + 0x7c, 22, &share_count_ssi3);
hws[IMX6SLL_CLK_UART1_IPG] = imx_clk_hw_gate2("uart1_ipg", "ipg", base + 0x7c, 24);
hws[IMX6SLL_CLK_UART1_SERIAL] = imx_clk_hw_gate2("uart1_serial", "uart_podf", base + 0x7c, 24);
/* CCGR6 */
hws[IMX6SLL_CLK_USBOH3] = imx_clk_hw_gate2("usboh3", "ipg", base + 0x80, 0);
hws[IMX6SLL_CLK_USDHC1] = imx_clk_hw_gate2("usdhc1", "usdhc1_podf", base + 0x80, 2);
hws[IMX6SLL_CLK_USDHC2] = imx_clk_hw_gate2("usdhc2", "usdhc2_podf", base + 0x80, 4);
hws[IMX6SLL_CLK_USDHC3] = imx_clk_hw_gate2("usdhc3", "usdhc3_podf", base + 0x80, 6);
/* Bypass (mask) the MMDC channel 0 handshake with the CCM */
imx_mmdc_mask_handshake(base, 0);
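/* Report any clock above that failed to register */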
imx_check_clk_hws(hws, IMX6SLL_CLK_END);
of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
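/* Keep the UART clocks on during init so earlycon output is not cut off */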
imx_register_uart_clocks();
/* Lower the AHB clock rate before changing the clock source. */
clk_set_rate(hws[IMX6SLL_CLK_AHB]->clk, 99000000);
/* Change periph_pre clock to pll2_bus to adjust AXI rate to 264MHz */
clk_set_parent(hws[IMX6SLL_CLK_PERIPH_CLK2_SEL]->clk, hws[IMX6SLL_CLK_PLL3_USB_OTG]->clk);
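/*
 * Park periph on periph_clk2 while periph_pre is reparented to
 * pll2_bus, then switch back so the bus clock change is glitch-free.
 */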
clk_set_parent(hws[IMX6SLL_CLK_PERIPH]->clk, hws[IMX6SLL_CLK_PERIPH_CLK2]->clk);
clk_set_parent(hws[IMX6SLL_CLK_PERIPH_PRE]->clk, hws[IMX6SLL_CLK_PLL2_BUS]->clk);
clk_set_parent(hws[IMX6SLL_CLK_PERIPH]->clk, hws[IMX6SLL_CLK_PERIPH_PRE]->clk);
clk_set_rate(hws[IMX6SLL_CLK_AHB]->clk, 132000000);
}
CLK_OF_DECLARE_DRIVER(imx6sll, "fsl,imx6sll-ccm", imx6sll_clocks_init);
| linux-master | drivers/clk/imx/clk-imx6sll.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2019-2021 NXP
* Dong Aisheng <[email protected]>
*/
#include <dt-bindings/firmware/imx/rsrc.h>
#include "clk-scu.h"
/* Keep sorted in the ascending order */
static const u32 imx8qm_clk_scu_rsrc_table[] = {
IMX_SC_R_A53,
IMX_SC_R_A72,
IMX_SC_R_DC_0_VIDEO0,
IMX_SC_R_DC_0_VIDEO1,
IMX_SC_R_DC_0,
IMX_SC_R_DC_0_PLL_0,
IMX_SC_R_DC_0_PLL_1,
IMX_SC_R_DC_1_VIDEO0,
IMX_SC_R_DC_1_VIDEO1,
IMX_SC_R_DC_1,
IMX_SC_R_DC_1_PLL_0,
IMX_SC_R_DC_1_PLL_1,
IMX_SC_R_SPI_0,
IMX_SC_R_SPI_1,
IMX_SC_R_SPI_2,
IMX_SC_R_SPI_3,
IMX_SC_R_UART_0,
IMX_SC_R_UART_1,
IMX_SC_R_UART_2,
IMX_SC_R_UART_3,
IMX_SC_R_UART_4,
IMX_SC_R_EMVSIM_0,
IMX_SC_R_EMVSIM_1,
IMX_SC_R_I2C_0,
IMX_SC_R_I2C_1,
IMX_SC_R_I2C_2,
IMX_SC_R_I2C_3,
IMX_SC_R_I2C_4,
IMX_SC_R_ADC_0,
IMX_SC_R_ADC_1,
IMX_SC_R_FTM_0,
IMX_SC_R_FTM_1,
IMX_SC_R_CAN_0,
IMX_SC_R_GPU_0_PID0,
IMX_SC_R_GPU_1_PID0,
IMX_SC_R_PWM_0,
IMX_SC_R_PWM_1,
IMX_SC_R_PWM_2,
IMX_SC_R_PWM_3,
IMX_SC_R_PWM_4,
IMX_SC_R_PWM_5,
IMX_SC_R_PWM_6,
IMX_SC_R_PWM_7,
IMX_SC_R_GPT_0,
IMX_SC_R_GPT_1,
IMX_SC_R_GPT_2,
IMX_SC_R_GPT_3,
IMX_SC_R_GPT_4,
IMX_SC_R_FSPI_0,
IMX_SC_R_FSPI_1,
IMX_SC_R_SDHC_0,
IMX_SC_R_SDHC_1,
IMX_SC_R_SDHC_2,
IMX_SC_R_ENET_0,
IMX_SC_R_ENET_1,
IMX_SC_R_MLB_0,
IMX_SC_R_USB_2,
IMX_SC_R_NAND,
IMX_SC_R_LVDS_0,
IMX_SC_R_LVDS_0_PWM_0,
IMX_SC_R_LVDS_0_I2C_0,
IMX_SC_R_LVDS_0_I2C_1,
IMX_SC_R_LVDS_1,
IMX_SC_R_LVDS_1_PWM_0,
IMX_SC_R_LVDS_1_I2C_0,
IMX_SC_R_LVDS_1_I2C_1,
IMX_SC_R_M4_0_I2C,
IMX_SC_R_M4_1_I2C,
IMX_SC_R_AUDIO_PLL_0,
IMX_SC_R_VPU_UART,
IMX_SC_R_VPUCORE,
IMX_SC_R_MIPI_0,
IMX_SC_R_MIPI_0_PWM_0,
IMX_SC_R_MIPI_0_I2C_0,
IMX_SC_R_MIPI_0_I2C_1,
IMX_SC_R_MIPI_1,
IMX_SC_R_MIPI_1_PWM_0,
IMX_SC_R_MIPI_1_I2C_0,
IMX_SC_R_MIPI_1_I2C_1,
IMX_SC_R_CSI_0,
IMX_SC_R_CSI_0_PWM_0,
IMX_SC_R_CSI_0_I2C_0,
IMX_SC_R_CSI_1,
IMX_SC_R_CSI_1_PWM_0,
IMX_SC_R_CSI_1_I2C_0,
IMX_SC_R_HDMI,
IMX_SC_R_HDMI_I2S,
IMX_SC_R_HDMI_I2C_0,
IMX_SC_R_HDMI_PLL_0,
IMX_SC_R_HDMI_RX,
IMX_SC_R_HDMI_RX_BYPASS,
IMX_SC_R_HDMI_RX_I2C_0,
IMX_SC_R_AUDIO_PLL_1,
IMX_SC_R_AUDIO_CLK_0,
IMX_SC_R_AUDIO_CLK_1,
IMX_SC_R_HDMI_RX_PWM_0,
IMX_SC_R_HDMI_PLL_1,
IMX_SC_R_VPU,
};
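/* Exported table; the SCU clock driver selects it via OF match data */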
const struct imx_clk_scu_rsrc_table imx_clk_scu_rsrc_imx8qm = {
.rsrc = imx8qm_clk_scu_rsrc_table,
.num = ARRAY_SIZE(imx8qm_clk_scu_rsrc_table),
};
| linux-master | drivers/clk/imx/clk-imx8qm-rsrc.c |