python_code (stringlengths 0 to 1.8M) | repo_name (stringclasses, 7 values) | file_path (stringlengths 5 to 99) |
---|---|---|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2012 Alexandre Pereira da Silva <[email protected]>
*/
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/slab.h>
struct lpc32xx_pwm_chip {
struct pwm_chip chip;
struct clk *clk;
void __iomem *base;
};
#define PWM_ENABLE BIT(31)
#define PWM_PIN_LEVEL BIT(30)
#define to_lpc32xx_pwm_chip(_chip) \
container_of(_chip, struct lpc32xx_pwm_chip, chip)
static int lpc32xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
int duty_ns, int period_ns)
{
struct lpc32xx_pwm_chip *lpc32xx = to_lpc32xx_pwm_chip(chip);
unsigned long long c;
int period_cycles, duty_cycles;
u32 val;
c = clk_get_rate(lpc32xx->clk);
/* The highest acceptable divisor is 256, which is represented by 0 */
period_cycles = div64_u64(c * period_ns,
(unsigned long long)NSEC_PER_SEC * 256);
if (!period_cycles || period_cycles > 256)
return -ERANGE;
if (period_cycles == 256)
period_cycles = 0;
/* Compute 256 x #duty/period value and care for corner cases */
duty_cycles = div64_u64((unsigned long long)(period_ns - duty_ns) * 256,
period_ns);
if (!duty_cycles)
duty_cycles = 1;
if (duty_cycles > 255)
duty_cycles = 255;
val = readl(lpc32xx->base);
val &= ~0xFFFF;
val |= (period_cycles << 8) | duty_cycles;
writel(val, lpc32xx->base);
return 0;
}
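/*
 * Worked example (illustrative; assumes a hypothetical 32768 Hz input clock):
 * for period_ns = 1000000000 (1 s),
 * period_cycles = 32768 * 1000000000 / (1000000000 * 256) = 128
 * and for duty_ns = 500000000 (50%),
 * duty_cycles = (1000000000 - 500000000) * 256 / 1000000000 = 128,
 * so bits 15:8 and bits 7:0 of the register both hold 128. Note that the
 * duty field encodes the inactive part of the period, which is why the
 * numerator above is period_ns - duty_ns.
 */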
static int lpc32xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct lpc32xx_pwm_chip *lpc32xx = to_lpc32xx_pwm_chip(chip);
u32 val;
int ret;
ret = clk_prepare_enable(lpc32xx->clk);
if (ret)
return ret;
val = readl(lpc32xx->base);
val |= PWM_ENABLE;
writel(val, lpc32xx->base);
return 0;
}
static void lpc32xx_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct lpc32xx_pwm_chip *lpc32xx = to_lpc32xx_pwm_chip(chip);
u32 val;
val = readl(lpc32xx->base);
val &= ~PWM_ENABLE;
writel(val, lpc32xx->base);
clk_disable_unprepare(lpc32xx->clk);
}
static int lpc32xx_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
int err;
if (state->polarity != PWM_POLARITY_NORMAL)
return -EINVAL;
if (!state->enabled) {
if (pwm->state.enabled)
lpc32xx_pwm_disable(chip, pwm);
return 0;
}
err = lpc32xx_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period);
if (err)
return err;
if (!pwm->state.enabled)
err = lpc32xx_pwm_enable(chip, pwm);
return err;
}
static const struct pwm_ops lpc32xx_pwm_ops = {
.apply = lpc32xx_pwm_apply,
.owner = THIS_MODULE,
};
static int lpc32xx_pwm_probe(struct platform_device *pdev)
{
struct lpc32xx_pwm_chip *lpc32xx;
int ret;
u32 val;
lpc32xx = devm_kzalloc(&pdev->dev, sizeof(*lpc32xx), GFP_KERNEL);
if (!lpc32xx)
return -ENOMEM;
lpc32xx->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(lpc32xx->base))
return PTR_ERR(lpc32xx->base);
lpc32xx->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(lpc32xx->clk))
return PTR_ERR(lpc32xx->clk);
lpc32xx->chip.dev = &pdev->dev;
lpc32xx->chip.ops = &lpc32xx_pwm_ops;
lpc32xx->chip.npwm = 1;
/* If PWM is disabled, configure the output to the default value */
val = readl(lpc32xx->base);
val &= ~PWM_PIN_LEVEL;
writel(val, lpc32xx->base);
ret = devm_pwmchip_add(&pdev->dev, &lpc32xx->chip);
if (ret < 0) {
dev_err(&pdev->dev, "failed to add PWM chip, error %d\n", ret);
return ret;
}
return 0;
}
static const struct of_device_id lpc32xx_pwm_dt_ids[] = {
{ .compatible = "nxp,lpc3220-pwm", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, lpc32xx_pwm_dt_ids);
static struct platform_driver lpc32xx_pwm_driver = {
.driver = {
.name = "lpc32xx-pwm",
.of_match_table = lpc32xx_pwm_dt_ids,
},
.probe = lpc32xx_pwm_probe,
};
module_platform_driver(lpc32xx_pwm_driver);
MODULE_ALIAS("platform:lpc32xx-pwm");
MODULE_AUTHOR("Alexandre Pereira da Silva <[email protected]>");
MODULE_DESCRIPTION("LPC32XX PWM Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/pwm/pwm-lpc32xx.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Driver for Allwinner sun4i Pulse Width Modulation Controller
*
* Copyright (C) 2014 Alexandre Belloni <[email protected]>
*
* Limitations:
* - When outputting the source clock directly, the PWM logic will be bypassed
* and the currently running period is not guaranteed to be completed
*/
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/time.h>
#define PWM_CTRL_REG 0x0
#define PWM_CH_PRD_BASE 0x4
#define PWM_CH_PRD_OFFSET 0x4
#define PWM_CH_PRD(ch) (PWM_CH_PRD_BASE + PWM_CH_PRD_OFFSET * (ch))
#define PWMCH_OFFSET 15
#define PWM_PRESCAL_MASK GENMASK(3, 0)
#define PWM_PRESCAL_OFF 0
#define PWM_EN BIT(4)
#define PWM_ACT_STATE BIT(5)
#define PWM_CLK_GATING BIT(6)
#define PWM_MODE BIT(7)
#define PWM_PULSE BIT(8)
#define PWM_BYPASS BIT(9)
#define PWM_RDY_BASE 28
#define PWM_RDY_OFFSET 1
#define PWM_RDY(ch) BIT(PWM_RDY_BASE + PWM_RDY_OFFSET * (ch))
#define PWM_PRD(prd) (((prd) - 1) << 16)
#define PWM_PRD_MASK GENMASK(15, 0)
#define PWM_DTY_MASK GENMASK(15, 0)
#define PWM_REG_PRD(reg) ((((reg) >> 16) & PWM_PRD_MASK) + 1)
#define PWM_REG_DTY(reg) ((reg) & PWM_DTY_MASK)
#define PWM_REG_PRESCAL(reg, chan) (((reg) >> ((chan) * PWMCH_OFFSET)) & PWM_PRESCAL_MASK)
#define BIT_CH(bit, chan) ((bit) << ((chan) * PWMCH_OFFSET))
static const u32 prescaler_table[] = {
120,
180,
240,
360,
480,
0,
0,
0,
12000,
24000,
36000,
48000,
72000,
0,
0,
0, /* Actually 1 but tested separately */
};
struct sun4i_pwm_data {
bool has_prescaler_bypass;
bool has_direct_mod_clk_output;
unsigned int npwm;
};
struct sun4i_pwm_chip {
struct pwm_chip chip;
struct clk *bus_clk;
struct clk *clk;
struct reset_control *rst;
void __iomem *base;
spinlock_t ctrl_lock;
const struct sun4i_pwm_data *data;
};
static inline struct sun4i_pwm_chip *to_sun4i_pwm_chip(struct pwm_chip *chip)
{
return container_of(chip, struct sun4i_pwm_chip, chip);
}
static inline u32 sun4i_pwm_readl(struct sun4i_pwm_chip *chip,
unsigned long offset)
{
return readl(chip->base + offset);
}
static inline void sun4i_pwm_writel(struct sun4i_pwm_chip *chip,
u32 val, unsigned long offset)
{
writel(val, chip->base + offset);
}
static int sun4i_pwm_get_state(struct pwm_chip *chip,
struct pwm_device *pwm,
struct pwm_state *state)
{
struct sun4i_pwm_chip *sun4i_pwm = to_sun4i_pwm_chip(chip);
u64 clk_rate, tmp;
u32 val;
unsigned int prescaler;
clk_rate = clk_get_rate(sun4i_pwm->clk);
if (!clk_rate)
return -EINVAL;
val = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);
/*
* The PWM chapter in the H6 manual has a diagram which explains that if the
* bypass bit is set, no other setting has any meaning. Moreover, experiments
* proved that the enable bit is also ignored in this case.
*/
if ((val & BIT_CH(PWM_BYPASS, pwm->hwpwm)) &&
sun4i_pwm->data->has_direct_mod_clk_output) {
state->period = DIV_ROUND_UP_ULL(NSEC_PER_SEC, clk_rate);
state->duty_cycle = DIV_ROUND_UP_ULL(state->period, 2);
state->polarity = PWM_POLARITY_NORMAL;
state->enabled = true;
return 0;
}
if ((PWM_REG_PRESCAL(val, pwm->hwpwm) == PWM_PRESCAL_MASK) &&
sun4i_pwm->data->has_prescaler_bypass)
prescaler = 1;
else
prescaler = prescaler_table[PWM_REG_PRESCAL(val, pwm->hwpwm)];
if (prescaler == 0)
return -EINVAL;
if (val & BIT_CH(PWM_ACT_STATE, pwm->hwpwm))
state->polarity = PWM_POLARITY_NORMAL;
else
state->polarity = PWM_POLARITY_INVERSED;
if ((val & BIT_CH(PWM_CLK_GATING | PWM_EN, pwm->hwpwm)) ==
BIT_CH(PWM_CLK_GATING | PWM_EN, pwm->hwpwm))
state->enabled = true;
else
state->enabled = false;
val = sun4i_pwm_readl(sun4i_pwm, PWM_CH_PRD(pwm->hwpwm));
tmp = (u64)prescaler * NSEC_PER_SEC * PWM_REG_DTY(val);
state->duty_cycle = DIV_ROUND_CLOSEST_ULL(tmp, clk_rate);
tmp = (u64)prescaler * NSEC_PER_SEC * PWM_REG_PRD(val);
state->period = DIV_ROUND_CLOSEST_ULL(tmp, clk_rate);
return 0;
}
static int sun4i_pwm_calculate(struct sun4i_pwm_chip *sun4i_pwm,
const struct pwm_state *state,
u32 *dty, u32 *prd, unsigned int *prsclr,
bool *bypass)
{
u64 clk_rate, div = 0;
unsigned int prescaler = 0;
clk_rate = clk_get_rate(sun4i_pwm->clk);
*bypass = sun4i_pwm->data->has_direct_mod_clk_output &&
state->enabled &&
(state->period * clk_rate >= NSEC_PER_SEC) &&
(state->period * clk_rate < 2 * NSEC_PER_SEC) &&
(state->duty_cycle * clk_rate * 2 >= NSEC_PER_SEC);
/* Skip calculation of other parameters if we bypass them */
if (*bypass)
return 0;
if (sun4i_pwm->data->has_prescaler_bypass) {
/* First, test without any prescaler when available */
prescaler = PWM_PRESCAL_MASK;
/*
* When not using any prescaler, the clock period in nanoseconds
* is not an integer so round it half up instead of
* truncating to get less surprising values.
*/
div = clk_rate * state->period + NSEC_PER_SEC / 2;
do_div(div, NSEC_PER_SEC);
if (div - 1 > PWM_PRD_MASK)
prescaler = 0;
}
if (prescaler == 0) {
/* Go up from the first divider */
for (prescaler = 0; prescaler < PWM_PRESCAL_MASK; prescaler++) {
unsigned int pval = prescaler_table[prescaler];
if (!pval)
continue;
div = clk_rate;
do_div(div, pval);
div = div * state->period;
do_div(div, NSEC_PER_SEC);
if (div - 1 <= PWM_PRD_MASK)
break;
}
if (div - 1 > PWM_PRD_MASK)
return -EINVAL;
}
*prd = div;
div *= state->duty_cycle;
do_div(div, state->period);
*dty = div;
*prsclr = prescaler;
return 0;
}
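/*
 * Worked example (illustrative; assumes a hypothetical 24 MHz module clock):
 * for a requested period of 5000000 ns (200 Hz), the prescaler-less path gives
 * div = (24000000 * 5000000 + NSEC_PER_SEC / 2) / NSEC_PER_SEC = 120000,
 * which exceeds PWM_PRD_MASK (0xffff), so the loop falls back to the first
 * usable prescaler_table entry (120):
 * div = 24000000 / 120 * 5000000 / NSEC_PER_SEC = 1000,
 * giving *prd = 1000 and, for a 50% duty cycle, *dty = 500.
 */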
static int sun4i_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
struct sun4i_pwm_chip *sun4i_pwm = to_sun4i_pwm_chip(chip);
struct pwm_state cstate;
u32 ctrl, duty = 0, period = 0, val;
int ret;
unsigned int delay_us, prescaler = 0;
bool bypass;
pwm_get_state(pwm, &cstate);
if (!cstate.enabled) {
ret = clk_prepare_enable(sun4i_pwm->clk);
if (ret) {
dev_err(chip->dev, "failed to enable PWM clock\n");
return ret;
}
}
ret = sun4i_pwm_calculate(sun4i_pwm, state, &duty, &period, &prescaler,
&bypass);
if (ret) {
dev_err(chip->dev, "period exceeds the maximum value\n");
if (!cstate.enabled)
clk_disable_unprepare(sun4i_pwm->clk);
return ret;
}
spin_lock(&sun4i_pwm->ctrl_lock);
ctrl = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);
if (sun4i_pwm->data->has_direct_mod_clk_output) {
if (bypass) {
ctrl |= BIT_CH(PWM_BYPASS, pwm->hwpwm);
/* We can skip the other parameters */
sun4i_pwm_writel(sun4i_pwm, ctrl, PWM_CTRL_REG);
spin_unlock(&sun4i_pwm->ctrl_lock);
return 0;
}
ctrl &= ~BIT_CH(PWM_BYPASS, pwm->hwpwm);
}
if (PWM_REG_PRESCAL(ctrl, pwm->hwpwm) != prescaler) {
/* Prescaler changed, the clock has to be gated */
ctrl &= ~BIT_CH(PWM_CLK_GATING, pwm->hwpwm);
sun4i_pwm_writel(sun4i_pwm, ctrl, PWM_CTRL_REG);
ctrl &= ~BIT_CH(PWM_PRESCAL_MASK, pwm->hwpwm);
ctrl |= BIT_CH(prescaler, pwm->hwpwm);
}
val = (duty & PWM_DTY_MASK) | PWM_PRD(period);
sun4i_pwm_writel(sun4i_pwm, val, PWM_CH_PRD(pwm->hwpwm));
if (state->polarity != PWM_POLARITY_NORMAL)
ctrl &= ~BIT_CH(PWM_ACT_STATE, pwm->hwpwm);
else
ctrl |= BIT_CH(PWM_ACT_STATE, pwm->hwpwm);
ctrl |= BIT_CH(PWM_CLK_GATING, pwm->hwpwm);
if (state->enabled)
ctrl |= BIT_CH(PWM_EN, pwm->hwpwm);
sun4i_pwm_writel(sun4i_pwm, ctrl, PWM_CTRL_REG);
spin_unlock(&sun4i_pwm->ctrl_lock);
if (state->enabled)
return 0;
/* We need a full period to elapse before disabling the channel. */
delay_us = DIV_ROUND_UP_ULL(cstate.period, NSEC_PER_USEC);
if ((delay_us / 500) > MAX_UDELAY_MS)
msleep(delay_us / 1000 + 1);
else
usleep_range(delay_us, delay_us * 2);
spin_lock(&sun4i_pwm->ctrl_lock);
ctrl = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);
ctrl &= ~BIT_CH(PWM_CLK_GATING, pwm->hwpwm);
ctrl &= ~BIT_CH(PWM_EN, pwm->hwpwm);
sun4i_pwm_writel(sun4i_pwm, ctrl, PWM_CTRL_REG);
spin_unlock(&sun4i_pwm->ctrl_lock);
clk_disable_unprepare(sun4i_pwm->clk);
return 0;
}
static const struct pwm_ops sun4i_pwm_ops = {
.apply = sun4i_pwm_apply,
.get_state = sun4i_pwm_get_state,
.owner = THIS_MODULE,
};
static const struct sun4i_pwm_data sun4i_pwm_dual_nobypass = {
.has_prescaler_bypass = false,
.npwm = 2,
};
static const struct sun4i_pwm_data sun4i_pwm_dual_bypass = {
.has_prescaler_bypass = true,
.npwm = 2,
};
static const struct sun4i_pwm_data sun4i_pwm_single_bypass = {
.has_prescaler_bypass = true,
.npwm = 1,
};
static const struct sun4i_pwm_data sun50i_a64_pwm_data = {
.has_prescaler_bypass = true,
.has_direct_mod_clk_output = true,
.npwm = 1,
};
static const struct sun4i_pwm_data sun50i_h6_pwm_data = {
.has_prescaler_bypass = true,
.has_direct_mod_clk_output = true,
.npwm = 2,
};
static const struct of_device_id sun4i_pwm_dt_ids[] = {
{
.compatible = "allwinner,sun4i-a10-pwm",
.data = &sun4i_pwm_dual_nobypass,
}, {
.compatible = "allwinner,sun5i-a10s-pwm",
.data = &sun4i_pwm_dual_bypass,
}, {
.compatible = "allwinner,sun5i-a13-pwm",
.data = &sun4i_pwm_single_bypass,
}, {
.compatible = "allwinner,sun7i-a20-pwm",
.data = &sun4i_pwm_dual_bypass,
}, {
.compatible = "allwinner,sun8i-h3-pwm",
.data = &sun4i_pwm_single_bypass,
}, {
.compatible = "allwinner,sun50i-a64-pwm",
.data = &sun50i_a64_pwm_data,
}, {
.compatible = "allwinner,sun50i-h6-pwm",
.data = &sun50i_h6_pwm_data,
}, {
/* sentinel */
},
};
MODULE_DEVICE_TABLE(of, sun4i_pwm_dt_ids);
static int sun4i_pwm_probe(struct platform_device *pdev)
{
struct sun4i_pwm_chip *sun4ichip;
int ret;
sun4ichip = devm_kzalloc(&pdev->dev, sizeof(*sun4ichip), GFP_KERNEL);
if (!sun4ichip)
return -ENOMEM;
sun4ichip->data = of_device_get_match_data(&pdev->dev);
if (!sun4ichip->data)
return -ENODEV;
sun4ichip->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sun4ichip->base))
return PTR_ERR(sun4ichip->base);
/*
* All hardware variants need a source clock that is divided and
* then feeds the counter that defines the output wave form. In the
* device tree this clock is either unnamed or called "mod".
* Some variants (e.g. H6) need another clock to access the
* hardware registers; this is called "bus".
* So we request "mod" first (and ignore the corner case that a
* parent provides a "mod" clock while the right one would be the
* unnamed one of the PWM device) and if this is not found we fall
* back to the first clock of the PWM.
*/
sun4ichip->clk = devm_clk_get_optional(&pdev->dev, "mod");
if (IS_ERR(sun4ichip->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(sun4ichip->clk),
"get mod clock failed\n");
if (!sun4ichip->clk) {
sun4ichip->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(sun4ichip->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(sun4ichip->clk),
"get unnamed clock failed\n");
}
sun4ichip->bus_clk = devm_clk_get_optional(&pdev->dev, "bus");
if (IS_ERR(sun4ichip->bus_clk))
return dev_err_probe(&pdev->dev, PTR_ERR(sun4ichip->bus_clk),
"get bus clock failed\n");
sun4ichip->rst = devm_reset_control_get_optional_shared(&pdev->dev, NULL);
if (IS_ERR(sun4ichip->rst))
return dev_err_probe(&pdev->dev, PTR_ERR(sun4ichip->rst),
"get reset failed\n");
/* Deassert reset */
ret = reset_control_deassert(sun4ichip->rst);
if (ret) {
dev_err(&pdev->dev, "cannot deassert reset control: %pe\n",
ERR_PTR(ret));
return ret;
}
/*
* We're keeping the bus clock on for the sake of simplicity.
* Actually it only needs to be on for hardware register accesses.
*/
ret = clk_prepare_enable(sun4ichip->bus_clk);
if (ret) {
dev_err(&pdev->dev, "cannot prepare and enable bus_clk %pe\n",
ERR_PTR(ret));
goto err_bus;
}
sun4ichip->chip.dev = &pdev->dev;
sun4ichip->chip.ops = &sun4i_pwm_ops;
sun4ichip->chip.npwm = sun4ichip->data->npwm;
spin_lock_init(&sun4ichip->ctrl_lock);
ret = pwmchip_add(&sun4ichip->chip);
if (ret < 0) {
dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
goto err_pwm_add;
}
platform_set_drvdata(pdev, sun4ichip);
return 0;
err_pwm_add:
clk_disable_unprepare(sun4ichip->bus_clk);
err_bus:
reset_control_assert(sun4ichip->rst);
return ret;
}
static void sun4i_pwm_remove(struct platform_device *pdev)
{
struct sun4i_pwm_chip *sun4ichip = platform_get_drvdata(pdev);
pwmchip_remove(&sun4ichip->chip);
clk_disable_unprepare(sun4ichip->bus_clk);
reset_control_assert(sun4ichip->rst);
}
static struct platform_driver sun4i_pwm_driver = {
.driver = {
.name = "sun4i-pwm",
.of_match_table = sun4i_pwm_dt_ids,
},
.probe = sun4i_pwm_probe,
.remove_new = sun4i_pwm_remove,
};
module_platform_driver(sun4i_pwm_driver);
MODULE_ALIAS("platform:sun4i-pwm");
MODULE_AUTHOR("Alexandre Belloni <[email protected]>");
MODULE_DESCRIPTION("Allwinner sun4i PWM driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/pwm/pwm-sun4i.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* PWM Controller Driver for HiSilicon BVT SoCs
*
* Copyright (c) 2016 HiSilicon Technologies Co., Ltd.
*/
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/reset.h>
#define PWM_CFG0_ADDR(x) (((x) * 0x20) + 0x0)
#define PWM_CFG1_ADDR(x) (((x) * 0x20) + 0x4)
#define PWM_CFG2_ADDR(x) (((x) * 0x20) + 0x8)
#define PWM_CTRL_ADDR(x) (((x) * 0x20) + 0xC)
#define PWM_ENABLE_SHIFT 0
#define PWM_ENABLE_MASK BIT(0)
#define PWM_POLARITY_SHIFT 1
#define PWM_POLARITY_MASK BIT(1)
#define PWM_KEEP_SHIFT 2
#define PWM_KEEP_MASK BIT(2)
#define PWM_PERIOD_MASK GENMASK(31, 0)
#define PWM_DUTY_MASK GENMASK(31, 0)
struct hibvt_pwm_chip {
struct pwm_chip chip;
struct clk *clk;
void __iomem *base;
struct reset_control *rstc;
const struct hibvt_pwm_soc *soc;
};
struct hibvt_pwm_soc {
u32 num_pwms;
bool quirk_force_enable;
};
static const struct hibvt_pwm_soc hi3516cv300_soc_info = {
.num_pwms = 4,
};
static const struct hibvt_pwm_soc hi3519v100_soc_info = {
.num_pwms = 8,
};
static const struct hibvt_pwm_soc hi3559v100_shub_soc_info = {
.num_pwms = 8,
.quirk_force_enable = true,
};
static const struct hibvt_pwm_soc hi3559v100_soc_info = {
.num_pwms = 2,
.quirk_force_enable = true,
};
static inline struct hibvt_pwm_chip *to_hibvt_pwm_chip(struct pwm_chip *chip)
{
return container_of(chip, struct hibvt_pwm_chip, chip);
}
static void hibvt_pwm_set_bits(void __iomem *base, u32 offset,
u32 mask, u32 data)
{
void __iomem *address = base + offset;
u32 value;
value = readl(address);
value &= ~mask;
value |= (data & mask);
writel(value, address);
}
static void hibvt_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct hibvt_pwm_chip *hi_pwm_chip = to_hibvt_pwm_chip(chip);
hibvt_pwm_set_bits(hi_pwm_chip->base, PWM_CTRL_ADDR(pwm->hwpwm),
PWM_ENABLE_MASK, 0x1);
}
static void hibvt_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct hibvt_pwm_chip *hi_pwm_chip = to_hibvt_pwm_chip(chip);
hibvt_pwm_set_bits(hi_pwm_chip->base, PWM_CTRL_ADDR(pwm->hwpwm),
PWM_ENABLE_MASK, 0x0);
}
static void hibvt_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
int duty_cycle_ns, int period_ns)
{
struct hibvt_pwm_chip *hi_pwm_chip = to_hibvt_pwm_chip(chip);
u32 freq, period, duty;
freq = div_u64(clk_get_rate(hi_pwm_chip->clk), 1000000);
period = div_u64(freq * period_ns, 1000);
duty = div_u64(period * duty_cycle_ns, period_ns);
hibvt_pwm_set_bits(hi_pwm_chip->base, PWM_CFG0_ADDR(pwm->hwpwm),
PWM_PERIOD_MASK, period);
hibvt_pwm_set_bits(hi_pwm_chip->base, PWM_CFG1_ADDR(pwm->hwpwm),
PWM_DUTY_MASK, duty);
}
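/*
 * Worked example (illustrative; assumes a hypothetical 3 MHz input clock):
 * freq = 3 (MHz), so period_ns = 1000000 gives
 * period = 3 * 1000000 / 1000 = 3000 counter ticks,
 * and duty_cycle_ns = 500000 gives
 * duty = 3000 * 500000 / 1000000 = 1500 ticks (50%).
 */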
static void hibvt_pwm_set_polarity(struct pwm_chip *chip,
struct pwm_device *pwm,
enum pwm_polarity polarity)
{
struct hibvt_pwm_chip *hi_pwm_chip = to_hibvt_pwm_chip(chip);
if (polarity == PWM_POLARITY_INVERSED)
hibvt_pwm_set_bits(hi_pwm_chip->base, PWM_CTRL_ADDR(pwm->hwpwm),
PWM_POLARITY_MASK, (0x1 << PWM_POLARITY_SHIFT));
else
hibvt_pwm_set_bits(hi_pwm_chip->base, PWM_CTRL_ADDR(pwm->hwpwm),
PWM_POLARITY_MASK, (0x0 << PWM_POLARITY_SHIFT));
}
static int hibvt_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_state *state)
{
struct hibvt_pwm_chip *hi_pwm_chip = to_hibvt_pwm_chip(chip);
void __iomem *base;
u32 freq, value;
freq = div_u64(clk_get_rate(hi_pwm_chip->clk), 1000000);
base = hi_pwm_chip->base;
value = readl(base + PWM_CFG0_ADDR(pwm->hwpwm));
state->period = div_u64(value * 1000, freq);
value = readl(base + PWM_CFG1_ADDR(pwm->hwpwm));
state->duty_cycle = div_u64(value * 1000, freq);
value = readl(base + PWM_CTRL_ADDR(pwm->hwpwm));
state->enabled = (PWM_ENABLE_MASK & value);
state->polarity = (PWM_POLARITY_MASK & value) ? PWM_POLARITY_INVERSED : PWM_POLARITY_NORMAL;
return 0;
}
static int hibvt_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
struct hibvt_pwm_chip *hi_pwm_chip = to_hibvt_pwm_chip(chip);
if (state->polarity != pwm->state.polarity)
hibvt_pwm_set_polarity(chip, pwm, state->polarity);
if (state->period != pwm->state.period ||
state->duty_cycle != pwm->state.duty_cycle) {
hibvt_pwm_config(chip, pwm, state->duty_cycle, state->period);
/*
* Some implementations require the PWM to be enabled twice
* each time the duty cycle is refreshed.
*/
if (hi_pwm_chip->soc->quirk_force_enable && state->enabled)
hibvt_pwm_enable(chip, pwm);
}
if (state->enabled != pwm->state.enabled) {
if (state->enabled)
hibvt_pwm_enable(chip, pwm);
else
hibvt_pwm_disable(chip, pwm);
}
return 0;
}
static const struct pwm_ops hibvt_pwm_ops = {
.get_state = hibvt_pwm_get_state,
.apply = hibvt_pwm_apply,
.owner = THIS_MODULE,
};
static int hibvt_pwm_probe(struct platform_device *pdev)
{
const struct hibvt_pwm_soc *soc =
of_device_get_match_data(&pdev->dev);
struct hibvt_pwm_chip *pwm_chip;
int ret, i;
pwm_chip = devm_kzalloc(&pdev->dev, sizeof(*pwm_chip), GFP_KERNEL);
if (pwm_chip == NULL)
return -ENOMEM;
pwm_chip->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(pwm_chip->clk)) {
dev_err(&pdev->dev, "getting clock failed with %ld\n",
PTR_ERR(pwm_chip->clk));
return PTR_ERR(pwm_chip->clk);
}
pwm_chip->chip.ops = &hibvt_pwm_ops;
pwm_chip->chip.dev = &pdev->dev;
pwm_chip->chip.npwm = soc->num_pwms;
pwm_chip->soc = soc;
pwm_chip->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pwm_chip->base))
return PTR_ERR(pwm_chip->base);
ret = clk_prepare_enable(pwm_chip->clk);
if (ret < 0)
return ret;
pwm_chip->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (IS_ERR(pwm_chip->rstc)) {
clk_disable_unprepare(pwm_chip->clk);
return PTR_ERR(pwm_chip->rstc);
}
reset_control_assert(pwm_chip->rstc);
msleep(30);
reset_control_deassert(pwm_chip->rstc);
ret = pwmchip_add(&pwm_chip->chip);
if (ret < 0) {
clk_disable_unprepare(pwm_chip->clk);
return ret;
}
for (i = 0; i < pwm_chip->chip.npwm; i++) {
hibvt_pwm_set_bits(pwm_chip->base, PWM_CTRL_ADDR(i),
PWM_KEEP_MASK, (0x1 << PWM_KEEP_SHIFT));
}
platform_set_drvdata(pdev, pwm_chip);
return 0;
}
static void hibvt_pwm_remove(struct platform_device *pdev)
{
struct hibvt_pwm_chip *pwm_chip;
pwm_chip = platform_get_drvdata(pdev);
pwmchip_remove(&pwm_chip->chip);
reset_control_assert(pwm_chip->rstc);
msleep(30);
reset_control_deassert(pwm_chip->rstc);
clk_disable_unprepare(pwm_chip->clk);
}
static const struct of_device_id hibvt_pwm_of_match[] = {
{ .compatible = "hisilicon,hi3516cv300-pwm",
.data = &hi3516cv300_soc_info },
{ .compatible = "hisilicon,hi3519v100-pwm",
.data = &hi3519v100_soc_info },
{ .compatible = "hisilicon,hi3559v100-shub-pwm",
.data = &hi3559v100_shub_soc_info },
{ .compatible = "hisilicon,hi3559v100-pwm",
.data = &hi3559v100_soc_info },
{ }
};
MODULE_DEVICE_TABLE(of, hibvt_pwm_of_match);
static struct platform_driver hibvt_pwm_driver = {
.driver = {
.name = "hibvt-pwm",
.of_match_table = hibvt_pwm_of_match,
},
.probe = hibvt_pwm_probe,
.remove_new = hibvt_pwm_remove,
};
module_platform_driver(hibvt_pwm_driver);
MODULE_AUTHOR("Jian Yuan");
MODULE_DESCRIPTION("HiSilicon BVT SoCs PWM driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/pwm/pwm-hibvt.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Driver for the Apple SoC PWM controller
*
* Copyright The Asahi Linux Contributors
*
* Limitations:
* - The writes to cycle registers are shadowed until a write to
* the control register.
* - If both OFF_CYCLES and ON_CYCLES are set to 0, the output
* is a constant off signal.
* - When APPLE_PWM_CTRL is set to 0, the output is constant low
*/
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/math64.h>
#define APPLE_PWM_CTRL 0x00
#define APPLE_PWM_ON_CYCLES 0x1c
#define APPLE_PWM_OFF_CYCLES 0x18
#define APPLE_PWM_CTRL_ENABLE BIT(0)
#define APPLE_PWM_CTRL_MODE BIT(2)
#define APPLE_PWM_CTRL_UPDATE BIT(5)
#define APPLE_PWM_CTRL_TRIGGER BIT(9)
#define APPLE_PWM_CTRL_INVERT BIT(10)
#define APPLE_PWM_CTRL_OUTPUT_ENABLE BIT(14)
struct apple_pwm {
struct pwm_chip chip;
void __iomem *base;
u64 clkrate;
};
static inline struct apple_pwm *to_apple_pwm(struct pwm_chip *chip)
{
return container_of(chip, struct apple_pwm, chip);
}
static int apple_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
struct apple_pwm *fpwm;
if (state->polarity == PWM_POLARITY_INVERSED)
return -EINVAL;
fpwm = to_apple_pwm(chip);
if (state->enabled) {
u64 on_cycles, off_cycles;
on_cycles = mul_u64_u64_div_u64(fpwm->clkrate,
state->duty_cycle, NSEC_PER_SEC);
if (on_cycles > 0xFFFFFFFF)
on_cycles = 0xFFFFFFFF;
off_cycles = mul_u64_u64_div_u64(fpwm->clkrate,
state->period, NSEC_PER_SEC) - on_cycles;
if (off_cycles > 0xFFFFFFFF)
off_cycles = 0xFFFFFFFF;
writel(on_cycles, fpwm->base + APPLE_PWM_ON_CYCLES);
writel(off_cycles, fpwm->base + APPLE_PWM_OFF_CYCLES);
writel(APPLE_PWM_CTRL_ENABLE | APPLE_PWM_CTRL_OUTPUT_ENABLE | APPLE_PWM_CTRL_UPDATE,
fpwm->base + APPLE_PWM_CTRL);
} else {
writel(0, fpwm->base + APPLE_PWM_CTRL);
}
return 0;
}
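/*
 * Worked example (illustrative), using the 24 MHz system clock mentioned in
 * the probe path: a request of period = 1000000 ns and duty_cycle = 500000 ns
 * maps to
 * on_cycles = 24000000 * 500000 / NSEC_PER_SEC = 12000,
 * off_cycles = 24000000 * 1000000 / NSEC_PER_SEC - 12000 = 12000,
 * i.e. a 1 kHz output with a 50% duty cycle.
 */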
static int apple_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_state *state)
{
struct apple_pwm *fpwm;
u32 on_cycles, off_cycles, ctrl;
fpwm = to_apple_pwm(chip);
ctrl = readl(fpwm->base + APPLE_PWM_CTRL);
on_cycles = readl(fpwm->base + APPLE_PWM_ON_CYCLES);
off_cycles = readl(fpwm->base + APPLE_PWM_OFF_CYCLES);
state->enabled = (ctrl & APPLE_PWM_CTRL_ENABLE) && (ctrl & APPLE_PWM_CTRL_OUTPUT_ENABLE);
state->polarity = PWM_POLARITY_NORMAL;
// on_cycles + off_cycles is 33 bits, NSEC_PER_SEC is 30, there is no overflow
state->duty_cycle = DIV64_U64_ROUND_UP((u64)on_cycles * NSEC_PER_SEC, fpwm->clkrate);
state->period = DIV64_U64_ROUND_UP(((u64)off_cycles + (u64)on_cycles) *
NSEC_PER_SEC, fpwm->clkrate);
return 0;
}
static const struct pwm_ops apple_pwm_ops = {
.apply = apple_pwm_apply,
.get_state = apple_pwm_get_state,
.owner = THIS_MODULE,
};
static int apple_pwm_probe(struct platform_device *pdev)
{
struct apple_pwm *fpwm;
struct clk *clk;
int ret;
fpwm = devm_kzalloc(&pdev->dev, sizeof(*fpwm), GFP_KERNEL);
if (!fpwm)
return -ENOMEM;
fpwm->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(fpwm->base))
return PTR_ERR(fpwm->base);
clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(clk))
return dev_err_probe(&pdev->dev, PTR_ERR(clk), "unable to get the clock");
/*
* All existing devices use the 24MHz system clock; a higher rate can
* only happen if the device tree is broken.
*
* This check is done to prevent an overflow in .apply
*/
fpwm->clkrate = clk_get_rate(clk);
if (fpwm->clkrate > NSEC_PER_SEC)
return dev_err_probe(&pdev->dev, -EINVAL, "pwm clock out of range");
fpwm->chip.dev = &pdev->dev;
fpwm->chip.npwm = 1;
fpwm->chip.ops = &apple_pwm_ops;
ret = devm_pwmchip_add(&pdev->dev, &fpwm->chip);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret, "unable to add pwm chip");
return 0;
}
static const struct of_device_id apple_pwm_of_match[] = {
{ .compatible = "apple,s5l-fpwm" },
{}
};
MODULE_DEVICE_TABLE(of, apple_pwm_of_match);
static struct platform_driver apple_pwm_driver = {
.probe = apple_pwm_probe,
.driver = {
.name = "apple-pwm",
.of_match_table = apple_pwm_of_match,
},
};
module_platform_driver(apple_pwm_driver);
MODULE_DESCRIPTION("Apple SoC PWM driver");
MODULE_LICENSE("Dual MIT/GPL");
| linux-master | drivers/pwm/pwm-apple.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* sl28cpld PWM driver
*
* Copyright (c) 2020 Michael Walle <[email protected]>
*
* There is no public datasheet available for this PWM core. But it is easy
* enough to be briefly explained. It consists of one 8-bit counter. The PWM
* supports four distinct frequencies by selecting when to reset the counter.
* With the prescaler setting you can select which bit of the counter is used
* to reset it. This implies that the higher the frequency, the fewer bits
* remain available for the actual counter.
*
* Let cnt[7:0] be the counter, clocked at 32kHz:
* +-----------+--------+--------------+-----------+---------------+
* | prescaler | reset | counter bits | frequency | period length |
* +-----------+--------+--------------+-----------+---------------+
* | 0 | cnt[7] | cnt[6:0] | 250 Hz | 4000000 ns |
* | 1 | cnt[6] | cnt[5:0] | 500 Hz | 2000000 ns |
* | 2 | cnt[5] | cnt[4:0] | 1 kHz | 1000000 ns |
* | 3 | cnt[4] | cnt[3:0] | 2 kHz | 500000 ns |
* +-----------+--------+--------------+-----------+---------------+
*
* Limitations:
* - The hardware cannot generate a 100% duty cycle if the prescaler is 0.
* - The hardware cannot atomically set the prescaler and the counter value,
* which might lead to glitches and inconsistent states if a write fails.
* - The counter is not reset if you switch the prescaler which leads
* to glitches, too.
* - The duty cycle will switch immediately and not after a complete cycle.
* - Depending on the actual implementation, disabling the PWM might have
* side effects. For example, if the output pin is shared with a GPIO pin
* it will automatically switch back to GPIO mode.
*/
#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/pwm.h>
#include <linux/regmap.h>
/*
* PWM timer block registers.
*/
#define SL28CPLD_PWM_CTRL 0x00
#define SL28CPLD_PWM_CTRL_ENABLE BIT(7)
#define SL28CPLD_PWM_CTRL_PRESCALER_MASK GENMASK(1, 0)
#define SL28CPLD_PWM_CYCLE 0x01
#define SL28CPLD_PWM_CYCLE_MAX GENMASK(6, 0)
#define SL28CPLD_PWM_CLK 32000 /* 32 kHz */
#define SL28CPLD_PWM_MAX_DUTY_CYCLE(prescaler) (1 << (7 - (prescaler)))
#define SL28CPLD_PWM_PERIOD(prescaler) \
(NSEC_PER_SEC / SL28CPLD_PWM_CLK * SL28CPLD_PWM_MAX_DUTY_CYCLE(prescaler))
/*
* We calculate the duty cycle like this:
* duty_cycle_ns = pwm_cycle_reg * max_period_ns / max_duty_cycle
*
* With
* max_period_ns = 1 << (7 - prescaler) / SL28CPLD_PWM_CLK * NSEC_PER_SEC
* max_duty_cycle = 1 << (7 - prescaler)
* this then simplifies to:
* duty_cycle_ns = pwm_cycle_reg / SL28CPLD_PWM_CLK * NSEC_PER_SEC
* = NSEC_PER_SEC / SL28CPLD_PWM_CLK * pwm_cycle_reg
*
* NSEC_PER_SEC is a multiple of SL28CPLD_PWM_CLK, therefore we're not losing
* precision by doing the division first.
*/
#define SL28CPLD_PWM_TO_DUTY_CYCLE(reg) \
(NSEC_PER_SEC / SL28CPLD_PWM_CLK * (reg))
#define SL28CPLD_PWM_FROM_DUTY_CYCLE(duty_cycle) \
(DIV_ROUND_DOWN_ULL((duty_cycle), NSEC_PER_SEC / SL28CPLD_PWM_CLK))
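/*
 * Worked example (illustrative): with SL28CPLD_PWM_CLK = 32000 Hz one counter
 * step is NSEC_PER_SEC / 32000 = 31250 ns, so a cycle register value of 64
 * reads back as a duty cycle of 64 * 31250 ns = 2000000 ns, i.e. 50% of the
 * 4000000 ns period available at prescaler 0.
 */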
#define sl28cpld_pwm_read(priv, reg, val) \
regmap_read((priv)->regmap, (priv)->offset + (reg), (val))
#define sl28cpld_pwm_write(priv, reg, val) \
regmap_write((priv)->regmap, (priv)->offset + (reg), (val))
struct sl28cpld_pwm {
struct pwm_chip chip;
struct regmap *regmap;
u32 offset;
};
static inline struct sl28cpld_pwm *sl28cpld_pwm_from_chip(struct pwm_chip *chip)
{
return container_of(chip, struct sl28cpld_pwm, chip);
}
static int sl28cpld_pwm_get_state(struct pwm_chip *chip,
struct pwm_device *pwm,
struct pwm_state *state)
{
struct sl28cpld_pwm *priv = sl28cpld_pwm_from_chip(chip);
unsigned int reg;
int prescaler;
sl28cpld_pwm_read(priv, SL28CPLD_PWM_CTRL, &reg);
state->enabled = reg & SL28CPLD_PWM_CTRL_ENABLE;
prescaler = FIELD_GET(SL28CPLD_PWM_CTRL_PRESCALER_MASK, reg);
state->period = SL28CPLD_PWM_PERIOD(prescaler);
sl28cpld_pwm_read(priv, SL28CPLD_PWM_CYCLE, &reg);
state->duty_cycle = SL28CPLD_PWM_TO_DUTY_CYCLE(reg);
state->polarity = PWM_POLARITY_NORMAL;
/*
* Sanitize values for the PWM core. Depending on the prescaler it
* might happen that we calculate a duty_cycle greater than the actual
* period. This might happen if someone (e.g. the bootloader) sets an
* invalid combination of values. The behavior of the hardware is
* undefined in this case. But we need to report sane values back to
* the PWM core.
*/
state->duty_cycle = min(state->duty_cycle, state->period);
return 0;
}
static int sl28cpld_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
struct sl28cpld_pwm *priv = sl28cpld_pwm_from_chip(chip);
unsigned int cycle, prescaler;
bool write_duty_cycle_first;
int ret;
u8 ctrl;
/* Polarity inversion is not supported */
if (state->polarity != PWM_POLARITY_NORMAL)
return -EINVAL;
/*
* Calculate the prescaler. Pick the biggest period that isn't
* bigger than the requested period.
*/
prescaler = DIV_ROUND_UP_ULL(SL28CPLD_PWM_PERIOD(0), state->period);
prescaler = order_base_2(prescaler);
if (prescaler > field_max(SL28CPLD_PWM_CTRL_PRESCALER_MASK))
return -ERANGE;
ctrl = FIELD_PREP(SL28CPLD_PWM_CTRL_PRESCALER_MASK, prescaler);
if (state->enabled)
ctrl |= SL28CPLD_PWM_CTRL_ENABLE;
cycle = SL28CPLD_PWM_FROM_DUTY_CYCLE(state->duty_cycle);
cycle = min_t(unsigned int, cycle, SL28CPLD_PWM_MAX_DUTY_CYCLE(prescaler));
/*
* Work around the hardware limitation. See also above. Trap 100% duty
* cycle if the prescaler is 0. Set prescaler to 1 instead. We don't
* care about the frequency because it's "all-one" in either case.
*
* We don't need to check the actual prescaler setting, because this
* particular value can only occur if the prescaler is 0.
*/
if (cycle == SL28CPLD_PWM_MAX_DUTY_CYCLE(0)) {
ctrl &= ~SL28CPLD_PWM_CTRL_PRESCALER_MASK;
ctrl |= FIELD_PREP(SL28CPLD_PWM_CTRL_PRESCALER_MASK, 1);
cycle = SL28CPLD_PWM_MAX_DUTY_CYCLE(1);
}
/*
* To avoid glitches when we switch the prescaler, we have to make sure
* we have a valid duty cycle for the new mode.
*
* Take the current prescaler (or the current period length) into
* account to decide whether we have to write the duty cycle or the new
* prescaler first. If the period length is decreasing we have to
* write the duty cycle first.
*/
write_duty_cycle_first = pwm->state.period > state->period;
if (write_duty_cycle_first) {
ret = sl28cpld_pwm_write(priv, SL28CPLD_PWM_CYCLE, cycle);
if (ret)
return ret;
}
ret = sl28cpld_pwm_write(priv, SL28CPLD_PWM_CTRL, ctrl);
if (ret)
return ret;
if (!write_duty_cycle_first) {
ret = sl28cpld_pwm_write(priv, SL28CPLD_PWM_CYCLE, cycle);
if (ret)
return ret;
}
return 0;
}
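/*
 * Worked example (illustrative): for a requested period of 1000000 ns the
 * prescaler selection above computes
 * DIV_ROUND_UP_ULL(4000000, 1000000) = 4 and order_base_2(4) = 2,
 * so prescaler 2 is used, matching the 1 kHz / 1000000 ns row of the
 * frequency table at the top of this file.
 */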
static const struct pwm_ops sl28cpld_pwm_ops = {
.apply = sl28cpld_pwm_apply,
.get_state = sl28cpld_pwm_get_state,
.owner = THIS_MODULE,
};
static int sl28cpld_pwm_probe(struct platform_device *pdev)
{
struct sl28cpld_pwm *priv;
struct pwm_chip *chip;
int ret;
if (!pdev->dev.parent) {
dev_err(&pdev->dev, "no parent device\n");
return -ENODEV;
}
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->regmap = dev_get_regmap(pdev->dev.parent, NULL);
if (!priv->regmap) {
dev_err(&pdev->dev, "could not get parent regmap\n");
return -ENODEV;
}
ret = device_property_read_u32(&pdev->dev, "reg", &priv->offset);
if (ret) {
dev_err(&pdev->dev, "no 'reg' property found (%pe)\n",
ERR_PTR(ret));
return -EINVAL;
}
/* Initialize the pwm_chip structure */
chip = &priv->chip;
chip->dev = &pdev->dev;
chip->ops = &sl28cpld_pwm_ops;
chip->npwm = 1;
ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret) {
dev_err(&pdev->dev, "failed to add PWM chip (%pe)",
ERR_PTR(ret));
return ret;
}
return 0;
}
static const struct of_device_id sl28cpld_pwm_of_match[] = {
{ .compatible = "kontron,sl28cpld-pwm" },
{}
};
MODULE_DEVICE_TABLE(of, sl28cpld_pwm_of_match);
static struct platform_driver sl28cpld_pwm_driver = {
.probe = sl28cpld_pwm_probe,
.driver = {
.name = "sl28cpld-pwm",
.of_match_table = sl28cpld_pwm_of_match,
},
};
module_platform_driver(sl28cpld_pwm_driver);
MODULE_DESCRIPTION("sl28cpld PWM Driver");
MODULE_AUTHOR("Michael Walle <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/pwm/pwm-sl28cpld.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Driver for TWL4030/6030 Pulse Width Modulator used as LED driver
*
* Copyright (C) 2012 Texas Instruments
* Author: Peter Ujfalusi <[email protected]>
*
* This driver is a complete rewrite of the former pwm-twl6030.c authored by:
* Hemanth V <[email protected]>
*
* Reference manual for the twl6030 is available at:
* https://www.ti.com/lit/ds/symlink/twl6030.pdf
*
* Limitations:
* - The twl6030 hardware only supports two period lengths (128 clock ticks and
* 64 clock ticks); the driver only uses 128 ticks.
* - The hardware doesn't support ON = 0, so the active part of a period doesn't
* start at its beginning.
* - The hardware could support inverted polarity (with a similar limitation as
* for normal: the last clock tick is always inactive).
* - The hardware emits a constant low output when disabled.
* - A request for .duty_cycle = 0 results in an output wave with one active
* clock tick per period. This should better use the disabled state.
* - The driver only implements setting the relative duty cycle.
* - The driver doesn't implement .get_state().
*/
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/mfd/twl.h>
#include <linux/slab.h>
/*
* This driver handles the PWM driven LED terminals of TWL4030 and TWL6030.
* To generate the signal on TWL4030:
* - LEDA uses PWMA
* - LEDB uses PWMB
* TWL6030 has one LED pin with dedicated LEDPWM
*/
#define TWL4030_LED_MAX 0x7f
#define TWL6030_LED_MAX 0xff
/* Registers, bits and macro for TWL4030 */
#define TWL4030_LEDEN_REG 0x00
#define TWL4030_PWMA_REG 0x01
#define TWL4030_LEDXON (1 << 0)
#define TWL4030_LEDXPWM (1 << 4)
#define TWL4030_LED_PINS (TWL4030_LEDXON | TWL4030_LEDXPWM)
#define TWL4030_LED_TOGGLE(led, x) ((x) << (led))
/* Register, bits and macro for TWL6030 */
#define TWL6030_LED_PWM_CTRL1 0xf4
#define TWL6030_LED_PWM_CTRL2 0xf5
#define TWL6040_LED_MODE_HW 0x00
#define TWL6040_LED_MODE_ON 0x01
#define TWL6040_LED_MODE_OFF 0x02
#define TWL6040_LED_MODE_MASK 0x03
struct twl_pwmled_chip {
struct pwm_chip chip;
struct mutex mutex;
};
static inline struct twl_pwmled_chip *to_twl(struct pwm_chip *chip)
{
return container_of(chip, struct twl_pwmled_chip, chip);
}
static int twl4030_pwmled_config(struct pwm_chip *chip, struct pwm_device *pwm,
int duty_ns, int period_ns)
{
int duty_cycle = DIV_ROUND_UP(duty_ns * TWL4030_LED_MAX, period_ns) + 1;
u8 pwm_config[2] = { 1, 0 };
int base, ret;
/*
* To configure the duty period:
* On-cycle is set to 1 (the minimum allowed value)
* The off time of 0 is not configurable, so the mapping is:
* 0 -> off cycle = 2,
* 1 -> off cycle = 2,
* 2 -> off cycle = 3,
* 126 -> off cycle 127,
* 127 -> off cycle 1
* When on cycle == off cycle the PWM will be always on
*/
if (duty_cycle == 1)
duty_cycle = 2;
else if (duty_cycle > TWL4030_LED_MAX)
duty_cycle = 1;
base = pwm->hwpwm * 2 + TWL4030_PWMA_REG;
pwm_config[1] = duty_cycle;
ret = twl_i2c_write(TWL4030_MODULE_LED, pwm_config, base, 2);
if (ret < 0)
dev_err(chip->dev, "%s: Failed to configure PWM\n", pwm->label);
return ret;
}
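/*
 * Worked example (illustrative): for a 50% request (duty_ns = period_ns / 2)
 * the computation above gives
 * duty_cycle = DIV_ROUND_UP(duty_ns * 127, period_ns) + 1 = 64 + 1 = 65,
 * which is neither 1 nor above TWL4030_LED_MAX, so the on-cycle register
 * stays at 1 and the off-cycle register is written with 65.
 */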
static int twl4030_pwmled_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct twl_pwmled_chip *twl = to_twl(chip);
int ret;
u8 val;
mutex_lock(&twl->mutex);
ret = twl_i2c_read_u8(TWL4030_MODULE_LED, &val, TWL4030_LEDEN_REG);
if (ret < 0) {
dev_err(chip->dev, "%s: Failed to read LEDEN\n", pwm->label);
goto out;
}
val |= TWL4030_LED_TOGGLE(pwm->hwpwm, TWL4030_LED_PINS);
ret = twl_i2c_write_u8(TWL4030_MODULE_LED, val, TWL4030_LEDEN_REG);
if (ret < 0)
dev_err(chip->dev, "%s: Failed to enable PWM\n", pwm->label);
out:
mutex_unlock(&twl->mutex);
return ret;
}
static void twl4030_pwmled_disable(struct pwm_chip *chip,
struct pwm_device *pwm)
{
struct twl_pwmled_chip *twl = to_twl(chip);
int ret;
u8 val;
mutex_lock(&twl->mutex);
ret = twl_i2c_read_u8(TWL4030_MODULE_LED, &val, TWL4030_LEDEN_REG);
if (ret < 0) {
dev_err(chip->dev, "%s: Failed to read LEDEN\n", pwm->label);
goto out;
}
val &= ~TWL4030_LED_TOGGLE(pwm->hwpwm, TWL4030_LED_PINS);
ret = twl_i2c_write_u8(TWL4030_MODULE_LED, val, TWL4030_LEDEN_REG);
if (ret < 0)
dev_err(chip->dev, "%s: Failed to disable PWM\n", pwm->label);
out:
mutex_unlock(&twl->mutex);
}
static int twl4030_pwmled_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
int ret;
if (state->polarity != PWM_POLARITY_NORMAL)
return -EINVAL;
if (!state->enabled) {
if (pwm->state.enabled)
twl4030_pwmled_disable(chip, pwm);
return 0;
}
/*
* We cannot skip calling ->config even if state->period ==
* pwm->state.period && state->duty_cycle == pwm->state.duty_cycle
* because we might have exited early in the last call to
* pwm_apply_state because of !state->enabled and so the two values in
* pwm->state might not be configured in hardware.
*/
ret = twl4030_pwmled_config(pwm->chip, pwm,
state->duty_cycle, state->period);
if (ret)
return ret;
if (!pwm->state.enabled)
ret = twl4030_pwmled_enable(chip, pwm);
return ret;
}
static const struct pwm_ops twl4030_pwmled_ops = {
.apply = twl4030_pwmled_apply,
.owner = THIS_MODULE,
};
static int twl6030_pwmled_config(struct pwm_chip *chip, struct pwm_device *pwm,
int duty_ns, int period_ns)
{
int duty_cycle = (duty_ns * TWL6030_LED_MAX) / period_ns;
u8 on_time;
int ret;
on_time = duty_cycle & 0xff;
ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, on_time,
TWL6030_LED_PWM_CTRL1);
if (ret < 0)
dev_err(chip->dev, "%s: Failed to configure PWM\n", pwm->label);
return ret;
}
static int twl6030_pwmled_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct twl_pwmled_chip *twl = to_twl(chip);
int ret;
u8 val;
mutex_lock(&twl->mutex);
ret = twl_i2c_read_u8(TWL6030_MODULE_ID1, &val, TWL6030_LED_PWM_CTRL2);
if (ret < 0) {
dev_err(chip->dev, "%s: Failed to read PWM_CTRL2\n",
pwm->label);
goto out;
}
val &= ~TWL6040_LED_MODE_MASK;
val |= TWL6040_LED_MODE_ON;
ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_LED_PWM_CTRL2);
if (ret < 0)
dev_err(chip->dev, "%s: Failed to enable PWM\n", pwm->label);
out:
mutex_unlock(&twl->mutex);
return ret;
}
static void twl6030_pwmled_disable(struct pwm_chip *chip,
struct pwm_device *pwm)
{
struct twl_pwmled_chip *twl = to_twl(chip);
int ret;
u8 val;
mutex_lock(&twl->mutex);
ret = twl_i2c_read_u8(TWL6030_MODULE_ID1, &val, TWL6030_LED_PWM_CTRL2);
if (ret < 0) {
dev_err(chip->dev, "%s: Failed to read PWM_CTRL2\n",
pwm->label);
goto out;
}
val &= ~TWL6040_LED_MODE_MASK;
val |= TWL6040_LED_MODE_OFF;
ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_LED_PWM_CTRL2);
if (ret < 0)
dev_err(chip->dev, "%s: Failed to disable PWM\n", pwm->label);
out:
mutex_unlock(&twl->mutex);
}
static int twl6030_pwmled_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
int err;
if (state->polarity != pwm->state.polarity)
return -EINVAL;
if (!state->enabled) {
if (pwm->state.enabled)
twl6030_pwmled_disable(chip, pwm);
return 0;
}
err = twl6030_pwmled_config(pwm->chip, pwm,
state->duty_cycle, state->period);
if (err)
return err;
if (!pwm->state.enabled)
err = twl6030_pwmled_enable(chip, pwm);
return err;
}
static int twl6030_pwmled_request(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct twl_pwmled_chip *twl = to_twl(chip);
int ret;
u8 val;
mutex_lock(&twl->mutex);
ret = twl_i2c_read_u8(TWL6030_MODULE_ID1, &val, TWL6030_LED_PWM_CTRL2);
if (ret < 0) {
dev_err(chip->dev, "%s: Failed to read PWM_CTRL2\n",
pwm->label);
goto out;
}
val &= ~TWL6040_LED_MODE_MASK;
val |= TWL6040_LED_MODE_OFF;
ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_LED_PWM_CTRL2);
if (ret < 0)
dev_err(chip->dev, "%s: Failed to request PWM\n", pwm->label);
out:
mutex_unlock(&twl->mutex);
return ret;
}
static void twl6030_pwmled_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct twl_pwmled_chip *twl = to_twl(chip);
int ret;
u8 val;
mutex_lock(&twl->mutex);
ret = twl_i2c_read_u8(TWL6030_MODULE_ID1, &val, TWL6030_LED_PWM_CTRL2);
if (ret < 0) {
dev_err(chip->dev, "%s: Failed to read PWM_CTRL2\n",
pwm->label);
goto out;
}
val &= ~TWL6040_LED_MODE_MASK;
val |= TWL6040_LED_MODE_HW;
ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_LED_PWM_CTRL2);
if (ret < 0)
dev_err(chip->dev, "%s: Failed to free PWM\n", pwm->label);
out:
mutex_unlock(&twl->mutex);
}
static const struct pwm_ops twl6030_pwmled_ops = {
.apply = twl6030_pwmled_apply,
.request = twl6030_pwmled_request,
.free = twl6030_pwmled_free,
.owner = THIS_MODULE,
};
static int twl_pwmled_probe(struct platform_device *pdev)
{
struct twl_pwmled_chip *twl;
twl = devm_kzalloc(&pdev->dev, sizeof(*twl), GFP_KERNEL);
if (!twl)
return -ENOMEM;
if (twl_class_is_4030()) {
twl->chip.ops = &twl4030_pwmled_ops;
twl->chip.npwm = 2;
} else {
twl->chip.ops = &twl6030_pwmled_ops;
twl->chip.npwm = 1;
}
twl->chip.dev = &pdev->dev;
mutex_init(&twl->mutex);
return devm_pwmchip_add(&pdev->dev, &twl->chip);
}
#ifdef CONFIG_OF
static const struct of_device_id twl_pwmled_of_match[] = {
{ .compatible = "ti,twl4030-pwmled" },
{ .compatible = "ti,twl6030-pwmled" },
{ },
};
MODULE_DEVICE_TABLE(of, twl_pwmled_of_match);
#endif
static struct platform_driver twl_pwmled_driver = {
.driver = {
.name = "twl-pwmled",
.of_match_table = of_match_ptr(twl_pwmled_of_match),
},
.probe = twl_pwmled_probe,
};
module_platform_driver(twl_pwmled_driver);
MODULE_AUTHOR("Peter Ujfalusi <[email protected]>");
MODULE_DESCRIPTION("PWM driver for TWL4030 and TWL6030 LED outputs");
MODULE_ALIAS("platform:twl-pwmled");
MODULE_LICENSE("GPL");
| linux-master | drivers/pwm/pwm-twl-led.c |
// SPDX-License-Identifier: GPL-2.0
/*
* corePWM driver for Microchip "soft" FPGA IP cores.
*
* Copyright (c) 2021-2023 Microchip Corporation. All rights reserved.
* Author: Conor Dooley <[email protected]>
* Documentation:
* https://www.microsemi.com/document-portal/doc_download/1245275-corepwm-hb
*
* Limitations:
* - If the IP block is configured without "shadow registers", all register
* writes will take effect immediately, causing glitches on the output.
* If shadow registers *are* enabled, setting the "SYNC_UPDATE" register
* notifies the core that it needs to update the registers defining the
* waveform from the contents of the "shadow registers". Otherwise, changes
* will take effect immediately, even for those channels.
* As setting the period/duty cycle takes 4 register writes, there is a window
* in which this races against the start of a new period.
* - The IP block has no concept of a duty cycle, only rising/falling edges of
* the waveform. Unfortunately, if the rising & falling edges registers have
* the same value written to them the IP block will do whichever of a rising
* or a falling edge is possible. I.E. a 50% waveform at twice the requested
* period. Therefore to get a 0% waveform, the output is set the max high/low
* time depending on polarity.
* If the duty cycle is 0%, and the requested period is less than the
* available period resolution, this will manifest as a ~100% waveform (with
* some output glitches) rather than 50%.
* - The PWM period is set for the whole IP block not per channel. The driver
* will only change the period if no other PWM output is enabled.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/ktime.h>
#include <linux/math.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#define MCHPCOREPWM_PRESCALE_MAX 0xff
#define MCHPCOREPWM_PERIOD_STEPS_MAX 0xfe
#define MCHPCOREPWM_PERIOD_MAX 0xff00
#define MCHPCOREPWM_PRESCALE 0x00
#define MCHPCOREPWM_PERIOD 0x04
#define MCHPCOREPWM_EN(i) (0x08 + 0x04 * (i)) /* 0x08, 0x0c */
#define MCHPCOREPWM_POSEDGE(i) (0x10 + 0x08 * (i)) /* 0x10, 0x18, ..., 0x88 */
#define MCHPCOREPWM_NEGEDGE(i) (0x14 + 0x08 * (i)) /* 0x14, 0x1c, ..., 0x8c */
#define MCHPCOREPWM_SYNC_UPD 0xe4
#define MCHPCOREPWM_TIMEOUT_MS 100u
struct mchp_core_pwm_chip {
struct pwm_chip chip;
struct clk *clk;
void __iomem *base;
struct mutex lock; /* protects the shared period */
ktime_t update_timestamp;
u32 sync_update_mask;
u16 channel_enabled;
};
static inline struct mchp_core_pwm_chip *to_mchp_core_pwm(struct pwm_chip *chip)
{
return container_of(chip, struct mchp_core_pwm_chip, chip);
}
static void mchp_core_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm,
bool enable, u64 period)
{
struct mchp_core_pwm_chip *mchp_core_pwm = to_mchp_core_pwm(chip);
u8 channel_enable, reg_offset, shift;
/*
* There are two adjacent 8 bit control regs, the lower reg controls
* 0-7 and the upper reg 8-15. Check if the pwm is in the upper reg
* and if so, offset by the bus width.
*/
reg_offset = MCHPCOREPWM_EN(pwm->hwpwm >> 3);
shift = pwm->hwpwm & 7;
channel_enable = readb_relaxed(mchp_core_pwm->base + reg_offset);
channel_enable &= ~(1 << shift);
channel_enable |= (enable << shift);
writel_relaxed(channel_enable, mchp_core_pwm->base + reg_offset);
mchp_core_pwm->channel_enabled &= ~BIT(pwm->hwpwm);
mchp_core_pwm->channel_enabled |= enable << pwm->hwpwm;
/*
* The updated values will not appear on the bus until they have been
* applied to the waveform at the beginning of the next period.
* This is a NO-OP if the channel does not have shadow registers.
*/
if (mchp_core_pwm->sync_update_mask & (1 << pwm->hwpwm))
mchp_core_pwm->update_timestamp = ktime_add_ns(ktime_get(), period);
}
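/*
 * Worked example (illustrative): for hwpwm = 10 the index arithmetic above
 * selects the upper enable register, MCHPCOREPWM_EN(10 >> 3) =
 * MCHPCOREPWM_EN(1) = 0x0c, and toggles bit (10 & 7) = 2 within it.
 */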
static void mchp_core_pwm_wait_for_sync_update(struct mchp_core_pwm_chip *mchp_core_pwm,
unsigned int channel)
{
/*
* If a shadow register is used for this PWM channel, and iff there is
* a pending update to the waveform, we must wait for it to be applied
* before attempting to read its state. Reading the registers yields
* the currently implemented settings & the new ones are only readable
* once the current period has ended.
*/
if (mchp_core_pwm->sync_update_mask & (1 << channel)) {
ktime_t current_time = ktime_get();
s64 remaining_ns;
u32 delay_us;
remaining_ns = ktime_to_ns(ktime_sub(mchp_core_pwm->update_timestamp,
current_time));
/*
* If the update has gone through, don't bother waiting for
* obvious reasons. Otherwise wait around for an appropriate
* amount of time for the update to go through.
*/
if (remaining_ns <= 0)
return;
delay_us = DIV_ROUND_UP_ULL(remaining_ns, NSEC_PER_USEC);
fsleep(delay_us);
}
}
static u64 mchp_core_pwm_calc_duty(const struct pwm_state *state, u64 clk_rate,
u8 prescale, u8 period_steps)
{
u64 duty_steps, tmp;
/*
* Calculate the duty cycle in multiples of the prescaled period:
* duty_steps = duty_in_ns / step_in_ns
* step_in_ns = (prescale * NSEC_PER_SEC) / clk_rate
* The code below is rearranged slightly to only divide once.
*/
tmp = (((u64)prescale) + 1) * NSEC_PER_SEC;
duty_steps = mul_u64_u64_div_u64(state->duty_cycle, clk_rate, tmp);
return duty_steps;
}
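/*
 * Worked example (illustrative; assumes a hypothetical 50 MHz clock): with
 * prescale = 7, one step is (7 + 1) * NSEC_PER_SEC / 50000000 = 160 ns, so a
 * requested duty_cycle of 40000 ns maps to
 * duty_steps = 40000 * 50000000 / ((7 + 1) * NSEC_PER_SEC) = 250.
 */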
static void mchp_core_pwm_apply_duty(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state, u64 duty_steps,
u16 period_steps)
{
struct mchp_core_pwm_chip *mchp_core_pwm = to_mchp_core_pwm(chip);
u8 posedge, negedge;
u8 first_edge = 0, second_edge = duty_steps;
/*
* Setting posedge == negedge doesn't yield a constant output,
* so that's an unsuitable setting to model duty_steps = 0.
* In that case set the unwanted edge to a value that never
* triggers.
*/
if (duty_steps == 0)
first_edge = period_steps + 1;
if (state->polarity == PWM_POLARITY_INVERSED) {
negedge = first_edge;
posedge = second_edge;
} else {
posedge = first_edge;
negedge = second_edge;
}
/*
* Set the sync bit which ensures that periods that already started are
* completed unaltered. At each counter reset event the values are
* updated from the shadow registers.
*/
writel_relaxed(posedge, mchp_core_pwm->base + MCHPCOREPWM_POSEDGE(pwm->hwpwm));
writel_relaxed(negedge, mchp_core_pwm->base + MCHPCOREPWM_NEGEDGE(pwm->hwpwm));
}
static int mchp_core_pwm_calc_period(const struct pwm_state *state, unsigned long clk_rate,
u16 *prescale, u16 *period_steps)
{
u64 tmp;
/*
* Calculate the period cycles and prescale values.
* The registers are each 8 bits wide & multiplied to compute the period
* using the formula:
* (prescale + 1) * (period_steps + 1)
* period = -------------------------------------
* clk_rate
* so the maximum period that can be generated is 0x10000 times the
* period of the input clock.
* However, due to the design of the "hardware", it is not possible to
* attain a 100% duty cycle if the full range of period_steps is used.
* Therefore period_steps is restricted to 0xfe and the maximum multiple
* of the clock period attainable is (0xff + 1) * (0xfe + 1) = 0xff00
*
* The prescale and period_steps registers operate similarly to
* CLK_DIVIDER_ONE_BASED, where the value used by the hardware is that
* in the register plus one.
* It's therefore not possible to set a period lower than 1/clk_rate, so
* if tmp is 0, abort. Without aborting, we will set a period that is
* greater than that requested and, more importantly, will trigger the
* neg-/pos-edge issue described in the limitations.
*/
tmp = mul_u64_u64_div_u64(state->period, clk_rate, NSEC_PER_SEC);
if (tmp >= MCHPCOREPWM_PERIOD_MAX) {
*prescale = MCHPCOREPWM_PRESCALE_MAX;
*period_steps = MCHPCOREPWM_PERIOD_STEPS_MAX;
return 0;
}
/*
* There are multiple strategies that could be used to choose the
* prescale & period_steps values.
* Here the idea is to pick values so that the selection of duty cycles
* is as fine-grained as possible, while also keeping the period less than
* that requested.
*
* A simple way to satisfy the first condition is to always set
* period_steps to its maximum value. This neatly also satisfies the
* second condition too, since using the maximum value of period_steps
* to calculate prescale actually calculates its upper bound.
* Integer division will ensure a round down, so the period will thereby
* always be less than that requested.
*
* The downside of this approach is a significant degree of inaccuracy,
* especially as tmp approaches integer multiples of
* MCHPCOREPWM_PERIOD_STEPS_MAX.
*
* As we must produce a period less than that requested, and for the
* sake of creating a simple algorithm, disallow small values of tmp
* that would need special handling.
*/
if (tmp < MCHPCOREPWM_PERIOD_STEPS_MAX + 1)
return -EINVAL;
/*
* This "optimal" value for prescale is calculated using the maximum
* permitted value of period_steps, 0xfe.
*
* period * clk_rate
* prescale = ------------------------- - 1
* NSEC_PER_SEC * (0xfe + 1)
*
*
* period * clk_rate
* ------------------- was precomputed as `tmp`
* NSEC_PER_SEC
*/
*prescale = ((u16)tmp) / (MCHPCOREPWM_PERIOD_STEPS_MAX + 1) - 1;
/*
* period_steps can be computed from prescale:
* period * clk_rate
* period_steps = ----------------------------- - 1
* NSEC_PER_SEC * (prescale + 1)
*
* However, in this approximation, we simply use the maximum value that
* was used to compute prescale.
*/
*period_steps = MCHPCOREPWM_PERIOD_STEPS_MAX;
return 0;
}
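/*
 * Worked example (illustrative; same hypothetical 50 MHz clock as above): a
 * requested period of 40800 ns gives
 * tmp = 40800 * 50000000 / NSEC_PER_SEC = 2040,
 * which is neither >= 0xff00 nor < 0xff, so
 * prescale = 2040 / 255 - 1 = 7 and period_steps = 0xfe,
 * for an effective period of (7 + 1) * (0xfe + 1) / 50 MHz = 40800 ns.
 */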
static int mchp_core_pwm_apply_locked(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
struct mchp_core_pwm_chip *mchp_core_pwm = to_mchp_core_pwm(chip);
bool period_locked;
unsigned long clk_rate;
u64 duty_steps;
u16 prescale, period_steps;
int ret;
if (!state->enabled) {
mchp_core_pwm_enable(chip, pwm, false, pwm->state.period);
return 0;
}
/*
* If clk_rate is too big, the following multiplication might overflow.
* However this is implausible, as the fabric of current FPGAs cannot
* provide clocks at a rate high enough.
*/
clk_rate = clk_get_rate(mchp_core_pwm->clk);
if (clk_rate >= NSEC_PER_SEC)
return -EINVAL;
ret = mchp_core_pwm_calc_period(state, clk_rate, &prescale, &period_steps);
if (ret)
return ret;
/*
* If the only thing that has changed is the duty cycle or the polarity,
* we can shortcut the calculations and just compute/apply the new duty
* cycle pos & neg edges
* As all the channels share the same period, do not allow it to be
* changed if any other channels are enabled.
* If the period is locked, it may not be possible to use a period
* less than that requested. In that case, we just abort.
*/
period_locked = mchp_core_pwm->channel_enabled & ~(1 << pwm->hwpwm);
if (period_locked) {
u16 hw_prescale;
u16 hw_period_steps;
hw_prescale = readb_relaxed(mchp_core_pwm->base + MCHPCOREPWM_PRESCALE);
hw_period_steps = readb_relaxed(mchp_core_pwm->base + MCHPCOREPWM_PERIOD);
if ((period_steps + 1) * (prescale + 1) <
(hw_period_steps + 1) * (hw_prescale + 1))
return -EINVAL;
/*
* It is possible that something could have set the period_steps
* register to 0xff, which would prevent us from setting a 100%
* or 0% relative duty cycle, as explained above in
* mchp_core_pwm_calc_period().
* The period is locked and we cannot change this, so we abort.
*/
if (hw_period_steps == MCHPCOREPWM_PERIOD_STEPS_MAX)
return -EINVAL;
prescale = hw_prescale;
period_steps = hw_period_steps;
}
duty_steps = mchp_core_pwm_calc_duty(state, clk_rate, prescale, period_steps);
/*
* Because the period is not per channel, it is possible that the
* requested duty cycle is longer than the period, in which case we cap
* it to the period, i.e. a 100% duty cycle.
*/
if (duty_steps > period_steps)
duty_steps = period_steps + 1;
if (!period_locked) {
writel_relaxed(prescale, mchp_core_pwm->base + MCHPCOREPWM_PRESCALE);
writel_relaxed(period_steps, mchp_core_pwm->base + MCHPCOREPWM_PERIOD);
}
mchp_core_pwm_apply_duty(chip, pwm, state, duty_steps, period_steps);
mchp_core_pwm_enable(chip, pwm, true, pwm->state.period);
return 0;
}
static int mchp_core_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
struct mchp_core_pwm_chip *mchp_core_pwm = to_mchp_core_pwm(chip);
int ret;
mutex_lock(&mchp_core_pwm->lock);
mchp_core_pwm_wait_for_sync_update(mchp_core_pwm, pwm->hwpwm);
ret = mchp_core_pwm_apply_locked(chip, pwm, state);
mutex_unlock(&mchp_core_pwm->lock);
return ret;
}
static int mchp_core_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_state *state)
{
struct mchp_core_pwm_chip *mchp_core_pwm = to_mchp_core_pwm(chip);
u64 rate;
u16 prescale, period_steps;
u8 duty_steps, posedge, negedge;
mutex_lock(&mchp_core_pwm->lock);
mchp_core_pwm_wait_for_sync_update(mchp_core_pwm, pwm->hwpwm);
if (mchp_core_pwm->channel_enabled & (1 << pwm->hwpwm))
state->enabled = true;
else
state->enabled = false;
rate = clk_get_rate(mchp_core_pwm->clk);
/*
* Calculating the period:
* The registers are each 8 bits wide & multiplied to compute the period
* using the formula:
* (prescale + 1) * (period_steps + 1)
* period = -------------------------------------
* clk_rate
*
* Note:
* The prescale and period_steps registers operate similarly to
* CLK_DIVIDER_ONE_BASED, where the value used by the hardware is that
* in the register plus one.
*/
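/*
 * Worked example with assumed register values: prescale = 9 and
 * period_steps = 199 at clk_rate = 50 MHz give a period of
 * (9 + 1) * (199 + 1) / 50 MHz = 40 us; the code below performs the
 * same computation scaled by NSEC_PER_SEC and rounded up.
 */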
prescale = readb_relaxed(mchp_core_pwm->base + MCHPCOREPWM_PRESCALE);
period_steps = readb_relaxed(mchp_core_pwm->base + MCHPCOREPWM_PERIOD);
state->period = (period_steps + 1) * (prescale + 1);
state->period *= NSEC_PER_SEC;
state->period = DIV64_U64_ROUND_UP(state->period, rate);
posedge = readb_relaxed(mchp_core_pwm->base + MCHPCOREPWM_POSEDGE(pwm->hwpwm));
negedge = readb_relaxed(mchp_core_pwm->base + MCHPCOREPWM_NEGEDGE(pwm->hwpwm));
mutex_unlock(&mchp_core_pwm->lock);
if (negedge == posedge) {
state->duty_cycle = state->period;
state->period *= 2;
} else {
duty_steps = abs((s16)posedge - (s16)negedge);
state->duty_cycle = duty_steps * (prescale + 1) * NSEC_PER_SEC;
state->duty_cycle = DIV64_U64_ROUND_UP(state->duty_cycle, rate);
}
state->polarity = negedge < posedge ? PWM_POLARITY_INVERSED : PWM_POLARITY_NORMAL;
return 0;
}
static const struct pwm_ops mchp_core_pwm_ops = {
.apply = mchp_core_pwm_apply,
.get_state = mchp_core_pwm_get_state,
.owner = THIS_MODULE,
};
static const struct of_device_id mchp_core_of_match[] = {
{
.compatible = "microchip,corepwm-rtl-v4",
},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mchp_core_of_match);
static int mchp_core_pwm_probe(struct platform_device *pdev)
{
struct mchp_core_pwm_chip *mchp_core_pwm;
struct resource *regs;
int ret;
mchp_core_pwm = devm_kzalloc(&pdev->dev, sizeof(*mchp_core_pwm), GFP_KERNEL);
if (!mchp_core_pwm)
return -ENOMEM;
mchp_core_pwm->base = devm_platform_get_and_ioremap_resource(pdev, 0, ®s);
if (IS_ERR(mchp_core_pwm->base))
return PTR_ERR(mchp_core_pwm->base);
mchp_core_pwm->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(mchp_core_pwm->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(mchp_core_pwm->clk),
"failed to get PWM clock\n");
if (of_property_read_u32(pdev->dev.of_node, "microchip,sync-update-mask",
&mchp_core_pwm->sync_update_mask))
mchp_core_pwm->sync_update_mask = 0;
mutex_init(&mchp_core_pwm->lock);
mchp_core_pwm->chip.dev = &pdev->dev;
mchp_core_pwm->chip.ops = &mchp_core_pwm_ops;
mchp_core_pwm->chip.npwm = 16;
mchp_core_pwm->channel_enabled = readb_relaxed(mchp_core_pwm->base + MCHPCOREPWM_EN(0));
mchp_core_pwm->channel_enabled |=
readb_relaxed(mchp_core_pwm->base + MCHPCOREPWM_EN(1)) << 8;
/*
* Enable synchronous update mode for all channels for which shadow
* registers have been synthesised.
*/
writel_relaxed(1U, mchp_core_pwm->base + MCHPCOREPWM_SYNC_UPD);
mchp_core_pwm->update_timestamp = ktime_get();
ret = devm_pwmchip_add(&pdev->dev, &mchp_core_pwm->chip);
if (ret)
return dev_err_probe(&pdev->dev, ret, "Failed to add pwmchip\n");
return 0;
}
static struct platform_driver mchp_core_pwm_driver = {
.driver = {
.name = "mchp-core-pwm",
.of_match_table = mchp_core_of_match,
},
.probe = mchp_core_pwm_probe,
};
module_platform_driver(mchp_core_pwm_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Conor Dooley <[email protected]>");
MODULE_DESCRIPTION("corePWM driver for Microchip FPGAs");
|
linux-master
|
drivers/pwm/pwm-microchip-core.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* MediaTek display pulse-width-modulation controller driver.
* Copyright (c) 2015 MediaTek Inc.
* Author: YH Huang <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/slab.h>
#define DISP_PWM_EN 0x00
#define PWM_CLKDIV_SHIFT 16
#define PWM_CLKDIV_MAX 0x3ff
#define PWM_CLKDIV_MASK (PWM_CLKDIV_MAX << PWM_CLKDIV_SHIFT)
#define PWM_PERIOD_BIT_WIDTH 12
#define PWM_PERIOD_MASK ((1 << PWM_PERIOD_BIT_WIDTH) - 1)
#define PWM_HIGH_WIDTH_SHIFT 16
#define PWM_HIGH_WIDTH_MASK (0x1fff << PWM_HIGH_WIDTH_SHIFT)
struct mtk_pwm_data {
u32 enable_mask;
unsigned int con0;
u32 con0_sel;
unsigned int con1;
bool has_commit;
unsigned int commit;
unsigned int commit_mask;
unsigned int bls_debug;
u32 bls_debug_mask;
};
struct mtk_disp_pwm {
struct pwm_chip chip;
const struct mtk_pwm_data *data;
struct clk *clk_main;
struct clk *clk_mm;
void __iomem *base;
bool enabled;
};
static inline struct mtk_disp_pwm *to_mtk_disp_pwm(struct pwm_chip *chip)
{
return container_of(chip, struct mtk_disp_pwm, chip);
}
static void mtk_disp_pwm_update_bits(struct mtk_disp_pwm *mdp, u32 offset,
u32 mask, u32 data)
{
void __iomem *address = mdp->base + offset;
u32 value;
value = readl(address);
value &= ~mask;
value |= data;
writel(value, address);
}
static int mtk_disp_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
struct mtk_disp_pwm *mdp = to_mtk_disp_pwm(chip);
u32 clk_div, period, high_width, value;
u64 div, rate;
int err;
if (state->polarity != PWM_POLARITY_NORMAL)
return -EINVAL;
if (!state->enabled && mdp->enabled) {
mtk_disp_pwm_update_bits(mdp, DISP_PWM_EN,
mdp->data->enable_mask, 0x0);
clk_disable_unprepare(mdp->clk_mm);
clk_disable_unprepare(mdp->clk_main);
mdp->enabled = false;
return 0;
}
if (!mdp->enabled) {
err = clk_prepare_enable(mdp->clk_main);
if (err < 0) {
dev_err(chip->dev, "Can't enable mdp->clk_main: %pe\n",
ERR_PTR(err));
return err;
}
err = clk_prepare_enable(mdp->clk_mm);
if (err < 0) {
dev_err(chip->dev, "Can't enable mdp->clk_mm: %pe\n",
ERR_PTR(err));
clk_disable_unprepare(mdp->clk_main);
return err;
}
}
/*
* Find period, high_width and clk_div to suit duty_ns and period_ns.
* Calculate proper div value to keep period value in the bound.
*
* period_ns = 10^9 * (clk_div + 1) * (period + 1) / PWM_CLK_RATE
* duty_ns = 10^9 * (clk_div + 1) * high_width / PWM_CLK_RATE
*
* period = (PWM_CLK_RATE * period_ns) / (10^9 * (clk_div + 1)) - 1
* high_width = (PWM_CLK_RATE * duty_ns) / (10^9 * (clk_div + 1))
*/
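/*
 * Example with assumed numbers (not taken from a specific SoC): with
 * PWM_CLK_RATE = 26 MHz and period_ns = 1000000 (1 kHz), the raw count
 * is 26000000 * 1000000 / NSEC_PER_SEC = 26000, so
 * clk_div = 26000 >> 12 = 6. Then
 * period = 26000000 * 1000000 / (7 * NSEC_PER_SEC) - 1 = 3713, and a
 * 50% duty of 500000 ns gives high_width = 1857; both values fit their
 * register fields.
 */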
rate = clk_get_rate(mdp->clk_main);
clk_div = mul_u64_u64_div_u64(state->period, rate, NSEC_PER_SEC) >>
PWM_PERIOD_BIT_WIDTH;
if (clk_div > PWM_CLKDIV_MAX) {
if (!mdp->enabled) {
clk_disable_unprepare(mdp->clk_mm);
clk_disable_unprepare(mdp->clk_main);
}
return -EINVAL;
}
div = NSEC_PER_SEC * (clk_div + 1);
period = mul_u64_u64_div_u64(state->period, rate, div);
if (period > 0)
period--;
high_width = mul_u64_u64_div_u64(state->duty_cycle, rate, div);
value = period | (high_width << PWM_HIGH_WIDTH_SHIFT);
if (mdp->data->bls_debug && !mdp->data->has_commit) {
/*
* For MT2701, disable the double buffer before writing to the
* registers, then select manual mode and use PWM_PERIOD/PWM_HIGH_WIDTH.
*/
mtk_disp_pwm_update_bits(mdp, mdp->data->bls_debug,
mdp->data->bls_debug_mask,
mdp->data->bls_debug_mask);
mtk_disp_pwm_update_bits(mdp, mdp->data->con0,
mdp->data->con0_sel,
mdp->data->con0_sel);
}
mtk_disp_pwm_update_bits(mdp, mdp->data->con0,
PWM_CLKDIV_MASK,
clk_div << PWM_CLKDIV_SHIFT);
mtk_disp_pwm_update_bits(mdp, mdp->data->con1,
PWM_PERIOD_MASK | PWM_HIGH_WIDTH_MASK,
value);
if (mdp->data->has_commit) {
mtk_disp_pwm_update_bits(mdp, mdp->data->commit,
mdp->data->commit_mask,
mdp->data->commit_mask);
mtk_disp_pwm_update_bits(mdp, mdp->data->commit,
mdp->data->commit_mask,
0x0);
}
mtk_disp_pwm_update_bits(mdp, DISP_PWM_EN, mdp->data->enable_mask,
mdp->data->enable_mask);
mdp->enabled = true;
return 0;
}
static int mtk_disp_pwm_get_state(struct pwm_chip *chip,
struct pwm_device *pwm,
struct pwm_state *state)
{
struct mtk_disp_pwm *mdp = to_mtk_disp_pwm(chip);
u64 rate, period, high_width;
u32 clk_div, pwm_en, con0, con1;
int err;
err = clk_prepare_enable(mdp->clk_main);
if (err < 0) {
dev_err(chip->dev, "Can't enable mdp->clk_main: %pe\n", ERR_PTR(err));
return err;
}
err = clk_prepare_enable(mdp->clk_mm);
if (err < 0) {
dev_err(chip->dev, "Can't enable mdp->clk_mm: %pe\n", ERR_PTR(err));
clk_disable_unprepare(mdp->clk_main);
return err;
}
/*
* Apply DISP_PWM_DEBUG settings to choose whether to enable or disable
* the register double buffer and manual commit to the working register
* before performing any read/write operation.
*/
if (mdp->data->bls_debug)
mtk_disp_pwm_update_bits(mdp, mdp->data->bls_debug,
mdp->data->bls_debug_mask,
mdp->data->bls_debug_mask);
rate = clk_get_rate(mdp->clk_main);
con0 = readl(mdp->base + mdp->data->con0);
con1 = readl(mdp->base + mdp->data->con1);
pwm_en = readl(mdp->base + DISP_PWM_EN);
state->enabled = !!(pwm_en & mdp->data->enable_mask);
clk_div = FIELD_GET(PWM_CLKDIV_MASK, con0);
period = FIELD_GET(PWM_PERIOD_MASK, con1);
/*
* period fits in 12 bits, clk_div + 1 in 11 bits and NSEC_PER_SEC in 30
* bits, so period * (clk_div + 1) * NSEC_PER_SEC (at most 53 bits)
* cannot overflow a u64.
*/
state->period = DIV64_U64_ROUND_UP(period * (clk_div + 1) * NSEC_PER_SEC, rate);
high_width = FIELD_GET(PWM_HIGH_WIDTH_MASK, con1);
state->duty_cycle = DIV64_U64_ROUND_UP(high_width * (clk_div + 1) * NSEC_PER_SEC,
rate);
state->polarity = PWM_POLARITY_NORMAL;
clk_disable_unprepare(mdp->clk_mm);
clk_disable_unprepare(mdp->clk_main);
return 0;
}
static const struct pwm_ops mtk_disp_pwm_ops = {
.apply = mtk_disp_pwm_apply,
.get_state = mtk_disp_pwm_get_state,
.owner = THIS_MODULE,
};
static int mtk_disp_pwm_probe(struct platform_device *pdev)
{
struct mtk_disp_pwm *mdp;
int ret;
mdp = devm_kzalloc(&pdev->dev, sizeof(*mdp), GFP_KERNEL);
if (!mdp)
return -ENOMEM;
mdp->data = of_device_get_match_data(&pdev->dev);
mdp->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mdp->base))
return PTR_ERR(mdp->base);
mdp->clk_main = devm_clk_get(&pdev->dev, "main");
if (IS_ERR(mdp->clk_main))
return PTR_ERR(mdp->clk_main);
mdp->clk_mm = devm_clk_get(&pdev->dev, "mm");
if (IS_ERR(mdp->clk_mm))
return PTR_ERR(mdp->clk_mm);
mdp->chip.dev = &pdev->dev;
mdp->chip.ops = &mtk_disp_pwm_ops;
mdp->chip.npwm = 1;
ret = pwmchip_add(&mdp->chip);
if (ret < 0) {
dev_err(&pdev->dev, "pwmchip_add() failed: %pe\n", ERR_PTR(ret));
return ret;
}
platform_set_drvdata(pdev, mdp);
return 0;
}
static void mtk_disp_pwm_remove(struct platform_device *pdev)
{
struct mtk_disp_pwm *mdp = platform_get_drvdata(pdev);
pwmchip_remove(&mdp->chip);
}
static const struct mtk_pwm_data mt2701_pwm_data = {
.enable_mask = BIT(16),
.con0 = 0xa8,
.con0_sel = 0x2,
.con1 = 0xac,
.has_commit = false,
.bls_debug = 0xb0,
.bls_debug_mask = 0x3,
};
static const struct mtk_pwm_data mt8173_pwm_data = {
.enable_mask = BIT(0),
.con0 = 0x10,
.con0_sel = 0x0,
.con1 = 0x14,
.has_commit = true,
.commit = 0x8,
.commit_mask = 0x1,
};
static const struct mtk_pwm_data mt8183_pwm_data = {
.enable_mask = BIT(0),
.con0 = 0x18,
.con0_sel = 0x0,
.con1 = 0x1c,
.has_commit = false,
.bls_debug = 0x80,
.bls_debug_mask = 0x3,
};
static const struct of_device_id mtk_disp_pwm_of_match[] = {
{ .compatible = "mediatek,mt2701-disp-pwm", .data = &mt2701_pwm_data},
{ .compatible = "mediatek,mt6595-disp-pwm", .data = &mt8173_pwm_data},
{ .compatible = "mediatek,mt8173-disp-pwm", .data = &mt8173_pwm_data},
{ .compatible = "mediatek,mt8183-disp-pwm", .data = &mt8183_pwm_data},
{ }
};
MODULE_DEVICE_TABLE(of, mtk_disp_pwm_of_match);
static struct platform_driver mtk_disp_pwm_driver = {
.driver = {
.name = "mediatek-disp-pwm",
.of_match_table = mtk_disp_pwm_of_match,
},
.probe = mtk_disp_pwm_probe,
.remove_new = mtk_disp_pwm_remove,
};
module_platform_driver(mtk_disp_pwm_driver);
MODULE_AUTHOR("YH Huang <[email protected]>");
MODULE_DESCRIPTION("MediaTek SoC display PWM driver");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/pwm/pwm-mtk-disp.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP LPC18xx State Configurable Timer - Pulse Width Modulator driver
*
* Copyright (c) 2015 Ariel D'Alessandro <[email protected]>
*
* Notes
* =====
* NXP LPC18xx provides a State Configurable Timer (SCT) which can be configured
* as a Pulse Width Modulator.
*
* SCT supports 16 outputs, 16 events and 16 registers. Each event will be
* triggered when its related register matches the SCT counter value, and it
* will set or clear a selected output.
*
* One of the events is preselected to generate the period, thus the maximum
* number of simultaneous channels is limited to 15. Notice that the
* period is global to all the channels, so the PWM driver will refuse
* to set different values for it unless only one channel is requested.
*/
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
/* LPC18xx SCT registers */
#define LPC18XX_PWM_CONFIG 0x000
#define LPC18XX_PWM_CONFIG_UNIFY BIT(0)
#define LPC18XX_PWM_CONFIG_NORELOAD BIT(7)
#define LPC18XX_PWM_CTRL 0x004
#define LPC18XX_PWM_CTRL_HALT BIT(2)
#define LPC18XX_PWM_BIDIR BIT(4)
#define LPC18XX_PWM_PRE_SHIFT 5
#define LPC18XX_PWM_PRE_MASK (0xff << LPC18XX_PWM_PRE_SHIFT)
#define LPC18XX_PWM_PRE(x) (x << LPC18XX_PWM_PRE_SHIFT)
#define LPC18XX_PWM_LIMIT 0x008
#define LPC18XX_PWM_RES_BASE 0x058
#define LPC18XX_PWM_RES_SHIFT(_ch) (_ch * 2)
#define LPC18XX_PWM_RES(_ch, _action) (_action << LPC18XX_PWM_RES_SHIFT(_ch))
#define LPC18XX_PWM_RES_MASK(_ch) (0x3 << LPC18XX_PWM_RES_SHIFT(_ch))
#define LPC18XX_PWM_MATCH_BASE 0x100
#define LPC18XX_PWM_MATCH(_ch) (LPC18XX_PWM_MATCH_BASE + _ch * 4)
#define LPC18XX_PWM_MATCHREL_BASE 0x200
#define LPC18XX_PWM_MATCHREL(_ch) (LPC18XX_PWM_MATCHREL_BASE + _ch * 4)
#define LPC18XX_PWM_EVSTATEMSK_BASE 0x300
#define LPC18XX_PWM_EVSTATEMSK(_ch) (LPC18XX_PWM_EVSTATEMSK_BASE + _ch * 8)
#define LPC18XX_PWM_EVSTATEMSK_ALL 0xffffffff
#define LPC18XX_PWM_EVCTRL_BASE 0x304
#define LPC18XX_PWM_EVCTRL(_ev) (LPC18XX_PWM_EVCTRL_BASE + _ev * 8)
#define LPC18XX_PWM_EVCTRL_MATCH(_ch) _ch
#define LPC18XX_PWM_EVCTRL_COMB_SHIFT 12
#define LPC18XX_PWM_EVCTRL_COMB_MATCH (0x1 << LPC18XX_PWM_EVCTRL_COMB_SHIFT)
#define LPC18XX_PWM_OUTPUTSET_BASE 0x500
#define LPC18XX_PWM_OUTPUTSET(_ch) (LPC18XX_PWM_OUTPUTSET_BASE + _ch * 8)
#define LPC18XX_PWM_OUTPUTCL_BASE 0x504
#define LPC18XX_PWM_OUTPUTCL(_ch) (LPC18XX_PWM_OUTPUTCL_BASE + _ch * 8)
/* LPC18xx SCT unified counter */
#define LPC18XX_PWM_TIMER_MAX 0xffffffff
/* LPC18xx SCT events */
#define LPC18XX_PWM_EVENT_PERIOD 0
#define LPC18XX_PWM_EVENT_MAX 16
#define LPC18XX_NUM_PWMS 16
/* SCT conflict resolution */
enum lpc18xx_pwm_res_action {
LPC18XX_PWM_RES_NONE,
LPC18XX_PWM_RES_SET,
LPC18XX_PWM_RES_CLEAR,
LPC18XX_PWM_RES_TOGGLE,
};
struct lpc18xx_pwm_data {
unsigned int duty_event;
};
struct lpc18xx_pwm_chip {
struct device *dev;
struct pwm_chip chip;
void __iomem *base;
struct clk *pwm_clk;
unsigned long clk_rate;
unsigned int period_ns;
unsigned int min_period_ns;
u64 max_period_ns;
unsigned int period_event;
unsigned long event_map;
struct mutex res_lock;
struct mutex period_lock;
struct lpc18xx_pwm_data channeldata[LPC18XX_NUM_PWMS];
};
static inline struct lpc18xx_pwm_chip *
to_lpc18xx_pwm_chip(struct pwm_chip *chip)
{
return container_of(chip, struct lpc18xx_pwm_chip, chip);
}
static inline void lpc18xx_pwm_writel(struct lpc18xx_pwm_chip *lpc18xx_pwm,
u32 reg, u32 val)
{
writel(val, lpc18xx_pwm->base + reg);
}
static inline u32 lpc18xx_pwm_readl(struct lpc18xx_pwm_chip *lpc18xx_pwm,
u32 reg)
{
return readl(lpc18xx_pwm->base + reg);
}
static void lpc18xx_pwm_set_conflict_res(struct lpc18xx_pwm_chip *lpc18xx_pwm,
struct pwm_device *pwm,
enum lpc18xx_pwm_res_action action)
{
u32 val;
mutex_lock(&lpc18xx_pwm->res_lock);
/*
* Simultaneous set and clear may happen on an output, that is the case
* when duty_ns == period_ns. LPC18xx SCT allows to set a conflict
* resolution action to be taken in such a case.
*/
val = lpc18xx_pwm_readl(lpc18xx_pwm, LPC18XX_PWM_RES_BASE);
val &= ~LPC18XX_PWM_RES_MASK(pwm->hwpwm);
val |= LPC18XX_PWM_RES(pwm->hwpwm, action);
lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_RES_BASE, val);
mutex_unlock(&lpc18xx_pwm->res_lock);
}
static void lpc18xx_pwm_config_period(struct pwm_chip *chip, u64 period_ns)
{
struct lpc18xx_pwm_chip *lpc18xx_pwm = to_lpc18xx_pwm_chip(chip);
u32 val;
/*
* With clk_rate < NSEC_PER_SEC this cannot overflow.
* With period_ns < max_period_ns this also fits into an u32.
* As period_ns >= min_period_ns = DIV_ROUND_UP(NSEC_PER_SEC, lpc18xx_pwm->clk_rate);
* we have val >= 1.
*/
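/*
 * Example under an assumed clk_rate of 100 MHz: period_ns = 1000000
 * yields val = 100000, so 99999 is written to the match registers and
 * the counter wraps every 1 ms.
 */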
val = mul_u64_u64_div_u64(period_ns, lpc18xx_pwm->clk_rate, NSEC_PER_SEC);
lpc18xx_pwm_writel(lpc18xx_pwm,
LPC18XX_PWM_MATCH(lpc18xx_pwm->period_event),
val - 1);
lpc18xx_pwm_writel(lpc18xx_pwm,
LPC18XX_PWM_MATCHREL(lpc18xx_pwm->period_event),
val - 1);
}
static void lpc18xx_pwm_config_duty(struct pwm_chip *chip,
struct pwm_device *pwm, u64 duty_ns)
{
struct lpc18xx_pwm_chip *lpc18xx_pwm = to_lpc18xx_pwm_chip(chip);
struct lpc18xx_pwm_data *lpc18xx_data = &lpc18xx_pwm->channeldata[pwm->hwpwm];
u32 val;
/*
* With clk_rate <= NSEC_PER_SEC this cannot overflow.
* With duty_ns <= period_ns < max_period_ns this also fits into an u32.
*/
val = mul_u64_u64_div_u64(duty_ns, lpc18xx_pwm->clk_rate, NSEC_PER_SEC);
lpc18xx_pwm_writel(lpc18xx_pwm,
LPC18XX_PWM_MATCH(lpc18xx_data->duty_event),
val);
lpc18xx_pwm_writel(lpc18xx_pwm,
LPC18XX_PWM_MATCHREL(lpc18xx_data->duty_event),
val);
}
static int lpc18xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
int duty_ns, int period_ns)
{
struct lpc18xx_pwm_chip *lpc18xx_pwm = to_lpc18xx_pwm_chip(chip);
int requested_events, i;
if (period_ns < lpc18xx_pwm->min_period_ns ||
period_ns > lpc18xx_pwm->max_period_ns) {
dev_err(chip->dev, "period %d not in range\n", period_ns);
return -ERANGE;
}
mutex_lock(&lpc18xx_pwm->period_lock);
requested_events = bitmap_weight(&lpc18xx_pwm->event_map,
LPC18XX_PWM_EVENT_MAX);
/*
* The PWM supports only a single period for all PWM channels.
* Once the period is set, it can only be changed if no more than one
* channel is requested at that moment.
*/
if (requested_events > 2 && lpc18xx_pwm->period_ns != period_ns &&
lpc18xx_pwm->period_ns) {
dev_err(chip->dev, "conflicting period requested for PWM %u\n",
pwm->hwpwm);
mutex_unlock(&lpc18xx_pwm->period_lock);
return -EBUSY;
}
if ((requested_events <= 2 && lpc18xx_pwm->period_ns != period_ns) ||
!lpc18xx_pwm->period_ns) {
lpc18xx_pwm->period_ns = period_ns;
for (i = 0; i < chip->npwm; i++)
pwm_set_period(&chip->pwms[i], period_ns);
lpc18xx_pwm_config_period(chip, period_ns);
}
mutex_unlock(&lpc18xx_pwm->period_lock);
lpc18xx_pwm_config_duty(chip, pwm, duty_ns);
return 0;
}
static int lpc18xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm, enum pwm_polarity polarity)
{
struct lpc18xx_pwm_chip *lpc18xx_pwm = to_lpc18xx_pwm_chip(chip);
struct lpc18xx_pwm_data *lpc18xx_data = &lpc18xx_pwm->channeldata[pwm->hwpwm];
enum lpc18xx_pwm_res_action res_action;
unsigned int set_event, clear_event;
lpc18xx_pwm_writel(lpc18xx_pwm,
LPC18XX_PWM_EVCTRL(lpc18xx_data->duty_event),
LPC18XX_PWM_EVCTRL_MATCH(lpc18xx_data->duty_event) |
LPC18XX_PWM_EVCTRL_COMB_MATCH);
lpc18xx_pwm_writel(lpc18xx_pwm,
LPC18XX_PWM_EVSTATEMSK(lpc18xx_data->duty_event),
LPC18XX_PWM_EVSTATEMSK_ALL);
if (polarity == PWM_POLARITY_NORMAL) {
set_event = lpc18xx_pwm->period_event;
clear_event = lpc18xx_data->duty_event;
res_action = LPC18XX_PWM_RES_SET;
} else {
set_event = lpc18xx_data->duty_event;
clear_event = lpc18xx_pwm->period_event;
res_action = LPC18XX_PWM_RES_CLEAR;
}
lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_OUTPUTSET(pwm->hwpwm),
BIT(set_event));
lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_OUTPUTCL(pwm->hwpwm),
BIT(clear_event));
lpc18xx_pwm_set_conflict_res(lpc18xx_pwm, pwm, res_action);
return 0;
}
static void lpc18xx_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct lpc18xx_pwm_chip *lpc18xx_pwm = to_lpc18xx_pwm_chip(chip);
struct lpc18xx_pwm_data *lpc18xx_data = &lpc18xx_pwm->channeldata[pwm->hwpwm];
lpc18xx_pwm_writel(lpc18xx_pwm,
LPC18XX_PWM_EVCTRL(lpc18xx_data->duty_event), 0);
lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_OUTPUTSET(pwm->hwpwm), 0);
lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_OUTPUTCL(pwm->hwpwm), 0);
}
static int lpc18xx_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct lpc18xx_pwm_chip *lpc18xx_pwm = to_lpc18xx_pwm_chip(chip);
struct lpc18xx_pwm_data *lpc18xx_data = &lpc18xx_pwm->channeldata[pwm->hwpwm];
unsigned long event;
event = find_first_zero_bit(&lpc18xx_pwm->event_map,
LPC18XX_PWM_EVENT_MAX);
if (event >= LPC18XX_PWM_EVENT_MAX) {
dev_err(lpc18xx_pwm->dev,
"maximum number of simultaneous channels reached\n");
return -EBUSY;
}
set_bit(event, &lpc18xx_pwm->event_map);
lpc18xx_data->duty_event = event;
return 0;
}
static void lpc18xx_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct lpc18xx_pwm_chip *lpc18xx_pwm = to_lpc18xx_pwm_chip(chip);
struct lpc18xx_pwm_data *lpc18xx_data = &lpc18xx_pwm->channeldata[pwm->hwpwm];
clear_bit(lpc18xx_data->duty_event, &lpc18xx_pwm->event_map);
}
static int lpc18xx_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
int err;
bool enabled = pwm->state.enabled;
if (state->polarity != pwm->state.polarity && pwm->state.enabled) {
lpc18xx_pwm_disable(chip, pwm);
enabled = false;
}
if (!state->enabled) {
if (enabled)
lpc18xx_pwm_disable(chip, pwm);
return 0;
}
err = lpc18xx_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period);
if (err)
return err;
if (!enabled)
err = lpc18xx_pwm_enable(chip, pwm, state->polarity);
return err;
}
static const struct pwm_ops lpc18xx_pwm_ops = {
.apply = lpc18xx_pwm_apply,
.request = lpc18xx_pwm_request,
.free = lpc18xx_pwm_free,
.owner = THIS_MODULE,
};
static const struct of_device_id lpc18xx_pwm_of_match[] = {
{ .compatible = "nxp,lpc1850-sct-pwm" },
{}
};
MODULE_DEVICE_TABLE(of, lpc18xx_pwm_of_match);
static int lpc18xx_pwm_probe(struct platform_device *pdev)
{
struct lpc18xx_pwm_chip *lpc18xx_pwm;
int ret;
u64 val;
lpc18xx_pwm = devm_kzalloc(&pdev->dev, sizeof(*lpc18xx_pwm),
GFP_KERNEL);
if (!lpc18xx_pwm)
return -ENOMEM;
lpc18xx_pwm->dev = &pdev->dev;
lpc18xx_pwm->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(lpc18xx_pwm->base))
return PTR_ERR(lpc18xx_pwm->base);
lpc18xx_pwm->pwm_clk = devm_clk_get_enabled(&pdev->dev, "pwm");
if (IS_ERR(lpc18xx_pwm->pwm_clk))
return dev_err_probe(&pdev->dev, PTR_ERR(lpc18xx_pwm->pwm_clk),
"failed to get pwm clock\n");
lpc18xx_pwm->clk_rate = clk_get_rate(lpc18xx_pwm->pwm_clk);
if (!lpc18xx_pwm->clk_rate)
return dev_err_probe(&pdev->dev,
-EINVAL, "pwm clock has no frequency\n");
/*
* If clk_rate is too fast, the calculations in .apply() might overflow.
*/
if (lpc18xx_pwm->clk_rate > NSEC_PER_SEC)
return dev_err_probe(&pdev->dev, -EINVAL, "pwm clock to fast\n");
mutex_init(&lpc18xx_pwm->res_lock);
mutex_init(&lpc18xx_pwm->period_lock);
lpc18xx_pwm->max_period_ns =
mul_u64_u64_div_u64(NSEC_PER_SEC, LPC18XX_PWM_TIMER_MAX, lpc18xx_pwm->clk_rate);
lpc18xx_pwm->min_period_ns = DIV_ROUND_UP(NSEC_PER_SEC,
lpc18xx_pwm->clk_rate);
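/*
 * For instance, at an assumed clk_rate of 100 MHz this gives
 * min_period_ns = 10 and a max_period_ns of roughly 42.9 seconds
 * (NSEC_PER_SEC * 0xffffffff / 100000000).
 */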
lpc18xx_pwm->chip.dev = &pdev->dev;
lpc18xx_pwm->chip.ops = &lpc18xx_pwm_ops;
lpc18xx_pwm->chip.npwm = LPC18XX_NUM_PWMS;
/* SCT counter must be in unify (32 bit) mode */
lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_CONFIG,
LPC18XX_PWM_CONFIG_UNIFY);
/*
* Every time the timer counter reaches the period value, the related
* event will be triggered and the counter reset to 0.
*/
set_bit(LPC18XX_PWM_EVENT_PERIOD, &lpc18xx_pwm->event_map);
lpc18xx_pwm->period_event = LPC18XX_PWM_EVENT_PERIOD;
lpc18xx_pwm_writel(lpc18xx_pwm,
LPC18XX_PWM_EVSTATEMSK(lpc18xx_pwm->period_event),
LPC18XX_PWM_EVSTATEMSK_ALL);
val = LPC18XX_PWM_EVCTRL_MATCH(lpc18xx_pwm->period_event) |
LPC18XX_PWM_EVCTRL_COMB_MATCH;
lpc18xx_pwm_writel(lpc18xx_pwm,
LPC18XX_PWM_EVCTRL(lpc18xx_pwm->period_event), val);
lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_LIMIT,
BIT(lpc18xx_pwm->period_event));
val = lpc18xx_pwm_readl(lpc18xx_pwm, LPC18XX_PWM_CTRL);
val &= ~LPC18XX_PWM_BIDIR;
val &= ~LPC18XX_PWM_CTRL_HALT;
val &= ~LPC18XX_PWM_PRE_MASK;
val |= LPC18XX_PWM_PRE(0);
lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_CTRL, val);
ret = pwmchip_add(&lpc18xx_pwm->chip);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret, "pwmchip_add failed\n");
platform_set_drvdata(pdev, lpc18xx_pwm);
return 0;
}
static void lpc18xx_pwm_remove(struct platform_device *pdev)
{
struct lpc18xx_pwm_chip *lpc18xx_pwm = platform_get_drvdata(pdev);
u32 val;
pwmchip_remove(&lpc18xx_pwm->chip);
val = lpc18xx_pwm_readl(lpc18xx_pwm, LPC18XX_PWM_CTRL);
lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_CTRL,
val | LPC18XX_PWM_CTRL_HALT);
}
static struct platform_driver lpc18xx_pwm_driver = {
.driver = {
.name = "lpc18xx-sct-pwm",
.of_match_table = lpc18xx_pwm_of_match,
},
.probe = lpc18xx_pwm_probe,
.remove_new = lpc18xx_pwm_remove,
};
module_platform_driver(lpc18xx_pwm_driver);
MODULE_AUTHOR("Ariel D'Alessandro <[email protected]>");
MODULE_DESCRIPTION("NXP LPC18xx PWM driver");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/pwm/pwm-lpc18xx-sct.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Driver for PCA9685 16-channel 12-bit PWM LED controller
*
* Copyright (C) 2013 Steffen Trumtrar <[email protected]>
* Copyright (C) 2015 Clemens Gruber <[email protected]>
*
* based on the pwm-twl-led.c driver
*/
#include <linux/acpi.h>
#include <linux/gpio/driver.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/pwm.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/bitmap.h>
/*
* Because the PCA9685 has only one prescaler per chip, only the first channel
* that is enabled is allowed to change the prescale register.
* PWM channels requested afterwards must use a period that results in the same
* prescale setting as the one set by the first requested channel.
* GPIOs do not count as enabled PWMs as they do not use the prescaler.
*/
#define PCA9685_MODE1 0x00
#define PCA9685_MODE2 0x01
#define PCA9685_SUBADDR1 0x02
#define PCA9685_SUBADDR2 0x03
#define PCA9685_SUBADDR3 0x04
#define PCA9685_ALLCALLADDR 0x05
#define PCA9685_LEDX_ON_L 0x06
#define PCA9685_LEDX_ON_H 0x07
#define PCA9685_LEDX_OFF_L 0x08
#define PCA9685_LEDX_OFF_H 0x09
#define PCA9685_ALL_LED_ON_L 0xFA
#define PCA9685_ALL_LED_ON_H 0xFB
#define PCA9685_ALL_LED_OFF_L 0xFC
#define PCA9685_ALL_LED_OFF_H 0xFD
#define PCA9685_PRESCALE 0xFE
#define PCA9685_PRESCALE_MIN 0x03 /* => max. frequency of 1526 Hz */
#define PCA9685_PRESCALE_MAX 0xFF /* => min. frequency of 24 Hz */
#define PCA9685_COUNTER_RANGE 4096
#define PCA9685_OSC_CLOCK_MHZ 25 /* Internal oscillator with 25 MHz */
#define PCA9685_NUMREGS 0xFF
#define PCA9685_MAXCHAN 0x10
#define LED_FULL BIT(4)
#define MODE1_ALLCALL BIT(0)
#define MODE1_SUB3 BIT(1)
#define MODE1_SUB2 BIT(2)
#define MODE1_SUB1 BIT(3)
#define MODE1_SLEEP BIT(4)
#define MODE2_INVRT BIT(4)
#define MODE2_OUTDRV BIT(2)
#define LED_N_ON_H(N) (PCA9685_LEDX_ON_H + (4 * (N)))
#define LED_N_ON_L(N) (PCA9685_LEDX_ON_L + (4 * (N)))
#define LED_N_OFF_H(N) (PCA9685_LEDX_OFF_H + (4 * (N)))
#define LED_N_OFF_L(N) (PCA9685_LEDX_OFF_L + (4 * (N)))
#define REG_ON_H(C) ((C) >= PCA9685_MAXCHAN ? PCA9685_ALL_LED_ON_H : LED_N_ON_H((C)))
#define REG_ON_L(C) ((C) >= PCA9685_MAXCHAN ? PCA9685_ALL_LED_ON_L : LED_N_ON_L((C)))
#define REG_OFF_H(C) ((C) >= PCA9685_MAXCHAN ? PCA9685_ALL_LED_OFF_H : LED_N_OFF_H((C)))
#define REG_OFF_L(C) ((C) >= PCA9685_MAXCHAN ? PCA9685_ALL_LED_OFF_L : LED_N_OFF_L((C)))
struct pca9685 {
struct pwm_chip chip;
struct regmap *regmap;
struct mutex lock;
DECLARE_BITMAP(pwms_enabled, PCA9685_MAXCHAN + 1);
#if IS_ENABLED(CONFIG_GPIOLIB)
struct gpio_chip gpio;
DECLARE_BITMAP(pwms_inuse, PCA9685_MAXCHAN + 1);
#endif
};
static inline struct pca9685 *to_pca(struct pwm_chip *chip)
{
return container_of(chip, struct pca9685, chip);
}
/* This function is supposed to be called with the lock mutex held */
static bool pca9685_prescaler_can_change(struct pca9685 *pca, int channel)
{
/* No PWM enabled: Change allowed */
if (bitmap_empty(pca->pwms_enabled, PCA9685_MAXCHAN + 1))
return true;
/* More than one PWM enabled: Change not allowed */
if (bitmap_weight(pca->pwms_enabled, PCA9685_MAXCHAN + 1) > 1)
return false;
/*
* Only one PWM enabled: Change allowed if the PWM about to
* be changed is the one that is already enabled
*/
return test_bit(channel, pca->pwms_enabled);
}
static int pca9685_read_reg(struct pca9685 *pca, unsigned int reg, unsigned int *val)
{
struct device *dev = pca->chip.dev;
int err;
err = regmap_read(pca->regmap, reg, val);
if (err)
dev_err(dev, "regmap_read of register 0x%x failed: %pe\n", reg, ERR_PTR(err));
return err;
}
static int pca9685_write_reg(struct pca9685 *pca, unsigned int reg, unsigned int val)
{
struct device *dev = pca->chip.dev;
int err;
err = regmap_write(pca->regmap, reg, val);
if (err)
dev_err(dev, "regmap_write to register 0x%x failed: %pe\n", reg, ERR_PTR(err));
return err;
}
/* Helper function to set the duty cycle ratio to duty/4096 (e.g. duty=2048 -> 50%) */
static void pca9685_pwm_set_duty(struct pca9685 *pca, int channel, unsigned int duty)
{
struct pwm_device *pwm = &pca->chip.pwms[channel];
unsigned int on, off;
if (duty == 0) {
/* Set the full OFF bit, which has the highest precedence */
pca9685_write_reg(pca, REG_OFF_H(channel), LED_FULL);
return;
} else if (duty >= PCA9685_COUNTER_RANGE) {
/* Set the full ON bit and clear the full OFF bit */
pca9685_write_reg(pca, REG_ON_H(channel), LED_FULL);
pca9685_write_reg(pca, REG_OFF_H(channel), 0);
return;
}
if (pwm->state.usage_power && channel < PCA9685_MAXCHAN) {
/*
* If usage_power is set, the pca9685 driver will phase shift
* the individual channels relative to their channel number.
* This improves EMI because the enabled channels no longer
* turn on at the same time, while still maintaining the
* configured duty cycle / power output.
*/
on = channel * PCA9685_COUNTER_RANGE / PCA9685_MAXCHAN;
} else
on = 0;
off = (on + duty) % PCA9685_COUNTER_RANGE;
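/*
 * For example (values assumed): channel 8 with usage_power set and
 * duty = 1024 starts at on = 8 * 4096 / 16 = 2048 and switches off at
 * off = (2048 + 1024) % 4096 = 3072, i.e. the same 25% duty cycle as
 * channel 0, just phase shifted by half a period.
 */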
/* Set ON time (clears full ON bit) */
pca9685_write_reg(pca, REG_ON_L(channel), on & 0xff);
pca9685_write_reg(pca, REG_ON_H(channel), (on >> 8) & 0xf);
/* Set OFF time (clears full OFF bit) */
pca9685_write_reg(pca, REG_OFF_L(channel), off & 0xff);
pca9685_write_reg(pca, REG_OFF_H(channel), (off >> 8) & 0xf);
}
static unsigned int pca9685_pwm_get_duty(struct pca9685 *pca, int channel)
{
struct pwm_device *pwm = &pca->chip.pwms[channel];
unsigned int off = 0, on = 0, val = 0;
if (WARN_ON(channel >= PCA9685_MAXCHAN)) {
/* HW does not support reading state of "all LEDs" channel */
return 0;
}
pca9685_read_reg(pca, LED_N_OFF_H(channel), &off);
if (off & LED_FULL) {
/* Full OFF bit is set */
return 0;
}
pca9685_read_reg(pca, LED_N_ON_H(channel), &on);
if (on & LED_FULL) {
/* Full ON bit is set */
return PCA9685_COUNTER_RANGE;
}
pca9685_read_reg(pca, LED_N_OFF_L(channel), &val);
off = ((off & 0xf) << 8) | (val & 0xff);
if (!pwm->state.usage_power)
return off;
/* Read ON register to calculate duty cycle of staggered output */
if (pca9685_read_reg(pca, LED_N_ON_L(channel), &val)) {
/* Reset val to 0 in case reading LED_N_ON_L failed */
val = 0;
}
on = ((on & 0xf) << 8) | (val & 0xff);
return (off - on) & (PCA9685_COUNTER_RANGE - 1);
}
#if IS_ENABLED(CONFIG_GPIOLIB)
static bool pca9685_pwm_test_and_set_inuse(struct pca9685 *pca, int pwm_idx)
{
bool is_inuse;
mutex_lock(&pca->lock);
if (pwm_idx >= PCA9685_MAXCHAN) {
/*
* "All LEDs" channel:
* pretend already in use if any of the PWMs are requested
*/
if (!bitmap_empty(pca->pwms_inuse, PCA9685_MAXCHAN)) {
is_inuse = true;
goto out;
}
} else {
/*
* Regular channel:
* pretend already in use if the "all LEDs" channel is requested
*/
if (test_bit(PCA9685_MAXCHAN, pca->pwms_inuse)) {
is_inuse = true;
goto out;
}
}
is_inuse = test_and_set_bit(pwm_idx, pca->pwms_inuse);
out:
mutex_unlock(&pca->lock);
return is_inuse;
}
static void pca9685_pwm_clear_inuse(struct pca9685 *pca, int pwm_idx)
{
mutex_lock(&pca->lock);
clear_bit(pwm_idx, pca->pwms_inuse);
mutex_unlock(&pca->lock);
}
static int pca9685_pwm_gpio_request(struct gpio_chip *gpio, unsigned int offset)
{
struct pca9685 *pca = gpiochip_get_data(gpio);
if (pca9685_pwm_test_and_set_inuse(pca, offset))
return -EBUSY;
pm_runtime_get_sync(pca->chip.dev);
return 0;
}
static int pca9685_pwm_gpio_get(struct gpio_chip *gpio, unsigned int offset)
{
struct pca9685 *pca = gpiochip_get_data(gpio);
return pca9685_pwm_get_duty(pca, offset) != 0;
}
static void pca9685_pwm_gpio_set(struct gpio_chip *gpio, unsigned int offset,
int value)
{
struct pca9685 *pca = gpiochip_get_data(gpio);
pca9685_pwm_set_duty(pca, offset, value ? PCA9685_COUNTER_RANGE : 0);
}
static void pca9685_pwm_gpio_free(struct gpio_chip *gpio, unsigned int offset)
{
struct pca9685 *pca = gpiochip_get_data(gpio);
pca9685_pwm_set_duty(pca, offset, 0);
pm_runtime_put(pca->chip.dev);
pca9685_pwm_clear_inuse(pca, offset);
}
static int pca9685_pwm_gpio_get_direction(struct gpio_chip *chip,
unsigned int offset)
{
/* Always out */
return GPIO_LINE_DIRECTION_OUT;
}
static int pca9685_pwm_gpio_direction_input(struct gpio_chip *gpio,
unsigned int offset)
{
return -EINVAL;
}
static int pca9685_pwm_gpio_direction_output(struct gpio_chip *gpio,
unsigned int offset, int value)
{
pca9685_pwm_gpio_set(gpio, offset, value);
return 0;
}
/*
* The PCA9685 has a bit for turning the PWM output full off or on. Some
* boards like Intel Galileo actually use these as normal GPIOs so we
* expose a GPIO chip here which can exclusively take over the underlying
* PWM channel.
*/
static int pca9685_pwm_gpio_probe(struct pca9685 *pca)
{
struct device *dev = pca->chip.dev;
pca->gpio.label = dev_name(dev);
pca->gpio.parent = dev;
pca->gpio.request = pca9685_pwm_gpio_request;
pca->gpio.free = pca9685_pwm_gpio_free;
pca->gpio.get_direction = pca9685_pwm_gpio_get_direction;
pca->gpio.direction_input = pca9685_pwm_gpio_direction_input;
pca->gpio.direction_output = pca9685_pwm_gpio_direction_output;
pca->gpio.get = pca9685_pwm_gpio_get;
pca->gpio.set = pca9685_pwm_gpio_set;
pca->gpio.base = -1;
pca->gpio.ngpio = PCA9685_MAXCHAN;
pca->gpio.can_sleep = true;
return devm_gpiochip_add_data(dev, &pca->gpio, pca);
}
#else
static inline bool pca9685_pwm_test_and_set_inuse(struct pca9685 *pca,
int pwm_idx)
{
return false;
}
static inline void
pca9685_pwm_clear_inuse(struct pca9685 *pca, int pwm_idx)
{
}
static inline int pca9685_pwm_gpio_probe(struct pca9685 *pca)
{
return 0;
}
#endif
static void pca9685_set_sleep_mode(struct pca9685 *pca, bool enable)
{
struct device *dev = pca->chip.dev;
int err = regmap_update_bits(pca->regmap, PCA9685_MODE1,
MODE1_SLEEP, enable ? MODE1_SLEEP : 0);
if (err) {
dev_err(dev, "regmap_update_bits of register 0x%x failed: %pe\n",
PCA9685_MODE1, ERR_PTR(err));
return;
}
if (!enable) {
/* Wait 500us for the oscillator to be back up */
udelay(500);
}
}
static int __pca9685_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
struct pca9685 *pca = to_pca(chip);
unsigned long long duty, prescale;
unsigned int val = 0;
if (state->polarity != PWM_POLARITY_NORMAL)
return -EINVAL;
prescale = DIV_ROUND_CLOSEST_ULL(PCA9685_OSC_CLOCK_MHZ * state->period,
PCA9685_COUNTER_RANGE * 1000) - 1;
if (prescale < PCA9685_PRESCALE_MIN || prescale > PCA9685_PRESCALE_MAX) {
dev_err(chip->dev, "pwm not changed: period out of bounds!\n");
return -EINVAL;
}
if (!state->enabled) {
pca9685_pwm_set_duty(pca, pwm->hwpwm, 0);
return 0;
}
pca9685_read_reg(pca, PCA9685_PRESCALE, &val);
if (prescale != val) {
if (!pca9685_prescaler_can_change(pca, pwm->hwpwm)) {
dev_err(chip->dev,
"pwm not changed: periods of enabled pwms must match!\n");
return -EBUSY;
}
/*
* Putting the chip briefly into SLEEP mode
* at this point won't interfere with the
* pm_runtime framework, because the pm_runtime
* state is guaranteed active here.
*/
/* Put chip into sleep mode */
pca9685_set_sleep_mode(pca, true);
/* Change the chip-wide output frequency */
pca9685_write_reg(pca, PCA9685_PRESCALE, prescale);
/* Wake the chip up */
pca9685_set_sleep_mode(pca, false);
}
duty = PCA9685_COUNTER_RANGE * state->duty_cycle;
duty = DIV_ROUND_UP_ULL(duty, state->period);
pca9685_pwm_set_duty(pca, pwm->hwpwm, duty);
return 0;
}
static int pca9685_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
struct pca9685 *pca = to_pca(chip);
int ret;
mutex_lock(&pca->lock);
ret = __pca9685_pwm_apply(chip, pwm, state);
if (ret == 0) {
if (state->enabled)
set_bit(pwm->hwpwm, pca->pwms_enabled);
else
clear_bit(pwm->hwpwm, pca->pwms_enabled);
}
mutex_unlock(&pca->lock);
return ret;
}
static int pca9685_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_state *state)
{
struct pca9685 *pca = to_pca(chip);
unsigned long long duty;
unsigned int val = 0;
/* Calculate (chip-wide) period from prescale value */
pca9685_read_reg(pca, PCA9685_PRESCALE, &val);
/*
* PCA9685_OSC_CLOCK_MHZ is 25, which divides 1000 evenly, so the
* division below is exact and the whole calculation reduces to a
* multiplication; no precision is lost.
*/
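/*
 * As an assumed example: a prescale register value of 30 results in a
 * period of (4096 * 1000 / 25) * 31 = 5079040 ns, i.e. roughly 197 Hz.
 */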
state->period = (PCA9685_COUNTER_RANGE * 1000 / PCA9685_OSC_CLOCK_MHZ) *
(val + 1);
/* The (per-channel) polarity is fixed */
state->polarity = PWM_POLARITY_NORMAL;
if (pwm->hwpwm >= PCA9685_MAXCHAN) {
/*
* The "all LEDs" channel does not support HW readout
* Return 0 and disabled for backwards compatibility
*/
state->duty_cycle = 0;
state->enabled = false;
return 0;
}
state->enabled = true;
duty = pca9685_pwm_get_duty(pca, pwm->hwpwm);
state->duty_cycle = DIV_ROUND_DOWN_ULL(duty * state->period, PCA9685_COUNTER_RANGE);
return 0;
}
static int pca9685_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct pca9685 *pca = to_pca(chip);
if (pca9685_pwm_test_and_set_inuse(pca, pwm->hwpwm))
return -EBUSY;
if (pwm->hwpwm < PCA9685_MAXCHAN) {
/* PWMs - except the "all LEDs" channel - default to enabled */
mutex_lock(&pca->lock);
set_bit(pwm->hwpwm, pca->pwms_enabled);
mutex_unlock(&pca->lock);
}
pm_runtime_get_sync(chip->dev);
return 0;
}
static void pca9685_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct pca9685 *pca = to_pca(chip);
mutex_lock(&pca->lock);
pca9685_pwm_set_duty(pca, pwm->hwpwm, 0);
clear_bit(pwm->hwpwm, pca->pwms_enabled);
mutex_unlock(&pca->lock);
pm_runtime_put(chip->dev);
pca9685_pwm_clear_inuse(pca, pwm->hwpwm);
}
static const struct pwm_ops pca9685_pwm_ops = {
.apply = pca9685_pwm_apply,
.get_state = pca9685_pwm_get_state,
.request = pca9685_pwm_request,
.free = pca9685_pwm_free,
.owner = THIS_MODULE,
};
static const struct regmap_config pca9685_regmap_i2c_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = PCA9685_NUMREGS,
.cache_type = REGCACHE_NONE,
};
static int pca9685_pwm_probe(struct i2c_client *client)
{
struct pca9685 *pca;
unsigned int reg;
int ret;
pca = devm_kzalloc(&client->dev, sizeof(*pca), GFP_KERNEL);
if (!pca)
return -ENOMEM;
pca->regmap = devm_regmap_init_i2c(client, &pca9685_regmap_i2c_config);
if (IS_ERR(pca->regmap)) {
ret = PTR_ERR(pca->regmap);
dev_err(&client->dev, "Failed to initialize register map: %d\n",
ret);
return ret;
}
i2c_set_clientdata(client, pca);
mutex_init(&pca->lock);
ret = pca9685_read_reg(pca, PCA9685_MODE2, ®);
if (ret)
return ret;
if (device_property_read_bool(&client->dev, "invert"))
reg |= MODE2_INVRT;
else
reg &= ~MODE2_INVRT;
if (device_property_read_bool(&client->dev, "open-drain"))
reg &= ~MODE2_OUTDRV;
else
reg |= MODE2_OUTDRV;
ret = pca9685_write_reg(pca, PCA9685_MODE2, reg);
if (ret)
return ret;
/* Disable all LED ALLCALL and SUBx addresses to avoid bus collisions */
pca9685_read_reg(pca, PCA9685_MODE1, ®);
reg &= ~(MODE1_ALLCALL | MODE1_SUB1 | MODE1_SUB2 | MODE1_SUB3);
pca9685_write_reg(pca, PCA9685_MODE1, reg);
/* Reset OFF/ON registers to POR default */
pca9685_write_reg(pca, PCA9685_ALL_LED_OFF_L, 0);
pca9685_write_reg(pca, PCA9685_ALL_LED_OFF_H, LED_FULL);
pca9685_write_reg(pca, PCA9685_ALL_LED_ON_L, 0);
pca9685_write_reg(pca, PCA9685_ALL_LED_ON_H, LED_FULL);
pca->chip.ops = &pca9685_pwm_ops;
/* Add an extra channel for ALL_LED */
pca->chip.npwm = PCA9685_MAXCHAN + 1;
pca->chip.dev = &client->dev;
ret = pwmchip_add(&pca->chip);
if (ret < 0)
return ret;
ret = pca9685_pwm_gpio_probe(pca);
if (ret < 0) {
pwmchip_remove(&pca->chip);
return ret;
}
pm_runtime_enable(&client->dev);
if (pm_runtime_enabled(&client->dev)) {
/*
* Although the chip comes out of power-up in the sleep state,
* we force it to sleep in case it was woken up before.
*/
pca9685_set_sleep_mode(pca, true);
pm_runtime_set_suspended(&client->dev);
} else {
/* Wake the chip up if runtime PM is disabled */
pca9685_set_sleep_mode(pca, false);
}
return 0;
}
static void pca9685_pwm_remove(struct i2c_client *client)
{
struct pca9685 *pca = i2c_get_clientdata(client);
pwmchip_remove(&pca->chip);
if (!pm_runtime_enabled(&client->dev)) {
/* Put chip in sleep state if runtime PM is disabled */
pca9685_set_sleep_mode(pca, true);
}
pm_runtime_disable(&client->dev);
}
static int __maybe_unused pca9685_pwm_runtime_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct pca9685 *pca = i2c_get_clientdata(client);
pca9685_set_sleep_mode(pca, true);
return 0;
}
static int __maybe_unused pca9685_pwm_runtime_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct pca9685 *pca = i2c_get_clientdata(client);
pca9685_set_sleep_mode(pca, false);
return 0;
}
static const struct i2c_device_id pca9685_id[] = {
{ "pca9685", 0 },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(i2c, pca9685_id);
#ifdef CONFIG_ACPI
static const struct acpi_device_id pca9685_acpi_ids[] = {
{ "INT3492", 0 },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(acpi, pca9685_acpi_ids);
#endif
#ifdef CONFIG_OF
static const struct of_device_id pca9685_dt_ids[] = {
{ .compatible = "nxp,pca9685-pwm", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, pca9685_dt_ids);
#endif
static const struct dev_pm_ops pca9685_pwm_pm = {
SET_RUNTIME_PM_OPS(pca9685_pwm_runtime_suspend,
pca9685_pwm_runtime_resume, NULL)
};
static struct i2c_driver pca9685_i2c_driver = {
.driver = {
.name = "pca9685-pwm",
.acpi_match_table = ACPI_PTR(pca9685_acpi_ids),
.of_match_table = of_match_ptr(pca9685_dt_ids),
.pm = &pca9685_pwm_pm,
},
.probe = pca9685_pwm_probe,
.remove = pca9685_pwm_remove,
.id_table = pca9685_id,
};
module_i2c_driver(pca9685_i2c_driver);
MODULE_AUTHOR("Steffen Trumtrar <[email protected]>");
MODULE_DESCRIPTION("PWM driver for PCA9685");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/pwm/pwm-pca9685.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* TI/National Semiconductor LP3943 PWM driver
*
* Copyright 2013 Texas Instruments
*
* Author: Milo Kim <[email protected]>
*/
#include <linux/err.h>
#include <linux/mfd/lp3943.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/slab.h>
#define LP3943_MAX_DUTY 255
#define LP3943_MIN_PERIOD 6250
#define LP3943_MAX_PERIOD 1600000
struct lp3943_pwm {
struct pwm_chip chip;
struct lp3943 *lp3943;
struct lp3943_platform_data *pdata;
};
static inline struct lp3943_pwm *to_lp3943_pwm(struct pwm_chip *chip)
{
return container_of(chip, struct lp3943_pwm, chip);
}
static struct lp3943_pwm_map *
lp3943_pwm_request_map(struct lp3943_pwm *lp3943_pwm, int hwpwm)
{
struct lp3943_platform_data *pdata = lp3943_pwm->pdata;
struct lp3943 *lp3943 = lp3943_pwm->lp3943;
struct lp3943_pwm_map *pwm_map;
int i, offset;
pwm_map = kzalloc(sizeof(*pwm_map), GFP_KERNEL);
if (!pwm_map)
return ERR_PTR(-ENOMEM);
pwm_map->output = pdata->pwms[hwpwm]->output;
pwm_map->num_outputs = pdata->pwms[hwpwm]->num_outputs;
for (i = 0; i < pwm_map->num_outputs; i++) {
offset = pwm_map->output[i];
/* Return an error if the pin is already assigned */
if (test_and_set_bit(offset, &lp3943->pin_used)) {
kfree(pwm_map);
return ERR_PTR(-EBUSY);
}
}
return pwm_map;
}
static int lp3943_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct lp3943_pwm *lp3943_pwm = to_lp3943_pwm(chip);
struct lp3943_pwm_map *pwm_map;
pwm_map = lp3943_pwm_request_map(lp3943_pwm, pwm->hwpwm);
if (IS_ERR(pwm_map))
return PTR_ERR(pwm_map);
return pwm_set_chip_data(pwm, pwm_map);
}
static void lp3943_pwm_free_map(struct lp3943_pwm *lp3943_pwm,
struct lp3943_pwm_map *pwm_map)
{
struct lp3943 *lp3943 = lp3943_pwm->lp3943;
int i, offset;
for (i = 0; i < pwm_map->num_outputs; i++) {
offset = pwm_map->output[i];
clear_bit(offset, &lp3943->pin_used);
}
kfree(pwm_map);
}
static void lp3943_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct lp3943_pwm *lp3943_pwm = to_lp3943_pwm(chip);
struct lp3943_pwm_map *pwm_map = pwm_get_chip_data(pwm);
lp3943_pwm_free_map(lp3943_pwm, pwm_map);
}
static int lp3943_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
u64 duty_ns, u64 period_ns)
{
struct lp3943_pwm *lp3943_pwm = to_lp3943_pwm(chip);
struct lp3943 *lp3943 = lp3943_pwm->lp3943;
u8 val, reg_duty, reg_prescale;
int err;
/*
* How to configure the LP3943 PWMs
*
* 1) Period = 6250 ~ 1600000 (ns)
* 2) Prescale = period / 6250 - 1
* 3) Duty = input duty
*
* Prescale and duty are register values.
*/
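/*
 * Worked example (values assumed): period_ns = 100000 stays within the
 * clamp range, so prescale = 100000 / 6250 - 1 = 15; a duty_ns of 50000
 * maps to the duty register as 50000 * 255 / 100000 = 127.
 */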
if (pwm->hwpwm == 0) {
reg_prescale = LP3943_REG_PRESCALE0;
reg_duty = LP3943_REG_PWM0;
} else {
reg_prescale = LP3943_REG_PRESCALE1;
reg_duty = LP3943_REG_PWM1;
}
/*
* Note that after this clamping, period_ns fits into an int. This is
* helpful because we can resort to integer division below instead of
* the (more expensive) 64 bit division.
*/
period_ns = clamp(period_ns, (u64)LP3943_MIN_PERIOD, (u64)LP3943_MAX_PERIOD);
val = (u8)((int)period_ns / LP3943_MIN_PERIOD - 1);
err = lp3943_write_byte(lp3943, reg_prescale, val);
if (err)
return err;
duty_ns = min(duty_ns, period_ns);
val = (u8)((int)duty_ns * LP3943_MAX_DUTY / (int)period_ns);
return lp3943_write_byte(lp3943, reg_duty, val);
}
static int lp3943_pwm_set_mode(struct lp3943_pwm *lp3943_pwm,
struct lp3943_pwm_map *pwm_map,
u8 val)
{
struct lp3943 *lp3943 = lp3943_pwm->lp3943;
const struct lp3943_reg_cfg *mux = lp3943->mux_cfg;
int i, index, err;
for (i = 0; i < pwm_map->num_outputs; i++) {
index = pwm_map->output[i];
err = lp3943_update_bits(lp3943, mux[index].reg,
mux[index].mask,
val << mux[index].shift);
if (err)
return err;
}
return 0;
}
static int lp3943_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct lp3943_pwm *lp3943_pwm = to_lp3943_pwm(chip);
struct lp3943_pwm_map *pwm_map = pwm_get_chip_data(pwm);
u8 val;
if (pwm->hwpwm == 0)
val = LP3943_DIM_PWM0;
else
val = LP3943_DIM_PWM1;
/*
* Each PWM generator is set to control any of the outputs of the LP3943.
* To enable/disable the PWM, these output pins should be configured.
*/
return lp3943_pwm_set_mode(lp3943_pwm, pwm_map, val);
}
static void lp3943_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct lp3943_pwm *lp3943_pwm = to_lp3943_pwm(chip);
struct lp3943_pwm_map *pwm_map = pwm_get_chip_data(pwm);
/*
* LP3943 outputs are open-drain, so when the PWM is disabled the pin is
* reconfigured as a GPIO output set high.
*/
lp3943_pwm_set_mode(lp3943_pwm, pwm_map, LP3943_GPIO_OUT_HIGH);
}
static int lp3943_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
int err;
if (state->polarity != PWM_POLARITY_NORMAL)
return -EINVAL;
if (!state->enabled) {
if (pwm->state.enabled)
lp3943_pwm_disable(chip, pwm);
return 0;
}
err = lp3943_pwm_config(chip, pwm, state->duty_cycle, state->period);
if (err)
return err;
if (!pwm->state.enabled)
err = lp3943_pwm_enable(chip, pwm);
return err;
}
static const struct pwm_ops lp3943_pwm_ops = {
.request = lp3943_pwm_request,
.free = lp3943_pwm_free,
.apply = lp3943_pwm_apply,
.owner = THIS_MODULE,
};
static int lp3943_pwm_parse_dt(struct device *dev,
struct lp3943_pwm *lp3943_pwm)
{
static const char * const name[] = { "ti,pwm0", "ti,pwm1", };
struct device_node *node = dev->of_node;
struct lp3943_platform_data *pdata;
struct lp3943_pwm_map *pwm_map;
enum lp3943_pwm_output *output;
int i, err, proplen, count = 0;
u32 num_outputs;
if (!node)
return -EINVAL;
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
/*
* Read the output map configuration from the device tree.
* Each of the two PWM generators can drive zero or more outputs.
*/
for (i = 0; i < LP3943_NUM_PWMS; i++) {
if (!of_get_property(node, name[i], &proplen))
continue;
num_outputs = proplen / sizeof(u32);
if (num_outputs == 0)
continue;
output = devm_kcalloc(dev, num_outputs, sizeof(*output),
GFP_KERNEL);
if (!output)
return -ENOMEM;
err = of_property_read_u32_array(node, name[i], output,
num_outputs);
if (err)
return err;
pwm_map = devm_kzalloc(dev, sizeof(*pwm_map), GFP_KERNEL);
if (!pwm_map)
return -ENOMEM;
pwm_map->output = output;
pwm_map->num_outputs = num_outputs;
pdata->pwms[i] = pwm_map;
count++;
}
if (count == 0)
return -ENODATA;
lp3943_pwm->pdata = pdata;
return 0;
}
static int lp3943_pwm_probe(struct platform_device *pdev)
{
struct lp3943 *lp3943 = dev_get_drvdata(pdev->dev.parent);
struct lp3943_pwm *lp3943_pwm;
int ret;
lp3943_pwm = devm_kzalloc(&pdev->dev, sizeof(*lp3943_pwm), GFP_KERNEL);
if (!lp3943_pwm)
return -ENOMEM;
lp3943_pwm->pdata = lp3943->pdata;
if (!lp3943_pwm->pdata) {
if (IS_ENABLED(CONFIG_OF))
ret = lp3943_pwm_parse_dt(&pdev->dev, lp3943_pwm);
else
ret = -ENODEV;
if (ret)
return ret;
}
lp3943_pwm->lp3943 = lp3943;
lp3943_pwm->chip.dev = &pdev->dev;
lp3943_pwm->chip.ops = &lp3943_pwm_ops;
lp3943_pwm->chip.npwm = LP3943_NUM_PWMS;
return devm_pwmchip_add(&pdev->dev, &lp3943_pwm->chip);
}
#ifdef CONFIG_OF
static const struct of_device_id lp3943_pwm_of_match[] = {
{ .compatible = "ti,lp3943-pwm", },
{ }
};
MODULE_DEVICE_TABLE(of, lp3943_pwm_of_match);
#endif
static struct platform_driver lp3943_pwm_driver = {
.probe = lp3943_pwm_probe,
.driver = {
.name = "lp3943-pwm",
.of_match_table = of_match_ptr(lp3943_pwm_of_match),
},
};
module_platform_driver(lp3943_pwm_driver);
MODULE_DESCRIPTION("LP3943 PWM driver");
MODULE_ALIAS("platform:lp3943-pwm");
MODULE_AUTHOR("Milo Kim");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/pwm/pwm-lp3943.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Intel Low Power Subsystem PWM controller driver
*
* Copyright (C) 2014, Intel Corporation
*
* Derived from the original pwm-lpss.c
*/
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include "pwm-lpss.h"
static int pwm_lpss_probe_platform(struct platform_device *pdev)
{
const struct pwm_lpss_boardinfo *info;
struct pwm_lpss_chip *lpwm;
void __iomem *base;
info = device_get_match_data(&pdev->dev);
if (!info)
return -ENODEV;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
lpwm = devm_pwm_lpss_probe(&pdev->dev, base, info);
if (IS_ERR(lpwm))
return PTR_ERR(lpwm);
platform_set_drvdata(pdev, lpwm);
/*
* On Cherry Trail devices the GFX0._PS0 AML checks if the controller
* is on and if it is not on it turns it on and restores what it
* believes is the correct state to the PWM controller.
* Because of this we must disallow direct-complete, which keeps the
* controller (runtime)suspended on resume, to avoid 2 issues:
* 1. The controller getting turned on without the linux-pm code
* knowing about this. On devices where the controller is unused
* this causes it to stay on during the next suspend causing high
* battery drain (because S0i3 is not reached)
* 2. The state restoring code unexpectedly messing with the controller
*
* Leaving the controller runtime-suspended (skipping runtime-resume +
* normal-suspend) during suspend is fine.
*/
if (info->other_devices_aml_touches_pwm_regs)
dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE|
DPM_FLAG_SMART_SUSPEND);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
return 0;
}
static void pwm_lpss_remove_platform(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
}
static const struct acpi_device_id pwm_lpss_acpi_match[] = {
{ "80860F09", (unsigned long)&pwm_lpss_byt_info },
{ "80862288", (unsigned long)&pwm_lpss_bsw_info },
{ "80862289", (unsigned long)&pwm_lpss_bsw_info },
{ "80865AC8", (unsigned long)&pwm_lpss_bxt_info },
{ },
};
MODULE_DEVICE_TABLE(acpi, pwm_lpss_acpi_match);
static struct platform_driver pwm_lpss_driver_platform = {
.driver = {
.name = "pwm-lpss",
.acpi_match_table = pwm_lpss_acpi_match,
},
.probe = pwm_lpss_probe_platform,
.remove_new = pwm_lpss_remove_platform,
};
module_platform_driver(pwm_lpss_driver_platform);
MODULE_DESCRIPTION("PWM platform driver for Intel LPSS");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(PWM_LPSS);
MODULE_ALIAS("platform:pwm-lpss");
|
linux-master
|
drivers/pwm/pwm-lpss-platform.c
|
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2014 Broadcom Corporation
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/slab.h>
#include <linux/types.h>
/*
* The Kona PWM has some unusual characteristics. Here are the main points.
*
* 1) There is no disable bit and the hardware docs advise programming a zero
* duty to achieve output equivalent to that of a normal disable operation.
*
* 2) Changes to prescale, duty, period, and polarity do not take effect until
* a subsequent rising edge of the trigger bit.
*
* 3) If the smooth bit and trigger bit are both low, the output is a constant
* high signal. Otherwise, the earlier waveform continues to be output.
*
* 4) If the smooth bit is set on the rising edge of the trigger bit, output
* will transition to the new settings on a period boundary (which could be
* seconds away). If the smooth bit is clear, new settings will be applied
* as soon as possible (the hardware always has a 400ns delay).
*
* 5) When the external clock that feeds the PWM is disabled, output is pegged
* high or low depending on its state at that exact instant.
*/
#define PWM_CONTROL_OFFSET 0x00000000
#define PWM_CONTROL_SMOOTH_SHIFT(chan) (24 + (chan))
#define PWM_CONTROL_TYPE_SHIFT(chan) (16 + (chan))
#define PWM_CONTROL_POLARITY_SHIFT(chan) (8 + (chan))
#define PWM_CONTROL_TRIGGER_SHIFT(chan) (chan)
#define PRESCALE_OFFSET 0x00000004
#define PRESCALE_SHIFT(chan) ((chan) << 2)
#define PRESCALE_MASK(chan) (0x7 << PRESCALE_SHIFT(chan))
#define PRESCALE_MIN 0x00000000
#define PRESCALE_MAX 0x00000007
#define PERIOD_COUNT_OFFSET(chan) (0x00000008 + ((chan) << 3))
#define PERIOD_COUNT_MIN 0x00000002
#define PERIOD_COUNT_MAX 0x00ffffff
#define DUTY_CYCLE_HIGH_OFFSET(chan) (0x0000000c + ((chan) << 3))
#define DUTY_CYCLE_HIGH_MIN 0x00000000
#define DUTY_CYCLE_HIGH_MAX 0x00ffffff
struct kona_pwmc {
struct pwm_chip chip;
void __iomem *base;
struct clk *clk;
};
static inline struct kona_pwmc *to_kona_pwmc(struct pwm_chip *chip)
{
return container_of(chip, struct kona_pwmc, chip);
}
/*
* Clear trigger bit but set smooth bit to maintain old output.
*/
static void kona_pwmc_prepare_for_settings(struct kona_pwmc *kp,
unsigned int chan)
{
unsigned int value = readl(kp->base + PWM_CONTROL_OFFSET);
value |= 1 << PWM_CONTROL_SMOOTH_SHIFT(chan);
value &= ~(1 << PWM_CONTROL_TRIGGER_SHIFT(chan));
writel(value, kp->base + PWM_CONTROL_OFFSET);
/*
* There must be a min 400ns delay between clearing trigger and setting
* it. Failing to do this may result in no PWM signal.
*/
ndelay(400);
}
static void kona_pwmc_apply_settings(struct kona_pwmc *kp, unsigned int chan)
{
unsigned int value = readl(kp->base + PWM_CONTROL_OFFSET);
/* Set trigger bit and clear smooth bit to apply new settings */
value &= ~(1 << PWM_CONTROL_SMOOTH_SHIFT(chan));
value |= 1 << PWM_CONTROL_TRIGGER_SHIFT(chan);
writel(value, kp->base + PWM_CONTROL_OFFSET);
/* Trigger bit must be held high for at least 400 ns. */
ndelay(400);
}
static int kona_pwmc_config(struct pwm_chip *chip, struct pwm_device *pwm,
u64 duty_ns, u64 period_ns)
{
struct kona_pwmc *kp = to_kona_pwmc(chip);
u64 div, rate;
unsigned long prescale = PRESCALE_MIN, pc, dc;
unsigned int value, chan = pwm->hwpwm;
/*
* Find period count, duty count and prescale to suit duty_ns and
* period_ns. This is done according to formulas described below:
*
* period_ns = 10^9 * (PRESCALE + 1) * PC / PWM_CLK_RATE
* duty_ns = 10^9 * (PRESCALE + 1) * DC / PWM_CLK_RATE
*
* PC = (PWM_CLK_RATE * period_ns) / (10^9 * (PRESCALE + 1))
* DC = (PWM_CLK_RATE * duty_ns) / (10^9 * (PRESCALE + 1))
*/
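/*
* Worked example (illustrative values, assuming a 26 MHz PWM clock): for
* period_ns = 1000000 (1 kHz) and duty_ns = 500000 with PRESCALE = 0,
* PC = 26000000 * 1000000 / 10^9 = 26000 and DC = 13000. Both fit within
* PERIOD_COUNT_MAX and DUTY_CYCLE_HIGH_MAX, so no prescaling is needed.
*/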
rate = clk_get_rate(kp->clk);
while (1) {
div = 1000000000;
div *= 1 + prescale;
pc = mul_u64_u64_div_u64(rate, period_ns, div);
dc = mul_u64_u64_div_u64(rate, duty_ns, div);
/* If duty_ns or period_ns are not achievable then return */
if (pc < PERIOD_COUNT_MIN)
return -EINVAL;
/* If pc and dc are in bounds, the calculation is done */
if (pc <= PERIOD_COUNT_MAX && dc <= DUTY_CYCLE_HIGH_MAX)
break;
/* Otherwise, increase prescale and recalculate pc and dc */
if (++prescale > PRESCALE_MAX)
return -EINVAL;
}
kona_pwmc_prepare_for_settings(kp, chan);
value = readl(kp->base + PRESCALE_OFFSET);
value &= ~PRESCALE_MASK(chan);
value |= prescale << PRESCALE_SHIFT(chan);
writel(value, kp->base + PRESCALE_OFFSET);
writel(pc, kp->base + PERIOD_COUNT_OFFSET(chan));
writel(dc, kp->base + DUTY_CYCLE_HIGH_OFFSET(chan));
kona_pwmc_apply_settings(kp, chan);
return 0;
}
static int kona_pwmc_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm,
enum pwm_polarity polarity)
{
struct kona_pwmc *kp = to_kona_pwmc(chip);
unsigned int chan = pwm->hwpwm;
unsigned int value;
int ret;
ret = clk_prepare_enable(kp->clk);
if (ret < 0) {
dev_err(chip->dev, "failed to enable clock: %d\n", ret);
return ret;
}
kona_pwmc_prepare_for_settings(kp, chan);
value = readl(kp->base + PWM_CONTROL_OFFSET);
if (polarity == PWM_POLARITY_NORMAL)
value |= 1 << PWM_CONTROL_POLARITY_SHIFT(chan);
else
value &= ~(1 << PWM_CONTROL_POLARITY_SHIFT(chan));
writel(value, kp->base + PWM_CONTROL_OFFSET);
kona_pwmc_apply_settings(kp, chan);
clk_disable_unprepare(kp->clk);
return 0;
}
static int kona_pwmc_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct kona_pwmc *kp = to_kona_pwmc(chip);
int ret;
ret = clk_prepare_enable(kp->clk);
if (ret < 0) {
dev_err(chip->dev, "failed to enable clock: %d\n", ret);
return ret;
}
return 0;
}
static void kona_pwmc_disable(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct kona_pwmc *kp = to_kona_pwmc(chip);
unsigned int chan = pwm->hwpwm;
unsigned int value;
kona_pwmc_prepare_for_settings(kp, chan);
/* Simulate a disable by configuring for zero duty */
writel(0, kp->base + DUTY_CYCLE_HIGH_OFFSET(chan));
writel(0, kp->base + PERIOD_COUNT_OFFSET(chan));
/* Set prescale to 0 for this channel */
value = readl(kp->base + PRESCALE_OFFSET);
value &= ~PRESCALE_MASK(chan);
writel(value, kp->base + PRESCALE_OFFSET);
kona_pwmc_apply_settings(kp, chan);
clk_disable_unprepare(kp->clk);
}
static int kona_pwmc_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
int err;
struct kona_pwmc *kp = to_kona_pwmc(chip);
bool enabled = pwm->state.enabled;
if (state->polarity != pwm->state.polarity) {
if (enabled) {
kona_pwmc_disable(chip, pwm);
enabled = false;
}
err = kona_pwmc_set_polarity(chip, pwm, state->polarity);
if (err)
return err;
pwm->state.polarity = state->polarity;
}
if (!state->enabled) {
if (enabled)
kona_pwmc_disable(chip, pwm);
return 0;
} else if (!enabled) {
/*
* This is a bit special here: usually the PWM should only be
* enabled when duty and period are set up. But before this
* driver was converted to .apply it was done the other way
* around, so this behaviour was kept even though it might
* result in a glitch. This might be improvable by someone with
* hardware and/or documentation.
*/
err = kona_pwmc_enable(chip, pwm);
if (err)
return err;
}
err = kona_pwmc_config(pwm->chip, pwm, state->duty_cycle, state->period);
if (err && !pwm->state.enabled)
clk_disable_unprepare(kp->clk);
return err;
}
static const struct pwm_ops kona_pwm_ops = {
.apply = kona_pwmc_apply,
.owner = THIS_MODULE,
};
static int kona_pwmc_probe(struct platform_device *pdev)
{
struct kona_pwmc *kp;
unsigned int chan;
unsigned int value = 0;
int ret = 0;
kp = devm_kzalloc(&pdev->dev, sizeof(*kp), GFP_KERNEL);
if (kp == NULL)
return -ENOMEM;
kp->chip.dev = &pdev->dev;
kp->chip.ops = &kona_pwm_ops;
kp->chip.npwm = 6;
kp->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(kp->base))
return PTR_ERR(kp->base);
kp->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(kp->clk)) {
dev_err(&pdev->dev, "failed to get clock: %ld\n",
PTR_ERR(kp->clk));
return PTR_ERR(kp->clk);
}
ret = clk_prepare_enable(kp->clk);
if (ret < 0) {
dev_err(&pdev->dev, "failed to enable clock: %d\n", ret);
return ret;
}
/* Set push/pull for all channels */
for (chan = 0; chan < kp->chip.npwm; chan++)
value |= (1 << PWM_CONTROL_TYPE_SHIFT(chan));
writel(value, kp->base + PWM_CONTROL_OFFSET);
clk_disable_unprepare(kp->clk);
ret = devm_pwmchip_add(&pdev->dev, &kp->chip);
if (ret < 0)
dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
return ret;
}
static const struct of_device_id bcm_kona_pwmc_dt[] = {
{ .compatible = "brcm,kona-pwm" },
{ },
};
MODULE_DEVICE_TABLE(of, bcm_kona_pwmc_dt);
static struct platform_driver kona_pwmc_driver = {
.driver = {
.name = "bcm-kona-pwm",
.of_match_table = bcm_kona_pwmc_dt,
},
.probe = kona_pwmc_probe,
};
module_platform_driver(kona_pwmc_driver);
MODULE_AUTHOR("Broadcom Corporation <[email protected]>");
MODULE_AUTHOR("Tim Kryger <[email protected]>");
MODULE_DESCRIPTION("Broadcom Kona PWM driver");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/pwm/pwm-bcm-kona.c
|
/*
* Marvell Berlin PWM driver
*
* Copyright (C) 2015 Marvell Technology Group Ltd.
*
* Author: Antoine Tenart <[email protected]>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/slab.h>
#define BERLIN_PWM_EN 0x0
#define BERLIN_PWM_ENABLE BIT(0)
#define BERLIN_PWM_CONTROL 0x4
/*
* The prescaler claims to support 8 different moduli, configured using the
* low three bits of PWM_CONTROL. (Sequentially, they are 1, 4, 8, 16, 64,
* 256, 1024, and 4096.) However, the moduli from 4 to 1024 appear to be
* implemented by internally shifting TCNT left without adding additional
* bits. So, the max TCNT that actually works for a modulus of 4 is 0x3fff;
* for 8, 0x1fff; and so on. This means that those moduli are entirely
* useless, as we could just do the shift ourselves. The 4096 modulus is
* implemented with a real prescaler, so we do use that, but we treat it
* as a flag instead of pretending the modulus is actually configurable.
*/
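/*
* Worked example (illustrative, assuming a 25 MHz input clock): a 10 ms
* period corresponds to 250000 clock cycles, which exceeds
* BERLIN_PWM_MAX_TCNT (65535), so berlin_pwm_config() falls back to the
* 4096 prescaler and programs TCNT = 250000 >> 12 = 61.
*/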
#define BERLIN_PWM_PRESCALE_4096 0x7
#define BERLIN_PWM_INVERT_POLARITY BIT(3)
#define BERLIN_PWM_DUTY 0x8
#define BERLIN_PWM_TCNT 0xc
#define BERLIN_PWM_MAX_TCNT 65535
struct berlin_pwm_channel {
u32 enable;
u32 ctrl;
u32 duty;
u32 tcnt;
};
struct berlin_pwm_chip {
struct pwm_chip chip;
struct clk *clk;
void __iomem *base;
};
static inline struct berlin_pwm_chip *to_berlin_pwm_chip(struct pwm_chip *chip)
{
return container_of(chip, struct berlin_pwm_chip, chip);
}
static inline u32 berlin_pwm_readl(struct berlin_pwm_chip *bpc,
unsigned int channel, unsigned long offset)
{
return readl_relaxed(bpc->base + channel * 0x10 + offset);
}
static inline void berlin_pwm_writel(struct berlin_pwm_chip *bpc,
unsigned int channel, u32 value,
unsigned long offset)
{
writel_relaxed(value, bpc->base + channel * 0x10 + offset);
}
static int berlin_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct berlin_pwm_channel *channel;
channel = kzalloc(sizeof(*channel), GFP_KERNEL);
if (!channel)
return -ENOMEM;
return pwm_set_chip_data(pwm, channel);
}
static void berlin_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct berlin_pwm_channel *channel = pwm_get_chip_data(pwm);
kfree(channel);
}
static int berlin_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
u64 duty_ns, u64 period_ns)
{
struct berlin_pwm_chip *bpc = to_berlin_pwm_chip(chip);
bool prescale_4096 = false;
u32 value, duty, period;
u64 cycles;
cycles = clk_get_rate(bpc->clk);
cycles *= period_ns;
do_div(cycles, NSEC_PER_SEC);
if (cycles > BERLIN_PWM_MAX_TCNT) {
prescale_4096 = true;
cycles >>= 12; // Prescaled by 4096
if (cycles > BERLIN_PWM_MAX_TCNT)
return -ERANGE;
}
period = cycles;
cycles *= duty_ns;
do_div(cycles, period_ns);
duty = cycles;
value = berlin_pwm_readl(bpc, pwm->hwpwm, BERLIN_PWM_CONTROL);
if (prescale_4096)
value |= BERLIN_PWM_PRESCALE_4096;
else
value &= ~BERLIN_PWM_PRESCALE_4096;
berlin_pwm_writel(bpc, pwm->hwpwm, value, BERLIN_PWM_CONTROL);
berlin_pwm_writel(bpc, pwm->hwpwm, duty, BERLIN_PWM_DUTY);
berlin_pwm_writel(bpc, pwm->hwpwm, period, BERLIN_PWM_TCNT);
return 0;
}
static int berlin_pwm_set_polarity(struct pwm_chip *chip,
struct pwm_device *pwm,
enum pwm_polarity polarity)
{
struct berlin_pwm_chip *bpc = to_berlin_pwm_chip(chip);
u32 value;
value = berlin_pwm_readl(bpc, pwm->hwpwm, BERLIN_PWM_CONTROL);
if (polarity == PWM_POLARITY_NORMAL)
value &= ~BERLIN_PWM_INVERT_POLARITY;
else
value |= BERLIN_PWM_INVERT_POLARITY;
berlin_pwm_writel(bpc, pwm->hwpwm, value, BERLIN_PWM_CONTROL);
return 0;
}
static int berlin_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct berlin_pwm_chip *bpc = to_berlin_pwm_chip(chip);
u32 value;
value = berlin_pwm_readl(bpc, pwm->hwpwm, BERLIN_PWM_EN);
value |= BERLIN_PWM_ENABLE;
berlin_pwm_writel(bpc, pwm->hwpwm, value, BERLIN_PWM_EN);
return 0;
}
static void berlin_pwm_disable(struct pwm_chip *chip,
struct pwm_device *pwm)
{
struct berlin_pwm_chip *bpc = to_berlin_pwm_chip(chip);
u32 value;
value = berlin_pwm_readl(bpc, pwm->hwpwm, BERLIN_PWM_EN);
value &= ~BERLIN_PWM_ENABLE;
berlin_pwm_writel(bpc, pwm->hwpwm, value, BERLIN_PWM_EN);
}
static int berlin_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
int err;
bool enabled = pwm->state.enabled;
if (state->polarity != pwm->state.polarity) {
if (enabled) {
berlin_pwm_disable(chip, pwm);
enabled = false;
}
err = berlin_pwm_set_polarity(chip, pwm, state->polarity);
if (err)
return err;
}
if (!state->enabled) {
if (enabled)
berlin_pwm_disable(chip, pwm);
return 0;
}
err = berlin_pwm_config(chip, pwm, state->duty_cycle, state->period);
if (err)
return err;
if (!enabled)
return berlin_pwm_enable(chip, pwm);
return 0;
}
static const struct pwm_ops berlin_pwm_ops = {
.request = berlin_pwm_request,
.free = berlin_pwm_free,
.apply = berlin_pwm_apply,
.owner = THIS_MODULE,
};
static const struct of_device_id berlin_pwm_match[] = {
{ .compatible = "marvell,berlin-pwm" },
{ },
};
MODULE_DEVICE_TABLE(of, berlin_pwm_match);
static int berlin_pwm_probe(struct platform_device *pdev)
{
struct berlin_pwm_chip *bpc;
int ret;
bpc = devm_kzalloc(&pdev->dev, sizeof(*bpc), GFP_KERNEL);
if (!bpc)
return -ENOMEM;
bpc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(bpc->base))
return PTR_ERR(bpc->base);
bpc->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(bpc->clk))
return PTR_ERR(bpc->clk);
ret = clk_prepare_enable(bpc->clk);
if (ret)
return ret;
bpc->chip.dev = &pdev->dev;
bpc->chip.ops = &berlin_pwm_ops;
bpc->chip.npwm = 4;
ret = pwmchip_add(&bpc->chip);
if (ret < 0) {
dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
clk_disable_unprepare(bpc->clk);
return ret;
}
platform_set_drvdata(pdev, bpc);
return 0;
}
static void berlin_pwm_remove(struct platform_device *pdev)
{
struct berlin_pwm_chip *bpc = platform_get_drvdata(pdev);
pwmchip_remove(&bpc->chip);
clk_disable_unprepare(bpc->clk);
}
#ifdef CONFIG_PM_SLEEP
static int berlin_pwm_suspend(struct device *dev)
{
struct berlin_pwm_chip *bpc = dev_get_drvdata(dev);
unsigned int i;
for (i = 0; i < bpc->chip.npwm; i++) {
struct berlin_pwm_channel *channel;
channel = pwm_get_chip_data(&bpc->chip.pwms[i]);
if (!channel)
continue;
channel->enable = berlin_pwm_readl(bpc, i, BERLIN_PWM_EN);
channel->ctrl = berlin_pwm_readl(bpc, i, BERLIN_PWM_CONTROL);
channel->duty = berlin_pwm_readl(bpc, i, BERLIN_PWM_DUTY);
channel->tcnt = berlin_pwm_readl(bpc, i, BERLIN_PWM_TCNT);
}
clk_disable_unprepare(bpc->clk);
return 0;
}
static int berlin_pwm_resume(struct device *dev)
{
struct berlin_pwm_chip *bpc = dev_get_drvdata(dev);
unsigned int i;
int ret;
ret = clk_prepare_enable(bpc->clk);
if (ret)
return ret;
for (i = 0; i < bpc->chip.npwm; i++) {
struct berlin_pwm_channel *channel;
channel = pwm_get_chip_data(&bpc->chip.pwms[i]);
if (!channel)
continue;
berlin_pwm_writel(bpc, i, channel->ctrl, BERLIN_PWM_CONTROL);
berlin_pwm_writel(bpc, i, channel->duty, BERLIN_PWM_DUTY);
berlin_pwm_writel(bpc, i, channel->tcnt, BERLIN_PWM_TCNT);
berlin_pwm_writel(bpc, i, channel->enable, BERLIN_PWM_EN);
}
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(berlin_pwm_pm_ops, berlin_pwm_suspend,
berlin_pwm_resume);
static struct platform_driver berlin_pwm_driver = {
.probe = berlin_pwm_probe,
.remove_new = berlin_pwm_remove,
.driver = {
.name = "berlin-pwm",
.of_match_table = berlin_pwm_match,
.pm = &berlin_pwm_pm_ops,
},
};
module_platform_driver(berlin_pwm_driver);
MODULE_AUTHOR("Antoine Tenart <[email protected]>");
MODULE_DESCRIPTION("Marvell Berlin PWM driver");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/pwm/pwm-berlin.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* PWM driver for Rockchip SoCs
*
* Copyright (C) 2014 Beniamino Galvani <[email protected]>
* Copyright (C) 2014 ROCKCHIP, Inc.
*/
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/time.h>
#define PWM_CTRL_TIMER_EN (1 << 0)
#define PWM_CTRL_OUTPUT_EN (1 << 3)
#define PWM_ENABLE (1 << 0)
#define PWM_CONTINUOUS (1 << 1)
#define PWM_DUTY_POSITIVE (1 << 3)
#define PWM_DUTY_NEGATIVE (0 << 3)
#define PWM_INACTIVE_NEGATIVE (0 << 4)
#define PWM_INACTIVE_POSITIVE (1 << 4)
#define PWM_POLARITY_MASK (PWM_DUTY_POSITIVE | PWM_INACTIVE_POSITIVE)
#define PWM_OUTPUT_LEFT (0 << 5)
#define PWM_LOCK_EN (1 << 6)
#define PWM_LP_DISABLE (0 << 8)
struct rockchip_pwm_chip {
struct pwm_chip chip;
struct clk *clk;
struct clk *pclk;
const struct rockchip_pwm_data *data;
void __iomem *base;
};
struct rockchip_pwm_regs {
unsigned long duty;
unsigned long period;
unsigned long cntr;
unsigned long ctrl;
};
struct rockchip_pwm_data {
struct rockchip_pwm_regs regs;
unsigned int prescaler;
bool supports_polarity;
bool supports_lock;
u32 enable_conf;
};
static inline struct rockchip_pwm_chip *to_rockchip_pwm_chip(struct pwm_chip *chip)
{
return container_of(chip, struct rockchip_pwm_chip, chip);
}
static int rockchip_pwm_get_state(struct pwm_chip *chip,
struct pwm_device *pwm,
struct pwm_state *state)
{
struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
u32 enable_conf = pc->data->enable_conf;
unsigned long clk_rate;
u64 tmp;
u32 val;
int ret;
ret = clk_enable(pc->pclk);
if (ret)
return ret;
ret = clk_enable(pc->clk);
if (ret)
return ret;
clk_rate = clk_get_rate(pc->clk);
tmp = readl_relaxed(pc->base + pc->data->regs.period);
tmp *= pc->data->prescaler * NSEC_PER_SEC;
state->period = DIV_ROUND_CLOSEST_ULL(tmp, clk_rate);
tmp = readl_relaxed(pc->base + pc->data->regs.duty);
tmp *= pc->data->prescaler * NSEC_PER_SEC;
state->duty_cycle = DIV_ROUND_CLOSEST_ULL(tmp, clk_rate);
val = readl_relaxed(pc->base + pc->data->regs.ctrl);
state->enabled = (val & enable_conf) == enable_conf;
if (pc->data->supports_polarity && !(val & PWM_DUTY_POSITIVE))
state->polarity = PWM_POLARITY_INVERSED;
else
state->polarity = PWM_POLARITY_NORMAL;
clk_disable(pc->clk);
clk_disable(pc->pclk);
return 0;
}
static void rockchip_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
unsigned long period, duty;
u64 clk_rate, div;
u32 ctrl;
clk_rate = clk_get_rate(pc->clk);
/*
* Since period and duty cycle registers have a width of 32
* bits, every possible input period can be obtained using the
* default prescaler value for all practical clock rate values.
*/
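/*
* Worked example (illustrative, assuming a 24 MHz PWM clock and the
* default prescaler of 1): a requested period of 1000000 ns maps to
* 24000000 * 1000000 / 10^9 = 24000 counts and a 250000 ns duty cycle
* maps to 6000 counts, both well below the 32-bit register limit.
*/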
div = clk_rate * state->period;
period = DIV_ROUND_CLOSEST_ULL(div,
pc->data->prescaler * NSEC_PER_SEC);
div = clk_rate * state->duty_cycle;
duty = DIV_ROUND_CLOSEST_ULL(div, pc->data->prescaler * NSEC_PER_SEC);
/*
* Lock the period and duty of the previous configuration, then
* change the duty and period; the new values do not take effect
* while the lock is held.
*/
ctrl = readl_relaxed(pc->base + pc->data->regs.ctrl);
if (pc->data->supports_lock) {
ctrl |= PWM_LOCK_EN;
writel_relaxed(ctrl, pc->base + pc->data->regs.ctrl);
}
writel(period, pc->base + pc->data->regs.period);
writel(duty, pc->base + pc->data->regs.duty);
if (pc->data->supports_polarity) {
ctrl &= ~PWM_POLARITY_MASK;
if (state->polarity == PWM_POLARITY_INVERSED)
ctrl |= PWM_DUTY_NEGATIVE | PWM_INACTIVE_POSITIVE;
else
ctrl |= PWM_DUTY_POSITIVE | PWM_INACTIVE_NEGATIVE;
}
/*
* Unlock and set the polarity at the same time; the new duty,
* period and polarity configuration then takes effect together
* at the next period.
*/
if (pc->data->supports_lock)
ctrl &= ~PWM_LOCK_EN;
writel(ctrl, pc->base + pc->data->regs.ctrl);
}
static int rockchip_pwm_enable(struct pwm_chip *chip,
struct pwm_device *pwm,
bool enable)
{
struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
u32 enable_conf = pc->data->enable_conf;
int ret;
u32 val;
if (enable) {
ret = clk_enable(pc->clk);
if (ret)
return ret;
}
val = readl_relaxed(pc->base + pc->data->regs.ctrl);
if (enable)
val |= enable_conf;
else
val &= ~enable_conf;
writel_relaxed(val, pc->base + pc->data->regs.ctrl);
if (!enable)
clk_disable(pc->clk);
return 0;
}
static int rockchip_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
struct pwm_state curstate;
bool enabled;
int ret = 0;
ret = clk_enable(pc->pclk);
if (ret)
return ret;
ret = clk_enable(pc->clk);
if (ret)
return ret;
pwm_get_state(pwm, &curstate);
enabled = curstate.enabled;
if (state->polarity != curstate.polarity && enabled &&
!pc->data->supports_lock) {
ret = rockchip_pwm_enable(chip, pwm, false);
if (ret)
goto out;
enabled = false;
}
rockchip_pwm_config(chip, pwm, state);
if (state->enabled != enabled) {
ret = rockchip_pwm_enable(chip, pwm, state->enabled);
if (ret)
goto out;
}
out:
clk_disable(pc->clk);
clk_disable(pc->pclk);
return ret;
}
static const struct pwm_ops rockchip_pwm_ops = {
.get_state = rockchip_pwm_get_state,
.apply = rockchip_pwm_apply,
.owner = THIS_MODULE,
};
static const struct rockchip_pwm_data pwm_data_v1 = {
.regs = {
.duty = 0x04,
.period = 0x08,
.cntr = 0x00,
.ctrl = 0x0c,
},
.prescaler = 2,
.supports_polarity = false,
.supports_lock = false,
.enable_conf = PWM_CTRL_OUTPUT_EN | PWM_CTRL_TIMER_EN,
};
static const struct rockchip_pwm_data pwm_data_v2 = {
.regs = {
.duty = 0x08,
.period = 0x04,
.cntr = 0x00,
.ctrl = 0x0c,
},
.prescaler = 1,
.supports_polarity = true,
.supports_lock = false,
.enable_conf = PWM_OUTPUT_LEFT | PWM_LP_DISABLE | PWM_ENABLE |
PWM_CONTINUOUS,
};
static const struct rockchip_pwm_data pwm_data_vop = {
.regs = {
.duty = 0x08,
.period = 0x04,
.cntr = 0x0c,
.ctrl = 0x00,
},
.prescaler = 1,
.supports_polarity = true,
.supports_lock = false,
.enable_conf = PWM_OUTPUT_LEFT | PWM_LP_DISABLE | PWM_ENABLE |
PWM_CONTINUOUS,
};
static const struct rockchip_pwm_data pwm_data_v3 = {
.regs = {
.duty = 0x08,
.period = 0x04,
.cntr = 0x00,
.ctrl = 0x0c,
},
.prescaler = 1,
.supports_polarity = true,
.supports_lock = true,
.enable_conf = PWM_OUTPUT_LEFT | PWM_LP_DISABLE | PWM_ENABLE |
PWM_CONTINUOUS,
};
static const struct of_device_id rockchip_pwm_dt_ids[] = {
{ .compatible = "rockchip,rk2928-pwm", .data = &pwm_data_v1},
{ .compatible = "rockchip,rk3288-pwm", .data = &pwm_data_v2},
{ .compatible = "rockchip,vop-pwm", .data = &pwm_data_vop},
{ .compatible = "rockchip,rk3328-pwm", .data = &pwm_data_v3},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, rockchip_pwm_dt_ids);
static int rockchip_pwm_probe(struct platform_device *pdev)
{
const struct of_device_id *id;
struct rockchip_pwm_chip *pc;
u32 enable_conf, ctrl;
bool enabled;
int ret, count;
id = of_match_device(rockchip_pwm_dt_ids, &pdev->dev);
if (!id)
return -EINVAL;
pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
if (!pc)
return -ENOMEM;
pc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pc->base))
return PTR_ERR(pc->base);
pc->clk = devm_clk_get(&pdev->dev, "pwm");
if (IS_ERR(pc->clk)) {
pc->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(pc->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(pc->clk),
"Can't get PWM clk\n");
}
count = of_count_phandle_with_args(pdev->dev.of_node,
"clocks", "#clock-cells");
if (count == 2)
pc->pclk = devm_clk_get(&pdev->dev, "pclk");
else
pc->pclk = pc->clk;
if (IS_ERR(pc->pclk))
return dev_err_probe(&pdev->dev, PTR_ERR(pc->pclk), "Can't get APB clk\n");
ret = clk_prepare_enable(pc->clk);
if (ret)
return dev_err_probe(&pdev->dev, ret, "Can't prepare enable PWM clk\n");
ret = clk_prepare_enable(pc->pclk);
if (ret) {
dev_err_probe(&pdev->dev, ret, "Can't prepare enable APB clk\n");
goto err_clk;
}
platform_set_drvdata(pdev, pc);
pc->data = id->data;
pc->chip.dev = &pdev->dev;
pc->chip.ops = &rockchip_pwm_ops;
pc->chip.npwm = 1;
enable_conf = pc->data->enable_conf;
ctrl = readl_relaxed(pc->base + pc->data->regs.ctrl);
enabled = (ctrl & enable_conf) == enable_conf;
ret = pwmchip_add(&pc->chip);
if (ret < 0) {
dev_err_probe(&pdev->dev, ret, "pwmchip_add() failed\n");
goto err_pclk;
}
/* Keep the PWM clk enabled if the PWM appears to be up and running. */
if (!enabled)
clk_disable(pc->clk);
clk_disable(pc->pclk);
return 0;
err_pclk:
clk_disable_unprepare(pc->pclk);
err_clk:
clk_disable_unprepare(pc->clk);
return ret;
}
static void rockchip_pwm_remove(struct platform_device *pdev)
{
struct rockchip_pwm_chip *pc = platform_get_drvdata(pdev);
pwmchip_remove(&pc->chip);
clk_unprepare(pc->pclk);
clk_unprepare(pc->clk);
}
static struct platform_driver rockchip_pwm_driver = {
.driver = {
.name = "rockchip-pwm",
.of_match_table = rockchip_pwm_dt_ids,
},
.probe = rockchip_pwm_probe,
.remove_new = rockchip_pwm_remove,
};
module_platform_driver(rockchip_pwm_driver);
MODULE_AUTHOR("Beniamino Galvani <[email protected]>");
MODULE_DESCRIPTION("Rockchip SoC PWM driver");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/pwm/pwm-rockchip.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* simple driver for PWM (Pulse Width Modulator) controller
*
* Derived from pxa PWM driver by eric miao <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/slab.h>
#define MX1_PWMC 0x00 /* PWM Control Register */
#define MX1_PWMS 0x04 /* PWM Sample Register */
#define MX1_PWMP 0x08 /* PWM Period Register */
#define MX1_PWMC_EN BIT(4)
struct pwm_imx1_chip {
struct clk *clk_ipg;
struct clk *clk_per;
void __iomem *mmio_base;
struct pwm_chip chip;
};
#define to_pwm_imx1_chip(chip) container_of(chip, struct pwm_imx1_chip, chip)
static int pwm_imx1_clk_prepare_enable(struct pwm_chip *chip)
{
struct pwm_imx1_chip *imx = to_pwm_imx1_chip(chip);
int ret;
ret = clk_prepare_enable(imx->clk_ipg);
if (ret)
return ret;
ret = clk_prepare_enable(imx->clk_per);
if (ret) {
clk_disable_unprepare(imx->clk_ipg);
return ret;
}
return 0;
}
static void pwm_imx1_clk_disable_unprepare(struct pwm_chip *chip)
{
struct pwm_imx1_chip *imx = to_pwm_imx1_chip(chip);
clk_disable_unprepare(imx->clk_per);
clk_disable_unprepare(imx->clk_ipg);
}
static int pwm_imx1_config(struct pwm_chip *chip,
struct pwm_device *pwm, u64 duty_ns, u64 period_ns)
{
struct pwm_imx1_chip *imx = to_pwm_imx1_chip(chip);
u32 max, p;
/*
* The PWM subsystem allows for exact frequencies. However,
* I cannot connect a scope on my device to the PWM line and
* thus cannot verify that the PWM controller is programmed
* exactly. Instead, I'm relying on the fact that the
* Bootloader (u-boot or WinCE+haret) has programmed the PWM
* function group already. So I'll just modify the PWM sample
* register to follow the ratio of duty_ns vs. period_ns
* accordingly.
*
* This is good enough for programming the brightness of
* the LCD backlight.
*
* The real implementation would divide PERCLK[0] first by
* both the prescaler (/1 .. /128) and then by CLKSEL
* (/2 .. /16).
*/
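/*
* Worked example (illustrative values): if the bootloader left MX1_PWMP
* at 999 and a 25% duty cycle is requested (duty_ns/period_ns = 1/4),
* then p = 999 * 1 / 4 = 249 and MX1_PWMS is written with 999 - 249 = 750.
*/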
max = readl(imx->mmio_base + MX1_PWMP);
p = mul_u64_u64_div_u64(max, duty_ns, period_ns);
writel(max - p, imx->mmio_base + MX1_PWMS);
return 0;
}
static int pwm_imx1_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct pwm_imx1_chip *imx = to_pwm_imx1_chip(chip);
u32 value;
int ret;
ret = pwm_imx1_clk_prepare_enable(chip);
if (ret < 0)
return ret;
value = readl(imx->mmio_base + MX1_PWMC);
value |= MX1_PWMC_EN;
writel(value, imx->mmio_base + MX1_PWMC);
return 0;
}
static void pwm_imx1_disable(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct pwm_imx1_chip *imx = to_pwm_imx1_chip(chip);
u32 value;
value = readl(imx->mmio_base + MX1_PWMC);
value &= ~MX1_PWMC_EN;
writel(value, imx->mmio_base + MX1_PWMC);
pwm_imx1_clk_disable_unprepare(chip);
}
static int pwm_imx1_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
int err;
if (state->polarity != PWM_POLARITY_NORMAL)
return -EINVAL;
if (!state->enabled) {
if (pwm->state.enabled)
pwm_imx1_disable(chip, pwm);
return 0;
}
err = pwm_imx1_config(chip, pwm, state->duty_cycle, state->period);
if (err)
return err;
if (!pwm->state.enabled)
return pwm_imx1_enable(chip, pwm);
return 0;
}
static const struct pwm_ops pwm_imx1_ops = {
.apply = pwm_imx1_apply,
.owner = THIS_MODULE,
};
static const struct of_device_id pwm_imx1_dt_ids[] = {
{ .compatible = "fsl,imx1-pwm", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, pwm_imx1_dt_ids);
static int pwm_imx1_probe(struct platform_device *pdev)
{
struct pwm_imx1_chip *imx;
imx = devm_kzalloc(&pdev->dev, sizeof(*imx), GFP_KERNEL);
if (!imx)
return -ENOMEM;
imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(imx->clk_ipg))
return dev_err_probe(&pdev->dev, PTR_ERR(imx->clk_ipg),
"getting ipg clock failed\n");
imx->clk_per = devm_clk_get(&pdev->dev, "per");
if (IS_ERR(imx->clk_per))
return dev_err_probe(&pdev->dev, PTR_ERR(imx->clk_per),
"failed to get peripheral clock\n");
imx->chip.ops = &pwm_imx1_ops;
imx->chip.dev = &pdev->dev;
imx->chip.npwm = 1;
imx->mmio_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(imx->mmio_base))
return PTR_ERR(imx->mmio_base);
return devm_pwmchip_add(&pdev->dev, &imx->chip);
}
static struct platform_driver pwm_imx1_driver = {
.driver = {
.name = "pwm-imx1",
.of_match_table = pwm_imx1_dt_ids,
},
.probe = pwm_imx1_probe,
};
module_platform_driver(pwm_imx1_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Sascha Hauer <[email protected]>");
|
linux-master
|
drivers/pwm/pwm-imx1.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015 Neil Armstrong <[email protected]>
* Copyright (c) 2014 Joachim Eastwood <[email protected]>
* Copyright (c) 2012 NeilBrown <[email protected]>
* Heavily based on earlier code which is:
* Copyright (c) 2010 Grant Erickson <[email protected]>
*
* Also based on pwm-samsung.c
*
* Description:
* This file is the core OMAP support for the generic, Linux
* PWM driver / controller, using the OMAP's dual-mode timers
* with a timer counter that goes up. When it overflows it gets
* reloaded with the load value and the pwm output goes up.
* When the counter matches the match register, the output goes down.
* Reference Manual: https://www.ti.com/lit/ug/spruh73q/spruh73q.pdf
*
* Limitations:
* - When PWM is stopped, timer counter gets stopped immediately. This
* doesn't allow the current PWM period to complete and stops abruptly.
* - When PWM is running and changing both duty cycle and period,
* we cannot prevent in software that the output might produce
* a period with mixed settings. Especially when period/duty_cycle
* is updated while the pwm pin is high, current pwm period/duty_cycle
* can get updated as below based on the current timer counter:
* - period for current cycle = current_period + new period
* - duty_cycle for current period = current period + new duty_cycle.
* - PWM OMAP DM timer cannot change the polarity when pwm is active. When
* user requests a change in polarity when in active state:
* - PWM is stopped abruptly (without completing the current cycle)
* - Polarity is changed
* - A fresh cycle is started.
*/
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <clocksource/timer-ti-dm.h>
#include <linux/platform_data/dmtimer-omap.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pwm.h>
#include <linux/slab.h>
#include <linux/time.h>
#define DM_TIMER_LOAD_MIN 0xfffffffe
#define DM_TIMER_MAX 0xffffffff
/**
* struct pwm_omap_dmtimer_chip - Structure representing a pwm chip
* corresponding to omap dmtimer.
* @chip: PWM chip structure representing PWM controller
* @mutex: Mutex to protect pwm apply state
* @dm_timer: Pointer to omap dm timer.
* @pdata: Pointer to omap dm timer ops.
* @dm_timer_pdev: Pointer to omap dm timer platform device
*/
struct pwm_omap_dmtimer_chip {
struct pwm_chip chip;
/* Mutex to protect pwm apply state */
struct mutex mutex;
struct omap_dm_timer *dm_timer;
const struct omap_dm_timer_ops *pdata;
struct platform_device *dm_timer_pdev;
};
static inline struct pwm_omap_dmtimer_chip *
to_pwm_omap_dmtimer_chip(struct pwm_chip *chip)
{
return container_of(chip, struct pwm_omap_dmtimer_chip, chip);
}
/**
* pwm_omap_dmtimer_get_clock_cycles() - Get clock cycles in a time frame
* @clk_rate: pwm timer clock rate
* @ns: time frame in nano seconds.
*
* Return number of clock cycles in a given period(ins ns).
*/
static u32 pwm_omap_dmtimer_get_clock_cycles(unsigned long clk_rate, int ns)
{
return DIV_ROUND_CLOSEST_ULL((u64)clk_rate * ns, NSEC_PER_SEC);
}
/**
* pwm_omap_dmtimer_start() - Start the pwm omap dm timer in pwm mode
* @omap: Pointer to pwm omap dm timer chip
*/
static void pwm_omap_dmtimer_start(struct pwm_omap_dmtimer_chip *omap)
{
/*
* According to OMAP 4 TRM section 22.2.4.10 the counter should be
* started at 0xFFFFFFFE when overflow and match is used to ensure
* that the PWM line is toggled on the first event.
*
* Note that omap_dm_timer_enable/disable is for register access and
* not the timer counter itself.
*/
omap->pdata->enable(omap->dm_timer);
omap->pdata->write_counter(omap->dm_timer, DM_TIMER_LOAD_MIN);
omap->pdata->disable(omap->dm_timer);
omap->pdata->start(omap->dm_timer);
}
/**
* pwm_omap_dmtimer_is_enabled() - Detect if the pwm is enabled.
* @omap: Pointer to pwm omap dm timer chip
*
* Return true if pwm is enabled else false.
*/
static bool pwm_omap_dmtimer_is_enabled(struct pwm_omap_dmtimer_chip *omap)
{
u32 status;
status = omap->pdata->get_pwm_status(omap->dm_timer);
return !!(status & OMAP_TIMER_CTRL_ST);
}
/**
* pwm_omap_dmtimer_polarity() - Detect the polarity of pwm.
* @omap: Pointer to pwm omap dm timer chip
*
* Return the polarity of pwm.
*/
static int pwm_omap_dmtimer_polarity(struct pwm_omap_dmtimer_chip *omap)
{
u32 status;
status = omap->pdata->get_pwm_status(omap->dm_timer);
return !!(status & OMAP_TIMER_CTRL_SCPWM);
}
/**
* pwm_omap_dmtimer_config() - Update the configuration of pwm omap dm timer
* @chip: Pointer to PWM controller
* @pwm: Pointer to PWM channel
* @duty_ns: New duty cycle in nano seconds
* @period_ns: New period in nano seconds
*
* Return 0 if successfully changed the period/duty_cycle else appropriate
* error.
*/
static int pwm_omap_dmtimer_config(struct pwm_chip *chip,
struct pwm_device *pwm,
int duty_ns, int period_ns)
{
struct pwm_omap_dmtimer_chip *omap = to_pwm_omap_dmtimer_chip(chip);
u32 period_cycles, duty_cycles;
u32 load_value, match_value;
unsigned long clk_rate;
struct clk *fclk;
dev_dbg(chip->dev, "requested duty cycle: %d ns, period: %d ns\n",
duty_ns, period_ns);
if (duty_ns == pwm_get_duty_cycle(pwm) &&
period_ns == pwm_get_period(pwm))
return 0;
fclk = omap->pdata->get_fclk(omap->dm_timer);
if (!fclk) {
dev_err(chip->dev, "invalid pmtimer fclk\n");
return -EINVAL;
}
clk_rate = clk_get_rate(fclk);
if (!clk_rate) {
dev_err(chip->dev, "invalid pmtimer fclk rate\n");
return -EINVAL;
}
dev_dbg(chip->dev, "clk rate: %luHz\n", clk_rate);
/*
* Calculate the appropriate load and match values based on the
* specified period and duty cycle. The load value determines the
* period time and the match value determines the duty time.
*
* The period lasts for (DM_TIMER_MAX-load_value+1) clock cycles.
* Similarly, the active time lasts (match_value-load_value+1) cycles.
* The non-active time is the remainder: (DM_TIMER_MAX-match_value)
* clock cycles.
*
* NOTE: It is required that: load_value <= match_value < DM_TIMER_MAX
*
* References:
* OMAP4430/60/70 TRM sections 22.2.4.10 and 22.2.4.11
* AM335x Sitara TRM sections 20.1.3.5 and 20.1.3.6
*/
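/*
* Worked example (illustrative, assuming a 32768 Hz timer clock): a 1 s
* period at 50% duty gives period_cycles = 32768 and duty_cycles = 16384,
* so load_value = 0xffffffff - 32768 + 1 = 0xffff8000 and
* match_value = 0xffff8000 + 16384 - 1 = 0xffffbfff.
*/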
period_cycles = pwm_omap_dmtimer_get_clock_cycles(clk_rate, period_ns);
duty_cycles = pwm_omap_dmtimer_get_clock_cycles(clk_rate, duty_ns);
if (period_cycles < 2) {
dev_info(chip->dev,
"period %d ns too short for clock rate %lu Hz\n",
period_ns, clk_rate);
return -EINVAL;
}
if (duty_cycles < 1) {
dev_dbg(chip->dev,
"duty cycle %d ns is too short for clock rate %lu Hz\n",
duty_ns, clk_rate);
dev_dbg(chip->dev, "using minimum of 1 clock cycle\n");
duty_cycles = 1;
} else if (duty_cycles >= period_cycles) {
dev_dbg(chip->dev,
"duty cycle %d ns is too long for period %d ns at clock rate %lu Hz\n",
duty_ns, period_ns, clk_rate);
dev_dbg(chip->dev, "using maximum of 1 clock cycle less than period\n");
duty_cycles = period_cycles - 1;
}
dev_dbg(chip->dev, "effective duty cycle: %lld ns, period: %lld ns\n",
DIV_ROUND_CLOSEST_ULL((u64)NSEC_PER_SEC * duty_cycles,
clk_rate),
DIV_ROUND_CLOSEST_ULL((u64)NSEC_PER_SEC * period_cycles,
clk_rate));
load_value = (DM_TIMER_MAX - period_cycles) + 1;
match_value = load_value + duty_cycles - 1;
omap->pdata->set_load(omap->dm_timer, load_value);
omap->pdata->set_match(omap->dm_timer, true, match_value);
dev_dbg(chip->dev, "load value: %#08x (%d), match value: %#08x (%d)\n",
load_value, load_value, match_value, match_value);
return 0;
}
/**
* pwm_omap_dmtimer_set_polarity() - Changes the polarity of the pwm dm timer.
* @chip: Pointer to PWM controller
* @pwm: Pointer to PWM channel
* @polarity: New pwm polarity to be set
*/
static void pwm_omap_dmtimer_set_polarity(struct pwm_chip *chip,
struct pwm_device *pwm,
enum pwm_polarity polarity)
{
struct pwm_omap_dmtimer_chip *omap = to_pwm_omap_dmtimer_chip(chip);
bool enabled;
/* Disable the PWM before changing the polarity. */
enabled = pwm_omap_dmtimer_is_enabled(omap);
if (enabled)
omap->pdata->stop(omap->dm_timer);
omap->pdata->set_pwm(omap->dm_timer,
polarity == PWM_POLARITY_INVERSED,
true, OMAP_TIMER_TRIGGER_OVERFLOW_AND_COMPARE,
true);
if (enabled)
pwm_omap_dmtimer_start(omap);
}
/**
* pwm_omap_dmtimer_apply() - Changes the state of the pwm omap dm timer.
* @chip: Pointer to PWM controller
* @pwm: Pointer to PWM channel
* @state: New state to apply
*
* Return 0 if successfully changed the state else appropriate error.
*/
static int pwm_omap_dmtimer_apply(struct pwm_chip *chip,
struct pwm_device *pwm,
const struct pwm_state *state)
{
struct pwm_omap_dmtimer_chip *omap = to_pwm_omap_dmtimer_chip(chip);
int ret = 0;
mutex_lock(&omap->mutex);
if (pwm_omap_dmtimer_is_enabled(omap) && !state->enabled) {
omap->pdata->stop(omap->dm_timer);
goto unlock_mutex;
}
if (pwm_omap_dmtimer_polarity(omap) != state->polarity)
pwm_omap_dmtimer_set_polarity(chip, pwm, state->polarity);
ret = pwm_omap_dmtimer_config(chip, pwm, state->duty_cycle,
state->period);
if (ret)
goto unlock_mutex;
if (!pwm_omap_dmtimer_is_enabled(omap) && state->enabled) {
omap->pdata->set_pwm(omap->dm_timer,
state->polarity == PWM_POLARITY_INVERSED,
true,
OMAP_TIMER_TRIGGER_OVERFLOW_AND_COMPARE,
true);
pwm_omap_dmtimer_start(omap);
}
unlock_mutex:
mutex_unlock(&omap->mutex);
return ret;
}
static const struct pwm_ops pwm_omap_dmtimer_ops = {
.apply = pwm_omap_dmtimer_apply,
.owner = THIS_MODULE,
};
static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct dmtimer_platform_data *timer_pdata;
const struct omap_dm_timer_ops *pdata;
struct platform_device *timer_pdev;
struct pwm_omap_dmtimer_chip *omap;
struct omap_dm_timer *dm_timer;
struct device_node *timer;
int ret = 0;
u32 v;
timer = of_parse_phandle(np, "ti,timers", 0);
if (!timer)
return -ENODEV;
timer_pdev = of_find_device_by_node(timer);
if (!timer_pdev) {
dev_err(&pdev->dev, "Unable to find Timer pdev\n");
ret = -ENODEV;
goto err_find_timer_pdev;
}
timer_pdata = dev_get_platdata(&timer_pdev->dev);
if (!timer_pdata) {
dev_dbg(&pdev->dev,
"dmtimer pdata structure NULL, deferring probe\n");
ret = -EPROBE_DEFER;
goto err_platdata;
}
pdata = timer_pdata->timer_ops;
if (!pdata || !pdata->request_by_node ||
!pdata->free ||
!pdata->enable ||
!pdata->disable ||
!pdata->get_fclk ||
!pdata->start ||
!pdata->stop ||
!pdata->set_load ||
!pdata->set_match ||
!pdata->set_pwm ||
!pdata->get_pwm_status ||
!pdata->set_prescaler ||
!pdata->write_counter) {
dev_err(&pdev->dev, "Incomplete dmtimer pdata structure\n");
ret = -EINVAL;
goto err_platdata;
}
if (!of_get_property(timer, "ti,timer-pwm", NULL)) {
dev_err(&pdev->dev, "Missing ti,timer-pwm capability\n");
ret = -ENODEV;
goto err_timer_property;
}
dm_timer = pdata->request_by_node(timer);
if (!dm_timer) {
ret = -EPROBE_DEFER;
goto err_request_timer;
}
omap = devm_kzalloc(&pdev->dev, sizeof(*omap), GFP_KERNEL);
if (!omap) {
ret = -ENOMEM;
goto err_alloc_omap;
}
omap->pdata = pdata;
omap->dm_timer = dm_timer;
omap->dm_timer_pdev = timer_pdev;
/*
* Ensure that the timer is stopped before we allow PWM core to call
* pwm_enable.
*/
if (pm_runtime_active(&omap->dm_timer_pdev->dev))
omap->pdata->stop(omap->dm_timer);
if (!of_property_read_u32(pdev->dev.of_node, "ti,prescaler", &v))
omap->pdata->set_prescaler(omap->dm_timer, v);
/* setup dmtimer clock source */
if (!of_property_read_u32(pdev->dev.of_node, "ti,clock-source", &v))
omap->pdata->set_source(omap->dm_timer, v);
omap->chip.dev = &pdev->dev;
omap->chip.ops = &pwm_omap_dmtimer_ops;
omap->chip.npwm = 1;
mutex_init(&omap->mutex);
ret = pwmchip_add(&omap->chip);
if (ret < 0) {
dev_err(&pdev->dev, "failed to register PWM\n");
goto err_pwmchip_add;
}
of_node_put(timer);
platform_set_drvdata(pdev, omap);
return 0;
err_pwmchip_add:
/*
* *omap is allocated using devm_kzalloc,
* so no free necessary here
*/
err_alloc_omap:
pdata->free(dm_timer);
err_request_timer:
err_timer_property:
err_platdata:
put_device(&timer_pdev->dev);
err_find_timer_pdev:
of_node_put(timer);
return ret;
}
static void pwm_omap_dmtimer_remove(struct platform_device *pdev)
{
struct pwm_omap_dmtimer_chip *omap = platform_get_drvdata(pdev);
pwmchip_remove(&omap->chip);
if (pm_runtime_active(&omap->dm_timer_pdev->dev))
omap->pdata->stop(omap->dm_timer);
omap->pdata->free(omap->dm_timer);
put_device(&omap->dm_timer_pdev->dev);
mutex_destroy(&omap->mutex);
}
static const struct of_device_id pwm_omap_dmtimer_of_match[] = {
{.compatible = "ti,omap-dmtimer-pwm"},
{}
};
MODULE_DEVICE_TABLE(of, pwm_omap_dmtimer_of_match);
static struct platform_driver pwm_omap_dmtimer_driver = {
.driver = {
.name = "omap-dmtimer-pwm",
.of_match_table = of_match_ptr(pwm_omap_dmtimer_of_match),
},
.probe = pwm_omap_dmtimer_probe,
.remove_new = pwm_omap_dmtimer_remove,
};
module_platform_driver(pwm_omap_dmtimer_driver);
MODULE_AUTHOR("Grant Erickson <[email protected]>");
MODULE_AUTHOR("NeilBrown <[email protected]>");
MODULE_AUTHOR("Neil Armstrong <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("OMAP PWM Driver using Dual-mode Timers");
|
linux-master
|
drivers/pwm/pwm-omap-dmtimer.c
|
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2016 Broadcom
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#define IPROC_PWM_CTRL_OFFSET 0x00
#define IPROC_PWM_CTRL_TYPE_SHIFT(ch) (15 + (ch))
#define IPROC_PWM_CTRL_POLARITY_SHIFT(ch) (8 + (ch))
#define IPROC_PWM_CTRL_EN_SHIFT(ch) (ch)
#define IPROC_PWM_PERIOD_OFFSET(ch) (0x04 + ((ch) << 3))
#define IPROC_PWM_PERIOD_MIN 0x02
#define IPROC_PWM_PERIOD_MAX 0xffff
#define IPROC_PWM_DUTY_CYCLE_OFFSET(ch) (0x08 + ((ch) << 3))
#define IPROC_PWM_DUTY_CYCLE_MIN 0x00
#define IPROC_PWM_DUTY_CYCLE_MAX 0xffff
#define IPROC_PWM_PRESCALE_OFFSET 0x24
#define IPROC_PWM_PRESCALE_BITS 0x06
#define IPROC_PWM_PRESCALE_SHIFT(ch) ((3 - (ch)) * \
IPROC_PWM_PRESCALE_BITS)
#define IPROC_PWM_PRESCALE_MASK(ch) (IPROC_PWM_PRESCALE_MAX << \
IPROC_PWM_PRESCALE_SHIFT(ch))
#define IPROC_PWM_PRESCALE_MIN 0x00
#define IPROC_PWM_PRESCALE_MAX 0x3f
struct iproc_pwmc {
struct pwm_chip chip;
void __iomem *base;
struct clk *clk;
};
static inline struct iproc_pwmc *to_iproc_pwmc(struct pwm_chip *chip)
{
return container_of(chip, struct iproc_pwmc, chip);
}
static void iproc_pwmc_enable(struct iproc_pwmc *ip, unsigned int channel)
{
u32 value;
value = readl(ip->base + IPROC_PWM_CTRL_OFFSET);
value |= 1 << IPROC_PWM_CTRL_EN_SHIFT(channel);
writel(value, ip->base + IPROC_PWM_CTRL_OFFSET);
/* must be a 400 ns delay between clearing and setting enable bit */
ndelay(400);
}
static void iproc_pwmc_disable(struct iproc_pwmc *ip, unsigned int channel)
{
u32 value;
value = readl(ip->base + IPROC_PWM_CTRL_OFFSET);
value &= ~(1 << IPROC_PWM_CTRL_EN_SHIFT(channel));
writel(value, ip->base + IPROC_PWM_CTRL_OFFSET);
/* must be a 400 ns delay between clearing and setting enable bit */
ndelay(400);
}
static int iproc_pwmc_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_state *state)
{
struct iproc_pwmc *ip = to_iproc_pwmc(chip);
u64 tmp, multi, rate;
u32 value, prescale;
value = readl(ip->base + IPROC_PWM_CTRL_OFFSET);
if (value & BIT(IPROC_PWM_CTRL_EN_SHIFT(pwm->hwpwm)))
state->enabled = true;
else
state->enabled = false;
if (value & BIT(IPROC_PWM_CTRL_POLARITY_SHIFT(pwm->hwpwm)))
state->polarity = PWM_POLARITY_NORMAL;
else
state->polarity = PWM_POLARITY_INVERSED;
rate = clk_get_rate(ip->clk);
if (rate == 0) {
state->period = 0;
state->duty_cycle = 0;
return 0;
}
value = readl(ip->base + IPROC_PWM_PRESCALE_OFFSET);
prescale = value >> IPROC_PWM_PRESCALE_SHIFT(pwm->hwpwm);
prescale &= IPROC_PWM_PRESCALE_MAX;
multi = NSEC_PER_SEC * (prescale + 1);
value = readl(ip->base + IPROC_PWM_PERIOD_OFFSET(pwm->hwpwm));
tmp = (value & IPROC_PWM_PERIOD_MAX) * multi;
state->period = div64_u64(tmp, rate);
value = readl(ip->base + IPROC_PWM_DUTY_CYCLE_OFFSET(pwm->hwpwm));
tmp = (value & IPROC_PWM_PERIOD_MAX) * multi;
state->duty_cycle = div64_u64(tmp, rate);
return 0;
}
static int iproc_pwmc_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
unsigned long prescale = IPROC_PWM_PRESCALE_MIN;
struct iproc_pwmc *ip = to_iproc_pwmc(chip);
u32 value, period, duty;
u64 rate;
rate = clk_get_rate(ip->clk);
/*
* Find period count, duty count and prescale to suit duty_cycle and
* period. This is done according to formulas described below:
*
* period_ns = 10^9 * (PRESCALE + 1) * PC / PWM_CLK_RATE
* duty_ns = 10^9 * (PRESCALE + 1) * DC / PWM_CLK_RATE
*
* PC = (PWM_CLK_RATE * period_ns) / (10^9 * (PRESCALE + 1))
* DC = (PWM_CLK_RATE * duty_ns) / (10^9 * (PRESCALE + 1))
*/
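/*
* Worked example (illustrative, assuming a 25 MHz PWM clock): a 100 us
* period with a 40 us duty cycle at PRESCALE = 0 gives
* PC = 25000000 * 100000 / 10^9 = 2500 and DC = 1000, both within the
* 16-bit period and duty cycle register limits.
*/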
while (1) {
u64 value, div;
div = NSEC_PER_SEC * (prescale + 1);
value = rate * state->period;
period = div64_u64(value, div);
value = rate * state->duty_cycle;
duty = div64_u64(value, div);
if (period < IPROC_PWM_PERIOD_MIN)
return -EINVAL;
if (period <= IPROC_PWM_PERIOD_MAX &&
duty <= IPROC_PWM_DUTY_CYCLE_MAX)
break;
/* Otherwise, increase prescale and recalculate counts */
if (++prescale > IPROC_PWM_PRESCALE_MAX)
return -EINVAL;
}
iproc_pwmc_disable(ip, pwm->hwpwm);
/* Set prescale */
value = readl(ip->base + IPROC_PWM_PRESCALE_OFFSET);
value &= ~IPROC_PWM_PRESCALE_MASK(pwm->hwpwm);
value |= prescale << IPROC_PWM_PRESCALE_SHIFT(pwm->hwpwm);
writel(value, ip->base + IPROC_PWM_PRESCALE_OFFSET);
/* set period and duty cycle */
writel(period, ip->base + IPROC_PWM_PERIOD_OFFSET(pwm->hwpwm));
writel(duty, ip->base + IPROC_PWM_DUTY_CYCLE_OFFSET(pwm->hwpwm));
/* set polarity */
value = readl(ip->base + IPROC_PWM_CTRL_OFFSET);
if (state->polarity == PWM_POLARITY_NORMAL)
value |= 1 << IPROC_PWM_CTRL_POLARITY_SHIFT(pwm->hwpwm);
else
value &= ~(1 << IPROC_PWM_CTRL_POLARITY_SHIFT(pwm->hwpwm));
writel(value, ip->base + IPROC_PWM_CTRL_OFFSET);
if (state->enabled)
iproc_pwmc_enable(ip, pwm->hwpwm);
return 0;
}
static const struct pwm_ops iproc_pwm_ops = {
.apply = iproc_pwmc_apply,
.get_state = iproc_pwmc_get_state,
.owner = THIS_MODULE,
};
static int iproc_pwmc_probe(struct platform_device *pdev)
{
struct iproc_pwmc *ip;
unsigned int i;
u32 value;
int ret;
ip = devm_kzalloc(&pdev->dev, sizeof(*ip), GFP_KERNEL);
if (!ip)
return -ENOMEM;
platform_set_drvdata(pdev, ip);
ip->chip.dev = &pdev->dev;
ip->chip.ops = &iproc_pwm_ops;
ip->chip.npwm = 4;
ip->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ip->base))
return PTR_ERR(ip->base);
ip->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(ip->clk)) {
dev_err(&pdev->dev, "failed to get clock: %ld\n",
PTR_ERR(ip->clk));
return PTR_ERR(ip->clk);
}
ret = clk_prepare_enable(ip->clk);
if (ret < 0) {
dev_err(&pdev->dev, "failed to enable clock: %d\n", ret);
return ret;
}
/* Set full drive and normal polarity for all channels */
value = readl(ip->base + IPROC_PWM_CTRL_OFFSET);
for (i = 0; i < ip->chip.npwm; i++) {
value &= ~(1 << IPROC_PWM_CTRL_TYPE_SHIFT(i));
value |= 1 << IPROC_PWM_CTRL_POLARITY_SHIFT(i);
}
writel(value, ip->base + IPROC_PWM_CTRL_OFFSET);
ret = pwmchip_add(&ip->chip);
if (ret < 0) {
dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
clk_disable_unprepare(ip->clk);
}
return ret;
}
static void iproc_pwmc_remove(struct platform_device *pdev)
{
struct iproc_pwmc *ip = platform_get_drvdata(pdev);
pwmchip_remove(&ip->chip);
clk_disable_unprepare(ip->clk);
}
static const struct of_device_id bcm_iproc_pwmc_dt[] = {
{ .compatible = "brcm,iproc-pwm" },
{ },
};
MODULE_DEVICE_TABLE(of, bcm_iproc_pwmc_dt);
static struct platform_driver iproc_pwmc_driver = {
.driver = {
.name = "bcm-iproc-pwm",
.of_match_table = bcm_iproc_pwmc_dt,
},
.probe = iproc_pwmc_probe,
.remove_new = iproc_pwmc_remove,
};
module_platform_driver(iproc_pwmc_driver);
MODULE_AUTHOR("Yendapally Reddy Dhananjaya Reddy <[email protected]>");
MODULE_DESCRIPTION("Broadcom iProc PWM driver");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/pwm/pwm-bcm-iproc.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* R-Mobile TPU PWM driver
*
* Copyright (C) 2012 Renesas Solutions Corp.
*/
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pwm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#define TPU_CHANNEL_MAX 4
#define TPU_TSTR 0x00 /* Timer start register (shared) */
#define TPU_TCRn 0x00 /* Timer control register */
#define TPU_TCR_CCLR_NONE (0 << 5)
#define TPU_TCR_CCLR_TGRA (1 << 5)
#define TPU_TCR_CCLR_TGRB (2 << 5)
#define TPU_TCR_CCLR_TGRC (5 << 5)
#define TPU_TCR_CCLR_TGRD (6 << 5)
#define TPU_TCR_CKEG_RISING (0 << 3)
#define TPU_TCR_CKEG_FALLING (1 << 3)
#define TPU_TCR_CKEG_BOTH (2 << 3)
#define TPU_TMDRn 0x04 /* Timer mode register */
#define TPU_TMDR_BFWT (1 << 6)
#define TPU_TMDR_BFB (1 << 5)
#define TPU_TMDR_BFA (1 << 4)
#define TPU_TMDR_MD_NORMAL (0 << 0)
#define TPU_TMDR_MD_PWM (2 << 0)
#define TPU_TIORn 0x08 /* Timer I/O control register */
#define TPU_TIOR_IOA_0 (0 << 0)
#define TPU_TIOR_IOA_0_CLR (1 << 0)
#define TPU_TIOR_IOA_0_SET (2 << 0)
#define TPU_TIOR_IOA_0_TOGGLE (3 << 0)
#define TPU_TIOR_IOA_1 (4 << 0)
#define TPU_TIOR_IOA_1_CLR (5 << 0)
#define TPU_TIOR_IOA_1_SET (6 << 0)
#define TPU_TIOR_IOA_1_TOGGLE (7 << 0)
#define TPU_TIERn 0x0c /* Timer interrupt enable register */
#define TPU_TSRn 0x10 /* Timer status register */
#define TPU_TCNTn 0x14 /* Timer counter */
#define TPU_TGRAn 0x18 /* Timer general register A */
#define TPU_TGRBn 0x1c /* Timer general register B */
#define TPU_TGRCn 0x20 /* Timer general register C */
#define TPU_TGRDn 0x24 /* Timer general register D */
#define TPU_CHANNEL_OFFSET 0x10
#define TPU_CHANNEL_SIZE 0x40
enum tpu_pin_state {
TPU_PIN_INACTIVE, /* Pin is driven inactive */
TPU_PIN_PWM, /* Pin is driven by PWM */
TPU_PIN_ACTIVE, /* Pin is driven active */
};
struct tpu_device;
struct tpu_pwm_device {
bool timer_on; /* Whether the timer is running */
struct tpu_device *tpu;
unsigned int channel; /* Channel number in the TPU */
enum pwm_polarity polarity;
unsigned int prescaler;
u16 period;
u16 duty;
};
struct tpu_device {
struct platform_device *pdev;
struct pwm_chip chip;
spinlock_t lock;
void __iomem *base;
struct clk *clk;
};
#define to_tpu_device(c) container_of(c, struct tpu_device, chip)
static void tpu_pwm_write(struct tpu_pwm_device *tpd, int reg_nr, u16 value)
{
void __iomem *base = tpd->tpu->base + TPU_CHANNEL_OFFSET
+ tpd->channel * TPU_CHANNEL_SIZE;
iowrite16(value, base + reg_nr);
}
static void tpu_pwm_set_pin(struct tpu_pwm_device *tpd,
enum tpu_pin_state state)
{
static const char * const states[] = { "inactive", "PWM", "active" };
dev_dbg(&tpd->tpu->pdev->dev, "%u: configuring pin as %s\n",
tpd->channel, states[state]);
switch (state) {
case TPU_PIN_INACTIVE:
tpu_pwm_write(tpd, TPU_TIORn,
tpd->polarity == PWM_POLARITY_INVERSED ?
TPU_TIOR_IOA_1 : TPU_TIOR_IOA_0);
break;
case TPU_PIN_PWM:
tpu_pwm_write(tpd, TPU_TIORn,
tpd->polarity == PWM_POLARITY_INVERSED ?
TPU_TIOR_IOA_0_SET : TPU_TIOR_IOA_1_CLR);
break;
case TPU_PIN_ACTIVE:
tpu_pwm_write(tpd, TPU_TIORn,
tpd->polarity == PWM_POLARITY_INVERSED ?
TPU_TIOR_IOA_0 : TPU_TIOR_IOA_1);
break;
}
}
static void tpu_pwm_start_stop(struct tpu_pwm_device *tpd, int start)
{
unsigned long flags;
u16 value;
spin_lock_irqsave(&tpd->tpu->lock, flags);
value = ioread16(tpd->tpu->base + TPU_TSTR);
if (start)
value |= 1 << tpd->channel;
else
value &= ~(1 << tpd->channel);
iowrite16(value, tpd->tpu->base + TPU_TSTR);
spin_unlock_irqrestore(&tpd->tpu->lock, flags);
}
static int tpu_pwm_timer_start(struct tpu_pwm_device *tpd)
{
int ret;
if (!tpd->timer_on) {
/* Wake up device and enable clock. */
pm_runtime_get_sync(&tpd->tpu->pdev->dev);
ret = clk_prepare_enable(tpd->tpu->clk);
if (ret) {
dev_err(&tpd->tpu->pdev->dev, "cannot enable clock\n");
return ret;
}
tpd->timer_on = true;
}
/*
* Make sure the channel is stopped, as we need to reconfigure it
* completely. First drive the pin to the inactive state to avoid
* glitches.
*/
tpu_pwm_set_pin(tpd, TPU_PIN_INACTIVE);
tpu_pwm_start_stop(tpd, false);
/*
* - Clear TCNT on TGRB match
* - Count on rising edge
* - Set prescaler
* - Output 0 until TGRA, output 1 until TGRB (active low polarity)
* - Output 1 until TGRA, output 0 until TGRB (active high polarity)
* - PWM mode
*/
tpu_pwm_write(tpd, TPU_TCRn, TPU_TCR_CCLR_TGRB | TPU_TCR_CKEG_RISING |
tpd->prescaler);
tpu_pwm_write(tpd, TPU_TMDRn, TPU_TMDR_MD_PWM);
tpu_pwm_set_pin(tpd, TPU_PIN_PWM);
tpu_pwm_write(tpd, TPU_TGRAn, tpd->duty);
tpu_pwm_write(tpd, TPU_TGRBn, tpd->period);
dev_dbg(&tpd->tpu->pdev->dev, "%u: TGRA 0x%04x TGRB 0x%04x\n",
tpd->channel, tpd->duty, tpd->period);
/* Start the channel. */
tpu_pwm_start_stop(tpd, true);
return 0;
}
static void tpu_pwm_timer_stop(struct tpu_pwm_device *tpd)
{
if (!tpd->timer_on)
return;
/* Disable channel. */
tpu_pwm_start_stop(tpd, false);
/* Stop clock and mark device as idle. */
clk_disable_unprepare(tpd->tpu->clk);
pm_runtime_put(&tpd->tpu->pdev->dev);
tpd->timer_on = false;
}
/* -----------------------------------------------------------------------------
* PWM API
*/
static int tpu_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct tpu_device *tpu = to_tpu_device(chip);
struct tpu_pwm_device *tpd;
if (pwm->hwpwm >= TPU_CHANNEL_MAX)
return -EINVAL;
tpd = kzalloc(sizeof(*tpd), GFP_KERNEL);
if (tpd == NULL)
return -ENOMEM;
tpd->tpu = tpu;
tpd->channel = pwm->hwpwm;
tpd->polarity = PWM_POLARITY_NORMAL;
tpd->prescaler = 0;
tpd->period = 0;
tpd->duty = 0;
tpd->timer_on = false;
pwm_set_chip_data(pwm, tpd);
return 0;
}
static void tpu_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct tpu_pwm_device *tpd = pwm_get_chip_data(pwm);
tpu_pwm_timer_stop(tpd);
kfree(tpd);
}
static int tpu_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
u64 duty_ns, u64 period_ns, bool enabled)
{
struct tpu_pwm_device *tpd = pwm_get_chip_data(pwm);
struct tpu_device *tpu = to_tpu_device(chip);
unsigned int prescaler;
bool duty_only = false;
u32 clk_rate;
u64 period;
u32 duty;
int ret;
clk_rate = clk_get_rate(tpu->clk);
if (unlikely(clk_rate > NSEC_PER_SEC)) {
/*
* This won't happen in the foreseeable future, so this is only a
* safeguard to prevent the following calculation from
* overflowing. With this, clk_rate * period_ns / NSEC_PER_SEC is
* not greater than period_ns and so fits into a u64.
*/
return -EINVAL;
}
period = mul_u64_u64_div_u64(clk_rate, period_ns, NSEC_PER_SEC);
/*
* Find the minimal prescaler in [0..3] such that
*
* period >> (2 * prescaler) < 0x10000
*
* This could be calculated using something like:
*
* prescaler = max(ilog2(period) / 2, 7) - 7;
*
* but given there are only four allowed results and that ilog2 isn't
* cheap on all platforms using a switch statement is more effective.
*/
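/*
* Worked example (illustrative): for period = 0x25000 (151552 cycles) the
* switch below selects prescaler = 1, so the counter runs at clk_rate / 4
* and period becomes 0x25000 >> 2 = 0x9400, which fits the 16-bit TGRB
* register.
*/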
switch (period) {
case 1 ... 0xffff:
prescaler = 0;
break;
case 0x10000 ... 0x3ffff:
prescaler = 1;
break;
case 0x40000 ... 0xfffff:
prescaler = 2;
break;
case 0x100000 ... 0x3fffff:
prescaler = 3;
break;
default:
return -EINVAL;
}
period >>= 2 * prescaler;
if (duty_ns)
duty = mul_u64_u64_div_u64(clk_rate, duty_ns,
(u64)NSEC_PER_SEC << (2 * prescaler));
else
duty = 0;
dev_dbg(&tpu->pdev->dev,
"rate %u, prescaler %u, period %u, duty %u\n",
clk_rate, 1 << (2 * prescaler), (u32)period, duty);
if (tpd->prescaler == prescaler && tpd->period == period)
duty_only = true;
tpd->prescaler = prescaler;
tpd->period = period;
tpd->duty = duty;
/* If the channel is disabled we're done. */
if (!enabled)
return 0;
if (duty_only && tpd->timer_on) {
/*
* If only the duty cycle changed and the timer is already
* running, there's no need to reconfigure it completely; just
* modify the duty cycle.
*/
tpu_pwm_write(tpd, TPU_TGRAn, tpd->duty);
dev_dbg(&tpu->pdev->dev, "%u: TGRA 0x%04x\n", tpd->channel,
tpd->duty);
} else {
/* Otherwise perform a full reconfiguration. */
ret = tpu_pwm_timer_start(tpd);
if (ret < 0)
return ret;
}
if (duty == 0 || duty == period) {
/*
* To avoid running the timer when not strictly required, handle
* 0% and 100% duty cycles as fixed levels and stop the timer.
*/
tpu_pwm_set_pin(tpd, duty ? TPU_PIN_ACTIVE : TPU_PIN_INACTIVE);
tpu_pwm_timer_stop(tpd);
}
return 0;
}
static int tpu_pwm_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm,
enum pwm_polarity polarity)
{
struct tpu_pwm_device *tpd = pwm_get_chip_data(pwm);
tpd->polarity = polarity;
return 0;
}
static int tpu_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct tpu_pwm_device *tpd = pwm_get_chip_data(pwm);
int ret;
ret = tpu_pwm_timer_start(tpd);
if (ret < 0)
return ret;
/*
* To avoid running the timer when not strictly required, handle 0% and
* 100% duty cycles as fixed levels and stop the timer.
*/
if (tpd->duty == 0 || tpd->duty == tpd->period) {
tpu_pwm_set_pin(tpd, tpd->duty ?
TPU_PIN_ACTIVE : TPU_PIN_INACTIVE);
tpu_pwm_timer_stop(tpd);
}
return 0;
}
static void tpu_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct tpu_pwm_device *tpd = pwm_get_chip_data(pwm);
/* The timer must be running to modify the pin output configuration. */
tpu_pwm_timer_start(tpd);
tpu_pwm_set_pin(tpd, TPU_PIN_INACTIVE);
tpu_pwm_timer_stop(tpd);
}
static int tpu_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
int err;
bool enabled = pwm->state.enabled;
if (state->polarity != pwm->state.polarity) {
if (enabled) {
tpu_pwm_disable(chip, pwm);
enabled = false;
}
err = tpu_pwm_set_polarity(chip, pwm, state->polarity);
if (err)
return err;
}
if (!state->enabled) {
if (enabled)
tpu_pwm_disable(chip, pwm);
return 0;
}
err = tpu_pwm_config(pwm->chip, pwm,
state->duty_cycle, state->period, enabled);
if (err)
return err;
if (!enabled)
err = tpu_pwm_enable(chip, pwm);
return err;
}
static const struct pwm_ops tpu_pwm_ops = {
.request = tpu_pwm_request,
.free = tpu_pwm_free,
.apply = tpu_pwm_apply,
.owner = THIS_MODULE,
};
/* -----------------------------------------------------------------------------
* Probe and remove
*/
static int tpu_probe(struct platform_device *pdev)
{
struct tpu_device *tpu;
int ret;
tpu = devm_kzalloc(&pdev->dev, sizeof(*tpu), GFP_KERNEL);
if (tpu == NULL)
return -ENOMEM;
spin_lock_init(&tpu->lock);
tpu->pdev = pdev;
/* Map memory, get clock and pin control. */
tpu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(tpu->base))
return PTR_ERR(tpu->base);
tpu->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(tpu->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(tpu->clk), "Failed to get clock\n");
/* Initialize and register the device. */
platform_set_drvdata(pdev, tpu);
tpu->chip.dev = &pdev->dev;
tpu->chip.ops = &tpu_pwm_ops;
tpu->chip.npwm = TPU_CHANNEL_MAX;
ret = devm_pm_runtime_enable(&pdev->dev);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret, "Failed to enable runtime PM\n");
ret = devm_pwmchip_add(&pdev->dev, &tpu->chip);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret, "Failed to register PWM chip\n");
return 0;
}
#ifdef CONFIG_OF
static const struct of_device_id tpu_of_table[] = {
{ .compatible = "renesas,tpu-r8a73a4", },
{ .compatible = "renesas,tpu-r8a7740", },
{ .compatible = "renesas,tpu-r8a7790", },
{ .compatible = "renesas,tpu", },
{ },
};
MODULE_DEVICE_TABLE(of, tpu_of_table);
#endif
static struct platform_driver tpu_driver = {
.probe = tpu_probe,
.driver = {
.name = "renesas-tpu-pwm",
.of_match_table = of_match_ptr(tpu_of_table),
}
};
module_platform_driver(tpu_driver);
MODULE_AUTHOR("Laurent Pinchart <[email protected]>");
MODULE_DESCRIPTION("Renesas TPU PWM Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:renesas-tpu-pwm");
|
linux-master
|
drivers/pwm/pwm-renesas-tpu.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Intel Low Power Subsystem PWM controller driver
*
* Copyright (C) 2014, Intel Corporation
* Author: Mika Westerberg <[email protected]>
* Author: Chew Kean Ho <[email protected]>
* Author: Chang Rebecca Swee Fun <[email protected]>
* Author: Chew Chiau Ee <[email protected]>
* Author: Alan Cox <[email protected]>
*/
#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/time.h>
#define DEFAULT_SYMBOL_NAMESPACE PWM_LPSS
#include "pwm-lpss.h"
#define PWM 0x00000000
#define PWM_ENABLE BIT(31)
#define PWM_SW_UPDATE BIT(30)
#define PWM_BASE_UNIT_SHIFT 8
#define PWM_ON_TIME_DIV_MASK GENMASK(7, 0)
/* Size of each PWM register space if multiple */
#define PWM_SIZE 0x400
/* BayTrail */
const struct pwm_lpss_boardinfo pwm_lpss_byt_info = {
.clk_rate = 25000000,
.npwm = 1,
.base_unit_bits = 16,
};
EXPORT_SYMBOL_GPL(pwm_lpss_byt_info);
/* Braswell */
const struct pwm_lpss_boardinfo pwm_lpss_bsw_info = {
.clk_rate = 19200000,
.npwm = 1,
.base_unit_bits = 16,
.other_devices_aml_touches_pwm_regs = true,
};
EXPORT_SYMBOL_GPL(pwm_lpss_bsw_info);
/* Broxton */
const struct pwm_lpss_boardinfo pwm_lpss_bxt_info = {
.clk_rate = 19200000,
.npwm = 4,
.base_unit_bits = 22,
.bypass = true,
};
EXPORT_SYMBOL_GPL(pwm_lpss_bxt_info);
/* Tangier */
const struct pwm_lpss_boardinfo pwm_lpss_tng_info = {
.clk_rate = 19200000,
.npwm = 4,
.base_unit_bits = 22,
};
EXPORT_SYMBOL_GPL(pwm_lpss_tng_info);
static inline struct pwm_lpss_chip *to_lpwm(struct pwm_chip *chip)
{
return container_of(chip, struct pwm_lpss_chip, chip);
}
static inline u32 pwm_lpss_read(const struct pwm_device *pwm)
{
struct pwm_lpss_chip *lpwm = to_lpwm(pwm->chip);
return readl(lpwm->regs + pwm->hwpwm * PWM_SIZE + PWM);
}
static inline void pwm_lpss_write(const struct pwm_device *pwm, u32 value)
{
struct pwm_lpss_chip *lpwm = to_lpwm(pwm->chip);
writel(value, lpwm->regs + pwm->hwpwm * PWM_SIZE + PWM);
}
static int pwm_lpss_wait_for_update(struct pwm_device *pwm)
{
struct pwm_lpss_chip *lpwm = to_lpwm(pwm->chip);
const void __iomem *addr = lpwm->regs + pwm->hwpwm * PWM_SIZE + PWM;
const unsigned int timeout_us = 500 * USEC_PER_MSEC;
u32 val;
int err;
/*
* PWM Configuration register has SW_UPDATE bit that is set when a new
* configuration is written to the register. The bit is automatically
* cleared at the start of the next output cycle by the IP block.
*
* If one writes a new configuration to the register while it still has
* the bit enabled, PWM may freeze. That is, while one can still write
* to the register, it won't have an effect. Thus, we try to sleep long
* enough that the bit gets cleared and make sure the bit is not
* enabled while we update the configuration.
*/
err = readl_poll_timeout(addr, val, !(val & PWM_SW_UPDATE), 40, timeout_us);
if (err)
dev_err(pwm->chip->dev, "PWM_SW_UPDATE was not cleared\n");
return err;
}
static inline int pwm_lpss_is_updating(struct pwm_device *pwm)
{
if (pwm_lpss_read(pwm) & PWM_SW_UPDATE) {
dev_err(pwm->chip->dev, "PWM_SW_UPDATE is still set, skipping update\n");
return -EBUSY;
}
return 0;
}
static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm,
int duty_ns, int period_ns)
{
unsigned long long on_time_div;
unsigned long c = lpwm->info->clk_rate, base_unit_range;
unsigned long long base_unit, freq = NSEC_PER_SEC;
u32 ctrl;
do_div(freq, period_ns);
/*
* The equation is:
* base_unit = round(base_unit_range * freq / c)
*/
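/*
 * Illustrative example (request values assumed, not mandated by the
 * hardware): on Broxton (base_unit_bits = 22, clk_rate = 19.2 MHz) a
 * 1 ms period gives freq = 1000 Hz, so
 * base_unit = round(4194304 * 1000 / 19200000) = 218.
 */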
base_unit_range = BIT(lpwm->info->base_unit_bits);
freq *= base_unit_range;
base_unit = DIV_ROUND_CLOSEST_ULL(freq, c);
/* base_unit must not be 0 and we also want to avoid overflowing it */
base_unit = clamp_val(base_unit, 1, base_unit_range - 1);
on_time_div = 255ULL * duty_ns;
do_div(on_time_div, period_ns);
on_time_div = 255ULL - on_time_div;
ctrl = pwm_lpss_read(pwm);
ctrl &= ~PWM_ON_TIME_DIV_MASK;
ctrl &= ~((base_unit_range - 1) << PWM_BASE_UNIT_SHIFT);
ctrl |= (u32) base_unit << PWM_BASE_UNIT_SHIFT;
ctrl |= on_time_div;
pwm_lpss_write(pwm, ctrl);
pwm_lpss_write(pwm, ctrl | PWM_SW_UPDATE);
}
static inline void pwm_lpss_cond_enable(struct pwm_device *pwm, bool cond)
{
if (cond)
pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_ENABLE);
}
static int pwm_lpss_prepare_enable(struct pwm_lpss_chip *lpwm,
struct pwm_device *pwm,
const struct pwm_state *state)
{
int ret;
ret = pwm_lpss_is_updating(pwm);
if (ret)
return ret;
pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period);
pwm_lpss_cond_enable(pwm, lpwm->info->bypass == false);
ret = pwm_lpss_wait_for_update(pwm);
if (ret)
return ret;
pwm_lpss_cond_enable(pwm, lpwm->info->bypass == true);
return 0;
}
static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
struct pwm_lpss_chip *lpwm = to_lpwm(chip);
int ret = 0;
if (state->enabled) {
if (!pwm_is_enabled(pwm)) {
pm_runtime_get_sync(chip->dev);
ret = pwm_lpss_prepare_enable(lpwm, pwm, state);
if (ret)
pm_runtime_put(chip->dev);
} else {
ret = pwm_lpss_prepare_enable(lpwm, pwm, state);
}
} else if (pwm_is_enabled(pwm)) {
pwm_lpss_write(pwm, pwm_lpss_read(pwm) & ~PWM_ENABLE);
pm_runtime_put(chip->dev);
}
return ret;
}
static int pwm_lpss_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_state *state)
{
struct pwm_lpss_chip *lpwm = to_lpwm(chip);
unsigned long base_unit_range;
unsigned long long base_unit, freq, on_time_div;
u32 ctrl;
pm_runtime_get_sync(chip->dev);
base_unit_range = BIT(lpwm->info->base_unit_bits);
ctrl = pwm_lpss_read(pwm);
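/*
 * Reverse of the calculation in pwm_lpss_prepare(); continuing the
 * illustrative numbers used there, base_unit = 218 and a 19.2 MHz clock
 * give freq = 218 * 19200000 / 4194304 = 997 Hz (truncated), hence
 * period = 1000000000 / 997 = 1003009 ns, close to the programmed 1 ms.
 */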
on_time_div = 255 - (ctrl & PWM_ON_TIME_DIV_MASK);
base_unit = (ctrl >> PWM_BASE_UNIT_SHIFT) & (base_unit_range - 1);
freq = base_unit * lpwm->info->clk_rate;
do_div(freq, base_unit_range);
if (freq == 0)
state->period = NSEC_PER_SEC;
else
state->period = NSEC_PER_SEC / (unsigned long)freq;
on_time_div *= state->period;
do_div(on_time_div, 255);
state->duty_cycle = on_time_div;
state->polarity = PWM_POLARITY_NORMAL;
state->enabled = !!(ctrl & PWM_ENABLE);
pm_runtime_put(chip->dev);
return 0;
}
static const struct pwm_ops pwm_lpss_ops = {
.apply = pwm_lpss_apply,
.get_state = pwm_lpss_get_state,
.owner = THIS_MODULE,
};
struct pwm_lpss_chip *devm_pwm_lpss_probe(struct device *dev, void __iomem *base,
const struct pwm_lpss_boardinfo *info)
{
struct pwm_lpss_chip *lpwm;
unsigned long c;
int i, ret;
u32 ctrl;
if (WARN_ON(info->npwm > LPSS_MAX_PWMS))
return ERR_PTR(-ENODEV);
lpwm = devm_kzalloc(dev, sizeof(*lpwm), GFP_KERNEL);
if (!lpwm)
return ERR_PTR(-ENOMEM);
lpwm->regs = base;
lpwm->info = info;
c = lpwm->info->clk_rate;
if (!c)
return ERR_PTR(-EINVAL);
lpwm->chip.dev = dev;
lpwm->chip.ops = &pwm_lpss_ops;
lpwm->chip.npwm = info->npwm;
ret = devm_pwmchip_add(dev, &lpwm->chip);
if (ret) {
dev_err(dev, "failed to add PWM chip: %d\n", ret);
return ERR_PTR(ret);
}
for (i = 0; i < lpwm->info->npwm; i++) {
ctrl = pwm_lpss_read(&lpwm->chip.pwms[i]);
if (ctrl & PWM_ENABLE)
pm_runtime_get(dev);
}
return lpwm;
}
EXPORT_SYMBOL_GPL(devm_pwm_lpss_probe);
MODULE_DESCRIPTION("PWM driver for Intel LPSS");
MODULE_AUTHOR("Mika Westerberg <[email protected]>");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/pwm/pwm-lpss.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Intel Low Power Subsystem PWM controller PCI driver
*
* Copyright (C) 2014, Intel Corporation
*
* Derived from the original pwm-lpss.c
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include "pwm-lpss.h"
static int pwm_lpss_probe_pci(struct pci_dev *pdev,
const struct pci_device_id *id)
{
const struct pwm_lpss_boardinfo *info;
struct pwm_lpss_chip *lpwm;
int err;
err = pcim_enable_device(pdev);
if (err < 0)
return err;
err = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
if (err)
return err;
info = (struct pwm_lpss_boardinfo *)id->driver_data;
lpwm = devm_pwm_lpss_probe(&pdev->dev, pcim_iomap_table(pdev)[0], info);
if (IS_ERR(lpwm))
return PTR_ERR(lpwm);
pci_set_drvdata(pdev, lpwm);
pm_runtime_put(&pdev->dev);
pm_runtime_allow(&pdev->dev);
return 0;
}
static void pwm_lpss_remove_pci(struct pci_dev *pdev)
{
pm_runtime_forbid(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
}
static int pwm_lpss_runtime_suspend_pci(struct device *dev)
{
/*
* The PCI core will handle transition to D3 automatically. We only
* need to provide runtime PM hooks for that to happen.
*/
return 0;
}
static int pwm_lpss_runtime_resume_pci(struct device *dev)
{
return 0;
}
static DEFINE_RUNTIME_DEV_PM_OPS(pwm_lpss_pci_pm,
pwm_lpss_runtime_suspend_pci,
pwm_lpss_runtime_resume_pci,
NULL);
static const struct pci_device_id pwm_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x0ac8), (unsigned long)&pwm_lpss_bxt_info},
{ PCI_VDEVICE(INTEL, 0x0f08), (unsigned long)&pwm_lpss_byt_info},
{ PCI_VDEVICE(INTEL, 0x0f09), (unsigned long)&pwm_lpss_byt_info},
{ PCI_VDEVICE(INTEL, 0x11a5), (unsigned long)&pwm_lpss_tng_info},
{ PCI_VDEVICE(INTEL, 0x1ac8), (unsigned long)&pwm_lpss_bxt_info},
{ PCI_VDEVICE(INTEL, 0x2288), (unsigned long)&pwm_lpss_bsw_info},
{ PCI_VDEVICE(INTEL, 0x2289), (unsigned long)&pwm_lpss_bsw_info},
{ PCI_VDEVICE(INTEL, 0x31c8), (unsigned long)&pwm_lpss_bxt_info},
{ PCI_VDEVICE(INTEL, 0x5ac8), (unsigned long)&pwm_lpss_bxt_info},
{ },
};
MODULE_DEVICE_TABLE(pci, pwm_lpss_pci_ids);
static struct pci_driver pwm_lpss_driver_pci = {
.name = "pwm-lpss",
.id_table = pwm_lpss_pci_ids,
.probe = pwm_lpss_probe_pci,
.remove = pwm_lpss_remove_pci,
.driver = {
.pm = pm_ptr(&pwm_lpss_pci_pm),
},
};
module_pci_driver(pwm_lpss_driver_pci);
MODULE_DESCRIPTION("PWM PCI driver for Intel LPSS");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(PWM_LPSS);
|
linux-master
|
drivers/pwm/pwm-lpss-pci.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Toshiba Visconti pulse-width-modulation controller driver
*
* Copyright (c) 2020 - 2021 TOSHIBA CORPORATION
* Copyright (c) 2020 - 2021 Toshiba Electronic Devices & Storage Corporation
*
* Authors: Nobuhiro Iwamatsu <[email protected]>
*
* Limitations:
* - The fixed input clock is running at 1 MHz and is divided by either 1,
* 2, 4 or 8.
* - When the settings of the PWM are modified, the new values are shadowed
* in hardware until the PIPGM_PCSR register is written and the currently
* running period is completed. This way the hardware switches atomically
* from the old setting to the new.
* - Disabling the hardware completes the currently running period and keeps
* the output at low level at all times.
*/
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#define PIPGM_PCSR(ch) (0x400 + 4 * (ch))
#define PIPGM_PDUT(ch) (0x420 + 4 * (ch))
#define PIPGM_PWMC(ch) (0x440 + 4 * (ch))
#define PIPGM_PWMC_PWMACT BIT(5)
#define PIPGM_PWMC_CLK_MASK GENMASK(1, 0)
#define PIPGM_PWMC_POLARITY_MASK GENMASK(5, 5)
struct visconti_pwm_chip {
struct pwm_chip chip;
void __iomem *base;
};
static inline struct visconti_pwm_chip *visconti_pwm_from_chip(struct pwm_chip *chip)
{
return container_of(chip, struct visconti_pwm_chip, chip);
}
static int visconti_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
struct visconti_pwm_chip *priv = visconti_pwm_from_chip(chip);
u32 period, duty_cycle, pwmc0;
if (!state->enabled) {
writel(0, priv->base + PIPGM_PCSR(pwm->hwpwm));
return 0;
}
/*
* The biggest period the hardware can provide is
* (0xffff << 3) * 1000 ns
* This value fits easily in a u32, so simplify the maths by
* capping the values to 32 bit integers.
*/
if (state->period > (0xffff << 3) * 1000)
period = (0xffff << 3) * 1000;
else
period = state->period;
if (state->duty_cycle > period)
duty_cycle = period;
else
duty_cycle = state->duty_cycle;
/*
* The input clock runs fixed at 1 MHz, so we have only
* microsecond resolution and so can divide by
* NSEC_PER_SEC / CLKFREQ = 1000 without losing precision.
*/
period /= 1000;
duty_cycle /= 1000;
if (!period)
return -ERANGE;
/*
* PWMC controls a divider that divides the input clk by a power of two
* between 1 and 8. As a smaller divider yields higher precision, pick
* the smallest possible one. As period is at most 0xffff << 3, pwmc0 is
* in the intended range [0..3].
*/
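/*
 * Illustrative example (made-up request): state->period = 100 ms gives
 * period = 100000 after the division above; 100000 >> 16 = 1, so fls()
 * yields pwmc0 = 1 and period is halved to 50000, which fits into the
 * 16-bit PIPGM_PCSR register.
 */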
pwmc0 = fls(period >> 16);
if (WARN_ON(pwmc0 > 3))
return -EINVAL;
period >>= pwmc0;
duty_cycle >>= pwmc0;
if (state->polarity == PWM_POLARITY_INVERSED)
pwmc0 |= PIPGM_PWMC_PWMACT;
writel(pwmc0, priv->base + PIPGM_PWMC(pwm->hwpwm));
writel(duty_cycle, priv->base + PIPGM_PDUT(pwm->hwpwm));
writel(period, priv->base + PIPGM_PCSR(pwm->hwpwm));
return 0;
}
static int visconti_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_state *state)
{
struct visconti_pwm_chip *priv = visconti_pwm_from_chip(chip);
u32 period, duty, pwmc0, pwmc0_clk;
period = readl(priv->base + PIPGM_PCSR(pwm->hwpwm));
duty = readl(priv->base + PIPGM_PDUT(pwm->hwpwm));
pwmc0 = readl(priv->base + PIPGM_PWMC(pwm->hwpwm));
pwmc0_clk = pwmc0 & PIPGM_PWMC_CLK_MASK;
state->period = (period << pwmc0_clk) * NSEC_PER_USEC;
state->duty_cycle = (duty << pwmc0_clk) * NSEC_PER_USEC;
if (pwmc0 & PIPGM_PWMC_POLARITY_MASK)
state->polarity = PWM_POLARITY_INVERSED;
else
state->polarity = PWM_POLARITY_NORMAL;
state->enabled = true;
return 0;
}
static const struct pwm_ops visconti_pwm_ops = {
.apply = visconti_pwm_apply,
.get_state = visconti_pwm_get_state,
.owner = THIS_MODULE,
};
static int visconti_pwm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct visconti_pwm_chip *priv;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
priv->chip.dev = dev;
priv->chip.ops = &visconti_pwm_ops;
priv->chip.npwm = 4;
ret = devm_pwmchip_add(&pdev->dev, &priv->chip);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret, "Cannot register visconti PWM\n");
return 0;
}
static const struct of_device_id visconti_pwm_of_match[] = {
{ .compatible = "toshiba,visconti-pwm", },
{ }
};
MODULE_DEVICE_TABLE(of, visconti_pwm_of_match);
static struct platform_driver visconti_pwm_driver = {
.driver = {
.name = "pwm-visconti",
.of_match_table = visconti_pwm_of_match,
},
.probe = visconti_pwm_probe,
};
module_platform_driver(visconti_pwm_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nobuhiro Iwamatsu <[email protected]>");
MODULE_ALIAS("platform:pwm-visconti");
|
linux-master
|
drivers/pwm/pwm-visconti.c
|
// SPDX-License-Identifier: GPL-2.0+
/*
* Azoteq IQS620A PWM Generator
*
* Copyright (C) 2019 Jeff LaBundy <[email protected]>
*
* Limitations:
* - The period is fixed to 1 ms and is generated continuously despite changes
* to the duty cycle or enable/disable state.
* - Changes to the duty cycle or enable/disable state take effect immediately
* and may result in a glitch during the period in which the change is made.
* - The device cannot generate a 0% duty cycle. For duty cycles below 1 / 256
* ms, the output is disabled and relies upon an external pull-down resistor
* to hold the GPIO3/LTX pin low.
*/
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mfd/iqs62x.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#define IQS620_PWR_SETTINGS 0xd2
#define IQS620_PWR_SETTINGS_PWM_OUT BIT(7)
#define IQS620_PWM_DUTY_CYCLE 0xd8
#define IQS620_PWM_PERIOD_NS 1000000
struct iqs620_pwm_private {
struct iqs62x_core *iqs62x;
struct pwm_chip chip;
struct notifier_block notifier;
struct mutex lock;
unsigned int duty_scale;
};
static int iqs620_pwm_init(struct iqs620_pwm_private *iqs620_pwm,
unsigned int duty_scale)
{
struct iqs62x_core *iqs62x = iqs620_pwm->iqs62x;
int ret;
if (!duty_scale)
return regmap_clear_bits(iqs62x->regmap, IQS620_PWR_SETTINGS,
IQS620_PWR_SETTINGS_PWM_OUT);
ret = regmap_write(iqs62x->regmap, IQS620_PWM_DUTY_CYCLE,
duty_scale - 1);
if (ret)
return ret;
return regmap_set_bits(iqs62x->regmap, IQS620_PWR_SETTINGS,
IQS620_PWR_SETTINGS_PWM_OUT);
}
static int iqs620_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
struct iqs620_pwm_private *iqs620_pwm;
unsigned int duty_cycle;
unsigned int duty_scale;
int ret;
if (state->polarity != PWM_POLARITY_NORMAL)
return -EINVAL;
if (state->period < IQS620_PWM_PERIOD_NS)
return -EINVAL;
iqs620_pwm = container_of(chip, struct iqs620_pwm_private, chip);
/*
* The duty cycle generated by the device is calculated as follows:
*
* duty_cycle = (IQS620_PWM_DUTY_CYCLE + 1) / 256 * 1 ms
*
* ...where IQS620_PWM_DUTY_CYCLE is a register value between 0 and 255
* (inclusive). Therefore the lowest duty cycle the device can generate
* while the output is enabled is 1 / 256 ms.
*
* For lower duty cycles (e.g. 0), the PWM output is simply disabled to
* allow an external pull-down resistor to hold the GPIO3/LTX pin low.
*/
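/*
 * Illustrative example (arbitrary request): duty_cycle = 500000 ns
 * gives duty_scale = 500000 * 256 / 1000000 = 128, so the register is
 * written as 127 and the output is high for 128/256 of each 1 ms
 * period.
 */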
duty_cycle = min_t(u64, state->duty_cycle, IQS620_PWM_PERIOD_NS);
duty_scale = duty_cycle * 256 / IQS620_PWM_PERIOD_NS;
if (!state->enabled)
duty_scale = 0;
mutex_lock(&iqs620_pwm->lock);
ret = iqs620_pwm_init(iqs620_pwm, duty_scale);
if (!ret)
iqs620_pwm->duty_scale = duty_scale;
mutex_unlock(&iqs620_pwm->lock);
return ret;
}
static int iqs620_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_state *state)
{
struct iqs620_pwm_private *iqs620_pwm;
iqs620_pwm = container_of(chip, struct iqs620_pwm_private, chip);
mutex_lock(&iqs620_pwm->lock);
/*
* Since the device cannot generate a 0% duty cycle, requests to do so
* cause subsequent calls to iqs620_pwm_get_state to report the output
* as disabled. This is not ideal, but is the best compromise based on
* the capabilities of the device.
*/
state->enabled = iqs620_pwm->duty_scale > 0;
state->duty_cycle = DIV_ROUND_UP(iqs620_pwm->duty_scale *
IQS620_PWM_PERIOD_NS, 256);
mutex_unlock(&iqs620_pwm->lock);
state->period = IQS620_PWM_PERIOD_NS;
state->polarity = PWM_POLARITY_NORMAL;
return 0;
}
static int iqs620_pwm_notifier(struct notifier_block *notifier,
unsigned long event_flags, void *context)
{
struct iqs620_pwm_private *iqs620_pwm;
int ret;
if (!(event_flags & BIT(IQS62X_EVENT_SYS_RESET)))
return NOTIFY_DONE;
iqs620_pwm = container_of(notifier, struct iqs620_pwm_private,
notifier);
mutex_lock(&iqs620_pwm->lock);
/*
* The parent MFD driver already prints an error message in the event
* of a device reset, so nothing else is printed here unless there is
* an additional failure.
*/
ret = iqs620_pwm_init(iqs620_pwm, iqs620_pwm->duty_scale);
mutex_unlock(&iqs620_pwm->lock);
if (ret) {
dev_err(iqs620_pwm->chip.dev,
"Failed to re-initialize device: %d\n", ret);
return NOTIFY_BAD;
}
return NOTIFY_OK;
}
static const struct pwm_ops iqs620_pwm_ops = {
.apply = iqs620_pwm_apply,
.get_state = iqs620_pwm_get_state,
.owner = THIS_MODULE,
};
static void iqs620_pwm_notifier_unregister(void *context)
{
struct iqs620_pwm_private *iqs620_pwm = context;
int ret;
ret = blocking_notifier_chain_unregister(&iqs620_pwm->iqs62x->nh,
&iqs620_pwm->notifier);
if (ret)
dev_err(iqs620_pwm->chip.dev,
"Failed to unregister notifier: %d\n", ret);
}
static int iqs620_pwm_probe(struct platform_device *pdev)
{
struct iqs62x_core *iqs62x = dev_get_drvdata(pdev->dev.parent);
struct iqs620_pwm_private *iqs620_pwm;
unsigned int val;
int ret;
iqs620_pwm = devm_kzalloc(&pdev->dev, sizeof(*iqs620_pwm), GFP_KERNEL);
if (!iqs620_pwm)
return -ENOMEM;
iqs620_pwm->iqs62x = iqs62x;
ret = regmap_read(iqs62x->regmap, IQS620_PWR_SETTINGS, &val);
if (ret)
return ret;
if (val & IQS620_PWR_SETTINGS_PWM_OUT) {
ret = regmap_read(iqs62x->regmap, IQS620_PWM_DUTY_CYCLE, &val);
if (ret)
return ret;
iqs620_pwm->duty_scale = val + 1;
}
iqs620_pwm->chip.dev = &pdev->dev;
iqs620_pwm->chip.ops = &iqs620_pwm_ops;
iqs620_pwm->chip.npwm = 1;
mutex_init(&iqs620_pwm->lock);
iqs620_pwm->notifier.notifier_call = iqs620_pwm_notifier;
ret = blocking_notifier_chain_register(&iqs620_pwm->iqs62x->nh,
&iqs620_pwm->notifier);
if (ret) {
dev_err(&pdev->dev, "Failed to register notifier: %d\n", ret);
return ret;
}
ret = devm_add_action_or_reset(&pdev->dev,
iqs620_pwm_notifier_unregister,
iqs620_pwm);
if (ret)
return ret;
ret = devm_pwmchip_add(&pdev->dev, &iqs620_pwm->chip);
if (ret)
dev_err(&pdev->dev, "Failed to add device: %d\n", ret);
return ret;
}
static struct platform_driver iqs620_pwm_platform_driver = {
.driver = {
.name = "iqs620a-pwm",
},
.probe = iqs620_pwm_probe,
};
module_platform_driver(iqs620_pwm_platform_driver);
MODULE_AUTHOR("Jeff LaBundy <[email protected]>");
MODULE_DESCRIPTION("Azoteq IQS620A PWM Generator");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:iqs620a-pwm");
|
linux-master
|
drivers/pwm/pwm-iqs620a.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Intel Keem Bay PWM driver
*
* Copyright (C) 2020 Intel Corporation
* Authors: Lai Poey Seng <[email protected]>
* Vineetha G. Jaya Kumaran <[email protected]>
*
* Limitations:
* - Upon disabling a channel, the currently running
* period will not be completed. However, upon
* reconfiguration of the duty cycle/period, the
* currently running period will be completed first.
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/regmap.h>
#define KMB_TOTAL_PWM_CHANNELS 6
#define KMB_PWM_COUNT_MAX U16_MAX
#define KMB_PWM_EN_BIT BIT(31)
/* Mask */
#define KMB_PWM_HIGH_MASK GENMASK(31, 16)
#define KMB_PWM_LOW_MASK GENMASK(15, 0)
#define KMB_PWM_LEADIN_MASK GENMASK(30, 0)
/* PWM Register offset */
#define KMB_PWM_LEADIN_OFFSET(ch) (0x00 + 4 * (ch))
#define KMB_PWM_HIGHLOW_OFFSET(ch) (0x20 + 4 * (ch))
struct keembay_pwm {
struct pwm_chip chip;
struct device *dev;
struct clk *clk;
void __iomem *base;
};
static inline struct keembay_pwm *to_keembay_pwm_dev(struct pwm_chip *chip)
{
return container_of(chip, struct keembay_pwm, chip);
}
static void keembay_clk_unprepare(void *data)
{
clk_disable_unprepare(data);
}
static int keembay_clk_enable(struct device *dev, struct clk *clk)
{
int ret;
ret = clk_prepare_enable(clk);
if (ret)
return ret;
return devm_add_action_or_reset(dev, keembay_clk_unprepare, clk);
}
/*
* With gcc 10, CONFIG_CC_OPTIMIZE_FOR_SIZE and only "inline" instead of
* "__always_inline", this fails to compile because the compiler cannot
* verify for all valid masks (e.g. KMB_PWM_LEADIN_MASK) that they are ok.
*/
static __always_inline void keembay_pwm_update_bits(struct keembay_pwm *priv, u32 mask,
u32 val, u32 offset)
{
u32 buff = readl(priv->base + offset);
buff = u32_replace_bits(buff, val, mask);
writel(buff, priv->base + offset);
}
static void keembay_pwm_enable(struct keembay_pwm *priv, int ch)
{
keembay_pwm_update_bits(priv, KMB_PWM_EN_BIT, 1,
KMB_PWM_LEADIN_OFFSET(ch));
}
static void keembay_pwm_disable(struct keembay_pwm *priv, int ch)
{
keembay_pwm_update_bits(priv, KMB_PWM_EN_BIT, 0,
KMB_PWM_LEADIN_OFFSET(ch));
}
static int keembay_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_state *state)
{
struct keembay_pwm *priv = to_keembay_pwm_dev(chip);
unsigned long long high, low;
unsigned long clk_rate;
u32 highlow;
clk_rate = clk_get_rate(priv->clk);
/* Read channel enabled status */
highlow = readl(priv->base + KMB_PWM_LEADIN_OFFSET(pwm->hwpwm));
if (highlow & KMB_PWM_EN_BIT)
state->enabled = true;
else
state->enabled = false;
/* Read period and duty cycle */
highlow = readl(priv->base + KMB_PWM_HIGHLOW_OFFSET(pwm->hwpwm));
low = FIELD_GET(KMB_PWM_LOW_MASK, highlow) * NSEC_PER_SEC;
high = FIELD_GET(KMB_PWM_HIGH_MASK, highlow) * NSEC_PER_SEC;
state->duty_cycle = DIV_ROUND_UP_ULL(high, clk_rate);
state->period = DIV_ROUND_UP_ULL(high + low, clk_rate);
state->polarity = PWM_POLARITY_NORMAL;
return 0;
}
static int keembay_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
struct keembay_pwm *priv = to_keembay_pwm_dev(chip);
struct pwm_state current_state;
unsigned long long div;
unsigned long clk_rate;
u32 pwm_count = 0;
u16 high, low;
if (state->polarity != PWM_POLARITY_NORMAL)
return -EINVAL;
/*
* Configure the PWM repeat count as infinite in bits (15:0) and the
* lead-in low time as 0 in bits (30:16), both in terms of clock cycles.
*/
keembay_pwm_update_bits(priv, KMB_PWM_LEADIN_MASK, 0,
KMB_PWM_LEADIN_OFFSET(pwm->hwpwm));
keembay_pwm_get_state(chip, pwm, ¤t_state);
if (!state->enabled) {
if (current_state.enabled)
keembay_pwm_disable(priv, pwm->hwpwm);
return 0;
}
/*
* The upper 16 bits and lower 16 bits of the KMB_PWM_HIGHLOW_OFFSET
* register contain the high time and low time of the waveform, respectively.
* All the values are in terms of clock cycles.
*/
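/*
 * Illustrative example (clock rate assumed, not taken from the SoC
 * documentation): with a 1 MHz input clock, a 10 ms period and a 4 ms
 * duty cycle give high = 4000 and low = 6000 clock cycles, both well
 * below KMB_PWM_COUNT_MAX (65535).
 */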
clk_rate = clk_get_rate(priv->clk);
div = clk_rate * state->duty_cycle;
div = DIV_ROUND_DOWN_ULL(div, NSEC_PER_SEC);
if (div > KMB_PWM_COUNT_MAX)
return -ERANGE;
high = div;
div = clk_rate * state->period;
div = DIV_ROUND_DOWN_ULL(div, NSEC_PER_SEC);
div = div - high;
if (div > KMB_PWM_COUNT_MAX)
return -ERANGE;
low = div;
pwm_count = FIELD_PREP(KMB_PWM_HIGH_MASK, high) |
FIELD_PREP(KMB_PWM_LOW_MASK, low);
writel(pwm_count, priv->base + KMB_PWM_HIGHLOW_OFFSET(pwm->hwpwm));
if (state->enabled && !current_state.enabled)
keembay_pwm_enable(priv, pwm->hwpwm);
return 0;
}
static const struct pwm_ops keembay_pwm_ops = {
.owner = THIS_MODULE,
.apply = keembay_pwm_apply,
.get_state = keembay_pwm_get_state,
};
static int keembay_pwm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct keembay_pwm *priv;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->clk = devm_clk_get(dev, NULL);
if (IS_ERR(priv->clk))
return dev_err_probe(dev, PTR_ERR(priv->clk), "Failed to get clock\n");
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
ret = keembay_clk_enable(dev, priv->clk);
if (ret)
return ret;
priv->chip.dev = dev;
priv->chip.ops = &keembay_pwm_ops;
priv->chip.npwm = KMB_TOTAL_PWM_CHANNELS;
ret = devm_pwmchip_add(dev, &priv->chip);
if (ret)
return dev_err_probe(dev, ret, "Failed to add PWM chip\n");
return 0;
}
static const struct of_device_id keembay_pwm_of_match[] = {
{ .compatible = "intel,keembay-pwm" },
{ }
};
MODULE_DEVICE_TABLE(of, keembay_pwm_of_match);
static struct platform_driver keembay_pwm_driver = {
.probe = keembay_pwm_probe,
.driver = {
.name = "pwm-keembay",
.of_match_table = keembay_pwm_of_match,
},
};
module_platform_driver(keembay_pwm_driver);
MODULE_ALIAS("platform:pwm-keembay");
MODULE_DESCRIPTION("Intel Keem Bay PWM driver");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/pwm/pwm-keembay.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017-2018 SiFive
* For SiFive's PWM IP block documentation, please refer to Chapter 14 of the
* Reference Manual: https://static.dev.sifive.com/FU540-C000-v1.0.pdf
*
* Limitations:
* - When changing both duty cycle and period, we cannot prevent in
* software that the output might produce a period with mixed
* settings (new period length and old duty cycle).
* - The hardware cannot generate a 100% duty cycle.
* - The hardware generates only inverted output.
*/
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/slab.h>
#include <linux/bitfield.h>
/* Register offsets */
#define PWM_SIFIVE_PWMCFG 0x0
#define PWM_SIFIVE_PWMCOUNT 0x8
#define PWM_SIFIVE_PWMS 0x10
#define PWM_SIFIVE_PWMCMP(i) (0x20 + 4 * (i))
/* PWMCFG fields */
#define PWM_SIFIVE_PWMCFG_SCALE GENMASK(3, 0)
#define PWM_SIFIVE_PWMCFG_STICKY BIT(8)
#define PWM_SIFIVE_PWMCFG_ZERO_CMP BIT(9)
#define PWM_SIFIVE_PWMCFG_DEGLITCH BIT(10)
#define PWM_SIFIVE_PWMCFG_EN_ALWAYS BIT(12)
#define PWM_SIFIVE_PWMCFG_EN_ONCE BIT(13)
#define PWM_SIFIVE_PWMCFG_CENTER BIT(16)
#define PWM_SIFIVE_PWMCFG_GANG BIT(24)
#define PWM_SIFIVE_PWMCFG_IP BIT(28)
#define PWM_SIFIVE_CMPWIDTH 16
#define PWM_SIFIVE_DEFAULT_PERIOD 10000000
struct pwm_sifive_ddata {
struct pwm_chip chip;
struct mutex lock; /* lock to protect user_count and approx_period */
struct notifier_block notifier;
struct clk *clk;
void __iomem *regs;
unsigned int real_period;
unsigned int approx_period;
int user_count;
};
static inline
struct pwm_sifive_ddata *pwm_sifive_chip_to_ddata(struct pwm_chip *chip)
{
return container_of(chip, struct pwm_sifive_ddata, chip);
}
static int pwm_sifive_request(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct pwm_sifive_ddata *ddata = pwm_sifive_chip_to_ddata(chip);
mutex_lock(&ddata->lock);
ddata->user_count++;
mutex_unlock(&ddata->lock);
return 0;
}
static void pwm_sifive_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct pwm_sifive_ddata *ddata = pwm_sifive_chip_to_ddata(chip);
mutex_lock(&ddata->lock);
ddata->user_count--;
mutex_unlock(&ddata->lock);
}
/* Called holding ddata->lock */
static void pwm_sifive_update_clock(struct pwm_sifive_ddata *ddata,
unsigned long rate)
{
unsigned long long num;
unsigned long scale_pow;
int scale;
u32 val;
/*
* The PWM unit is used with pwmzerocmp=0, so the only way to modify the
* period length is using pwmscale which provides the number of bits the
* counter is shifted before being fed to the comparators. A period
* lasts (1 << (PWM_SIFIVE_CMPWIDTH + pwmscale)) clock ticks.
* (1 << (PWM_SIFIVE_CMPWIDTH + scale)) * 10^9/rate = period
*/
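/*
 * Illustrative example (rate assumed): with rate = 16 MHz and
 * approx_period = 10 ms, scale_pow = 160000 and ilog2(160000) = 17,
 * so scale = 1 and real_period = (1 << 17) * NSEC_PER_SEC / 16000000,
 * i.e. roughly 8.19 ms.
 */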
scale_pow = div64_ul(ddata->approx_period * (u64)rate, NSEC_PER_SEC);
scale = clamp(ilog2(scale_pow) - PWM_SIFIVE_CMPWIDTH, 0, 0xf);
val = PWM_SIFIVE_PWMCFG_EN_ALWAYS |
FIELD_PREP(PWM_SIFIVE_PWMCFG_SCALE, scale);
writel(val, ddata->regs + PWM_SIFIVE_PWMCFG);
/* As scale <= 15 the shift operation cannot overflow. */
num = (unsigned long long)NSEC_PER_SEC << (PWM_SIFIVE_CMPWIDTH + scale);
ddata->real_period = div64_ul(num, rate);
dev_dbg(ddata->chip.dev,
"New real_period = %u ns\n", ddata->real_period);
}
static int pwm_sifive_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_state *state)
{
struct pwm_sifive_ddata *ddata = pwm_sifive_chip_to_ddata(chip);
u32 duty, val;
duty = readl(ddata->regs + PWM_SIFIVE_PWMCMP(pwm->hwpwm));
state->enabled = duty > 0;
val = readl(ddata->regs + PWM_SIFIVE_PWMCFG);
if (!(val & PWM_SIFIVE_PWMCFG_EN_ALWAYS))
state->enabled = false;
state->period = ddata->real_period;
state->duty_cycle =
(u64)duty * ddata->real_period >> PWM_SIFIVE_CMPWIDTH;
state->polarity = PWM_POLARITY_INVERSED;
return 0;
}
static int pwm_sifive_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
struct pwm_sifive_ddata *ddata = pwm_sifive_chip_to_ddata(chip);
struct pwm_state cur_state;
unsigned int duty_cycle;
unsigned long long num;
bool enabled;
int ret = 0;
u32 frac;
if (state->polarity != PWM_POLARITY_INVERSED)
return -EINVAL;
cur_state = pwm->state;
enabled = cur_state.enabled;
duty_cycle = state->duty_cycle;
if (!state->enabled)
duty_cycle = 0;
/*
* The problem of the output producing mixed settings, as mentioned at the
* top, occurs here. To minimize the window for this problem, the register
* values are calculated first and then written consecutively.
*/
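/*
 * Illustrative example: duty_cycle = 5 ms and period = 10 ms give
 * num = 5000000 * 65536 and frac = 32768, i.e. a 50% compare value
 * well below the 0xffff cap applied below.
 */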
num = (u64)duty_cycle * (1U << PWM_SIFIVE_CMPWIDTH);
frac = DIV64_U64_ROUND_CLOSEST(num, state->period);
/* The hardware cannot generate a 100% duty cycle */
frac = min(frac, (1U << PWM_SIFIVE_CMPWIDTH) - 1);
mutex_lock(&ddata->lock);
if (state->period != ddata->approx_period) {
/*
* Don't let a 2nd user change the period underneath the 1st user.
* However, if ddata->approx_period == 0 this is the first time we set
* any period, so let whoever gets here first set the period so other
* users who agree on the period won't fail.
*/
if (ddata->user_count != 1 && ddata->approx_period) {
mutex_unlock(&ddata->lock);
return -EBUSY;
}
ddata->approx_period = state->period;
pwm_sifive_update_clock(ddata, clk_get_rate(ddata->clk));
}
mutex_unlock(&ddata->lock);
/*
* If the PWM is enabled the clk is already on. So only enable it
* conditionally to have it on exactly once afterwards independent of
* the PWM state.
*/
if (!enabled) {
ret = clk_enable(ddata->clk);
if (ret) {
dev_err(ddata->chip.dev, "Enable clk failed\n");
return ret;
}
}
writel(frac, ddata->regs + PWM_SIFIVE_PWMCMP(pwm->hwpwm));
if (!state->enabled)
clk_disable(ddata->clk);
return 0;
}
static const struct pwm_ops pwm_sifive_ops = {
.request = pwm_sifive_request,
.free = pwm_sifive_free,
.get_state = pwm_sifive_get_state,
.apply = pwm_sifive_apply,
.owner = THIS_MODULE,
};
static int pwm_sifive_clock_notifier(struct notifier_block *nb,
unsigned long event, void *data)
{
struct clk_notifier_data *ndata = data;
struct pwm_sifive_ddata *ddata =
container_of(nb, struct pwm_sifive_ddata, notifier);
if (event == POST_RATE_CHANGE) {
mutex_lock(&ddata->lock);
pwm_sifive_update_clock(ddata, ndata->new_rate);
mutex_unlock(&ddata->lock);
}
return NOTIFY_OK;
}
static int pwm_sifive_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct pwm_sifive_ddata *ddata;
struct pwm_chip *chip;
int ret;
u32 val;
unsigned int enabled_pwms = 0, enabled_clks = 1;
ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
if (!ddata)
return -ENOMEM;
mutex_init(&ddata->lock);
chip = &ddata->chip;
chip->dev = dev;
chip->ops = &pwm_sifive_ops;
chip->npwm = 4;
ddata->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ddata->regs))
return PTR_ERR(ddata->regs);
ddata->clk = devm_clk_get_prepared(dev, NULL);
if (IS_ERR(ddata->clk))
return dev_err_probe(dev, PTR_ERR(ddata->clk),
"Unable to find controller clock\n");
ret = clk_enable(ddata->clk);
if (ret) {
dev_err(dev, "failed to enable clock for pwm: %d\n", ret);
return ret;
}
val = readl(ddata->regs + PWM_SIFIVE_PWMCFG);
if (val & PWM_SIFIVE_PWMCFG_EN_ALWAYS) {
unsigned int i;
for (i = 0; i < chip->npwm; ++i) {
val = readl(ddata->regs + PWM_SIFIVE_PWMCMP(i));
if (val > 0)
++enabled_pwms;
}
}
/* The clk should be on once for each running PWM. */
if (enabled_pwms) {
while (enabled_clks < enabled_pwms) {
/* This is not expected to fail as the clk is already on */
ret = clk_enable(ddata->clk);
if (unlikely(ret)) {
dev_err_probe(dev, ret, "Failed to enable clk\n");
goto disable_clk;
}
++enabled_clks;
}
} else {
clk_disable(ddata->clk);
enabled_clks = 0;
}
/* Watch for changes to underlying clock frequency */
ddata->notifier.notifier_call = pwm_sifive_clock_notifier;
ret = clk_notifier_register(ddata->clk, &ddata->notifier);
if (ret) {
dev_err(dev, "failed to register clock notifier: %d\n", ret);
goto disable_clk;
}
ret = pwmchip_add(chip);
if (ret < 0) {
dev_err(dev, "cannot register PWM: %d\n", ret);
goto unregister_clk;
}
platform_set_drvdata(pdev, ddata);
dev_dbg(dev, "SiFive PWM chip registered %d PWMs\n", chip->npwm);
return 0;
unregister_clk:
clk_notifier_unregister(ddata->clk, &ddata->notifier);
disable_clk:
while (enabled_clks) {
clk_disable(ddata->clk);
--enabled_clks;
}
return ret;
}
static void pwm_sifive_remove(struct platform_device *dev)
{
struct pwm_sifive_ddata *ddata = platform_get_drvdata(dev);
struct pwm_device *pwm;
int ch;
pwmchip_remove(&ddata->chip);
clk_notifier_unregister(ddata->clk, &ddata->notifier);
for (ch = 0; ch < ddata->chip.npwm; ch++) {
pwm = &ddata->chip.pwms[ch];
if (pwm->state.enabled)
clk_disable(ddata->clk);
}
}
static const struct of_device_id pwm_sifive_of_match[] = {
{ .compatible = "sifive,pwm0" },
{},
};
MODULE_DEVICE_TABLE(of, pwm_sifive_of_match);
static struct platform_driver pwm_sifive_driver = {
.probe = pwm_sifive_probe,
.remove_new = pwm_sifive_remove,
.driver = {
.name = "pwm-sifive",
.of_match_table = pwm_sifive_of_match,
},
};
module_platform_driver(pwm_sifive_driver);
MODULE_DESCRIPTION("SiFive PWM driver");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/pwm/pwm-sifive.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) Overkiz SAS 2012
*
* Author: Boris BREZILLON <[email protected]>
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <soc/at91/atmel_tcb.h>
#define NPWM 2
#define ATMEL_TC_ACMR_MASK (ATMEL_TC_ACPA | ATMEL_TC_ACPC | \
ATMEL_TC_AEEVT | ATMEL_TC_ASWTRG)
#define ATMEL_TC_BCMR_MASK (ATMEL_TC_BCPB | ATMEL_TC_BCPC | \
ATMEL_TC_BEEVT | ATMEL_TC_BSWTRG)
struct atmel_tcb_pwm_device {
unsigned div; /* PWM clock divider */
unsigned duty; /* PWM duty expressed in clk cycles */
unsigned period; /* PWM period expressed in clk cycles */
};
struct atmel_tcb_channel {
u32 enabled;
u32 cmr;
u32 ra;
u32 rb;
u32 rc;
};
struct atmel_tcb_pwm_chip {
struct pwm_chip chip;
spinlock_t lock;
u8 channel;
u8 width;
struct regmap *regmap;
struct clk *clk;
struct clk *gclk;
struct clk *slow_clk;
struct atmel_tcb_pwm_device pwms[NPWM];
struct atmel_tcb_channel bkup;
};
static const u8 atmel_tcb_divisors[] = { 2, 8, 32, 128, 0, };
static inline struct atmel_tcb_pwm_chip *to_tcb_chip(struct pwm_chip *chip)
{
return container_of(chip, struct atmel_tcb_pwm_chip, chip);
}
static int atmel_tcb_pwm_request(struct pwm_chip *chip,
struct pwm_device *pwm)
{
struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
struct atmel_tcb_pwm_device *tcbpwm = &tcbpwmc->pwms[pwm->hwpwm];
unsigned cmr;
int ret;
ret = clk_prepare_enable(tcbpwmc->clk);
if (ret)
return ret;
tcbpwm->duty = 0;
tcbpwm->period = 0;
tcbpwm->div = 0;
spin_lock(&tcbpwmc->lock);
regmap_read(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CMR), &cmr);
/*
* Get init config from Timer Counter registers if
* Timer Counter is already configured as a PWM generator.
*/
if (cmr & ATMEL_TC_WAVE) {
if (pwm->hwpwm == 0)
regmap_read(tcbpwmc->regmap,
ATMEL_TC_REG(tcbpwmc->channel, RA),
&tcbpwm->duty);
else
regmap_read(tcbpwmc->regmap,
ATMEL_TC_REG(tcbpwmc->channel, RB),
&tcbpwm->duty);
tcbpwm->div = cmr & ATMEL_TC_TCCLKS;
regmap_read(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, RC),
&tcbpwm->period);
cmr &= (ATMEL_TC_TCCLKS | ATMEL_TC_ACMR_MASK |
ATMEL_TC_BCMR_MASK);
} else
cmr = 0;
cmr |= ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO | ATMEL_TC_EEVT_XC0;
regmap_write(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CMR), cmr);
spin_unlock(&tcbpwmc->lock);
return 0;
}
static void atmel_tcb_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
clk_disable_unprepare(tcbpwmc->clk);
}
static void atmel_tcb_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm,
enum pwm_polarity polarity)
{
struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
struct atmel_tcb_pwm_device *tcbpwm = &tcbpwmc->pwms[pwm->hwpwm];
unsigned cmr;
/*
* If duty is 0 the timer will be stopped and we have to
* configure the output correctly on software trigger:
* - set output to high if PWM_POLARITY_INVERSED
* - set output to low if PWM_POLARITY_NORMAL
*
* This is why we invert the polarity in this case.
*/
if (tcbpwm->duty == 0)
polarity = !polarity;
spin_lock(&tcbpwmc->lock);
regmap_read(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CMR), &cmr);
/* flush old setting and set the new one */
if (pwm->hwpwm == 0) {
cmr &= ~ATMEL_TC_ACMR_MASK;
if (polarity == PWM_POLARITY_INVERSED)
cmr |= ATMEL_TC_ASWTRG_CLEAR;
else
cmr |= ATMEL_TC_ASWTRG_SET;
} else {
cmr &= ~ATMEL_TC_BCMR_MASK;
if (polarity == PWM_POLARITY_INVERSED)
cmr |= ATMEL_TC_BSWTRG_CLEAR;
else
cmr |= ATMEL_TC_BSWTRG_SET;
}
regmap_write(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CMR), cmr);
/*
* Use software trigger to apply the new setting.
* If both PWM devices in this group are disabled we stop the clock.
*/
if (!(cmr & (ATMEL_TC_ACPC | ATMEL_TC_BCPC))) {
regmap_write(tcbpwmc->regmap,
ATMEL_TC_REG(tcbpwmc->channel, CCR),
ATMEL_TC_SWTRG | ATMEL_TC_CLKDIS);
tcbpwmc->bkup.enabled = 1;
} else {
regmap_write(tcbpwmc->regmap,
ATMEL_TC_REG(tcbpwmc->channel, CCR),
ATMEL_TC_SWTRG);
tcbpwmc->bkup.enabled = 0;
}
spin_unlock(&tcbpwmc->lock);
}
static int atmel_tcb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm,
enum pwm_polarity polarity)
{
struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
struct atmel_tcb_pwm_device *tcbpwm = &tcbpwmc->pwms[pwm->hwpwm];
u32 cmr;
/*
* If duty is 0 the timer will be stopped and we have to
* configure the output correctly on software trigger:
* - set output to high if PWM_POLARITY_INVERSED
* - set output to low if PWM_POLARITY_NORMAL
*
* This is why we invert the polarity in this case.
*/
if (tcbpwm->duty == 0)
polarity = !polarity;
spin_lock(&tcbpwmc->lock);
regmap_read(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CMR), &cmr);
/* flush old setting and set the new one */
cmr &= ~ATMEL_TC_TCCLKS;
if (pwm->hwpwm == 0) {
cmr &= ~ATMEL_TC_ACMR_MASK;
/* Set CMR flags according to given polarity */
if (polarity == PWM_POLARITY_INVERSED)
cmr |= ATMEL_TC_ASWTRG_CLEAR;
else
cmr |= ATMEL_TC_ASWTRG_SET;
} else {
cmr &= ~ATMEL_TC_BCMR_MASK;
if (polarity == PWM_POLARITY_INVERSED)
cmr |= ATMEL_TC_BSWTRG_CLEAR;
else
cmr |= ATMEL_TC_BSWTRG_SET;
}
/*
* If duty is 0 or equal to period there's no need to register
* a specific action on RA/RB and RC compare.
* The output will be configured on software trigger and will keep
* this configuration until the next config call.
*/
if (tcbpwm->duty != tcbpwm->period && tcbpwm->duty > 0) {
if (pwm->hwpwm == 0) {
if (polarity == PWM_POLARITY_INVERSED)
cmr |= ATMEL_TC_ACPA_SET | ATMEL_TC_ACPC_CLEAR;
else
cmr |= ATMEL_TC_ACPA_CLEAR | ATMEL_TC_ACPC_SET;
} else {
if (polarity == PWM_POLARITY_INVERSED)
cmr |= ATMEL_TC_BCPB_SET | ATMEL_TC_BCPC_CLEAR;
else
cmr |= ATMEL_TC_BCPB_CLEAR | ATMEL_TC_BCPC_SET;
}
}
cmr |= (tcbpwm->div & ATMEL_TC_TCCLKS);
regmap_write(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CMR), cmr);
if (pwm->hwpwm == 0)
regmap_write(tcbpwmc->regmap,
ATMEL_TC_REG(tcbpwmc->channel, RA),
tcbpwm->duty);
else
regmap_write(tcbpwmc->regmap,
ATMEL_TC_REG(tcbpwmc->channel, RB),
tcbpwm->duty);
regmap_write(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, RC),
tcbpwm->period);
/* Use software trigger to apply the new setting */
regmap_write(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CCR),
ATMEL_TC_SWTRG | ATMEL_TC_CLKEN);
tcbpwmc->bkup.enabled = 1;
spin_unlock(&tcbpwmc->lock);
return 0;
}
static int atmel_tcb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
int duty_ns, int period_ns)
{
struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
struct atmel_tcb_pwm_device *tcbpwm = &tcbpwmc->pwms[pwm->hwpwm];
struct atmel_tcb_pwm_device *atcbpwm = NULL;
int i = 0;
int slowclk = 0;
unsigned period;
unsigned duty;
unsigned rate = clk_get_rate(tcbpwmc->clk);
unsigned long long min;
unsigned long long max;
/*
* Find best clk divisor:
* the smallest divisor which can fulfill the period_ns requirements.
* If there is a gclk, the first divisor is actually the gclk selector
*/
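/*
 * Illustrative example (made-up numbers, no gclk case): with
 * rate = 16 MHz, period_ns = 1 ms and a 16-bit counter, the first
 * divisor (2) gives min = 125 ns per tick and max = 125 << 16 (about
 * 8.19 ms) >= 1 ms, so it is selected and period becomes
 * 1000000 / 125 = 8000 ticks.
 */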
if (tcbpwmc->gclk)
i = 1;
for (; i < ARRAY_SIZE(atmel_tcb_divisors); ++i) {
if (atmel_tcb_divisors[i] == 0) {
slowclk = i;
continue;
}
min = div_u64((u64)NSEC_PER_SEC * atmel_tcb_divisors[i], rate);
max = min << tcbpwmc->width;
if (max >= period_ns)
break;
}
/*
* If no divisor allows representing period_ns, fall back to the
* slow clock (32 kHz).
*/
if (i == ARRAY_SIZE(atmel_tcb_divisors)) {
i = slowclk;
rate = clk_get_rate(tcbpwmc->slow_clk);
min = div_u64(NSEC_PER_SEC, rate);
max = min << tcbpwmc->width;
/* If the period is still too big, return -ERANGE */
if (max < period_ns)
return -ERANGE;
}
duty = div_u64(duty_ns, min);
period = div_u64(period_ns, min);
if (pwm->hwpwm == 0)
atcbpwm = &tcbpwmc->pwms[1];
else
atcbpwm = &tcbpwmc->pwms[0];
/*
* PWM devices provided by the TCB driver are grouped in pairs.
* PWM devices in a given group must be configured with the
* same period_ns.
*
* We're checking the period value of the second PWM device
* in this group before applying the new config.
*/
if ((atcbpwm && atcbpwm->duty > 0 &&
atcbpwm->duty != atcbpwm->period) &&
(atcbpwm->div != i || atcbpwm->period != period)) {
dev_err(chip->dev,
"failed to configure period_ns: PWM group already configured with a different value\n");
return -EINVAL;
}
tcbpwm->period = period;
tcbpwm->div = i;
tcbpwm->duty = duty;
return 0;
}
static int atmel_tcb_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
int duty_cycle, period;
int ret;
if (!state->enabled) {
atmel_tcb_pwm_disable(chip, pwm, state->polarity);
return 0;
}
period = state->period < INT_MAX ? state->period : INT_MAX;
duty_cycle = state->duty_cycle < INT_MAX ? state->duty_cycle : INT_MAX;
ret = atmel_tcb_pwm_config(chip, pwm, duty_cycle, period);
if (ret)
return ret;
return atmel_tcb_pwm_enable(chip, pwm, state->polarity);
}
static const struct pwm_ops atmel_tcb_pwm_ops = {
.request = atmel_tcb_pwm_request,
.free = atmel_tcb_pwm_free,
.apply = atmel_tcb_pwm_apply,
.owner = THIS_MODULE,
};
static struct atmel_tcb_config tcb_rm9200_config = {
.counter_width = 16,
};
static struct atmel_tcb_config tcb_sam9x5_config = {
.counter_width = 32,
};
static struct atmel_tcb_config tcb_sama5d2_config = {
.counter_width = 32,
.has_gclk = 1,
};
static const struct of_device_id atmel_tcb_of_match[] = {
{ .compatible = "atmel,at91rm9200-tcb", .data = &tcb_rm9200_config, },
{ .compatible = "atmel,at91sam9x5-tcb", .data = &tcb_sam9x5_config, },
{ .compatible = "atmel,sama5d2-tcb", .data = &tcb_sama5d2_config, },
{ /* sentinel */ }
};
static int atmel_tcb_pwm_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
struct atmel_tcb_pwm_chip *tcbpwm;
const struct atmel_tcb_config *config;
struct device_node *np = pdev->dev.of_node;
char clk_name[] = "t0_clk";
int err;
int channel;
tcbpwm = devm_kzalloc(&pdev->dev, sizeof(*tcbpwm), GFP_KERNEL);
if (tcbpwm == NULL)
return -ENOMEM;
err = of_property_read_u32(np, "reg", &channel);
if (err < 0) {
dev_err(&pdev->dev,
"failed to get Timer Counter Block channel from device tree (error: %d)\n",
err);
return err;
}
tcbpwm->regmap = syscon_node_to_regmap(np->parent);
if (IS_ERR(tcbpwm->regmap))
return PTR_ERR(tcbpwm->regmap);
tcbpwm->slow_clk = of_clk_get_by_name(np->parent, "slow_clk");
if (IS_ERR(tcbpwm->slow_clk))
return PTR_ERR(tcbpwm->slow_clk);
clk_name[1] += channel;
tcbpwm->clk = of_clk_get_by_name(np->parent, clk_name);
if (IS_ERR(tcbpwm->clk))
tcbpwm->clk = of_clk_get_by_name(np->parent, "t0_clk");
if (IS_ERR(tcbpwm->clk)) {
err = PTR_ERR(tcbpwm->clk);
goto err_slow_clk;
}
match = of_match_node(atmel_tcb_of_match, np->parent);
config = match->data;
if (config->has_gclk) {
tcbpwm->gclk = of_clk_get_by_name(np->parent, "gclk");
if (IS_ERR(tcbpwm->gclk)) {
err = PTR_ERR(tcbpwm->gclk);
goto err_clk;
}
}
tcbpwm->chip.dev = &pdev->dev;
tcbpwm->chip.ops = &atmel_tcb_pwm_ops;
tcbpwm->chip.npwm = NPWM;
tcbpwm->channel = channel;
tcbpwm->width = config->counter_width;
err = clk_prepare_enable(tcbpwm->slow_clk);
if (err)
goto err_gclk;
spin_lock_init(&tcbpwm->lock);
err = pwmchip_add(&tcbpwm->chip);
if (err < 0)
goto err_disable_clk;
platform_set_drvdata(pdev, tcbpwm);
return 0;
err_disable_clk:
clk_disable_unprepare(tcbpwm->slow_clk);
err_gclk:
clk_put(tcbpwm->gclk);
err_clk:
clk_put(tcbpwm->clk);
err_slow_clk:
clk_put(tcbpwm->slow_clk);
return err;
}
static void atmel_tcb_pwm_remove(struct platform_device *pdev)
{
struct atmel_tcb_pwm_chip *tcbpwm = platform_get_drvdata(pdev);
pwmchip_remove(&tcbpwm->chip);
clk_disable_unprepare(tcbpwm->slow_clk);
clk_put(tcbpwm->gclk);
clk_put(tcbpwm->clk);
clk_put(tcbpwm->slow_clk);
}
static const struct of_device_id atmel_tcb_pwm_dt_ids[] = {
{ .compatible = "atmel,tcb-pwm", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_tcb_pwm_dt_ids);
#ifdef CONFIG_PM_SLEEP
static int atmel_tcb_pwm_suspend(struct device *dev)
{
struct atmel_tcb_pwm_chip *tcbpwm = dev_get_drvdata(dev);
struct atmel_tcb_channel *chan = &tcbpwm->bkup;
unsigned int channel = tcbpwm->channel;
regmap_read(tcbpwm->regmap, ATMEL_TC_REG(channel, CMR), &chan->cmr);
regmap_read(tcbpwm->regmap, ATMEL_TC_REG(channel, RA), &chan->ra);
regmap_read(tcbpwm->regmap, ATMEL_TC_REG(channel, RB), &chan->rb);
regmap_read(tcbpwm->regmap, ATMEL_TC_REG(channel, RC), &chan->rc);
return 0;
}
static int atmel_tcb_pwm_resume(struct device *dev)
{
struct atmel_tcb_pwm_chip *tcbpwm = dev_get_drvdata(dev);
struct atmel_tcb_channel *chan = &tcbpwm->bkup;
unsigned int channel = tcbpwm->channel;
regmap_write(tcbpwm->regmap, ATMEL_TC_REG(channel, CMR), chan->cmr);
regmap_write(tcbpwm->regmap, ATMEL_TC_REG(channel, RA), chan->ra);
regmap_write(tcbpwm->regmap, ATMEL_TC_REG(channel, RB), chan->rb);
regmap_write(tcbpwm->regmap, ATMEL_TC_REG(channel, RC), chan->rc);
if (chan->enabled)
regmap_write(tcbpwm->regmap,
ATMEL_TC_REG(channel, CCR),
ATMEL_TC_CLKEN | ATMEL_TC_SWTRG);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(atmel_tcb_pwm_pm_ops, atmel_tcb_pwm_suspend,
atmel_tcb_pwm_resume);
static struct platform_driver atmel_tcb_pwm_driver = {
.driver = {
.name = "atmel-tcb-pwm",
.of_match_table = atmel_tcb_pwm_dt_ids,
.pm = &atmel_tcb_pwm_pm_ops,
},
.probe = atmel_tcb_pwm_probe,
.remove_new = atmel_tcb_pwm_remove,
};
module_platform_driver(atmel_tcb_pwm_driver);
MODULE_AUTHOR("Boris BREZILLON <[email protected]>");
MODULE_DESCRIPTION("Atmel Timer Counter Pulse Width Modulation Driver");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/pwm/pwm-atmel-tcb.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Authors:
* Branden Bonaby <[email protected]>
*/
#include <linux/hyperv.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/err.h>
#include "hyperv_vmbus.h"
static struct dentry *hv_debug_root;
static int hv_debugfs_delay_get(void *data, u64 *val)
{
*val = *(u32 *)data;
return 0;
}
static int hv_debugfs_delay_set(void *data, u64 val)
{
if (val > 1000)
return -EINVAL;
*(u32 *)data = val;
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(hv_debugfs_delay_fops, hv_debugfs_delay_get,
hv_debugfs_delay_set, "%llu\n");
static int hv_debugfs_state_get(void *data, u64 *val)
{
*val = *(bool *)data;
return 0;
}
static int hv_debugfs_state_set(void *data, u64 val)
{
if (val == 1)
*(bool *)data = true;
else if (val == 0)
*(bool *)data = false;
else
return -EINVAL;
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(hv_debugfs_state_fops, hv_debugfs_state_get,
hv_debugfs_state_set, "%llu\n");
/* Set up delay files to store test values */
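/*
 * Illustrative usage (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *   echo 500 > /sys/kernel/debug/hyperv/<device>/delay/fuzz_test_message_delay
 *
 * hv_debugfs_delay_set() rejects values above 1000; the stored value is
 * consumed as a microsecond delay by hv_debug_delay_test() below.
 */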
static int hv_debug_delay_files(struct hv_device *dev, struct dentry *root)
{
struct vmbus_channel *channel = dev->channel;
char *buffer = "fuzz_test_buffer_interrupt_delay";
char *message = "fuzz_test_message_delay";
int *buffer_val = &channel->fuzz_testing_interrupt_delay;
int *message_val = &channel->fuzz_testing_message_delay;
struct dentry *buffer_file, *message_file;
buffer_file = debugfs_create_file(buffer, 0644, root,
buffer_val,
&hv_debugfs_delay_fops);
if (IS_ERR(buffer_file)) {
pr_debug("debugfs_hyperv: file %s not created\n", buffer);
return PTR_ERR(buffer_file);
}
message_file = debugfs_create_file(message, 0644, root,
message_val,
&hv_debugfs_delay_fops);
if (IS_ERR(message_file)) {
pr_debug("debugfs_hyperv: file %s not created\n", message);
return PTR_ERR(message_file);
}
return 0;
}
/* Set up the test state value for a vmbus device */
static int hv_debug_set_test_state(struct hv_device *dev, struct dentry *root)
{
struct vmbus_channel *channel = dev->channel;
bool *state = &channel->fuzz_testing_state;
char *status = "fuzz_test_state";
struct dentry *test_state;
test_state = debugfs_create_file(status, 0644, root,
state,
&hv_debugfs_state_fops);
if (IS_ERR(test_state)) {
pr_debug("debugfs_hyperv: file %s not created\n", status);
return PTR_ERR(test_state);
}
return 0;
}
/* Bind hv device to a dentry for debugfs */
static void hv_debug_set_dir_dentry(struct hv_device *dev, struct dentry *root)
{
if (hv_debug_root)
dev->debug_dir = root;
}
/* Create all test dentries and names for fuzz testing */
int hv_debug_add_dev_dir(struct hv_device *dev)
{
const char *device = dev_name(&dev->device);
char *delay_name = "delay";
struct dentry *delay, *dev_root;
int ret;
if (!IS_ERR(hv_debug_root)) {
dev_root = debugfs_create_dir(device, hv_debug_root);
if (IS_ERR(dev_root)) {
pr_debug("debugfs_hyperv: hyperv/%s/ not created\n",
device);
return PTR_ERR(dev_root);
}
hv_debug_set_test_state(dev, dev_root);
hv_debug_set_dir_dentry(dev, dev_root);
delay = debugfs_create_dir(delay_name, dev_root);
if (IS_ERR(delay)) {
pr_debug("debugfs_hyperv: hyperv/%s/%s/ not created\n",
device, delay_name);
return PTR_ERR(delay);
}
ret = hv_debug_delay_files(dev, delay);
return ret;
}
pr_debug("debugfs_hyperv: hyperv/ not in root debugfs path\n");
return PTR_ERR(hv_debug_root);
}
/* Remove dentry associated with released hv device */
void hv_debug_rm_dev_dir(struct hv_device *dev)
{
if (!IS_ERR(hv_debug_root))
debugfs_remove_recursive(dev->debug_dir);
}
/* Remove all dentries associated with vmbus testing */
void hv_debug_rm_all_dir(void)
{
debugfs_remove_recursive(hv_debug_root);
}
/* Delay buffer/message reads on a vmbus channel */
void hv_debug_delay_test(struct vmbus_channel *channel, enum delay delay_type)
{
struct vmbus_channel *test_channel = channel->primary_channel ?
channel->primary_channel :
channel;
bool state = test_channel->fuzz_testing_state;
if (state) {
if (delay_type == 0)
udelay(test_channel->fuzz_testing_interrupt_delay);
else
udelay(test_channel->fuzz_testing_message_delay);
}
}
/* Initialize top dentry for vmbus testing */
int hv_debug_init(void)
{
hv_debug_root = debugfs_create_dir("hyperv", NULL);
if (IS_ERR(hv_debug_root)) {
pr_debug("debugfs_hyperv: hyperv/ not created\n");
return PTR_ERR(hv_debug_root);
}
return 0;
}
|
linux-master
|
drivers/hv/hv_debugfs.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Architecture neutral utility routines for interacting with
* Hyper-V. This file is specifically for code that must be
* built-in to the kernel image when CONFIG_HYPERV is set
* (vs. being in a module) because it is called from architecture
* specific code under arch/.
*
* Copyright (C) 2021, Microsoft, Inc.
*
* Author : Michael Kelley <[email protected]>
*/
#include <linux/types.h>
#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/bitfield.h>
#include <linux/cpumask.h>
#include <linux/sched/task_stack.h>
#include <linux/panic_notifier.h>
#include <linux/ptrace.h>
#include <linux/kdebug.h>
#include <linux/kmsg_dump.h>
#include <linux/slab.h>
#include <linux/dma-map-ops.h>
#include <linux/set_memory.h>
#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
/*
* hv_root_partition, ms_hyperv and hv_nested are defined here with other
* Hyper-V specific globals so they are shared across all architectures and are
* built only when CONFIG_HYPERV is defined. But on x86,
* ms_hyperv_init_platform() is built even when CONFIG_HYPERV is not
* defined, and it uses these three variables. So mark them as __weak
* here, allowing for an overriding definition in the module containing
* ms_hyperv_init_platform().
*/
bool __weak hv_root_partition;
EXPORT_SYMBOL_GPL(hv_root_partition);
bool __weak hv_nested;
EXPORT_SYMBOL_GPL(hv_nested);
struct ms_hyperv_info __weak ms_hyperv;
EXPORT_SYMBOL_GPL(ms_hyperv);
u32 *hv_vp_index;
EXPORT_SYMBOL_GPL(hv_vp_index);
u32 hv_max_vp_index;
EXPORT_SYMBOL_GPL(hv_max_vp_index);
void * __percpu *hyperv_pcpu_input_arg;
EXPORT_SYMBOL_GPL(hyperv_pcpu_input_arg);
void * __percpu *hyperv_pcpu_output_arg;
EXPORT_SYMBOL_GPL(hyperv_pcpu_output_arg);
static void hv_kmsg_dump_unregister(void);
static struct ctl_table_header *hv_ctl_table_hdr;
/*
* Hyper-V specific initialization and shutdown code that is
* common across all architectures. Called from architecture
* specific initialization functions.
*/
void __init hv_common_free(void)
{
unregister_sysctl_table(hv_ctl_table_hdr);
hv_ctl_table_hdr = NULL;
if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE)
hv_kmsg_dump_unregister();
kfree(hv_vp_index);
hv_vp_index = NULL;
free_percpu(hyperv_pcpu_output_arg);
hyperv_pcpu_output_arg = NULL;
free_percpu(hyperv_pcpu_input_arg);
hyperv_pcpu_input_arg = NULL;
}
/*
* Functions for allocating and freeing memory with size and
* alignment HV_HYP_PAGE_SIZE. These functions are needed because
* the guest page size may not be the same as the Hyper-V page
* size. We depend upon kmalloc() aligning power-of-two size
* allocations to the allocation size boundary, so that the
* allocated memory appears to Hyper-V as a page of the size
* it expects.
*/
void *hv_alloc_hyperv_page(void)
{
BUILD_BUG_ON(PAGE_SIZE < HV_HYP_PAGE_SIZE);
if (PAGE_SIZE == HV_HYP_PAGE_SIZE)
return (void *)__get_free_page(GFP_KERNEL);
else
return kmalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(hv_alloc_hyperv_page);
void *hv_alloc_hyperv_zeroed_page(void)
{
if (PAGE_SIZE == HV_HYP_PAGE_SIZE)
return (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
else
return kzalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(hv_alloc_hyperv_zeroed_page);
void hv_free_hyperv_page(void *addr)
{
if (PAGE_SIZE == HV_HYP_PAGE_SIZE)
free_page((unsigned long)addr);
else
kfree(addr);
}
EXPORT_SYMBOL_GPL(hv_free_hyperv_page);
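/*
* Illustrative sketch (hv_example_* is hypothetical, not part of this file):
* a typical caller pairs the allocation and free helpers above so that the
* buffer handed to the hypervisor is always HV_HYP_PAGE_SIZE bytes,
* regardless of the guest PAGE_SIZE.
*/
static inline int hv_example_with_hyperv_page(void)
{
void *page = hv_alloc_hyperv_zeroed_page();

if (!page)
return -ENOMEM;

/* ... hand virt_to_phys(page) to a hypercall here ... */

hv_free_hyperv_page(page);
return 0;
}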
static void *hv_panic_page;
/*
* Boolean to control whether to report panic messages over Hyper-V.
*
* It can be set via /proc/sys/kernel/hyperv_record_panic_msg
*/
static int sysctl_record_panic_msg = 1;
/*
* sysctl option to allow the user to control whether kmsg data should be
* reported to Hyper-V on panic.
*/
static struct ctl_table hv_ctl_table[] = {
{
.procname = "hyperv_record_panic_msg",
.data = &sysctl_record_panic_msg,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE
},
{}
};
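/*
* Illustrative usage from user space (value and shell syntax are just an
* example): panic message reporting can be turned off at run time with
*
* echo 0 > /proc/sys/kernel/hyperv_record_panic_msg
*/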
static int hv_die_panic_notify_crash(struct notifier_block *self,
unsigned long val, void *args);
static struct notifier_block hyperv_die_report_block = {
.notifier_call = hv_die_panic_notify_crash,
};
static struct notifier_block hyperv_panic_report_block = {
.notifier_call = hv_die_panic_notify_crash,
};
/*
* The following callback works both as die and panic notifier; its
* goal is to provide panic information to the hypervisor unless the
* kmsg dumper is used [see hv_kmsg_dump()], which provides more
* information but isn't always available.
*
* Notice that both the panic/die report notifiers are registered only
* if we have the capability HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE set.
*/
static int hv_die_panic_notify_crash(struct notifier_block *self,
unsigned long val, void *args)
{
struct pt_regs *regs;
bool is_die;
/* Don't notify Hyper-V unless we have a die oops event or panic. */
if (self == &hyperv_panic_report_block) {
is_die = false;
regs = current_pt_regs();
} else { /* die event */
if (val != DIE_OOPS)
return NOTIFY_DONE;
is_die = true;
regs = ((struct die_args *)args)->regs;
}
/*
* Hyper-V should be notified only once about a panic/die. If we are
* going to call hv_kmsg_dump() later with kmsg data, don't do the
* notification here.
*/
if (!sysctl_record_panic_msg || !hv_panic_page)
hyperv_report_panic(regs, val, is_die);
return NOTIFY_DONE;
}
/*
* Callback from kmsg_dump. Grab as much as possible from the end of the kmsg
* buffer and call into Hyper-V to transfer the data.
*/
static void hv_kmsg_dump(struct kmsg_dumper *dumper,
enum kmsg_dump_reason reason)
{
struct kmsg_dump_iter iter;
size_t bytes_written;
/* We are only interested in panics. */
if (reason != KMSG_DUMP_PANIC || !sysctl_record_panic_msg)
return;
/*
* Write dump contents to the page. No need to synchronize; panic should
* be single-threaded.
*/
kmsg_dump_rewind(&iter);
kmsg_dump_get_buffer(&iter, false, hv_panic_page, HV_HYP_PAGE_SIZE,
&bytes_written);
if (!bytes_written)
return;
/*
* P3 contains the physical address of the panic page and P4 the size of
* the panic data in that page. The rest of the registers are no-ops
* when the NOTIFY_MSG flag is set.
*/
hv_set_register(HV_REGISTER_CRASH_P0, 0);
hv_set_register(HV_REGISTER_CRASH_P1, 0);
hv_set_register(HV_REGISTER_CRASH_P2, 0);
hv_set_register(HV_REGISTER_CRASH_P3, virt_to_phys(hv_panic_page));
hv_set_register(HV_REGISTER_CRASH_P4, bytes_written);
/*
* Let Hyper-V know there is crash data available along with
* the panic message.
*/
hv_set_register(HV_REGISTER_CRASH_CTL,
(HV_CRASH_CTL_CRASH_NOTIFY |
HV_CRASH_CTL_CRASH_NOTIFY_MSG));
}
static struct kmsg_dumper hv_kmsg_dumper = {
.dump = hv_kmsg_dump,
};
static void hv_kmsg_dump_unregister(void)
{
kmsg_dump_unregister(&hv_kmsg_dumper);
unregister_die_notifier(&hyperv_die_report_block);
atomic_notifier_chain_unregister(&panic_notifier_list,
&hyperv_panic_report_block);
hv_free_hyperv_page(hv_panic_page);
hv_panic_page = NULL;
}
static void hv_kmsg_dump_register(void)
{
int ret;
hv_panic_page = hv_alloc_hyperv_zeroed_page();
if (!hv_panic_page) {
pr_err("Hyper-V: panic message page memory allocation failed\n");
return;
}
ret = kmsg_dump_register(&hv_kmsg_dumper);
if (ret) {
pr_err("Hyper-V: kmsg dump register error 0x%x\n", ret);
hv_free_hyperv_page(hv_panic_page);
hv_panic_page = NULL;
}
}
int __init hv_common_init(void)
{
int i;
if (hv_is_isolation_supported())
sysctl_record_panic_msg = 0;
/*
* Hyper-V expects to get crash register data or kmsg when the crash
* enlightenment is available and the system crashes. Set
* crash_kexec_post_notifiers to true to make sure that the crash
* enlightenment interface is called before the kdump kernel runs.
*/
if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
u64 hyperv_crash_ctl;
crash_kexec_post_notifiers = true;
pr_info("Hyper-V: enabling crash_kexec_post_notifiers\n");
/*
* Panic message recording (sysctl_record_panic_msg)
* is enabled by default in non-isolated guests and
* disabled by default in isolated guests; the panic
* message recording won't be available in isolated
* guests should the following registration fail.
*/
hv_ctl_table_hdr = register_sysctl("kernel", hv_ctl_table);
if (!hv_ctl_table_hdr)
pr_err("Hyper-V: sysctl table register error");
/*
* Register for panic kmsg callback only if the right
* capability is supported by the hypervisor.
*/
hyperv_crash_ctl = hv_get_register(HV_REGISTER_CRASH_CTL);
if (hyperv_crash_ctl & HV_CRASH_CTL_CRASH_NOTIFY_MSG)
hv_kmsg_dump_register();
register_die_notifier(&hyperv_die_report_block);
atomic_notifier_chain_register(&panic_notifier_list,
&hyperv_panic_report_block);
}
/*
* Allocate the per-CPU state for the hypercall input arg.
* If this allocation fails, we will not be able to setup
* (per-CPU) hypercall input page and thus this failure is
* fatal on Hyper-V.
*/
hyperv_pcpu_input_arg = alloc_percpu(void *);
BUG_ON(!hyperv_pcpu_input_arg);
/* Allocate the per-CPU state for output arg for root */
if (hv_root_partition) {
hyperv_pcpu_output_arg = alloc_percpu(void *);
BUG_ON(!hyperv_pcpu_output_arg);
}
hv_vp_index = kmalloc_array(num_possible_cpus(), sizeof(*hv_vp_index),
GFP_KERNEL);
if (!hv_vp_index) {
hv_common_free();
return -ENOMEM;
}
for (i = 0; i < num_possible_cpus(); i++)
hv_vp_index[i] = VP_INVAL;
return 0;
}
/*
* Hyper-V specific initialization and die code for
* individual CPUs that is common across all architectures.
* Called by the CPU hotplug mechanism.
*/
int hv_common_cpu_init(unsigned int cpu)
{
void **inputarg, **outputarg;
u64 msr_vp_index;
gfp_t flags;
int pgcount = hv_root_partition ? 2 : 1;
void *mem;
int ret;
/* hv_cpu_init() can be called with IRQs disabled from hv_resume() */
flags = irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL;
inputarg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg);
/*
* hyperv_pcpu_input_arg and hyperv_pcpu_output_arg memory is already
* allocated if this CPU was previously online and then taken offline
*/
if (!*inputarg) {
mem = kmalloc(pgcount * HV_HYP_PAGE_SIZE, flags);
if (!mem)
return -ENOMEM;
if (hv_root_partition) {
outputarg = (void **)this_cpu_ptr(hyperv_pcpu_output_arg);
*outputarg = (char *)mem + HV_HYP_PAGE_SIZE;
}
if (!ms_hyperv.paravisor_present &&
(hv_isolation_type_snp() || hv_isolation_type_tdx())) {
ret = set_memory_decrypted((unsigned long)mem, pgcount);
if (ret) {
/* It may be unsafe to free 'mem' */
return ret;
}
memset(mem, 0x00, pgcount * HV_HYP_PAGE_SIZE);
}
/*
* In a fully enlightened TDX/SNP VM with more than 64 VPs, if
* hyperv_pcpu_input_arg is not NULL, set_memory_decrypted() ->
* ... -> cpa_flush()-> ... -> __send_ipi_mask_ex() tries to
* use hyperv_pcpu_input_arg as the hypercall input page, which
* must be a decrypted page in such a VM, but the page is still
* encrypted before set_memory_decrypted() returns. Fix this by
* setting *inputarg after the above set_memory_decrypted(): if
* hyperv_pcpu_input_arg is NULL, __send_ipi_mask_ex() returns
* HV_STATUS_INVALID_PARAMETER immediately, and the function
* hv_send_ipi_mask() falls back to orig_apic.send_IPI_mask(),
* which may be slightly slower than the hypercall, but still
* works correctly in such a VM.
*/
*inputarg = mem;
}
msr_vp_index = hv_get_register(HV_REGISTER_VP_INDEX);
hv_vp_index[cpu] = msr_vp_index;
if (msr_vp_index > hv_max_vp_index)
hv_max_vp_index = msr_vp_index;
return 0;
}
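/*
* Illustrative sketch (hv_example_* is hypothetical, not part of this file):
* hypercall call sites typically pick up the per-CPU input page prepared by
* hv_common_cpu_init() with interrupts disabled, so the pointer cannot change
* underneath them. input_len is assumed to fit within HV_HYP_PAGE_SIZE.
*/
static inline u64 hv_example_simple_hypercall(u64 control, size_t input_len)
{
unsigned long flags;
void *input;
u64 status;

local_irq_save(flags);
input = *(void **)this_cpu_ptr(hyperv_pcpu_input_arg);
memset(input, 0, input_len);
/* ... fill in the hypercall input structure here ... */
status = hv_do_hypercall(control, input, NULL);
local_irq_restore(flags);

return status;
}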
int hv_common_cpu_die(unsigned int cpu)
{
/*
* The hyperv_pcpu_input_arg and hyperv_pcpu_output_arg memory
* is not freed when the CPU goes offline as the hyperv_pcpu_input_arg
* may be used by the Hyper-V vPCI driver in reassigning interrupts
* as part of the offlining process. The interrupt reassignment
* happens *after* the CPUHP_AP_HYPERV_ONLINE state has run and
* called this function.
*
* If a previously offlined CPU is brought back online again, the
* originally allocated memory is reused in hv_common_cpu_init().
*/
return 0;
}
/* Bit mask of the extended capability to query: see HV_EXT_CAPABILITY_xxx */
bool hv_query_ext_cap(u64 cap_query)
{
/*
* The address of the 'hv_extended_cap' variable will be used as an
* output parameter to the hypercall below, so it should be
* compatible with 'virt_to_phys', which means its address should be
* directly mapped. Use 'static' to keep it compatible; stack variables
* can be virtually mapped, making them incompatible with
* 'virt_to_phys'.
* Hypercall input/output addresses should also be 8-byte aligned.
*/
static u64 hv_extended_cap __aligned(8);
static bool hv_extended_cap_queried;
u64 status;
/*
* Querying extended capabilities is an extended hypercall. Check first
* whether the partition supports extended hypercalls.
*/
if (!(ms_hyperv.priv_high & HV_ENABLE_EXTENDED_HYPERCALLS))
return false;
/* Extended capabilities do not change at runtime. */
if (hv_extended_cap_queried)
return hv_extended_cap & cap_query;
status = hv_do_hypercall(HV_EXT_CALL_QUERY_CAPABILITIES, NULL,
&hv_extended_cap);
/*
* The query extended capabilities hypercall should not fail under
* any normal circumstances. Avoid repeatedly making the hypercall on error.
*/
hv_extended_cap_queried = true;
if (!hv_result_success(status)) {
pr_err("Hyper-V: Extended query capabilities hypercall failed 0x%llx\n",
status);
return false;
}
return hv_extended_cap & cap_query;
}
EXPORT_SYMBOL_GPL(hv_query_ext_cap);
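/*
* Illustrative usage (the capability name below stands in for one of the
* HV_EXT_CAPABILITY_xxx bit masks):
*
* if (hv_query_ext_cap(HV_EXT_CAPABILITY_XXX))
* ... the corresponding optional feature may be used ...
*/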
void hv_setup_dma_ops(struct device *dev, bool coherent)
{
/*
* Hyper-V does not offer a vIOMMU in the guest
* VM, so pass 0/NULL for the IOMMU settings
*/
arch_setup_dma_ops(dev, 0, 0, NULL, coherent);
}
EXPORT_SYMBOL_GPL(hv_setup_dma_ops);
bool hv_is_hibernation_supported(void)
{
return !hv_root_partition && acpi_sleep_state_supported(ACPI_STATE_S4);
}
EXPORT_SYMBOL_GPL(hv_is_hibernation_supported);
/*
* Default function to read the Hyper-V reference counter, independent
* of whether Hyper-V enlightened clocks/timers are being used. But on
* architectures where it is used, Hyper-V enlightenment code in
* hyperv_timer.c may override this function.
*/
static u64 __hv_read_ref_counter(void)
{
return hv_get_register(HV_REGISTER_TIME_REF_COUNT);
}
u64 (*hv_read_reference_counter)(void) = __hv_read_ref_counter;
EXPORT_SYMBOL_GPL(hv_read_reference_counter);
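/*
* Illustrative sketch (hv_example_* is hypothetical, not part of this file):
* the Hyper-V reference counter counts in 100 ns units, so an elapsed-time
* measurement is just two reads through the pointer above.
*/
static inline u64 hv_example_elapsed_100ns(u64 start)
{
return hv_read_reference_counter() - start;
}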
/* These __weak functions provide default "no-op" behavior and
* may be overridden by architecture specific versions. Architectures
* for which the default "no-op" behavior is sufficient can leave
* them unimplemented and not be cluttered with a bunch of stub
* functions in arch-specific code.
*/
bool __weak hv_is_isolation_supported(void)
{
return false;
}
EXPORT_SYMBOL_GPL(hv_is_isolation_supported);
bool __weak hv_isolation_type_snp(void)
{
return false;
}
EXPORT_SYMBOL_GPL(hv_isolation_type_snp);
bool __weak hv_isolation_type_tdx(void)
{
return false;
}
EXPORT_SYMBOL_GPL(hv_isolation_type_tdx);
void __weak hv_setup_vmbus_handler(void (*handler)(void))
{
}
EXPORT_SYMBOL_GPL(hv_setup_vmbus_handler);
void __weak hv_remove_vmbus_handler(void)
{
}
EXPORT_SYMBOL_GPL(hv_remove_vmbus_handler);
void __weak hv_setup_kexec_handler(void (*handler)(void))
{
}
EXPORT_SYMBOL_GPL(hv_setup_kexec_handler);
void __weak hv_remove_kexec_handler(void)
{
}
EXPORT_SYMBOL_GPL(hv_remove_kexec_handler);
void __weak hv_setup_crash_handler(void (*handler)(struct pt_regs *regs))
{
}
EXPORT_SYMBOL_GPL(hv_setup_crash_handler);
void __weak hv_remove_crash_handler(void)
{
}
EXPORT_SYMBOL_GPL(hv_remove_crash_handler);
void __weak hyperv_cleanup(void)
{
}
EXPORT_SYMBOL_GPL(hyperv_cleanup);
u64 __weak hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size)
{
return HV_STATUS_INVALID_PARAMETER;
}
EXPORT_SYMBOL_GPL(hv_ghcb_hypercall);
u64 __weak hv_tdx_hypercall(u64 control, u64 param1, u64 param2)
{
return HV_STATUS_INVALID_PARAMETER;
}
EXPORT_SYMBOL_GPL(hv_tdx_hypercall);
|
linux-master
|
drivers/hv/hv_common.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2009, Microsoft Corporation.
*
* Authors:
* Haiyang Zhang <[email protected]>
* Hank Janssen <[email protected]>
* K. Y. Srinivasan <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/hyperv.h>
#include <linux/kernel_stat.h>
#include <linux/of_address.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/sched/isolation.h>
#include <linux/sched/task_stack.h>
#include <linux/delay.h>
#include <linux/panic_notifier.h>
#include <linux/ptrace.h>
#include <linux/screen_info.h>
#include <linux/efi.h>
#include <linux/random.h>
#include <linux/kernel.h>
#include <linux/syscore_ops.h>
#include <linux/dma-map-ops.h>
#include <linux/pci.h>
#include <clocksource/hyperv_timer.h>
#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"
struct vmbus_dynid {
struct list_head node;
struct hv_vmbus_device_id id;
};
static struct device *hv_dev;
static int hyperv_cpuhp_online;
static long __percpu *vmbus_evt;
/* Values parsed from ACPI DSDT */
int vmbus_irq;
int vmbus_interrupt;
/*
* The panic notifier below is responsible solely for unloading the
* vmbus connection, which is necessary in a panic event.
*
* Notice that an intricate relation exists between this notifier and the
* Hyper-V framebuffer panic notifier - the latter needs the vmbus
* connection alive in order to succeed, so the two must be ordered with
* respect to each other [see hvfb_on_panic()]; this is done using the
* notifiers' priorities.
*/
static int hv_panic_vmbus_unload(struct notifier_block *nb, unsigned long val,
void *args)
{
vmbus_initiate_unload(true);
return NOTIFY_DONE;
}
static struct notifier_block hyperv_panic_vmbus_unload_block = {
.notifier_call = hv_panic_vmbus_unload,
.priority = INT_MIN + 1, /* almost the latest one to execute */
};
static const char *fb_mmio_name = "fb_range";
static struct resource *fb_mmio;
static struct resource *hyperv_mmio;
static DEFINE_MUTEX(hyperv_mmio_lock);
static int vmbus_exists(void)
{
if (hv_dev == NULL)
return -ENODEV;
return 0;
}
static u8 channel_monitor_group(const struct vmbus_channel *channel)
{
return (u8)channel->offermsg.monitorid / 32;
}
static u8 channel_monitor_offset(const struct vmbus_channel *channel)
{
return (u8)channel->offermsg.monitorid % 32;
}
static u32 channel_pending(const struct vmbus_channel *channel,
const struct hv_monitor_page *monitor_page)
{
u8 monitor_group = channel_monitor_group(channel);
return monitor_page->trigger_group[monitor_group].pending;
}
static u32 channel_latency(const struct vmbus_channel *channel,
const struct hv_monitor_page *monitor_page)
{
u8 monitor_group = channel_monitor_group(channel);
u8 monitor_offset = channel_monitor_offset(channel);
return monitor_page->latency[monitor_group][monitor_offset];
}
static u32 channel_conn_id(struct vmbus_channel *channel,
struct hv_monitor_page *monitor_page)
{
u8 monitor_group = channel_monitor_group(channel);
u8 monitor_offset = channel_monitor_offset(channel);
return monitor_page->parameter[monitor_group][monitor_offset].connectionid.u.id;
}
static ssize_t id_show(struct device *dev, struct device_attribute *dev_attr,
char *buf)
{
struct hv_device *hv_dev = device_to_hv_device(dev);
if (!hv_dev->channel)
return -ENODEV;
return sprintf(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
}
static DEVICE_ATTR_RO(id);
static ssize_t state_show(struct device *dev, struct device_attribute *dev_attr,
char *buf)
{
struct hv_device *hv_dev = device_to_hv_device(dev);
if (!hv_dev->channel)
return -ENODEV;
return sprintf(buf, "%d\n", hv_dev->channel->state);
}
static DEVICE_ATTR_RO(state);
static ssize_t monitor_id_show(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
struct hv_device *hv_dev = device_to_hv_device(dev);
if (!hv_dev->channel)
return -ENODEV;
return sprintf(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
}
static DEVICE_ATTR_RO(monitor_id);
static ssize_t class_id_show(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
struct hv_device *hv_dev = device_to_hv_device(dev);
if (!hv_dev->channel)
return -ENODEV;
return sprintf(buf, "{%pUl}\n",
&hv_dev->channel->offermsg.offer.if_type);
}
static DEVICE_ATTR_RO(class_id);
static ssize_t device_id_show(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
struct hv_device *hv_dev = device_to_hv_device(dev);
if (!hv_dev->channel)
return -ENODEV;
return sprintf(buf, "{%pUl}\n",
&hv_dev->channel->offermsg.offer.if_instance);
}
static DEVICE_ATTR_RO(device_id);
static ssize_t modalias_show(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
struct hv_device *hv_dev = device_to_hv_device(dev);
return sprintf(buf, "vmbus:%*phN\n", UUID_SIZE, &hv_dev->dev_type);
}
static DEVICE_ATTR_RO(modalias);
#ifdef CONFIG_NUMA
static ssize_t numa_node_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hv_device *hv_dev = device_to_hv_device(dev);
if (!hv_dev->channel)
return -ENODEV;
return sprintf(buf, "%d\n", cpu_to_node(hv_dev->channel->target_cpu));
}
static DEVICE_ATTR_RO(numa_node);
#endif
static ssize_t server_monitor_pending_show(struct device *dev,
struct device_attribute *dev_attr,
char *buf)
{
struct hv_device *hv_dev = device_to_hv_device(dev);
if (!hv_dev->channel)
return -ENODEV;
return sprintf(buf, "%d\n",
channel_pending(hv_dev->channel,
vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_pending);
static ssize_t client_monitor_pending_show(struct device *dev,
struct device_attribute *dev_attr,
char *buf)
{
struct hv_device *hv_dev = device_to_hv_device(dev);
if (!hv_dev->channel)
return -ENODEV;
return sprintf(buf, "%d\n",
channel_pending(hv_dev->channel,
vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_pending);
static ssize_t server_monitor_latency_show(struct device *dev,
struct device_attribute *dev_attr,
char *buf)
{
struct hv_device *hv_dev = device_to_hv_device(dev);
if (!hv_dev->channel)
return -ENODEV;
return sprintf(buf, "%d\n",
channel_latency(hv_dev->channel,
vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_latency);
static ssize_t client_monitor_latency_show(struct device *dev,
struct device_attribute *dev_attr,
char *buf)
{
struct hv_device *hv_dev = device_to_hv_device(dev);
if (!hv_dev->channel)
return -ENODEV;
return sprintf(buf, "%d\n",
channel_latency(hv_dev->channel,
vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_latency);
static ssize_t server_monitor_conn_id_show(struct device *dev,
struct device_attribute *dev_attr,
char *buf)
{
struct hv_device *hv_dev = device_to_hv_device(dev);
if (!hv_dev->channel)
return -ENODEV;
return sprintf(buf, "%d\n",
channel_conn_id(hv_dev->channel,
vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_conn_id);
static ssize_t client_monitor_conn_id_show(struct device *dev,
struct device_attribute *dev_attr,
char *buf)
{
struct hv_device *hv_dev = device_to_hv_device(dev);
if (!hv_dev->channel)
return -ENODEV;
return sprintf(buf, "%d\n",
channel_conn_id(hv_dev->channel,
vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_conn_id);
static ssize_t out_intr_mask_show(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
struct hv_device *hv_dev = device_to_hv_device(dev);
struct hv_ring_buffer_debug_info outbound;
int ret;
if (!hv_dev->channel)
return -ENODEV;
ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
&outbound);
if (ret < 0)
return ret;
return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(out_intr_mask);
static ssize_t out_read_index_show(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
struct hv_device *hv_dev = device_to_hv_device(dev);
struct hv_ring_buffer_debug_info outbound;
int ret;
if (!hv_dev->channel)
return -ENODEV;
ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
&outbound);
if (ret < 0)
return ret;
return sprintf(buf, "%d\n", outbound.current_read_index);
}
static DEVICE_ATTR_RO(out_read_index);
static ssize_t out_write_index_show(struct device *dev,
struct device_attribute *dev_attr,
char *buf)
{
struct hv_device *hv_dev = device_to_hv_device(dev);
struct hv_ring_buffer_debug_info outbound;
int ret;
if (!hv_dev->channel)
return -ENODEV;
ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
&outbound);
if (ret < 0)
return ret;
return sprintf(buf, "%d\n", outbound.current_write_index);
}
static DEVICE_ATTR_RO(out_write_index);
static ssize_t out_read_bytes_avail_show(struct device *dev,
struct device_attribute *dev_attr,
char *buf)
{
struct hv_device *hv_dev = device_to_hv_device(dev);
struct hv_ring_buffer_debug_info outbound;
int ret;
if (!hv_dev->channel)
return -ENODEV;
ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
&outbound);
if (ret < 0)
return ret;
return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(out_read_bytes_avail);
static ssize_t out_write_bytes_avail_show(struct device *dev,
struct device_attribute *dev_attr,
char *buf)
{
struct hv_device *hv_dev = device_to_hv_device(dev);
struct hv_ring_buffer_debug_info outbound;
int ret;
if (!hv_dev->channel)
return -ENODEV;
ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
&outbound);
if (ret < 0)
return ret;
return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(out_write_bytes_avail);
static ssize_t in_intr_mask_show(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
struct hv_device *hv_dev = device_to_hv_device(dev);
struct hv_ring_buffer_debug_info inbound;
int ret;
if (!hv_dev->channel)
return -ENODEV;
ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
if (ret < 0)
return ret;
return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(in_intr_mask);
static ssize_t in_read_index_show(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
struct hv_device *hv_dev = device_to_hv_device(dev);
struct hv_ring_buffer_debug_info inbound;
int ret;
if (!hv_dev->channel)
return -ENODEV;
ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
if (ret < 0)
return ret;
return sprintf(buf, "%d\n", inbound.current_read_index);
}
static DEVICE_ATTR_RO(in_read_index);
static ssize_t in_write_index_show(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
struct hv_device *hv_dev = device_to_hv_device(dev);
struct hv_ring_buffer_debug_info inbound;
int ret;
if (!hv_dev->channel)
return -ENODEV;
ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
if (ret < 0)
return ret;
return sprintf(buf, "%d\n", inbound.current_write_index);
}
static DEVICE_ATTR_RO(in_write_index);
static ssize_t in_read_bytes_avail_show(struct device *dev,
struct device_attribute *dev_attr,
char *buf)
{
struct hv_device *hv_dev = device_to_hv_device(dev);
struct hv_ring_buffer_debug_info inbound;
int ret;
if (!hv_dev->channel)
return -ENODEV;
ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
if (ret < 0)
return ret;
return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(in_read_bytes_avail);
static ssize_t in_write_bytes_avail_show(struct device *dev,
struct device_attribute *dev_attr,
char *buf)
{
struct hv_device *hv_dev = device_to_hv_device(dev);
struct hv_ring_buffer_debug_info inbound;
int ret;
if (!hv_dev->channel)
return -ENODEV;
ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
if (ret < 0)
return ret;
return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(in_write_bytes_avail);
static ssize_t channel_vp_mapping_show(struct device *dev,
struct device_attribute *dev_attr,
char *buf)
{
struct hv_device *hv_dev = device_to_hv_device(dev);
struct vmbus_channel *channel = hv_dev->channel, *cur_sc;
int buf_size = PAGE_SIZE, n_written, tot_written;
struct list_head *cur;
if (!channel)
return -ENODEV;
mutex_lock(&vmbus_connection.channel_mutex);
tot_written = snprintf(buf, buf_size, "%u:%u\n",
channel->offermsg.child_relid, channel->target_cpu);
list_for_each(cur, &channel->sc_list) {
if (tot_written >= buf_size - 1)
break;
cur_sc = list_entry(cur, struct vmbus_channel, sc_list);
n_written = scnprintf(buf + tot_written,
buf_size - tot_written,
"%u:%u\n",
cur_sc->offermsg.child_relid,
cur_sc->target_cpu);
tot_written += n_written;
}
mutex_unlock(&vmbus_connection.channel_mutex);
return tot_written;
}
static DEVICE_ATTR_RO(channel_vp_mapping);
static ssize_t vendor_show(struct device *dev,
struct device_attribute *dev_attr,
char *buf)
{
struct hv_device *hv_dev = device_to_hv_device(dev);
return sprintf(buf, "0x%x\n", hv_dev->vendor_id);
}
static DEVICE_ATTR_RO(vendor);
static ssize_t device_show(struct device *dev,
struct device_attribute *dev_attr,
char *buf)
{
struct hv_device *hv_dev = device_to_hv_device(dev);
return sprintf(buf, "0x%x\n", hv_dev->device_id);
}
static DEVICE_ATTR_RO(device);
static ssize_t driver_override_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct hv_device *hv_dev = device_to_hv_device(dev);
int ret;
ret = driver_set_override(dev, &hv_dev->driver_override, buf, count);
if (ret)
return ret;
return count;
}
static ssize_t driver_override_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hv_device *hv_dev = device_to_hv_device(dev);
ssize_t len;
device_lock(dev);
len = snprintf(buf, PAGE_SIZE, "%s\n", hv_dev->driver_override);
device_unlock(dev);
return len;
}
static DEVICE_ATTR_RW(driver_override);
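/*
* Illustrative usage from user space (the GUID is a placeholder): to bind a
* device to a specific driver such as uio_hv_generic instead of its default
* one, write the driver name here before (re)binding the device:
*
* echo "uio_hv_generic" > /sys/bus/vmbus/devices/<guid>/driver_override
*/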
/* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
static struct attribute *vmbus_dev_attrs[] = {
&dev_attr_id.attr,
&dev_attr_state.attr,
&dev_attr_monitor_id.attr,
&dev_attr_class_id.attr,
&dev_attr_device_id.attr,
&dev_attr_modalias.attr,
#ifdef CONFIG_NUMA
&dev_attr_numa_node.attr,
#endif
&dev_attr_server_monitor_pending.attr,
&dev_attr_client_monitor_pending.attr,
&dev_attr_server_monitor_latency.attr,
&dev_attr_client_monitor_latency.attr,
&dev_attr_server_monitor_conn_id.attr,
&dev_attr_client_monitor_conn_id.attr,
&dev_attr_out_intr_mask.attr,
&dev_attr_out_read_index.attr,
&dev_attr_out_write_index.attr,
&dev_attr_out_read_bytes_avail.attr,
&dev_attr_out_write_bytes_avail.attr,
&dev_attr_in_intr_mask.attr,
&dev_attr_in_read_index.attr,
&dev_attr_in_write_index.attr,
&dev_attr_in_read_bytes_avail.attr,
&dev_attr_in_write_bytes_avail.attr,
&dev_attr_channel_vp_mapping.attr,
&dev_attr_vendor.attr,
&dev_attr_device.attr,
&dev_attr_driver_override.attr,
NULL,
};
/*
* Device-level attribute_group callback function. Returns the permission for
* each attribute, and returns 0 if an attribute is not visible.
*/
static umode_t vmbus_dev_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int idx)
{
struct device *dev = kobj_to_dev(kobj);
const struct hv_device *hv_dev = device_to_hv_device(dev);
/* Hide the monitor attributes if the monitor mechanism is not used. */
if (!hv_dev->channel->offermsg.monitor_allocated &&
(attr == &dev_attr_monitor_id.attr ||
attr == &dev_attr_server_monitor_pending.attr ||
attr == &dev_attr_client_monitor_pending.attr ||
attr == &dev_attr_server_monitor_latency.attr ||
attr == &dev_attr_client_monitor_latency.attr ||
attr == &dev_attr_server_monitor_conn_id.attr ||
attr == &dev_attr_client_monitor_conn_id.attr))
return 0;
return attr->mode;
}
static const struct attribute_group vmbus_dev_group = {
.attrs = vmbus_dev_attrs,
.is_visible = vmbus_dev_attr_is_visible
};
__ATTRIBUTE_GROUPS(vmbus_dev);
/* Set up the attribute for /sys/bus/vmbus/hibernation */
static ssize_t hibernation_show(const struct bus_type *bus, char *buf)
{
return sprintf(buf, "%d\n", !!hv_is_hibernation_supported());
}
static BUS_ATTR_RO(hibernation);
static struct attribute *vmbus_bus_attrs[] = {
&bus_attr_hibernation.attr,
NULL,
};
static const struct attribute_group vmbus_bus_group = {
.attrs = vmbus_bus_attrs,
};
__ATTRIBUTE_GROUPS(vmbus_bus);
/*
* vmbus_uevent - add uevent for our device
*
* This routine is invoked when a device is added or removed on the vmbus to
* generate a uevent to udev in userspace. udev will then look at its
* rules and the uevent generated here to load the appropriate driver.
*
* The alias string will be of the form vmbus:guid where guid is the string
* representation of the device guid (each byte of the guid will be
* represented with two hex characters).
*/
static int vmbus_uevent(const struct device *device, struct kobj_uevent_env *env)
{
const struct hv_device *dev = device_to_hv_device(device);
const char *format = "MODALIAS=vmbus:%*phN";
return add_uevent_var(env, format, UUID_SIZE, &dev->dev_type);
}
static const struct hv_vmbus_device_id *
hv_vmbus_dev_match(const struct hv_vmbus_device_id *id, const guid_t *guid)
{
if (id == NULL)
return NULL; /* empty device table */
for (; !guid_is_null(&id->guid); id++)
if (guid_equal(&id->guid, guid))
return id;
return NULL;
}
static const struct hv_vmbus_device_id *
hv_vmbus_dynid_match(struct hv_driver *drv, const guid_t *guid)
{
const struct hv_vmbus_device_id *id = NULL;
struct vmbus_dynid *dynid;
spin_lock(&drv->dynids.lock);
list_for_each_entry(dynid, &drv->dynids.list, node) {
if (guid_equal(&dynid->id.guid, guid)) {
id = &dynid->id;
break;
}
}
spin_unlock(&drv->dynids.lock);
return id;
}
static const struct hv_vmbus_device_id vmbus_device_null;
/*
* Return a matching hv_vmbus_device_id pointer.
* If there is no match, return NULL.
*/
static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv,
struct hv_device *dev)
{
const guid_t *guid = &dev->dev_type;
const struct hv_vmbus_device_id *id;
/* When driver_override is set, only bind to the matching driver */
if (dev->driver_override && strcmp(dev->driver_override, drv->name))
return NULL;
/* Look at the dynamic ids first, before the static ones */
id = hv_vmbus_dynid_match(drv, guid);
if (!id)
id = hv_vmbus_dev_match(drv->id_table, guid);
/* driver_override will always match, send a dummy id */
if (!id && dev->driver_override)
id = &vmbus_device_null;
return id;
}
/* vmbus_add_dynid - add a new device ID to this driver and re-probe devices */
static int vmbus_add_dynid(struct hv_driver *drv, guid_t *guid)
{
struct vmbus_dynid *dynid;
dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
if (!dynid)
return -ENOMEM;
dynid->id.guid = *guid;
spin_lock(&drv->dynids.lock);
list_add_tail(&dynid->node, &drv->dynids.list);
spin_unlock(&drv->dynids.lock);
return driver_attach(&drv->driver);
}
static void vmbus_free_dynids(struct hv_driver *drv)
{
struct vmbus_dynid *dynid, *n;
spin_lock(&drv->dynids.lock);
list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
list_del(&dynid->node);
kfree(dynid);
}
spin_unlock(&drv->dynids.lock);
}
/*
* store_new_id - sysfs frontend to vmbus_add_dynid()
*
* Allow GUIDs to be added to an existing driver via sysfs.
*/
static ssize_t new_id_store(struct device_driver *driver, const char *buf,
size_t count)
{
struct hv_driver *drv = drv_to_hv_drv(driver);
guid_t guid;
ssize_t retval;
retval = guid_parse(buf, &guid);
if (retval)
return retval;
if (hv_vmbus_dynid_match(drv, &guid))
return -EEXIST;
retval = vmbus_add_dynid(drv, &guid);
if (retval)
return retval;
return count;
}
static DRIVER_ATTR_WO(new_id);
/*
* store_remove_id - remove a device ID from this driver
*
* Removes a dynamically added VMbus device GUID from this driver.
*/
static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
size_t count)
{
struct hv_driver *drv = drv_to_hv_drv(driver);
struct vmbus_dynid *dynid, *n;
guid_t guid;
ssize_t retval;
retval = guid_parse(buf, &guid);
if (retval)
return retval;
retval = -ENODEV;
spin_lock(&drv->dynids.lock);
list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
struct hv_vmbus_device_id *id = &dynid->id;
if (guid_equal(&id->guid, &guid)) {
list_del(&dynid->node);
kfree(dynid);
retval = count;
break;
}
}
spin_unlock(&drv->dynids.lock);
return retval;
}
static DRIVER_ATTR_WO(remove_id);
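/*
* Illustrative usage from user space (driver name and GUID are placeholders):
*
* echo "<device-guid>" > /sys/bus/vmbus/drivers/<driver>/new_id
* echo "<device-guid>" > /sys/bus/vmbus/drivers/<driver>/remove_id
*
* new_id re-probes unbound devices immediately via driver_attach(); remove_id
* only drops the dynamic ID and does not detach devices that are already
* bound.
*/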
static struct attribute *vmbus_drv_attrs[] = {
&driver_attr_new_id.attr,
&driver_attr_remove_id.attr,
NULL,
};
ATTRIBUTE_GROUPS(vmbus_drv);
/*
* vmbus_match - Attempt to match the specified device to the specified driver
*/
static int vmbus_match(struct device *device, struct device_driver *driver)
{
struct hv_driver *drv = drv_to_hv_drv(driver);
struct hv_device *hv_dev = device_to_hv_device(device);
/* The hv_sock driver handles all hv_sock offers. */
if (is_hvsock_channel(hv_dev->channel))
return drv->hvsock;
if (hv_vmbus_get_id(drv, hv_dev))
return 1;
return 0;
}
/*
* vmbus_probe - Add the new vmbus child device
*/
static int vmbus_probe(struct device *child_device)
{
int ret = 0;
struct hv_driver *drv =
drv_to_hv_drv(child_device->driver);
struct hv_device *dev = device_to_hv_device(child_device);
const struct hv_vmbus_device_id *dev_id;
dev_id = hv_vmbus_get_id(drv, dev);
if (drv->probe) {
ret = drv->probe(dev, dev_id);
if (ret != 0)
pr_err("probe failed for device %s (%d)\n",
dev_name(child_device), ret);
} else {
pr_err("probe not set for driver %s\n",
dev_name(child_device));
ret = -ENODEV;
}
return ret;
}
/*
* vmbus_dma_configure - Configure DMA coherence for a VMbus device
*/
static int vmbus_dma_configure(struct device *child_device)
{
/*
* On ARM64, propagate the DMA coherence setting from the top level
* VMbus ACPI device to the child VMbus device being added here.
* On x86/x64 coherence is assumed and these calls have no effect.
*/
hv_setup_dma_ops(child_device,
device_get_dma_attr(hv_dev) == DEV_DMA_COHERENT);
return 0;
}
/*
* vmbus_remove - Remove a vmbus device
*/
static void vmbus_remove(struct device *child_device)
{
struct hv_driver *drv;
struct hv_device *dev = device_to_hv_device(child_device);
if (child_device->driver) {
drv = drv_to_hv_drv(child_device->driver);
if (drv->remove)
drv->remove(dev);
}
}
/*
* vmbus_shutdown - Shutdown a vmbus device
*/
static void vmbus_shutdown(struct device *child_device)
{
struct hv_driver *drv;
struct hv_device *dev = device_to_hv_device(child_device);
/* The device may not be attached yet */
if (!child_device->driver)
return;
drv = drv_to_hv_drv(child_device->driver);
if (drv->shutdown)
drv->shutdown(dev);
}
#ifdef CONFIG_PM_SLEEP
/*
* vmbus_suspend - Suspend a vmbus device
*/
static int vmbus_suspend(struct device *child_device)
{
struct hv_driver *drv;
struct hv_device *dev = device_to_hv_device(child_device);
/* The device may not be attached yet */
if (!child_device->driver)
return 0;
drv = drv_to_hv_drv(child_device->driver);
if (!drv->suspend)
return -EOPNOTSUPP;
return drv->suspend(dev);
}
/*
* vmbus_resume - Resume a vmbus device
*/
static int vmbus_resume(struct device *child_device)
{
struct hv_driver *drv;
struct hv_device *dev = device_to_hv_device(child_device);
/* The device may not be attached yet */
if (!child_device->driver)
return 0;
drv = drv_to_hv_drv(child_device->driver);
if (!drv->resume)
return -EOPNOTSUPP;
return drv->resume(dev);
}
#else
#define vmbus_suspend NULL
#define vmbus_resume NULL
#endif /* CONFIG_PM_SLEEP */
/*
* vmbus_device_release - Final callback release of the vmbus child device
*/
static void vmbus_device_release(struct device *device)
{
struct hv_device *hv_dev = device_to_hv_device(device);
struct vmbus_channel *channel = hv_dev->channel;
hv_debug_rm_dev_dir(hv_dev);
mutex_lock(&vmbus_connection.channel_mutex);
hv_process_channel_removal(channel);
mutex_unlock(&vmbus_connection.channel_mutex);
kfree(hv_dev);
}
/*
* Note: we must use the "noirq" ops: see the comment before vmbus_bus_pm.
*
* suspend_noirq/resume_noirq are set to NULL to support Suspend-to-Idle: we
* shouldn't suspend the vmbus devices upon Suspend-to-Idle, otherwise there
* is no way to wake up a Generation-2 VM.
*
* The other 4 ops are for hibernation.
*/
static const struct dev_pm_ops vmbus_pm = {
.suspend_noirq = NULL,
.resume_noirq = NULL,
.freeze_noirq = vmbus_suspend,
.thaw_noirq = vmbus_resume,
.poweroff_noirq = vmbus_suspend,
.restore_noirq = vmbus_resume,
};
/* The one and only one */
static struct bus_type hv_bus = {
.name = "vmbus",
.match = vmbus_match,
.shutdown = vmbus_shutdown,
.remove = vmbus_remove,
.probe = vmbus_probe,
.uevent = vmbus_uevent,
.dma_configure = vmbus_dma_configure,
.dev_groups = vmbus_dev_groups,
.drv_groups = vmbus_drv_groups,
.bus_groups = vmbus_bus_groups,
.pm = &vmbus_pm,
};
struct onmessage_work_context {
struct work_struct work;
struct {
struct hv_message_header header;
u8 payload[];
} msg;
};
static void vmbus_onmessage_work(struct work_struct *work)
{
struct onmessage_work_context *ctx;
/* Do not process messages if we're in DISCONNECTED state */
if (vmbus_connection.conn_state == DISCONNECTED)
return;
ctx = container_of(work, struct onmessage_work_context,
work);
vmbus_onmessage((struct vmbus_channel_message_header *)
&ctx->msg.payload);
kfree(ctx);
}
void vmbus_on_msg_dpc(unsigned long data)
{
struct hv_per_cpu_context *hv_cpu = (void *)data;
void *page_addr = hv_cpu->synic_message_page;
struct hv_message msg_copy, *msg = (struct hv_message *)page_addr +
VMBUS_MESSAGE_SINT;
struct vmbus_channel_message_header *hdr;
enum vmbus_channel_message_type msgtype;
const struct vmbus_channel_message_table_entry *entry;
struct onmessage_work_context *ctx;
__u8 payload_size;
u32 message_type;
/*
* 'enum vmbus_channel_message_type' is supposed to always be 'u32' as
* it is being used in 'struct vmbus_channel_message_header' definition
* which is supposed to match hypervisor ABI.
*/
BUILD_BUG_ON(sizeof(enum vmbus_channel_message_type) != sizeof(u32));
/*
* Since the message is in memory shared with the host, an erroneous or
* malicious Hyper-V could modify the message while vmbus_on_msg_dpc()
* or individual message handlers are executing; to prevent this, copy
* the message into private memory.
*/
memcpy(&msg_copy, msg, sizeof(struct hv_message));
message_type = msg_copy.header.message_type;
if (message_type == HVMSG_NONE)
/* no msg */
return;
hdr = (struct vmbus_channel_message_header *)msg_copy.u.payload;
msgtype = hdr->msgtype;
trace_vmbus_on_msg_dpc(hdr);
if (msgtype >= CHANNELMSG_COUNT) {
WARN_ONCE(1, "unknown msgtype=%d\n", msgtype);
goto msg_handled;
}
payload_size = msg_copy.header.payload_size;
if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT) {
WARN_ONCE(1, "payload size is too large (%d)\n", payload_size);
goto msg_handled;
}
entry = &channel_message_table[msgtype];
if (!entry->message_handler)
goto msg_handled;
if (payload_size < entry->min_payload_len) {
WARN_ONCE(1, "message too short: msgtype=%d len=%d\n", msgtype, payload_size);
goto msg_handled;
}
if (entry->handler_type == VMHT_BLOCKING) {
ctx = kmalloc(struct_size(ctx, msg.payload, payload_size), GFP_ATOMIC);
if (ctx == NULL)
return;
INIT_WORK(&ctx->work, vmbus_onmessage_work);
ctx->msg.header = msg_copy.header;
memcpy(&ctx->msg.payload, msg_copy.u.payload, payload_size);
/*
* The host can generate a rescind message while we
* may still be handling the original offer. We deal with
* this condition by relying on the synchronization provided
* by offer_in_progress and by channel_mutex. See also the
* inline comments in vmbus_onoffer_rescind().
*/
switch (msgtype) {
case CHANNELMSG_RESCIND_CHANNELOFFER:
/*
* If we are handling the rescind message,
* schedule the work on the global work queue.
*
* The OFFER message and the RESCIND message should
* not be handled by the same serialized work queue,
* because the OFFER handler may call vmbus_open(),
* which tries to open the channel by sending an
* OPEN_CHANNEL message to the host and waits for
* the host's response; however, if the host has
* rescinded the channel before it receives the
* OPEN_CHANNEL message, the host just silently
* ignores the OPEN_CHANNEL message; as a result,
* the guest's OFFER handler hangs forever if we
* handle the RESCIND message in the same serialized
* work queue: the RESCIND handler can not start to
* run before the OFFER handler finishes.
*/
if (vmbus_connection.ignore_any_offer_msg)
break;
queue_work(vmbus_connection.rescind_work_queue, &ctx->work);
break;
case CHANNELMSG_OFFERCHANNEL:
/*
* The host sends the offer message of a given channel
* before sending the rescind message of the same
* channel. These messages are sent to the guest's
* connect CPU; the guest then starts processing them
* in the tasklet handler on this CPU:
*
* VMBUS_CONNECT_CPU
*
* [vmbus_on_msg_dpc()]
* atomic_inc() // CHANNELMSG_OFFERCHANNEL
* queue_work()
* ...
* [vmbus_on_msg_dpc()]
* schedule_work() // CHANNELMSG_RESCIND_CHANNELOFFER
*
* We rely on the memory-ordering properties of the
* queue_work() and schedule_work() primitives, which
* guarantee that the atomic increment will be visible
* to the CPUs which will execute the offer & rescind
* works by the time these works will start execution.
*/
if (vmbus_connection.ignore_any_offer_msg)
break;
atomic_inc(&vmbus_connection.offer_in_progress);
fallthrough;
default:
queue_work(vmbus_connection.work_queue, &ctx->work);
}
} else
entry->message_handler(hdr);
msg_handled:
vmbus_signal_eom(msg, message_type);
}
#ifdef CONFIG_PM_SLEEP
/*
* Fake RESCIND_CHANNEL messages to clean up hv_sock channels by force for
* hibernation, because hv_sock connections can not persist across hibernation.
*/
static void vmbus_force_channel_rescinded(struct vmbus_channel *channel)
{
struct onmessage_work_context *ctx;
struct vmbus_channel_rescind_offer *rescind;
WARN_ON(!is_hvsock_channel(channel));
/*
* Allocation size is small and the allocation should really not fail,
* otherwise the state of the hv_sock connections ends up in limbo.
*/
ctx = kzalloc(sizeof(*ctx) + sizeof(*rescind),
GFP_KERNEL | __GFP_NOFAIL);
/*
* So far, these are not really used by Linux. Just set them to
* reasonable values conforming to the definitions of the fields.
*/
ctx->msg.header.message_type = 1;
ctx->msg.header.payload_size = sizeof(*rescind);
/* These values are actually used by Linux. */
rescind = (struct vmbus_channel_rescind_offer *)ctx->msg.payload;
rescind->header.msgtype = CHANNELMSG_RESCIND_CHANNELOFFER;
rescind->child_relid = channel->offermsg.child_relid;
INIT_WORK(&ctx->work, vmbus_onmessage_work);
queue_work(vmbus_connection.work_queue, &ctx->work);
}
#endif /* CONFIG_PM_SLEEP */
/*
* Schedule all channels with events pending
*/
static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
{
unsigned long *recv_int_page;
u32 maxbits, relid;
/*
* The event page can be directly checked to get the id of
* the channel that has the interrupt pending.
*/
void *page_addr = hv_cpu->synic_event_page;
union hv_synic_event_flags *event
= (union hv_synic_event_flags *)page_addr +
VMBUS_MESSAGE_SINT;
maxbits = HV_EVENT_FLAGS_COUNT;
recv_int_page = event->flags;
if (unlikely(!recv_int_page))
return;
for_each_set_bit(relid, recv_int_page, maxbits) {
void (*callback_fn)(void *context);
struct vmbus_channel *channel;
if (!sync_test_and_clear_bit(relid, recv_int_page))
continue;
/* Special case - vmbus channel protocol msg */
if (relid == 0)
continue;
/*
* Pairs with the kfree_rcu() in vmbus_chan_release().
* Guarantees that the channel data structure doesn't
* get freed while the channel pointer below is being
* dereferenced.
*/
rcu_read_lock();
/* Find channel based on relid */
channel = relid2channel(relid);
if (channel == NULL)
goto sched_unlock_rcu;
if (channel->rescind)
goto sched_unlock_rcu;
/*
* Make sure that the ring buffer data structure doesn't get
* freed while we dereference the ring buffer pointer. Test
* for the channel's onchannel_callback being NULL within a
* sched_lock critical section. See also the inline comments
* in vmbus_reset_channel_cb().
*/
spin_lock(&channel->sched_lock);
callback_fn = channel->onchannel_callback;
if (unlikely(callback_fn == NULL))
goto sched_unlock;
trace_vmbus_chan_sched(channel);
++channel->interrupts;
switch (channel->callback_mode) {
case HV_CALL_ISR:
(*callback_fn)(channel->channel_callback_context);
break;
case HV_CALL_BATCHED:
hv_begin_read(&channel->inbound);
fallthrough;
case HV_CALL_DIRECT:
tasklet_schedule(&channel->callback_event);
}
sched_unlock:
spin_unlock(&channel->sched_lock);
sched_unlock_rcu:
rcu_read_unlock();
}
}
static void vmbus_isr(void)
{
struct hv_per_cpu_context *hv_cpu
= this_cpu_ptr(hv_context.cpu_context);
void *page_addr;
struct hv_message *msg;
vmbus_chan_sched(hv_cpu);
page_addr = hv_cpu->synic_message_page;
msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
/* Check if there are actual msgs to be processed */
if (msg->header.message_type != HVMSG_NONE) {
if (msg->header.message_type == HVMSG_TIMER_EXPIRED) {
hv_stimer0_isr();
vmbus_signal_eom(msg, HVMSG_TIMER_EXPIRED);
} else
tasklet_schedule(&hv_cpu->msg_dpc);
}
add_interrupt_randomness(vmbus_interrupt);
}
static irqreturn_t vmbus_percpu_isr(int irq, void *dev_id)
{
vmbus_isr();
return IRQ_HANDLED;
}
/*
* vmbus_bus_init - Main vmbus driver initialization routine.
*
* Here, we
* - initialize the vmbus driver context
* - invoke the vmbus hv main init routine
* - retrieve the channel offers
*/
static int vmbus_bus_init(void)
{
int ret;
ret = hv_init();
if (ret != 0) {
pr_err("Unable to initialize the hypervisor - 0x%x\n", ret);
return ret;
}
ret = bus_register(&hv_bus);
if (ret)
return ret;
/*
* VMbus interrupts are best modeled as per-cpu interrupts. If
* on an architecture with support for per-cpu IRQs (e.g. ARM64),
* allocate a per-cpu IRQ using standard Linux kernel functionality.
* If not on such an architecture (e.g., x86/x64), then rely on
* code in the arch-specific portion of the code tree to connect
* the VMbus interrupt handler.
*/
if (vmbus_irq == -1) {
hv_setup_vmbus_handler(vmbus_isr);
} else {
vmbus_evt = alloc_percpu(long);
ret = request_percpu_irq(vmbus_irq, vmbus_percpu_isr,
"Hyper-V VMbus", vmbus_evt);
if (ret) {
pr_err("Can't request Hyper-V VMbus IRQ %d, Err %d",
vmbus_irq, ret);
free_percpu(vmbus_evt);
goto err_setup;
}
}
ret = hv_synic_alloc();
if (ret)
goto err_alloc;
/*
* Initialize the per-cpu interrupt state and stimer state.
* Then connect to the host.
*/
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hyperv/vmbus:online",
hv_synic_init, hv_synic_cleanup);
if (ret < 0)
goto err_alloc;
hyperv_cpuhp_online = ret;
ret = vmbus_connect();
if (ret)
goto err_connect;
/*
* Always register the vmbus unload panic notifier because we
* need to shut the VMbus channel connection on panic.
*/
atomic_notifier_chain_register(&panic_notifier_list,
&hyperv_panic_vmbus_unload_block);
vmbus_request_offers();
return 0;
err_connect:
cpuhp_remove_state(hyperv_cpuhp_online);
err_alloc:
hv_synic_free();
if (vmbus_irq == -1) {
hv_remove_vmbus_handler();
} else {
free_percpu_irq(vmbus_irq, vmbus_evt);
free_percpu(vmbus_evt);
}
err_setup:
bus_unregister(&hv_bus);
return ret;
}
/**
* __vmbus_driver_register() - Register a vmbus driver
* @hv_driver: Pointer to driver structure you want to register
* @owner: owner module of the drv
* @mod_name: module name string
*
* Registers the given driver with Linux through the 'driver_register()' call
* and sets up the hyper-v vmbus handling for this driver.
* It returns the result of the 'driver_register()' call.
*
*/
int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, const char *mod_name)
{
int ret;
pr_info("registering driver %s\n", hv_driver->name);
ret = vmbus_exists();
if (ret < 0)
return ret;
hv_driver->driver.name = hv_driver->name;
hv_driver->driver.owner = owner;
hv_driver->driver.mod_name = mod_name;
hv_driver->driver.bus = &hv_bus;
spin_lock_init(&hv_driver->dynids.lock);
INIT_LIST_HEAD(&hv_driver->dynids.list);
ret = driver_register(&hv_driver->driver);
return ret;
}
EXPORT_SYMBOL_GPL(__vmbus_driver_register);
/**
* vmbus_driver_unregister() - Unregister a vmbus driver
* @hv_driver: Pointer to driver structure you want to
* un-register
*
* Unregisters the given driver that was previously registered with a call
* to vmbus_driver_register().
*/
void vmbus_driver_unregister(struct hv_driver *hv_driver)
{
pr_info("unregistering driver %s\n", hv_driver->name);
if (!vmbus_exists()) {
driver_unregister(&hv_driver->driver);
vmbus_free_dynids(hv_driver);
}
}
EXPORT_SYMBOL_GPL(vmbus_driver_unregister);
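/*
* Illustrative sketch (the hv_example_* names are hypothetical, not part of
* this file): a minimal VMbus driver fills in an ID table and a probe
* callback, then registers through the vmbus_driver_register() wrapper,
* which supplies THIS_MODULE and KBUILD_MODNAME to
* __vmbus_driver_register() above.
*/
static const struct hv_vmbus_device_id hv_example_id_table[] = {
{ /* GUID_INIT(...) of the supported device goes here */ },
{ },
};

static int hv_example_probe(struct hv_device *dev,
const struct hv_vmbus_device_id *dev_id)
{
dev_info(&dev->device, "example device probed\n");
return 0;
}

static struct hv_driver hv_example_drv = {
.name = "hv_example",
.id_table = hv_example_id_table,
.probe = hv_example_probe,
};

/* A module_init() would then call vmbus_driver_register(&hv_example_drv). */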
/*
* Called when last reference to channel is gone.
*/
static void vmbus_chan_release(struct kobject *kobj)
{
struct vmbus_channel *channel
= container_of(kobj, struct vmbus_channel, kobj);
kfree_rcu(channel, rcu);
}
struct vmbus_chan_attribute {
struct attribute attr;
ssize_t (*show)(struct vmbus_channel *chan, char *buf);
ssize_t (*store)(struct vmbus_channel *chan,
const char *buf, size_t count);
};
#define VMBUS_CHAN_ATTR(_name, _mode, _show, _store) \
struct vmbus_chan_attribute chan_attr_##_name \
= __ATTR(_name, _mode, _show, _store)
#define VMBUS_CHAN_ATTR_RW(_name) \
struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RW(_name)
#define VMBUS_CHAN_ATTR_RO(_name) \
struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RO(_name)
#define VMBUS_CHAN_ATTR_WO(_name) \
struct vmbus_chan_attribute chan_attr_##_name = __ATTR_WO(_name)
static ssize_t vmbus_chan_attr_show(struct kobject *kobj,
struct attribute *attr, char *buf)
{
const struct vmbus_chan_attribute *attribute
= container_of(attr, struct vmbus_chan_attribute, attr);
struct vmbus_channel *chan
= container_of(kobj, struct vmbus_channel, kobj);
if (!attribute->show)
return -EIO;
return attribute->show(chan, buf);
}
static ssize_t vmbus_chan_attr_store(struct kobject *kobj,
struct attribute *attr, const char *buf,
size_t count)
{
const struct vmbus_chan_attribute *attribute
= container_of(attr, struct vmbus_chan_attribute, attr);
struct vmbus_channel *chan
= container_of(kobj, struct vmbus_channel, kobj);
if (!attribute->store)
return -EIO;
return attribute->store(chan, buf, count);
}
static const struct sysfs_ops vmbus_chan_sysfs_ops = {
.show = vmbus_chan_attr_show,
.store = vmbus_chan_attr_store,
};
static ssize_t out_mask_show(struct vmbus_channel *channel, char *buf)
{
struct hv_ring_buffer_info *rbi = &channel->outbound;
ssize_t ret;
mutex_lock(&rbi->ring_buffer_mutex);
if (!rbi->ring_buffer) {
mutex_unlock(&rbi->ring_buffer_mutex);
return -EINVAL;
}
ret = sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
mutex_unlock(&rbi->ring_buffer_mutex);
return ret;
}
static VMBUS_CHAN_ATTR_RO(out_mask);
static ssize_t in_mask_show(struct vmbus_channel *channel, char *buf)
{
struct hv_ring_buffer_info *rbi = &channel->inbound;
ssize_t ret;
mutex_lock(&rbi->ring_buffer_mutex);
if (!rbi->ring_buffer) {
mutex_unlock(&rbi->ring_buffer_mutex);
return -EINVAL;
}
ret = sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
mutex_unlock(&rbi->ring_buffer_mutex);
return ret;
}
static VMBUS_CHAN_ATTR_RO(in_mask);
static ssize_t read_avail_show(struct vmbus_channel *channel, char *buf)
{
struct hv_ring_buffer_info *rbi = &channel->inbound;
ssize_t ret;
mutex_lock(&rbi->ring_buffer_mutex);
if (!rbi->ring_buffer) {
mutex_unlock(&rbi->ring_buffer_mutex);
return -EINVAL;
}
ret = sprintf(buf, "%u\n", hv_get_bytes_to_read(rbi));
mutex_unlock(&rbi->ring_buffer_mutex);
return ret;
}
static VMBUS_CHAN_ATTR_RO(read_avail);
static ssize_t write_avail_show(struct vmbus_channel *channel, char *buf)
{
struct hv_ring_buffer_info *rbi = &channel->outbound;
ssize_t ret;
mutex_lock(&rbi->ring_buffer_mutex);
if (!rbi->ring_buffer) {
mutex_unlock(&rbi->ring_buffer_mutex);
return -EINVAL;
}
ret = sprintf(buf, "%u\n", hv_get_bytes_to_write(rbi));
mutex_unlock(&rbi->ring_buffer_mutex);
return ret;
}
static VMBUS_CHAN_ATTR_RO(write_avail);
static ssize_t target_cpu_show(struct vmbus_channel *channel, char *buf)
{
return sprintf(buf, "%u\n", channel->target_cpu);
}
static ssize_t target_cpu_store(struct vmbus_channel *channel,
const char *buf, size_t count)
{
u32 target_cpu, origin_cpu;
ssize_t ret = count;
if (vmbus_proto_version < VERSION_WIN10_V4_1)
return -EIO;
if (sscanf(buf, "%uu", &target_cpu) != 1)
return -EIO;
/* Validate target_cpu for the cpumask_test_cpu() operation below. */
if (target_cpu >= nr_cpumask_bits)
return -EINVAL;
if (!cpumask_test_cpu(target_cpu, housekeeping_cpumask(HK_TYPE_MANAGED_IRQ)))
return -EINVAL;
/* No CPUs should come up or down during this. */
cpus_read_lock();
if (!cpu_online(target_cpu)) {
cpus_read_unlock();
return -EINVAL;
}
/*
* Synchronizes target_cpu_store() and channel closure:
*
* { Initially: state = CHANNEL_OPENED }
*
* CPU1 CPU2
*
* [target_cpu_store()] [vmbus_disconnect_ring()]
*
* LOCK channel_mutex LOCK channel_mutex
* LOAD r1 = state LOAD r2 = state
* IF (r1 == CHANNEL_OPENED) IF (r2 == CHANNEL_OPENED)
* SEND MODIFYCHANNEL STORE state = CHANNEL_OPEN
* [...] SEND CLOSECHANNEL
* UNLOCK channel_mutex UNLOCK channel_mutex
*
* Forbids: r1 == r2 == CHANNEL_OPENED (i.e., CPU1's LOCK precedes
* CPU2's LOCK) && CPU2's SEND precedes CPU1's SEND
*
* Note. The host processes the channel messages "sequentially", in
* the order in which they are received on a per-partition basis.
*/
mutex_lock(&vmbus_connection.channel_mutex);
/*
* Hyper-V will ignore MODIFYCHANNEL messages for "non-open" channels;
* avoid sending the message and fail here for such channels.
*/
if (channel->state != CHANNEL_OPENED_STATE) {
ret = -EIO;
goto cpu_store_unlock;
}
origin_cpu = channel->target_cpu;
if (target_cpu == origin_cpu)
goto cpu_store_unlock;
if (vmbus_send_modifychannel(channel,
hv_cpu_number_to_vp_number(target_cpu))) {
ret = -EIO;
goto cpu_store_unlock;
}
/*
* For version before VERSION_WIN10_V5_3, the following warning holds:
*
* Warning. At this point, there is *no* guarantee that the host will
* have successfully processed the vmbus_send_modifychannel() request.
* See the header comment of vmbus_send_modifychannel() for more info.
*
* Lags in the processing of the above vmbus_send_modifychannel() can
* result in missed interrupts if the "old" target CPU is taken offline
* before Hyper-V starts sending interrupts to the "new" target CPU.
* But apart from this offlining scenario, the code tolerates such
* lags. It will function correctly even if a channel interrupt comes
* in on a CPU that is different from the channel target_cpu value.
*/
channel->target_cpu = target_cpu;
/* See init_vp_index(). */
if (hv_is_perf_channel(channel))
hv_update_allocated_cpus(origin_cpu, target_cpu);
/* Currently set only for storvsc channels. */
if (channel->change_target_cpu_callback) {
(*channel->change_target_cpu_callback)(channel,
origin_cpu, target_cpu);
}
cpu_store_unlock:
mutex_unlock(&vmbus_connection.channel_mutex);
cpus_read_unlock();
return ret;
}
static VMBUS_CHAN_ATTR(cpu, 0644, target_cpu_show, target_cpu_store);
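/*
 * For illustration only (the device GUID and relid below are placeholders,
 * not values taken from this driver): the "cpu" attribute defined above is
 * exposed per channel under the device's "channels" kset created in
 * vmbus_add_channel_kobj(), so retargeting a channel from userspace is an
 * ordinary sysfs write:
 *
 *	# cat /sys/bus/vmbus/devices/<device-guid>/channels/<relid>/cpu
 *	0
 *	# echo 3 > /sys/bus/vmbus/devices/<device-guid>/channels/<relid>/cpu
 *
 * The write succeeds only if CPU 3 is online, is in the managed-IRQ
 * housekeeping mask, and the channel is in CHANNEL_OPENED_STATE, per the
 * checks in target_cpu_store() above.
 */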
static ssize_t channel_pending_show(struct vmbus_channel *channel,
char *buf)
{
return sprintf(buf, "%d\n",
channel_pending(channel,
vmbus_connection.monitor_pages[1]));
}
static VMBUS_CHAN_ATTR(pending, 0444, channel_pending_show, NULL);
static ssize_t channel_latency_show(struct vmbus_channel *channel,
char *buf)
{
return sprintf(buf, "%d\n",
channel_latency(channel,
vmbus_connection.monitor_pages[1]));
}
static VMBUS_CHAN_ATTR(latency, 0444, channel_latency_show, NULL);
static ssize_t channel_interrupts_show(struct vmbus_channel *channel, char *buf)
{
return sprintf(buf, "%llu\n", channel->interrupts);
}
static VMBUS_CHAN_ATTR(interrupts, 0444, channel_interrupts_show, NULL);
static ssize_t channel_events_show(struct vmbus_channel *channel, char *buf)
{
return sprintf(buf, "%llu\n", channel->sig_events);
}
static VMBUS_CHAN_ATTR(events, 0444, channel_events_show, NULL);
static ssize_t channel_intr_in_full_show(struct vmbus_channel *channel,
char *buf)
{
return sprintf(buf, "%llu\n",
(unsigned long long)channel->intr_in_full);
}
static VMBUS_CHAN_ATTR(intr_in_full, 0444, channel_intr_in_full_show, NULL);
static ssize_t channel_intr_out_empty_show(struct vmbus_channel *channel,
char *buf)
{
return sprintf(buf, "%llu\n",
(unsigned long long)channel->intr_out_empty);
}
static VMBUS_CHAN_ATTR(intr_out_empty, 0444, channel_intr_out_empty_show, NULL);
static ssize_t channel_out_full_first_show(struct vmbus_channel *channel,
char *buf)
{
return sprintf(buf, "%llu\n",
(unsigned long long)channel->out_full_first);
}
static VMBUS_CHAN_ATTR(out_full_first, 0444, channel_out_full_first_show, NULL);
static ssize_t channel_out_full_total_show(struct vmbus_channel *channel,
char *buf)
{
return sprintf(buf, "%llu\n",
(unsigned long long)channel->out_full_total);
}
static VMBUS_CHAN_ATTR(out_full_total, 0444, channel_out_full_total_show, NULL);
static ssize_t subchannel_monitor_id_show(struct vmbus_channel *channel,
char *buf)
{
return sprintf(buf, "%u\n", channel->offermsg.monitorid);
}
static VMBUS_CHAN_ATTR(monitor_id, 0444, subchannel_monitor_id_show, NULL);
static ssize_t subchannel_id_show(struct vmbus_channel *channel,
char *buf)
{
return sprintf(buf, "%u\n",
channel->offermsg.offer.sub_channel_index);
}
static VMBUS_CHAN_ATTR_RO(subchannel_id);
static struct attribute *vmbus_chan_attrs[] = {
&chan_attr_out_mask.attr,
&chan_attr_in_mask.attr,
&chan_attr_read_avail.attr,
&chan_attr_write_avail.attr,
&chan_attr_cpu.attr,
&chan_attr_pending.attr,
&chan_attr_latency.attr,
&chan_attr_interrupts.attr,
&chan_attr_events.attr,
&chan_attr_intr_in_full.attr,
&chan_attr_intr_out_empty.attr,
&chan_attr_out_full_first.attr,
&chan_attr_out_full_total.attr,
&chan_attr_monitor_id.attr,
&chan_attr_subchannel_id.attr,
NULL
};
/*
* Channel-level attribute_group callback function. Returns the permission for
* each attribute, and returns 0 if an attribute is not visible.
*/
static umode_t vmbus_chan_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int idx)
{
const struct vmbus_channel *channel =
container_of(kobj, struct vmbus_channel, kobj);
/* Hide the monitor attributes if the monitor mechanism is not used. */
if (!channel->offermsg.monitor_allocated &&
(attr == &chan_attr_pending.attr ||
attr == &chan_attr_latency.attr ||
attr == &chan_attr_monitor_id.attr))
return 0;
return attr->mode;
}
static struct attribute_group vmbus_chan_group = {
.attrs = vmbus_chan_attrs,
.is_visible = vmbus_chan_attr_is_visible
};
static struct kobj_type vmbus_chan_ktype = {
.sysfs_ops = &vmbus_chan_sysfs_ops,
.release = vmbus_chan_release,
};
/*
* vmbus_add_channel_kobj - setup a sub-directory under device/channels
*/
int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
{
const struct device *device = &dev->device;
struct kobject *kobj = &channel->kobj;
u32 relid = channel->offermsg.child_relid;
int ret;
kobj->kset = dev->channels_kset;
ret = kobject_init_and_add(kobj, &vmbus_chan_ktype, NULL,
"%u", relid);
if (ret) {
kobject_put(kobj);
return ret;
}
ret = sysfs_create_group(kobj, &vmbus_chan_group);
if (ret) {
/*
* The calling functions' error handling paths will clean up the
* empty channel directory.
*/
kobject_put(kobj);
dev_err(device, "Unable to set up channel sysfs files\n");
return ret;
}
kobject_uevent(kobj, KOBJ_ADD);
return 0;
}
/*
* vmbus_remove_channel_attr_group - remove the channel's attribute group
*/
void vmbus_remove_channel_attr_group(struct vmbus_channel *channel)
{
sysfs_remove_group(&channel->kobj, &vmbus_chan_group);
}
/*
* vmbus_device_create - Creates and registers a new child device
* on the vmbus.
*/
struct hv_device *vmbus_device_create(const guid_t *type,
const guid_t *instance,
struct vmbus_channel *channel)
{
struct hv_device *child_device_obj;
child_device_obj = kzalloc(sizeof(struct hv_device), GFP_KERNEL);
if (!child_device_obj) {
pr_err("Unable to allocate device object for child device\n");
return NULL;
}
child_device_obj->channel = channel;
guid_copy(&child_device_obj->dev_type, type);
guid_copy(&child_device_obj->dev_instance, instance);
child_device_obj->vendor_id = PCI_VENDOR_ID_MICROSOFT;
return child_device_obj;
}
/*
* vmbus_device_register - Register the child device
*/
int vmbus_device_register(struct hv_device *child_device_obj)
{
struct kobject *kobj = &child_device_obj->device.kobj;
int ret;
dev_set_name(&child_device_obj->device, "%pUl",
&child_device_obj->channel->offermsg.offer.if_instance);
child_device_obj->device.bus = &hv_bus;
child_device_obj->device.parent = hv_dev;
child_device_obj->device.release = vmbus_device_release;
child_device_obj->device.dma_parms = &child_device_obj->dma_parms;
child_device_obj->device.dma_mask = &child_device_obj->dma_mask;
dma_set_mask(&child_device_obj->device, DMA_BIT_MASK(64));
/*
* Register with the LDM. This will kick off the driver/device
* binding...which will eventually call vmbus_match() and vmbus_probe()
*/
ret = device_register(&child_device_obj->device);
if (ret) {
pr_err("Unable to register child device\n");
put_device(&child_device_obj->device);
return ret;
}
child_device_obj->channels_kset = kset_create_and_add("channels",
NULL, kobj);
if (!child_device_obj->channels_kset) {
ret = -ENOMEM;
goto err_dev_unregister;
}
ret = vmbus_add_channel_kobj(child_device_obj,
child_device_obj->channel);
if (ret) {
pr_err("Unable to register primary channeln");
goto err_kset_unregister;
}
hv_debug_add_dev_dir(child_device_obj);
return 0;
err_kset_unregister:
kset_unregister(child_device_obj->channels_kset);
err_dev_unregister:
device_unregister(&child_device_obj->device);
return ret;
}
/*
* vmbus_device_unregister - Remove the specified child device
* from the vmbus.
*/
void vmbus_device_unregister(struct hv_device *device_obj)
{
pr_debug("child device %s unregistered\n",
dev_name(&device_obj->device));
kset_unregister(device_obj->channels_kset);
/*
* Kick off the process of unregistering the device.
* This will call vmbus_remove() and eventually vmbus_device_release()
*/
device_unregister(&device_obj->device);
}
#ifdef CONFIG_ACPI
/*
* VMBUS is an ACPI-enumerated device. Get the information we
* need from the DSDT.
*/
static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
{
resource_size_t start = 0;
resource_size_t end = 0;
struct resource *new_res;
struct resource **old_res = &hyperv_mmio;
struct resource **prev_res = NULL;
struct resource r;
switch (res->type) {
/*
* "Address" descriptors are for bus windows. Ignore
* "memory" descriptors, which are for registers on
* devices.
*/
case ACPI_RESOURCE_TYPE_ADDRESS32:
start = res->data.address32.address.minimum;
end = res->data.address32.address.maximum;
break;
case ACPI_RESOURCE_TYPE_ADDRESS64:
start = res->data.address64.address.minimum;
end = res->data.address64.address.maximum;
break;
/*
* The IRQ information is needed only on ARM64, which Hyper-V
* sets up in the extended format. IRQ information is present
* on x86/x64 in the non-extended format but it is not used by
* Linux. So don't bother checking for the non-extended format.
*/
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
if (!acpi_dev_resource_interrupt(res, 0, &r)) {
pr_err("Unable to parse Hyper-V ACPI interrupt\n");
return AE_ERROR;
}
/* ARM64 INTID for VMbus */
vmbus_interrupt = res->data.extended_irq.interrupts[0];
/* Linux IRQ number */
vmbus_irq = r.start;
return AE_OK;
default:
/* Unused resource type */
return AE_OK;
}
/*
* Ignore ranges that are below 1MB, as they're not
* necessary or useful here.
*/
if (end < 0x100000)
return AE_OK;
new_res = kzalloc(sizeof(*new_res), GFP_ATOMIC);
if (!new_res)
return AE_NO_MEMORY;
/* If this range overlaps the virtual TPM, truncate it. */
if (end > VTPM_BASE_ADDRESS && start < VTPM_BASE_ADDRESS)
end = VTPM_BASE_ADDRESS;
new_res->name = "hyperv mmio";
new_res->flags = IORESOURCE_MEM;
new_res->start = start;
new_res->end = end;
/*
* If two ranges are adjacent, merge them.
*/
do {
if (!*old_res) {
*old_res = new_res;
break;
}
if (((*old_res)->end + 1) == new_res->start) {
(*old_res)->end = new_res->end;
kfree(new_res);
break;
}
if ((*old_res)->start == new_res->end + 1) {
(*old_res)->start = new_res->start;
kfree(new_res);
break;
}
if ((*old_res)->start > new_res->end) {
new_res->sibling = *old_res;
if (prev_res)
(*prev_res)->sibling = new_res;
*old_res = new_res;
break;
}
prev_res = old_res;
old_res = &(*old_res)->sibling;
} while (1);
return AE_OK;
}
#endif
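/*
 * A worked example of the insertion loop above, with made-up addresses:
 * hyperv_mmio is a singly linked list of resources kept sorted by start
 * address, and adjacent windows are coalesced.  If the list already holds
 * [0x100000-0x1fffff] and the walk reports [0x200000-0x2fffff], the test
 * ((*old_res)->end + 1) == new_res->start matches (0x1fffff + 1 ==
 * 0x200000), so the existing entry simply grows to [0x100000-0x2fffff] and
 * new_res is freed.  A non-adjacent window such as [0x400000-0x4fffff] is
 * instead linked in via the ->sibling pointer at the first position whose
 * start exceeds its end, keeping the list sorted.
 */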
static void vmbus_mmio_remove(void)
{
struct resource *cur_res;
struct resource *next_res;
if (hyperv_mmio) {
if (fb_mmio) {
__release_region(hyperv_mmio, fb_mmio->start,
resource_size(fb_mmio));
fb_mmio = NULL;
}
for (cur_res = hyperv_mmio; cur_res; cur_res = next_res) {
next_res = cur_res->sibling;
kfree(cur_res);
}
}
}
static void __maybe_unused vmbus_reserve_fb(void)
{
resource_size_t start = 0, size;
struct pci_dev *pdev;
if (efi_enabled(EFI_BOOT)) {
/* Gen2 VM: get FB base from EFI framebuffer */
start = screen_info.lfb_base;
size = max_t(__u32, screen_info.lfb_size, 0x800000);
} else {
/* Gen1 VM: get FB base from PCI */
pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
if (!pdev)
return;
if (pdev->resource[0].flags & IORESOURCE_MEM) {
start = pci_resource_start(pdev, 0);
size = pci_resource_len(pdev, 0);
}
/*
* Release the PCI device so hyperv_drm or hyperv_fb driver can
* grab it later.
*/
pci_dev_put(pdev);
}
if (!start)
return;
/*
* Make a claim for the frame buffer in the resource tree under the
* first node, which will be the one below 4GB. The length seems to
* be underreported, particularly in a Generation 1 VM. So start out
* reserving a larger area and make it smaller until it succeeds.
*/
for (; !fb_mmio && (size >= 0x100000); size >>= 1)
fb_mmio = __request_region(hyperv_mmio, start, size, fb_mmio_name, 0);
}
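/*
 * An illustration of the shrink-and-retry loop above, using assumed numbers
 * rather than values from the driver: if a Gen1 VM reports an 8 MiB frame
 * buffer at 0xf8000000, the first __request_region(hyperv_mmio, 0xf8000000,
 * 0x800000, ...) may fail when only part of that window lies inside the
 * VMBus MMIO ranges; the loop then retries with size >>= 1, i.e. 0x400000,
 * 0x200000, ..., and stops either on the first successful reservation or
 * once size drops below 0x100000 (1 MiB).
 */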
/**
* vmbus_allocate_mmio() - Pick a memory-mapped I/O range.
* @new: If successful, supplies a pointer to the
* allocated MMIO space.
* @device_obj: Identifies the caller
* @min: Minimum guest physical address of the
* allocation
* @max: Maximum guest physical address
* @size: Size of the range to be allocated
* @align: Alignment of the range to be allocated
* @fb_overlap_ok: Whether this allocation can be allowed
* to overlap the video frame buffer.
*
* This function walks the resources granted to VMBus by the
* _CRS object in the ACPI namespace underneath the parent
* "bridge" whether that's a root PCI bus in the Generation 1
* case or a Module Device in the Generation 2 case. It then
* attempts to allocate from the global MMIO pool in a way that
* matches the constraints supplied in these parameters and by
* that _CRS.
*
* Return: 0 on success, -errno on failure
*/
int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
resource_size_t min, resource_size_t max,
resource_size_t size, resource_size_t align,
bool fb_overlap_ok)
{
struct resource *iter, *shadow;
resource_size_t range_min, range_max, start, end;
const char *dev_n = dev_name(&device_obj->device);
int retval;
retval = -ENXIO;
mutex_lock(&hyperv_mmio_lock);
/*
* If overlaps with frame buffers are allowed, then first attempt to
* make the allocation from within the reserved region. Because it
* is already reserved, no shadow allocation is necessary.
*/
if (fb_overlap_ok && fb_mmio && !(min > fb_mmio->end) &&
!(max < fb_mmio->start)) {
range_min = fb_mmio->start;
range_max = fb_mmio->end;
start = (range_min + align - 1) & ~(align - 1);
for (; start + size - 1 <= range_max; start += align) {
*new = request_mem_region_exclusive(start, size, dev_n);
if (*new) {
retval = 0;
goto exit;
}
}
}
for (iter = hyperv_mmio; iter; iter = iter->sibling) {
if ((iter->start >= max) || (iter->end <= min))
continue;
range_min = iter->start;
range_max = iter->end;
start = (range_min + align - 1) & ~(align - 1);
for (; start + size - 1 <= range_max; start += align) {
end = start + size - 1;
/* Skip the whole fb_mmio region if not fb_overlap_ok */
if (!fb_overlap_ok && fb_mmio &&
(((start >= fb_mmio->start) && (start <= fb_mmio->end)) ||
((end >= fb_mmio->start) && (end <= fb_mmio->end))))
continue;
shadow = __request_region(iter, start, size, NULL,
IORESOURCE_BUSY);
if (!shadow)
continue;
*new = request_mem_region_exclusive(start, size, dev_n);
if (*new) {
shadow->name = (char *)*new;
retval = 0;
goto exit;
}
__release_region(iter, start, size);
}
}
exit:
mutex_unlock(&hyperv_mmio_lock);
return retval;
}
EXPORT_SYMBOL_GPL(vmbus_allocate_mmio);
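/*
 * A note on the alignment step used twice above (pure arithmetic, assuming
 * align is a power of two, which is the usual case):
 *
 *	start = (range_min + align - 1) & ~(align - 1);
 *
 * rounds range_min up to the next multiple of align.  For example, with
 * range_min = 0xf8001000 and align = 0x100000 (1 MiB), start becomes
 * 0xf8100000; a range_min that is already aligned is left unchanged.  Each
 * subsequent candidate is start += align, so every probe stays aligned.
 */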
/**
* vmbus_free_mmio() - Free a memory-mapped I/O range.
* @start: Base address of region to release.
* @size: Size of the range to be released
*
* This function releases anything requested by
* vmbus_allocate_mmio().
*/
void vmbus_free_mmio(resource_size_t start, resource_size_t size)
{
struct resource *iter;
mutex_lock(&hyperv_mmio_lock);
for (iter = hyperv_mmio; iter; iter = iter->sibling) {
if ((iter->start >= start + size) || (iter->end <= start))
continue;
__release_region(iter, start, size);
}
release_mem_region(start, size);
mutex_unlock(&hyperv_mmio_lock);
}
EXPORT_SYMBOL_GPL(vmbus_free_mmio);
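/*
 * A hypothetical caller pairing vmbus_allocate_mmio() with vmbus_free_mmio(),
 * shown for illustration only (the variable names and sizes are assumptions;
 * real users include the Hyper-V framebuffer/DRM and pci-hyperv drivers):
 *
 *	struct resource *mmio;
 *	int ret;
 *
 *	ret = vmbus_allocate_mmio(&mmio, hdev, 0, -1,
 *				  0x100000,	// 1 MiB window
 *				  0x1000,	// 4 KiB alignment
 *				  false);	// do not overlap the FB
 *	if (ret)
 *		return ret;
 *	// ... ioremap(mmio->start, resource_size(mmio)) and use it ...
 *	vmbus_free_mmio(mmio->start, resource_size(mmio));
 */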
#ifdef CONFIG_ACPI
static int vmbus_acpi_add(struct platform_device *pdev)
{
acpi_status result;
int ret_val = -ENODEV;
struct acpi_device *ancestor;
struct acpi_device *device = ACPI_COMPANION(&pdev->dev);
hv_dev = &device->dev;
/*
* Older versions of Hyper-V for ARM64 fail to include the _CCA
* method on the top level VMbus device in the DSDT. But devices
* are hardware coherent in all current Hyper-V use cases, so fix
* up the ACPI device to behave as if _CCA is present and indicates
* hardware coherence.
*/
ACPI_COMPANION_SET(&device->dev, device);
if (IS_ENABLED(CONFIG_ACPI_CCA_REQUIRED) &&
device_get_dma_attr(&device->dev) == DEV_DMA_NOT_SUPPORTED) {
pr_info("No ACPI _CCA found; assuming coherent device I/O\n");
device->flags.cca_seen = true;
device->flags.coherent_dma = true;
}
result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
vmbus_walk_resources, NULL);
if (ACPI_FAILURE(result))
goto acpi_walk_err;
/*
* Some ancestor of the vmbus acpi device (Gen1 or Gen2
* firmware) is the VMOD that has the mmio ranges. Get that.
*/
for (ancestor = acpi_dev_parent(device);
ancestor && ancestor->handle != ACPI_ROOT_OBJECT;
ancestor = acpi_dev_parent(ancestor)) {
result = acpi_walk_resources(ancestor->handle, METHOD_NAME__CRS,
vmbus_walk_resources, NULL);
if (ACPI_FAILURE(result))
continue;
if (hyperv_mmio) {
vmbus_reserve_fb();
break;
}
}
ret_val = 0;
acpi_walk_err:
if (ret_val)
vmbus_mmio_remove();
return ret_val;
}
#else
static int vmbus_acpi_add(struct platform_device *pdev)
{
return 0;
}
#endif
static int vmbus_device_add(struct platform_device *pdev)
{
struct resource **cur_res = &hyperv_mmio;
struct of_range range;
struct of_range_parser parser;
struct device_node *np = pdev->dev.of_node;
int ret;
hv_dev = &pdev->dev;
ret = of_range_parser_init(&parser, np);
if (ret)
return ret;
for_each_of_range(&parser, &range) {
struct resource *res;
res = kzalloc(sizeof(*res), GFP_KERNEL);
if (!res) {
vmbus_mmio_remove();
return -ENOMEM;
}
res->name = "hyperv mmio";
res->flags = range.flags;
res->start = range.cpu_addr;
res->end = range.cpu_addr + range.size;
*cur_res = res;
cur_res = &res->sibling;
}
return ret;
}
static int vmbus_platform_driver_probe(struct platform_device *pdev)
{
if (acpi_disabled)
return vmbus_device_add(pdev);
else
return vmbus_acpi_add(pdev);
}
static int vmbus_platform_driver_remove(struct platform_device *pdev)
{
vmbus_mmio_remove();
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int vmbus_bus_suspend(struct device *dev)
{
struct hv_per_cpu_context *hv_cpu = per_cpu_ptr(
hv_context.cpu_context, VMBUS_CONNECT_CPU);
struct vmbus_channel *channel, *sc;
tasklet_disable(&hv_cpu->msg_dpc);
vmbus_connection.ignore_any_offer_msg = true;
/* The tasklet_enable() takes care of providing a memory barrier */
tasklet_enable(&hv_cpu->msg_dpc);
/* Drain all the workqueues as we are in suspend */
drain_workqueue(vmbus_connection.rescind_work_queue);
drain_workqueue(vmbus_connection.work_queue);
drain_workqueue(vmbus_connection.handle_primary_chan_wq);
drain_workqueue(vmbus_connection.handle_sub_chan_wq);
mutex_lock(&vmbus_connection.channel_mutex);
list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
if (!is_hvsock_channel(channel))
continue;
vmbus_force_channel_rescinded(channel);
}
mutex_unlock(&vmbus_connection.channel_mutex);
/*
* Wait until all the sub-channels and hv_sock channels have been
* cleaned up. Sub-channels should be destroyed upon suspend, otherwise
* they would conflict with the new sub-channels that will be created
* in the resume path. hv_sock channels should also be destroyed, but
* a hv_sock channel of an established hv_sock connection can not be
* really destroyed since it may still be referenced by the userspace
* application, so we just force the hv_sock channel to be rescinded
* by vmbus_force_channel_rescinded(), and the userspace application
* will thoroughly destroy the channel after hibernation.
*
* Note: the counter nr_chan_close_on_suspend may never go above 0 if
* the VM has no sub-channel and hv_sock channel, e.g. a 1-vCPU VM.
*/
if (atomic_read(&vmbus_connection.nr_chan_close_on_suspend) > 0)
wait_for_completion(&vmbus_connection.ready_for_suspend_event);
if (atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) != 0) {
pr_err("Can not suspend due to a previous failed resuming\n");
return -EBUSY;
}
mutex_lock(&vmbus_connection.channel_mutex);
list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
/*
* Remove the channel from the array of channels and invalidate
* the channel's relid. Upon resume, vmbus_onoffer() will fix
* up the relid (and other fields, if necessary) and add the
* channel back to the array.
*/
vmbus_channel_unmap_relid(channel);
channel->offermsg.child_relid = INVALID_RELID;
if (is_hvsock_channel(channel)) {
if (!channel->rescind) {
pr_err("hv_sock channel not rescinded!\n");
WARN_ON_ONCE(1);
}
continue;
}
list_for_each_entry(sc, &channel->sc_list, sc_list) {
pr_err("Sub-channel not deleted!\n");
WARN_ON_ONCE(1);
}
atomic_inc(&vmbus_connection.nr_chan_fixup_on_resume);
}
mutex_unlock(&vmbus_connection.channel_mutex);
vmbus_initiate_unload(false);
/* Reset the event for the next resume. */
reinit_completion(&vmbus_connection.ready_for_resume_event);
return 0;
}
static int vmbus_bus_resume(struct device *dev)
{
struct vmbus_channel_msginfo *msginfo;
size_t msgsize;
int ret;
vmbus_connection.ignore_any_offer_msg = false;
/*
* We only use the 'vmbus_proto_version', which was in use before
* hibernation, to re-negotiate with the host.
*/
if (!vmbus_proto_version) {
pr_err("Invalid proto version = 0x%x\n", vmbus_proto_version);
return -EINVAL;
}
msgsize = sizeof(*msginfo) +
sizeof(struct vmbus_channel_initiate_contact);
msginfo = kzalloc(msgsize, GFP_KERNEL);
if (msginfo == NULL)
return -ENOMEM;
ret = vmbus_negotiate_version(msginfo, vmbus_proto_version);
kfree(msginfo);
if (ret != 0)
return ret;
WARN_ON(atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) == 0);
vmbus_request_offers();
if (wait_for_completion_timeout(
&vmbus_connection.ready_for_resume_event, 10 * HZ) == 0)
pr_err("Some vmbus device is missing after suspending?\n");
/* Reset the event for the next suspend. */
reinit_completion(&vmbus_connection.ready_for_suspend_event);
return 0;
}
#else
#define vmbus_bus_suspend NULL
#define vmbus_bus_resume NULL
#endif /* CONFIG_PM_SLEEP */
static const __maybe_unused struct of_device_id vmbus_of_match[] = {
{
.compatible = "microsoft,vmbus",
},
{
/* sentinel */
},
};
MODULE_DEVICE_TABLE(of, vmbus_of_match);
static const __maybe_unused struct acpi_device_id vmbus_acpi_device_ids[] = {
{"VMBUS", 0},
{"VMBus", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);
/*
* Note: we must use the "no_irq" ops, otherwise hibernation can not work with
* PCI device assignment, because "pci_dev_pm_ops" uses the "noirq" ops: in
* the resume path, the pci "noirq" restore op runs before "non-noirq" op (see
* resume_target_kernel() -> dpm_resume_start(), and hibernation_restore() ->
* dpm_resume_end()). This means vmbus_bus_resume() and the pci-hyperv's
* resume callback must also run via the "noirq" ops.
*
* Set suspend_noirq/resume_noirq to NULL for Suspend-to-Idle: see the comment
* earlier in this file before vmbus_pm.
*/
static const struct dev_pm_ops vmbus_bus_pm = {
.suspend_noirq = NULL,
.resume_noirq = NULL,
.freeze_noirq = vmbus_bus_suspend,
.thaw_noirq = vmbus_bus_resume,
.poweroff_noirq = vmbus_bus_suspend,
.restore_noirq = vmbus_bus_resume
};
static struct platform_driver vmbus_platform_driver = {
.probe = vmbus_platform_driver_probe,
.remove = vmbus_platform_driver_remove,
.driver = {
.name = "vmbus",
.acpi_match_table = ACPI_PTR(vmbus_acpi_device_ids),
.of_match_table = of_match_ptr(vmbus_of_match),
.pm = &vmbus_bus_pm,
.probe_type = PROBE_FORCE_SYNCHRONOUS,
}
};
static void hv_kexec_handler(void)
{
hv_stimer_global_cleanup();
vmbus_initiate_unload(false);
/* Make sure conn_state is set as hv_synic_cleanup checks for it */
mb();
cpuhp_remove_state(hyperv_cpuhp_online);
};
static void hv_crash_handler(struct pt_regs *regs)
{
int cpu;
vmbus_initiate_unload(true);
/*
* In crash handler we can't schedule synic cleanup for all CPUs,
* doing the cleanup for current CPU only. This should be sufficient
* for kdump.
*/
cpu = smp_processor_id();
hv_stimer_cleanup(cpu);
hv_synic_disable_regs(cpu);
};
static int hv_synic_suspend(void)
{
/*
* When we reach here, all the non-boot CPUs have been offlined.
* If we're in a legacy configuration where stimer Direct Mode is
* not enabled, the stimers on the non-boot CPUs have been unbound
* in hv_synic_cleanup() -> hv_stimer_legacy_cleanup() ->
* hv_stimer_cleanup() -> clockevents_unbind_device().
*
* hv_synic_suspend() only runs on CPU0 with interrupts disabled.
* Here we do not call hv_stimer_legacy_cleanup() on CPU0 because:
* 1) it's unnecessary as interrupts remain disabled between
* syscore_suspend() and syscore_resume(): see create_image() and
* resume_target_kernel()
* 2) the stimer on CPU0 is automatically disabled later by
* syscore_suspend() -> timekeeping_suspend() -> tick_suspend() -> ...
* -> clockevents_shutdown() -> ... -> hv_ce_shutdown()
* 3) a warning would be triggered if we call
* clockevents_unbind_device(), which may sleep, in an
* interrupts-disabled context.
*/
hv_synic_disable_regs(0);
return 0;
}
static void hv_synic_resume(void)
{
hv_synic_enable_regs(0);
/*
* Note: we don't need to call hv_stimer_init(0), because the timer
* on CPU0 is not unbound in hv_synic_suspend(), and the timer is
* automatically re-enabled in timekeeping_resume().
*/
}
/* The callbacks run only on CPU0, with irqs_disabled. */
static struct syscore_ops hv_synic_syscore_ops = {
.suspend = hv_synic_suspend,
.resume = hv_synic_resume,
};
static int __init hv_acpi_init(void)
{
int ret;
if (!hv_is_hyperv_initialized())
return -ENODEV;
if (hv_root_partition && !hv_nested)
return 0;
/*
* Get ACPI resources first.
*/
ret = platform_driver_register(&vmbus_platform_driver);
if (ret)
return ret;
if (!hv_dev) {
ret = -ENODEV;
goto cleanup;
}
/*
* If we're on an architecture with a hardcoded hypervisor
* vector (i.e. x86/x64), override the VMbus interrupt found
* in the ACPI tables. Ensure vmbus_irq is not set since the
* normal Linux IRQ mechanism is not used in this case.
*/
#ifdef HYPERVISOR_CALLBACK_VECTOR
vmbus_interrupt = HYPERVISOR_CALLBACK_VECTOR;
vmbus_irq = -1;
#endif
hv_debug_init();
ret = vmbus_bus_init();
if (ret)
goto cleanup;
hv_setup_kexec_handler(hv_kexec_handler);
hv_setup_crash_handler(hv_crash_handler);
register_syscore_ops(&hv_synic_syscore_ops);
return 0;
cleanup:
platform_driver_unregister(&vmbus_platform_driver);
hv_dev = NULL;
return ret;
}
static void __exit vmbus_exit(void)
{
int cpu;
unregister_syscore_ops(&hv_synic_syscore_ops);
hv_remove_kexec_handler();
hv_remove_crash_handler();
vmbus_connection.conn_state = DISCONNECTED;
hv_stimer_global_cleanup();
vmbus_disconnect();
if (vmbus_irq == -1) {
hv_remove_vmbus_handler();
} else {
free_percpu_irq(vmbus_irq, vmbus_evt);
free_percpu(vmbus_evt);
}
for_each_online_cpu(cpu) {
struct hv_per_cpu_context *hv_cpu
= per_cpu_ptr(hv_context.cpu_context, cpu);
tasklet_kill(&hv_cpu->msg_dpc);
}
hv_debug_rm_all_dir();
vmbus_free_channels();
kfree(vmbus_connection.channels);
/*
* The vmbus panic notifier is always registered, hence we should
* also unconditionally unregister it here as well.
*/
atomic_notifier_chain_unregister(&panic_notifier_list,
&hyperv_panic_vmbus_unload_block);
bus_unregister(&hv_bus);
cpuhp_remove_state(hyperv_cpuhp_online);
hv_synic_free();
platform_driver_unregister(&vmbus_platform_driver);
}
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V VMBus Driver");
subsys_initcall(hv_acpi_init);
module_exit(vmbus_exit);
|
linux-master
|
drivers/hv/vmbus_drv.c
|
// SPDX-License-Identifier: GPL-2.0
#include "hyperv_vmbus.h"
#define CREATE_TRACE_POINTS
#include "hv_trace.h"
|
linux-master
|
drivers/hv/hv_trace.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2010, Microsoft Corporation.
*
* Authors:
* Haiyang Zhang <[email protected]>
* Hank Janssen <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/reboot.h>
#include <linux/hyperv.h>
#include <linux/clockchips.h>
#include <linux/ptp_clock_kernel.h>
#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"
#define SD_MAJOR 3
#define SD_MINOR 0
#define SD_MINOR_1 1
#define SD_MINOR_2 2
#define SD_VERSION_3_1 (SD_MAJOR << 16 | SD_MINOR_1)
#define SD_VERSION_3_2 (SD_MAJOR << 16 | SD_MINOR_2)
#define SD_VERSION (SD_MAJOR << 16 | SD_MINOR)
#define SD_MAJOR_1 1
#define SD_VERSION_1 (SD_MAJOR_1 << 16 | SD_MINOR)
#define TS_MAJOR 4
#define TS_MINOR 0
#define TS_VERSION (TS_MAJOR << 16 | TS_MINOR)
#define TS_MAJOR_1 1
#define TS_VERSION_1 (TS_MAJOR_1 << 16 | TS_MINOR)
#define TS_MAJOR_3 3
#define TS_VERSION_3 (TS_MAJOR_3 << 16 | TS_MINOR)
#define HB_MAJOR 3
#define HB_MINOR 0
#define HB_VERSION (HB_MAJOR << 16 | HB_MINOR)
#define HB_MAJOR_1 1
#define HB_VERSION_1 (HB_MAJOR_1 << 16 | HB_MINOR)
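/*
 * The version encoding above packs the major number into the upper 16 bits
 * and the minor number into the lower 16 bits, so for example
 *
 *	SD_VERSION_3_2 == (3 << 16 | 2) == 0x00030002
 *	TS_VERSION     == (4 << 16 | 0) == 0x00040000
 *
 * and the pr_info() calls later in this file recover the two halves with
 * "version >> 16" and "version & 0xFFFF".
 */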
static int sd_srv_version;
static int ts_srv_version;
static int hb_srv_version;
#define SD_VER_COUNT 4
static const int sd_versions[] = {
SD_VERSION_3_2,
SD_VERSION_3_1,
SD_VERSION,
SD_VERSION_1
};
#define TS_VER_COUNT 3
static const int ts_versions[] = {
TS_VERSION,
TS_VERSION_3,
TS_VERSION_1
};
#define HB_VER_COUNT 2
static const int hb_versions[] = {
HB_VERSION,
HB_VERSION_1
};
#define FW_VER_COUNT 2
static const int fw_versions[] = {
UTIL_FW_VERSION,
UTIL_WS2K8_FW_VERSION
};
/*
* Send the "hibernate" udev event in a thread context.
*/
struct hibernate_work_context {
struct work_struct work;
struct hv_device *dev;
};
static struct hibernate_work_context hibernate_context;
static bool hibernation_supported;
static void send_hibernate_uevent(struct work_struct *work)
{
char *uevent_env[2] = { "EVENT=hibernate", NULL };
struct hibernate_work_context *ctx;
ctx = container_of(work, struct hibernate_work_context, work);
kobject_uevent_env(&ctx->dev->device.kobj, KOBJ_CHANGE, uevent_env);
pr_info("Sent hibernation uevent\n");
}
static int hv_shutdown_init(struct hv_util_service *srv)
{
struct vmbus_channel *channel = srv->channel;
INIT_WORK(&hibernate_context.work, send_hibernate_uevent);
hibernate_context.dev = channel->device_obj;
hibernation_supported = hv_is_hibernation_supported();
return 0;
}
static void shutdown_onchannelcallback(void *context);
static struct hv_util_service util_shutdown = {
.util_cb = shutdown_onchannelcallback,
.util_init = hv_shutdown_init,
};
static int hv_timesync_init(struct hv_util_service *srv);
static int hv_timesync_pre_suspend(void);
static void hv_timesync_deinit(void);
static void timesync_onchannelcallback(void *context);
static struct hv_util_service util_timesynch = {
.util_cb = timesync_onchannelcallback,
.util_init = hv_timesync_init,
.util_pre_suspend = hv_timesync_pre_suspend,
.util_deinit = hv_timesync_deinit,
};
static void heartbeat_onchannelcallback(void *context);
static struct hv_util_service util_heartbeat = {
.util_cb = heartbeat_onchannelcallback,
};
static struct hv_util_service util_kvp = {
.util_cb = hv_kvp_onchannelcallback,
.util_init = hv_kvp_init,
.util_pre_suspend = hv_kvp_pre_suspend,
.util_pre_resume = hv_kvp_pre_resume,
.util_deinit = hv_kvp_deinit,
};
static struct hv_util_service util_vss = {
.util_cb = hv_vss_onchannelcallback,
.util_init = hv_vss_init,
.util_pre_suspend = hv_vss_pre_suspend,
.util_pre_resume = hv_vss_pre_resume,
.util_deinit = hv_vss_deinit,
};
static struct hv_util_service util_fcopy = {
.util_cb = hv_fcopy_onchannelcallback,
.util_init = hv_fcopy_init,
.util_pre_suspend = hv_fcopy_pre_suspend,
.util_pre_resume = hv_fcopy_pre_resume,
.util_deinit = hv_fcopy_deinit,
};
static void perform_shutdown(struct work_struct *dummy)
{
orderly_poweroff(true);
}
static void perform_restart(struct work_struct *dummy)
{
orderly_reboot();
}
/*
* Perform the shutdown operation in a thread context.
*/
static DECLARE_WORK(shutdown_work, perform_shutdown);
/*
* Perform the restart operation in a thread context.
*/
static DECLARE_WORK(restart_work, perform_restart);
static void shutdown_onchannelcallback(void *context)
{
struct vmbus_channel *channel = context;
struct work_struct *work = NULL;
u32 recvlen;
u64 requestid;
u8 *shut_txf_buf = util_shutdown.recv_buffer;
struct shutdown_msg_data *shutdown_msg;
struct icmsg_hdr *icmsghdrp;
if (vmbus_recvpacket(channel, shut_txf_buf, HV_HYP_PAGE_SIZE, &recvlen, &requestid)) {
pr_err_ratelimited("Shutdown request received. Could not read into shut txf buf\n");
return;
}
if (!recvlen)
return;
/* Ensure recvlen is big enough to read header data */
if (recvlen < ICMSG_HDR) {
pr_err_ratelimited("Shutdown request received. Packet length too small: %d\n",
recvlen);
return;
}
icmsghdrp = (struct icmsg_hdr *)&shut_txf_buf[sizeof(struct vmbuspipe_hdr)];
if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
if (vmbus_prep_negotiate_resp(icmsghdrp,
shut_txf_buf, recvlen,
fw_versions, FW_VER_COUNT,
sd_versions, SD_VER_COUNT,
NULL, &sd_srv_version)) {
pr_info("Shutdown IC version %d.%d\n",
sd_srv_version >> 16,
sd_srv_version & 0xFFFF);
}
} else if (icmsghdrp->icmsgtype == ICMSGTYPE_SHUTDOWN) {
/* Ensure recvlen is big enough to contain shutdown_msg_data struct */
if (recvlen < ICMSG_HDR + sizeof(struct shutdown_msg_data)) {
pr_err_ratelimited("Invalid shutdown msg data. Packet length too small: %u\n",
recvlen);
return;
}
shutdown_msg = (struct shutdown_msg_data *)&shut_txf_buf[ICMSG_HDR];
/*
* shutdown_msg->flags can be 0(shut down), 2(reboot),
* or 4(hibernate). It may bitwise-OR 1, which means
* performing the request by force. Linux always tries
* to perform the request by force.
*/
switch (shutdown_msg->flags) {
case 0:
case 1:
icmsghdrp->status = HV_S_OK;
work = &shutdown_work;
pr_info("Shutdown request received - graceful shutdown initiated\n");
break;
case 2:
case 3:
icmsghdrp->status = HV_S_OK;
work = &restart_work;
pr_info("Restart request received - graceful restart initiated\n");
break;
case 4:
case 5:
pr_info("Hibernation request received\n");
icmsghdrp->status = hibernation_supported ?
HV_S_OK : HV_E_FAIL;
if (hibernation_supported)
work = &hibernate_context.work;
break;
default:
icmsghdrp->status = HV_E_FAIL;
pr_info("Shutdown request received - Invalid request\n");
break;
}
} else {
icmsghdrp->status = HV_E_FAIL;
pr_err_ratelimited("Shutdown request received. Invalid msg type: %d\n",
icmsghdrp->icmsgtype);
}
icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
| ICMSGHDRFLAG_RESPONSE;
vmbus_sendpacket(channel, shut_txf_buf,
recvlen, requestid,
VM_PKT_DATA_INBAND, 0);
if (work)
schedule_work(work);
}
/*
* Set the host time in a process context.
*/
static struct work_struct adj_time_work;
/*
* The last time sample, received from the host. PTP device responds to
* requests by using this data and the current partition-wide time reference
* count.
*/
static struct {
u64 host_time;
u64 ref_time;
spinlock_t lock;
} host_ts;
static inline u64 reftime_to_ns(u64 reftime)
{
return (reftime - WLTIMEDELTA) * 100;
}
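/*
 * Background on the conversion above: Hyper-V reference time is counted in
 * 100 ns units since the Windows epoch (1601-01-01), and WLTIMEDELTA
 * (defined elsewhere in the Hyper-V headers) is the 1601->1970 offset in
 * those same units, i.e. 11644473600 s * 10^7 = 116444736000000000.
 * Subtracting it rebases the value to the Unix epoch, and the "* 100"
 * scales 100 ns ticks to nanoseconds.  As a worked example, a reftime of
 * WLTIMEDELTA + 10000000 (one second past the Unix epoch) maps to
 * 1000000000 ns.
 */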
/*
* Hard coded threshold for host timesync delay: 600 seconds
*/
static const u64 HOST_TIMESYNC_DELAY_THRESH = 600 * (u64)NSEC_PER_SEC;
static int hv_get_adj_host_time(struct timespec64 *ts)
{
u64 newtime, reftime, timediff_adj;
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&host_ts.lock, flags);
reftime = hv_read_reference_counter();
/*
* We need to let the caller know that last update from host
* is older than the max allowable threshold. clock_gettime()
* and PTP ioctl do not have a documented error that we could
* return for this specific case. Use ESTALE to report this.
*/
timediff_adj = reftime - host_ts.ref_time;
if (timediff_adj * 100 > HOST_TIMESYNC_DELAY_THRESH) {
pr_warn_once("TIMESYNC IC: Stale time stamp, %llu nsecs old\n",
(timediff_adj * 100));
ret = -ESTALE;
}
newtime = host_ts.host_time + timediff_adj;
*ts = ns_to_timespec64(reftime_to_ns(newtime));
spin_unlock_irqrestore(&host_ts.lock, flags);
return ret;
}
static void hv_set_host_time(struct work_struct *work)
{
struct timespec64 ts;
if (!hv_get_adj_host_time(&ts))
do_settimeofday64(&ts);
}
/*
* Synchronize time with host after reboot, restore, etc.
*
* ICTIMESYNCFLAG_SYNC flag bit indicates reboot, restore events of the VM.
* After reboot the flag ICTIMESYNCFLAG_SYNC is included in the first time
* message after the timesync channel is opened. Since the hv_utils module is
* loaded after hv_vmbus, the first message is usually missed. This bit is
* considered a hard request to discipline the clock.
*
* ICTIMESYNCFLAG_SAMPLE bit indicates a time sample from host. This is
* typically used as a hint to the guest. The guest is under no obligation
* to discipline the clock.
*/
static inline void adj_guesttime(u64 hosttime, u64 reftime, u8 adj_flags)
{
unsigned long flags;
u64 cur_reftime;
/*
* Save the adjusted time sample from the host and the snapshot
* of the current system time.
*/
spin_lock_irqsave(&host_ts.lock, flags);
cur_reftime = hv_read_reference_counter();
host_ts.host_time = hosttime;
host_ts.ref_time = cur_reftime;
/*
* TimeSync v4 messages contain reference time (guest's Hyper-V
* clocksource read when the time sample was generated), we can
* improve the precision by adding the delta between now and the
* time of generation. For older protocols we set
* reftime == cur_reftime on call.
*/
host_ts.host_time += (cur_reftime - reftime);
spin_unlock_irqrestore(&host_ts.lock, flags);
/* Schedule work to do do_settimeofday64() */
if (adj_flags & ICTIMESYNCFLAG_SYNC)
schedule_work(&adj_time_work);
}
/*
* Time Sync Channel message handler.
*/
static void timesync_onchannelcallback(void *context)
{
struct vmbus_channel *channel = context;
u32 recvlen;
u64 requestid;
struct icmsg_hdr *icmsghdrp;
struct ictimesync_data *timedatap;
struct ictimesync_ref_data *refdata;
u8 *time_txf_buf = util_timesynch.recv_buffer;
/*
* Drain the ring buffer and use the last packet to update
* host_ts
*/
while (1) {
int ret = vmbus_recvpacket(channel, time_txf_buf,
HV_HYP_PAGE_SIZE, &recvlen,
&requestid);
if (ret) {
pr_err_ratelimited("TimeSync IC pkt recv failed (Err: %d)\n",
ret);
break;
}
if (!recvlen)
break;
/* Ensure recvlen is big enough to read header data */
if (recvlen < ICMSG_HDR) {
pr_err_ratelimited("Timesync request received. Packet length too small: %d\n",
recvlen);
break;
}
icmsghdrp = (struct icmsg_hdr *)&time_txf_buf[
sizeof(struct vmbuspipe_hdr)];
if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
if (vmbus_prep_negotiate_resp(icmsghdrp,
time_txf_buf, recvlen,
fw_versions, FW_VER_COUNT,
ts_versions, TS_VER_COUNT,
NULL, &ts_srv_version)) {
pr_info("TimeSync IC version %d.%d\n",
ts_srv_version >> 16,
ts_srv_version & 0xFFFF);
}
} else if (icmsghdrp->icmsgtype == ICMSGTYPE_TIMESYNC) {
if (ts_srv_version > TS_VERSION_3) {
/* Ensure recvlen is big enough to read ictimesync_ref_data */
if (recvlen < ICMSG_HDR + sizeof(struct ictimesync_ref_data)) {
pr_err_ratelimited("Invalid ictimesync ref data. Length too small: %u\n",
recvlen);
break;
}
refdata = (struct ictimesync_ref_data *)&time_txf_buf[ICMSG_HDR];
adj_guesttime(refdata->parenttime,
refdata->vmreferencetime,
refdata->flags);
} else {
/* Ensure recvlen is big enough to read ictimesync_data */
if (recvlen < ICMSG_HDR + sizeof(struct ictimesync_data)) {
pr_err_ratelimited("Invalid ictimesync data. Length too small: %u\n",
recvlen);
break;
}
timedatap = (struct ictimesync_data *)&time_txf_buf[ICMSG_HDR];
adj_guesttime(timedatap->parenttime,
hv_read_reference_counter(),
timedatap->flags);
}
} else {
icmsghdrp->status = HV_E_FAIL;
pr_err_ratelimited("Timesync request received. Invalid msg type: %d\n",
icmsghdrp->icmsgtype);
}
icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
| ICMSGHDRFLAG_RESPONSE;
vmbus_sendpacket(channel, time_txf_buf,
recvlen, requestid,
VM_PKT_DATA_INBAND, 0);
}
}
/*
* Heartbeat functionality.
* Every two seconds, Hyper-V sends us a heartbeat request message.
* We respond to this message, and Hyper-V knows we are alive.
*/
static void heartbeat_onchannelcallback(void *context)
{
struct vmbus_channel *channel = context;
u32 recvlen;
u64 requestid;
struct icmsg_hdr *icmsghdrp;
struct heartbeat_msg_data *heartbeat_msg;
u8 *hbeat_txf_buf = util_heartbeat.recv_buffer;
while (1) {
if (vmbus_recvpacket(channel, hbeat_txf_buf, HV_HYP_PAGE_SIZE,
&recvlen, &requestid)) {
pr_err_ratelimited("Heartbeat request received. Could not read into hbeat txf buf\n");
return;
}
if (!recvlen)
break;
/* Ensure recvlen is big enough to read header data */
if (recvlen < ICMSG_HDR) {
pr_err_ratelimited("Heartbeat request received. Packet length too small: %d\n",
recvlen);
break;
}
icmsghdrp = (struct icmsg_hdr *)&hbeat_txf_buf[
sizeof(struct vmbuspipe_hdr)];
if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
if (vmbus_prep_negotiate_resp(icmsghdrp,
hbeat_txf_buf, recvlen,
fw_versions, FW_VER_COUNT,
hb_versions, HB_VER_COUNT,
NULL, &hb_srv_version)) {
pr_info("Heartbeat IC version %d.%d\n",
hb_srv_version >> 16,
hb_srv_version & 0xFFFF);
}
} else if (icmsghdrp->icmsgtype == ICMSGTYPE_HEARTBEAT) {
/*
* Ensure recvlen is big enough to read seq_num. Reserved area is not
* included in the check as the host may not fill it up entirely
*/
if (recvlen < ICMSG_HDR + sizeof(u64)) {
pr_err_ratelimited("Invalid heartbeat msg data. Length too small: %u\n",
recvlen);
break;
}
heartbeat_msg = (struct heartbeat_msg_data *)&hbeat_txf_buf[ICMSG_HDR];
heartbeat_msg->seq_num += 1;
} else {
icmsghdrp->status = HV_E_FAIL;
pr_err_ratelimited("Heartbeat request received. Invalid msg type: %d\n",
icmsghdrp->icmsgtype);
}
icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
| ICMSGHDRFLAG_RESPONSE;
vmbus_sendpacket(channel, hbeat_txf_buf,
recvlen, requestid,
VM_PKT_DATA_INBAND, 0);
}
}
#define HV_UTIL_RING_SEND_SIZE VMBUS_RING_SIZE(3 * HV_HYP_PAGE_SIZE)
#define HV_UTIL_RING_RECV_SIZE VMBUS_RING_SIZE(3 * HV_HYP_PAGE_SIZE)
static int util_probe(struct hv_device *dev,
const struct hv_vmbus_device_id *dev_id)
{
struct hv_util_service *srv =
(struct hv_util_service *)dev_id->driver_data;
int ret;
srv->recv_buffer = kmalloc(HV_HYP_PAGE_SIZE * 4, GFP_KERNEL);
if (!srv->recv_buffer)
return -ENOMEM;
srv->channel = dev->channel;
if (srv->util_init) {
ret = srv->util_init(srv);
if (ret) {
ret = -ENODEV;
goto error1;
}
}
/*
* The set of services managed by the util driver are not performance
* critical and do not need batched reading. Furthermore, some services
* such as KVP can only handle one message from the host at a time.
* Turn off batched reading for all util drivers before we open the
* channel.
*/
set_channel_read_mode(dev->channel, HV_CALL_DIRECT);
hv_set_drvdata(dev, srv);
ret = vmbus_open(dev->channel, HV_UTIL_RING_SEND_SIZE,
HV_UTIL_RING_RECV_SIZE, NULL, 0, srv->util_cb,
dev->channel);
if (ret)
goto error;
return 0;
error:
if (srv->util_deinit)
srv->util_deinit();
error1:
kfree(srv->recv_buffer);
return ret;
}
static void util_remove(struct hv_device *dev)
{
struct hv_util_service *srv = hv_get_drvdata(dev);
if (srv->util_deinit)
srv->util_deinit();
vmbus_close(dev->channel);
kfree(srv->recv_buffer);
}
/*
* When we're in util_suspend(), all the userspace processes have been frozen
* (refer to hibernate() -> freeze_processes()). The userspace is thawed only
* after the whole resume procedure, including util_resume(), finishes.
*/
static int util_suspend(struct hv_device *dev)
{
struct hv_util_service *srv = hv_get_drvdata(dev);
int ret = 0;
if (srv->util_pre_suspend) {
ret = srv->util_pre_suspend();
if (ret)
return ret;
}
vmbus_close(dev->channel);
return 0;
}
static int util_resume(struct hv_device *dev)
{
struct hv_util_service *srv = hv_get_drvdata(dev);
int ret = 0;
if (srv->util_pre_resume) {
ret = srv->util_pre_resume();
if (ret)
return ret;
}
ret = vmbus_open(dev->channel, HV_UTIL_RING_SEND_SIZE,
HV_UTIL_RING_RECV_SIZE, NULL, 0, srv->util_cb,
dev->channel);
return ret;
}
static const struct hv_vmbus_device_id id_table[] = {
/* Shutdown guid */
{ HV_SHUTDOWN_GUID,
.driver_data = (unsigned long)&util_shutdown
},
/* Time synch guid */
{ HV_TS_GUID,
.driver_data = (unsigned long)&util_timesynch
},
/* Heartbeat guid */
{ HV_HEART_BEAT_GUID,
.driver_data = (unsigned long)&util_heartbeat
},
/* KVP guid */
{ HV_KVP_GUID,
.driver_data = (unsigned long)&util_kvp
},
/* VSS GUID */
{ HV_VSS_GUID,
.driver_data = (unsigned long)&util_vss
},
/* File copy GUID */
{ HV_FCOPY_GUID,
.driver_data = (unsigned long)&util_fcopy
},
{ },
};
MODULE_DEVICE_TABLE(vmbus, id_table);
/* The one and only one */
static struct hv_driver util_drv = {
.name = "hv_utils",
.id_table = id_table,
.probe = util_probe,
.remove = util_remove,
.suspend = util_suspend,
.resume = util_resume,
.driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
static int hv_ptp_enable(struct ptp_clock_info *info,
struct ptp_clock_request *request, int on)
{
return -EOPNOTSUPP;
}
static int hv_ptp_settime(struct ptp_clock_info *p, const struct timespec64 *ts)
{
return -EOPNOTSUPP;
}
static int hv_ptp_adjfine(struct ptp_clock_info *ptp, long delta)
{
return -EOPNOTSUPP;
}
static int hv_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
return -EOPNOTSUPP;
}
static int hv_ptp_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
{
return hv_get_adj_host_time(ts);
}
static struct ptp_clock_info ptp_hyperv_info = {
.name = "hyperv",
.enable = hv_ptp_enable,
.adjtime = hv_ptp_adjtime,
.adjfine = hv_ptp_adjfine,
.gettime64 = hv_ptp_gettime,
.settime64 = hv_ptp_settime,
.owner = THIS_MODULE,
};
static struct ptp_clock *hv_ptp_clock;
static int hv_timesync_init(struct hv_util_service *srv)
{
spin_lock_init(&host_ts.lock);
INIT_WORK(&adj_time_work, hv_set_host_time);
/*
* ptp_clock_register() returns NULL when CONFIG_PTP_1588_CLOCK is
* disabled but the driver is still useful without the PTP device
* as it still handles the ICTIMESYNCFLAG_SYNC case.
*/
hv_ptp_clock = ptp_clock_register(&ptp_hyperv_info, NULL);
if (IS_ERR_OR_NULL(hv_ptp_clock)) {
pr_err("cannot register PTP clock: %d\n",
PTR_ERR_OR_ZERO(hv_ptp_clock));
hv_ptp_clock = NULL;
}
return 0;
}
static void hv_timesync_cancel_work(void)
{
cancel_work_sync(&adj_time_work);
}
static int hv_timesync_pre_suspend(void)
{
hv_timesync_cancel_work();
return 0;
}
static void hv_timesync_deinit(void)
{
if (hv_ptp_clock)
ptp_clock_unregister(hv_ptp_clock);
hv_timesync_cancel_work();
}
static int __init init_hyperv_utils(void)
{
pr_info("Registering HyperV Utility Driver\n");
return vmbus_driver_register(&util_drv);
}
static void exit_hyperv_utils(void)
{
pr_info("De-Registered HyperV Utility Driver\n");
vmbus_driver_unregister(&util_drv);
}
module_init(init_hyperv_utils);
module_exit(exit_hyperv_utils);
MODULE_DESCRIPTION("Hyper-V Utilities");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/hv/hv_util.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2009, Microsoft Corporation.
*
* Authors:
* Haiyang Zhang <[email protected]>
* Hank Janssen <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/interrupt.h>
#include <linux/set_memory.h>
#include <asm/page.h>
#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"
/*
* hv_gpadl_size - Return the real size of a gpadl, the size that Hyper-V uses
*
* For BUFFER gpadl, Hyper-V uses the exact same size as the guest does.
*
* For RING gpadl, in each ring, the guest uses one PAGE_SIZE as the header
* (because of the alignment requirement), however, the hypervisor only
* uses the first HV_HYP_PAGE_SIZE as the header, therefore leaving a
* (PAGE_SIZE - HV_HYP_PAGE_SIZE) gap. And since there are two rings in a
* ringbuffer, the total size for a RING gpadl that Hyper-V uses is the
* total size that the guest uses minus twice the gap size.
*/
static inline u32 hv_gpadl_size(enum hv_gpadl_type type, u32 size)
{
switch (type) {
case HV_GPADL_BUFFER:
return size;
case HV_GPADL_RING:
/* The size of a ringbuffer must be page-aligned */
BUG_ON(size % PAGE_SIZE);
/*
* Two things to notice here:
* 1) We're processing two ring buffers as a unit
* 2) We're skipping any space larger than HV_HYP_PAGE_SIZE in
* the first guest-size page of each of the two ring buffers.
* So we effectively subtract out two guest-size pages, and add
* back two Hyper-V size pages.
*/
return size - 2 * (PAGE_SIZE - HV_HYP_PAGE_SIZE);
}
BUG();
return 0;
}
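/*
 * A worked example for hv_gpadl_size(), using assumed page sizes (e.g. an
 * ARM64 guest with 64 KiB PAGE_SIZE and the fixed 4 KiB HV_HYP_PAGE_SIZE):
 * a RING gpadl whose guest-side size is 256 KiB (two rings, each one guest
 * header page plus 64 KiB of data) is reported to Hyper-V as
 *
 *	256 KiB - 2 * (64 KiB - 4 KiB) = 136 KiB
 *
 * because the hypervisor only consumes the first 4 KiB of each ring's
 * header page.  With 4 KiB guest pages the gap is zero and the two sizes
 * are identical.
 */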
/*
* hv_ring_gpadl_send_hvpgoffset - Calculate the send offset (in unit of
* HV_HYP_PAGE) in a ring gpadl based on the
* offset in the guest
*
* @offset: the offset (in bytes) where the send ringbuffer starts in the
* virtual address space of the guest
*/
static inline u32 hv_ring_gpadl_send_hvpgoffset(u32 offset)
{
/*
* For RING gpadl, in each ring, the guest uses one PAGE_SIZE as the
* header (because of the alignment requirement), however, the
* hypervisor only uses the first HV_HYP_PAGE_SIZE as the header,
* therefore leaving a (PAGE_SIZE - HV_HYP_PAGE_SIZE) gap.
*
* And to calculate the effective send offset in gpadl, we need to
* subtract this gap.
*/
return (offset - (PAGE_SIZE - HV_HYP_PAGE_SIZE)) >> HV_HYP_PAGE_SHIFT;
}
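/*
 * A worked example for the offset conversion above, with the same assumed
 * page sizes (64 KiB guest pages, 4 KiB Hyper-V pages): with
 * send_offset = 128 KiB, only one (PAGE_SIZE - HV_HYP_PAGE_SIZE) = 60 KiB
 * gap -- the unused tail of the first ring's header page -- lies in front
 * of it, so the result is (128 KiB - 60 KiB) >> HV_HYP_PAGE_SHIFT =
 * 68 KiB / 4 KiB = Hyper-V page index 17.  hv_gpadl_hvpfn() below uses the
 * same index to decide how many gaps to skip for each page.
 */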
/*
* hv_gpadl_hvpfn - Return the Hyper-V page PFN of the @i th Hyper-V page in
* the gpadl
*
* @type: the type of the gpadl
* @kbuffer: the pointer to the gpadl in the guest
* @size: the total size (in bytes) of the gpadl
* @send_offset: the offset (in bytes) where the send ringbuffer starts in the
* virtual address space of the guest
* @i: the index
*/
static inline u64 hv_gpadl_hvpfn(enum hv_gpadl_type type, void *kbuffer,
u32 size, u32 send_offset, int i)
{
int send_idx = hv_ring_gpadl_send_hvpgoffset(send_offset);
unsigned long delta = 0UL;
switch (type) {
case HV_GPADL_BUFFER:
break;
case HV_GPADL_RING:
if (i == 0)
delta = 0;
else if (i <= send_idx)
delta = PAGE_SIZE - HV_HYP_PAGE_SIZE;
else
delta = 2 * (PAGE_SIZE - HV_HYP_PAGE_SIZE);
break;
default:
BUG();
break;
}
return virt_to_hvpfn(kbuffer + delta + (HV_HYP_PAGE_SIZE * i));
}
/*
* vmbus_setevent- Trigger an event notification on the specified
* channel.
*/
void vmbus_setevent(struct vmbus_channel *channel)
{
struct hv_monitor_page *monitorpage;
trace_vmbus_setevent(channel);
/*
* For channels marked as in "low latency" mode
* bypass the monitor page mechanism.
*/
if (channel->offermsg.monitor_allocated && !channel->low_latency) {
vmbus_send_interrupt(channel->offermsg.child_relid);
/* Get the child to parent monitor page */
monitorpage = vmbus_connection.monitor_pages[1];
sync_set_bit(channel->monitor_bit,
(unsigned long *)&monitorpage->trigger_group
[channel->monitor_grp].pending);
} else {
vmbus_set_event(channel);
}
}
EXPORT_SYMBOL_GPL(vmbus_setevent);
/* vmbus_free_ring - drop mapping of ring buffer */
void vmbus_free_ring(struct vmbus_channel *channel)
{
hv_ringbuffer_cleanup(&channel->outbound);
hv_ringbuffer_cleanup(&channel->inbound);
if (channel->ringbuffer_page) {
__free_pages(channel->ringbuffer_page,
get_order(channel->ringbuffer_pagecount
<< PAGE_SHIFT));
channel->ringbuffer_page = NULL;
}
}
EXPORT_SYMBOL_GPL(vmbus_free_ring);
/* vmbus_alloc_ring - allocate and map pages for ring buffer */
int vmbus_alloc_ring(struct vmbus_channel *newchannel,
u32 send_size, u32 recv_size)
{
struct page *page;
int order;
if (send_size % PAGE_SIZE || recv_size % PAGE_SIZE)
return -EINVAL;
/* Allocate the ring buffer */
order = get_order(send_size + recv_size);
page = alloc_pages_node(cpu_to_node(newchannel->target_cpu),
GFP_KERNEL|__GFP_ZERO, order);
if (!page)
page = alloc_pages(GFP_KERNEL|__GFP_ZERO, order);
if (!page)
return -ENOMEM;
newchannel->ringbuffer_page = page;
newchannel->ringbuffer_pagecount = (send_size + recv_size) >> PAGE_SHIFT;
newchannel->ringbuffer_send_offset = send_size >> PAGE_SHIFT;
return 0;
}
EXPORT_SYMBOL_GPL(vmbus_alloc_ring);
/* Used for Hyper-V Socket: a guest client's connect() to the host */
int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id,
const guid_t *shv_host_servie_id)
{
struct vmbus_channel_tl_connect_request conn_msg;
int ret;
memset(&conn_msg, 0, sizeof(conn_msg));
conn_msg.header.msgtype = CHANNELMSG_TL_CONNECT_REQUEST;
conn_msg.guest_endpoint_id = *shv_guest_servie_id;
conn_msg.host_service_id = *shv_host_servie_id;
ret = vmbus_post_msg(&conn_msg, sizeof(conn_msg), true);
trace_vmbus_send_tl_connect_request(&conn_msg, ret);
return ret;
}
EXPORT_SYMBOL_GPL(vmbus_send_tl_connect_request);
static int send_modifychannel_without_ack(struct vmbus_channel *channel, u32 target_vp)
{
struct vmbus_channel_modifychannel msg;
int ret;
memset(&msg, 0, sizeof(msg));
msg.header.msgtype = CHANNELMSG_MODIFYCHANNEL;
msg.child_relid = channel->offermsg.child_relid;
msg.target_vp = target_vp;
ret = vmbus_post_msg(&msg, sizeof(msg), true);
trace_vmbus_send_modifychannel(&msg, ret);
return ret;
}
static int send_modifychannel_with_ack(struct vmbus_channel *channel, u32 target_vp)
{
struct vmbus_channel_modifychannel *msg;
struct vmbus_channel_msginfo *info;
unsigned long flags;
int ret;
info = kzalloc(sizeof(struct vmbus_channel_msginfo) +
sizeof(struct vmbus_channel_modifychannel),
GFP_KERNEL);
if (!info)
return -ENOMEM;
init_completion(&info->waitevent);
info->waiting_channel = channel;
msg = (struct vmbus_channel_modifychannel *)info->msg;
msg->header.msgtype = CHANNELMSG_MODIFYCHANNEL;
msg->child_relid = channel->offermsg.child_relid;
msg->target_vp = target_vp;
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_add_tail(&info->msglistentry, &vmbus_connection.chn_msg_list);
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
ret = vmbus_post_msg(msg, sizeof(*msg), true);
trace_vmbus_send_modifychannel(msg, ret);
if (ret != 0) {
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_del(&info->msglistentry);
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
goto free_info;
}
/*
* Release channel_mutex; otherwise, vmbus_onoffer_rescind() could block on
* the mutex and be unable to signal the completion.
*
* See the caller target_cpu_store() for information about the usage of the
* mutex.
*/
mutex_unlock(&vmbus_connection.channel_mutex);
wait_for_completion(&info->waitevent);
mutex_lock(&vmbus_connection.channel_mutex);
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_del(&info->msglistentry);
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
if (info->response.modify_response.status)
ret = -EAGAIN;
free_info:
kfree(info);
return ret;
}
/*
* Set/change the vCPU (@target_vp) the channel (@child_relid) will interrupt.
*
* CHANNELMSG_MODIFYCHANNEL messages are asynchronous. When VMbus version 5.3
* or later is negotiated, Hyper-V always sends an ACK in response to such a
* message. For VMbus version 5.2 and earlier, it never sends an ACK. Without
* an ACK, we cannot know when the host will stop interrupting the "old"
* vCPU and start interrupting the "new" vCPU for the given channel.
*
* The CHANNELMSG_MODIFYCHANNEL message type is supported since VMBus version
* VERSION_WIN10_V4_1.
*/
int vmbus_send_modifychannel(struct vmbus_channel *channel, u32 target_vp)
{
if (vmbus_proto_version >= VERSION_WIN10_V5_3)
return send_modifychannel_with_ack(channel, target_vp);
return send_modifychannel_without_ack(channel, target_vp);
}
EXPORT_SYMBOL_GPL(vmbus_send_modifychannel);
/*
* create_gpadl_header - Creates a gpadl for the specified buffer
*/
static int create_gpadl_header(enum hv_gpadl_type type, void *kbuffer,
u32 size, u32 send_offset,
struct vmbus_channel_msginfo **msginfo)
{
int i;
int pagecount;
struct vmbus_channel_gpadl_header *gpadl_header;
struct vmbus_channel_gpadl_body *gpadl_body;
struct vmbus_channel_msginfo *msgheader;
struct vmbus_channel_msginfo *msgbody = NULL;
u32 msgsize;
int pfnsum, pfncount, pfnleft, pfncurr, pfnsize;
pagecount = hv_gpadl_size(type, size) >> HV_HYP_PAGE_SHIFT;
/* do we need a gpadl body msg */
pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
sizeof(struct vmbus_channel_gpadl_header) -
sizeof(struct gpa_range);
pfncount = pfnsize / sizeof(u64);
if (pagecount > pfncount) {
/* we need a gpadl body */
/* fill in the header */
msgsize = sizeof(struct vmbus_channel_msginfo) +
sizeof(struct vmbus_channel_gpadl_header) +
sizeof(struct gpa_range) + pfncount * sizeof(u64);
msgheader = kzalloc(msgsize, GFP_KERNEL);
if (!msgheader)
goto nomem;
INIT_LIST_HEAD(&msgheader->submsglist);
msgheader->msgsize = msgsize;
gpadl_header = (struct vmbus_channel_gpadl_header *)
msgheader->msg;
gpadl_header->rangecount = 1;
gpadl_header->range_buflen = sizeof(struct gpa_range) +
pagecount * sizeof(u64);
gpadl_header->range[0].byte_offset = 0;
gpadl_header->range[0].byte_count = hv_gpadl_size(type, size);
for (i = 0; i < pfncount; i++)
gpadl_header->range[0].pfn_array[i] = hv_gpadl_hvpfn(
type, kbuffer, size, send_offset, i);
*msginfo = msgheader;
pfnsum = pfncount;
pfnleft = pagecount - pfncount;
/* how many pfns can we fit */
pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
sizeof(struct vmbus_channel_gpadl_body);
pfncount = pfnsize / sizeof(u64);
/* fill in the body */
while (pfnleft) {
if (pfnleft > pfncount)
pfncurr = pfncount;
else
pfncurr = pfnleft;
msgsize = sizeof(struct vmbus_channel_msginfo) +
sizeof(struct vmbus_channel_gpadl_body) +
pfncurr * sizeof(u64);
msgbody = kzalloc(msgsize, GFP_KERNEL);
if (!msgbody) {
struct vmbus_channel_msginfo *pos = NULL;
struct vmbus_channel_msginfo *tmp = NULL;
/*
* Free up all the allocated messages.
*/
list_for_each_entry_safe(pos, tmp,
&msgheader->submsglist,
msglistentry) {
list_del(&pos->msglistentry);
kfree(pos);
}
goto nomem;
}
msgbody->msgsize = msgsize;
gpadl_body =
(struct vmbus_channel_gpadl_body *)msgbody->msg;
/*
			 * Gpadl is u32 and we are using a pointer which could
			 * be 64-bit. This is governed by the guest/host protocol
			 * and so the hypervisor guarantees that this is ok.
* so the hypervisor guarantees that this is ok.
*/
for (i = 0; i < pfncurr; i++)
gpadl_body->pfn[i] = hv_gpadl_hvpfn(type,
kbuffer, size, send_offset, pfnsum + i);
/* add to msg header */
list_add_tail(&msgbody->msglistentry,
&msgheader->submsglist);
pfnsum += pfncurr;
pfnleft -= pfncurr;
}
} else {
/* everything fits in a header */
msgsize = sizeof(struct vmbus_channel_msginfo) +
sizeof(struct vmbus_channel_gpadl_header) +
sizeof(struct gpa_range) + pagecount * sizeof(u64);
msgheader = kzalloc(msgsize, GFP_KERNEL);
if (msgheader == NULL)
goto nomem;
INIT_LIST_HEAD(&msgheader->submsglist);
msgheader->msgsize = msgsize;
gpadl_header = (struct vmbus_channel_gpadl_header *)
msgheader->msg;
gpadl_header->rangecount = 1;
gpadl_header->range_buflen = sizeof(struct gpa_range) +
pagecount * sizeof(u64);
gpadl_header->range[0].byte_offset = 0;
gpadl_header->range[0].byte_count = hv_gpadl_size(type, size);
for (i = 0; i < pagecount; i++)
gpadl_header->range[0].pfn_array[i] = hv_gpadl_hvpfn(
type, kbuffer, size, send_offset, i);
*msginfo = msgheader;
}
return 0;
nomem:
kfree(msgheader);
kfree(msgbody);
return -ENOMEM;
}
/*
* __vmbus_establish_gpadl - Establish a GPADL for a buffer or ringbuffer
*
* @channel: a channel
* @type: the type of the corresponding GPADL, only meaningful for the guest.
* @kbuffer: from kmalloc or vmalloc
* @size: page-size multiple
* @send_offset: the offset (in bytes) where the send ring buffer starts,
* should be 0 for BUFFER type gpadl
 * @gpadl: pointer to the vmbus_gpadl to be filled in with the new GPADL handle
*/
static int __vmbus_establish_gpadl(struct vmbus_channel *channel,
enum hv_gpadl_type type, void *kbuffer,
u32 size, u32 send_offset,
struct vmbus_gpadl *gpadl)
{
struct vmbus_channel_gpadl_header *gpadlmsg;
struct vmbus_channel_gpadl_body *gpadl_body;
struct vmbus_channel_msginfo *msginfo = NULL;
struct vmbus_channel_msginfo *submsginfo, *tmp;
struct list_head *curr;
u32 next_gpadl_handle;
unsigned long flags;
int ret = 0;
next_gpadl_handle =
(atomic_inc_return(&vmbus_connection.next_gpadl_handle) - 1);
ret = create_gpadl_header(type, kbuffer, size, send_offset, &msginfo);
if (ret)
return ret;
ret = set_memory_decrypted((unsigned long)kbuffer,
PFN_UP(size));
if (ret) {
dev_warn(&channel->device_obj->device,
"Failed to set host visibility for new GPADL %d.\n",
ret);
return ret;
}
init_completion(&msginfo->waitevent);
msginfo->waiting_channel = channel;
gpadlmsg = (struct vmbus_channel_gpadl_header *)msginfo->msg;
gpadlmsg->header.msgtype = CHANNELMSG_GPADL_HEADER;
gpadlmsg->child_relid = channel->offermsg.child_relid;
gpadlmsg->gpadl = next_gpadl_handle;
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_add_tail(&msginfo->msglistentry,
&vmbus_connection.chn_msg_list);
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
if (channel->rescind) {
ret = -ENODEV;
goto cleanup;
}
ret = vmbus_post_msg(gpadlmsg, msginfo->msgsize -
sizeof(*msginfo), true);
trace_vmbus_establish_gpadl_header(gpadlmsg, ret);
if (ret != 0)
goto cleanup;
list_for_each(curr, &msginfo->submsglist) {
submsginfo = (struct vmbus_channel_msginfo *)curr;
gpadl_body =
(struct vmbus_channel_gpadl_body *)submsginfo->msg;
gpadl_body->header.msgtype =
CHANNELMSG_GPADL_BODY;
gpadl_body->gpadl = next_gpadl_handle;
ret = vmbus_post_msg(gpadl_body,
submsginfo->msgsize - sizeof(*submsginfo),
true);
trace_vmbus_establish_gpadl_body(gpadl_body, ret);
if (ret != 0)
goto cleanup;
}
wait_for_completion(&msginfo->waitevent);
if (msginfo->response.gpadl_created.creation_status != 0) {
pr_err("Failed to establish GPADL: err = 0x%x\n",
msginfo->response.gpadl_created.creation_status);
ret = -EDQUOT;
goto cleanup;
}
if (channel->rescind) {
ret = -ENODEV;
goto cleanup;
}
/* At this point, we received the gpadl created msg */
gpadl->gpadl_handle = gpadlmsg->gpadl;
gpadl->buffer = kbuffer;
gpadl->size = size;
cleanup:
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_del(&msginfo->msglistentry);
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
list_for_each_entry_safe(submsginfo, tmp, &msginfo->submsglist,
msglistentry) {
kfree(submsginfo);
}
kfree(msginfo);
if (ret)
set_memory_encrypted((unsigned long)kbuffer,
PFN_UP(size));
return ret;
}
/*
* vmbus_establish_gpadl - Establish a GPADL for the specified buffer
*
* @channel: a channel
* @kbuffer: from kmalloc or vmalloc
* @size: page-size multiple
 * @gpadl: pointer to the vmbus_gpadl to be filled in with the new GPADL handle
*/
int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
u32 size, struct vmbus_gpadl *gpadl)
{
return __vmbus_establish_gpadl(channel, HV_GPADL_BUFFER, kbuffer, size,
0U, gpadl);
}
EXPORT_SYMBOL_GPL(vmbus_establish_gpadl);
/**
* request_arr_init - Allocates memory for the requestor array. Each slot
* keeps track of the next available slot in the array. Initially, each
* slot points to the next one (as in a Linked List). The last slot
* does not point to anything, so its value is U64_MAX by default.
 * @size: The size of the array
*/
static u64 *request_arr_init(u32 size)
{
int i;
u64 *req_arr;
req_arr = kcalloc(size, sizeof(u64), GFP_KERNEL);
if (!req_arr)
return NULL;
for (i = 0; i < size - 1; i++)
req_arr[i] = i + 1;
/* Last slot (no more available slots) */
req_arr[i] = U64_MAX;
return req_arr;
}
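/*
 * Illustrative sketch (not part of this driver): for size == 4 the array
 * built above is { 1, 2, 3, U64_MAX }, i.e. each free slot stores the index
 * of the next free slot and the last free slot stores U64_MAX. Allocating a
 * slot saves req_arr[slot] as the new free-list head and overwrites the slot
 * with the caller's address; freeing reverses that, exactly as
 * vmbus_next_request_id() and __vmbus_request_addr_match() do below.
 */
#if 0	/* example only, never compiled */
static void request_arr_example(void)
{
	u64 *arr = request_arr_init(4);		/* { 1, 2, 3, U64_MAX } */
	u64 head = 0;				/* index 0 is the first free slot */
	u64 slot;

	if (!arr)
		return;

	slot = head;
	head = arr[slot];			/* head becomes 1 */
	arr[slot] = 0xdeadbeef;			/* hypothetical guest address */

	arr[slot] = head;			/* free slot 0 again ... */
	head = slot;				/* ... and make it the head */
	kfree(arr);
}
#endif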
/*
* vmbus_alloc_requestor - Initializes @rqstor's fields.
* Index 0 is the first free slot
* @size: Size of the requestor array
*/
static int vmbus_alloc_requestor(struct vmbus_requestor *rqstor, u32 size)
{
u64 *rqst_arr;
unsigned long *bitmap;
rqst_arr = request_arr_init(size);
if (!rqst_arr)
return -ENOMEM;
bitmap = bitmap_zalloc(size, GFP_KERNEL);
if (!bitmap) {
kfree(rqst_arr);
return -ENOMEM;
}
rqstor->req_arr = rqst_arr;
rqstor->req_bitmap = bitmap;
rqstor->size = size;
rqstor->next_request_id = 0;
spin_lock_init(&rqstor->req_lock);
return 0;
}
/*
* vmbus_free_requestor - Frees memory allocated for @rqstor
* @rqstor: Pointer to the requestor struct
*/
static void vmbus_free_requestor(struct vmbus_requestor *rqstor)
{
kfree(rqstor->req_arr);
bitmap_free(rqstor->req_bitmap);
}
static int __vmbus_open(struct vmbus_channel *newchannel,
void *userdata, u32 userdatalen,
void (*onchannelcallback)(void *context), void *context)
{
struct vmbus_channel_open_channel *open_msg;
struct vmbus_channel_msginfo *open_info = NULL;
struct page *page = newchannel->ringbuffer_page;
u32 send_pages, recv_pages;
unsigned long flags;
int err;
if (userdatalen > MAX_USER_DEFINED_BYTES)
return -EINVAL;
send_pages = newchannel->ringbuffer_send_offset;
recv_pages = newchannel->ringbuffer_pagecount - send_pages;
if (newchannel->state != CHANNEL_OPEN_STATE)
return -EINVAL;
/* Create and init requestor */
if (newchannel->rqstor_size) {
if (vmbus_alloc_requestor(&newchannel->requestor, newchannel->rqstor_size))
return -ENOMEM;
}
newchannel->state = CHANNEL_OPENING_STATE;
newchannel->onchannel_callback = onchannelcallback;
newchannel->channel_callback_context = context;
if (!newchannel->max_pkt_size)
newchannel->max_pkt_size = VMBUS_DEFAULT_MAX_PKT_SIZE;
/* Establish the gpadl for the ring buffer */
newchannel->ringbuffer_gpadlhandle.gpadl_handle = 0;
err = __vmbus_establish_gpadl(newchannel, HV_GPADL_RING,
page_address(newchannel->ringbuffer_page),
(send_pages + recv_pages) << PAGE_SHIFT,
newchannel->ringbuffer_send_offset << PAGE_SHIFT,
&newchannel->ringbuffer_gpadlhandle);
if (err)
goto error_clean_ring;
err = hv_ringbuffer_init(&newchannel->outbound,
page, send_pages, 0);
if (err)
goto error_free_gpadl;
err = hv_ringbuffer_init(&newchannel->inbound, &page[send_pages],
recv_pages, newchannel->max_pkt_size);
if (err)
goto error_free_gpadl;
/* Create and init the channel open message */
open_info = kzalloc(sizeof(*open_info) +
sizeof(struct vmbus_channel_open_channel),
GFP_KERNEL);
if (!open_info) {
err = -ENOMEM;
goto error_free_gpadl;
}
init_completion(&open_info->waitevent);
open_info->waiting_channel = newchannel;
open_msg = (struct vmbus_channel_open_channel *)open_info->msg;
open_msg->header.msgtype = CHANNELMSG_OPENCHANNEL;
open_msg->openid = newchannel->offermsg.child_relid;
open_msg->child_relid = newchannel->offermsg.child_relid;
open_msg->ringbuffer_gpadlhandle
= newchannel->ringbuffer_gpadlhandle.gpadl_handle;
/*
* The unit of ->downstream_ringbuffer_pageoffset is HV_HYP_PAGE and
* the unit of ->ringbuffer_send_offset (i.e. send_pages) is PAGE, so
* here we calculate it into HV_HYP_PAGE.
*/
open_msg->downstream_ringbuffer_pageoffset =
hv_ring_gpadl_send_hvpgoffset(send_pages << PAGE_SHIFT);
open_msg->target_vp = hv_cpu_number_to_vp_number(newchannel->target_cpu);
if (userdatalen)
memcpy(open_msg->userdata, userdata, userdatalen);
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_add_tail(&open_info->msglistentry,
&vmbus_connection.chn_msg_list);
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
if (newchannel->rescind) {
err = -ENODEV;
goto error_clean_msglist;
}
err = vmbus_post_msg(open_msg,
sizeof(struct vmbus_channel_open_channel), true);
trace_vmbus_open(open_msg, err);
if (err != 0)
goto error_clean_msglist;
wait_for_completion(&open_info->waitevent);
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_del(&open_info->msglistentry);
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
if (newchannel->rescind) {
err = -ENODEV;
goto error_free_info;
}
if (open_info->response.open_result.status) {
err = -EAGAIN;
goto error_free_info;
}
newchannel->state = CHANNEL_OPENED_STATE;
kfree(open_info);
return 0;
error_clean_msglist:
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_del(&open_info->msglistentry);
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
error_free_info:
kfree(open_info);
error_free_gpadl:
vmbus_teardown_gpadl(newchannel, &newchannel->ringbuffer_gpadlhandle);
error_clean_ring:
hv_ringbuffer_cleanup(&newchannel->outbound);
hv_ringbuffer_cleanup(&newchannel->inbound);
vmbus_free_requestor(&newchannel->requestor);
newchannel->state = CHANNEL_OPEN_STATE;
return err;
}
/*
* vmbus_connect_ring - Open the channel but reuse ring buffer
*/
int vmbus_connect_ring(struct vmbus_channel *newchannel,
void (*onchannelcallback)(void *context), void *context)
{
return __vmbus_open(newchannel, NULL, 0, onchannelcallback, context);
}
EXPORT_SYMBOL_GPL(vmbus_connect_ring);
/*
* vmbus_open - Open the specified channel.
*/
int vmbus_open(struct vmbus_channel *newchannel,
u32 send_ringbuffer_size, u32 recv_ringbuffer_size,
void *userdata, u32 userdatalen,
void (*onchannelcallback)(void *context), void *context)
{
int err;
err = vmbus_alloc_ring(newchannel, send_ringbuffer_size,
recv_ringbuffer_size);
if (err)
return err;
err = __vmbus_open(newchannel, userdata, userdatalen,
onchannelcallback, context);
if (err)
vmbus_free_ring(newchannel);
return err;
}
EXPORT_SYMBOL_GPL(vmbus_open);
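/*
 * Illustrative sketch (not part of this file): a probe-style helper showing
 * how a VMbus device driver typically opens its channel. The callback, the
 * ring-buffer sizes and the helper names are hypothetical; only
 * vmbus_open()/vmbus_close() are the real API.
 */
#if 0	/* example only, never compiled */
static void example_onchannelcallback(void *context)
{
	struct vmbus_channel *channel = context;

	/* drain the inbound ring of @channel here, e.g. with vmbus_recvpacket() */
}

static int example_open_channel(struct hv_device *dev)
{
	int ret;

	ret = vmbus_open(dev->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE,
			 NULL, 0, example_onchannelcallback, dev->channel);
	if (ret)
		return ret;

	/* on teardown: vmbus_close(dev->channel); */
	return 0;
}
#endif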
/*
* vmbus_teardown_gpadl -Teardown the specified GPADL handle
*/
int vmbus_teardown_gpadl(struct vmbus_channel *channel, struct vmbus_gpadl *gpadl)
{
struct vmbus_channel_gpadl_teardown *msg;
struct vmbus_channel_msginfo *info;
unsigned long flags;
int ret;
info = kzalloc(sizeof(*info) +
sizeof(struct vmbus_channel_gpadl_teardown), GFP_KERNEL);
if (!info)
return -ENOMEM;
init_completion(&info->waitevent);
info->waiting_channel = channel;
msg = (struct vmbus_channel_gpadl_teardown *)info->msg;
msg->header.msgtype = CHANNELMSG_GPADL_TEARDOWN;
msg->child_relid = channel->offermsg.child_relid;
msg->gpadl = gpadl->gpadl_handle;
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_add_tail(&info->msglistentry,
&vmbus_connection.chn_msg_list);
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
if (channel->rescind)
goto post_msg_err;
ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_gpadl_teardown),
true);
trace_vmbus_teardown_gpadl(msg, ret);
if (ret)
goto post_msg_err;
wait_for_completion(&info->waitevent);
gpadl->gpadl_handle = 0;
post_msg_err:
/*
	 * If the channel has been rescinded, we will be awakened
	 * by the rescind handler; set the error code to zero so
	 * we don't leak memory.
*/
if (channel->rescind)
ret = 0;
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_del(&info->msglistentry);
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
kfree(info);
ret = set_memory_encrypted((unsigned long)gpadl->buffer,
PFN_UP(gpadl->size));
if (ret)
		pr_warn("Failed to set mem host visibility in GPADL teardown %d.\n", ret);
return ret;
}
EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl);
void vmbus_reset_channel_cb(struct vmbus_channel *channel)
{
unsigned long flags;
/*
* vmbus_on_event(), running in the per-channel tasklet, can race
* with vmbus_close_internal() in the case of SMP guest, e.g., when
* the former is accessing channel->inbound.ring_buffer, the latter
* could be freeing the ring_buffer pages, so here we must stop it
* first.
*
* vmbus_chan_sched() might call the netvsc driver callback function
* that ends up scheduling NAPI work that accesses the ring buffer.
* At this point, we have to ensure that any such work is completed
* and that the channel ring buffer is no longer being accessed, cf.
* the calls to napi_disable() in netvsc_device_remove().
*/
tasklet_disable(&channel->callback_event);
/* See the inline comments in vmbus_chan_sched(). */
spin_lock_irqsave(&channel->sched_lock, flags);
channel->onchannel_callback = NULL;
spin_unlock_irqrestore(&channel->sched_lock, flags);
channel->sc_creation_callback = NULL;
/* Re-enable tasklet for use on re-open */
tasklet_enable(&channel->callback_event);
}
static int vmbus_close_internal(struct vmbus_channel *channel)
{
struct vmbus_channel_close_channel *msg;
int ret;
vmbus_reset_channel_cb(channel);
/*
* In case a device driver's probe() fails (e.g.,
* util_probe() -> vmbus_open() returns -ENOMEM) and the device is
* rescinded later (e.g., we dynamically disable an Integrated Service
* in Hyper-V Manager), the driver's remove() invokes vmbus_close():
* here we should skip most of the below cleanup work.
*/
if (channel->state != CHANNEL_OPENED_STATE)
return -EINVAL;
channel->state = CHANNEL_OPEN_STATE;
/* Send a closing message */
msg = &channel->close_msg.msg;
msg->header.msgtype = CHANNELMSG_CLOSECHANNEL;
msg->child_relid = channel->offermsg.child_relid;
ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel),
true);
trace_vmbus_close_internal(msg, ret);
if (ret) {
pr_err("Close failed: close post msg return is %d\n", ret);
/*
* If we failed to post the close msg,
* it is perhaps better to leak memory.
*/
}
/* Tear down the gpadl for the channel's ring buffer */
else if (channel->ringbuffer_gpadlhandle.gpadl_handle) {
ret = vmbus_teardown_gpadl(channel, &channel->ringbuffer_gpadlhandle);
if (ret) {
pr_err("Close failed: teardown gpadl return %d\n", ret);
/*
* If we failed to teardown gpadl,
* it is perhaps better to leak memory.
*/
}
}
if (!ret)
vmbus_free_requestor(&channel->requestor);
return ret;
}
/* disconnect ring - close all channels */
int vmbus_disconnect_ring(struct vmbus_channel *channel)
{
struct vmbus_channel *cur_channel, *tmp;
int ret;
if (channel->primary_channel != NULL)
return -EINVAL;
list_for_each_entry_safe(cur_channel, tmp, &channel->sc_list, sc_list) {
if (cur_channel->rescind)
wait_for_completion(&cur_channel->rescind_event);
mutex_lock(&vmbus_connection.channel_mutex);
if (vmbus_close_internal(cur_channel) == 0) {
vmbus_free_ring(cur_channel);
if (cur_channel->rescind)
hv_process_channel_removal(cur_channel);
}
mutex_unlock(&vmbus_connection.channel_mutex);
}
/*
* Now close the primary.
*/
mutex_lock(&vmbus_connection.channel_mutex);
ret = vmbus_close_internal(channel);
mutex_unlock(&vmbus_connection.channel_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(vmbus_disconnect_ring);
/*
* vmbus_close - Close the specified channel
*/
void vmbus_close(struct vmbus_channel *channel)
{
if (vmbus_disconnect_ring(channel) == 0)
vmbus_free_ring(channel);
}
EXPORT_SYMBOL_GPL(vmbus_close);
/**
* vmbus_sendpacket_getid() - Send the specified buffer on the given channel
* @channel: Pointer to vmbus_channel structure
* @buffer: Pointer to the buffer you want to send the data from.
* @bufferlen: Maximum size of what the buffer holds.
* @requestid: Identifier of the request
* @trans_id: Identifier of the transaction associated to this request, if
* the send is successful; undefined, otherwise.
* @type: Type of packet that is being sent e.g. negotiate, time
* packet etc.
* @flags: 0 or VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED
*
* Sends data in @buffer directly to Hyper-V via the vmbus.
* This will send the data unparsed to Hyper-V.
*
* Mainly used by Hyper-V drivers.
*/
int vmbus_sendpacket_getid(struct vmbus_channel *channel, void *buffer,
u32 bufferlen, u64 requestid, u64 *trans_id,
enum vmbus_packet_type type, u32 flags)
{
struct vmpacket_descriptor desc;
u32 packetlen = sizeof(struct vmpacket_descriptor) + bufferlen;
u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
struct kvec bufferlist[3];
u64 aligned_data = 0;
int num_vecs = ((bufferlen != 0) ? 3 : 1);
/* Setup the descriptor */
desc.type = type; /* VmbusPacketTypeDataInBand; */
desc.flags = flags; /* VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED; */
/* in 8-bytes granularity */
desc.offset8 = sizeof(struct vmpacket_descriptor) >> 3;
desc.len8 = (u16)(packetlen_aligned >> 3);
desc.trans_id = VMBUS_RQST_ERROR; /* will be updated in hv_ringbuffer_write() */
bufferlist[0].iov_base = &desc;
bufferlist[0].iov_len = sizeof(struct vmpacket_descriptor);
bufferlist[1].iov_base = buffer;
bufferlist[1].iov_len = bufferlen;
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
return hv_ringbuffer_write(channel, bufferlist, num_vecs, requestid, trans_id);
}
EXPORT_SYMBOL(vmbus_sendpacket_getid);
/**
* vmbus_sendpacket() - Send the specified buffer on the given channel
* @channel: Pointer to vmbus_channel structure
* @buffer: Pointer to the buffer you want to send the data from.
* @bufferlen: Maximum size of what the buffer holds.
* @requestid: Identifier of the request
* @type: Type of packet that is being sent e.g. negotiate, time
* packet etc.
* @flags: 0 or VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED
*
* Sends data in @buffer directly to Hyper-V via the vmbus.
* This will send the data unparsed to Hyper-V.
*
* Mainly used by Hyper-V drivers.
*/
int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
u32 bufferlen, u64 requestid,
enum vmbus_packet_type type, u32 flags)
{
return vmbus_sendpacket_getid(channel, buffer, bufferlen,
requestid, NULL, type, flags);
}
EXPORT_SYMBOL(vmbus_sendpacket);
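/*
 * Illustrative sketch (not part of this file): sending a small in-band
 * message and requesting a completion. "struct example_msg" and the request
 * cookie are hypothetical; the packet type and flag are the ones used
 * throughout this file.
 */
#if 0	/* example only, never compiled */
static int example_send(struct vmbus_channel *channel)
{
	struct example_msg { u32 op; u32 arg; } msg = { .op = 1, .arg = 2 };

	return vmbus_sendpacket(channel, &msg, sizeof(msg),
				(unsigned long)&msg,	/* requestid cookie */
				VM_PKT_DATA_INBAND,
				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
}
#endif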
/*
* vmbus_sendpacket_pagebuffer - Send a range of single-page buffer
* packets using a GPADL Direct packet type. This interface allows you
* to control notifying the host. This will be useful for sending
* batched data. Also the sender can control the send flags
* explicitly.
*/
int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
struct hv_page_buffer pagebuffers[],
u32 pagecount, void *buffer, u32 bufferlen,
u64 requestid)
{
int i;
struct vmbus_channel_packet_page_buffer desc;
u32 descsize;
u32 packetlen;
u32 packetlen_aligned;
struct kvec bufferlist[3];
u64 aligned_data = 0;
if (pagecount > MAX_PAGE_BUFFER_COUNT)
return -EINVAL;
/*
* Adjust the size down since vmbus_channel_packet_page_buffer is the
* largest size we support
*/
descsize = sizeof(struct vmbus_channel_packet_page_buffer) -
((MAX_PAGE_BUFFER_COUNT - pagecount) *
sizeof(struct hv_page_buffer));
packetlen = descsize + bufferlen;
packetlen_aligned = ALIGN(packetlen, sizeof(u64));
/* Setup the descriptor */
desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
desc.length8 = (u16)(packetlen_aligned >> 3);
desc.transactionid = VMBUS_RQST_ERROR; /* will be updated in hv_ringbuffer_write() */
desc.reserved = 0;
desc.rangecount = pagecount;
for (i = 0; i < pagecount; i++) {
desc.range[i].len = pagebuffers[i].len;
desc.range[i].offset = pagebuffers[i].offset;
desc.range[i].pfn = pagebuffers[i].pfn;
}
bufferlist[0].iov_base = &desc;
bufferlist[0].iov_len = descsize;
bufferlist[1].iov_base = buffer;
bufferlist[1].iov_len = bufferlen;
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
return hv_ringbuffer_write(channel, bufferlist, 3, requestid, NULL);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer);
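/*
 * Illustrative sketch (not part of this file): describing one data page by
 * GPA and sending it together with a small in-band header. The header struct
 * is hypothetical and the buffer is assumed to be directly mapped and to fit
 * within a single page; the hv_page_buffer fields (pfn, offset, len) are the
 * ones consumed above.
 */
#if 0	/* example only, never compiled */
static int example_send_page(struct vmbus_channel *channel, void *data,
			     u32 len, u64 requestid)
{
	struct hv_page_buffer pb[1];
	struct { u32 op; } hdr = { .op = 1 };	/* hypothetical header */

	pb[0].pfn = virt_to_phys(data) >> PAGE_SHIFT;
	pb[0].offset = offset_in_page(data);
	pb[0].len = len;

	return vmbus_sendpacket_pagebuffer(channel, pb, 1,
					   &hdr, sizeof(hdr), requestid);
}
#endif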
/*
* vmbus_sendpacket_multipagebuffer - Send a multi-page buffer packet
* using a GPADL Direct packet type.
* The buffer includes the vmbus descriptor.
*/
int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
struct vmbus_packet_mpb_array *desc,
u32 desc_size,
void *buffer, u32 bufferlen, u64 requestid)
{
u32 packetlen;
u32 packetlen_aligned;
struct kvec bufferlist[3];
u64 aligned_data = 0;
packetlen = desc_size + bufferlen;
packetlen_aligned = ALIGN(packetlen, sizeof(u64));
/* Setup the descriptor */
desc->type = VM_PKT_DATA_USING_GPA_DIRECT;
desc->flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
desc->dataoffset8 = desc_size >> 3; /* in 8-bytes granularity */
desc->length8 = (u16)(packetlen_aligned >> 3);
desc->transactionid = VMBUS_RQST_ERROR; /* will be updated in hv_ringbuffer_write() */
desc->reserved = 0;
desc->rangecount = 1;
bufferlist[0].iov_base = desc;
bufferlist[0].iov_len = desc_size;
bufferlist[1].iov_base = buffer;
bufferlist[1].iov_len = bufferlen;
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
return hv_ringbuffer_write(channel, bufferlist, 3, requestid, NULL);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);
/**
* __vmbus_recvpacket() - Retrieve the user packet on the specified channel
* @channel: Pointer to vmbus_channel structure
* @buffer: Pointer to the buffer you want to receive the data into.
* @bufferlen: Maximum size of what the buffer can hold.
* @buffer_actual_len: The actual size of the data after it was received.
* @requestid: Identifier of the request
* @raw: true means keep the vmpacket_descriptor header in the received data.
*
* Receives directly from the hyper-v vmbus and puts the data it received
* into Buffer. This will receive the data unparsed from hyper-v.
*
* Mainly used by Hyper-V drivers.
*/
static inline int
__vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
u32 bufferlen, u32 *buffer_actual_len, u64 *requestid,
bool raw)
{
return hv_ringbuffer_read(channel, buffer, bufferlen,
buffer_actual_len, requestid, raw);
}
int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
u32 bufferlen, u32 *buffer_actual_len,
u64 *requestid)
{
return __vmbus_recvpacket(channel, buffer, bufferlen,
buffer_actual_len, requestid, false);
}
EXPORT_SYMBOL(vmbus_recvpacket);
/*
* vmbus_recvpacket_raw - Retrieve the raw packet on the specified channel
*/
int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
u32 bufferlen, u32 *buffer_actual_len,
u64 *requestid)
{
return __vmbus_recvpacket(channel, buffer, bufferlen,
buffer_actual_len, requestid, true);
}
EXPORT_SYMBOL_GPL(vmbus_recvpacket_raw);
/*
 * vmbus_next_request_id - Returns a new request id; the guest memory
 * address is stored in the requestor array at index (id - 1).
 * Uses a spin lock to avoid race conditions.
 * @channel: Pointer to the VMbus channel struct
 * @rqst_addr: Guest memory address to be stored in the array
*/
u64 vmbus_next_request_id(struct vmbus_channel *channel, u64 rqst_addr)
{
struct vmbus_requestor *rqstor = &channel->requestor;
unsigned long flags;
u64 current_id;
/* Check rqstor has been initialized */
if (!channel->rqstor_size)
return VMBUS_NO_RQSTOR;
lock_requestor(channel, flags);
current_id = rqstor->next_request_id;
/* Requestor array is full */
if (current_id >= rqstor->size) {
unlock_requestor(channel, flags);
return VMBUS_RQST_ERROR;
}
rqstor->next_request_id = rqstor->req_arr[current_id];
rqstor->req_arr[current_id] = rqst_addr;
/* The already held spin lock provides atomicity */
bitmap_set(rqstor->req_bitmap, current_id, 1);
unlock_requestor(channel, flags);
/*
* Cannot return an ID of 0, which is reserved for an unsolicited
* message from Hyper-V; Hyper-V does not acknowledge (respond to)
* VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED requests with ID of
* 0 sent by the guest.
*/
return current_id + 1;
}
EXPORT_SYMBOL_GPL(vmbus_next_request_id);
/* As in vmbus_request_addr_match() but without the requestor lock */
u64 __vmbus_request_addr_match(struct vmbus_channel *channel, u64 trans_id,
u64 rqst_addr)
{
struct vmbus_requestor *rqstor = &channel->requestor;
u64 req_addr;
/* Check rqstor has been initialized */
if (!channel->rqstor_size)
return VMBUS_NO_RQSTOR;
/* Hyper-V can send an unsolicited message with ID of 0 */
if (!trans_id)
return VMBUS_RQST_ERROR;
/* Data corresponding to trans_id is stored at trans_id - 1 */
trans_id--;
/* Invalid trans_id */
if (trans_id >= rqstor->size || !test_bit(trans_id, rqstor->req_bitmap))
return VMBUS_RQST_ERROR;
req_addr = rqstor->req_arr[trans_id];
if (rqst_addr == VMBUS_RQST_ADDR_ANY || req_addr == rqst_addr) {
rqstor->req_arr[trans_id] = rqstor->next_request_id;
rqstor->next_request_id = trans_id;
/* The already held spin lock provides atomicity */
bitmap_clear(rqstor->req_bitmap, trans_id, 1);
}
return req_addr;
}
EXPORT_SYMBOL_GPL(__vmbus_request_addr_match);
/*
* vmbus_request_addr_match - Clears/removes @trans_id from the @channel's
* requestor, provided the memory address stored at @trans_id equals @rqst_addr
* (or provided @rqst_addr matches the sentinel value VMBUS_RQST_ADDR_ANY).
*
* Returns the memory address stored at @trans_id, or VMBUS_RQST_ERROR if
* @trans_id is not contained in the requestor.
*
* Acquires and releases the requestor spin lock.
*/
u64 vmbus_request_addr_match(struct vmbus_channel *channel, u64 trans_id,
u64 rqst_addr)
{
unsigned long flags;
u64 req_addr;
lock_requestor(channel, flags);
req_addr = __vmbus_request_addr_match(channel, trans_id, rqst_addr);
unlock_requestor(channel, flags);
return req_addr;
}
EXPORT_SYMBOL_GPL(vmbus_request_addr_match);
/*
* vmbus_request_addr - Returns the memory address stored at @trans_id
* in @rqstor. Uses a spin lock to avoid race conditions.
* @channel: Pointer to the VMbus channel struct
* @trans_id: Request id sent back from Hyper-V. Becomes the requestor's
* next request id.
*/
u64 vmbus_request_addr(struct vmbus_channel *channel, u64 trans_id)
{
return vmbus_request_addr_match(channel, trans_id, VMBUS_RQST_ADDR_ANY);
}
EXPORT_SYMBOL_GPL(vmbus_request_addr);
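/*
 * Illustrative sketch (not part of this file): the requestor round trip.
 * On the send path the guest address is exchanged for a transaction id; on
 * the completion path the id returned by Hyper-V is exchanged back for the
 * original address. The "req" pointer is hypothetical.
 */
#if 0	/* example only, never compiled */
static void example_requestor_round_trip(struct vmbus_channel *channel,
					 void *req, u64 host_trans_id)
{
	u64 id, addr;

	id = vmbus_next_request_id(channel, (unsigned long)req);
	if (id == VMBUS_RQST_ERROR || id == VMBUS_NO_RQSTOR)
		return;		/* array full, or requestor not in use */
	/* 'id' would normally go into the outgoing packet's trans_id */

	addr = vmbus_request_addr(channel, host_trans_id);
	if (addr == VMBUS_RQST_ERROR || addr == VMBUS_NO_RQSTOR)
		return;
	req = (void *)(unsigned long)addr;	/* back to the original pointer */
}
#endif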
| linux-master | drivers/hv/channel.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012, Microsoft Corporation.
*
* Author:
* K. Y. Srinivasan <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cleanup.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/mman.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/count_zeros.h>
#include <linux/memory_hotplug.h>
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/percpu_counter.h>
#include <linux/page_reporting.h>
#include <linux/hyperv.h>
#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
#define CREATE_TRACE_POINTS
#include "hv_trace_balloon.h"
/*
* We begin with definitions supporting the Dynamic Memory protocol
* with the host.
*
* Begin protocol definitions.
*/
/*
* Protocol versions. The low word is the minor version, the high word the major
* version.
*
* History:
* Initial version 1.0
 * Changed to 0.1 on 2009/03/25
 * Changed to 0.2 on 2009/05/14
 * Changed to 0.3 on 2009/12/03
* Changed to 1.0 on 2011/04/05
*/
#define DYNMEM_MAKE_VERSION(Major, Minor) ((__u32)(((Major) << 16) | (Minor)))
#define DYNMEM_MAJOR_VERSION(Version) ((__u32)(Version) >> 16)
#define DYNMEM_MINOR_VERSION(Version) ((__u32)(Version) & 0xff)
enum {
DYNMEM_PROTOCOL_VERSION_1 = DYNMEM_MAKE_VERSION(0, 3),
DYNMEM_PROTOCOL_VERSION_2 = DYNMEM_MAKE_VERSION(1, 0),
DYNMEM_PROTOCOL_VERSION_3 = DYNMEM_MAKE_VERSION(2, 0),
DYNMEM_PROTOCOL_VERSION_WIN7 = DYNMEM_PROTOCOL_VERSION_1,
DYNMEM_PROTOCOL_VERSION_WIN8 = DYNMEM_PROTOCOL_VERSION_2,
DYNMEM_PROTOCOL_VERSION_WIN10 = DYNMEM_PROTOCOL_VERSION_3,
DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN10
};
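/*
 * Worked example of the encoding above (illustrative): DYNMEM_MAKE_VERSION(2, 0)
 * yields 0x00020000, so DYNMEM_MAJOR_VERSION() extracts 2 and
 * DYNMEM_MINOR_VERSION() extracts 0; DYNMEM_MAKE_VERSION(0, 3) yields
 * 0x00000003 (major 0, minor 3), which is DYNMEM_PROTOCOL_VERSION_WIN7.
 */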
/*
* Message Types
*/
enum dm_message_type {
/*
* Version 0.3
*/
DM_ERROR = 0,
DM_VERSION_REQUEST = 1,
DM_VERSION_RESPONSE = 2,
DM_CAPABILITIES_REPORT = 3,
DM_CAPABILITIES_RESPONSE = 4,
DM_STATUS_REPORT = 5,
DM_BALLOON_REQUEST = 6,
DM_BALLOON_RESPONSE = 7,
DM_UNBALLOON_REQUEST = 8,
DM_UNBALLOON_RESPONSE = 9,
DM_MEM_HOT_ADD_REQUEST = 10,
DM_MEM_HOT_ADD_RESPONSE = 11,
DM_VERSION_03_MAX = 11,
/*
* Version 1.0.
*/
DM_INFO_MESSAGE = 12,
DM_VERSION_1_MAX = 12
};
/*
* Structures defining the dynamic memory management
* protocol.
*/
union dm_version {
struct {
__u16 minor_version;
__u16 major_version;
};
__u32 version;
} __packed;
union dm_caps {
struct {
__u64 balloon:1;
__u64 hot_add:1;
/*
* To support guests that may have alignment
* limitations on hot-add, the guest can specify
* its alignment requirements; a value of n
		 * represents an alignment of 2^n in megabytes.
*/
__u64 hot_add_alignment:4;
__u64 reservedz:58;
} cap_bits;
__u64 caps;
} __packed;
union dm_mem_page_range {
struct {
/*
		 * The PFN of the first page in the range.
		 * 40 bits is the architectural limit of a
		 * PFN for AMD64.
*/
__u64 start_page:40;
/*
* The number of pages in the range.
*/
__u64 page_cnt:24;
} finfo;
__u64 page_range;
} __packed;
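/*
 * Worked example (illustrative): with 4K pages, finfo.start_page = 0x100000
 * and finfo.page_cnt = 512 describe 2 MiB of guest memory starting at
 * physical address 4 GiB. With the usual little-endian bitfield layout this
 * packs into page_range as (512ULL << 40) | 0x100000.
 */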
/*
* The header for all dynamic memory messages:
*
* type: Type of the message.
 * size: Size of the message in bytes, including the header.
* trans_id: The guest is responsible for manufacturing this ID.
*/
struct dm_header {
__u16 type;
__u16 size;
__u32 trans_id;
} __packed;
/*
* A generic message format for dynamic memory.
* Specific message formats are defined later in the file.
*/
struct dm_message {
struct dm_header hdr;
__u8 data[]; /* enclosed message */
} __packed;
/*
* Specific message types supporting the dynamic memory protocol.
*/
/*
* Version negotiation message. Sent from the guest to the host.
* The guest is free to try different versions until the host
* accepts the version.
*
* dm_version: The protocol version requested.
* is_last_attempt: If TRUE, this is the last version guest will request.
* reservedz: Reserved field, set to zero.
*/
struct dm_version_request {
struct dm_header hdr;
union dm_version version;
__u32 is_last_attempt:1;
__u32 reservedz:31;
} __packed;
/*
 * Version response message; sent from the host to the guest, indicating
 * whether the host has accepted the version sent by the guest.
*
* is_accepted: If TRUE, host has accepted the version and the guest
* should proceed to the next stage of the protocol. FALSE indicates that
* guest should re-try with a different version.
*
* reservedz: Reserved field, set to zero.
*/
struct dm_version_response {
struct dm_header hdr;
__u64 is_accepted:1;
__u64 reservedz:63;
} __packed;
/*
* Message reporting capabilities. This is sent from the guest to the
* host.
*/
struct dm_capabilities {
struct dm_header hdr;
union dm_caps caps;
__u64 min_page_cnt;
__u64 max_page_number;
} __packed;
/*
* Response to the capabilities message. This is sent from the host to the
 * guest. This message notifies the guest whether the host has accepted its
 * capabilities. If the host has not accepted them, the guest must shut down
 * the service.
*
* is_accepted: Indicates if the host has accepted guest's capabilities.
* reservedz: Must be 0.
*/
struct dm_capabilities_resp_msg {
struct dm_header hdr;
__u64 is_accepted:1;
__u64 reservedz:63;
} __packed;
/*
* This message is used to report memory pressure from the guest.
* This message is not part of any transaction and there is no
* response to this message.
*
* num_avail: Available memory in pages.
* num_committed: Committed memory in pages.
* page_file_size: The accumulated size of all page files
* in the system in pages.
* zero_free: The number of zero and free pages.
* page_file_writes: The writes to the page file in pages.
* io_diff: An indicator of file cache efficiency or page file activity,
* calculated as File Cache Page Fault Count - Page Read Count.
* This value is in pages.
*
* Some of these metrics are Windows specific and fortunately
* the algorithm on the host side that computes the guest memory
* pressure only uses num_committed value.
*/
struct dm_status {
struct dm_header hdr;
__u64 num_avail;
__u64 num_committed;
__u64 page_file_size;
__u64 zero_free;
__u32 page_file_writes;
__u32 io_diff;
} __packed;
/*
* Message to ask the guest to allocate memory - balloon up message.
* This message is sent from the host to the guest. The guest may not be
* able to allocate as much memory as requested.
*
* num_pages: number of pages to allocate.
*/
struct dm_balloon {
struct dm_header hdr;
__u32 num_pages;
__u32 reservedz;
} __packed;
/*
* Balloon response message; this message is sent from the guest
* to the host in response to the balloon message.
*
* reservedz: Reserved; must be set to zero.
* more_pages: If FALSE, this is the last message of the transaction.
 * if TRUE, there will be at least one more message from the guest.
*
* range_count: The number of ranges in the range array.
*
* range_array: An array of page ranges returned to the host.
*
*/
struct dm_balloon_response {
struct dm_header hdr;
__u32 reservedz;
__u32 more_pages:1;
__u32 range_count:31;
union dm_mem_page_range range_array[];
} __packed;
/*
* Un-balloon message; this message is sent from the host
* to the guest to give guest more memory.
*
* more_pages: If FALSE, this is the last message of the transaction.
 * if TRUE, there will be at least one more message from the host.
*
* reservedz: Reserved; must be set to zero.
*
* range_count: The number of ranges in the range array.
*
* range_array: An array of page ranges returned to the host.
*
*/
struct dm_unballoon_request {
struct dm_header hdr;
__u32 more_pages:1;
__u32 reservedz:31;
__u32 range_count;
union dm_mem_page_range range_array[];
} __packed;
/*
* Un-balloon response message; this message is sent from the guest
* to the host in response to an unballoon request.
*
*/
struct dm_unballoon_response {
struct dm_header hdr;
} __packed;
/*
* Hot add request message. Message sent from the host to the guest.
*
* mem_range: Memory range to hot add.
*
*/
struct dm_hot_add {
struct dm_header hdr;
union dm_mem_page_range range;
} __packed;
/*
* Hot add response message.
* This message is sent by the guest to report the status of a hot add request.
* If page_count is less than the requested page count, then the host should
* assume all further hot add requests will fail, since this indicates that
* the guest has hit an upper physical memory barrier.
*
* Hot adds may also fail due to low resources; in this case, the guest must
* not complete this message until the hot add can succeed, and the host must
* not send a new hot add request until the response is sent.
* If VSC fails to hot add memory DYNMEM_NUMBER_OF_UNSUCCESSFUL_HOTADD_ATTEMPTS
* times it fails the request.
*
*
* page_count: number of pages that were successfully hot added.
*
 * result: result of the operation; 1: success, 0: failure.
*
*/
struct dm_hot_add_response {
struct dm_header hdr;
__u32 page_count;
__u32 result;
} __packed;
/*
* Types of information sent from host to the guest.
*/
enum dm_info_type {
INFO_TYPE_MAX_PAGE_CNT = 0,
MAX_INFO_TYPE
};
/*
* Header for the information message.
*/
struct dm_info_header {
enum dm_info_type type;
__u32 data_size;
} __packed;
/*
* This message is sent from the host to the guest to pass
* some relevant information (win8 addition).
*
 * reserved: not used.
* info_size: size of the information blob.
* info: information blob.
*/
struct dm_info_msg {
struct dm_header hdr;
__u32 reserved;
__u32 info_size;
__u8 info[];
};
/*
* End protocol definitions.
*/
/*
* State to manage hot adding memory into the guest.
* The range start_pfn : end_pfn specifies the range
* that the host has asked us to hot add. The range
* start_pfn : ha_end_pfn specifies the range that we have
* currently hot added. We hot add in multiples of 128M
* chunks; it is possible that we may not be able to bring
* online all the pages in the region. The range
* covered_start_pfn:covered_end_pfn defines the pages that can
 * be brought online.
*/
struct hv_hotadd_state {
struct list_head list;
unsigned long start_pfn;
unsigned long covered_start_pfn;
unsigned long covered_end_pfn;
unsigned long ha_end_pfn;
unsigned long end_pfn;
/*
* A list of gaps.
*/
struct list_head gap_list;
};
struct hv_hotadd_gap {
struct list_head list;
unsigned long start_pfn;
unsigned long end_pfn;
};
struct balloon_state {
__u32 num_pages;
struct work_struct wrk;
};
struct hot_add_wrk {
union dm_mem_page_range ha_page_range;
union dm_mem_page_range ha_region_range;
struct work_struct wrk;
};
static bool allow_hibernation;
static bool hot_add = true;
static bool do_hot_add;
/*
* Delay reporting memory pressure by
* the specified number of seconds.
*/
static uint pressure_report_delay = 45;
extern unsigned int page_reporting_order;
#define HV_MAX_FAILURES 2
/*
* The last time we posted a pressure report to host.
*/
static unsigned long last_post_time;
static int hv_hypercall_multi_failure;
module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
static atomic_t trans_id = ATOMIC_INIT(0);
static int dm_ring_size = VMBUS_RING_SIZE(16 * 1024);
/*
* Driver specific state.
*/
enum hv_dm_state {
DM_INITIALIZING = 0,
DM_INITIALIZED,
DM_BALLOON_UP,
DM_BALLOON_DOWN,
DM_HOT_ADD,
DM_INIT_ERROR
};
static __u8 recv_buffer[HV_HYP_PAGE_SIZE];
static __u8 balloon_up_send_buffer[HV_HYP_PAGE_SIZE];
#define PAGES_IN_2M (2 * 1024 * 1024 / PAGE_SIZE)
#define HA_CHUNK (128 * 1024 * 1024 / PAGE_SIZE)
struct hv_dynmem_device {
struct hv_device *dev;
enum hv_dm_state state;
struct completion host_event;
struct completion config_event;
/*
* Number of pages we have currently ballooned out.
*/
unsigned int num_pages_ballooned;
unsigned int num_pages_onlined;
unsigned int num_pages_added;
/*
* State to manage the ballooning (up) operation.
*/
struct balloon_state balloon_wrk;
/*
* State to execute the "hot-add" operation.
*/
struct hot_add_wrk ha_wrk;
/*
* This state tracks if the host has specified a hot-add
* region.
*/
bool host_specified_ha_region;
/*
* State to synchronize hot-add.
*/
struct completion ol_waitevent;
/*
* This thread handles hot-add
* requests from the host as well as notifying
	 * the host about memory pressure in
* the guest.
*/
struct task_struct *thread;
/*
* Protects ha_region_list, num_pages_onlined counter and individual
* regions from ha_region_list.
*/
spinlock_t ha_lock;
/*
* A list of hot-add regions.
*/
struct list_head ha_region_list;
/*
* We start with the highest version we can support
* and downgrade based on the host; we save here the
* next version to try.
*/
__u32 next_version;
/*
* The negotiated version agreed by host.
*/
__u32 version;
struct page_reporting_dev_info pr_dev_info;
/*
* Maximum number of pages that can be hot_add-ed
*/
__u64 max_dynamic_page_count;
};
static struct hv_dynmem_device dm_device;
static void post_status(struct hv_dynmem_device *dm);
static void enable_page_reporting(void);
static void disable_page_reporting(void);
#ifdef CONFIG_MEMORY_HOTPLUG
static inline bool has_pfn_is_backed(struct hv_hotadd_state *has,
unsigned long pfn)
{
struct hv_hotadd_gap *gap;
/* The page is not backed. */
if ((pfn < has->covered_start_pfn) || (pfn >= has->covered_end_pfn))
return false;
/* Check for gaps. */
list_for_each_entry(gap, &has->gap_list, list) {
if ((pfn >= gap->start_pfn) && (pfn < gap->end_pfn))
return false;
}
return true;
}
static unsigned long hv_page_offline_check(unsigned long start_pfn,
unsigned long nr_pages)
{
unsigned long pfn = start_pfn, count = 0;
struct hv_hotadd_state *has;
bool found;
while (pfn < start_pfn + nr_pages) {
/*
		 * Search for the HAS which covers the pfn and, when we find
		 * one, count how many consecutive PFNs are covered.
*/
found = false;
list_for_each_entry(has, &dm_device.ha_region_list, list) {
while ((pfn >= has->start_pfn) &&
(pfn < has->end_pfn) &&
(pfn < start_pfn + nr_pages)) {
found = true;
if (has_pfn_is_backed(has, pfn))
count++;
pfn++;
}
}
/*
* This PFN is not in any HAS (e.g. we're offlining a region
* which was present at boot), no need to account for it. Go
* to the next one.
*/
if (!found)
pfn++;
}
return count;
}
static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
void *v)
{
struct memory_notify *mem = (struct memory_notify *)v;
unsigned long pfn_count;
switch (val) {
case MEM_ONLINE:
case MEM_CANCEL_ONLINE:
complete(&dm_device.ol_waitevent);
break;
case MEM_OFFLINE:
scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
pfn_count = hv_page_offline_check(mem->start_pfn,
mem->nr_pages);
if (pfn_count <= dm_device.num_pages_onlined) {
dm_device.num_pages_onlined -= pfn_count;
} else {
/*
* We're offlining more pages than we
* managed to online. This is
* unexpected. In any case don't let
* num_pages_onlined wrap around zero.
*/
WARN_ON_ONCE(1);
dm_device.num_pages_onlined = 0;
}
}
break;
case MEM_GOING_ONLINE:
case MEM_GOING_OFFLINE:
case MEM_CANCEL_OFFLINE:
break;
}
return NOTIFY_OK;
}
static struct notifier_block hv_memory_nb = {
.notifier_call = hv_memory_notifier,
.priority = 0
};
/* Check if the particular page is backed and can be onlined and online it. */
static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
{
if (!has_pfn_is_backed(has, page_to_pfn(pg))) {
if (!PageOffline(pg))
__SetPageOffline(pg);
return;
}
if (PageOffline(pg))
__ClearPageOffline(pg);
/* This frame is currently backed; online the page. */
generic_online_page(pg, 0);
lockdep_assert_held(&dm_device.ha_lock);
dm_device.num_pages_onlined++;
}
static void hv_bring_pgs_online(struct hv_hotadd_state *has,
unsigned long start_pfn, unsigned long size)
{
int i;
pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn);
for (i = 0; i < size; i++)
hv_page_online_one(has, pfn_to_page(start_pfn + i));
}
static void hv_mem_hot_add(unsigned long start, unsigned long size,
unsigned long pfn_count,
struct hv_hotadd_state *has)
{
int ret = 0;
int i, nid;
unsigned long start_pfn;
unsigned long processed_pfn;
unsigned long total_pfn = pfn_count;
for (i = 0; i < (size/HA_CHUNK); i++) {
start_pfn = start + (i * HA_CHUNK);
scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
has->ha_end_pfn += HA_CHUNK;
if (total_pfn > HA_CHUNK) {
processed_pfn = HA_CHUNK;
total_pfn -= HA_CHUNK;
} else {
processed_pfn = total_pfn;
total_pfn = 0;
}
has->covered_end_pfn += processed_pfn;
}
reinit_completion(&dm_device.ol_waitevent);
nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
ret = add_memory(nid, PFN_PHYS((start_pfn)),
(HA_CHUNK << PAGE_SHIFT), MHP_MERGE_RESOURCE);
if (ret) {
pr_err("hot_add memory failed error is %d\n", ret);
if (ret == -EEXIST) {
/*
* This error indicates that the error
* is not a transient failure. This is the
* case where the guest's physical address map
* precludes hot adding memory. Stop all further
* memory hot-add.
*/
do_hot_add = false;
}
scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
has->ha_end_pfn -= HA_CHUNK;
has->covered_end_pfn -= processed_pfn;
}
break;
}
/*
* Wait for memory to get onlined. If the kernel onlined the
* memory when adding it, this will return directly. Otherwise,
* it will wait for user space to online the memory. This helps
* to avoid adding memory faster than it is getting onlined. As
* adding succeeded, it is ok to proceed even if the memory was
* not onlined in time.
*/
wait_for_completion_timeout(&dm_device.ol_waitevent, 5 * HZ);
post_status(&dm_device);
}
}
static void hv_online_page(struct page *pg, unsigned int order)
{
struct hv_hotadd_state *has;
unsigned long pfn = page_to_pfn(pg);
guard(spinlock_irqsave)(&dm_device.ha_lock);
list_for_each_entry(has, &dm_device.ha_region_list, list) {
/* The page belongs to a different HAS. */
if ((pfn < has->start_pfn) ||
(pfn + (1UL << order) > has->end_pfn))
continue;
hv_bring_pgs_online(has, pfn, 1UL << order);
break;
}
}
static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
{
struct hv_hotadd_state *has;
struct hv_hotadd_gap *gap;
unsigned long residual, new_inc;
int ret = 0;
guard(spinlock_irqsave)(&dm_device.ha_lock);
list_for_each_entry(has, &dm_device.ha_region_list, list) {
/*
* If the pfn range we are dealing with is not in the current
* "hot add block", move on.
*/
if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
continue;
/*
* If the current start pfn is not where the covered_end
* is, create a gap and update covered_end_pfn.
*/
if (has->covered_end_pfn != start_pfn) {
gap = kzalloc(sizeof(struct hv_hotadd_gap), GFP_ATOMIC);
if (!gap) {
ret = -ENOMEM;
break;
}
INIT_LIST_HEAD(&gap->list);
gap->start_pfn = has->covered_end_pfn;
gap->end_pfn = start_pfn;
list_add_tail(&gap->list, &has->gap_list);
has->covered_end_pfn = start_pfn;
}
/*
* If the current hot add-request extends beyond
* our current limit; extend it.
*/
if ((start_pfn + pfn_cnt) > has->end_pfn) {
residual = (start_pfn + pfn_cnt - has->end_pfn);
/*
* Extend the region by multiples of HA_CHUNK.
*/
new_inc = (residual / HA_CHUNK) * HA_CHUNK;
if (residual % HA_CHUNK)
new_inc += HA_CHUNK;
has->end_pfn += new_inc;
}
ret = 1;
break;
}
return ret;
}
static unsigned long handle_pg_range(unsigned long pg_start,
unsigned long pg_count)
{
unsigned long start_pfn = pg_start;
unsigned long pfn_cnt = pg_count;
unsigned long size;
struct hv_hotadd_state *has;
unsigned long pgs_ol = 0;
unsigned long old_covered_state;
unsigned long res = 0, flags;
pr_debug("Hot adding %lu pages starting at pfn 0x%lx.\n", pg_count,
pg_start);
spin_lock_irqsave(&dm_device.ha_lock, flags);
list_for_each_entry(has, &dm_device.ha_region_list, list) {
/*
* If the pfn range we are dealing with is not in the current
* "hot add block", move on.
*/
if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
continue;
old_covered_state = has->covered_end_pfn;
if (start_pfn < has->ha_end_pfn) {
/*
* This is the case where we are backing pages
* in an already hot added region. Bring
* these pages online first.
*/
pgs_ol = has->ha_end_pfn - start_pfn;
if (pgs_ol > pfn_cnt)
pgs_ol = pfn_cnt;
has->covered_end_pfn += pgs_ol;
pfn_cnt -= pgs_ol;
/*
* Check if the corresponding memory block is already
* online. It is possible to observe struct pages still
* being uninitialized here so check section instead.
* In case the section is online we need to bring the
* rest of pfns (which were not backed previously)
* online too.
*/
if (start_pfn > has->start_pfn &&
online_section_nr(pfn_to_section_nr(start_pfn)))
hv_bring_pgs_online(has, start_pfn, pgs_ol);
}
if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) {
/*
* We have some residual hot add range
* that needs to be hot added; hot add
* it now. Hot add a multiple of
* HA_CHUNK that fully covers the pages
* we have.
*/
size = (has->end_pfn - has->ha_end_pfn);
if (pfn_cnt <= size) {
size = ((pfn_cnt / HA_CHUNK) * HA_CHUNK);
if (pfn_cnt % HA_CHUNK)
size += HA_CHUNK;
} else {
pfn_cnt = size;
}
spin_unlock_irqrestore(&dm_device.ha_lock, flags);
hv_mem_hot_add(has->ha_end_pfn, size, pfn_cnt, has);
spin_lock_irqsave(&dm_device.ha_lock, flags);
}
/*
* If we managed to online any pages that were given to us,
* we declare success.
*/
res = has->covered_end_pfn - old_covered_state;
break;
}
spin_unlock_irqrestore(&dm_device.ha_lock, flags);
return res;
}
static unsigned long process_hot_add(unsigned long pg_start,
unsigned long pfn_cnt,
unsigned long rg_start,
unsigned long rg_size)
{
struct hv_hotadd_state *ha_region = NULL;
int covered;
if (pfn_cnt == 0)
return 0;
if (!dm_device.host_specified_ha_region) {
covered = pfn_covered(pg_start, pfn_cnt);
if (covered < 0)
return 0;
if (covered)
goto do_pg_range;
}
/*
	 * If the host has specified a hot-add range, deal with it first.
*/
if (rg_size != 0) {
ha_region = kzalloc(sizeof(struct hv_hotadd_state), GFP_KERNEL);
if (!ha_region)
return 0;
INIT_LIST_HEAD(&ha_region->list);
INIT_LIST_HEAD(&ha_region->gap_list);
ha_region->start_pfn = rg_start;
ha_region->ha_end_pfn = rg_start;
ha_region->covered_start_pfn = pg_start;
ha_region->covered_end_pfn = pg_start;
ha_region->end_pfn = rg_start + rg_size;
scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
list_add_tail(&ha_region->list, &dm_device.ha_region_list);
}
}
do_pg_range:
/*
* Process the page range specified; bringing them
* online if possible.
*/
return handle_pg_range(pg_start, pfn_cnt);
}
#endif
static void hot_add_req(struct work_struct *dummy)
{
struct dm_hot_add_response resp;
#ifdef CONFIG_MEMORY_HOTPLUG
unsigned long pg_start, pfn_cnt;
unsigned long rg_start, rg_sz;
#endif
struct hv_dynmem_device *dm = &dm_device;
memset(&resp, 0, sizeof(struct dm_hot_add_response));
resp.hdr.type = DM_MEM_HOT_ADD_RESPONSE;
resp.hdr.size = sizeof(struct dm_hot_add_response);
#ifdef CONFIG_MEMORY_HOTPLUG
pg_start = dm->ha_wrk.ha_page_range.finfo.start_page;
pfn_cnt = dm->ha_wrk.ha_page_range.finfo.page_cnt;
rg_start = dm->ha_wrk.ha_region_range.finfo.start_page;
rg_sz = dm->ha_wrk.ha_region_range.finfo.page_cnt;
if ((rg_start == 0) && (!dm->host_specified_ha_region)) {
unsigned long region_size;
unsigned long region_start;
/*
* The host has not specified the hot-add region.
* Based on the hot-add page range being specified,
* compute a hot-add region that can cover the pages
* that need to be hot-added while ensuring the alignment
* and size requirements of Linux as it relates to hot-add.
*/
region_size = (pfn_cnt / HA_CHUNK) * HA_CHUNK;
if (pfn_cnt % HA_CHUNK)
region_size += HA_CHUNK;
region_start = (pg_start / HA_CHUNK) * HA_CHUNK;
rg_start = region_start;
rg_sz = region_size;
}
if (do_hot_add)
resp.page_count = process_hot_add(pg_start, pfn_cnt,
rg_start, rg_sz);
dm->num_pages_added += resp.page_count;
#endif
/*
* The result field of the response structure has the
* following semantics:
*
* 1. If all or some pages hot-added: Guest should return success.
*
* 2. If no pages could be hot-added:
*
* If the guest returns success, then the host
* will not attempt any further hot-add operations. This
* signifies a permanent failure.
*
* If the guest returns failure, then this failure will be
* treated as a transient failure and the host may retry the
* hot-add operation after some delay.
*/
if (resp.page_count > 0)
resp.result = 1;
else if (!do_hot_add)
resp.result = 1;
else
resp.result = 0;
if (!do_hot_add || resp.page_count == 0) {
if (!allow_hibernation)
pr_err("Memory hot add failed\n");
else
pr_info("Ignore hot-add request!\n");
}
dm->state = DM_INITIALIZED;
resp.hdr.trans_id = atomic_inc_return(&trans_id);
vmbus_sendpacket(dm->dev->channel, &resp,
sizeof(struct dm_hot_add_response),
(unsigned long)NULL,
VM_PKT_DATA_INBAND, 0);
}
static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
{
struct dm_info_header *info_hdr;
info_hdr = (struct dm_info_header *)msg->info;
switch (info_hdr->type) {
case INFO_TYPE_MAX_PAGE_CNT:
if (info_hdr->data_size == sizeof(__u64)) {
__u64 *max_page_count = (__u64 *)&info_hdr[1];
pr_info("Max. dynamic memory size: %llu MB\n",
(*max_page_count) >> (20 - HV_HYP_PAGE_SHIFT));
dm->max_dynamic_page_count = *max_page_count;
}
break;
default:
pr_warn("Received Unknown type: %d\n", info_hdr->type);
}
}
static unsigned long compute_balloon_floor(void)
{
unsigned long min_pages;
unsigned long nr_pages = totalram_pages();
#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
	/* Simple continuous piecewise linear function:
* max MiB -> min MiB gradient
* 0 0
* 16 16
* 32 24
* 128 72 (1/2)
* 512 168 (1/4)
* 2048 360 (1/8)
* 8192 744 (1/16)
* 32768 1512 (1/32)
*/
if (nr_pages < MB2PAGES(128))
min_pages = MB2PAGES(8) + (nr_pages >> 1);
else if (nr_pages < MB2PAGES(512))
min_pages = MB2PAGES(40) + (nr_pages >> 2);
else if (nr_pages < MB2PAGES(2048))
min_pages = MB2PAGES(104) + (nr_pages >> 3);
else if (nr_pages < MB2PAGES(8192))
min_pages = MB2PAGES(232) + (nr_pages >> 4);
else
min_pages = MB2PAGES(488) + (nr_pages >> 5);
#undef MB2PAGES
return min_pages;
}
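/*
 * Worked example (illustrative): on a guest with 1024 MiB of RAM, nr_pages
 * falls in the 512..2048 MiB bucket above, so the floor is
 * MB2PAGES(104) + nr_pages / 8 = 104 MiB + 128 MiB = 232 MiB, matching the
 * 1/8 gradient between the 512 -> 168 and 2048 -> 360 rows of the table.
 */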
/*
* Compute total committed memory pages
*/
static unsigned long get_pages_committed(struct hv_dynmem_device *dm)
{
return vm_memory_committed() +
dm->num_pages_ballooned +
(dm->num_pages_added > dm->num_pages_onlined ?
dm->num_pages_added - dm->num_pages_onlined : 0) +
compute_balloon_floor();
}
/*
 * Post our status, as it relates to memory pressure, to the
 * host. The host expects the guests to post this status
* periodically at 1 second intervals.
*
* The metrics specified in this protocol are very Windows
* specific and so we cook up numbers here to convey our memory
* pressure.
*/
static void post_status(struct hv_dynmem_device *dm)
{
struct dm_status status;
unsigned long now = jiffies;
unsigned long last_post = last_post_time;
unsigned long num_pages_avail, num_pages_committed;
if (pressure_report_delay > 0) {
--pressure_report_delay;
return;
}
if (!time_after(now, (last_post_time + HZ)))
return;
memset(&status, 0, sizeof(struct dm_status));
status.hdr.type = DM_STATUS_REPORT;
status.hdr.size = sizeof(struct dm_status);
status.hdr.trans_id = atomic_inc_return(&trans_id);
/*
* The host expects the guest to report free and committed memory.
* Furthermore, the host expects the pressure information to include
* the ballooned out pages. For a given amount of memory that we are
* managing we need to compute a floor below which we should not
* balloon. Compute this and add it to the pressure report.
* We also need to report all offline pages (num_pages_added -
* num_pages_onlined) as committed to the host, otherwise it can try
* asking us to balloon them out.
*/
num_pages_avail = si_mem_available();
num_pages_committed = get_pages_committed(dm);
trace_balloon_status(num_pages_avail, num_pages_committed,
vm_memory_committed(), dm->num_pages_ballooned,
dm->num_pages_added, dm->num_pages_onlined);
/* Convert numbers of pages into numbers of HV_HYP_PAGEs. */
status.num_avail = num_pages_avail * NR_HV_HYP_PAGES_IN_PAGE;
status.num_committed = num_pages_committed * NR_HV_HYP_PAGES_IN_PAGE;
/*
* If our transaction ID is no longer current, just don't
* send the status. This can happen if we were interrupted
* after we picked our transaction ID.
*/
if (status.hdr.trans_id != atomic_read(&trans_id))
return;
/*
* If the last post time that we sampled has changed,
* we have raced, don't post the status.
*/
if (last_post != last_post_time)
return;
last_post_time = jiffies;
vmbus_sendpacket(dm->dev->channel, &status,
sizeof(struct dm_status),
(unsigned long)NULL,
VM_PKT_DATA_INBAND, 0);
}
static void free_balloon_pages(struct hv_dynmem_device *dm,
union dm_mem_page_range *range_array)
{
int num_pages = range_array->finfo.page_cnt;
__u64 start_frame = range_array->finfo.start_page;
struct page *pg;
int i;
for (i = 0; i < num_pages; i++) {
pg = pfn_to_page(i + start_frame);
__ClearPageOffline(pg);
__free_page(pg);
dm->num_pages_ballooned--;
adjust_managed_page_count(pg, 1);
}
}
static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
unsigned int num_pages,
struct dm_balloon_response *bl_resp,
int alloc_unit)
{
unsigned int i, j;
struct page *pg;
for (i = 0; i < num_pages / alloc_unit; i++) {
if (bl_resp->hdr.size + sizeof(union dm_mem_page_range) >
HV_HYP_PAGE_SIZE)
return i * alloc_unit;
/*
* We execute this code in a thread context. Furthermore,
* we don't want the kernel to try too hard.
*/
pg = alloc_pages(GFP_HIGHUSER | __GFP_NORETRY |
__GFP_NOMEMALLOC | __GFP_NOWARN,
get_order(alloc_unit << PAGE_SHIFT));
if (!pg)
return i * alloc_unit;
dm->num_pages_ballooned += alloc_unit;
/*
		 * If we allocated 2M pages, split them so we
		 * can free them in any order we get.
*/
if (alloc_unit != 1)
split_page(pg, get_order(alloc_unit << PAGE_SHIFT));
/* mark all pages offline */
for (j = 0; j < alloc_unit; j++) {
__SetPageOffline(pg + j);
adjust_managed_page_count(pg + j, -1);
}
bl_resp->range_count++;
bl_resp->range_array[i].finfo.start_page =
page_to_pfn(pg);
bl_resp->range_array[i].finfo.page_cnt = alloc_unit;
bl_resp->hdr.size += sizeof(union dm_mem_page_range);
}
return i * alloc_unit;
}
static void balloon_up(struct work_struct *dummy)
{
unsigned int num_pages = dm_device.balloon_wrk.num_pages;
unsigned int num_ballooned = 0;
struct dm_balloon_response *bl_resp;
int alloc_unit;
int ret;
bool done = false;
int i;
long avail_pages;
unsigned long floor;
/*
* We will attempt 2M allocations. However, if we fail to
* allocate 2M chunks, we will go back to PAGE_SIZE allocations.
*/
alloc_unit = PAGES_IN_2M;
avail_pages = si_mem_available();
floor = compute_balloon_floor();
/* Refuse to balloon below the floor. */
if (avail_pages < num_pages || avail_pages - num_pages < floor) {
pr_info("Balloon request will be partially fulfilled. %s\n",
avail_pages < num_pages ? "Not enough memory." :
"Balloon floor reached.");
num_pages = avail_pages > floor ? (avail_pages - floor) : 0;
}
while (!done) {
memset(balloon_up_send_buffer, 0, HV_HYP_PAGE_SIZE);
bl_resp = (struct dm_balloon_response *)balloon_up_send_buffer;
bl_resp->hdr.type = DM_BALLOON_RESPONSE;
bl_resp->hdr.size = sizeof(struct dm_balloon_response);
bl_resp->more_pages = 1;
num_pages -= num_ballooned;
num_ballooned = alloc_balloon_pages(&dm_device, num_pages,
bl_resp, alloc_unit);
if (alloc_unit != 1 && num_ballooned == 0) {
alloc_unit = 1;
continue;
}
if (num_ballooned == 0 || num_ballooned == num_pages) {
pr_debug("Ballooned %u out of %u requested pages.\n",
num_pages, dm_device.balloon_wrk.num_pages);
bl_resp->more_pages = 0;
done = true;
dm_device.state = DM_INITIALIZED;
}
/*
* We are pushing a lot of data through the channel;
		 * deal with transient failures caused by a lack of space
		 * in the ring buffer.
*/
do {
bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
ret = vmbus_sendpacket(dm_device.dev->channel,
bl_resp,
bl_resp->hdr.size,
(unsigned long)NULL,
VM_PKT_DATA_INBAND, 0);
if (ret == -EAGAIN)
msleep(20);
post_status(&dm_device);
} while (ret == -EAGAIN);
if (ret) {
			/*
			 * Free up the memory we allocated.
			 */
pr_err("Balloon response failed\n");
for (i = 0; i < bl_resp->range_count; i++)
free_balloon_pages(&dm_device,
&bl_resp->range_array[i]);
done = true;
}
}
}
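/*
 * Handle a DM_UNBALLOON_REQUEST: free the page ranges listed by the host
 * back to the guest and acknowledge once the last request has been seen.
 */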
static void balloon_down(struct hv_dynmem_device *dm,
struct dm_unballoon_request *req)
{
union dm_mem_page_range *range_array = req->range_array;
int range_count = req->range_count;
struct dm_unballoon_response resp;
int i;
unsigned int prev_pages_ballooned = dm->num_pages_ballooned;
for (i = 0; i < range_count; i++) {
free_balloon_pages(dm, &range_array[i]);
complete(&dm_device.config_event);
}
pr_debug("Freed %u ballooned pages.\n",
prev_pages_ballooned - dm->num_pages_ballooned);
if (req->more_pages == 1)
return;
memset(&resp, 0, sizeof(struct dm_unballoon_response));
resp.hdr.type = DM_UNBALLOON_RESPONSE;
resp.hdr.trans_id = atomic_inc_return(&trans_id);
resp.hdr.size = sizeof(struct dm_unballoon_response);
vmbus_sendpacket(dm_device.dev->channel, &resp,
sizeof(struct dm_unballoon_response),
(unsigned long)NULL,
VM_PKT_DATA_INBAND, 0);
dm->state = DM_INITIALIZED;
}
static void balloon_onchannelcallback(void *context);
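/*
 * Per-device worker thread: wake up at least once a second to post a
 * memory pressure report to the host, and disable free page reporting
 * if too many cold-discard hypercalls have failed.
 */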
static int dm_thread_func(void *dm_dev)
{
struct hv_dynmem_device *dm = dm_dev;
while (!kthread_should_stop()) {
wait_for_completion_interruptible_timeout(
&dm_device.config_event, 1*HZ);
/*
* The host expects us to post information on the memory
* pressure every second.
*/
reinit_completion(&dm_device.config_event);
post_status(dm);
		/*
		 * Disable free page reporting if the multiple-hypercall-failure
		 * flag is set. This is not done in the page_reporting callback
		 * context because that would cause a deadlock between
		 * page_reporting_process() and page_reporting_unregister().
		 */
if (hv_hypercall_multi_failure >= HV_MAX_FAILURES) {
pr_err("Multiple failures in cold memory discard hypercall, disabling page reporting\n");
disable_page_reporting();
/* Reset the flag after disabling reporting */
hv_hypercall_multi_failure = 0;
}
}
return 0;
}
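/*
 * Handle the host's DM_VERSION_RESPONSE: either complete version
 * negotiation or retry with the next lower protocol version.
 */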
static void version_resp(struct hv_dynmem_device *dm,
struct dm_version_response *vresp)
{
struct dm_version_request version_req;
int ret;
if (vresp->is_accepted) {
/*
* We are done; wakeup the
* context waiting for version
* negotiation.
*/
complete(&dm->host_event);
return;
}
	/*
	 * If there are more versions to try, continue with negotiation;
	 * if not, shut down the service since we are not able to negotiate
	 * a suitable version number with the host.
	 */
if (dm->next_version == 0)
goto version_error;
memset(&version_req, 0, sizeof(struct dm_version_request));
version_req.hdr.type = DM_VERSION_REQUEST;
version_req.hdr.size = sizeof(struct dm_version_request);
version_req.hdr.trans_id = atomic_inc_return(&trans_id);
version_req.version.version = dm->next_version;
dm->version = version_req.version.version;
/*
* Set the next version to try in case current version fails.
* Win7 protocol ought to be the last one to try.
*/
switch (version_req.version.version) {
case DYNMEM_PROTOCOL_VERSION_WIN8:
dm->next_version = DYNMEM_PROTOCOL_VERSION_WIN7;
version_req.is_last_attempt = 0;
break;
default:
dm->next_version = 0;
version_req.is_last_attempt = 1;
}
ret = vmbus_sendpacket(dm->dev->channel, &version_req,
sizeof(struct dm_version_request),
(unsigned long)NULL,
VM_PKT_DATA_INBAND, 0);
if (ret)
goto version_error;
return;
version_error:
dm->state = DM_INIT_ERROR;
complete(&dm->host_event);
}
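/* Handle the host's DM_CAPABILITIES_RESPONSE during initialization. */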
static void cap_resp(struct hv_dynmem_device *dm,
struct dm_capabilities_resp_msg *cap_resp)
{
if (!cap_resp->is_accepted) {
pr_err("Capabilities not accepted by host\n");
dm->state = DM_INIT_ERROR;
}
complete(&dm->host_event);
}
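/*
 * VMBus channel callback: read one Dynamic Memory protocol message from
 * the host and dispatch it to the appropriate handler or work item.
 */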
static void balloon_onchannelcallback(void *context)
{
struct hv_device *dev = context;
u32 recvlen;
u64 requestid;
struct dm_message *dm_msg;
struct dm_header *dm_hdr;
struct hv_dynmem_device *dm = hv_get_drvdata(dev);
struct dm_balloon *bal_msg;
struct dm_hot_add *ha_msg;
union dm_mem_page_range *ha_pg_range;
union dm_mem_page_range *ha_region;
memset(recv_buffer, 0, sizeof(recv_buffer));
vmbus_recvpacket(dev->channel, recv_buffer,
HV_HYP_PAGE_SIZE, &recvlen, &requestid);
if (recvlen > 0) {
dm_msg = (struct dm_message *)recv_buffer;
dm_hdr = &dm_msg->hdr;
switch (dm_hdr->type) {
case DM_VERSION_RESPONSE:
version_resp(dm,
(struct dm_version_response *)dm_msg);
break;
case DM_CAPABILITIES_RESPONSE:
cap_resp(dm,
(struct dm_capabilities_resp_msg *)dm_msg);
break;
case DM_BALLOON_REQUEST:
if (allow_hibernation) {
pr_info("Ignore balloon-up request!\n");
break;
}
if (dm->state == DM_BALLOON_UP)
pr_warn("Currently ballooning\n");
bal_msg = (struct dm_balloon *)recv_buffer;
dm->state = DM_BALLOON_UP;
dm_device.balloon_wrk.num_pages = bal_msg->num_pages;
schedule_work(&dm_device.balloon_wrk.wrk);
break;
case DM_UNBALLOON_REQUEST:
if (allow_hibernation) {
pr_info("Ignore balloon-down request!\n");
break;
}
dm->state = DM_BALLOON_DOWN;
balloon_down(dm,
(struct dm_unballoon_request *)recv_buffer);
break;
case DM_MEM_HOT_ADD_REQUEST:
if (dm->state == DM_HOT_ADD)
pr_warn("Currently hot-adding\n");
dm->state = DM_HOT_ADD;
ha_msg = (struct dm_hot_add *)recv_buffer;
if (ha_msg->hdr.size == sizeof(struct dm_hot_add)) {
/*
* This is a normal hot-add request specifying
* hot-add memory.
*/
dm->host_specified_ha_region = false;
ha_pg_range = &ha_msg->range;
dm->ha_wrk.ha_page_range = *ha_pg_range;
dm->ha_wrk.ha_region_range.page_range = 0;
} else {
/*
* Host is specifying that we first hot-add
* a region and then partially populate this
* region.
*/
dm->host_specified_ha_region = true;
ha_pg_range = &ha_msg->range;
ha_region = &ha_pg_range[1];
dm->ha_wrk.ha_page_range = *ha_pg_range;
dm->ha_wrk.ha_region_range = *ha_region;
}
schedule_work(&dm_device.ha_wrk.wrk);
break;
case DM_INFO_MESSAGE:
process_info(dm, (struct dm_info_msg *)dm_msg);
break;
default:
pr_warn_ratelimited("Unhandled message: type: %d\n", dm_hdr->type);
}
}
}
#define HV_LARGE_REPORTING_ORDER 9
#define HV_LARGE_REPORTING_LEN (HV_HYP_PAGE_SIZE << \
HV_LARGE_REPORTING_ORDER)
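/*
 * Free page reporting callback: hint the free page ranges in @sgl to
 * Hyper-V as cold-discardable using the HV_EXT_CALL_MEMORY_HEAT_HINT
 * rep hypercall.
 */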
static int hv_free_page_report(struct page_reporting_dev_info *pr_dev_info,
struct scatterlist *sgl, unsigned int nents)
{
unsigned long flags;
struct hv_memory_hint *hint;
int i, order;
u64 status;
struct scatterlist *sg;
WARN_ON_ONCE(nents > HV_MEMORY_HINT_MAX_GPA_PAGE_RANGES);
WARN_ON_ONCE(sgl->length < (HV_HYP_PAGE_SIZE << page_reporting_order));
local_irq_save(flags);
hint = *this_cpu_ptr(hyperv_pcpu_input_arg);
if (!hint) {
local_irq_restore(flags);
return -ENOSPC;
}
hint->type = HV_EXT_MEMORY_HEAT_HINT_TYPE_COLD_DISCARD;
hint->reserved = 0;
for_each_sg(sgl, sg, nents, i) {
union hv_gpa_page_range *range;
range = &hint->ranges[i];
range->address_space = 0;
order = get_order(sg->length);
		/*
		 * Hyper-V expects the additional_pages field in units of one
		 * of these 3 sizes: 4 Kbytes, 2 Mbytes or 1 Gbytes. This is
		 * dictated by the values of the fields page.largepage and
		 * page_size. This code, however, only uses the 4 Kbytes and
		 * 2 Mbytes units, not the 1 Gbytes unit.
		 */
/* page reporting for pages 2MB or higher */
		if (order >= HV_LARGE_REPORTING_ORDER) {
range->page.largepage = 1;
range->page_size = HV_GPA_PAGE_RANGE_PAGE_SIZE_2MB;
range->base_large_pfn = page_to_hvpfn(
sg_page(sg)) >> HV_LARGE_REPORTING_ORDER;
range->page.additional_pages =
(sg->length / HV_LARGE_REPORTING_LEN) - 1;
} else {
/* Page reporting for pages below 2MB */
range->page.basepfn = page_to_hvpfn(sg_page(sg));
range->page.largepage = false;
range->page.additional_pages =
(sg->length / HV_HYP_PAGE_SIZE) - 1;
}
}
status = hv_do_rep_hypercall(HV_EXT_CALL_MEMORY_HEAT_HINT, nents, 0,
hint, NULL);
local_irq_restore(flags);
if (!hv_result_success(status)) {
pr_err("Cold memory discard hypercall failed with status %llx\n",
status);
if (hv_hypercall_multi_failure > 0)
hv_hypercall_multi_failure++;
if (hv_result(status) == HV_STATUS_INVALID_PARAMETER) {
pr_err("Underlying Hyper-V does not support order less than 9. Hypercall failed\n");
pr_err("Defaulting to page_reporting_order %d\n",
pageblock_order);
page_reporting_order = pageblock_order;
hv_hypercall_multi_failure++;
return -EINVAL;
}
return -EINVAL;
}
return 0;
}
static void enable_page_reporting(void)
{
int ret;
if (!hv_query_ext_cap(HV_EXT_CAPABILITY_MEMORY_COLD_DISCARD_HINT)) {
pr_debug("Cold memory discard hint not supported by Hyper-V\n");
return;
}
BUILD_BUG_ON(PAGE_REPORTING_CAPACITY > HV_MEMORY_HINT_MAX_GPA_PAGE_RANGES);
dm_device.pr_dev_info.report = hv_free_page_report;
/*
* We let the page_reporting_order parameter decide the order
* in the page_reporting code
*/
dm_device.pr_dev_info.order = 0;
ret = page_reporting_register(&dm_device.pr_dev_info);
if (ret < 0) {
dm_device.pr_dev_info.report = NULL;
pr_err("Failed to enable cold memory discard: %d\n", ret);
} else {
pr_info("Cold memory discard hint enabled with order %d\n",
page_reporting_order);
}
}
static void disable_page_reporting(void)
{
if (dm_device.pr_dev_info.report) {
page_reporting_unregister(&dm_device.pr_dev_info);
dm_device.pr_dev_info.report = NULL;
}
}
static int ballooning_enabled(void)
{
/*
* Disable ballooning if the page size is not 4k (HV_HYP_PAGE_SIZE),
* since currently it's unclear to us whether an unballoon request can
* make sure all page ranges are guest page size aligned.
*/
if (PAGE_SIZE != HV_HYP_PAGE_SIZE) {
pr_info("Ballooning disabled because page size is not 4096 bytes\n");
return 0;
}
return 1;
}
static int hot_add_enabled(void)
{
	/*
	 * Disable hot add on ARM64, because we currently rely on
	 * memory_add_physaddr_to_nid() to get the node id of a hot-add range,
	 * however ARM64's memory_add_physaddr_to_nid() always returns 0 and
	 * DM_MEM_HOT_ADD_REQUEST doesn't have the NUMA node information for
	 * add_memory().
	 */
if (IS_ENABLED(CONFIG_ARM64)) {
pr_info("Memory hot add disabled on ARM64\n");
return 0;
}
return 1;
}
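/*
 * Open the balloon VMBus channel, negotiate a Dynamic Memory protocol
 * version with the host and report our capabilities.
 */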
static int balloon_connect_vsp(struct hv_device *dev)
{
struct dm_version_request version_req;
struct dm_capabilities cap_msg;
unsigned long t;
int ret;
/*
* max_pkt_size should be large enough for one vmbus packet header plus
* our receive buffer size. Hyper-V sends messages up to
	 * HV_HYP_PAGE_SIZE bytes long on the balloon channel.
*/
dev->channel->max_pkt_size = HV_HYP_PAGE_SIZE * 2;
ret = vmbus_open(dev->channel, dm_ring_size, dm_ring_size, NULL, 0,
balloon_onchannelcallback, dev);
if (ret)
return ret;
/*
	 * Initiate the handshake with the host and negotiate
* a version that the host can support. We start with the
* highest version number and go down if the host cannot
* support it.
*/
memset(&version_req, 0, sizeof(struct dm_version_request));
version_req.hdr.type = DM_VERSION_REQUEST;
version_req.hdr.size = sizeof(struct dm_version_request);
version_req.hdr.trans_id = atomic_inc_return(&trans_id);
version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN10;
version_req.is_last_attempt = 0;
dm_device.version = version_req.version.version;
ret = vmbus_sendpacket(dev->channel, &version_req,
sizeof(struct dm_version_request),
(unsigned long)NULL, VM_PKT_DATA_INBAND, 0);
if (ret)
goto out;
t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
if (t == 0) {
ret = -ETIMEDOUT;
goto out;
}
/*
* If we could not negotiate a compatible version with the host
* fail the probe function.
*/
if (dm_device.state == DM_INIT_ERROR) {
ret = -EPROTO;
goto out;
}
pr_info("Using Dynamic Memory protocol version %u.%u\n",
DYNMEM_MAJOR_VERSION(dm_device.version),
DYNMEM_MINOR_VERSION(dm_device.version));
/*
* Now submit our capabilities to the host.
*/
memset(&cap_msg, 0, sizeof(struct dm_capabilities));
cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
cap_msg.hdr.size = sizeof(struct dm_capabilities);
cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
/*
* When hibernation (i.e. virtual ACPI S4 state) is enabled, the host
* currently still requires the bits to be set, so we have to add code
* to fail the host's hot-add and balloon up/down requests, if any.
*/
cap_msg.caps.cap_bits.balloon = ballooning_enabled();
cap_msg.caps.cap_bits.hot_add = hot_add_enabled();
	/*
	 * Specify our alignment requirements for memory hot-add: 128MB
	 * alignment (the value apparently encodes the alignment as a power
	 * of two in megabytes, i.e. 2^7 MB = 128 MB).
	 */
cap_msg.caps.cap_bits.hot_add_alignment = 7;
/*
* Currently the host does not use these
* values and we set them to what is done in the
* Windows driver.
*/
cap_msg.min_page_cnt = 0;
cap_msg.max_page_number = -1;
ret = vmbus_sendpacket(dev->channel, &cap_msg,
sizeof(struct dm_capabilities),
(unsigned long)NULL, VM_PKT_DATA_INBAND, 0);
if (ret)
goto out;
t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
if (t == 0) {
ret = -ETIMEDOUT;
goto out;
}
/*
* If the host does not like our capabilities,
* fail the probe function.
*/
if (dm_device.state == DM_INIT_ERROR) {
ret = -EPROTO;
goto out;
}
return 0;
out:
vmbus_close(dev->channel);
return ret;
}
/*
* DEBUGFS Interface
*/
#ifdef CONFIG_DEBUG_FS
/**
* hv_balloon_debug_show - shows statistics of balloon operations.
* @f: pointer to the &struct seq_file.
* @offset: ignored.
*
 * Provides the statistics of balloon operations that can be accessed via the
 * hv-balloon file in debugfs.
*
* Return: zero on success or an error code.
*/
static int hv_balloon_debug_show(struct seq_file *f, void *offset)
{
struct hv_dynmem_device *dm = f->private;
char *sname;
seq_printf(f, "%-22s: %u.%u\n", "host_version",
DYNMEM_MAJOR_VERSION(dm->version),
DYNMEM_MINOR_VERSION(dm->version));
seq_printf(f, "%-22s:", "capabilities");
if (ballooning_enabled())
seq_puts(f, " enabled");
if (hot_add_enabled())
seq_puts(f, " hot_add");
seq_puts(f, "\n");
seq_printf(f, "%-22s: %u", "state", dm->state);
switch (dm->state) {
case DM_INITIALIZING:
sname = "Initializing";
break;
case DM_INITIALIZED:
sname = "Initialized";
break;
case DM_BALLOON_UP:
sname = "Balloon Up";
break;
case DM_BALLOON_DOWN:
sname = "Balloon Down";
break;
case DM_HOT_ADD:
sname = "Hot Add";
break;
case DM_INIT_ERROR:
sname = "Error";
break;
default:
sname = "Unknown";
}
seq_printf(f, " (%s)\n", sname);
/* HV Page Size */
seq_printf(f, "%-22s: %ld\n", "page_size", HV_HYP_PAGE_SIZE);
/* Pages added with hot_add */
seq_printf(f, "%-22s: %u\n", "pages_added", dm->num_pages_added);
/* pages that are "onlined"/used from pages_added */
seq_printf(f, "%-22s: %u\n", "pages_onlined", dm->num_pages_onlined);
/* pages we have given back to host */
seq_printf(f, "%-22s: %u\n", "pages_ballooned", dm->num_pages_ballooned);
seq_printf(f, "%-22s: %lu\n", "total_pages_committed",
get_pages_committed(dm));
seq_printf(f, "%-22s: %llu\n", "max_dynamic_page_count",
dm->max_dynamic_page_count);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(hv_balloon_debug);
static void hv_balloon_debugfs_init(struct hv_dynmem_device *b)
{
debugfs_create_file("hv-balloon", 0444, NULL, b,
&hv_balloon_debug_fops);
}
static void hv_balloon_debugfs_exit(struct hv_dynmem_device *b)
{
debugfs_lookup_and_remove("hv-balloon", NULL);
}
#else
static inline void hv_balloon_debugfs_init(struct hv_dynmem_device *b)
{
}
static inline void hv_balloon_debugfs_exit(struct hv_dynmem_device *b)
{
}
#endif /* CONFIG_DEBUG_FS */
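/*
 * Probe the Dynamic Memory VMBus device: connect to the host, enable free
 * page reporting and start the per-device status-reporting thread.
 */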
static int balloon_probe(struct hv_device *dev,
const struct hv_vmbus_device_id *dev_id)
{
int ret;
allow_hibernation = hv_is_hibernation_supported();
if (allow_hibernation)
hot_add = false;
#ifdef CONFIG_MEMORY_HOTPLUG
do_hot_add = hot_add;
#else
do_hot_add = false;
#endif
dm_device.dev = dev;
dm_device.state = DM_INITIALIZING;
dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN8;
init_completion(&dm_device.host_event);
init_completion(&dm_device.config_event);
INIT_LIST_HEAD(&dm_device.ha_region_list);
spin_lock_init(&dm_device.ha_lock);
INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);
INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req);
dm_device.host_specified_ha_region = false;
#ifdef CONFIG_MEMORY_HOTPLUG
set_online_page_callback(&hv_online_page);
init_completion(&dm_device.ol_waitevent);
register_memory_notifier(&hv_memory_nb);
#endif
hv_set_drvdata(dev, &dm_device);
ret = balloon_connect_vsp(dev);
if (ret != 0)
goto connect_error;
enable_page_reporting();
dm_device.state = DM_INITIALIZED;
dm_device.thread =
kthread_run(dm_thread_func, &dm_device, "hv_balloon");
if (IS_ERR(dm_device.thread)) {
ret = PTR_ERR(dm_device.thread);
goto probe_error;
}
hv_balloon_debugfs_init(&dm_device);
return 0;
probe_error:
dm_device.state = DM_INIT_ERROR;
dm_device.thread = NULL;
disable_page_reporting();
vmbus_close(dev->channel);
connect_error:
#ifdef CONFIG_MEMORY_HOTPLUG
unregister_memory_notifier(&hv_memory_nb);
restore_online_page_callback(&hv_online_page);
#endif
return ret;
}
static void balloon_remove(struct hv_device *dev)
{
struct hv_dynmem_device *dm = hv_get_drvdata(dev);
struct hv_hotadd_state *has, *tmp;
struct hv_hotadd_gap *gap, *tmp_gap;
if (dm->num_pages_ballooned != 0)
pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);
hv_balloon_debugfs_exit(dm);
cancel_work_sync(&dm->balloon_wrk.wrk);
cancel_work_sync(&dm->ha_wrk.wrk);
kthread_stop(dm->thread);
/*
* This is to handle the case when balloon_resume()
* call has failed and some cleanup has been done as
* a part of the error handling.
*/
if (dm_device.state != DM_INIT_ERROR) {
disable_page_reporting();
vmbus_close(dev->channel);
#ifdef CONFIG_MEMORY_HOTPLUG
unregister_memory_notifier(&hv_memory_nb);
restore_online_page_callback(&hv_online_page);
#endif
}
guard(spinlock_irqsave)(&dm_device.ha_lock);
list_for_each_entry_safe(has, tmp, &dm->ha_region_list, list) {
list_for_each_entry_safe(gap, tmp_gap, &has->gap_list, list) {
list_del(&gap->list);
kfree(gap);
}
list_del(&has->list);
kfree(has);
}
}
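/*
 * Suspend: stop the worker thread, cancel pending work and close the
 * channel so the device can be quiesced for hibernation.
 */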
static int balloon_suspend(struct hv_device *hv_dev)
{
struct hv_dynmem_device *dm = hv_get_drvdata(hv_dev);
tasklet_disable(&hv_dev->channel->callback_event);
cancel_work_sync(&dm->balloon_wrk.wrk);
cancel_work_sync(&dm->ha_wrk.wrk);
if (dm->thread) {
kthread_stop(dm->thread);
dm->thread = NULL;
vmbus_close(hv_dev->channel);
}
tasklet_enable(&hv_dev->channel->callback_event);
return 0;
}
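/* Resume: reconnect to the host and restart the status-reporting thread. */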
static int balloon_resume(struct hv_device *dev)
{
int ret;
dm_device.state = DM_INITIALIZING;
ret = balloon_connect_vsp(dev);
if (ret != 0)
goto out;
dm_device.thread =
kthread_run(dm_thread_func, &dm_device, "hv_balloon");
if (IS_ERR(dm_device.thread)) {
ret = PTR_ERR(dm_device.thread);
dm_device.thread = NULL;
goto close_channel;
}
dm_device.state = DM_INITIALIZED;
return 0;
close_channel:
vmbus_close(dev->channel);
out:
dm_device.state = DM_INIT_ERROR;
disable_page_reporting();
#ifdef CONFIG_MEMORY_HOTPLUG
unregister_memory_notifier(&hv_memory_nb);
restore_online_page_callback(&hv_online_page);
#endif
return ret;
}
static const struct hv_vmbus_device_id id_table[] = {
/* Dynamic Memory Class ID */
/* 525074DC-8985-46e2-8057-A307DC18A502 */
{ HV_DM_GUID, },
{ },
};
MODULE_DEVICE_TABLE(vmbus, id_table);
static struct hv_driver balloon_drv = {
.name = "hv_balloon",
.id_table = id_table,
.probe = balloon_probe,
.remove = balloon_remove,
.suspend = balloon_suspend,
.resume = balloon_resume,
.driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
static int __init init_balloon_drv(void)
{
return vmbus_driver_register(&balloon_drv);
}
module_init(init_balloon_drv);
MODULE_DESCRIPTION("Hyper-V Balloon");
MODULE_LICENSE("GPL");
| linux-master | drivers/hv/hv_balloon.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (c) 2009, Microsoft Corporation.
*
* Authors:
* Haiyang Zhang <[email protected]>
* Hank Janssen <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/hyperv.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/set_memory.h>
#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"
struct vmbus_connection vmbus_connection = {
.conn_state = DISCONNECTED,
.unload_event = COMPLETION_INITIALIZER(
vmbus_connection.unload_event),
.next_gpadl_handle = ATOMIC_INIT(0xE1E10),
.ready_for_suspend_event = COMPLETION_INITIALIZER(
vmbus_connection.ready_for_suspend_event),
.ready_for_resume_event = COMPLETION_INITIALIZER(
vmbus_connection.ready_for_resume_event),
};
EXPORT_SYMBOL_GPL(vmbus_connection);
/*
* Negotiated protocol version with the host.
*/
__u32 vmbus_proto_version;
EXPORT_SYMBOL_GPL(vmbus_proto_version);
/*
* Table of VMBus versions listed from newest to oldest.
* VERSION_WIN7 and VERSION_WS2008 are no longer supported in
* Linux guests and are not listed.
*/
static __u32 vmbus_versions[] = {
VERSION_WIN10_V5_3,
VERSION_WIN10_V5_2,
VERSION_WIN10_V5_1,
VERSION_WIN10_V5,
VERSION_WIN10_V4_1,
VERSION_WIN10,
VERSION_WIN8_1,
VERSION_WIN8
};
/*
* Maximal VMBus protocol version guests can negotiate. Useful to cap the
 * VMBus version for testing and debugging purposes.
*/
static uint max_version = VERSION_WIN10_V5_3;
module_param(max_version, uint, S_IRUGO);
MODULE_PARM_DESC(max_version,
"Maximal VMBus protocol version which can be negotiated");
int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version)
{
int ret = 0;
struct vmbus_channel_initiate_contact *msg;
unsigned long flags;
init_completion(&msginfo->waitevent);
msg = (struct vmbus_channel_initiate_contact *)msginfo->msg;
memset(msg, 0, sizeof(*msg));
msg->header.msgtype = CHANNELMSG_INITIATE_CONTACT;
msg->vmbus_version_requested = version;
/*
	 * VMBus protocol 5.0 (VERSION_WIN10_V5) and higher require that we
* use VMBUS_MESSAGE_CONNECTION_ID_4 for the Initiate Contact Message,
* and for subsequent messages, we must use the Message Connection ID
* field in the host-returned Version Response Message. And, with
* VERSION_WIN10_V5 and higher, we don't use msg->interrupt_page, but we
* tell the host explicitly that we still use VMBUS_MESSAGE_SINT(2) for
* compatibility.
*
* On old hosts, we should always use VMBUS_MESSAGE_CONNECTION_ID (1).
*/
if (version >= VERSION_WIN10_V5) {
msg->msg_sint = VMBUS_MESSAGE_SINT;
msg->msg_vtl = ms_hyperv.vtl;
vmbus_connection.msg_conn_id = VMBUS_MESSAGE_CONNECTION_ID_4;
} else {
msg->interrupt_page = virt_to_phys(vmbus_connection.int_page);
vmbus_connection.msg_conn_id = VMBUS_MESSAGE_CONNECTION_ID;
}
/*
* shared_gpa_boundary is zero in non-SNP VMs, so it's safe to always
	 * bitwise OR it in.
*/
msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages[0]) |
ms_hyperv.shared_gpa_boundary;
msg->monitor_page2 = virt_to_phys(vmbus_connection.monitor_pages[1]) |
ms_hyperv.shared_gpa_boundary;
msg->target_vcpu = hv_cpu_number_to_vp_number(VMBUS_CONNECT_CPU);
/*
* Add to list before we send the request since we may
* receive the response before returning from this routine
*/
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_add_tail(&msginfo->msglistentry,
&vmbus_connection.chn_msg_list);
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
ret = vmbus_post_msg(msg,
sizeof(struct vmbus_channel_initiate_contact),
true);
trace_vmbus_negotiate_version(msg, ret);
if (ret != 0) {
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_del(&msginfo->msglistentry);
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock,
flags);
return ret;
}
/* Wait for the connection response */
wait_for_completion(&msginfo->waitevent);
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_del(&msginfo->msglistentry);
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
/* Check if successful */
if (msginfo->response.version_response.version_supported) {
vmbus_connection.conn_state = CONNECTED;
if (version >= VERSION_WIN10_V5)
vmbus_connection.msg_conn_id =
msginfo->response.version_response.msg_conn_id;
} else {
return -ECONNREFUSED;
}
return ret;
}
/*
* vmbus_connect - Sends a connect request on the partition service connection
*/
int vmbus_connect(void)
{
struct vmbus_channel_msginfo *msginfo = NULL;
int i, ret = 0;
__u32 version;
/* Initialize the vmbus connection */
vmbus_connection.conn_state = CONNECTING;
vmbus_connection.work_queue = create_workqueue("hv_vmbus_con");
if (!vmbus_connection.work_queue) {
ret = -ENOMEM;
goto cleanup;
}
vmbus_connection.rescind_work_queue =
create_workqueue("hv_vmbus_rescind");
if (!vmbus_connection.rescind_work_queue) {
ret = -ENOMEM;
goto cleanup;
}
vmbus_connection.ignore_any_offer_msg = false;
vmbus_connection.handle_primary_chan_wq =
create_workqueue("hv_pri_chan");
if (!vmbus_connection.handle_primary_chan_wq) {
ret = -ENOMEM;
goto cleanup;
}
vmbus_connection.handle_sub_chan_wq =
create_workqueue("hv_sub_chan");
if (!vmbus_connection.handle_sub_chan_wq) {
ret = -ENOMEM;
goto cleanup;
}
INIT_LIST_HEAD(&vmbus_connection.chn_msg_list);
spin_lock_init(&vmbus_connection.channelmsg_lock);
INIT_LIST_HEAD(&vmbus_connection.chn_list);
mutex_init(&vmbus_connection.channel_mutex);
	/*
	 * Set up the vmbus event connection for channel interrupt
	 * abstraction.
	 */
vmbus_connection.int_page = hv_alloc_hyperv_zeroed_page();
if (vmbus_connection.int_page == NULL) {
ret = -ENOMEM;
goto cleanup;
}
vmbus_connection.recv_int_page = vmbus_connection.int_page;
vmbus_connection.send_int_page =
(void *)((unsigned long)vmbus_connection.int_page +
(HV_HYP_PAGE_SIZE >> 1));
/*
* Setup the monitor notification facility. The 1st page for
* parent->child and the 2nd page for child->parent
*/
vmbus_connection.monitor_pages[0] = hv_alloc_hyperv_page();
vmbus_connection.monitor_pages[1] = hv_alloc_hyperv_page();
if ((vmbus_connection.monitor_pages[0] == NULL) ||
(vmbus_connection.monitor_pages[1] == NULL)) {
ret = -ENOMEM;
goto cleanup;
}
ret = set_memory_decrypted((unsigned long)
vmbus_connection.monitor_pages[0], 1);
ret |= set_memory_decrypted((unsigned long)
vmbus_connection.monitor_pages[1], 1);
if (ret)
goto cleanup;
/*
* Set_memory_decrypted() will change the memory contents if
* decryption occurs, so zero monitor pages here.
*/
memset(vmbus_connection.monitor_pages[0], 0x00, HV_HYP_PAGE_SIZE);
memset(vmbus_connection.monitor_pages[1], 0x00, HV_HYP_PAGE_SIZE);
msginfo = kzalloc(sizeof(*msginfo) +
sizeof(struct vmbus_channel_initiate_contact),
GFP_KERNEL);
if (msginfo == NULL) {
ret = -ENOMEM;
goto cleanup;
}
/*
* Negotiate a compatible VMBUS version number with the
* host. We start with the highest number we can support
* and work our way down until we negotiate a compatible
* version.
*/
for (i = 0; ; i++) {
if (i == ARRAY_SIZE(vmbus_versions)) {
ret = -EDOM;
goto cleanup;
}
version = vmbus_versions[i];
if (version > max_version)
continue;
ret = vmbus_negotiate_version(msginfo, version);
if (ret == -ETIMEDOUT)
goto cleanup;
if (vmbus_connection.conn_state == CONNECTED)
break;
}
if (hv_is_isolation_supported() && version < VERSION_WIN10_V5_2) {
pr_err("Invalid VMBus version %d.%d (expected >= %d.%d) from the host supporting isolation\n",
version >> 16, version & 0xFFFF, VERSION_WIN10_V5_2 >> 16, VERSION_WIN10_V5_2 & 0xFFFF);
ret = -EINVAL;
goto cleanup;
}
vmbus_proto_version = version;
pr_info("Vmbus version:%d.%d\n",
version >> 16, version & 0xFFFF);
vmbus_connection.channels = kcalloc(MAX_CHANNEL_RELIDS,
sizeof(struct vmbus_channel *),
GFP_KERNEL);
if (vmbus_connection.channels == NULL) {
ret = -ENOMEM;
goto cleanup;
}
kfree(msginfo);
return 0;
cleanup:
pr_err("Unable to connect to host\n");
vmbus_connection.conn_state = DISCONNECTED;
vmbus_disconnect();
kfree(msginfo);
return ret;
}
void vmbus_disconnect(void)
{
/*
* First send the unload request to the host.
*/
vmbus_initiate_unload(false);
if (vmbus_connection.handle_sub_chan_wq)
destroy_workqueue(vmbus_connection.handle_sub_chan_wq);
if (vmbus_connection.handle_primary_chan_wq)
destroy_workqueue(vmbus_connection.handle_primary_chan_wq);
if (vmbus_connection.rescind_work_queue)
destroy_workqueue(vmbus_connection.rescind_work_queue);
if (vmbus_connection.work_queue)
destroy_workqueue(vmbus_connection.work_queue);
if (vmbus_connection.int_page) {
hv_free_hyperv_page(vmbus_connection.int_page);
vmbus_connection.int_page = NULL;
}
set_memory_encrypted((unsigned long)vmbus_connection.monitor_pages[0], 1);
set_memory_encrypted((unsigned long)vmbus_connection.monitor_pages[1], 1);
hv_free_hyperv_page(vmbus_connection.monitor_pages[0]);
hv_free_hyperv_page(vmbus_connection.monitor_pages[1]);
vmbus_connection.monitor_pages[0] = NULL;
vmbus_connection.monitor_pages[1] = NULL;
}
/*
* relid2channel - Get the channel object given its
* child relative id (ie channel id)
*/
struct vmbus_channel *relid2channel(u32 relid)
{
if (vmbus_connection.channels == NULL) {
pr_warn_once("relid2channel: relid=%d: No channels mapped!\n", relid);
return NULL;
}
if (WARN_ON(relid >= MAX_CHANNEL_RELIDS))
return NULL;
return READ_ONCE(vmbus_connection.channels[relid]);
}
/*
* vmbus_on_event - Process a channel event notification
*
 * For batched channels (default), optimize host-to-guest signaling
 * by ensuring:
 * 1. While reading the channel, we disable interrupts from the host.
* 2. Ensure that we process all posted messages from the host
* before returning from this callback.
* 3. Once we return, enable signaling from the host. Once this
* state is set we check to see if additional packets are
* available to read. In this case we repeat the process.
* If this tasklet has been running for a long time
* then reschedule ourselves.
*/
void vmbus_on_event(unsigned long data)
{
struct vmbus_channel *channel = (void *) data;
void (*callback_fn)(void *context);
trace_vmbus_on_event(channel);
hv_debug_delay_test(channel, INTERRUPT_DELAY);
	/*
	 * A channel once created is persistent even when there is no driver
	 * handling the device. An unloading driver sets the
	 * onchannel_callback to NULL.
	 */
callback_fn = READ_ONCE(channel->onchannel_callback);
if (unlikely(!callback_fn))
return;
(*callback_fn)(channel->channel_callback_context);
if (channel->callback_mode != HV_CALL_BATCHED)
return;
if (likely(hv_end_read(&channel->inbound) == 0))
return;
hv_begin_read(&channel->inbound);
tasklet_schedule(&channel->callback_event);
}
/*
* vmbus_post_msg - Send a msg on the vmbus's message connection
*/
int vmbus_post_msg(void *buffer, size_t buflen, bool can_sleep)
{
struct vmbus_channel_message_header *hdr;
union hv_connection_id conn_id;
int ret = 0;
int retries = 0;
u32 usec = 1;
conn_id.asu32 = 0;
conn_id.u.id = vmbus_connection.msg_conn_id;
/*
* hv_post_message() can have transient failures because of
* insufficient resources. Retry the operation a couple of
* times before giving up.
*/
while (retries < 100) {
ret = hv_post_message(conn_id, 1, buffer, buflen);
switch (ret) {
case HV_STATUS_INVALID_CONNECTION_ID:
/*
* See vmbus_negotiate_version(): VMBus protocol 5.0
* and higher require that we must use
* VMBUS_MESSAGE_CONNECTION_ID_4 for the Initiate
* Contact message, but on old hosts that only
* support VMBus protocol 4.0 or lower, here we get
* HV_STATUS_INVALID_CONNECTION_ID and we should
* return an error immediately without retrying.
*/
hdr = buffer;
if (hdr->msgtype == CHANNELMSG_INITIATE_CONTACT)
return -EINVAL;
/*
* We could get this if we send messages too
* frequently.
*/
ret = -EAGAIN;
break;
case HV_STATUS_INSUFFICIENT_MEMORY:
case HV_STATUS_INSUFFICIENT_BUFFERS:
ret = -ENOBUFS;
break;
case HV_STATUS_SUCCESS:
return ret;
default:
pr_err("hv_post_msg() failed; error code:%d\n", ret);
return -EINVAL;
}
retries++;
if (can_sleep && usec > 1000)
msleep(usec / 1000);
else if (usec < MAX_UDELAY_MS * 1000)
udelay(usec);
else
mdelay(usec / 1000);
if (retries < 22)
usec *= 2;
}
return ret;
}
/*
* vmbus_set_event - Send an event notification to the parent
*/
void vmbus_set_event(struct vmbus_channel *channel)
{
u32 child_relid = channel->offermsg.child_relid;
if (!channel->is_dedicated_interrupt)
vmbus_send_interrupt(child_relid);
++channel->sig_events;
if (ms_hyperv.paravisor_present) {
if (hv_isolation_type_snp())
hv_ghcb_hypercall(HVCALL_SIGNAL_EVENT, &channel->sig_event,
NULL, sizeof(channel->sig_event));
else if (hv_isolation_type_tdx())
hv_tdx_hypercall(HVCALL_SIGNAL_EVENT | HV_HYPERCALL_FAST_BIT,
channel->sig_event, 0);
else
WARN_ON_ONCE(1);
} else {
hv_do_fast_hypercall8(HVCALL_SIGNAL_EVENT, channel->sig_event);
}
}
EXPORT_SYMBOL_GPL(vmbus_set_event);
| linux-master | drivers/hv/connection.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Kernel/userspace transport abstraction for Hyper-V util driver.
*
* Copyright (C) 2015, Vitaly Kuznetsov <[email protected]>
*/
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include "hyperv_vmbus.h"
#include "hv_utils_transport.h"
static DEFINE_SPINLOCK(hvt_list_lock);
static LIST_HEAD(hvt_list);
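/* Drop any pending outgoing message and notify the client of a transport reset. */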
static void hvt_reset(struct hvutil_transport *hvt)
{
kfree(hvt->outmsg);
hvt->outmsg = NULL;
hvt->outmsg_len = 0;
if (hvt->on_reset)
hvt->on_reset();
}
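/*
 * read() on the char device: block until a host->daemon message is
 * pending and copy it to user space.
 */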
static ssize_t hvt_op_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct hvutil_transport *hvt;
int ret;
hvt = container_of(file->f_op, struct hvutil_transport, fops);
if (wait_event_interruptible(hvt->outmsg_q, hvt->outmsg_len > 0 ||
hvt->mode != HVUTIL_TRANSPORT_CHARDEV))
return -EINTR;
mutex_lock(&hvt->lock);
if (hvt->mode == HVUTIL_TRANSPORT_DESTROY) {
ret = -EBADF;
goto out_unlock;
}
if (!hvt->outmsg) {
ret = -EAGAIN;
goto out_unlock;
}
if (count < hvt->outmsg_len) {
ret = -EINVAL;
goto out_unlock;
}
if (!copy_to_user(buf, hvt->outmsg, hvt->outmsg_len))
ret = hvt->outmsg_len;
else
ret = -EFAULT;
kfree(hvt->outmsg);
hvt->outmsg = NULL;
hvt->outmsg_len = 0;
if (hvt->on_read)
hvt->on_read();
hvt->on_read = NULL;
out_unlock:
mutex_unlock(&hvt->lock);
return ret;
}
static ssize_t hvt_op_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct hvutil_transport *hvt;
u8 *inmsg;
int ret;
hvt = container_of(file->f_op, struct hvutil_transport, fops);
inmsg = memdup_user(buf, count);
if (IS_ERR(inmsg))
return PTR_ERR(inmsg);
if (hvt->mode == HVUTIL_TRANSPORT_DESTROY)
ret = -EBADF;
else
ret = hvt->on_msg(inmsg, count);
kfree(inmsg);
return ret ? ret : count;
}
static __poll_t hvt_op_poll(struct file *file, poll_table *wait)
{
struct hvutil_transport *hvt;
hvt = container_of(file->f_op, struct hvutil_transport, fops);
poll_wait(file, &hvt->outmsg_q, wait);
if (hvt->mode == HVUTIL_TRANSPORT_DESTROY)
return EPOLLERR | EPOLLHUP;
if (hvt->outmsg_len > 0)
return EPOLLIN | EPOLLRDNORM;
return 0;
}
static int hvt_op_open(struct inode *inode, struct file *file)
{
struct hvutil_transport *hvt;
int ret = 0;
bool issue_reset = false;
hvt = container_of(file->f_op, struct hvutil_transport, fops);
mutex_lock(&hvt->lock);
if (hvt->mode == HVUTIL_TRANSPORT_DESTROY) {
ret = -EBADF;
} else if (hvt->mode == HVUTIL_TRANSPORT_INIT) {
		/*
		 * Switching to CHARDEV mode. We switch back to INIT when the
		 * device gets released.
		 */
		hvt->mode = HVUTIL_TRANSPORT_CHARDEV;
	} else if (hvt->mode == HVUTIL_TRANSPORT_NETLINK) {
/*
* We're switching from netlink communication to using char
* device. Issue the reset first.
*/
issue_reset = true;
hvt->mode = HVUTIL_TRANSPORT_CHARDEV;
} else {
ret = -EBUSY;
}
if (issue_reset)
hvt_reset(hvt);
mutex_unlock(&hvt->lock);
return ret;
}
static void hvt_transport_free(struct hvutil_transport *hvt)
{
misc_deregister(&hvt->mdev);
kfree(hvt->outmsg);
kfree(hvt);
}
static int hvt_op_release(struct inode *inode, struct file *file)
{
struct hvutil_transport *hvt;
int mode_old;
hvt = container_of(file->f_op, struct hvutil_transport, fops);
mutex_lock(&hvt->lock);
mode_old = hvt->mode;
if (hvt->mode != HVUTIL_TRANSPORT_DESTROY)
hvt->mode = HVUTIL_TRANSPORT_INIT;
/*
	 * Clean up message buffers to avoid spurious messages when the daemon
* connects back.
*/
hvt_reset(hvt);
if (mode_old == HVUTIL_TRANSPORT_DESTROY)
complete(&hvt->release);
mutex_unlock(&hvt->lock);
return 0;
}
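/*
 * Netlink (connector) receive callback: find the transport matching the
 * connector id and pass the daemon's message to its on_msg() handler.
 */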
static void hvt_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
{
struct hvutil_transport *hvt, *hvt_found = NULL;
spin_lock(&hvt_list_lock);
list_for_each_entry(hvt, &hvt_list, list) {
if (hvt->cn_id.idx == msg->id.idx &&
hvt->cn_id.val == msg->id.val) {
hvt_found = hvt;
break;
}
}
spin_unlock(&hvt_list_lock);
if (!hvt_found) {
pr_warn("hvt_cn_callback: spurious message received!\n");
return;
}
/*
* Switching to NETLINK mode. Switching to CHARDEV happens when someone
* opens the device.
*/
mutex_lock(&hvt->lock);
if (hvt->mode == HVUTIL_TRANSPORT_INIT)
hvt->mode = HVUTIL_TRANSPORT_NETLINK;
if (hvt->mode == HVUTIL_TRANSPORT_NETLINK)
hvt_found->on_msg(msg->data, msg->len);
else
pr_warn("hvt_cn_callback: unexpected netlink message!\n");
mutex_unlock(&hvt->lock);
}
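/*
 * Send a host->daemon message either over netlink or by queueing it for
 * the char device reader; @on_read_cb, if set, is called once the message
 * has been handed off (immediately for netlink, after the daemon reads it
 * in char device mode).
 */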
int hvutil_transport_send(struct hvutil_transport *hvt, void *msg, int len,
void (*on_read_cb)(void))
{
struct cn_msg *cn_msg;
int ret = 0;
if (hvt->mode == HVUTIL_TRANSPORT_INIT ||
hvt->mode == HVUTIL_TRANSPORT_DESTROY) {
return -EINVAL;
} else if (hvt->mode == HVUTIL_TRANSPORT_NETLINK) {
cn_msg = kzalloc(sizeof(*cn_msg) + len, GFP_ATOMIC);
if (!cn_msg)
return -ENOMEM;
cn_msg->id.idx = hvt->cn_id.idx;
cn_msg->id.val = hvt->cn_id.val;
cn_msg->len = len;
memcpy(cn_msg->data, msg, len);
ret = cn_netlink_send(cn_msg, 0, 0, GFP_ATOMIC);
kfree(cn_msg);
/*
* We don't know when netlink messages are delivered but unlike
		 * in CHARDEV mode we're not blocked and we can send the next
		 * message right away.
*/
if (on_read_cb)
on_read_cb();
return ret;
}
/* HVUTIL_TRANSPORT_CHARDEV */
mutex_lock(&hvt->lock);
if (hvt->mode != HVUTIL_TRANSPORT_CHARDEV) {
ret = -EINVAL;
goto out_unlock;
}
if (hvt->outmsg) {
/* Previous message wasn't received */
ret = -EFAULT;
goto out_unlock;
}
hvt->outmsg = kzalloc(len, GFP_KERNEL);
if (hvt->outmsg) {
memcpy(hvt->outmsg, msg, len);
hvt->outmsg_len = len;
hvt->on_read = on_read_cb;
wake_up_interruptible(&hvt->outmsg_q);
} else
ret = -ENOMEM;
out_unlock:
mutex_unlock(&hvt->lock);
return ret;
}
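/*
 * Create a transport: register a misc char device named @name and, if a
 * connector id is given, a netlink callback as an alternative channel to
 * the user-space daemon.
 */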
struct hvutil_transport *hvutil_transport_init(const char *name,
u32 cn_idx, u32 cn_val,
int (*on_msg)(void *, int),
void (*on_reset)(void))
{
struct hvutil_transport *hvt;
hvt = kzalloc(sizeof(*hvt), GFP_KERNEL);
if (!hvt)
return NULL;
hvt->cn_id.idx = cn_idx;
hvt->cn_id.val = cn_val;
hvt->mdev.minor = MISC_DYNAMIC_MINOR;
hvt->mdev.name = name;
hvt->fops.owner = THIS_MODULE;
hvt->fops.read = hvt_op_read;
hvt->fops.write = hvt_op_write;
hvt->fops.poll = hvt_op_poll;
hvt->fops.open = hvt_op_open;
hvt->fops.release = hvt_op_release;
hvt->mdev.fops = &hvt->fops;
init_waitqueue_head(&hvt->outmsg_q);
mutex_init(&hvt->lock);
init_completion(&hvt->release);
spin_lock(&hvt_list_lock);
list_add(&hvt->list, &hvt_list);
spin_unlock(&hvt_list_lock);
hvt->on_msg = on_msg;
hvt->on_reset = on_reset;
if (misc_register(&hvt->mdev))
goto err_free_hvt;
/* Use cn_id.idx/cn_id.val to determine if we need to setup netlink */
if (hvt->cn_id.idx > 0 && hvt->cn_id.val > 0 &&
cn_add_callback(&hvt->cn_id, name, hvt_cn_callback))
goto err_free_hvt;
return hvt;
err_free_hvt:
spin_lock(&hvt_list_lock);
list_del(&hvt->list);
spin_unlock(&hvt_list_lock);
kfree(hvt);
return NULL;
}
void hvutil_transport_destroy(struct hvutil_transport *hvt)
{
int mode_old;
mutex_lock(&hvt->lock);
mode_old = hvt->mode;
hvt->mode = HVUTIL_TRANSPORT_DESTROY;
wake_up_interruptible(&hvt->outmsg_q);
mutex_unlock(&hvt->lock);
/*
* In case we were in 'chardev' mode we still have an open fd so we
	 * have to defer freeing the device. The netlink interface can be freed
* now.
*/
spin_lock(&hvt_list_lock);
list_del(&hvt->list);
spin_unlock(&hvt_list_lock);
if (hvt->cn_id.idx > 0 && hvt->cn_id.val > 0)
cn_del_callback(&hvt->cn_id);
if (mode_old == HVUTIL_TRANSPORT_CHARDEV)
wait_for_completion(&hvt->release);
hvt_transport_free(hvt);
}
| linux-master | drivers/hv/hv_utils_transport.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2009, Microsoft Corporation.
*
* Authors:
* Haiyang Zhang <[email protected]>
* Hank Janssen <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/hyperv.h>
#include <asm/mshyperv.h>
#include <linux/sched/isolation.h>
#include "hyperv_vmbus.h"
static void init_vp_index(struct vmbus_channel *channel);
const struct vmbus_device vmbus_devs[] = {
/* IDE */
{ .dev_type = HV_IDE,
HV_IDE_GUID,
.perf_device = true,
.allowed_in_isolated = false,
},
/* SCSI */
{ .dev_type = HV_SCSI,
HV_SCSI_GUID,
.perf_device = true,
.allowed_in_isolated = true,
},
/* Fibre Channel */
{ .dev_type = HV_FC,
HV_SYNTHFC_GUID,
.perf_device = true,
.allowed_in_isolated = false,
},
/* Synthetic NIC */
{ .dev_type = HV_NIC,
HV_NIC_GUID,
.perf_device = true,
.allowed_in_isolated = true,
},
/* Network Direct */
{ .dev_type = HV_ND,
HV_ND_GUID,
.perf_device = true,
.allowed_in_isolated = false,
},
/* PCIE */
{ .dev_type = HV_PCIE,
HV_PCIE_GUID,
.perf_device = false,
.allowed_in_isolated = true,
},
/* Synthetic Frame Buffer */
{ .dev_type = HV_FB,
HV_SYNTHVID_GUID,
.perf_device = false,
.allowed_in_isolated = false,
},
/* Synthetic Keyboard */
{ .dev_type = HV_KBD,
HV_KBD_GUID,
.perf_device = false,
.allowed_in_isolated = false,
},
/* Synthetic MOUSE */
{ .dev_type = HV_MOUSE,
HV_MOUSE_GUID,
.perf_device = false,
.allowed_in_isolated = false,
},
/* KVP */
{ .dev_type = HV_KVP,
HV_KVP_GUID,
.perf_device = false,
.allowed_in_isolated = false,
},
/* Time Synch */
{ .dev_type = HV_TS,
HV_TS_GUID,
.perf_device = false,
.allowed_in_isolated = true,
},
/* Heartbeat */
{ .dev_type = HV_HB,
HV_HEART_BEAT_GUID,
.perf_device = false,
.allowed_in_isolated = true,
},
/* Shutdown */
{ .dev_type = HV_SHUTDOWN,
HV_SHUTDOWN_GUID,
.perf_device = false,
.allowed_in_isolated = true,
},
/* File copy */
{ .dev_type = HV_FCOPY,
HV_FCOPY_GUID,
.perf_device = false,
.allowed_in_isolated = false,
},
/* Backup */
{ .dev_type = HV_BACKUP,
HV_VSS_GUID,
.perf_device = false,
.allowed_in_isolated = false,
},
/* Dynamic Memory */
{ .dev_type = HV_DM,
HV_DM_GUID,
.perf_device = false,
.allowed_in_isolated = false,
},
/* Unknown GUID */
{ .dev_type = HV_UNKNOWN,
.perf_device = false,
.allowed_in_isolated = false,
},
};
static const struct {
guid_t guid;
} vmbus_unsupported_devs[] = {
{ HV_AVMA1_GUID },
{ HV_AVMA2_GUID },
{ HV_RDV_GUID },
{ HV_IMC_GUID },
};
/*
* The rescinded channel may be blocked waiting for a response from the host;
* take care of that.
*/
static void vmbus_rescind_cleanup(struct vmbus_channel *channel)
{
struct vmbus_channel_msginfo *msginfo;
unsigned long flags;
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
channel->rescind = true;
list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
msglistentry) {
if (msginfo->waiting_channel == channel) {
complete(&msginfo->waitevent);
break;
}
}
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
static bool is_unsupported_vmbus_devs(const guid_t *guid)
{
int i;
for (i = 0; i < ARRAY_SIZE(vmbus_unsupported_devs); i++)
if (guid_equal(guid, &vmbus_unsupported_devs[i].guid))
return true;
return false;
}
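/*
 * Map a channel's interface GUID to an index in vmbus_devs[]; return
 * HV_UNKNOWN for hvsock channels and unrecognized or unsupported GUIDs.
 */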
static u16 hv_get_dev_type(const struct vmbus_channel *channel)
{
const guid_t *guid = &channel->offermsg.offer.if_type;
u16 i;
if (is_hvsock_channel(channel) || is_unsupported_vmbus_devs(guid))
return HV_UNKNOWN;
for (i = HV_IDE; i < HV_UNKNOWN; i++) {
if (guid_equal(guid, &vmbus_devs[i].guid))
return i;
}
pr_info("Unknown GUID: %pUl\n", guid);
return i;
}
/**
* vmbus_prep_negotiate_resp() - Create default response for Negotiate message
* @icmsghdrp: Pointer to msg header structure
* @buf: Raw buffer channel data
* @buflen: Length of the raw buffer channel data.
* @fw_version: The framework versions we can support.
* @fw_vercnt: The size of @fw_version.
* @srv_version: The service versions we can support.
* @srv_vercnt: The size of @srv_version.
* @nego_fw_version: The selected framework version.
* @nego_srv_version: The selected service version.
*
* Note: Versions are given in decreasing order.
*
* Set up and fill in default negotiate response message.
* Mainly used by Hyper-V drivers.
*/
bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
u32 buflen, const int *fw_version, int fw_vercnt,
const int *srv_version, int srv_vercnt,
int *nego_fw_version, int *nego_srv_version)
{
int icframe_major, icframe_minor;
int icmsg_major, icmsg_minor;
int fw_major, fw_minor;
int srv_major, srv_minor;
int i, j;
bool found_match = false;
struct icmsg_negotiate *negop;
/* Check that there's enough space for icframe_vercnt, icmsg_vercnt */
if (buflen < ICMSG_HDR + offsetof(struct icmsg_negotiate, reserved)) {
pr_err_ratelimited("Invalid icmsg negotiate\n");
return false;
}
icmsghdrp->icmsgsize = 0x10;
negop = (struct icmsg_negotiate *)&buf[ICMSG_HDR];
icframe_major = negop->icframe_vercnt;
icframe_minor = 0;
icmsg_major = negop->icmsg_vercnt;
icmsg_minor = 0;
/* Validate negop packet */
if (icframe_major > IC_VERSION_NEGOTIATION_MAX_VER_COUNT ||
icmsg_major > IC_VERSION_NEGOTIATION_MAX_VER_COUNT ||
ICMSG_NEGOTIATE_PKT_SIZE(icframe_major, icmsg_major) > buflen) {
pr_err_ratelimited("Invalid icmsg negotiate - icframe_major: %u, icmsg_major: %u\n",
icframe_major, icmsg_major);
goto fw_error;
}
/*
* Select the framework version number we will
* support.
*/
for (i = 0; i < fw_vercnt; i++) {
fw_major = (fw_version[i] >> 16);
fw_minor = (fw_version[i] & 0xFFFF);
for (j = 0; j < negop->icframe_vercnt; j++) {
if ((negop->icversion_data[j].major == fw_major) &&
(negop->icversion_data[j].minor == fw_minor)) {
icframe_major = negop->icversion_data[j].major;
icframe_minor = negop->icversion_data[j].minor;
found_match = true;
break;
}
}
if (found_match)
break;
}
if (!found_match)
goto fw_error;
found_match = false;
for (i = 0; i < srv_vercnt; i++) {
srv_major = (srv_version[i] >> 16);
srv_minor = (srv_version[i] & 0xFFFF);
for (j = negop->icframe_vercnt;
(j < negop->icframe_vercnt + negop->icmsg_vercnt);
j++) {
if ((negop->icversion_data[j].major == srv_major) &&
(negop->icversion_data[j].minor == srv_minor)) {
icmsg_major = negop->icversion_data[j].major;
icmsg_minor = negop->icversion_data[j].minor;
found_match = true;
break;
}
}
if (found_match)
break;
}
/*
* Respond with the framework and service
* version numbers we can support.
*/
fw_error:
if (!found_match) {
negop->icframe_vercnt = 0;
negop->icmsg_vercnt = 0;
} else {
negop->icframe_vercnt = 1;
negop->icmsg_vercnt = 1;
}
if (nego_fw_version)
*nego_fw_version = (icframe_major << 16) | icframe_minor;
if (nego_srv_version)
*nego_srv_version = (icmsg_major << 16) | icmsg_minor;
negop->icversion_data[0].major = icframe_major;
negop->icversion_data[0].minor = icframe_minor;
negop->icversion_data[1].major = icmsg_major;
negop->icversion_data[1].minor = icmsg_minor;
return found_match;
}
EXPORT_SYMBOL_GPL(vmbus_prep_negotiate_resp);
/*
* alloc_channel - Allocate and initialize a vmbus channel object
*/
static struct vmbus_channel *alloc_channel(void)
{
struct vmbus_channel *channel;
channel = kzalloc(sizeof(*channel), GFP_ATOMIC);
if (!channel)
return NULL;
spin_lock_init(&channel->sched_lock);
init_completion(&channel->rescind_event);
INIT_LIST_HEAD(&channel->sc_list);
tasklet_init(&channel->callback_event,
vmbus_on_event, (unsigned long)channel);
hv_ringbuffer_pre_init(channel);
return channel;
}
/*
* free_channel - Release the resources used by the vmbus channel object
*/
static void free_channel(struct vmbus_channel *channel)
{
tasklet_kill(&channel->callback_event);
vmbus_remove_channel_attr_group(channel);
kobject_put(&channel->kobj);
}
void vmbus_channel_map_relid(struct vmbus_channel *channel)
{
if (WARN_ON(channel->offermsg.child_relid >= MAX_CHANNEL_RELIDS))
return;
/*
* The mapping of the channel's relid is visible from the CPUs that
* execute vmbus_chan_sched() by the time that vmbus_chan_sched() will
* execute:
*
* (a) In the "normal (i.e., not resuming from hibernation)" path,
* the full barrier in virt_store_mb() guarantees that the store
* is propagated to all CPUs before the add_channel_work work
* is queued. In turn, add_channel_work is queued before the
* channel's ring buffer is allocated/initialized and the
* OPENCHANNEL message for the channel is sent in vmbus_open().
* Hyper-V won't start sending the interrupts for the channel
* before the OPENCHANNEL message is acked. The memory barrier
* in vmbus_chan_sched() -> sync_test_and_clear_bit() ensures
* that vmbus_chan_sched() must find the channel's relid in
* recv_int_page before retrieving the channel pointer from the
* array of channels.
*
* (b) In the "resuming from hibernation" path, the virt_store_mb()
* guarantees that the store is propagated to all CPUs before
* the VMBus connection is marked as ready for the resume event
* (cf. check_ready_for_resume_event()). The interrupt handler
* of the VMBus driver and vmbus_chan_sched() can not run before
* vmbus_bus_resume() has completed execution (cf. resume_noirq).
*/
virt_store_mb(
vmbus_connection.channels[channel->offermsg.child_relid],
channel);
}
void vmbus_channel_unmap_relid(struct vmbus_channel *channel)
{
if (WARN_ON(channel->offermsg.child_relid >= MAX_CHANNEL_RELIDS))
return;
WRITE_ONCE(
vmbus_connection.channels[channel->offermsg.child_relid],
NULL);
}
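/* Tell the host that the guest no longer uses the given channel relid. */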
static void vmbus_release_relid(u32 relid)
{
struct vmbus_channel_relid_released msg;
int ret;
memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
msg.child_relid = relid;
msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
ret = vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released),
true);
trace_vmbus_release_relid(&msg, ret);
}
void hv_process_channel_removal(struct vmbus_channel *channel)
{
lockdep_assert_held(&vmbus_connection.channel_mutex);
BUG_ON(!channel->rescind);
/*
* hv_process_channel_removal() could find INVALID_RELID only for
* hv_sock channels. See the inline comments in vmbus_onoffer().
*/
WARN_ON(channel->offermsg.child_relid == INVALID_RELID &&
!is_hvsock_channel(channel));
/*
* Upon suspend, an in-use hv_sock channel is removed from the array of
* channels and the relid is invalidated. After hibernation, when the
* user-space application destroys the channel, it's unnecessary and
* unsafe to remove the channel from the array of channels. See also
* the inline comments before the call of vmbus_release_relid() below.
*/
if (channel->offermsg.child_relid != INVALID_RELID)
vmbus_channel_unmap_relid(channel);
if (channel->primary_channel == NULL)
list_del(&channel->listentry);
else
list_del(&channel->sc_list);
/*
* If this is a "perf" channel, updates the hv_numa_map[] masks so that
* init_vp_index() can (re-)use the CPU.
*/
if (hv_is_perf_channel(channel))
hv_clear_allocated_cpu(channel->target_cpu);
/*
* Upon suspend, an in-use hv_sock channel is marked as "rescinded" and
* the relid is invalidated; after hibernation, when the user-space app
* destroys the channel, the relid is INVALID_RELID, and in this case
* it's unnecessary and unsafe to release the old relid, since the same
* relid can refer to a completely different channel now.
*/
if (channel->offermsg.child_relid != INVALID_RELID)
vmbus_release_relid(channel->offermsg.child_relid);
free_channel(channel);
}
void vmbus_free_channels(void)
{
struct vmbus_channel *channel, *tmp;
list_for_each_entry_safe(channel, tmp, &vmbus_connection.chn_list,
listentry) {
/* hv_process_channel_removal() needs this */
channel->rescind = true;
vmbus_device_unregister(channel->device_obj);
}
}
/* Note: the function can run concurrently for primary/sub channels. */
static void vmbus_add_channel_work(struct work_struct *work)
{
struct vmbus_channel *newchannel =
container_of(work, struct vmbus_channel, add_channel_work);
struct vmbus_channel *primary_channel = newchannel->primary_channel;
int ret;
/*
* This state is used to indicate a successful open
* so that when we do close the channel normally, we
* can cleanup properly.
*/
newchannel->state = CHANNEL_OPEN_STATE;
if (primary_channel != NULL) {
/* newchannel is a sub-channel. */
struct hv_device *dev = primary_channel->device_obj;
if (vmbus_add_channel_kobj(dev, newchannel))
goto err_deq_chan;
if (primary_channel->sc_creation_callback != NULL)
primary_channel->sc_creation_callback(newchannel);
newchannel->probe_done = true;
return;
}
/*
* Start the process of binding the primary channel to the driver
*/
newchannel->device_obj = vmbus_device_create(
&newchannel->offermsg.offer.if_type,
&newchannel->offermsg.offer.if_instance,
newchannel);
if (!newchannel->device_obj)
goto err_deq_chan;
newchannel->device_obj->device_id = newchannel->device_id;
/*
* Add the new device to the bus. This will kick off device-driver
* binding which eventually invokes the device driver's AddDevice()
* method.
*
* If vmbus_device_register() fails, the 'device_obj' is freed in
* vmbus_device_release() as called by device_unregister() in the
* error path of vmbus_device_register(). In the outside error
* path, there's no need to free it.
*/
ret = vmbus_device_register(newchannel->device_obj);
if (ret != 0) {
pr_err("unable to add child device object (relid %d)\n",
newchannel->offermsg.child_relid);
goto err_deq_chan;
}
newchannel->probe_done = true;
return;
err_deq_chan:
mutex_lock(&vmbus_connection.channel_mutex);
/*
* We need to set the flag, otherwise
* vmbus_onoffer_rescind() can be blocked.
*/
newchannel->probe_done = true;
if (primary_channel == NULL)
list_del(&newchannel->listentry);
else
list_del(&newchannel->sc_list);
/* vmbus_process_offer() has mapped the channel. */
vmbus_channel_unmap_relid(newchannel);
mutex_unlock(&vmbus_connection.channel_mutex);
vmbus_release_relid(newchannel->offermsg.child_relid);
free_channel(newchannel);
}
/*
* vmbus_process_offer - Process the offer by creating a channel/device
* associated with this offer
*/
static void vmbus_process_offer(struct vmbus_channel *newchannel)
{
struct vmbus_channel *channel;
struct workqueue_struct *wq;
bool fnew = true;
/*
* Synchronize vmbus_process_offer() and CPU hotplugging:
*
* CPU1 CPU2
*
* [vmbus_process_offer()] [Hot removal of the CPU]
*
* CPU_READ_LOCK CPUS_WRITE_LOCK
* LOAD cpu_online_mask SEARCH chn_list
* STORE target_cpu LOAD target_cpu
* INSERT chn_list STORE cpu_online_mask
* CPUS_READ_UNLOCK CPUS_WRITE_UNLOCK
*
	 * Forbids: CPU1's LOAD from *not* seeing CPU2's STORE &&
* CPU2's SEARCH from *not* seeing CPU1's INSERT
*
* Forbids: CPU2's SEARCH from seeing CPU1's INSERT &&
	 *          CPU2's LOAD from *not* seeing CPU1's STORE
*/
cpus_read_lock();
/*
* Serializes the modifications of the chn_list list as well as
* the accesses to next_numa_node_id in init_vp_index().
*/
mutex_lock(&vmbus_connection.channel_mutex);
list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
if (guid_equal(&channel->offermsg.offer.if_type,
&newchannel->offermsg.offer.if_type) &&
guid_equal(&channel->offermsg.offer.if_instance,
&newchannel->offermsg.offer.if_instance)) {
fnew = false;
newchannel->primary_channel = channel;
break;
}
}
init_vp_index(newchannel);
/* Remember the channels that should be cleaned up upon suspend. */
if (is_hvsock_channel(newchannel) || is_sub_channel(newchannel))
atomic_inc(&vmbus_connection.nr_chan_close_on_suspend);
/*
* Now that we have acquired the channel_mutex,
* we can release the potentially racing rescind thread.
*/
atomic_dec(&vmbus_connection.offer_in_progress);
if (fnew) {
list_add_tail(&newchannel->listentry,
&vmbus_connection.chn_list);
} else {
/*
* Check to see if this is a valid sub-channel.
*/
if (newchannel->offermsg.offer.sub_channel_index == 0) {
mutex_unlock(&vmbus_connection.channel_mutex);
cpus_read_unlock();
/*
* Don't call free_channel(), because newchannel->kobj
* is not initialized yet.
*/
kfree(newchannel);
WARN_ON_ONCE(1);
return;
}
/*
* Process the sub-channel.
*/
list_add_tail(&newchannel->sc_list, &channel->sc_list);
}
vmbus_channel_map_relid(newchannel);
mutex_unlock(&vmbus_connection.channel_mutex);
cpus_read_unlock();
/*
* vmbus_process_offer() mustn't call channel->sc_creation_callback()
* directly for sub-channels, because sc_creation_callback() ->
* vmbus_open() may never get the host's response to the
* OPEN_CHANNEL message (the host may rescind a channel at any time,
* e.g. in the case of hot removing a NIC), and vmbus_onoffer_rescind()
* may not wake up the vmbus_open() as it's blocked due to a non-zero
* vmbus_connection.offer_in_progress, and finally we have a deadlock.
*
* The above is also true for primary channels, if the related device
* drivers use sync probing mode by default.
*
* And, usually the handling of primary channels and sub-channels can
* depend on each other, so we should offload them to different
* workqueues to avoid possible deadlock, e.g. in sync-probing mode,
* NIC1's netvsc_subchan_work() can race with NIC2's netvsc_probe() ->
* rtnl_lock(), and causes deadlock: the former gets the rtnl_lock
* and waits for all the sub-channels to appear, but the latter
* can't get the rtnl_lock and this blocks the handling of
* sub-channels.
*/
INIT_WORK(&newchannel->add_channel_work, vmbus_add_channel_work);
wq = fnew ? vmbus_connection.handle_primary_chan_wq :
vmbus_connection.handle_sub_chan_wq;
queue_work(wq, &newchannel->add_channel_work);
}
/*
* Check if the given CPU is used by other channels of the same device.
* It should only be called by init_vp_index().
*/
static bool hv_cpuself_used(u32 cpu, struct vmbus_channel *chn)
{
struct vmbus_channel *primary = chn->primary_channel;
struct vmbus_channel *sc;
lockdep_assert_held(&vmbus_connection.channel_mutex);
if (!primary)
return false;
if (primary->target_cpu == cpu)
return true;
list_for_each_entry(sc, &primary->sc_list, sc_list)
if (sc != chn && sc->target_cpu == cpu)
return true;
return false;
}
/*
* We use this state to statically distribute the channel interrupt load.
*/
static int next_numa_node_id;
/*
* We can statically distribute the incoming channel interrupt load
* by binding a channel to VCPU.
*
* For non-performance critical channels we assign the VMBUS_CONNECT_CPU.
* Performance critical channels will be distributed evenly among all
* the available NUMA nodes. Once the node is assigned, we will assign
* the CPU based on a simple round robin scheme.
*/
static void init_vp_index(struct vmbus_channel *channel)
{
bool perf_chn = hv_is_perf_channel(channel);
u32 i, ncpu = num_online_cpus();
cpumask_var_t available_mask;
struct cpumask *allocated_mask;
const struct cpumask *hk_mask = housekeeping_cpumask(HK_TYPE_MANAGED_IRQ);
u32 target_cpu;
int numa_node;
if (!perf_chn ||
!alloc_cpumask_var(&available_mask, GFP_KERNEL) ||
cpumask_empty(hk_mask)) {
/*
* Bind the channel to VMBUS_CONNECT_CPU if it is not a
* performance critical channel, if alloc_cpumask_var()
* failed, or if all the CPUs are isolated.
*/
channel->target_cpu = VMBUS_CONNECT_CPU;
if (perf_chn)
hv_set_allocated_cpu(VMBUS_CONNECT_CPU);
return;
}
for (i = 1; i <= ncpu + 1; i++) {
while (true) {
numa_node = next_numa_node_id++;
if (numa_node == nr_node_ids) {
next_numa_node_id = 0;
continue;
}
if (cpumask_empty(cpumask_of_node(numa_node)))
continue;
break;
}
allocated_mask = &hv_context.hv_numa_map[numa_node];
retry:
cpumask_xor(available_mask, allocated_mask, cpumask_of_node(numa_node));
cpumask_and(available_mask, available_mask, hk_mask);
if (cpumask_empty(available_mask)) {
/*
* We have cycled through all the CPUs in the node;
* reset the allocated map.
*/
cpumask_clear(allocated_mask);
goto retry;
}
target_cpu = cpumask_first(available_mask);
cpumask_set_cpu(target_cpu, allocated_mask);
if (channel->offermsg.offer.sub_channel_index >= ncpu ||
i > ncpu || !hv_cpuself_used(target_cpu, channel))
break;
}
channel->target_cpu = target_cpu;
free_cpumask_var(available_mask);
}
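/*
* Illustrative example (hypothetical topology, not taken from the code
* above): on a host with two NUMA nodes, housekeeping CPUs 0-3 on node 0
* and 4-7 on node 1, successive performance-critical channels are
* assigned cpu0, cpu4, cpu1, cpu5, ... as next_numa_node_id alternates
* between the nodes and cpumask_first() picks the lowest not-yet-allocated
* CPU of each node. Once every CPU of a node has been handed out, that
* node's allocated map is cleared and the round robin starts over on it.
*/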
#define UNLOAD_DELAY_UNIT_MS 10 /* 10 milliseconds */
#define UNLOAD_WAIT_MS (100*1000) /* 100 seconds */
#define UNLOAD_WAIT_LOOPS (UNLOAD_WAIT_MS/UNLOAD_DELAY_UNIT_MS)
#define UNLOAD_MSG_MS (5*1000) /* Every 5 seconds */
#define UNLOAD_MSG_LOOPS (UNLOAD_MSG_MS/UNLOAD_DELAY_UNIT_MS)
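/*
* With the values above, the wait loop below runs for at most
* UNLOAD_WAIT_LOOPS = 100000 / 10 = 10000 iterations of 10 ms each
* (i.e. 100 seconds), and prints a progress notice every
* UNLOAD_MSG_LOOPS = 5000 / 10 = 500 iterations (i.e. every 5 seconds).
*/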
static void vmbus_wait_for_unload(void)
{
int cpu;
void *page_addr;
struct hv_message *msg;
struct vmbus_channel_message_header *hdr;
u32 message_type, i;
/*
* CHANNELMSG_UNLOAD_RESPONSE is always delivered to the CPU which was
* used for initial contact or to CPU0 depending on host version. When
* we're crashing on a different CPU, let's hope that the IRQ handler on
* the CPU which receives CHANNELMSG_UNLOAD_RESPONSE is still
* functional and vmbus_unload_response() will complete
* vmbus_connection.unload_event. If not, the last thing we can do is
* read message pages for all CPUs directly.
*
* Wait up to 100 seconds since an Azure host must writeback any dirty
* data in its disk cache before the VMbus UNLOAD request will
* complete. This flushing has been empirically observed to take up
* to 50 seconds in cases with a lot of dirty data, so allow additional
* leeway and for inaccuracies in mdelay(). But eventually time out so
* that the panic path can't get hung forever in case the response
* message isn't seen.
*/
for (i = 1; i <= UNLOAD_WAIT_LOOPS; i++) {
if (completion_done(&vmbus_connection.unload_event))
goto completed;
for_each_present_cpu(cpu) {
struct hv_per_cpu_context *hv_cpu
= per_cpu_ptr(hv_context.cpu_context, cpu);
/*
* In a CoCo VM the synic_message_page is not allocated
* in hv_synic_alloc(). Instead it is set/cleared in
* hv_synic_enable_regs() and hv_synic_disable_regs()
* such that it is set only when the CPU is online. If
* not all present CPUs are online, the message page
* might be NULL, so skip such CPUs.
*/
page_addr = hv_cpu->synic_message_page;
if (!page_addr)
continue;
msg = (struct hv_message *)page_addr
+ VMBUS_MESSAGE_SINT;
message_type = READ_ONCE(msg->header.message_type);
if (message_type == HVMSG_NONE)
continue;
hdr = (struct vmbus_channel_message_header *)
msg->u.payload;
if (hdr->msgtype == CHANNELMSG_UNLOAD_RESPONSE)
complete(&vmbus_connection.unload_event);
vmbus_signal_eom(msg, message_type);
}
/*
* Give a notice periodically so someone watching the
* serial output won't think it is completely hung.
*/
if (!(i % UNLOAD_MSG_LOOPS))
pr_notice("Waiting for VMBus UNLOAD to complete\n");
mdelay(UNLOAD_DELAY_UNIT_MS);
}
pr_err("Continuing even though VMBus UNLOAD did not complete\n");
completed:
/*
* We're crashing and already got the UNLOAD_RESPONSE, cleanup all
* maybe-pending messages on all CPUs to be able to receive new
* messages after we reconnect.
*/
for_each_present_cpu(cpu) {
struct hv_per_cpu_context *hv_cpu
= per_cpu_ptr(hv_context.cpu_context, cpu);
page_addr = hv_cpu->synic_message_page;
if (!page_addr)
continue;
msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
msg->header.message_type = HVMSG_NONE;
}
}
/*
* vmbus_unload_response - Handler for the unload response.
*/
static void vmbus_unload_response(struct vmbus_channel_message_header *hdr)
{
/*
* This is a global event; just wakeup the waiting thread.
* Once we successfully unload, we can cleanup the monitor state.
*
* NB. A malicious or compromised Hyper-V could send a spurious
* message of type CHANNELMSG_UNLOAD_RESPONSE, and trigger a call
* of the complete() below. Make sure that unload_event has been
* initialized by the time this complete() is executed.
*/
complete(&vmbus_connection.unload_event);
}
void vmbus_initiate_unload(bool crash)
{
struct vmbus_channel_message_header hdr;
if (xchg(&vmbus_connection.conn_state, DISCONNECTED) == DISCONNECTED)
return;
/* Pre-Win2012R2 hosts don't support reconnect */
if (vmbus_proto_version < VERSION_WIN8_1)
return;
reinit_completion(&vmbus_connection.unload_event);
memset(&hdr, 0, sizeof(struct vmbus_channel_message_header));
hdr.msgtype = CHANNELMSG_UNLOAD;
vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header),
!crash);
/*
* vmbus_initiate_unload() is also called on crash and the crash can be
* happening in an interrupt context, where scheduling is impossible.
*/
if (!crash)
wait_for_completion(&vmbus_connection.unload_event);
else
vmbus_wait_for_unload();
}
static void check_ready_for_resume_event(void)
{
/*
* If all the old primary channels have been fixed up, then it's safe
* to resume.
*/
if (atomic_dec_and_test(&vmbus_connection.nr_chan_fixup_on_resume))
complete(&vmbus_connection.ready_for_resume_event);
}
static void vmbus_setup_channel_state(struct vmbus_channel *channel,
struct vmbus_channel_offer_channel *offer)
{
/*
* Setup state for signalling the host.
*/
channel->sig_event = VMBUS_EVENT_CONNECTION_ID;
channel->is_dedicated_interrupt =
(offer->is_dedicated_interrupt != 0);
channel->sig_event = offer->connection_id;
memcpy(&channel->offermsg, offer,
sizeof(struct vmbus_channel_offer_channel));
channel->monitor_grp = (u8)offer->monitorid / 32;
channel->monitor_bit = (u8)offer->monitorid % 32;
channel->device_id = hv_get_dev_type(channel);
}
/*
* find_primary_channel_by_offer - Get the channel object given the new offer.
* This is only used in the resume path of hibernation.
*/
static struct vmbus_channel *
find_primary_channel_by_offer(const struct vmbus_channel_offer_channel *offer)
{
struct vmbus_channel *channel = NULL, *iter;
const guid_t *inst1, *inst2;
/* Ignore sub-channel offers. */
if (offer->offer.sub_channel_index != 0)
return NULL;
mutex_lock(&vmbus_connection.channel_mutex);
list_for_each_entry(iter, &vmbus_connection.chn_list, listentry) {
inst1 = &iter->offermsg.offer.if_instance;
inst2 = &offer->offer.if_instance;
if (guid_equal(inst1, inst2)) {
channel = iter;
break;
}
}
mutex_unlock(&vmbus_connection.channel_mutex);
return channel;
}
static bool vmbus_is_valid_offer(const struct vmbus_channel_offer_channel *offer)
{
const guid_t *guid = &offer->offer.if_type;
u16 i;
if (!hv_is_isolation_supported())
return true;
if (is_hvsock_offer(offer))
return true;
for (i = 0; i < ARRAY_SIZE(vmbus_devs); i++) {
if (guid_equal(guid, &vmbus_devs[i].guid))
return vmbus_devs[i].allowed_in_isolated;
}
return false;
}
/*
* vmbus_onoffer - Handler for channel offers from vmbus in parent partition.
*
*/
static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
{
struct vmbus_channel_offer_channel *offer;
struct vmbus_channel *oldchannel, *newchannel;
size_t offer_sz;
offer = (struct vmbus_channel_offer_channel *)hdr;
trace_vmbus_onoffer(offer);
if (!vmbus_is_valid_offer(offer)) {
pr_err_ratelimited("Invalid offer %d from the host supporting isolation\n",
offer->child_relid);
atomic_dec(&vmbus_connection.offer_in_progress);
return;
}
oldchannel = find_primary_channel_by_offer(offer);
if (oldchannel != NULL) {
/*
* We're resuming from hibernation: all the sub-channel and
* hv_sock channels we had before the hibernation should have
* been cleaned up, and now we must be seeing a re-offered
* primary channel that we had before the hibernation.
*/
/*
* { Initially: channel relid = INVALID_RELID,
* channels[valid_relid] = NULL }
*
* CPU1 CPU2
*
* [vmbus_onoffer()] [vmbus_device_release()]
*
* LOCK channel_mutex LOCK channel_mutex
* STORE channel relid = valid_relid LOAD r1 = channel relid
* MAP_RELID channel if (r1 != INVALID_RELID)
* UNLOCK channel_mutex UNMAP_RELID channel
* UNLOCK channel_mutex
*
* Forbids: r1 == valid_relid &&
* channels[valid_relid] == channel
*
* Note. r1 can be INVALID_RELID only for an hv_sock channel.
* None of the hv_sock channels which were present before the
* suspend are re-offered upon the resume. See the WARN_ON()
* in hv_process_channel_removal().
*/
mutex_lock(&vmbus_connection.channel_mutex);
atomic_dec(&vmbus_connection.offer_in_progress);
WARN_ON(oldchannel->offermsg.child_relid != INVALID_RELID);
/* Fix up the relid. */
oldchannel->offermsg.child_relid = offer->child_relid;
offer_sz = sizeof(*offer);
if (memcmp(offer, &oldchannel->offermsg, offer_sz) != 0) {
/*
* This is not an error, since the host can also change
* the other field(s) of the offer, e.g. on WS RS5
* (Build 17763), the offer->connection_id of the
* Mellanox VF vmbus device can change when the host
* reoffers the device upon resume.
*/
pr_debug("vmbus offer changed: relid=%d\n",
offer->child_relid);
print_hex_dump_debug("Old vmbus offer: ",
DUMP_PREFIX_OFFSET, 16, 4,
&oldchannel->offermsg, offer_sz,
false);
print_hex_dump_debug("New vmbus offer: ",
DUMP_PREFIX_OFFSET, 16, 4,
offer, offer_sz, false);
/* Fix up the old channel. */
vmbus_setup_channel_state(oldchannel, offer);
}
/* Add the channel back to the array of channels. */
vmbus_channel_map_relid(oldchannel);
check_ready_for_resume_event();
mutex_unlock(&vmbus_connection.channel_mutex);
return;
}
/* Allocate the channel object and save this offer. */
newchannel = alloc_channel();
if (!newchannel) {
vmbus_release_relid(offer->child_relid);
atomic_dec(&vmbus_connection.offer_in_progress);
pr_err("Unable to allocate channel object\n");
return;
}
vmbus_setup_channel_state(newchannel, offer);
vmbus_process_offer(newchannel);
}
static void check_ready_for_suspend_event(void)
{
/*
* If all the sub-channels or hv_sock channels have been cleaned up,
* then it's safe to suspend.
*/
if (atomic_dec_and_test(&vmbus_connection.nr_chan_close_on_suspend))
complete(&vmbus_connection.ready_for_suspend_event);
}
/*
* vmbus_onoffer_rescind - Rescind offer handler.
*
* We queue a work item to process this offer synchronously
*/
static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
{
struct vmbus_channel_rescind_offer *rescind;
struct vmbus_channel *channel;
struct device *dev;
bool clean_up_chan_for_suspend;
rescind = (struct vmbus_channel_rescind_offer *)hdr;
trace_vmbus_onoffer_rescind(rescind);
/*
* The offer msg and the corresponding rescind msg
* from the host are guaranteed to be ordered -
* offer comes in first and then the rescind.
* Since we process these events in work elements,
* and with preemption, we may end up processing
* the events out of order. We rely on the synchronization
* provided by offer_in_progress and by channel_mutex for
* ordering these events:
*
* { Initially: offer_in_progress = 1 }
*
* CPU1 CPU2
*
* [vmbus_onoffer()] [vmbus_onoffer_rescind()]
*
* LOCK channel_mutex WAIT_ON offer_in_progress == 0
* DECREMENT offer_in_progress LOCK channel_mutex
* STORE channels[] LOAD channels[]
* UNLOCK channel_mutex UNLOCK channel_mutex
*
* Forbids: CPU2's LOAD from *not* seeing CPU1's STORE
*/
while (atomic_read(&vmbus_connection.offer_in_progress) != 0) {
/*
* Wait here until no channel offer is currently being
* processed.
*/
msleep(1);
}
mutex_lock(&vmbus_connection.channel_mutex);
channel = relid2channel(rescind->child_relid);
if (channel != NULL) {
/*
* Guarantee that no other instance of vmbus_onoffer_rescind()
* has got a reference to the channel object. Synchronize on
* &vmbus_connection.channel_mutex.
*/
if (channel->rescind_ref) {
mutex_unlock(&vmbus_connection.channel_mutex);
return;
}
channel->rescind_ref = true;
}
mutex_unlock(&vmbus_connection.channel_mutex);
if (channel == NULL) {
/*
* We failed in processing the offer message;
* we would have cleaned up the relid in that
* failure path.
*/
return;
}
clean_up_chan_for_suspend = is_hvsock_channel(channel) ||
is_sub_channel(channel);
/*
* Before setting channel->rescind in vmbus_rescind_cleanup(), we
* should make sure the channel callback is not running any more.
*/
vmbus_reset_channel_cb(channel);
/*
* Now wait for offer handling to complete.
*/
vmbus_rescind_cleanup(channel);
while (READ_ONCE(channel->probe_done) == false) {
/*
* Wait here until the channel probe, i.e. the offer handling
* in vmbus_add_channel_work(), has completed.
*/
msleep(1);
}
/*
* At this point, the rescind handling can proceed safely.
*/
if (channel->device_obj) {
if (channel->chn_rescind_callback) {
channel->chn_rescind_callback(channel);
if (clean_up_chan_for_suspend)
check_ready_for_suspend_event();
return;
}
/*
* We will have to unregister this device from the
* driver core.
*/
dev = get_device(&channel->device_obj->device);
if (dev) {
vmbus_device_unregister(channel->device_obj);
put_device(dev);
}
} else if (channel->primary_channel != NULL) {
/*
* Sub-channel is being rescinded. Following is the channel
* close sequence when initiated from the driver (refer to
* vmbus_close() for details):
* 1. Close all sub-channels first
* 2. Then close the primary channel.
*/
mutex_lock(&vmbus_connection.channel_mutex);
if (channel->state == CHANNEL_OPEN_STATE) {
/*
* The channel is currently not open;
* it is safe for us to cleanup the channel.
*/
hv_process_channel_removal(channel);
} else {
complete(&channel->rescind_event);
}
mutex_unlock(&vmbus_connection.channel_mutex);
}
/* The "channel" may have been freed. Do not access it any longer. */
if (clean_up_chan_for_suspend)
check_ready_for_suspend_event();
}
void vmbus_hvsock_device_unregister(struct vmbus_channel *channel)
{
BUG_ON(!is_hvsock_channel(channel));
/* We always get a rescind msg when a connection is closed. */
while (!READ_ONCE(channel->probe_done) || !READ_ONCE(channel->rescind))
msleep(1);
vmbus_device_unregister(channel->device_obj);
}
EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister);
/*
* vmbus_onoffers_delivered -
* This is invoked when all offers have been delivered.
*
* Nothing to do here.
*/
static void vmbus_onoffers_delivered(
struct vmbus_channel_message_header *hdr)
{
}
/*
* vmbus_onopen_result - Open result handler.
*
* This is invoked when we received a response to our channel open request.
* Find the matching request, copy the response and signal the requesting
* thread.
*/
static void vmbus_onopen_result(struct vmbus_channel_message_header *hdr)
{
struct vmbus_channel_open_result *result;
struct vmbus_channel_msginfo *msginfo;
struct vmbus_channel_message_header *requestheader;
struct vmbus_channel_open_channel *openmsg;
unsigned long flags;
result = (struct vmbus_channel_open_result *)hdr;
trace_vmbus_onopen_result(result);
/*
* Find the open msg, copy the result and signal/unblock the wait event
*/
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
msglistentry) {
requestheader =
(struct vmbus_channel_message_header *)msginfo->msg;
if (requestheader->msgtype == CHANNELMSG_OPENCHANNEL) {
openmsg =
(struct vmbus_channel_open_channel *)msginfo->msg;
if (openmsg->child_relid == result->child_relid &&
openmsg->openid == result->openid) {
memcpy(&msginfo->response.open_result,
result,
sizeof(
struct vmbus_channel_open_result));
complete(&msginfo->waitevent);
break;
}
}
}
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
/*
* vmbus_ongpadl_created - GPADL created handler.
*
* This is invoked when we received a response to our gpadl create request.
* Find the matching request, copy the response and signal the requesting
* thread.
*/
static void vmbus_ongpadl_created(struct vmbus_channel_message_header *hdr)
{
struct vmbus_channel_gpadl_created *gpadlcreated;
struct vmbus_channel_msginfo *msginfo;
struct vmbus_channel_message_header *requestheader;
struct vmbus_channel_gpadl_header *gpadlheader;
unsigned long flags;
gpadlcreated = (struct vmbus_channel_gpadl_created *)hdr;
trace_vmbus_ongpadl_created(gpadlcreated);
/*
* Find the establish msg, copy the result and signal/unblock the wait
* event
*/
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
msglistentry) {
requestheader =
(struct vmbus_channel_message_header *)msginfo->msg;
if (requestheader->msgtype == CHANNELMSG_GPADL_HEADER) {
gpadlheader =
(struct vmbus_channel_gpadl_header *)requestheader;
if ((gpadlcreated->child_relid ==
gpadlheader->child_relid) &&
(gpadlcreated->gpadl == gpadlheader->gpadl)) {
memcpy(&msginfo->response.gpadl_created,
gpadlcreated,
sizeof(
struct vmbus_channel_gpadl_created));
complete(&msginfo->waitevent);
break;
}
}
}
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
/*
* vmbus_onmodifychannel_response - Modify Channel response handler.
*
* This is invoked when we received a response to our channel modify request.
* Find the matching request, copy the response and signal the requesting thread.
*/
static void vmbus_onmodifychannel_response(struct vmbus_channel_message_header *hdr)
{
struct vmbus_channel_modifychannel_response *response;
struct vmbus_channel_msginfo *msginfo;
unsigned long flags;
response = (struct vmbus_channel_modifychannel_response *)hdr;
trace_vmbus_onmodifychannel_response(response);
/*
* Find the modify msg, copy the response and signal/unblock the wait event.
*/
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list, msglistentry) {
struct vmbus_channel_message_header *responseheader =
(struct vmbus_channel_message_header *)msginfo->msg;
if (responseheader->msgtype == CHANNELMSG_MODIFYCHANNEL) {
struct vmbus_channel_modifychannel *modifymsg;
modifymsg = (struct vmbus_channel_modifychannel *)msginfo->msg;
if (modifymsg->child_relid == response->child_relid) {
memcpy(&msginfo->response.modify_response, response,
sizeof(*response));
complete(&msginfo->waitevent);
break;
}
}
}
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
/*
* vmbus_ongpadl_torndown - GPADL torndown handler.
*
* This is invoked when we received a response to our gpadl teardown request.
* Find the matching request, copy the response and signal the requesting
* thread.
*/
static void vmbus_ongpadl_torndown(
struct vmbus_channel_message_header *hdr)
{
struct vmbus_channel_gpadl_torndown *gpadl_torndown;
struct vmbus_channel_msginfo *msginfo;
struct vmbus_channel_message_header *requestheader;
struct vmbus_channel_gpadl_teardown *gpadl_teardown;
unsigned long flags;
gpadl_torndown = (struct vmbus_channel_gpadl_torndown *)hdr;
trace_vmbus_ongpadl_torndown(gpadl_torndown);
/*
* Find the open msg, copy the result and signal/unblock the wait event
*/
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
msglistentry) {
requestheader =
(struct vmbus_channel_message_header *)msginfo->msg;
if (requestheader->msgtype == CHANNELMSG_GPADL_TEARDOWN) {
gpadl_teardown =
(struct vmbus_channel_gpadl_teardown *)requestheader;
if (gpadl_torndown->gpadl == gpadl_teardown->gpadl) {
memcpy(&msginfo->response.gpadl_torndown,
gpadl_torndown,
sizeof(
struct vmbus_channel_gpadl_torndown));
complete(&msginfo->waitevent);
break;
}
}
}
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
/*
* vmbus_onversion_response - Version response handler
*
* This is invoked when we received a response to our initiate contact request.
* Find the matching request, copy the response and signal the requesting
* thread.
*/
static void vmbus_onversion_response(
struct vmbus_channel_message_header *hdr)
{
struct vmbus_channel_msginfo *msginfo;
struct vmbus_channel_message_header *requestheader;
struct vmbus_channel_version_response *version_response;
unsigned long flags;
version_response = (struct vmbus_channel_version_response *)hdr;
trace_vmbus_onversion_response(version_response);
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
msglistentry) {
requestheader =
(struct vmbus_channel_message_header *)msginfo->msg;
if (requestheader->msgtype ==
CHANNELMSG_INITIATE_CONTACT) {
memcpy(&msginfo->response.version_response,
version_response,
sizeof(struct vmbus_channel_version_response));
complete(&msginfo->waitevent);
}
}
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
/* Channel message dispatch table */
const struct vmbus_channel_message_table_entry
channel_message_table[CHANNELMSG_COUNT] = {
{ CHANNELMSG_INVALID, 0, NULL, 0},
{ CHANNELMSG_OFFERCHANNEL, 0, vmbus_onoffer,
sizeof(struct vmbus_channel_offer_channel)},
{ CHANNELMSG_RESCIND_CHANNELOFFER, 0, vmbus_onoffer_rescind,
sizeof(struct vmbus_channel_rescind_offer) },
{ CHANNELMSG_REQUESTOFFERS, 0, NULL, 0},
{ CHANNELMSG_ALLOFFERS_DELIVERED, 1, vmbus_onoffers_delivered, 0},
{ CHANNELMSG_OPENCHANNEL, 0, NULL, 0},
{ CHANNELMSG_OPENCHANNEL_RESULT, 1, vmbus_onopen_result,
sizeof(struct vmbus_channel_open_result)},
{ CHANNELMSG_CLOSECHANNEL, 0, NULL, 0},
{ CHANNELMSG_GPADL_HEADER, 0, NULL, 0},
{ CHANNELMSG_GPADL_BODY, 0, NULL, 0},
{ CHANNELMSG_GPADL_CREATED, 1, vmbus_ongpadl_created,
sizeof(struct vmbus_channel_gpadl_created)},
{ CHANNELMSG_GPADL_TEARDOWN, 0, NULL, 0},
{ CHANNELMSG_GPADL_TORNDOWN, 1, vmbus_ongpadl_torndown,
sizeof(struct vmbus_channel_gpadl_torndown) },
{ CHANNELMSG_RELID_RELEASED, 0, NULL, 0},
{ CHANNELMSG_INITIATE_CONTACT, 0, NULL, 0},
{ CHANNELMSG_VERSION_RESPONSE, 1, vmbus_onversion_response,
sizeof(struct vmbus_channel_version_response)},
{ CHANNELMSG_UNLOAD, 0, NULL, 0},
{ CHANNELMSG_UNLOAD_RESPONSE, 1, vmbus_unload_response, 0},
{ CHANNELMSG_18, 0, NULL, 0},
{ CHANNELMSG_19, 0, NULL, 0},
{ CHANNELMSG_20, 0, NULL, 0},
{ CHANNELMSG_TL_CONNECT_REQUEST, 0, NULL, 0},
{ CHANNELMSG_MODIFYCHANNEL, 0, NULL, 0},
{ CHANNELMSG_TL_CONNECT_RESULT, 0, NULL, 0},
{ CHANNELMSG_MODIFYCHANNEL_RESPONSE, 1, vmbus_onmodifychannel_response,
sizeof(struct vmbus_channel_modifychannel_response)},
};
/*
* vmbus_onmessage - Handler for channel protocol messages.
*
* This is invoked in the vmbus worker thread context.
*/
void vmbus_onmessage(struct vmbus_channel_message_header *hdr)
{
trace_vmbus_on_message(hdr);
/*
* vmbus_on_msg_dpc() makes sure the hdr->msgtype here can not go
* out of bound and the message_handler pointer can not be NULL.
*/
channel_message_table[hdr->msgtype].message_handler(hdr);
}
/*
* vmbus_request_offers - Send a request to get all our pending offers.
*/
int vmbus_request_offers(void)
{
struct vmbus_channel_message_header *msg;
struct vmbus_channel_msginfo *msginfo;
int ret;
msginfo = kzalloc(sizeof(*msginfo) +
sizeof(struct vmbus_channel_message_header),
GFP_KERNEL);
if (!msginfo)
return -ENOMEM;
msg = (struct vmbus_channel_message_header *)msginfo->msg;
msg->msgtype = CHANNELMSG_REQUESTOFFERS;
ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_message_header),
true);
trace_vmbus_request_offers(ret);
if (ret != 0) {
pr_err("Unable to request offers - %d\n", ret);
goto cleanup;
}
cleanup:
kfree(msginfo);
return ret;
}
void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
void (*sc_cr_cb)(struct vmbus_channel *new_sc))
{
primary_channel->sc_creation_callback = sc_cr_cb;
}
EXPORT_SYMBOL_GPL(vmbus_set_sc_create_callback);
void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
void (*chn_rescind_cb)(struct vmbus_channel *))
{
channel->chn_rescind_callback = chn_rescind_cb;
}
EXPORT_SYMBOL_GPL(vmbus_set_chn_rescind_callback);
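/*
* Example usage of the two callback setters above (illustrative sketch
* only; the driver context and function names below are hypothetical and
* not taken from an in-tree driver):
*
*	static void my_sc_created(struct vmbus_channel *new_sc)
*	{
*		... open the sub-channel and set up its ring buffer ...
*	}
*
*	static void my_chn_rescind(struct vmbus_channel *chan)
*	{
*		... the host revoked the channel: stop I/O, clean up ...
*	}
*
*	static int my_probe(struct hv_device *dev,
*			    const struct hv_vmbus_device_id *id)
*	{
*		vmbus_set_sc_create_callback(dev->channel, my_sc_created);
*		vmbus_set_chn_rescind_callback(dev->channel, my_chn_rescind);
*		return 0;
*	}
*/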
|
linux-master
|
drivers/hv/channel_mgmt.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (c) 2009, Microsoft Corporation.
*
* Authors:
* Haiyang Zhang <[email protected]>
* Hank Janssen <[email protected]>
* K. Y. Srinivasan <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/io.h>
#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"
#define VMBUS_PKT_TRAILER 8
/*
* When we write to the ring buffer, check if the host needs to
* be signaled. Here are the details of this protocol:
*
* 1. The host guarantees that while it is draining the
* ring buffer, it will set the interrupt_mask to
* indicate it does not need to be interrupted when
* new data is placed.
*
* 2. The host guarantees that it will completely drain
* the ring buffer before exiting the read loop. Further,
* once the ring buffer is empty, it will clear the
* interrupt_mask and re-check to see if new data has
* arrived.
*
* KYS: Oct. 30, 2016:
* It looks like Windows hosts have logic to deal with DOS attacks that
* can be triggered if it receives interrupts when it is not expecting
* the interrupt. The host expects interrupts only when the ring
* transitions from empty to non-empty (or full to non-full on the guest
* to host ring).
* So, base the signaling decision solely on the ring state until the
* host logic is fixed.
*/
static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
{
struct hv_ring_buffer_info *rbi = &channel->outbound;
virt_mb();
if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
return;
/* check interrupt_mask before read_index */
virt_rmb();
/*
* This is the only case we need to signal when the
* ring transitions from being empty to non-empty.
*/
if (old_write == READ_ONCE(rbi->ring_buffer->read_index)) {
++channel->intr_out_empty;
vmbus_setevent(channel);
}
}
/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
u32 next = ring_info->ring_buffer->write_index;
return next;
}
/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
u32 next_write_location)
{
ring_info->ring_buffer->write_index = next_write_location;
}
/* Get the size of the ring buffer. */
static inline u32
hv_get_ring_buffersize(const struct hv_ring_buffer_info *ring_info)
{
return ring_info->ring_datasize;
}
/* Get the read and write indices as u64 of the specified ring buffer. */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
return (u64)ring_info->ring_buffer->write_index << 32;
}
/*
* Helper routine to copy from source to ring buffer.
* Assume there is enough room. Handles wrap-around in dest case only!!
*/
static u32 hv_copyto_ringbuffer(
struct hv_ring_buffer_info *ring_info,
u32 start_write_offset,
const void *src,
u32 srclen)
{
void *ring_buffer = hv_get_ring_buffer(ring_info);
u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
memcpy(ring_buffer + start_write_offset, src, srclen);
start_write_offset += srclen;
if (start_write_offset >= ring_buffer_size)
start_write_offset -= ring_buffer_size;
return start_write_offset;
}
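/*
* Example: with a 4096-byte data area, writing 100 bytes starting at
* offset 4050 simply runs past the end of the first mapping into the
* wrap-around mapping created in hv_ringbuffer_init(), and the returned
* next write offset is 4050 + 100 - 4096 = 54.
*/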
/*
*
* hv_get_ringbuffer_availbytes()
*
* Get number of bytes available to read and to write to
* for the specified ring buffer
*/
static void
hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
u32 *read, u32 *write)
{
u32 read_loc, write_loc, dsize;
/* Capture the read/write indices before they changed */
read_loc = READ_ONCE(rbi->ring_buffer->read_index);
write_loc = READ_ONCE(rbi->ring_buffer->write_index);
dsize = rbi->ring_datasize;
*write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
read_loc - write_loc;
*read = dsize - *write;
}
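/*
* Example: with dsize = 4096, read_index = 1000 and write_index = 3000,
* *write = 4096 - (3000 - 1000) = 2096 bytes available to write and
* *read = 4096 - 2096 = 2000 bytes available to read.
*/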
/* Get various debug metrics for the specified ring buffer. */
int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
struct hv_ring_buffer_debug_info *debug_info)
{
u32 bytes_avail_towrite;
u32 bytes_avail_toread;
mutex_lock(&ring_info->ring_buffer_mutex);
if (!ring_info->ring_buffer) {
mutex_unlock(&ring_info->ring_buffer_mutex);
return -EINVAL;
}
hv_get_ringbuffer_availbytes(ring_info,
&bytes_avail_toread,
&bytes_avail_towrite);
debug_info->bytes_avail_toread = bytes_avail_toread;
debug_info->bytes_avail_towrite = bytes_avail_towrite;
debug_info->current_read_index = ring_info->ring_buffer->read_index;
debug_info->current_write_index = ring_info->ring_buffer->write_index;
debug_info->current_interrupt_mask
= ring_info->ring_buffer->interrupt_mask;
mutex_unlock(&ring_info->ring_buffer_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);
/* Initialize a channel's ring buffer info mutex locks */
void hv_ringbuffer_pre_init(struct vmbus_channel *channel)
{
mutex_init(&channel->inbound.ring_buffer_mutex);
mutex_init(&channel->outbound.ring_buffer_mutex);
}
/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
struct page *pages, u32 page_cnt, u32 max_pkt_size)
{
struct page **pages_wraparound;
int i;
BUILD_BUG_ON((sizeof(struct hv_ring_buffer) != PAGE_SIZE));
/*
* First page holds struct hv_ring_buffer, do wraparound mapping for
* the rest.
*/
pages_wraparound = kcalloc(page_cnt * 2 - 1,
sizeof(struct page *),
GFP_KERNEL);
if (!pages_wraparound)
return -ENOMEM;
pages_wraparound[0] = pages;
for (i = 0; i < 2 * (page_cnt - 1); i++)
pages_wraparound[i + 1] =
&pages[i % (page_cnt - 1) + 1];
ring_info->ring_buffer = (struct hv_ring_buffer *)
vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP,
pgprot_decrypted(PAGE_KERNEL));
kfree(pages_wraparound);
if (!ring_info->ring_buffer)
return -ENOMEM;
/*
* Ensure the header page is zeroed since
* encryption status may have changed.
*/
memset(ring_info->ring_buffer, 0, HV_HYP_PAGE_SIZE);
ring_info->ring_buffer->read_index =
ring_info->ring_buffer->write_index = 0;
/* Set the feature bit for enabling flow control. */
ring_info->ring_buffer->feature_bits.value = 1;
ring_info->ring_size = page_cnt << PAGE_SHIFT;
ring_info->ring_size_div10_reciprocal =
reciprocal_value(ring_info->ring_size / 10);
ring_info->ring_datasize = ring_info->ring_size -
sizeof(struct hv_ring_buffer);
ring_info->priv_read_index = 0;
/* Initialize buffer that holds copies of incoming packets */
if (max_pkt_size) {
ring_info->pkt_buffer = kzalloc(max_pkt_size, GFP_KERNEL);
if (!ring_info->pkt_buffer)
return -ENOMEM;
ring_info->pkt_buffer_size = max_pkt_size;
}
spin_lock_init(&ring_info->ring_lock);
return 0;
}
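/*
* Illustration of the wrap-around mapping built above: for page_cnt = 4
* (one header page H plus data pages D1..D3), the vmap() order is
*
*	H D1 D2 D3 D1 D2 D3
*
* so the data area appears twice back to back in virtual memory. A
* packet that starts near the end of the data area can therefore be
* copied in or out with a single memcpy() that runs over into the
* second mapping (see hv_copyto_ringbuffer() and hv_ringbuffer_read()).
*/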
/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
mutex_lock(&ring_info->ring_buffer_mutex);
vunmap(ring_info->ring_buffer);
ring_info->ring_buffer = NULL;
mutex_unlock(&ring_info->ring_buffer_mutex);
kfree(ring_info->pkt_buffer);
ring_info->pkt_buffer = NULL;
ring_info->pkt_buffer_size = 0;
}
/*
* Check if the ring buffer spinlock is available to take or not; used in
* atomic contexts, like panic path (see the Hyper-V framebuffer driver).
*/
bool hv_ringbuffer_spinlock_busy(struct vmbus_channel *channel)
{
struct hv_ring_buffer_info *rinfo = &channel->outbound;
return spin_is_locked(&rinfo->ring_lock);
}
EXPORT_SYMBOL_GPL(hv_ringbuffer_spinlock_busy);
/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
const struct kvec *kv_list, u32 kv_count,
u64 requestid, u64 *trans_id)
{
int i;
u32 bytes_avail_towrite;
u32 totalbytes_towrite = sizeof(u64);
u32 next_write_location;
u32 old_write;
u64 prev_indices;
unsigned long flags;
struct hv_ring_buffer_info *outring_info = &channel->outbound;
struct vmpacket_descriptor *desc = kv_list[0].iov_base;
u64 __trans_id, rqst_id = VMBUS_NO_RQSTOR;
if (channel->rescind)
return -ENODEV;
for (i = 0; i < kv_count; i++)
totalbytes_towrite += kv_list[i].iov_len;
spin_lock_irqsave(&outring_info->ring_lock, flags);
bytes_avail_towrite = hv_get_bytes_to_write(outring_info);
/*
* If there is only room for the packet, assume it is full.
* Otherwise, the next time around, we think the ring buffer
* is empty since the read index == write index.
*/
if (bytes_avail_towrite <= totalbytes_towrite) {
++channel->out_full_total;
if (!channel->out_full_flag) {
++channel->out_full_first;
channel->out_full_flag = true;
}
spin_unlock_irqrestore(&outring_info->ring_lock, flags);
return -EAGAIN;
}
channel->out_full_flag = false;
/* Write to the ring buffer */
next_write_location = hv_get_next_write_location(outring_info);
old_write = next_write_location;
for (i = 0; i < kv_count; i++) {
next_write_location = hv_copyto_ringbuffer(outring_info,
next_write_location,
kv_list[i].iov_base,
kv_list[i].iov_len);
}
/*
* Allocate the request ID after the data has been copied into the
* ring buffer. Once this request ID is allocated, the completion
* path could find the data and free it.
*/
if (desc->flags == VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED) {
if (channel->next_request_id_callback != NULL) {
rqst_id = channel->next_request_id_callback(channel, requestid);
if (rqst_id == VMBUS_RQST_ERROR) {
spin_unlock_irqrestore(&outring_info->ring_lock, flags);
return -EAGAIN;
}
}
}
desc = hv_get_ring_buffer(outring_info) + old_write;
__trans_id = (rqst_id == VMBUS_NO_RQSTOR) ? requestid : rqst_id;
/*
* Ensure the compiler doesn't generate code that reads the value of
* the transaction ID from the ring buffer, which is shared with the
* Hyper-V host and subject to being changed at any time.
*/
WRITE_ONCE(desc->trans_id, __trans_id);
if (trans_id)
*trans_id = __trans_id;
/* Set previous packet start */
prev_indices = hv_get_ring_bufferindices(outring_info);
next_write_location = hv_copyto_ringbuffer(outring_info,
next_write_location,
&prev_indices,
sizeof(u64));
/* Issue a full memory barrier before updating the write index */
virt_mb();
/* Now, update the write location */
hv_set_next_write_location(outring_info, next_write_location);
spin_unlock_irqrestore(&outring_info->ring_lock, flags);
hv_signal_on_write(old_write, channel);
if (channel->rescind) {
if (rqst_id != VMBUS_NO_RQSTOR) {
/* Reclaim request ID to avoid leak of IDs */
if (channel->request_addr_callback != NULL)
channel->request_addr_callback(channel, rqst_id);
}
return -ENODEV;
}
return 0;
}
int hv_ringbuffer_read(struct vmbus_channel *channel,
void *buffer, u32 buflen, u32 *buffer_actual_len,
u64 *requestid, bool raw)
{
struct vmpacket_descriptor *desc;
u32 packetlen, offset;
if (unlikely(buflen == 0))
return -EINVAL;
*buffer_actual_len = 0;
*requestid = 0;
/* Make sure there is something to read */
desc = hv_pkt_iter_first(channel);
if (desc == NULL) {
/*
* No error is returned even when there is no header; drivers
* are supposed to check buffer_actual_len.
*/
return 0;
}
offset = raw ? 0 : (desc->offset8 << 3);
packetlen = (desc->len8 << 3) - offset;
*buffer_actual_len = packetlen;
*requestid = desc->trans_id;
if (unlikely(packetlen > buflen))
return -ENOBUFS;
/* since ring is double mapped, only one copy is necessary */
memcpy(buffer, (const char *)desc + offset, packetlen);
/* Advance ring index to next packet descriptor */
__hv_pkt_iter_next(channel, desc);
/* Notify host of update */
hv_pkt_iter_close(channel);
return 0;
}
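/*
* Example: a packet with desc->len8 = 64 and desc->offset8 = 4 is
* 64 * 8 = 512 bytes long with a 4 * 8 = 32 byte descriptor/header.
* A normal (!raw) read returns the 512 - 32 = 480 payload bytes, while
* a raw read returns all 512 bytes including the descriptor.
*/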
/*
* Determine number of bytes available in ring buffer after
* the current iterator (priv_read_index) location.
*
* This is similar to hv_get_bytes_to_read but with private
* read index instead.
*/
static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
{
u32 priv_read_loc = rbi->priv_read_index;
u32 write_loc;
/*
* The Hyper-V host writes the packet data, then uses
* store_release() to update the write_index. Use load_acquire()
* here to prevent loads of the packet data from being re-ordered
* before the read of the write_index and potentially getting
* stale data.
*/
write_loc = virt_load_acquire(&rbi->ring_buffer->write_index);
if (write_loc >= priv_read_loc)
return write_loc - priv_read_loc;
else
return (rbi->ring_datasize - priv_read_loc) + write_loc;
}
/*
* Get first vmbus packet from ring buffer after read_index
*
* If ring buffer is empty, returns NULL and no other action needed.
*/
struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
{
struct hv_ring_buffer_info *rbi = &channel->inbound;
struct vmpacket_descriptor *desc, *desc_copy;
u32 bytes_avail, pkt_len, pkt_offset;
hv_debug_delay_test(channel, MESSAGE_DELAY);
bytes_avail = hv_pkt_iter_avail(rbi);
if (bytes_avail < sizeof(struct vmpacket_descriptor))
return NULL;
bytes_avail = min(rbi->pkt_buffer_size, bytes_avail);
desc = (struct vmpacket_descriptor *)(hv_get_ring_buffer(rbi) + rbi->priv_read_index);
/*
* Ensure the compiler does not use references to incoming Hyper-V values (which
* could change at any moment) when reading local variables later in the code
*/
pkt_len = READ_ONCE(desc->len8) << 3;
pkt_offset = READ_ONCE(desc->offset8) << 3;
/*
* If pkt_len is invalid, set it to the smaller of hv_pkt_iter_avail() and
* rbi->pkt_buffer_size
*/
if (pkt_len < sizeof(struct vmpacket_descriptor) || pkt_len > bytes_avail)
pkt_len = bytes_avail;
/*
* If pkt_offset is invalid, arbitrarily set it to
* the size of vmpacket_descriptor
*/
if (pkt_offset < sizeof(struct vmpacket_descriptor) || pkt_offset > pkt_len)
pkt_offset = sizeof(struct vmpacket_descriptor);
/* Copy the Hyper-V packet out of the ring buffer */
desc_copy = (struct vmpacket_descriptor *)rbi->pkt_buffer;
memcpy(desc_copy, desc, pkt_len);
/*
* Hyper-V could still change len8 and offset8 after the earlier read.
* Ensure that desc_copy has legal values for len8 and offset8 that
* are consistent with the copy we just made
*/
desc_copy->len8 = pkt_len >> 3;
desc_copy->offset8 = pkt_offset >> 3;
return desc_copy;
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_first);
/*
* Get next vmbus packet from ring buffer.
*
* Advances the current location (priv_read_index) and checks for more
* data. If the end of the ring buffer is reached, then return NULL.
*/
struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
const struct vmpacket_descriptor *desc)
{
struct hv_ring_buffer_info *rbi = &channel->inbound;
u32 packetlen = desc->len8 << 3;
u32 dsize = rbi->ring_datasize;
hv_debug_delay_test(channel, MESSAGE_DELAY);
/* bump offset to next potential packet */
rbi->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
if (rbi->priv_read_index >= dsize)
rbi->priv_read_index -= dsize;
/* more data? */
return hv_pkt_iter_first(channel);
}
EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);
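/*
* Typical use of the iterator API above, as an illustrative sketch
* (drivers normally go through the foreach_vmbus_pkt() helper in
* linux/hyperv.h, which wraps the same calls):
*
*	struct vmpacket_descriptor *desc;
*
*	for (desc = hv_pkt_iter_first(channel); desc;
*	     desc = __hv_pkt_iter_next(channel, desc)) {
*		... process the sanitized packet copy in *desc ...
*	}
*	hv_pkt_iter_close(channel);
*/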
/* How many bytes were read in this iterator cycle */
static u32 hv_pkt_iter_bytes_read(const struct hv_ring_buffer_info *rbi,
u32 start_read_index)
{
if (rbi->priv_read_index >= start_read_index)
return rbi->priv_read_index - start_read_index;
else
return rbi->ring_datasize - start_read_index +
rbi->priv_read_index;
}
/*
* Update host ring buffer after iterating over packets. If the host has
* stopped queuing new entries because it found the ring buffer full, and
* sufficient space is being freed up, signal the host. But be careful to
* only signal the host when necessary, both for performance reasons and
* because Hyper-V protects itself by throttling guests that signal
* inappropriately.
*
* Determining when to signal is tricky. There are three key data inputs
* that must be handled in this order to avoid race conditions:
*
* 1. Update the read_index
* 2. Read the pending_send_sz
* 3. Read the current write_index
*
* The interrupt_mask is not used to determine when to signal. The
* interrupt_mask is used only on the guest->host ring buffer when
* sending requests to the host. The host does not use it on the host->
* guest ring buffer to indicate whether it should be signaled.
*/
void hv_pkt_iter_close(struct vmbus_channel *channel)
{
struct hv_ring_buffer_info *rbi = &channel->inbound;
u32 curr_write_sz, pending_sz, bytes_read, start_read_index;
/*
* Make sure all reads are done before we update the read index since
* the writer may start writing to the read area once the read index
* is updated.
*/
virt_rmb();
start_read_index = rbi->ring_buffer->read_index;
rbi->ring_buffer->read_index = rbi->priv_read_index;
/*
* Older versions of Hyper-V (before WS2012 and Win8) do not
* implement pending_send_sz and simply poll if the host->guest
* ring buffer is full. No signaling is needed or expected.
*/
if (!rbi->ring_buffer->feature_bits.feat_pending_send_sz)
return;
/*
* Issue a full memory barrier before making the signaling decision.
* If reading pending_send_sz were to be reordered and happen
* before we commit the new read_index, a race could occur. If the
* host were to set the pending_send_sz after we have sampled
* pending_send_sz, and the ring buffer blocks before we commit the
* read index, we could miss sending the interrupt. Issue a full
* memory barrier to address this.
*/
virt_mb();
/*
* If the pending_send_sz is zero, then the ring buffer is not
* blocked and there is no need to signal. This is by far the
* most common case, so exit quickly for best performance.
*/
pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
if (!pending_sz)
return;
/*
* Ensure the read of write_index in hv_get_bytes_to_write()
* happens after the read of pending_send_sz.
*/
virt_rmb();
curr_write_sz = hv_get_bytes_to_write(rbi);
bytes_read = hv_pkt_iter_bytes_read(rbi, start_read_index);
/*
* We want to signal the host only if we're transitioning
* from a "not enough free space" state to an "enough free
* space" state. For example, it's possible that this function
* could run and free up enough space to signal the host, and then
* run again and free up additional space before the host has a
* chance to clear the pending_send_sz. The 2nd invocation would
* be a null transition from "enough free space" to "enough free
* space", which doesn't warrant a signal.
*
* Exactly filling the ring buffer is treated as "not enough
* space". The ring buffer always must have at least one byte
* empty so the empty and full conditions are distinguishable.
* hv_get_bytes_to_write() doesn't fully tell the truth in
* this regard.
*
* So first check if we were in the "enough free space" state
* before we began the iteration. If so, the host was not
* blocked, and there's no need to signal.
*/
if (curr_write_sz - bytes_read > pending_sz)
return;
/*
* Similarly, if the new state is "not enough space", then
* there's no need to signal.
*/
if (curr_write_sz <= pending_sz)
return;
++channel->intr_in_full;
vmbus_setevent(channel);
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_close);
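/*
* Worked example of the decision above (illustrative numbers): with
* pending_send_sz = 1024, curr_write_sz = 1500 free bytes after the
* iteration and bytes_read = 800, the free space before the iteration
* was 1500 - 800 = 700 <= 1024, so the host was blocked; now
* 1500 > 1024, so this is the "not enough" -> "enough" transition and
* the host is signaled.
*/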
|
linux-master
|
drivers/hv/ring_buffer.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* An implementation of host initiated guest snapshot.
*
* Copyright (C) 2013, Microsoft, Inc.
* Author : K. Y. Srinivasan <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/net.h>
#include <linux/nls.h>
#include <linux/connector.h>
#include <linux/workqueue.h>
#include <linux/hyperv.h>
#include <asm/hyperv-tlfs.h>
#include "hyperv_vmbus.h"
#include "hv_utils_transport.h"
#define VSS_MAJOR 5
#define VSS_MINOR 0
#define VSS_VERSION (VSS_MAJOR << 16 | VSS_MINOR)
#define VSS_VER_COUNT 1
static const int vss_versions[] = {
VSS_VERSION
};
#define FW_VER_COUNT 1
static const int fw_versions[] = {
UTIL_FW_VERSION
};
/* See comment with struct hv_vss_msg regarding the max VMbus packet size */
#define VSS_MAX_PKT_SIZE (HV_HYP_PAGE_SIZE * 2)
/*
* Timeout values are based on expectations from the host
*/
#define VSS_FREEZE_TIMEOUT (15 * 60)
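/* i.e. 15 * 60 = 900 seconds; used by vss_send_op() for VSS_OP_FREEZE */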
/*
* Global state maintained for transaction that is being processed. For a class
* of integration services, including the "VSS service", the specified protocol
* is a "request/response" protocol, which means that there can only be
* a single outstanding transaction from the host at any given point in
* time. We use this to simplify memory management in this driver - we
* cache and process only one message at a time.
*
* While the request/response protocol is guaranteed by the host, we further
* ensure this by serializing packet processing in this driver - we do not
* read additional packets from the VMBus until the current packet is fully
* handled.
*/
static struct {
int state; /* hvutil_device_state */
int recv_len; /* number of bytes received. */
struct vmbus_channel *recv_channel; /* chn we got the request */
u64 recv_req_id; /* request ID. */
struct hv_vss_msg *msg; /* current message */
} vss_transaction;
static void vss_respond_to_host(int error);
/*
* This state maintains the version number registered by the daemon.
*/
static int dm_reg_value;
static const char vss_devname[] = "vmbus/hv_vss";
static __u8 *recv_buffer;
static struct hvutil_transport *hvt;
static void vss_timeout_func(struct work_struct *dummy);
static void vss_handle_request(struct work_struct *dummy);
static DECLARE_DELAYED_WORK(vss_timeout_work, vss_timeout_func);
static DECLARE_WORK(vss_handle_request_work, vss_handle_request);
static void vss_poll_wrapper(void *channel)
{
/* Transaction is finished, reset the state here to avoid races. */
vss_transaction.state = HVUTIL_READY;
tasklet_schedule(&((struct vmbus_channel *)channel)->callback_event);
}
/*
* Timeout handler for replies from the userspace daemon.
*/
static void vss_timeout_func(struct work_struct *dummy)
{
/*
* The userspace component did not reply in time; fail the transaction.
*/
pr_warn("VSS: timeout waiting for daemon to reply\n");
vss_respond_to_host(HV_E_FAIL);
hv_poll_channel(vss_transaction.recv_channel, vss_poll_wrapper);
}
static void vss_register_done(void)
{
hv_poll_channel(vss_transaction.recv_channel, vss_poll_wrapper);
pr_debug("VSS: userspace daemon registered\n");
}
static int vss_handle_handshake(struct hv_vss_msg *vss_msg)
{
u32 our_ver = VSS_OP_REGISTER1;
switch (vss_msg->vss_hdr.operation) {
case VSS_OP_REGISTER:
/* Daemon doesn't expect us to reply */
dm_reg_value = VSS_OP_REGISTER;
break;
case VSS_OP_REGISTER1:
/* Daemon expects us to reply with our own version */
if (hvutil_transport_send(hvt, &our_ver, sizeof(our_ver),
vss_register_done))
return -EFAULT;
dm_reg_value = VSS_OP_REGISTER1;
break;
default:
return -EINVAL;
}
pr_info("VSS: userspace daemon ver. %d connected\n", dm_reg_value);
return 0;
}
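/*
* Registration flow with the userspace daemon, as handled above
* (illustrative message trace):
*
*	daemon                            kernel
*	------                            ------
*	write(VSS_OP_REGISTER1)   ---->   vss_handle_handshake()
*	                          <----   our_ver (VSS_OP_REGISTER1)
*	                                  dm_reg_value = VSS_OP_REGISTER1
*
* A legacy daemon sends VSS_OP_REGISTER instead and expects no reply.
*/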
static int vss_on_msg(void *msg, int len)
{
struct hv_vss_msg *vss_msg = (struct hv_vss_msg *)msg;
if (len != sizeof(*vss_msg)) {
pr_debug("VSS: Message size does not match length\n");
return -EINVAL;
}
if (vss_msg->vss_hdr.operation == VSS_OP_REGISTER ||
vss_msg->vss_hdr.operation == VSS_OP_REGISTER1) {
/*
* Don't process registration messages if we're in the middle
* of processing a transaction.
*/
if (vss_transaction.state > HVUTIL_READY) {
pr_debug("VSS: Got unexpected registration request\n");
return -EINVAL;
}
return vss_handle_handshake(vss_msg);
} else if (vss_transaction.state == HVUTIL_USERSPACE_REQ) {
vss_transaction.state = HVUTIL_USERSPACE_RECV;
if (vss_msg->vss_hdr.operation == VSS_OP_HOT_BACKUP)
vss_transaction.msg->vss_cf.flags =
VSS_HBU_NO_AUTO_RECOVERY;
if (cancel_delayed_work_sync(&vss_timeout_work)) {
vss_respond_to_host(vss_msg->error);
/* Transaction is finished, reset the state. */
hv_poll_channel(vss_transaction.recv_channel,
vss_poll_wrapper);
}
} else {
/* This is a spurious call! */
pr_debug("VSS: Transaction not active\n");
return -EINVAL;
}
return 0;
}
static void vss_send_op(void)
{
int op = vss_transaction.msg->vss_hdr.operation;
int rc;
struct hv_vss_msg *vss_msg;
/* The transaction state is wrong. */
if (vss_transaction.state != HVUTIL_HOSTMSG_RECEIVED) {
pr_debug("VSS: Unexpected attempt to send to daemon\n");
return;
}
vss_msg = kzalloc(sizeof(*vss_msg), GFP_KERNEL);
if (!vss_msg)
return;
vss_msg->vss_hdr.operation = op;
vss_transaction.state = HVUTIL_USERSPACE_REQ;
schedule_delayed_work(&vss_timeout_work, op == VSS_OP_FREEZE ?
VSS_FREEZE_TIMEOUT * HZ : HV_UTIL_TIMEOUT * HZ);
rc = hvutil_transport_send(hvt, vss_msg, sizeof(*vss_msg), NULL);
if (rc) {
pr_warn("VSS: failed to communicate to the daemon: %d\n", rc);
if (cancel_delayed_work_sync(&vss_timeout_work)) {
vss_respond_to_host(HV_E_FAIL);
vss_transaction.state = HVUTIL_READY;
}
}
kfree(vss_msg);
}
static void vss_handle_request(struct work_struct *dummy)
{
switch (vss_transaction.msg->vss_hdr.operation) {
/*
* Initiate a "freeze/thaw" operation in the guest.
* We respond to the host once the operation is complete.
*
* We send the message to the user space daemon and the operation is
* performed in the daemon.
*/
case VSS_OP_THAW:
case VSS_OP_FREEZE:
case VSS_OP_HOT_BACKUP:
if (vss_transaction.state < HVUTIL_READY) {
/* Userspace is not registered yet */
pr_debug("VSS: Not ready for request.\n");
vss_respond_to_host(HV_E_FAIL);
return;
}
pr_debug("VSS: Received request for op code: %d\n",
vss_transaction.msg->vss_hdr.operation);
vss_transaction.state = HVUTIL_HOSTMSG_RECEIVED;
vss_send_op();
return;
case VSS_OP_GET_DM_INFO:
vss_transaction.msg->dm_info.flags = 0;
break;
default:
break;
}
vss_respond_to_host(0);
hv_poll_channel(vss_transaction.recv_channel, vss_poll_wrapper);
}
/*
* Send a response back to the host.
*/
static void
vss_respond_to_host(int error)
{
struct icmsg_hdr *icmsghdrp;
u32 buf_len;
struct vmbus_channel *channel;
u64 req_id;
/*
* Copy the global state for completing the transaction. Note that
* only one transaction can be active at a time.
*/
buf_len = vss_transaction.recv_len;
channel = vss_transaction.recv_channel;
req_id = vss_transaction.recv_req_id;
icmsghdrp = (struct icmsg_hdr *)
&recv_buffer[sizeof(struct vmbuspipe_hdr)];
if (channel->onchannel_callback == NULL)
/*
* We have raced with the util driver being unloaded;
* silently return.
*/
return;
icmsghdrp->status = error;
icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
vmbus_sendpacket(channel, recv_buffer, buf_len, req_id,
VM_PKT_DATA_INBAND, 0);
}
/*
* This callback is invoked when we get a VSS message from the host.
* The host ensures that only one VSS transaction can be active at a time.
*/
void hv_vss_onchannelcallback(void *context)
{
struct vmbus_channel *channel = context;
u32 recvlen;
u64 requestid;
struct hv_vss_msg *vss_msg;
int vss_srv_version;
struct icmsg_hdr *icmsghdrp;
if (vss_transaction.state > HVUTIL_READY)
return;
if (vmbus_recvpacket(channel, recv_buffer, VSS_MAX_PKT_SIZE, &recvlen, &requestid)) {
pr_err_ratelimited("VSS request received. Could not read into recv buf\n");
return;
}
if (!recvlen)
return;
/* Ensure recvlen is big enough to read header data */
if (recvlen < ICMSG_HDR) {
pr_err_ratelimited("VSS request received. Packet length too small: %d\n",
recvlen);
return;
}
icmsghdrp = (struct icmsg_hdr *)&recv_buffer[sizeof(struct vmbuspipe_hdr)];
if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
if (vmbus_prep_negotiate_resp(icmsghdrp,
recv_buffer, recvlen,
fw_versions, FW_VER_COUNT,
vss_versions, VSS_VER_COUNT,
NULL, &vss_srv_version)) {
pr_info("VSS IC version %d.%d\n",
vss_srv_version >> 16,
vss_srv_version & 0xFFFF);
}
} else if (icmsghdrp->icmsgtype == ICMSGTYPE_VSS) {
/* Ensure recvlen is big enough to contain hv_vss_msg */
if (recvlen < ICMSG_HDR + sizeof(struct hv_vss_msg)) {
pr_err_ratelimited("Invalid VSS msg. Packet length too small: %u\n",
recvlen);
return;
}
vss_msg = (struct hv_vss_msg *)&recv_buffer[ICMSG_HDR];
/*
* Stash away this global state for completing the
* transaction; note transactions are serialized.
*/
vss_transaction.recv_len = recvlen;
vss_transaction.recv_req_id = requestid;
vss_transaction.msg = (struct hv_vss_msg *)vss_msg;
schedule_work(&vss_handle_request_work);
return;
} else {
pr_err_ratelimited("VSS request received. Invalid msg type: %d\n",
icmsghdrp->icmsgtype);
return;
}
icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION |
ICMSGHDRFLAG_RESPONSE;
vmbus_sendpacket(channel, recv_buffer, recvlen, requestid,
VM_PKT_DATA_INBAND, 0);
}
static void vss_on_reset(void)
{
if (cancel_delayed_work_sync(&vss_timeout_work))
vss_respond_to_host(HV_E_FAIL);
vss_transaction.state = HVUTIL_DEVICE_INIT;
}
int
hv_vss_init(struct hv_util_service *srv)
{
if (vmbus_proto_version < VERSION_WIN8_1) {
pr_warn("Integration service 'Backup (volume snapshot)'"
" not supported on this host version.\n");
return -ENOTSUPP;
}
recv_buffer = srv->recv_buffer;
vss_transaction.recv_channel = srv->channel;
vss_transaction.recv_channel->max_pkt_size = VSS_MAX_PKT_SIZE;
/*
* When this driver loads, the user level daemon that
* processes the host requests may not yet be running.
* Defer processing channel callbacks until the daemon
* has registered.
*/
vss_transaction.state = HVUTIL_DEVICE_INIT;
hvt = hvutil_transport_init(vss_devname, CN_VSS_IDX, CN_VSS_VAL,
vss_on_msg, vss_on_reset);
if (!hvt) {
pr_warn("VSS: Failed to initialize transport\n");
return -EFAULT;
}
return 0;
}
static void hv_vss_cancel_work(void)
{
cancel_delayed_work_sync(&vss_timeout_work);
cancel_work_sync(&vss_handle_request_work);
}
int hv_vss_pre_suspend(void)
{
struct vmbus_channel *channel = vss_transaction.recv_channel;
struct hv_vss_msg *vss_msg;
/*
* Fake a THAW message for the user space daemon in case the daemon
* has frozen the file systems. It doesn't matter if there is already
* a message pending to be delivered to the user space since we force
* vss_transaction.state to be HVUTIL_READY, so the user space daemon's
* write() will fail with EINVAL (see vss_on_msg()), and the daemon
* will reset the device by closing and re-opening it.
*/
vss_msg = kzalloc(sizeof(*vss_msg), GFP_KERNEL);
if (!vss_msg)
return -ENOMEM;
tasklet_disable(&channel->callback_event);
vss_msg->vss_hdr.operation = VSS_OP_THAW;
/* Cancel any possible pending work. */
hv_vss_cancel_work();
/* We don't care about the return value. */
hvutil_transport_send(hvt, vss_msg, sizeof(*vss_msg), NULL);
kfree(vss_msg);
vss_transaction.state = HVUTIL_READY;
/* tasklet_enable() will be called in hv_vss_pre_resume(). */
return 0;
}
int hv_vss_pre_resume(void)
{
struct vmbus_channel *channel = vss_transaction.recv_channel;
tasklet_enable(&channel->callback_event);
return 0;
}
void hv_vss_deinit(void)
{
vss_transaction.state = HVUTIL_DEVICE_DYING;
hv_vss_cancel_work();
hvutil_transport_destroy(hvt);
}
|
linux-master
|
drivers/hv/hv_snapshot.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2009, Microsoft Corporation.
*
* Authors:
* Haiyang Zhang <[email protected]>
* Hank Janssen <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/hyperv.h>
#include <linux/random.h>
#include <linux/clockchips.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <clocksource/hyperv_timer.h>
#include <asm/mshyperv.h>
#include <linux/set_memory.h>
#include "hyperv_vmbus.h"
/* The one and only */
struct hv_context hv_context;
/*
* hv_init - Main initialization routine.
*
* This routine must be called before any other routines in here are called
*/
int hv_init(void)
{
hv_context.cpu_context = alloc_percpu(struct hv_per_cpu_context);
if (!hv_context.cpu_context)
return -ENOMEM;
return 0;
}
/*
* hv_post_message - Post a message using the hypervisor message IPC.
*
* This involves a hypercall.
*/
int hv_post_message(union hv_connection_id connection_id,
enum hv_message_type message_type,
void *payload, size_t payload_size)
{
struct hv_input_post_message *aligned_msg;
unsigned long flags;
u64 status;
if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT)
return -EMSGSIZE;
local_irq_save(flags);
/*
* A TDX VM with the paravisor must use the decrypted post_msg_page: see
* the comment in struct hv_per_cpu_context. A SNP VM with the paravisor
* can use the encrypted hyperv_pcpu_input_arg because it copies the
* input into the GHCB page, which has been decrypted by the paravisor.
*/
if (hv_isolation_type_tdx() && ms_hyperv.paravisor_present)
aligned_msg = this_cpu_ptr(hv_context.cpu_context)->post_msg_page;
else
aligned_msg = *this_cpu_ptr(hyperv_pcpu_input_arg);
aligned_msg->connectionid = connection_id;
aligned_msg->reserved = 0;
aligned_msg->message_type = message_type;
aligned_msg->payload_size = payload_size;
memcpy((void *)aligned_msg->payload, payload, payload_size);
if (ms_hyperv.paravisor_present) {
if (hv_isolation_type_tdx())
status = hv_tdx_hypercall(HVCALL_POST_MESSAGE,
virt_to_phys(aligned_msg), 0);
else if (hv_isolation_type_snp())
status = hv_ghcb_hypercall(HVCALL_POST_MESSAGE,
aligned_msg, NULL,
sizeof(*aligned_msg));
else
status = HV_STATUS_INVALID_PARAMETER;
} else {
status = hv_do_hypercall(HVCALL_POST_MESSAGE,
aligned_msg, NULL);
}
local_irq_restore(flags);
return hv_result(status);
}
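/*
 * Allocate the per-cpu SynIC state: the NUMA map, the message and event
 * pages (skipped when the paravisor or the root partition provides them),
 * and, for a TDX VM with a paravisor, the decrypted post_msg_page. Pages
 * shared with the host under SNP/TDX isolation without a paravisor are
 * marked decrypted here.
 */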
int hv_synic_alloc(void)
{
int cpu, ret = -ENOMEM;
struct hv_per_cpu_context *hv_cpu;
/*
* First, zero all per-cpu memory areas so hv_synic_free() can
* detect what memory has been allocated and cleanup properly
* after any failures.
*/
for_each_present_cpu(cpu) {
hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);
memset(hv_cpu, 0, sizeof(*hv_cpu));
}
hv_context.hv_numa_map = kcalloc(nr_node_ids, sizeof(struct cpumask),
GFP_KERNEL);
if (hv_context.hv_numa_map == NULL) {
pr_err("Unable to allocate NUMA map\n");
goto err;
}
for_each_present_cpu(cpu) {
hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);
tasklet_init(&hv_cpu->msg_dpc,
vmbus_on_msg_dpc, (unsigned long) hv_cpu);
if (ms_hyperv.paravisor_present && hv_isolation_type_tdx()) {
hv_cpu->post_msg_page = (void *)get_zeroed_page(GFP_ATOMIC);
if (hv_cpu->post_msg_page == NULL) {
pr_err("Unable to allocate post msg page\n");
goto err;
}
ret = set_memory_decrypted((unsigned long)hv_cpu->post_msg_page, 1);
if (ret) {
pr_err("Failed to decrypt post msg page: %d\n", ret);
/* Just leak the page, as it's unsafe to free the page. */
hv_cpu->post_msg_page = NULL;
goto err;
}
memset(hv_cpu->post_msg_page, 0, PAGE_SIZE);
}
/*
* Synic message and event pages are allocated by paravisor.
* Skip these pages allocation here.
*/
if (!ms_hyperv.paravisor_present && !hv_root_partition) {
hv_cpu->synic_message_page =
(void *)get_zeroed_page(GFP_ATOMIC);
if (hv_cpu->synic_message_page == NULL) {
pr_err("Unable to allocate SYNIC message page\n");
goto err;
}
hv_cpu->synic_event_page =
(void *)get_zeroed_page(GFP_ATOMIC);
if (hv_cpu->synic_event_page == NULL) {
pr_err("Unable to allocate SYNIC event page\n");
free_page((unsigned long)hv_cpu->synic_message_page);
hv_cpu->synic_message_page = NULL;
goto err;
}
}
if (!ms_hyperv.paravisor_present &&
(hv_isolation_type_snp() || hv_isolation_type_tdx())) {
ret = set_memory_decrypted((unsigned long)
hv_cpu->synic_message_page, 1);
if (ret) {
pr_err("Failed to decrypt SYNIC msg page: %d\n", ret);
hv_cpu->synic_message_page = NULL;
/*
* Free the event page here so that hv_synic_free()
* won't later try to re-encrypt it.
*/
free_page((unsigned long)hv_cpu->synic_event_page);
hv_cpu->synic_event_page = NULL;
goto err;
}
ret = set_memory_decrypted((unsigned long)
hv_cpu->synic_event_page, 1);
if (ret) {
pr_err("Failed to decrypt SYNIC event page: %d\n", ret);
hv_cpu->synic_event_page = NULL;
goto err;
}
memset(hv_cpu->synic_message_page, 0, PAGE_SIZE);
memset(hv_cpu->synic_event_page, 0, PAGE_SIZE);
}
}
return 0;
err:
/*
* Any memory allocations that succeeded will be freed when
* the caller cleans up by calling hv_synic_free()
*/
return ret;
}
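/*
 * Undo hv_synic_alloc(): re-encrypt any pages that were shared with the
 * host, then free the per-cpu pages and the NUMA map. A page whose
 * re-encryption fails is intentionally leaked instead of freed.
 */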
void hv_synic_free(void)
{
int cpu, ret;
for_each_present_cpu(cpu) {
struct hv_per_cpu_context *hv_cpu
= per_cpu_ptr(hv_context.cpu_context, cpu);
/* It's better to leak the page if the encryption fails. */
if (ms_hyperv.paravisor_present && hv_isolation_type_tdx()) {
if (hv_cpu->post_msg_page) {
ret = set_memory_encrypted((unsigned long)
hv_cpu->post_msg_page, 1);
if (ret) {
pr_err("Failed to encrypt post msg page: %d\n", ret);
hv_cpu->post_msg_page = NULL;
}
}
}
if (!ms_hyperv.paravisor_present &&
(hv_isolation_type_snp() || hv_isolation_type_tdx())) {
if (hv_cpu->synic_message_page) {
ret = set_memory_encrypted((unsigned long)
hv_cpu->synic_message_page, 1);
if (ret) {
pr_err("Failed to encrypt SYNIC msg page: %d\n", ret);
hv_cpu->synic_message_page = NULL;
}
}
if (hv_cpu->synic_event_page) {
ret = set_memory_encrypted((unsigned long)
hv_cpu->synic_event_page, 1);
if (ret) {
pr_err("Failed to encrypt SYNIC event page: %d\n", ret);
hv_cpu->synic_event_page = NULL;
}
}
}
free_page((unsigned long)hv_cpu->post_msg_page);
free_page((unsigned long)hv_cpu->synic_event_page);
free_page((unsigned long)hv_cpu->synic_message_page);
}
kfree(hv_context.hv_numa_map);
}
/*
* hv_synic_init - Initialize the Synthetic Interrupt Controller.
*
 * If it is already initialized by another entity (i.e., the x2v shim), we need to
* retrieve the initialized message and event pages. Otherwise, we create and
* initialize the message and event pages.
*/
void hv_synic_enable_regs(unsigned int cpu)
{
struct hv_per_cpu_context *hv_cpu
= per_cpu_ptr(hv_context.cpu_context, cpu);
union hv_synic_simp simp;
union hv_synic_siefp siefp;
union hv_synic_sint shared_sint;
union hv_synic_scontrol sctrl;
/* Setup the Synic's message page */
simp.as_uint64 = hv_get_register(HV_REGISTER_SIMP);
simp.simp_enabled = 1;
if (ms_hyperv.paravisor_present || hv_root_partition) {
/* Mask out vTOM bit. ioremap_cache() maps decrypted */
u64 base = (simp.base_simp_gpa << HV_HYP_PAGE_SHIFT) &
~ms_hyperv.shared_gpa_boundary;
hv_cpu->synic_message_page
= (void *)ioremap_cache(base, HV_HYP_PAGE_SIZE);
if (!hv_cpu->synic_message_page)
pr_err("Fail to map synic message page.\n");
} else {
simp.base_simp_gpa = virt_to_phys(hv_cpu->synic_message_page)
>> HV_HYP_PAGE_SHIFT;
}
hv_set_register(HV_REGISTER_SIMP, simp.as_uint64);
/* Setup the Synic's event page */
siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
siefp.siefp_enabled = 1;
if (ms_hyperv.paravisor_present || hv_root_partition) {
/* Mask out vTOM bit. ioremap_cache() maps decrypted */
u64 base = (siefp.base_siefp_gpa << HV_HYP_PAGE_SHIFT) &
~ms_hyperv.shared_gpa_boundary;
hv_cpu->synic_event_page
= (void *)ioremap_cache(base, HV_HYP_PAGE_SIZE);
if (!hv_cpu->synic_event_page)
pr_err("Fail to map synic event page.\n");
} else {
siefp.base_siefp_gpa = virt_to_phys(hv_cpu->synic_event_page)
>> HV_HYP_PAGE_SHIFT;
}
hv_set_register(HV_REGISTER_SIEFP, siefp.as_uint64);
/* Setup the shared SINT. */
if (vmbus_irq != -1)
enable_percpu_irq(vmbus_irq, 0);
shared_sint.as_uint64 = hv_get_register(HV_REGISTER_SINT0 +
VMBUS_MESSAGE_SINT);
shared_sint.vector = vmbus_interrupt;
shared_sint.masked = false;
/*
* On architectures where Hyper-V doesn't support AEOI (e.g., ARM64),
* it doesn't provide a recommendation flag and AEOI must be disabled.
*/
#ifdef HV_DEPRECATING_AEOI_RECOMMENDED
shared_sint.auto_eoi =
!(ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED);
#else
shared_sint.auto_eoi = 0;
#endif
hv_set_register(HV_REGISTER_SINT0 + VMBUS_MESSAGE_SINT,
shared_sint.as_uint64);
/* Enable the global synic bit */
sctrl.as_uint64 = hv_get_register(HV_REGISTER_SCONTROL);
sctrl.enable = 1;
hv_set_register(HV_REGISTER_SCONTROL, sctrl.as_uint64);
}
int hv_synic_init(unsigned int cpu)
{
hv_synic_enable_regs(cpu);
hv_stimer_legacy_init(cpu, VMBUS_MESSAGE_SINT);
return 0;
}
/*
* hv_synic_cleanup - Cleanup routine for hv_synic_init().
*/
void hv_synic_disable_regs(unsigned int cpu)
{
struct hv_per_cpu_context *hv_cpu
= per_cpu_ptr(hv_context.cpu_context, cpu);
union hv_synic_sint shared_sint;
union hv_synic_simp simp;
union hv_synic_siefp siefp;
union hv_synic_scontrol sctrl;
shared_sint.as_uint64 = hv_get_register(HV_REGISTER_SINT0 +
VMBUS_MESSAGE_SINT);
shared_sint.masked = 1;
/* Need to correctly cleanup in the case of SMP!!! */
/* Disable the interrupt */
hv_set_register(HV_REGISTER_SINT0 + VMBUS_MESSAGE_SINT,
shared_sint.as_uint64);
simp.as_uint64 = hv_get_register(HV_REGISTER_SIMP);
/*
	 * In an isolation VM, the SIMP and SIEFP pages are allocated by the
	 * paravisor. These pages will also be used by the kdump kernel, so
	 * just reset the enable bit here and keep the page addresses.
*/
simp.simp_enabled = 0;
if (ms_hyperv.paravisor_present || hv_root_partition) {
iounmap(hv_cpu->synic_message_page);
hv_cpu->synic_message_page = NULL;
} else {
simp.base_simp_gpa = 0;
}
hv_set_register(HV_REGISTER_SIMP, simp.as_uint64);
siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
siefp.siefp_enabled = 0;
if (ms_hyperv.paravisor_present || hv_root_partition) {
iounmap(hv_cpu->synic_event_page);
hv_cpu->synic_event_page = NULL;
} else {
siefp.base_siefp_gpa = 0;
}
hv_set_register(HV_REGISTER_SIEFP, siefp.as_uint64);
/* Disable the global synic bit */
sctrl.as_uint64 = hv_get_register(HV_REGISTER_SCONTROL);
sctrl.enable = 0;
hv_set_register(HV_REGISTER_SCONTROL, sctrl.as_uint64);
if (vmbus_irq != -1)
disable_percpu_irq(vmbus_irq);
}
#define HV_MAX_TRIES 3
/*
* Scan the event flags page of 'this' CPU looking for any bit that is set. If we find one
* bit set, then wait for a few milliseconds. Repeat these steps for a maximum of 3 times.
* Return 'true', if there is still any set bit after this operation; 'false', otherwise.
*
* If a bit is set, that means there is a pending channel interrupt. The expectation is
* that the normal interrupt handling mechanism will find and process the channel interrupt
* "very soon", and in the process clear the bit.
*/
static bool hv_synic_event_pending(void)
{
struct hv_per_cpu_context *hv_cpu = this_cpu_ptr(hv_context.cpu_context);
union hv_synic_event_flags *event =
(union hv_synic_event_flags *)hv_cpu->synic_event_page + VMBUS_MESSAGE_SINT;
unsigned long *recv_int_page = event->flags; /* assumes VMBus version >= VERSION_WIN8 */
bool pending;
u32 relid;
int tries = 0;
retry:
pending = false;
for_each_set_bit(relid, recv_int_page, HV_EVENT_FLAGS_COUNT) {
/* Special case - VMBus channel protocol messages */
if (relid == 0)
continue;
pending = true;
break;
}
if (pending && tries++ < HV_MAX_TRIES) {
usleep_range(10000, 20000);
goto retry;
}
return pending;
}
int hv_synic_cleanup(unsigned int cpu)
{
struct vmbus_channel *channel, *sc;
bool channel_found = false;
if (vmbus_connection.conn_state != CONNECTED)
goto always_cleanup;
/*
* Hyper-V does not provide a way to change the connect CPU once
* it is set; we must prevent the connect CPU from going offline
* while the VM is running normally. But in the panic or kexec()
* path where the vmbus is already disconnected, the CPU must be
* allowed to shut down.
*/
if (cpu == VMBUS_CONNECT_CPU)
return -EBUSY;
/*
* Search for channels which are bound to the CPU we're about to
* cleanup. In case we find one and vmbus is still connected, we
* fail; this will effectively prevent CPU offlining.
*
* TODO: Re-bind the channels to different CPUs.
*/
mutex_lock(&vmbus_connection.channel_mutex);
list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
if (channel->target_cpu == cpu) {
channel_found = true;
break;
}
list_for_each_entry(sc, &channel->sc_list, sc_list) {
if (sc->target_cpu == cpu) {
channel_found = true;
break;
}
}
if (channel_found)
break;
}
mutex_unlock(&vmbus_connection.channel_mutex);
if (channel_found)
return -EBUSY;
/*
* channel_found == false means that any channels that were previously
* assigned to the CPU have been reassigned elsewhere with a call of
* vmbus_send_modifychannel(). Scan the event flags page looking for
* bits that are set and waiting with a timeout for vmbus_chan_sched()
* to process such bits. If bits are still set after this operation
* and VMBus is connected, fail the CPU offlining operation.
*/
if (vmbus_proto_version >= VERSION_WIN10_V4_1 && hv_synic_event_pending())
return -EBUSY;
always_cleanup:
hv_stimer_legacy_cleanup(cpu);
hv_synic_disable_regs(cpu);
return 0;
}
|
linux-master
|
drivers/hv/hv.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* An implementation of file copy service.
*
* Copyright (C) 2014, Microsoft, Inc.
*
* Author : K. Y. Srinivasan <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/nls.h>
#include <linux/workqueue.h>
#include <linux/hyperv.h>
#include <linux/sched.h>
#include <asm/hyperv-tlfs.h>
#include "hyperv_vmbus.h"
#include "hv_utils_transport.h"
#define WIN8_SRV_MAJOR 1
#define WIN8_SRV_MINOR 1
#define WIN8_SRV_VERSION (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR)
#define FCOPY_VER_COUNT 1
static const int fcopy_versions[] = {
WIN8_SRV_VERSION
};
#define FW_VER_COUNT 1
static const int fw_versions[] = {
UTIL_FW_VERSION
};
/*
* Global state maintained for transaction that is being processed.
* For a class of integration services, including the "file copy service",
* the specified protocol is a "request/response" protocol which means that
 * there can only be a single outstanding transaction from the host at any
* given point in time. We use this to simplify memory management in this
* driver - we cache and process only one message at a time.
*
* While the request/response protocol is guaranteed by the host, we further
* ensure this by serializing packet processing in this driver - we do not
 * read additional packets from the VMBus until the current packet is fully
* handled.
*/
static struct {
int state; /* hvutil_device_state */
int recv_len; /* number of bytes received. */
struct hv_fcopy_hdr *fcopy_msg; /* current message */
struct vmbus_channel *recv_channel; /* chn we got the request */
u64 recv_req_id; /* request ID. */
} fcopy_transaction;
static void fcopy_respond_to_host(int error);
static void fcopy_send_data(struct work_struct *dummy);
static void fcopy_timeout_func(struct work_struct *dummy);
static DECLARE_DELAYED_WORK(fcopy_timeout_work, fcopy_timeout_func);
static DECLARE_WORK(fcopy_send_work, fcopy_send_data);
static const char fcopy_devname[] = "vmbus/hv_fcopy";
static u8 *recv_buffer;
static struct hvutil_transport *hvt;
/*
* This state maintains the version number registered by the daemon.
*/
static int dm_reg_value;
static void fcopy_poll_wrapper(void *channel)
{
/* Transaction is finished, reset the state here to avoid races. */
fcopy_transaction.state = HVUTIL_READY;
tasklet_schedule(&((struct vmbus_channel *)channel)->callback_event);
}
static void fcopy_timeout_func(struct work_struct *dummy)
{
/*
* If the timer fires, the user-mode component has not responded;
* process the pending transaction.
*/
fcopy_respond_to_host(HV_E_FAIL);
hv_poll_channel(fcopy_transaction.recv_channel, fcopy_poll_wrapper);
}
static void fcopy_register_done(void)
{
pr_debug("FCP: userspace daemon registered\n");
hv_poll_channel(fcopy_transaction.recv_channel, fcopy_poll_wrapper);
}
static int fcopy_handle_handshake(u32 version)
{
u32 our_ver = FCOPY_CURRENT_VERSION;
switch (version) {
case FCOPY_VERSION_0:
/* Daemon doesn't expect us to reply */
dm_reg_value = version;
break;
case FCOPY_VERSION_1:
/* Daemon expects us to reply with our own version */
if (hvutil_transport_send(hvt, &our_ver, sizeof(our_ver),
fcopy_register_done))
return -EFAULT;
dm_reg_value = version;
break;
default:
/*
* For now we will fail the registration.
* If and when we have multiple versions to
* deal with, we will be backward compatible.
* We will add this code when needed.
*/
return -EINVAL;
}
pr_debug("FCP: userspace daemon ver. %d connected\n", version);
return 0;
}
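/*
 * Forward the current host request to the user space daemon; for
 * START_FILE_COPY the utf16 file and path names are converted to utf8 first.
 */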
static void fcopy_send_data(struct work_struct *dummy)
{
struct hv_start_fcopy *smsg_out = NULL;
int operation = fcopy_transaction.fcopy_msg->operation;
struct hv_start_fcopy *smsg_in;
void *out_src;
int rc, out_len;
/*
	 * The strings sent from the host are encoded in
	 * utf16; convert them to utf8 strings.
	 * The host assures us that the utf16 strings will not exceed
	 * the max lengths specified. We will, however, reserve room
	 * for the string terminating character - in utf16s_to_utf8s()
	 * we limit the size of the buffer where the converted
	 * string is placed to W_MAX_PATH - 1 to guarantee
	 * that the strings can be properly terminated!
*/
switch (operation) {
case START_FILE_COPY:
out_len = sizeof(struct hv_start_fcopy);
smsg_out = kzalloc(sizeof(*smsg_out), GFP_KERNEL);
if (!smsg_out)
return;
smsg_out->hdr.operation = operation;
smsg_in = (struct hv_start_fcopy *)fcopy_transaction.fcopy_msg;
utf16s_to_utf8s((wchar_t *)smsg_in->file_name, W_MAX_PATH,
UTF16_LITTLE_ENDIAN,
(__u8 *)&smsg_out->file_name, W_MAX_PATH - 1);
utf16s_to_utf8s((wchar_t *)smsg_in->path_name, W_MAX_PATH,
UTF16_LITTLE_ENDIAN,
(__u8 *)&smsg_out->path_name, W_MAX_PATH - 1);
smsg_out->copy_flags = smsg_in->copy_flags;
smsg_out->file_size = smsg_in->file_size;
out_src = smsg_out;
break;
case WRITE_TO_FILE:
out_src = fcopy_transaction.fcopy_msg;
out_len = sizeof(struct hv_do_fcopy);
break;
default:
out_src = fcopy_transaction.fcopy_msg;
out_len = fcopy_transaction.recv_len;
break;
}
fcopy_transaction.state = HVUTIL_USERSPACE_REQ;
rc = hvutil_transport_send(hvt, out_src, out_len, NULL);
if (rc) {
pr_debug("FCP: failed to communicate to the daemon: %d\n", rc);
if (cancel_delayed_work_sync(&fcopy_timeout_work)) {
fcopy_respond_to_host(HV_E_FAIL);
fcopy_transaction.state = HVUTIL_READY;
}
}
kfree(smsg_out);
}
/*
* Send a response back to the host.
*/
static void
fcopy_respond_to_host(int error)
{
struct icmsg_hdr *icmsghdr;
u32 buf_len;
struct vmbus_channel *channel;
u64 req_id;
/*
* Copy the global state for completing the transaction. Note that
* only one transaction can be active at a time. This is guaranteed
* by the file copy protocol implemented by the host. Furthermore,
* the "transaction active" state we maintain ensures that there can
* only be one active transaction at a time.
*/
buf_len = fcopy_transaction.recv_len;
channel = fcopy_transaction.recv_channel;
req_id = fcopy_transaction.recv_req_id;
icmsghdr = (struct icmsg_hdr *)
&recv_buffer[sizeof(struct vmbuspipe_hdr)];
if (channel->onchannel_callback == NULL)
/*
* We have raced with util driver being unloaded;
* silently return.
*/
return;
icmsghdr->status = error;
icmsghdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
vmbus_sendpacket(channel, recv_buffer, buf_len, req_id,
VM_PKT_DATA_INBAND, 0);
}
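/*
 * Channel callback: read one packet from the host, answer version
 * negotiation directly, and defer FCOPY requests to the daemon via
 * fcopy_send_work.
 */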
void hv_fcopy_onchannelcallback(void *context)
{
struct vmbus_channel *channel = context;
u32 recvlen;
u64 requestid;
struct hv_fcopy_hdr *fcopy_msg;
struct icmsg_hdr *icmsghdr;
int fcopy_srv_version;
if (fcopy_transaction.state > HVUTIL_READY)
return;
if (vmbus_recvpacket(channel, recv_buffer, HV_HYP_PAGE_SIZE * 2, &recvlen, &requestid)) {
pr_err_ratelimited("Fcopy request received. Could not read into recv buf\n");
return;
}
if (!recvlen)
return;
/* Ensure recvlen is big enough to read header data */
if (recvlen < ICMSG_HDR) {
pr_err_ratelimited("Fcopy request received. Packet length too small: %d\n",
recvlen);
return;
}
icmsghdr = (struct icmsg_hdr *)&recv_buffer[
sizeof(struct vmbuspipe_hdr)];
if (icmsghdr->icmsgtype == ICMSGTYPE_NEGOTIATE) {
if (vmbus_prep_negotiate_resp(icmsghdr,
recv_buffer, recvlen,
fw_versions, FW_VER_COUNT,
fcopy_versions, FCOPY_VER_COUNT,
NULL, &fcopy_srv_version)) {
pr_info("FCopy IC version %d.%d\n",
fcopy_srv_version >> 16,
fcopy_srv_version & 0xFFFF);
}
} else if (icmsghdr->icmsgtype == ICMSGTYPE_FCOPY) {
/* Ensure recvlen is big enough to contain hv_fcopy_hdr */
if (recvlen < ICMSG_HDR + sizeof(struct hv_fcopy_hdr)) {
pr_err_ratelimited("Invalid Fcopy hdr. Packet length too small: %u\n",
recvlen);
return;
}
fcopy_msg = (struct hv_fcopy_hdr *)&recv_buffer[ICMSG_HDR];
/*
* Stash away this global state for completing the
* transaction; note transactions are serialized.
*/
fcopy_transaction.recv_len = recvlen;
fcopy_transaction.recv_req_id = requestid;
fcopy_transaction.fcopy_msg = fcopy_msg;
if (fcopy_transaction.state < HVUTIL_READY) {
/* Userspace is not registered yet */
fcopy_respond_to_host(HV_E_FAIL);
return;
}
fcopy_transaction.state = HVUTIL_HOSTMSG_RECEIVED;
/*
* Send the information to the user-level daemon.
*/
schedule_work(&fcopy_send_work);
schedule_delayed_work(&fcopy_timeout_work,
HV_UTIL_TIMEOUT * HZ);
return;
} else {
pr_err_ratelimited("Fcopy request received. Invalid msg type: %d\n",
icmsghdr->icmsgtype);
return;
}
icmsghdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
vmbus_sendpacket(channel, recv_buffer, recvlen, requestid,
VM_PKT_DATA_INBAND, 0);
}
/* Callback when data is received from userspace */
static int fcopy_on_msg(void *msg, int len)
{
int *val = (int *)msg;
if (len != sizeof(int))
return -EINVAL;
if (fcopy_transaction.state == HVUTIL_DEVICE_INIT)
return fcopy_handle_handshake(*val);
if (fcopy_transaction.state != HVUTIL_USERSPACE_REQ)
return -EINVAL;
/*
* Complete the transaction by forwarding the result
* to the host. But first, cancel the timeout.
*/
if (cancel_delayed_work_sync(&fcopy_timeout_work)) {
fcopy_transaction.state = HVUTIL_USERSPACE_RECV;
fcopy_respond_to_host(*val);
hv_poll_channel(fcopy_transaction.recv_channel,
fcopy_poll_wrapper);
}
return 0;
}
static void fcopy_on_reset(void)
{
/*
* The daemon has exited; reset the state.
*/
fcopy_transaction.state = HVUTIL_DEVICE_INIT;
if (cancel_delayed_work_sync(&fcopy_timeout_work))
fcopy_respond_to_host(HV_E_FAIL);
}
int hv_fcopy_init(struct hv_util_service *srv)
{
recv_buffer = srv->recv_buffer;
fcopy_transaction.recv_channel = srv->channel;
fcopy_transaction.recv_channel->max_pkt_size = HV_HYP_PAGE_SIZE * 2;
/*
* When this driver loads, the user level daemon that
* processes the host requests may not yet be running.
* Defer processing channel callbacks until the daemon
* has registered.
*/
fcopy_transaction.state = HVUTIL_DEVICE_INIT;
hvt = hvutil_transport_init(fcopy_devname, 0, 0,
fcopy_on_msg, fcopy_on_reset);
if (!hvt)
return -EFAULT;
return 0;
}
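/* Cancel the request timeout and any send work queued for the daemon. */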
static void hv_fcopy_cancel_work(void)
{
cancel_delayed_work_sync(&fcopy_timeout_work);
cancel_work_sync(&fcopy_send_work);
}
int hv_fcopy_pre_suspend(void)
{
struct vmbus_channel *channel = fcopy_transaction.recv_channel;
struct hv_fcopy_hdr *fcopy_msg;
/*
* Fake a CANCEL_FCOPY message for the user space daemon in case the
* daemon is in the middle of copying some file. It doesn't matter if
* there is already a message pending to be delivered to the user
* space since we force fcopy_transaction.state to be HVUTIL_READY, so
* the user space daemon's write() will fail with EINVAL (see
* fcopy_on_msg()), and the daemon will reset the device by closing
* and re-opening it.
*/
fcopy_msg = kzalloc(sizeof(*fcopy_msg), GFP_KERNEL);
if (!fcopy_msg)
return -ENOMEM;
tasklet_disable(&channel->callback_event);
fcopy_msg->operation = CANCEL_FCOPY;
hv_fcopy_cancel_work();
/* We don't care about the return value. */
hvutil_transport_send(hvt, fcopy_msg, sizeof(*fcopy_msg), NULL);
kfree(fcopy_msg);
fcopy_transaction.state = HVUTIL_READY;
/* tasklet_enable() will be called in hv_fcopy_pre_resume(). */
return 0;
}
int hv_fcopy_pre_resume(void)
{
struct vmbus_channel *channel = fcopy_transaction.recv_channel;
tasklet_enable(&channel->callback_event);
return 0;
}
void hv_fcopy_deinit(void)
{
fcopy_transaction.state = HVUTIL_DEVICE_DYING;
hv_fcopy_cancel_work();
hvutil_transport_destroy(hvt);
}
|
linux-master
|
drivers/hv/hv_fcopy.c
|
/*
* An implementation of key value pair (KVP) functionality for Linux.
*
*
* Copyright (C) 2010, Novell, Inc.
* Author : K. Y. Srinivasan <[email protected]>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/net.h>
#include <linux/nls.h>
#include <linux/connector.h>
#include <linux/workqueue.h>
#include <linux/hyperv.h>
#include <asm/hyperv-tlfs.h>
#include "hyperv_vmbus.h"
#include "hv_utils_transport.h"
/*
 * Pre-Win8 version numbers used in WS2008 and WS2008 R2 (Win7)
*/
#define WS2008_SRV_MAJOR 1
#define WS2008_SRV_MINOR 0
#define WS2008_SRV_VERSION (WS2008_SRV_MAJOR << 16 | WS2008_SRV_MINOR)
#define WIN7_SRV_MAJOR 3
#define WIN7_SRV_MINOR 0
#define WIN7_SRV_VERSION (WIN7_SRV_MAJOR << 16 | WIN7_SRV_MINOR)
#define WIN8_SRV_MAJOR 4
#define WIN8_SRV_MINOR 0
#define WIN8_SRV_VERSION (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR)
#define KVP_VER_COUNT 3
static const int kvp_versions[] = {
WIN8_SRV_VERSION,
WIN7_SRV_VERSION,
WS2008_SRV_VERSION
};
#define FW_VER_COUNT 2
static const int fw_versions[] = {
UTIL_FW_VERSION,
UTIL_WS2K8_FW_VERSION
};
/*
* Global state maintained for transaction that is being processed. For a class
* of integration services, including the "KVP service", the specified protocol
* is a "request/response" protocol which means that there can only be single
* outstanding transaction from the host at any given point in time. We use
* this to simplify memory management in this driver - we cache and process
* only one message at a time.
*
* While the request/response protocol is guaranteed by the host, we further
* ensure this by serializing packet processing in this driver - we do not
* read additional packets from the VMBUS until the current packet is fully
* handled.
*/
static struct {
int state; /* hvutil_device_state */
int recv_len; /* number of bytes received. */
struct hv_kvp_msg *kvp_msg; /* current message */
struct vmbus_channel *recv_channel; /* chn we got the request */
u64 recv_req_id; /* request ID. */
} kvp_transaction;
/*
* This state maintains the version number registered by the daemon.
*/
static int dm_reg_value;
static void kvp_send_key(struct work_struct *dummy);
static void kvp_respond_to_host(struct hv_kvp_msg *msg, int error);
static void kvp_timeout_func(struct work_struct *dummy);
static void kvp_host_handshake_func(struct work_struct *dummy);
static void kvp_register(int);
static DECLARE_DELAYED_WORK(kvp_timeout_work, kvp_timeout_func);
static DECLARE_DELAYED_WORK(kvp_host_handshake_work, kvp_host_handshake_func);
static DECLARE_WORK(kvp_sendkey_work, kvp_send_key);
static const char kvp_devname[] = "vmbus/hv_kvp";
static u8 *recv_buffer;
static struct hvutil_transport *hvt;
/*
* Register the kernel component with the user-level daemon.
* As part of this registration, pass the LIC version number.
* This number has no meaning, it satisfies the registration protocol.
*/
#define HV_DRV_VERSION "3.1"
static void kvp_poll_wrapper(void *channel)
{
/* Transaction is finished, reset the state here to avoid races. */
kvp_transaction.state = HVUTIL_READY;
tasklet_schedule(&((struct vmbus_channel *)channel)->callback_event);
}
static void kvp_register_done(void)
{
/*
	 * If we're still negotiating with the host, cancel the timeout
	 * work so we don't poll the channel twice.
*/
pr_debug("KVP: userspace daemon registered\n");
cancel_delayed_work_sync(&kvp_host_handshake_work);
hv_poll_channel(kvp_transaction.recv_channel, kvp_poll_wrapper);
}
static void
kvp_register(int reg_value)
{
struct hv_kvp_msg *kvp_msg;
char *version;
kvp_msg = kzalloc(sizeof(*kvp_msg), GFP_KERNEL);
if (kvp_msg) {
version = kvp_msg->body.kvp_register.version;
kvp_msg->kvp_hdr.operation = reg_value;
strcpy(version, HV_DRV_VERSION);
hvutil_transport_send(hvt, kvp_msg, sizeof(*kvp_msg),
kvp_register_done);
kfree(kvp_msg);
}
}
static void kvp_timeout_func(struct work_struct *dummy)
{
/*
* If the timer fires, the user-mode component has not responded;
* process the pending transaction.
*/
kvp_respond_to_host(NULL, HV_E_FAIL);
hv_poll_channel(kvp_transaction.recv_channel, kvp_poll_wrapper);
}
static void kvp_host_handshake_func(struct work_struct *dummy)
{
tasklet_schedule(&kvp_transaction.recv_channel->callback_event);
}
static int kvp_handle_handshake(struct hv_kvp_msg *msg)
{
switch (msg->kvp_hdr.operation) {
case KVP_OP_REGISTER:
dm_reg_value = KVP_OP_REGISTER;
pr_info("KVP: IP injection functionality not available\n");
pr_info("KVP: Upgrade the KVP daemon\n");
break;
case KVP_OP_REGISTER1:
dm_reg_value = KVP_OP_REGISTER1;
break;
default:
pr_info("KVP: incompatible daemon\n");
pr_info("KVP: KVP version: %d, Daemon version: %d\n",
KVP_OP_REGISTER1, msg->kvp_hdr.operation);
return -EINVAL;
}
/*
* We have a compatible daemon; complete the handshake.
*/
pr_debug("KVP: userspace daemon ver. %d connected\n",
msg->kvp_hdr.operation);
kvp_register(dm_reg_value);
return 0;
}
/*
* Callback when data is received from user mode.
*/
static int kvp_on_msg(void *msg, int len)
{
struct hv_kvp_msg *message = (struct hv_kvp_msg *)msg;
struct hv_kvp_msg_enumerate *data;
int error = 0;
if (len < sizeof(*message))
return -EINVAL;
/*
* If we are negotiating the version information
* with the daemon; handle that first.
*/
if (kvp_transaction.state < HVUTIL_READY) {
return kvp_handle_handshake(message);
}
/* We didn't send anything to userspace so the reply is spurious */
if (kvp_transaction.state < HVUTIL_USERSPACE_REQ)
return -EINVAL;
kvp_transaction.state = HVUTIL_USERSPACE_RECV;
/*
* Based on the version of the daemon, we propagate errors from the
* daemon differently.
*/
data = &message->body.kvp_enum_data;
switch (dm_reg_value) {
case KVP_OP_REGISTER:
/*
* Null string is used to pass back error condition.
*/
if (data->data.key[0] == 0)
error = HV_S_CONT;
break;
case KVP_OP_REGISTER1:
/*
* We use the message header information from
* the user level daemon to transmit errors.
*/
error = message->error;
break;
}
/*
* Complete the transaction by forwarding the key value
* to the host. But first, cancel the timeout.
*/
if (cancel_delayed_work_sync(&kvp_timeout_work)) {
kvp_respond_to_host(message, error);
hv_poll_channel(kvp_transaction.recv_channel, kvp_poll_wrapper);
}
return 0;
}
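/* Convert an outbound (guest to host) IP info reply from utf8 to utf16. */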
static int process_ob_ipinfo(void *in_msg, void *out_msg, int op)
{
struct hv_kvp_msg *in = in_msg;
struct hv_kvp_ip_msg *out = out_msg;
int len;
switch (op) {
case KVP_OP_GET_IP_INFO:
/*
* Transform all parameters into utf16 encoding.
*/
len = utf8s_to_utf16s((char *)in->body.kvp_ip_val.ip_addr,
strlen((char *)in->body.kvp_ip_val.ip_addr),
UTF16_HOST_ENDIAN,
(wchar_t *)out->kvp_ip_val.ip_addr,
MAX_IP_ADDR_SIZE);
if (len < 0)
return len;
len = utf8s_to_utf16s((char *)in->body.kvp_ip_val.sub_net,
strlen((char *)in->body.kvp_ip_val.sub_net),
UTF16_HOST_ENDIAN,
(wchar_t *)out->kvp_ip_val.sub_net,
MAX_IP_ADDR_SIZE);
if (len < 0)
return len;
len = utf8s_to_utf16s((char *)in->body.kvp_ip_val.gate_way,
strlen((char *)in->body.kvp_ip_val.gate_way),
UTF16_HOST_ENDIAN,
(wchar_t *)out->kvp_ip_val.gate_way,
MAX_GATEWAY_SIZE);
if (len < 0)
return len;
len = utf8s_to_utf16s((char *)in->body.kvp_ip_val.dns_addr,
strlen((char *)in->body.kvp_ip_val.dns_addr),
UTF16_HOST_ENDIAN,
(wchar_t *)out->kvp_ip_val.dns_addr,
MAX_IP_ADDR_SIZE);
if (len < 0)
return len;
len = utf8s_to_utf16s((char *)in->body.kvp_ip_val.adapter_id,
strlen((char *)in->body.kvp_ip_val.adapter_id),
UTF16_HOST_ENDIAN,
(wchar_t *)out->kvp_ip_val.adapter_id,
MAX_ADAPTER_ID_SIZE);
if (len < 0)
return len;
out->kvp_ip_val.dhcp_enabled =
in->body.kvp_ip_val.dhcp_enabled;
out->kvp_ip_val.addr_family =
in->body.kvp_ip_val.addr_family;
}
return 0;
}
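/* Convert an inbound (host to guest) IP info message from utf16 to utf8 for the daemon. */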
static void process_ib_ipinfo(void *in_msg, void *out_msg, int op)
{
struct hv_kvp_ip_msg *in = in_msg;
struct hv_kvp_msg *out = out_msg;
switch (op) {
case KVP_OP_SET_IP_INFO:
/*
* Transform all parameters into utf8 encoding.
*/
utf16s_to_utf8s((wchar_t *)in->kvp_ip_val.ip_addr,
MAX_IP_ADDR_SIZE,
UTF16_LITTLE_ENDIAN,
(__u8 *)out->body.kvp_ip_val.ip_addr,
MAX_IP_ADDR_SIZE);
utf16s_to_utf8s((wchar_t *)in->kvp_ip_val.sub_net,
MAX_IP_ADDR_SIZE,
UTF16_LITTLE_ENDIAN,
(__u8 *)out->body.kvp_ip_val.sub_net,
MAX_IP_ADDR_SIZE);
utf16s_to_utf8s((wchar_t *)in->kvp_ip_val.gate_way,
MAX_GATEWAY_SIZE,
UTF16_LITTLE_ENDIAN,
(__u8 *)out->body.kvp_ip_val.gate_way,
MAX_GATEWAY_SIZE);
utf16s_to_utf8s((wchar_t *)in->kvp_ip_val.dns_addr,
MAX_IP_ADDR_SIZE,
UTF16_LITTLE_ENDIAN,
(__u8 *)out->body.kvp_ip_val.dns_addr,
MAX_IP_ADDR_SIZE);
out->body.kvp_ip_val.dhcp_enabled = in->kvp_ip_val.dhcp_enabled;
fallthrough;
case KVP_OP_GET_IP_INFO:
utf16s_to_utf8s((wchar_t *)in->kvp_ip_val.adapter_id,
MAX_ADAPTER_ID_SIZE,
UTF16_LITTLE_ENDIAN,
(__u8 *)out->body.kvp_ip_val.adapter_id,
MAX_ADAPTER_ID_SIZE);
out->body.kvp_ip_val.addr_family = in->kvp_ip_val.addr_family;
}
}
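/*
 * Build the daemon-facing copy of the host's KVP request, converting utf16
 * keys and values to utf8, and send it over the transport.
 */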
static void
kvp_send_key(struct work_struct *dummy)
{
struct hv_kvp_msg *message;
struct hv_kvp_msg *in_msg;
__u8 operation = kvp_transaction.kvp_msg->kvp_hdr.operation;
__u8 pool = kvp_transaction.kvp_msg->kvp_hdr.pool;
__u32 val32;
__u64 val64;
int rc;
/* The transaction state is wrong. */
if (kvp_transaction.state != HVUTIL_HOSTMSG_RECEIVED)
return;
message = kzalloc(sizeof(*message), GFP_KERNEL);
if (!message)
return;
message->kvp_hdr.operation = operation;
message->kvp_hdr.pool = pool;
in_msg = kvp_transaction.kvp_msg;
/*
	 * The key/value strings sent from the host are encoded
	 * in utf16; convert them to utf8 strings.
	 * The host assures us that the utf16 strings will not exceed
	 * the max lengths specified. We will, however, reserve room
	 * for the string terminating character - in utf16s_to_utf8s()
	 * we limit the size of the buffer where the converted
	 * string is placed to HV_KVP_EXCHANGE_MAX_*_SIZE - 1 to guarantee
	 * that the strings can be properly terminated!
*/
switch (message->kvp_hdr.operation) {
case KVP_OP_SET_IP_INFO:
process_ib_ipinfo(in_msg, message, KVP_OP_SET_IP_INFO);
break;
case KVP_OP_GET_IP_INFO:
/*
* We only need to pass on the info of operation, adapter_id
* and addr_family to the userland kvp daemon.
*/
process_ib_ipinfo(in_msg, message, KVP_OP_GET_IP_INFO);
break;
case KVP_OP_SET:
switch (in_msg->body.kvp_set.data.value_type) {
case REG_SZ:
/*
* The value is a string - utf16 encoding.
*/
message->body.kvp_set.data.value_size =
utf16s_to_utf8s(
(wchar_t *)in_msg->body.kvp_set.data.value,
in_msg->body.kvp_set.data.value_size,
UTF16_LITTLE_ENDIAN,
message->body.kvp_set.data.value,
HV_KVP_EXCHANGE_MAX_VALUE_SIZE - 1) + 1;
break;
case REG_U32:
/*
* The value is a 32 bit scalar.
* We save this as a utf8 string.
*/
val32 = in_msg->body.kvp_set.data.value_u32;
message->body.kvp_set.data.value_size =
sprintf(message->body.kvp_set.data.value,
"%u", val32) + 1;
break;
case REG_U64:
/*
* The value is a 64 bit scalar.
* We save this as a utf8 string.
*/
val64 = in_msg->body.kvp_set.data.value_u64;
message->body.kvp_set.data.value_size =
sprintf(message->body.kvp_set.data.value,
"%llu", val64) + 1;
break;
}
/*
* The key is always a string - utf16 encoding.
*/
message->body.kvp_set.data.key_size =
utf16s_to_utf8s(
(wchar_t *)in_msg->body.kvp_set.data.key,
in_msg->body.kvp_set.data.key_size,
UTF16_LITTLE_ENDIAN,
message->body.kvp_set.data.key,
HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1) + 1;
break;
case KVP_OP_GET:
message->body.kvp_get.data.key_size =
utf16s_to_utf8s(
(wchar_t *)in_msg->body.kvp_get.data.key,
in_msg->body.kvp_get.data.key_size,
UTF16_LITTLE_ENDIAN,
message->body.kvp_get.data.key,
HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1) + 1;
break;
case KVP_OP_DELETE:
message->body.kvp_delete.key_size =
utf16s_to_utf8s(
(wchar_t *)in_msg->body.kvp_delete.key,
in_msg->body.kvp_delete.key_size,
UTF16_LITTLE_ENDIAN,
message->body.kvp_delete.key,
HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1) + 1;
break;
case KVP_OP_ENUMERATE:
message->body.kvp_enum_data.index =
in_msg->body.kvp_enum_data.index;
break;
}
kvp_transaction.state = HVUTIL_USERSPACE_REQ;
rc = hvutil_transport_send(hvt, message, sizeof(*message), NULL);
if (rc) {
pr_debug("KVP: failed to communicate to the daemon: %d\n", rc);
if (cancel_delayed_work_sync(&kvp_timeout_work)) {
kvp_respond_to_host(message, HV_E_FAIL);
kvp_transaction.state = HVUTIL_READY;
}
}
kfree(message);
}
/*
* Send a response back to the host.
*/
static void
kvp_respond_to_host(struct hv_kvp_msg *msg_to_host, int error)
{
struct hv_kvp_msg *kvp_msg;
struct hv_kvp_exchg_msg_value *kvp_data;
char *key_name;
char *value;
struct icmsg_hdr *icmsghdrp;
int keylen = 0;
int valuelen = 0;
u32 buf_len;
struct vmbus_channel *channel;
u64 req_id;
int ret;
/*
* Copy the global state for completing the transaction. Note that
* only one transaction can be active at a time.
*/
buf_len = kvp_transaction.recv_len;
channel = kvp_transaction.recv_channel;
req_id = kvp_transaction.recv_req_id;
icmsghdrp = (struct icmsg_hdr *)
&recv_buffer[sizeof(struct vmbuspipe_hdr)];
if (channel->onchannel_callback == NULL)
/*
* We have raced with util driver being unloaded;
* silently return.
*/
return;
icmsghdrp->status = error;
/*
* If the error parameter is set, terminate the host's enumeration
* on this pool.
*/
if (error) {
/*
* Something failed or we have timed out;
* terminate the current host-side iteration.
*/
goto response_done;
}
kvp_msg = (struct hv_kvp_msg *)
&recv_buffer[sizeof(struct vmbuspipe_hdr) +
sizeof(struct icmsg_hdr)];
switch (kvp_transaction.kvp_msg->kvp_hdr.operation) {
case KVP_OP_GET_IP_INFO:
ret = process_ob_ipinfo(msg_to_host,
(struct hv_kvp_ip_msg *)kvp_msg,
KVP_OP_GET_IP_INFO);
if (ret < 0)
icmsghdrp->status = HV_E_FAIL;
goto response_done;
case KVP_OP_SET_IP_INFO:
goto response_done;
case KVP_OP_GET:
kvp_data = &kvp_msg->body.kvp_get.data;
goto copy_value;
case KVP_OP_SET:
case KVP_OP_DELETE:
goto response_done;
default:
break;
}
kvp_data = &kvp_msg->body.kvp_enum_data.data;
key_name = msg_to_host->body.kvp_enum_data.data.key;
/*
* The windows host expects the key/value pair to be encoded
* in utf16. Ensure that the key/value size reported to the host
* will be less than or equal to the MAX size (including the
* terminating character).
*/
keylen = utf8s_to_utf16s(key_name, strlen(key_name), UTF16_HOST_ENDIAN,
(wchar_t *) kvp_data->key,
(HV_KVP_EXCHANGE_MAX_KEY_SIZE / 2) - 2);
kvp_data->key_size = 2*(keylen + 1); /* utf16 encoding */
copy_value:
value = msg_to_host->body.kvp_enum_data.data.value;
valuelen = utf8s_to_utf16s(value, strlen(value), UTF16_HOST_ENDIAN,
(wchar_t *) kvp_data->value,
(HV_KVP_EXCHANGE_MAX_VALUE_SIZE / 2) - 2);
kvp_data->value_size = 2*(valuelen + 1); /* utf16 encoding */
/*
* If the utf8s to utf16s conversion failed; notify host
* of the error.
*/
if ((keylen < 0) || (valuelen < 0))
icmsghdrp->status = HV_E_FAIL;
kvp_data->value_type = REG_SZ; /* all our values are strings */
response_done:
icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
vmbus_sendpacket(channel, recv_buffer, buf_len, req_id,
VM_PKT_DATA_INBAND, 0);
}
/*
* This callback is invoked when we get a KVP message from the host.
* The host ensures that only one KVP transaction can be active at a time.
 * KVP implementation in Linux needs to forward the key to a user-mode
* component to retrieve the corresponding value. Consequently, we cannot
* respond to the host in the context of this callback. Since the host
* guarantees that at most only one transaction can be active at a time,
* we stash away the transaction state in a set of global variables.
*/
void hv_kvp_onchannelcallback(void *context)
{
struct vmbus_channel *channel = context;
u32 recvlen;
u64 requestid;
struct hv_kvp_msg *kvp_msg;
struct icmsg_hdr *icmsghdrp;
int kvp_srv_version;
static enum {NEGO_NOT_STARTED,
NEGO_IN_PROGRESS,
NEGO_FINISHED} host_negotiatied = NEGO_NOT_STARTED;
if (kvp_transaction.state < HVUTIL_READY) {
/*
		 * If the userspace daemon is not connected and the host is asking
		 * us to negotiate, delay processing so we don't lose messages.
		 * This is important for the Failover IP setting.
*/
if (host_negotiatied == NEGO_NOT_STARTED) {
host_negotiatied = NEGO_IN_PROGRESS;
schedule_delayed_work(&kvp_host_handshake_work,
HV_UTIL_NEGO_TIMEOUT * HZ);
}
return;
}
if (kvp_transaction.state > HVUTIL_READY)
return;
if (vmbus_recvpacket(channel, recv_buffer, HV_HYP_PAGE_SIZE * 4, &recvlen, &requestid)) {
pr_err_ratelimited("KVP request received. Could not read into recv buf\n");
return;
}
if (!recvlen)
return;
/* Ensure recvlen is big enough to read header data */
if (recvlen < ICMSG_HDR) {
pr_err_ratelimited("KVP request received. Packet length too small: %d\n",
recvlen);
return;
}
icmsghdrp = (struct icmsg_hdr *)&recv_buffer[sizeof(struct vmbuspipe_hdr)];
if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
if (vmbus_prep_negotiate_resp(icmsghdrp,
recv_buffer, recvlen,
fw_versions, FW_VER_COUNT,
kvp_versions, KVP_VER_COUNT,
NULL, &kvp_srv_version)) {
pr_info("KVP IC version %d.%d\n",
kvp_srv_version >> 16,
kvp_srv_version & 0xFFFF);
}
} else if (icmsghdrp->icmsgtype == ICMSGTYPE_KVPEXCHANGE) {
/*
* recvlen is not checked against sizeof(struct kvp_msg) because kvp_msg contains
* a union of structs and the msg type received is not known. Code using this
* struct should provide validation when accessing its fields.
*/
kvp_msg = (struct hv_kvp_msg *)&recv_buffer[ICMSG_HDR];
/*
* Stash away this global state for completing the
* transaction; note transactions are serialized.
*/
kvp_transaction.recv_len = recvlen;
kvp_transaction.recv_req_id = requestid;
kvp_transaction.kvp_msg = kvp_msg;
if (kvp_transaction.state < HVUTIL_READY) {
/* Userspace is not registered yet */
kvp_respond_to_host(NULL, HV_E_FAIL);
return;
}
kvp_transaction.state = HVUTIL_HOSTMSG_RECEIVED;
/*
		 * Get the information from the user-mode component.
		 * This transaction will be completed when we get the
		 * value from the user-mode component. Set a timeout
		 * to deal with user-mode not responding.
*/
schedule_work(&kvp_sendkey_work);
schedule_delayed_work(&kvp_timeout_work,
HV_UTIL_TIMEOUT * HZ);
return;
} else {
pr_err_ratelimited("KVP request received. Invalid msg type: %d\n",
icmsghdrp->icmsgtype);
return;
}
icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
| ICMSGHDRFLAG_RESPONSE;
vmbus_sendpacket(channel, recv_buffer,
recvlen, requestid,
VM_PKT_DATA_INBAND, 0);
host_negotiatied = NEGO_FINISHED;
hv_poll_channel(kvp_transaction.recv_channel, kvp_poll_wrapper);
}
static void kvp_on_reset(void)
{
if (cancel_delayed_work_sync(&kvp_timeout_work))
kvp_respond_to_host(NULL, HV_E_FAIL);
kvp_transaction.state = HVUTIL_DEVICE_INIT;
}
int
hv_kvp_init(struct hv_util_service *srv)
{
recv_buffer = srv->recv_buffer;
kvp_transaction.recv_channel = srv->channel;
kvp_transaction.recv_channel->max_pkt_size = HV_HYP_PAGE_SIZE * 4;
/*
* When this driver loads, the user level daemon that
* processes the host requests may not yet be running.
* Defer processing channel callbacks until the daemon
* has registered.
*/
kvp_transaction.state = HVUTIL_DEVICE_INIT;
hvt = hvutil_transport_init(kvp_devname, CN_KVP_IDX, CN_KVP_VAL,
kvp_on_msg, kvp_on_reset);
if (!hvt)
return -EFAULT;
return 0;
}
static void hv_kvp_cancel_work(void)
{
cancel_delayed_work_sync(&kvp_host_handshake_work);
cancel_delayed_work_sync(&kvp_timeout_work);
cancel_work_sync(&kvp_sendkey_work);
}
int hv_kvp_pre_suspend(void)
{
struct vmbus_channel *channel = kvp_transaction.recv_channel;
tasklet_disable(&channel->callback_event);
/*
	 * If there is a pending transaction, it's unnecessary to tell the host
* that the transaction will fail, because that is implied when
* util_suspend() calls vmbus_close() later.
*/
hv_kvp_cancel_work();
/*
	 * Force the state to READY to handle the ICMSGTYPE_NEGOTIATE message
* later. The user space daemon may go out of order and its write()
* may fail with EINVAL: this doesn't matter since the daemon will
* reset the device by closing and re-opening it.
*/
kvp_transaction.state = HVUTIL_READY;
return 0;
}
int hv_kvp_pre_resume(void)
{
struct vmbus_channel *channel = kvp_transaction.recv_channel;
tasklet_enable(&channel->callback_event);
return 0;
}
void hv_kvp_deinit(void)
{
kvp_transaction.state = HVUTIL_DEVICE_DYING;
hv_kvp_cancel_work();
hvutil_transport_destroy(hvt);
}
|
linux-master
|
drivers/hv/hv_kvp.c
|
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020 Facebook */
#include <linux/bits.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/serial_8250.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/platform_device.h>
#include <linux/platform_data/i2c-xiic.h>
#include <linux/platform_data/i2c-ocores.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/spi/spi.h>
#include <linux/spi/xilinx_spi.h>
#include <linux/spi/altera.h>
#include <net/devlink.h>
#include <linux/i2c.h>
#include <linux/mtd/mtd.h>
#include <linux/nvmem-consumer.h>
#include <linux/crc16.h>
#define PCI_VENDOR_ID_FACEBOOK 0x1d9b
#define PCI_DEVICE_ID_FACEBOOK_TIMECARD 0x0400
#define PCI_VENDOR_ID_CELESTICA 0x18d4
#define PCI_DEVICE_ID_CELESTICA_TIMECARD 0x1008
#define PCI_VENDOR_ID_OROLIA 0x1ad7
#define PCI_DEVICE_ID_OROLIA_ARTCARD 0xa000
static struct class timecard_class = {
.name = "timecard",
};
struct ocp_reg {
u32 ctrl;
u32 status;
u32 select;
u32 version;
u32 time_ns;
u32 time_sec;
u32 __pad0[2];
u32 adjust_ns;
u32 adjust_sec;
u32 __pad1[2];
u32 offset_ns;
u32 offset_window_ns;
u32 __pad2[2];
u32 drift_ns;
u32 drift_window_ns;
u32 __pad3[6];
u32 servo_offset_p;
u32 servo_offset_i;
u32 servo_drift_p;
u32 servo_drift_i;
u32 status_offset;
u32 status_drift;
};
#define OCP_CTRL_ENABLE BIT(0)
#define OCP_CTRL_ADJUST_TIME BIT(1)
#define OCP_CTRL_ADJUST_OFFSET BIT(2)
#define OCP_CTRL_ADJUST_DRIFT BIT(3)
#define OCP_CTRL_ADJUST_SERVO BIT(8)
#define OCP_CTRL_READ_TIME_REQ BIT(30)
#define OCP_CTRL_READ_TIME_DONE BIT(31)
#define OCP_STATUS_IN_SYNC BIT(0)
#define OCP_STATUS_IN_HOLDOVER BIT(1)
#define OCP_SELECT_CLK_NONE 0
#define OCP_SELECT_CLK_REG 0xfe
struct tod_reg {
u32 ctrl;
u32 status;
u32 uart_polarity;
u32 version;
u32 adj_sec;
u32 __pad0[3];
u32 uart_baud;
u32 __pad1[3];
u32 utc_status;
u32 leap;
};
#define TOD_CTRL_PROTOCOL BIT(28)
#define TOD_CTRL_DISABLE_FMT_A BIT(17)
#define TOD_CTRL_DISABLE_FMT_B BIT(16)
#define TOD_CTRL_ENABLE BIT(0)
#define TOD_CTRL_GNSS_MASK GENMASK(3, 0)
#define TOD_CTRL_GNSS_SHIFT 24
#define TOD_STATUS_UTC_MASK GENMASK(7, 0)
#define TOD_STATUS_UTC_VALID BIT(8)
#define TOD_STATUS_LEAP_ANNOUNCE BIT(12)
#define TOD_STATUS_LEAP_VALID BIT(16)
struct ts_reg {
u32 enable;
u32 error;
u32 polarity;
u32 version;
u32 __pad0[4];
u32 cable_delay;
u32 __pad1[3];
u32 intr;
u32 intr_mask;
u32 event_count;
u32 __pad2[1];
u32 ts_count;
u32 time_ns;
u32 time_sec;
u32 data_width;
u32 data;
};
struct pps_reg {
u32 ctrl;
u32 status;
u32 __pad0[6];
u32 cable_delay;
};
#define PPS_STATUS_FILTER_ERR BIT(0)
#define PPS_STATUS_SUPERV_ERR BIT(1)
struct img_reg {
u32 version;
};
struct gpio_reg {
u32 gpio1;
u32 __pad0;
u32 gpio2;
u32 __pad1;
};
struct irig_master_reg {
u32 ctrl;
u32 status;
u32 __pad0;
u32 version;
u32 adj_sec;
u32 mode_ctrl;
};
#define IRIG_M_CTRL_ENABLE BIT(0)
struct irig_slave_reg {
u32 ctrl;
u32 status;
u32 __pad0;
u32 version;
u32 adj_sec;
u32 mode_ctrl;
};
#define IRIG_S_CTRL_ENABLE BIT(0)
struct dcf_master_reg {
u32 ctrl;
u32 status;
u32 __pad0;
u32 version;
u32 adj_sec;
};
#define DCF_M_CTRL_ENABLE BIT(0)
struct dcf_slave_reg {
u32 ctrl;
u32 status;
u32 __pad0;
u32 version;
u32 adj_sec;
};
#define DCF_S_CTRL_ENABLE BIT(0)
struct signal_reg {
u32 enable;
u32 status;
u32 polarity;
u32 version;
u32 __pad0[4];
u32 cable_delay;
u32 __pad1[3];
u32 intr;
u32 intr_mask;
u32 __pad2[2];
u32 start_ns;
u32 start_sec;
u32 pulse_ns;
u32 pulse_sec;
u32 period_ns;
u32 period_sec;
u32 repeat_count;
};
struct frequency_reg {
u32 ctrl;
u32 status;
};
struct board_config_reg {
u32 mro50_serial_activate;
};
#define FREQ_STATUS_VALID BIT(31)
#define FREQ_STATUS_ERROR BIT(30)
#define FREQ_STATUS_OVERRUN BIT(29)
#define FREQ_STATUS_MASK GENMASK(23, 0)
struct ptp_ocp_flash_info {
const char *name;
int pci_offset;
int data_size;
void *data;
};
struct ptp_ocp_firmware_header {
char magic[4];
__be16 pci_vendor_id;
__be16 pci_device_id;
__be32 image_size;
__be16 hw_revision;
__be16 crc;
};
#define OCP_FIRMWARE_MAGIC_HEADER "OCPC"
struct ptp_ocp_i2c_info {
const char *name;
unsigned long fixed_rate;
size_t data_size;
void *data;
};
struct ptp_ocp_ext_info {
int index;
irqreturn_t (*irq_fcn)(int irq, void *priv);
int (*enable)(void *priv, u32 req, bool enable);
};
struct ptp_ocp_ext_src {
void __iomem *mem;
struct ptp_ocp *bp;
struct ptp_ocp_ext_info *info;
int irq_vec;
};
enum ptp_ocp_sma_mode {
SMA_MODE_IN,
SMA_MODE_OUT,
};
struct ptp_ocp_sma_connector {
enum ptp_ocp_sma_mode mode;
bool fixed_fcn;
bool fixed_dir;
bool disabled;
u8 default_fcn;
};
struct ocp_attr_group {
u64 cap;
const struct attribute_group *group;
};
#define OCP_CAP_BASIC BIT(0)
#define OCP_CAP_SIGNAL BIT(1)
#define OCP_CAP_FREQ BIT(2)
struct ptp_ocp_signal {
ktime_t period;
ktime_t pulse;
ktime_t phase;
ktime_t start;
int duty;
bool polarity;
bool running;
};
struct ptp_ocp_serial_port {
int line;
int baud;
};
#define OCP_BOARD_ID_LEN 13
#define OCP_SERIAL_LEN 6
struct ptp_ocp {
struct pci_dev *pdev;
struct device dev;
spinlock_t lock;
struct ocp_reg __iomem *reg;
struct tod_reg __iomem *tod;
struct pps_reg __iomem *pps_to_ext;
struct pps_reg __iomem *pps_to_clk;
struct board_config_reg __iomem *board_config;
struct gpio_reg __iomem *pps_select;
struct gpio_reg __iomem *sma_map1;
struct gpio_reg __iomem *sma_map2;
struct irig_master_reg __iomem *irig_out;
struct irig_slave_reg __iomem *irig_in;
struct dcf_master_reg __iomem *dcf_out;
struct dcf_slave_reg __iomem *dcf_in;
struct tod_reg __iomem *nmea_out;
struct frequency_reg __iomem *freq_in[4];
struct ptp_ocp_ext_src *signal_out[4];
struct ptp_ocp_ext_src *pps;
struct ptp_ocp_ext_src *ts0;
struct ptp_ocp_ext_src *ts1;
struct ptp_ocp_ext_src *ts2;
struct ptp_ocp_ext_src *ts3;
struct ptp_ocp_ext_src *ts4;
struct ocp_art_gpio_reg __iomem *art_sma;
struct img_reg __iomem *image;
struct ptp_clock *ptp;
struct ptp_clock_info ptp_info;
struct platform_device *i2c_ctrl;
struct platform_device *spi_flash;
struct clk_hw *i2c_clk;
struct timer_list watchdog;
const struct attribute_group **attr_group;
const struct ptp_ocp_eeprom_map *eeprom_map;
struct dentry *debug_root;
time64_t gnss_lost;
int id;
int n_irqs;
struct ptp_ocp_serial_port gnss_port;
struct ptp_ocp_serial_port gnss2_port;
struct ptp_ocp_serial_port mac_port; /* miniature atomic clock */
struct ptp_ocp_serial_port nmea_port;
bool fw_loader;
u8 fw_tag;
u16 fw_version;
u8 board_id[OCP_BOARD_ID_LEN];
u8 serial[OCP_SERIAL_LEN];
bool has_eeprom_data;
u32 pps_req_map;
int flash_start;
u32 utc_tai_offset;
u32 ts_window_adjust;
u64 fw_cap;
struct ptp_ocp_signal signal[4];
struct ptp_ocp_sma_connector sma[4];
const struct ocp_sma_op *sma_op;
};
#define OCP_REQ_TIMESTAMP BIT(0)
#define OCP_REQ_PPS BIT(1)
struct ocp_resource {
unsigned long offset;
int size;
int irq_vec;
int (*setup)(struct ptp_ocp *bp, struct ocp_resource *r);
void *extra;
unsigned long bp_offset;
const char * const name;
};
static int ptp_ocp_register_mem(struct ptp_ocp *bp, struct ocp_resource *r);
static int ptp_ocp_register_i2c(struct ptp_ocp *bp, struct ocp_resource *r);
static int ptp_ocp_register_spi(struct ptp_ocp *bp, struct ocp_resource *r);
static int ptp_ocp_register_serial(struct ptp_ocp *bp, struct ocp_resource *r);
static int ptp_ocp_register_ext(struct ptp_ocp *bp, struct ocp_resource *r);
static int ptp_ocp_fb_board_init(struct ptp_ocp *bp, struct ocp_resource *r);
static irqreturn_t ptp_ocp_ts_irq(int irq, void *priv);
static irqreturn_t ptp_ocp_signal_irq(int irq, void *priv);
static int ptp_ocp_ts_enable(void *priv, u32 req, bool enable);
static int ptp_ocp_signal_from_perout(struct ptp_ocp *bp, int gen,
struct ptp_perout_request *req);
static int ptp_ocp_signal_enable(void *priv, u32 req, bool enable);
static int ptp_ocp_sma_store(struct ptp_ocp *bp, const char *buf, int sma_nr);
static int ptp_ocp_art_board_init(struct ptp_ocp *bp, struct ocp_resource *r);
static const struct ocp_attr_group fb_timecard_groups[];
static const struct ocp_attr_group art_timecard_groups[];
struct ptp_ocp_eeprom_map {
u16 off;
u16 len;
u32 bp_offset;
const void * const tag;
};
#define EEPROM_ENTRY(addr, member) \
.off = addr, \
.len = sizeof_field(struct ptp_ocp, member), \
.bp_offset = offsetof(struct ptp_ocp, member)
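/* Address of the struct ptp_ocp field that an eeprom map entry writes to. */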
#define BP_MAP_ENTRY_ADDR(bp, map) ({ \
(void *)((uintptr_t)(bp) + (map)->bp_offset); \
})
static struct ptp_ocp_eeprom_map fb_eeprom_map[] = {
{ EEPROM_ENTRY(0x43, board_id) },
{ EEPROM_ENTRY(0x00, serial), .tag = "mac" },
{ }
};
static struct ptp_ocp_eeprom_map art_eeprom_map[] = {
{ EEPROM_ENTRY(0x200 + 0x43, board_id) },
{ EEPROM_ENTRY(0x200 + 0x63, serial) },
{ }
};
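/* Write 'val' into the struct ptp_ocp field located at res->bp_offset. */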
#define bp_assign_entry(bp, res, val) ({ \
uintptr_t addr = (uintptr_t)(bp) + (res)->bp_offset; \
*(typeof(val) *)addr = val; \
})
#define OCP_RES_LOCATION(member) \
.name = #member, .bp_offset = offsetof(struct ptp_ocp, member)
#define OCP_MEM_RESOURCE(member) \
OCP_RES_LOCATION(member), .setup = ptp_ocp_register_mem
#define OCP_SERIAL_RESOURCE(member) \
OCP_RES_LOCATION(member), .setup = ptp_ocp_register_serial
#define OCP_I2C_RESOURCE(member) \
OCP_RES_LOCATION(member), .setup = ptp_ocp_register_i2c
#define OCP_SPI_RESOURCE(member) \
OCP_RES_LOCATION(member), .setup = ptp_ocp_register_spi
#define OCP_EXT_RESOURCE(member) \
OCP_RES_LOCATION(member), .setup = ptp_ocp_register_ext
/* This is the MSI vector mapping used.
* 0: PPS (TS5)
* 1: TS0
* 2: TS1
* 3: GNSS1
* 4: GNSS2
* 5: MAC
* 6: TS2
* 7: I2C controller
 * 8: HWICAP (not used)
* 9: SPI Flash
* 10: NMEA
* 11: Signal Generator 1
* 12: Signal Generator 2
* 13: Signal Generator 3
* 14: Signal Generator 4
* 15: TS3
* 16: TS4
--
* 8: Orolia TS1
* 10: Orolia TS2
* 11: Orolia TS0 (GNSS)
* 12: Orolia PPS
* 14: Orolia TS3
* 15: Orolia TS4
*/
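/*
 * Resource table (register blocks, serial ports, I2C/SPI controllers and
 * their IRQ vectors) for the Facebook Time Card FPGA layout.
 */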
static struct ocp_resource ocp_fb_resource[] = {
{
OCP_MEM_RESOURCE(reg),
.offset = 0x01000000, .size = 0x10000,
},
{
OCP_EXT_RESOURCE(ts0),
.offset = 0x01010000, .size = 0x10000, .irq_vec = 1,
.extra = &(struct ptp_ocp_ext_info) {
.index = 0,
.irq_fcn = ptp_ocp_ts_irq,
.enable = ptp_ocp_ts_enable,
},
},
{
OCP_EXT_RESOURCE(ts1),
.offset = 0x01020000, .size = 0x10000, .irq_vec = 2,
.extra = &(struct ptp_ocp_ext_info) {
.index = 1,
.irq_fcn = ptp_ocp_ts_irq,
.enable = ptp_ocp_ts_enable,
},
},
{
OCP_EXT_RESOURCE(ts2),
.offset = 0x01060000, .size = 0x10000, .irq_vec = 6,
.extra = &(struct ptp_ocp_ext_info) {
.index = 2,
.irq_fcn = ptp_ocp_ts_irq,
.enable = ptp_ocp_ts_enable,
},
},
{
OCP_EXT_RESOURCE(ts3),
.offset = 0x01110000, .size = 0x10000, .irq_vec = 15,
.extra = &(struct ptp_ocp_ext_info) {
.index = 3,
.irq_fcn = ptp_ocp_ts_irq,
.enable = ptp_ocp_ts_enable,
},
},
{
OCP_EXT_RESOURCE(ts4),
.offset = 0x01120000, .size = 0x10000, .irq_vec = 16,
.extra = &(struct ptp_ocp_ext_info) {
.index = 4,
.irq_fcn = ptp_ocp_ts_irq,
.enable = ptp_ocp_ts_enable,
},
},
/* Timestamp for PHC and/or PPS generator */
{
OCP_EXT_RESOURCE(pps),
.offset = 0x010C0000, .size = 0x10000, .irq_vec = 0,
.extra = &(struct ptp_ocp_ext_info) {
.index = 5,
.irq_fcn = ptp_ocp_ts_irq,
.enable = ptp_ocp_ts_enable,
},
},
{
OCP_EXT_RESOURCE(signal_out[0]),
.offset = 0x010D0000, .size = 0x10000, .irq_vec = 11,
.extra = &(struct ptp_ocp_ext_info) {
.index = 1,
.irq_fcn = ptp_ocp_signal_irq,
.enable = ptp_ocp_signal_enable,
},
},
{
OCP_EXT_RESOURCE(signal_out[1]),
.offset = 0x010E0000, .size = 0x10000, .irq_vec = 12,
.extra = &(struct ptp_ocp_ext_info) {
.index = 2,
.irq_fcn = ptp_ocp_signal_irq,
.enable = ptp_ocp_signal_enable,
},
},
{
OCP_EXT_RESOURCE(signal_out[2]),
.offset = 0x010F0000, .size = 0x10000, .irq_vec = 13,
.extra = &(struct ptp_ocp_ext_info) {
.index = 3,
.irq_fcn = ptp_ocp_signal_irq,
.enable = ptp_ocp_signal_enable,
},
},
{
OCP_EXT_RESOURCE(signal_out[3]),
.offset = 0x01100000, .size = 0x10000, .irq_vec = 14,
.extra = &(struct ptp_ocp_ext_info) {
.index = 4,
.irq_fcn = ptp_ocp_signal_irq,
.enable = ptp_ocp_signal_enable,
},
},
{
OCP_MEM_RESOURCE(pps_to_ext),
.offset = 0x01030000, .size = 0x10000,
},
{
OCP_MEM_RESOURCE(pps_to_clk),
.offset = 0x01040000, .size = 0x10000,
},
{
OCP_MEM_RESOURCE(tod),
.offset = 0x01050000, .size = 0x10000,
},
{
OCP_MEM_RESOURCE(irig_in),
.offset = 0x01070000, .size = 0x10000,
},
{
OCP_MEM_RESOURCE(irig_out),
.offset = 0x01080000, .size = 0x10000,
},
{
OCP_MEM_RESOURCE(dcf_in),
.offset = 0x01090000, .size = 0x10000,
},
{
OCP_MEM_RESOURCE(dcf_out),
.offset = 0x010A0000, .size = 0x10000,
},
{
OCP_MEM_RESOURCE(nmea_out),
.offset = 0x010B0000, .size = 0x10000,
},
{
OCP_MEM_RESOURCE(image),
.offset = 0x00020000, .size = 0x1000,
},
{
OCP_MEM_RESOURCE(pps_select),
.offset = 0x00130000, .size = 0x1000,
},
{
OCP_MEM_RESOURCE(sma_map1),
.offset = 0x00140000, .size = 0x1000,
},
{
OCP_MEM_RESOURCE(sma_map2),
.offset = 0x00220000, .size = 0x1000,
},
{
OCP_I2C_RESOURCE(i2c_ctrl),
.offset = 0x00150000, .size = 0x10000, .irq_vec = 7,
.extra = &(struct ptp_ocp_i2c_info) {
.name = "xiic-i2c",
.fixed_rate = 50000000,
.data_size = sizeof(struct xiic_i2c_platform_data),
.data = &(struct xiic_i2c_platform_data) {
.num_devices = 2,
.devices = (struct i2c_board_info[]) {
{ I2C_BOARD_INFO("24c02", 0x50) },
{ I2C_BOARD_INFO("24mac402", 0x58),
.platform_data = "mac" },
},
},
},
},
{
OCP_SERIAL_RESOURCE(gnss_port),
.offset = 0x00160000 + 0x1000, .irq_vec = 3,
.extra = &(struct ptp_ocp_serial_port) {
.baud = 115200,
},
},
{
OCP_SERIAL_RESOURCE(gnss2_port),
.offset = 0x00170000 + 0x1000, .irq_vec = 4,
.extra = &(struct ptp_ocp_serial_port) {
.baud = 115200,
},
},
{
OCP_SERIAL_RESOURCE(mac_port),
.offset = 0x00180000 + 0x1000, .irq_vec = 5,
.extra = &(struct ptp_ocp_serial_port) {
.baud = 57600,
},
},
{
OCP_SERIAL_RESOURCE(nmea_port),
.offset = 0x00190000 + 0x1000, .irq_vec = 10,
},
{
OCP_SPI_RESOURCE(spi_flash),
.offset = 0x00310000, .size = 0x10000, .irq_vec = 9,
.extra = &(struct ptp_ocp_flash_info) {
.name = "xilinx_spi", .pci_offset = 0,
.data_size = sizeof(struct xspi_platform_data),
.data = &(struct xspi_platform_data) {
.num_chipselect = 1,
.bits_per_word = 8,
.num_devices = 1,
.force_irq = true,
.devices = &(struct spi_board_info) {
.modalias = "spi-nor",
},
},
},
},
{
OCP_MEM_RESOURCE(freq_in[0]),
.offset = 0x01200000, .size = 0x10000,
},
{
OCP_MEM_RESOURCE(freq_in[1]),
.offset = 0x01210000, .size = 0x10000,
},
{
OCP_MEM_RESOURCE(freq_in[2]),
.offset = 0x01220000, .size = 0x10000,
},
{
OCP_MEM_RESOURCE(freq_in[3]),
.offset = 0x01230000, .size = 0x10000,
},
{
.setup = ptp_ocp_fb_board_init,
},
{ }
};
#define OCP_ART_CONFIG_SIZE 144
#define OCP_ART_TEMP_TABLE_SIZE 368
struct ocp_art_gpio_reg {
struct {
u32 gpio;
u32 __pad[3];
} map[4];
};
static struct ocp_resource ocp_art_resource[] = {
{
OCP_MEM_RESOURCE(reg),
.offset = 0x01000000, .size = 0x10000,
},
{
OCP_SERIAL_RESOURCE(gnss_port),
.offset = 0x00160000 + 0x1000, .irq_vec = 3,
.extra = &(struct ptp_ocp_serial_port) {
.baud = 115200,
},
},
{
OCP_MEM_RESOURCE(art_sma),
.offset = 0x003C0000, .size = 0x1000,
},
/* Timestamp associated with GNSS1 receiver PPS */
{
OCP_EXT_RESOURCE(ts0),
.offset = 0x360000, .size = 0x20, .irq_vec = 12,
.extra = &(struct ptp_ocp_ext_info) {
.index = 0,
.irq_fcn = ptp_ocp_ts_irq,
.enable = ptp_ocp_ts_enable,
},
},
{
OCP_EXT_RESOURCE(ts1),
.offset = 0x380000, .size = 0x20, .irq_vec = 8,
.extra = &(struct ptp_ocp_ext_info) {
.index = 1,
.irq_fcn = ptp_ocp_ts_irq,
.enable = ptp_ocp_ts_enable,
},
},
{
OCP_EXT_RESOURCE(ts2),
.offset = 0x390000, .size = 0x20, .irq_vec = 10,
.extra = &(struct ptp_ocp_ext_info) {
.index = 2,
.irq_fcn = ptp_ocp_ts_irq,
.enable = ptp_ocp_ts_enable,
},
},
{
OCP_EXT_RESOURCE(ts3),
.offset = 0x3A0000, .size = 0x20, .irq_vec = 14,
.extra = &(struct ptp_ocp_ext_info) {
.index = 3,
.irq_fcn = ptp_ocp_ts_irq,
.enable = ptp_ocp_ts_enable,
},
},
{
OCP_EXT_RESOURCE(ts4),
.offset = 0x3B0000, .size = 0x20, .irq_vec = 15,
.extra = &(struct ptp_ocp_ext_info) {
.index = 4,
.irq_fcn = ptp_ocp_ts_irq,
.enable = ptp_ocp_ts_enable,
},
},
/* Timestamp associated with Internal PPS of the card */
{
OCP_EXT_RESOURCE(pps),
.offset = 0x00330000, .size = 0x20, .irq_vec = 11,
.extra = &(struct ptp_ocp_ext_info) {
.index = 5,
.irq_fcn = ptp_ocp_ts_irq,
.enable = ptp_ocp_ts_enable,
},
},
{
OCP_SPI_RESOURCE(spi_flash),
.offset = 0x00310000, .size = 0x10000, .irq_vec = 9,
.extra = &(struct ptp_ocp_flash_info) {
.name = "spi_altera", .pci_offset = 0,
.data_size = sizeof(struct altera_spi_platform_data),
.data = &(struct altera_spi_platform_data) {
.num_chipselect = 1,
.num_devices = 1,
.devices = &(struct spi_board_info) {
.modalias = "spi-nor",
},
},
},
},
{
OCP_I2C_RESOURCE(i2c_ctrl),
.offset = 0x350000, .size = 0x100, .irq_vec = 4,
.extra = &(struct ptp_ocp_i2c_info) {
.name = "ocores-i2c",
.fixed_rate = 400000,
.data_size = sizeof(struct ocores_i2c_platform_data),
.data = &(struct ocores_i2c_platform_data) {
.clock_khz = 125000,
.bus_khz = 400,
.num_devices = 1,
.devices = &(struct i2c_board_info) {
I2C_BOARD_INFO("24c08", 0x50),
},
},
},
},
{
OCP_SERIAL_RESOURCE(mac_port),
.offset = 0x00190000, .irq_vec = 7,
.extra = &(struct ptp_ocp_serial_port) {
.baud = 9600,
},
},
{
OCP_MEM_RESOURCE(board_config),
.offset = 0x210000, .size = 0x1000,
},
{
.setup = ptp_ocp_art_board_init,
},
{ }
};
static const struct pci_device_id ptp_ocp_pcidev_id[] = {
{ PCI_DEVICE_DATA(FACEBOOK, TIMECARD, &ocp_fb_resource) },
{ PCI_DEVICE_DATA(CELESTICA, TIMECARD, &ocp_fb_resource) },
{ PCI_DEVICE_DATA(OROLIA, ARTCARD, &ocp_art_resource) },
{ }
};
MODULE_DEVICE_TABLE(pci, ptp_ocp_pcidev_id);
static DEFINE_MUTEX(ptp_ocp_lock);
static DEFINE_IDR(ptp_ocp_idr);
struct ocp_selector {
const char *name;
int value;
};
static const struct ocp_selector ptp_ocp_clock[] = {
{ .name = "NONE", .value = 0 },
{ .name = "TOD", .value = 1 },
{ .name = "IRIG", .value = 2 },
{ .name = "PPS", .value = 3 },
{ .name = "PTP", .value = 4 },
{ .name = "RTC", .value = 5 },
{ .name = "DCF", .value = 6 },
{ .name = "REGS", .value = 0xfe },
{ .name = "EXT", .value = 0xff },
{ }
};
#define SMA_DISABLE BIT(16)
#define SMA_ENABLE BIT(15)
#define SMA_SELECT_MASK GENMASK(14, 0)
static const struct ocp_selector ptp_ocp_sma_in[] = {
{ .name = "10Mhz", .value = 0x0000 },
{ .name = "PPS1", .value = 0x0001 },
{ .name = "PPS2", .value = 0x0002 },
{ .name = "TS1", .value = 0x0004 },
{ .name = "TS2", .value = 0x0008 },
{ .name = "IRIG", .value = 0x0010 },
{ .name = "DCF", .value = 0x0020 },
{ .name = "TS3", .value = 0x0040 },
{ .name = "TS4", .value = 0x0080 },
{ .name = "FREQ1", .value = 0x0100 },
{ .name = "FREQ2", .value = 0x0200 },
{ .name = "FREQ3", .value = 0x0400 },
{ .name = "FREQ4", .value = 0x0800 },
{ .name = "None", .value = SMA_DISABLE },
{ }
};
static const struct ocp_selector ptp_ocp_sma_out[] = {
{ .name = "10Mhz", .value = 0x0000 },
{ .name = "PHC", .value = 0x0001 },
{ .name = "MAC", .value = 0x0002 },
{ .name = "GNSS1", .value = 0x0004 },
{ .name = "GNSS2", .value = 0x0008 },
{ .name = "IRIG", .value = 0x0010 },
{ .name = "DCF", .value = 0x0020 },
{ .name = "GEN1", .value = 0x0040 },
{ .name = "GEN2", .value = 0x0080 },
{ .name = "GEN3", .value = 0x0100 },
{ .name = "GEN4", .value = 0x0200 },
{ .name = "GND", .value = 0x2000 },
{ .name = "VCC", .value = 0x4000 },
{ }
};
static const struct ocp_selector ptp_ocp_art_sma_in[] = {
{ .name = "PPS1", .value = 0x0001 },
{ .name = "10Mhz", .value = 0x0008 },
{ }
};
static const struct ocp_selector ptp_ocp_art_sma_out[] = {
{ .name = "PHC", .value = 0x0002 },
{ .name = "GNSS", .value = 0x0004 },
{ .name = "10Mhz", .value = 0x0010 },
{ }
};
struct ocp_sma_op {
const struct ocp_selector *tbl[2];
void (*init)(struct ptp_ocp *bp);
u32 (*get)(struct ptp_ocp *bp, int sma_nr);
int (*set_inputs)(struct ptp_ocp *bp, int sma_nr, u32 val);
int (*set_output)(struct ptp_ocp *bp, int sma_nr, u32 val);
};
static void
ptp_ocp_sma_init(struct ptp_ocp *bp)
{
return bp->sma_op->init(bp);
}
static u32
ptp_ocp_sma_get(struct ptp_ocp *bp, int sma_nr)
{
return bp->sma_op->get(bp, sma_nr);
}
static int
ptp_ocp_sma_set_inputs(struct ptp_ocp *bp, int sma_nr, u32 val)
{
return bp->sma_op->set_inputs(bp, sma_nr, val);
}
static int
ptp_ocp_sma_set_output(struct ptp_ocp *bp, int sma_nr, u32 val)
{
return bp->sma_op->set_output(bp, sma_nr, val);
}
static const char *
ptp_ocp_select_name_from_val(const struct ocp_selector *tbl, int val)
{
int i;
for (i = 0; tbl[i].name; i++)
if (tbl[i].value == val)
return tbl[i].name;
return NULL;
}
static int
ptp_ocp_select_val_from_name(const struct ocp_selector *tbl, const char *name)
{
const char *select;
int i;
for (i = 0; tbl[i].name; i++) {
select = tbl[i].name;
if (!strncasecmp(name, select, strlen(select)))
return tbl[i].value;
}
return -EINVAL;
}
static ssize_t
ptp_ocp_select_table_show(const struct ocp_selector *tbl, char *buf)
{
ssize_t count;
int i;
count = 0;
for (i = 0; tbl[i].name; i++)
count += sysfs_emit_at(buf, count, "%s ", tbl[i].name);
if (count)
count--;
count += sysfs_emit_at(buf, count, "\n");
return count;
}
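/* Latch the current time into the time_sec/time_ns registers by issuing a
 * READ_TIME request, then poll (bounded at 100 reads) for completion. The
 * pre/post system timestamps bracket the register access for gettimex();
 * the post timestamp is optionally pulled back by ts_window_adjust to
 * compensate for PCI read latency.
 */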
static int
__ptp_ocp_gettime_locked(struct ptp_ocp *bp, struct timespec64 *ts,
struct ptp_system_timestamp *sts)
{
u32 ctrl, time_sec, time_ns;
int i;
ptp_read_system_prets(sts);
ctrl = OCP_CTRL_READ_TIME_REQ | OCP_CTRL_ENABLE;
iowrite32(ctrl, &bp->reg->ctrl);
for (i = 0; i < 100; i++) {
ctrl = ioread32(&bp->reg->ctrl);
if (ctrl & OCP_CTRL_READ_TIME_DONE)
break;
}
ptp_read_system_postts(sts);
if (sts && bp->ts_window_adjust) {
s64 ns = timespec64_to_ns(&sts->post_ts);
sts->post_ts = ns_to_timespec64(ns - bp->ts_window_adjust);
}
time_ns = ioread32(&bp->reg->time_ns);
time_sec = ioread32(&bp->reg->time_sec);
ts->tv_sec = time_sec;
ts->tv_nsec = time_ns;
return ctrl & OCP_CTRL_READ_TIME_DONE ? 0 : -ETIMEDOUT;
}
static int
ptp_ocp_gettimex(struct ptp_clock_info *ptp_info, struct timespec64 *ts,
struct ptp_system_timestamp *sts)
{
struct ptp_ocp *bp = container_of(ptp_info, struct ptp_ocp, ptp_info);
unsigned long flags;
int err;
spin_lock_irqsave(&bp->lock, flags);
err = __ptp_ocp_gettime_locked(bp, ts, sts);
spin_unlock_irqrestore(&bp->lock, flags);
return err;
}
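/* Adjustments are written with the register clock source ("REGS")
 * selected; the previous selection, reported in the upper 16 bits of the
 * select register, is restored afterwards. The same pattern is used for
 * settime, adjtime and drift clearing.
 */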
static void
__ptp_ocp_settime_locked(struct ptp_ocp *bp, const struct timespec64 *ts)
{
u32 ctrl, time_sec, time_ns;
u32 select;
time_ns = ts->tv_nsec;
time_sec = ts->tv_sec;
select = ioread32(&bp->reg->select);
iowrite32(OCP_SELECT_CLK_REG, &bp->reg->select);
iowrite32(time_ns, &bp->reg->adjust_ns);
iowrite32(time_sec, &bp->reg->adjust_sec);
ctrl = OCP_CTRL_ADJUST_TIME | OCP_CTRL_ENABLE;
iowrite32(ctrl, &bp->reg->ctrl);
/* restore clock selection */
iowrite32(select >> 16, &bp->reg->select);
}
static int
ptp_ocp_settime(struct ptp_clock_info *ptp_info, const struct timespec64 *ts)
{
struct ptp_ocp *bp = container_of(ptp_info, struct ptp_ocp, ptp_info);
unsigned long flags;
spin_lock_irqsave(&bp->lock, flags);
__ptp_ocp_settime_locked(bp, ts);
spin_unlock_irqrestore(&bp->lock, flags);
return 0;
}
static void
__ptp_ocp_adjtime_locked(struct ptp_ocp *bp, u32 adj_val)
{
u32 select, ctrl;
select = ioread32(&bp->reg->select);
iowrite32(OCP_SELECT_CLK_REG, &bp->reg->select);
iowrite32(adj_val, &bp->reg->offset_ns);
iowrite32(NSEC_PER_SEC, &bp->reg->offset_window_ns);
ctrl = OCP_CTRL_ADJUST_OFFSET | OCP_CTRL_ENABLE;
iowrite32(ctrl, &bp->reg->ctrl);
/* restore clock selection */
iowrite32(select >> 16, &bp->reg->select);
}
static void
ptp_ocp_adjtime_coarse(struct ptp_ocp *bp, s64 delta_ns)
{
struct timespec64 ts;
unsigned long flags;
int err;
spin_lock_irqsave(&bp->lock, flags);
err = __ptp_ocp_gettime_locked(bp, &ts, NULL);
if (likely(!err)) {
set_normalized_timespec64(&ts, ts.tv_sec,
ts.tv_nsec + delta_ns);
__ptp_ocp_settime_locked(bp, &ts);
}
spin_unlock_irqrestore(&bp->lock, flags);
}
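/* Offsets larger than one second are applied coarsely by re-reading and
 * rewriting the absolute time; smaller offsets are handed to the hardware
 * offset adjuster with the sign carried in bit 31.
 */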
static int
ptp_ocp_adjtime(struct ptp_clock_info *ptp_info, s64 delta_ns)
{
struct ptp_ocp *bp = container_of(ptp_info, struct ptp_ocp, ptp_info);
unsigned long flags;
u32 adj_ns, sign;
if (delta_ns > NSEC_PER_SEC || -delta_ns > NSEC_PER_SEC) {
ptp_ocp_adjtime_coarse(bp, delta_ns);
return 0;
}
sign = delta_ns < 0 ? BIT(31) : 0;
adj_ns = sign ? -delta_ns : delta_ns;
spin_lock_irqsave(&bp->lock, flags);
__ptp_ocp_adjtime_locked(bp, sign | adj_ns);
spin_unlock_irqrestore(&bp->lock, flags);
return 0;
}
static int
ptp_ocp_null_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
{
if (scaled_ppm == 0)
return 0;
return -EOPNOTSUPP;
}
static s32
ptp_ocp_null_getmaxphase(struct ptp_clock_info *ptp_info)
{
return 0;
}
static int
ptp_ocp_null_adjphase(struct ptp_clock_info *ptp_info, s32 phase_ns)
{
return -EOPNOTSUPP;
}
static int
ptp_ocp_enable(struct ptp_clock_info *ptp_info, struct ptp_clock_request *rq,
int on)
{
struct ptp_ocp *bp = container_of(ptp_info, struct ptp_ocp, ptp_info);
struct ptp_ocp_ext_src *ext = NULL;
u32 req;
int err;
switch (rq->type) {
case PTP_CLK_REQ_EXTTS:
req = OCP_REQ_TIMESTAMP;
switch (rq->extts.index) {
case 0:
ext = bp->ts0;
break;
case 1:
ext = bp->ts1;
break;
case 2:
ext = bp->ts2;
break;
case 3:
ext = bp->ts3;
break;
case 4:
ext = bp->ts4;
break;
case 5:
ext = bp->pps;
break;
}
break;
case PTP_CLK_REQ_PPS:
req = OCP_REQ_PPS;
ext = bp->pps;
break;
case PTP_CLK_REQ_PEROUT:
switch (rq->perout.index) {
case 0:
/* This is a request for 1PPS on an output SMA.
* Allow, but assume manual configuration.
*/
if (on && (rq->perout.period.sec != 1 ||
rq->perout.period.nsec != 0))
return -EINVAL;
return 0;
case 1:
case 2:
case 3:
case 4:
req = rq->perout.index - 1;
ext = bp->signal_out[req];
err = ptp_ocp_signal_from_perout(bp, req, &rq->perout);
if (err)
return err;
break;
}
break;
default:
return -EOPNOTSUPP;
}
err = -ENXIO;
if (ext)
err = ext->info->enable(ext, req, on);
return err;
}
static int
ptp_ocp_verify(struct ptp_clock_info *ptp_info, unsigned pin,
enum ptp_pin_function func, unsigned chan)
{
struct ptp_ocp *bp = container_of(ptp_info, struct ptp_ocp, ptp_info);
char buf[16];
switch (func) {
case PTP_PF_NONE:
snprintf(buf, sizeof(buf), "IN: None");
break;
case PTP_PF_EXTTS:
/* Allow timestamps, but require sysfs configuration. */
return 0;
case PTP_PF_PEROUT:
/* channel 0 is 1PPS from PHC.
* channels 1..4 are the frequency generators.
*/
if (chan)
snprintf(buf, sizeof(buf), "OUT: GEN%d", chan);
else
snprintf(buf, sizeof(buf), "OUT: PHC");
break;
default:
return -EOPNOTSUPP;
}
return ptp_ocp_sma_store(bp, buf, pin + 1);
}
static const struct ptp_clock_info ptp_ocp_clock_info = {
.owner = THIS_MODULE,
.name = KBUILD_MODNAME,
.max_adj = 100000000,
.gettimex64 = ptp_ocp_gettimex,
.settime64 = ptp_ocp_settime,
.adjtime = ptp_ocp_adjtime,
.adjfine = ptp_ocp_null_adjfine,
.adjphase = ptp_ocp_null_adjphase,
.getmaxphase = ptp_ocp_null_getmaxphase,
.enable = ptp_ocp_enable,
.verify = ptp_ocp_verify,
.pps = true,
.n_ext_ts = 6,
.n_per_out = 5,
};
static void
__ptp_ocp_clear_drift_locked(struct ptp_ocp *bp)
{
u32 ctrl, select;
select = ioread32(&bp->reg->select);
iowrite32(OCP_SELECT_CLK_REG, &bp->reg->select);
iowrite32(0, &bp->reg->drift_ns);
ctrl = OCP_CTRL_ADJUST_DRIFT | OCP_CTRL_ENABLE;
iowrite32(ctrl, &bp->reg->ctrl);
/* restore clock selection */
iowrite32(select >> 16, &bp->reg->select);
}
static void
ptp_ocp_utc_distribute(struct ptp_ocp *bp, u32 val)
{
unsigned long flags;
spin_lock_irqsave(&bp->lock, flags);
bp->utc_tai_offset = val;
if (bp->irig_out)
iowrite32(val, &bp->irig_out->adj_sec);
if (bp->dcf_out)
iowrite32(val, &bp->dcf_out->adj_sec);
if (bp->nmea_out)
iowrite32(val, &bp->nmea_out->adj_sec);
spin_unlock_irqrestore(&bp->lock, flags);
}
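/* Per-second watchdog: on a PPS supervisor error, clear the servo drift
 * and record when GNSS sync was lost; while the TOD block reports a valid
 * UTC offset, keep the UTC-TAI offset distributed to the output blocks.
 */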
static void
ptp_ocp_watchdog(struct timer_list *t)
{
struct ptp_ocp *bp = from_timer(bp, t, watchdog);
unsigned long flags;
u32 status, utc_offset;
status = ioread32(&bp->pps_to_clk->status);
if (status & PPS_STATUS_SUPERV_ERR) {
iowrite32(status, &bp->pps_to_clk->status);
if (!bp->gnss_lost) {
spin_lock_irqsave(&bp->lock, flags);
__ptp_ocp_clear_drift_locked(bp);
spin_unlock_irqrestore(&bp->lock, flags);
bp->gnss_lost = ktime_get_real_seconds();
}
} else if (bp->gnss_lost) {
bp->gnss_lost = 0;
}
/* if GNSS provides correct data we can rely on
* it to get leap second information
*/
if (bp->tod) {
status = ioread32(&bp->tod->utc_status);
utc_offset = status & TOD_STATUS_UTC_MASK;
if (status & TOD_STATUS_UTC_VALID &&
utc_offset != bp->utc_tai_offset)
ptp_ocp_utc_distribute(bp, utc_offset);
}
mod_timer(&bp->watchdog, jiffies + HZ);
}
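/* Measure one control register read round trip over PCI and keep roughly
 * 3/32 of it as ts_window_adjust, which gettimex() uses to tighten the
 * reported system timestamp window.
 */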
static void
ptp_ocp_estimate_pci_timing(struct ptp_ocp *bp)
{
ktime_t start, end;
ktime_t delay;
u32 ctrl;
ctrl = ioread32(&bp->reg->ctrl);
ctrl = OCP_CTRL_READ_TIME_REQ | OCP_CTRL_ENABLE;
iowrite32(ctrl, &bp->reg->ctrl);
start = ktime_get_ns();
ctrl = ioread32(&bp->reg->ctrl);
end = ktime_get_ns();
delay = end - start;
bp->ts_window_adjust = (delay >> 5) * 3;
}
static int
ptp_ocp_init_clock(struct ptp_ocp *bp)
{
struct timespec64 ts;
bool sync;
u32 ctrl;
ctrl = OCP_CTRL_ENABLE;
iowrite32(ctrl, &bp->reg->ctrl);
/* NO DRIFT Correction */
/* offset_p:i 1/8, offset_i: 1/16, drift_p: 0, drift_i: 0 */
iowrite32(0x2000, &bp->reg->servo_offset_p);
iowrite32(0x1000, &bp->reg->servo_offset_i);
iowrite32(0, &bp->reg->servo_drift_p);
iowrite32(0, &bp->reg->servo_drift_i);
/* latch servo values */
ctrl |= OCP_CTRL_ADJUST_SERVO;
iowrite32(ctrl, &bp->reg->ctrl);
if ((ioread32(&bp->reg->ctrl) & OCP_CTRL_ENABLE) == 0) {
dev_err(&bp->pdev->dev, "clock not enabled\n");
return -ENODEV;
}
ptp_ocp_estimate_pci_timing(bp);
sync = ioread32(&bp->reg->status) & OCP_STATUS_IN_SYNC;
if (!sync) {
ktime_get_clocktai_ts64(&ts);
ptp_ocp_settime(&bp->ptp_info, &ts);
}
/* If there is a clock supervisor, then enable the watchdog */
if (bp->pps_to_clk) {
timer_setup(&bp->watchdog, ptp_ocp_watchdog, 0);
mod_timer(&bp->watchdog, jiffies + HZ);
}
return 0;
}
static void
ptp_ocp_tod_init(struct ptp_ocp *bp)
{
u32 ctrl, reg;
ctrl = ioread32(&bp->tod->ctrl);
ctrl |= TOD_CTRL_PROTOCOL | TOD_CTRL_ENABLE;
ctrl &= ~(TOD_CTRL_DISABLE_FMT_A | TOD_CTRL_DISABLE_FMT_B);
iowrite32(ctrl, &bp->tod->ctrl);
reg = ioread32(&bp->tod->utc_status);
if (reg & TOD_STATUS_UTC_VALID)
ptp_ocp_utc_distribute(bp, reg & TOD_STATUS_UTC_MASK);
}
static const char *
ptp_ocp_tod_proto_name(const int idx)
{
static const char * const proto_name[] = {
"NMEA", "NMEA_ZDA", "NMEA_RMC", "NMEA_none",
"UBX", "UBX_UTC", "UBX_LS", "UBX_none"
};
return proto_name[idx];
}
static const char *
ptp_ocp_tod_gnss_name(int idx)
{
static const char * const gnss_name[] = {
"ALL", "COMBINED", "GPS", "GLONASS", "GALILEO", "BEIDOU",
"Unknown"
};
if (idx >= ARRAY_SIZE(gnss_name))
idx = ARRAY_SIZE(gnss_name) - 1;
return gnss_name[idx];
}
struct ptp_ocp_nvmem_match_info {
struct ptp_ocp *bp;
const void * const tag;
};
static int
ptp_ocp_nvmem_match(struct device *dev, const void *data)
{
const struct ptp_ocp_nvmem_match_info *info = data;
dev = dev->parent;
if (!i2c_verify_client(dev) || info->tag != dev->platform_data)
return 0;
while ((dev = dev->parent))
if (dev->driver && !strcmp(dev->driver->name, KBUILD_MODNAME))
return info->bp == dev_get_drvdata(dev);
return 0;
}
static inline struct nvmem_device *
ptp_ocp_nvmem_device_get(struct ptp_ocp *bp, const void * const tag)
{
struct ptp_ocp_nvmem_match_info info = { .bp = bp, .tag = tag };
return nvmem_device_find(&info, ptp_ocp_nvmem_match);
}
static inline void
ptp_ocp_nvmem_device_put(struct nvmem_device **nvmemp)
{
if (!IS_ERR_OR_NULL(*nvmemp))
nvmem_device_put(*nvmemp);
*nvmemp = NULL;
}
static void
ptp_ocp_read_eeprom(struct ptp_ocp *bp)
{
const struct ptp_ocp_eeprom_map *map;
struct nvmem_device *nvmem;
const void *tag;
int ret;
if (!bp->i2c_ctrl)
return;
tag = NULL;
nvmem = NULL;
for (map = bp->eeprom_map; map->len; map++) {
if (map->tag != tag) {
tag = map->tag;
ptp_ocp_nvmem_device_put(&nvmem);
}
if (!nvmem) {
nvmem = ptp_ocp_nvmem_device_get(bp, tag);
if (IS_ERR(nvmem)) {
ret = PTR_ERR(nvmem);
goto fail;
}
}
ret = nvmem_device_read(nvmem, map->off, map->len,
BP_MAP_ENTRY_ADDR(bp, map));
if (ret != map->len)
goto fail;
}
bp->has_eeprom_data = true;
out:
ptp_ocp_nvmem_device_put(&nvmem);
return;
fail:
dev_err(&bp->pdev->dev, "could not read eeprom: %d\n", ret);
goto out;
}
static struct device *
ptp_ocp_find_flash(struct ptp_ocp *bp)
{
struct device *dev, *last;
last = NULL;
dev = &bp->spi_flash->dev;
while ((dev = device_find_any_child(dev))) {
if (!strcmp("mtd", dev_bus_name(dev)))
break;
put_device(last);
last = dev;
}
put_device(last);
return dev;
}
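/* Validate a firmware image before flashing: check the header magic, that
 * the PCI vendor/device IDs match this board, that the declared image size
 * matches the file, and that the CRC16 over the payload is correct.
 */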
static int
ptp_ocp_devlink_fw_image(struct devlink *devlink, const struct firmware *fw,
const u8 **data, size_t *size)
{
struct ptp_ocp *bp = devlink_priv(devlink);
const struct ptp_ocp_firmware_header *hdr;
size_t offset, length;
u16 crc;
hdr = (const struct ptp_ocp_firmware_header *)fw->data;
if (memcmp(hdr->magic, OCP_FIRMWARE_MAGIC_HEADER, 4)) {
devlink_flash_update_status_notify(devlink,
"No firmware header found, cancel firmware upgrade",
NULL, 0, 0);
return -EINVAL;
}
if (be16_to_cpu(hdr->pci_vendor_id) != bp->pdev->vendor ||
be16_to_cpu(hdr->pci_device_id) != bp->pdev->device) {
devlink_flash_update_status_notify(devlink,
"Firmware image compatibility check failed",
NULL, 0, 0);
return -EINVAL;
}
offset = sizeof(*hdr);
length = be32_to_cpu(hdr->image_size);
if (length != (fw->size - offset)) {
devlink_flash_update_status_notify(devlink,
"Firmware image size check failed",
NULL, 0, 0);
return -EINVAL;
}
crc = crc16(0xffff, &fw->data[offset], length);
if (be16_to_cpu(hdr->crc) != crc) {
devlink_flash_update_status_notify(devlink,
"Firmware image CRC check failed",
NULL, 0, 0);
return -EINVAL;
}
*data = &fw->data[offset];
*size = length;
return 0;
}
static int
ptp_ocp_devlink_flash(struct devlink *devlink, struct device *dev,
const struct firmware *fw)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
struct ptp_ocp *bp = devlink_priv(devlink);
size_t off, len, size, resid, wrote;
struct erase_info erase;
size_t base, blksz;
const u8 *data;
int err;
err = ptp_ocp_devlink_fw_image(devlink, fw, &data, &size);
if (err)
goto out;
off = 0;
base = bp->flash_start;
blksz = 4096;
resid = size;
while (resid) {
devlink_flash_update_status_notify(devlink, "Flashing",
NULL, off, size);
len = min_t(size_t, resid, blksz);
erase.addr = base + off;
erase.len = blksz;
err = mtd_erase(mtd, &erase);
if (err)
goto out;
err = mtd_write(mtd, base + off, len, &wrote, data + off);
if (err)
goto out;
off += blksz;
resid -= len;
}
out:
return err;
}
static int
ptp_ocp_devlink_flash_update(struct devlink *devlink,
struct devlink_flash_update_params *params,
struct netlink_ext_ack *extack)
{
struct ptp_ocp *bp = devlink_priv(devlink);
struct device *dev;
const char *msg;
int err;
dev = ptp_ocp_find_flash(bp);
if (!dev) {
dev_err(&bp->pdev->dev, "Can't find Flash SPI adapter\n");
return -ENODEV;
}
devlink_flash_update_status_notify(devlink, "Preparing to flash",
NULL, 0, 0);
err = ptp_ocp_devlink_flash(devlink, dev, params->fw);
msg = err ? "Flash error" : "Flash complete";
devlink_flash_update_status_notify(devlink, msg, NULL, 0, 0);
put_device(dev);
return err;
}
static int
ptp_ocp_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
struct netlink_ext_ack *extack)
{
struct ptp_ocp *bp = devlink_priv(devlink);
const char *fw_image;
char buf[32];
int err;
fw_image = bp->fw_loader ? "loader" : "fw";
sprintf(buf, "%d.%d", bp->fw_tag, bp->fw_version);
err = devlink_info_version_running_put(req, fw_image, buf);
if (err)
return err;
if (!bp->has_eeprom_data) {
ptp_ocp_read_eeprom(bp);
if (!bp->has_eeprom_data)
return 0;
}
sprintf(buf, "%pM", bp->serial);
err = devlink_info_serial_number_put(req, buf);
if (err)
return err;
err = devlink_info_version_fixed_put(req,
DEVLINK_INFO_VERSION_GENERIC_BOARD_ID,
bp->board_id);
if (err)
return err;
return 0;
}
static const struct devlink_ops ptp_ocp_devlink_ops = {
.flash_update = ptp_ocp_devlink_flash_update,
.info_get = ptp_ocp_devlink_info_get,
};
static void __iomem *
__ptp_ocp_get_mem(struct ptp_ocp *bp, resource_size_t start, int size)
{
struct resource res = DEFINE_RES_MEM_NAMED(start, size, "ptp_ocp");
return devm_ioremap_resource(&bp->pdev->dev, &res);
}
static void __iomem *
ptp_ocp_get_mem(struct ptp_ocp *bp, struct ocp_resource *r)
{
resource_size_t start;
start = pci_resource_start(bp->pdev, 0) + r->offset;
return __ptp_ocp_get_mem(bp, start, r->size);
}
static void
ptp_ocp_set_irq_resource(struct resource *res, int irq)
{
struct resource r = DEFINE_RES_IRQ(irq);
*res = r;
}
static void
ptp_ocp_set_mem_resource(struct resource *res, resource_size_t start, int size)
{
struct resource r = DEFINE_RES_MEM(start, size);
*res = r;
}
static int
ptp_ocp_register_spi(struct ptp_ocp *bp, struct ocp_resource *r)
{
struct ptp_ocp_flash_info *info;
struct pci_dev *pdev = bp->pdev;
struct platform_device *p;
struct resource res[2];
resource_size_t start;
int id;
start = pci_resource_start(pdev, 0) + r->offset;
ptp_ocp_set_mem_resource(&res[0], start, r->size);
ptp_ocp_set_irq_resource(&res[1], pci_irq_vector(pdev, r->irq_vec));
info = r->extra;
id = pci_dev_id(pdev) << 1;
id += info->pci_offset;
p = platform_device_register_resndata(&pdev->dev, info->name, id,
res, 2, info->data,
info->data_size);
if (IS_ERR(p))
return PTR_ERR(p);
bp_assign_entry(bp, r, p);
return 0;
}
static struct platform_device *
ptp_ocp_i2c_bus(struct pci_dev *pdev, struct ocp_resource *r, int id)
{
struct ptp_ocp_i2c_info *info;
struct resource res[2];
resource_size_t start;
info = r->extra;
start = pci_resource_start(pdev, 0) + r->offset;
ptp_ocp_set_mem_resource(&res[0], start, r->size);
ptp_ocp_set_irq_resource(&res[1], pci_irq_vector(pdev, r->irq_vec));
return platform_device_register_resndata(&pdev->dev, info->name,
id, res, 2,
info->data, info->data_size);
}
static int
ptp_ocp_register_i2c(struct ptp_ocp *bp, struct ocp_resource *r)
{
struct pci_dev *pdev = bp->pdev;
struct ptp_ocp_i2c_info *info;
struct platform_device *p;
struct clk_hw *clk;
char buf[32];
int id;
info = r->extra;
id = pci_dev_id(bp->pdev);
sprintf(buf, "AXI.%d", id);
clk = clk_hw_register_fixed_rate(&pdev->dev, buf, NULL, 0,
info->fixed_rate);
if (IS_ERR(clk))
return PTR_ERR(clk);
bp->i2c_clk = clk;
sprintf(buf, "%s.%d", info->name, id);
devm_clk_hw_register_clkdev(&pdev->dev, clk, NULL, buf);
p = ptp_ocp_i2c_bus(bp->pdev, r, id);
if (IS_ERR(p))
return PTR_ERR(p);
bp_assign_entry(bp, r, p);
return 0;
}
/* The expectation is that this is triggered only on error. */
static irqreturn_t
ptp_ocp_signal_irq(int irq, void *priv)
{
struct ptp_ocp_ext_src *ext = priv;
struct signal_reg __iomem *reg = ext->mem;
struct ptp_ocp *bp = ext->bp;
u32 enable, status;
int gen;
gen = ext->info->index - 1;
enable = ioread32(&reg->enable);
status = ioread32(&reg->status);
/* disable generator on error */
if (status || !enable) {
iowrite32(0, &reg->intr_mask);
iowrite32(0, &reg->enable);
bp->signal[gen].running = false;
}
iowrite32(0, &reg->intr); /* ack interrupt */
return IRQ_HANDLED;
}
static int
ptp_ocp_signal_set(struct ptp_ocp *bp, int gen, struct ptp_ocp_signal *s)
{
struct ptp_system_timestamp sts;
struct timespec64 ts;
ktime_t start_ns;
int err;
if (!s->period)
return 0;
if (!s->pulse)
s->pulse = ktime_divns(s->period * s->duty, 100);
err = ptp_ocp_gettimex(&bp->ptp_info, &ts, &sts);
if (err)
return err;
start_ns = ktime_set(ts.tv_sec, ts.tv_nsec) + NSEC_PER_MSEC;
if (!s->start) {
/* roundup() does not work on 32-bit systems */
s->start = DIV64_U64_ROUND_UP(start_ns, s->period);
s->start = ktime_add(s->start * s->period, s->phase);
}
if (s->duty < 1 || s->duty > 99)
return -EINVAL;
if (s->pulse < 1 || s->pulse > s->period)
return -EINVAL;
if (s->start < start_ns)
return -EINVAL;
bp->signal[gen] = *s;
return 0;
}
static int
ptp_ocp_signal_from_perout(struct ptp_ocp *bp, int gen,
struct ptp_perout_request *req)
{
struct ptp_ocp_signal s = { };
s.polarity = bp->signal[gen].polarity;
s.period = ktime_set(req->period.sec, req->period.nsec);
if (!s.period)
return 0;
if (req->flags & PTP_PEROUT_DUTY_CYCLE) {
s.pulse = ktime_set(req->on.sec, req->on.nsec);
s.duty = ktime_divns(s.pulse * 100, s.period);
}
if (req->flags & PTP_PEROUT_PHASE)
s.phase = ktime_set(req->phase.sec, req->phase.nsec);
else
s.start = ktime_set(req->start.sec, req->start.nsec);
return ptp_ocp_signal_set(bp, gen, &s);
}
static int
ptp_ocp_signal_enable(void *priv, u32 req, bool enable)
{
struct ptp_ocp_ext_src *ext = priv;
struct signal_reg __iomem *reg = ext->mem;
struct ptp_ocp *bp = ext->bp;
struct timespec64 ts;
int gen;
gen = ext->info->index - 1;
iowrite32(0, &reg->intr_mask);
iowrite32(0, &reg->enable);
bp->signal[gen].running = false;
if (!enable)
return 0;
ts = ktime_to_timespec64(bp->signal[gen].start);
iowrite32(ts.tv_sec, &reg->start_sec);
iowrite32(ts.tv_nsec, &reg->start_ns);
ts = ktime_to_timespec64(bp->signal[gen].period);
iowrite32(ts.tv_sec, &reg->period_sec);
iowrite32(ts.tv_nsec, &reg->period_ns);
ts = ktime_to_timespec64(bp->signal[gen].pulse);
iowrite32(ts.tv_sec, &reg->pulse_sec);
iowrite32(ts.tv_nsec, &reg->pulse_ns);
iowrite32(bp->signal[gen].polarity, &reg->polarity);
iowrite32(0, &reg->repeat_count);
iowrite32(0, &reg->intr); /* clear interrupt state */
iowrite32(1, &reg->intr_mask); /* enable interrupt */
iowrite32(3, &reg->enable); /* valid & enable */
bp->signal[gen].running = true;
return 0;
}
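/* The PPS timestamper is shared between the PTP_CLK_REQ_PPS event source
 * and external timestamp channel 5; pps_req_map tracks which consumers are
 * active so the hardware is only enabled or disabled on an overall state
 * change.
 */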
static irqreturn_t
ptp_ocp_ts_irq(int irq, void *priv)
{
struct ptp_ocp_ext_src *ext = priv;
struct ts_reg __iomem *reg = ext->mem;
struct ptp_clock_event ev;
u32 sec, nsec;
if (ext == ext->bp->pps) {
if (ext->bp->pps_req_map & OCP_REQ_PPS) {
ev.type = PTP_CLOCK_PPS;
ptp_clock_event(ext->bp->ptp, &ev);
}
if ((ext->bp->pps_req_map & ~OCP_REQ_PPS) == 0)
goto out;
}
/* XXX should fix API - this converts s/ns -> ts -> s/ns */
sec = ioread32(&reg->time_sec);
nsec = ioread32(&reg->time_ns);
ev.type = PTP_CLOCK_EXTTS;
ev.index = ext->info->index;
ev.timestamp = sec * NSEC_PER_SEC + nsec;
ptp_clock_event(ext->bp->ptp, &ev);
out:
iowrite32(1, &reg->intr); /* write 1 to ack */
return IRQ_HANDLED;
}
static int
ptp_ocp_ts_enable(void *priv, u32 req, bool enable)
{
struct ptp_ocp_ext_src *ext = priv;
struct ts_reg __iomem *reg = ext->mem;
struct ptp_ocp *bp = ext->bp;
if (ext == bp->pps) {
u32 old_map = bp->pps_req_map;
if (enable)
bp->pps_req_map |= req;
else
bp->pps_req_map &= ~req;
/* if no state change, just return */
if ((!!old_map ^ !!bp->pps_req_map) == 0)
return 0;
}
if (enable) {
iowrite32(1, &reg->enable);
iowrite32(1, &reg->intr_mask);
iowrite32(1, &reg->intr);
} else {
iowrite32(0, &reg->intr_mask);
iowrite32(0, &reg->enable);
}
return 0;
}
static void
ptp_ocp_unregister_ext(struct ptp_ocp_ext_src *ext)
{
ext->info->enable(ext, ~0, false);
pci_free_irq(ext->bp->pdev, ext->irq_vec, ext);
kfree(ext);
}
static int
ptp_ocp_register_ext(struct ptp_ocp *bp, struct ocp_resource *r)
{
struct pci_dev *pdev = bp->pdev;
struct ptp_ocp_ext_src *ext;
int err;
ext = kzalloc(sizeof(*ext), GFP_KERNEL);
if (!ext)
return -ENOMEM;
ext->mem = ptp_ocp_get_mem(bp, r);
if (IS_ERR(ext->mem)) {
err = PTR_ERR(ext->mem);
goto out;
}
ext->bp = bp;
ext->info = r->extra;
ext->irq_vec = r->irq_vec;
err = pci_request_irq(pdev, r->irq_vec, ext->info->irq_fcn, NULL,
ext, "ocp%d.%s", bp->id, r->name);
if (err) {
dev_err(&pdev->dev, "Could not get irq %d\n", r->irq_vec);
goto out;
}
bp_assign_entry(bp, r, ext);
return 0;
out:
kfree(ext);
return err;
}
static int
ptp_ocp_serial_line(struct ptp_ocp *bp, struct ocp_resource *r)
{
struct pci_dev *pdev = bp->pdev;
struct uart_8250_port uart;
/* Setting UPF_IOREMAP and leaving port.membase unspecified lets
* the serial port device claim and release the pci resource.
*/
memset(&uart, 0, sizeof(uart));
uart.port.dev = &pdev->dev;
uart.port.iotype = UPIO_MEM;
uart.port.regshift = 2;
uart.port.mapbase = pci_resource_start(pdev, 0) + r->offset;
uart.port.irq = pci_irq_vector(pdev, r->irq_vec);
uart.port.uartclk = 50000000;
uart.port.flags = UPF_FIXED_TYPE | UPF_IOREMAP | UPF_NO_THRE_TEST;
uart.port.type = PORT_16550A;
return serial8250_register_8250_port(&uart);
}
static int
ptp_ocp_register_serial(struct ptp_ocp *bp, struct ocp_resource *r)
{
struct ptp_ocp_serial_port *p = (struct ptp_ocp_serial_port *)r->extra;
struct ptp_ocp_serial_port port = {};
port.line = ptp_ocp_serial_line(bp, r);
if (port.line < 0)
return port.line;
if (p)
port.baud = p->baud;
bp_assign_entry(bp, r, port);
return 0;
}
static int
ptp_ocp_register_mem(struct ptp_ocp *bp, struct ocp_resource *r)
{
void __iomem *mem;
mem = ptp_ocp_get_mem(bp, r);
if (IS_ERR(mem))
return PTR_ERR(mem);
bp_assign_entry(bp, r, mem);
return 0;
}
static void
ptp_ocp_nmea_out_init(struct ptp_ocp *bp)
{
if (!bp->nmea_out)
return;
iowrite32(0, &bp->nmea_out->ctrl); /* disable */
iowrite32(7, &bp->nmea_out->uart_baud); /* 115200 */
iowrite32(1, &bp->nmea_out->ctrl); /* enable */
}
static void
_ptp_ocp_signal_init(struct ptp_ocp_signal *s, struct signal_reg __iomem *reg)
{
u32 val;
iowrite32(0, &reg->enable); /* disable */
val = ioread32(&reg->polarity);
s->polarity = val ? true : false;
s->duty = 50;
}
static void
ptp_ocp_signal_init(struct ptp_ocp *bp)
{
int i;
for (i = 0; i < 4; i++)
if (bp->signal_out[i])
_ptp_ocp_signal_init(&bp->signal[i],
bp->signal_out[i]->mem);
}
static void
ptp_ocp_attr_group_del(struct ptp_ocp *bp)
{
sysfs_remove_groups(&bp->dev.kobj, bp->attr_group);
kfree(bp->attr_group);
}
static int
ptp_ocp_attr_group_add(struct ptp_ocp *bp,
const struct ocp_attr_group *attr_tbl)
{
int count, i;
int err;
count = 0;
for (i = 0; attr_tbl[i].cap; i++)
if (attr_tbl[i].cap & bp->fw_cap)
count++;
bp->attr_group = kcalloc(count + 1, sizeof(struct attribute_group *),
GFP_KERNEL);
if (!bp->attr_group)
return -ENOMEM;
count = 0;
for (i = 0; attr_tbl[i].cap; i++)
if (attr_tbl[i].cap & bp->fw_cap)
bp->attr_group[count++] = attr_tbl[i].group;
err = sysfs_create_groups(&bp->dev.kobj, bp->attr_group);
if (err)
bp->attr_group[0] = NULL;
return err;
}
static void
ptp_ocp_enable_fpga(u32 __iomem *reg, u32 bit, bool enable)
{
u32 ctrl;
bool on;
ctrl = ioread32(reg);
on = ctrl & bit;
if (on ^ enable) {
ctrl &= ~bit;
ctrl |= enable ? bit : 0;
iowrite32(ctrl, reg);
}
}
static void
ptp_ocp_irig_out(struct ptp_ocp *bp, bool enable)
{
return ptp_ocp_enable_fpga(&bp->irig_out->ctrl,
IRIG_M_CTRL_ENABLE, enable);
}
static void
ptp_ocp_irig_in(struct ptp_ocp *bp, bool enable)
{
return ptp_ocp_enable_fpga(&bp->irig_in->ctrl,
IRIG_S_CTRL_ENABLE, enable);
}
static void
ptp_ocp_dcf_out(struct ptp_ocp *bp, bool enable)
{
return ptp_ocp_enable_fpga(&bp->dcf_out->ctrl,
DCF_M_CTRL_ENABLE, enable);
}
static void
ptp_ocp_dcf_in(struct ptp_ocp *bp, bool enable)
{
return ptp_ocp_enable_fpga(&bp->dcf_in->ctrl,
DCF_S_CTRL_ENABLE, enable);
}
static void
__handle_signal_outputs(struct ptp_ocp *bp, u32 val)
{
ptp_ocp_irig_out(bp, val & 0x00100010);
ptp_ocp_dcf_out(bp, val & 0x00200020);
}
static void
__handle_signal_inputs(struct ptp_ocp *bp, u32 val)
{
ptp_ocp_irig_in(bp, val & 0x00100010);
ptp_ocp_dcf_in(bp, val & 0x00200020);
}
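/* The FB card packs two 16-bit SMA selector fields into each 32-bit GPIO
 * word: odd-numbered connectors use the low half, even-numbered ones the
 * high half. The gpio1 words route inputs and the gpio2 words route
 * outputs; the IRIG/DCF blocks are switched to match the selection.
 */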
static u32
ptp_ocp_sma_fb_get(struct ptp_ocp *bp, int sma_nr)
{
u32 __iomem *gpio;
u32 shift;
if (bp->sma[sma_nr - 1].fixed_fcn)
return (sma_nr - 1) & 1;
if (bp->sma[sma_nr - 1].mode == SMA_MODE_IN)
gpio = sma_nr > 2 ? &bp->sma_map2->gpio1 : &bp->sma_map1->gpio1;
else
gpio = sma_nr > 2 ? &bp->sma_map1->gpio2 : &bp->sma_map2->gpio2;
shift = sma_nr & 1 ? 0 : 16;
return (ioread32(gpio) >> shift) & 0xffff;
}
static int
ptp_ocp_sma_fb_set_output(struct ptp_ocp *bp, int sma_nr, u32 val)
{
u32 reg, mask, shift;
unsigned long flags;
u32 __iomem *gpio;
gpio = sma_nr > 2 ? &bp->sma_map1->gpio2 : &bp->sma_map2->gpio2;
shift = sma_nr & 1 ? 0 : 16;
mask = 0xffff << (16 - shift);
spin_lock_irqsave(&bp->lock, flags);
reg = ioread32(gpio);
reg = (reg & mask) | (val << shift);
__handle_signal_outputs(bp, reg);
iowrite32(reg, gpio);
spin_unlock_irqrestore(&bp->lock, flags);
return 0;
}
static int
ptp_ocp_sma_fb_set_inputs(struct ptp_ocp *bp, int sma_nr, u32 val)
{
u32 reg, mask, shift;
unsigned long flags;
u32 __iomem *gpio;
gpio = sma_nr > 2 ? &bp->sma_map2->gpio1 : &bp->sma_map1->gpio1;
shift = sma_nr & 1 ? 0 : 16;
mask = 0xffff << (16 - shift);
spin_lock_irqsave(&bp->lock, flags);
reg = ioread32(gpio);
reg = (reg & mask) | (val << shift);
__handle_signal_inputs(bp, reg);
iowrite32(reg, gpio);
spin_unlock_irqrestore(&bp->lock, flags);
return 0;
}
static void
ptp_ocp_sma_fb_init(struct ptp_ocp *bp)
{
u32 reg;
int i;
/* defaults */
bp->sma[0].mode = SMA_MODE_IN;
bp->sma[1].mode = SMA_MODE_IN;
bp->sma[2].mode = SMA_MODE_OUT;
bp->sma[3].mode = SMA_MODE_OUT;
for (i = 0; i < 4; i++)
bp->sma[i].default_fcn = i & 1;
/* If no SMA1 map, the pin functions and directions are fixed. */
if (!bp->sma_map1) {
for (i = 0; i < 4; i++) {
bp->sma[i].fixed_fcn = true;
bp->sma[i].fixed_dir = true;
}
return;
}
/* If SMA2 GPIO output map is all 1, it is not present.
* This indicates the firmware has fixed direction SMA pins.
*/
reg = ioread32(&bp->sma_map2->gpio2);
if (reg == 0xffffffff) {
for (i = 0; i < 4; i++)
bp->sma[i].fixed_dir = true;
} else {
reg = ioread32(&bp->sma_map1->gpio1);
bp->sma[0].mode = reg & BIT(15) ? SMA_MODE_IN : SMA_MODE_OUT;
bp->sma[1].mode = reg & BIT(31) ? SMA_MODE_IN : SMA_MODE_OUT;
reg = ioread32(&bp->sma_map1->gpio2);
bp->sma[2].mode = reg & BIT(15) ? SMA_MODE_OUT : SMA_MODE_IN;
bp->sma[3].mode = reg & BIT(31) ? SMA_MODE_OUT : SMA_MODE_IN;
}
}
static const struct ocp_sma_op ocp_fb_sma_op = {
.tbl = { ptp_ocp_sma_in, ptp_ocp_sma_out },
.init = ptp_ocp_sma_fb_init,
.get = ptp_ocp_sma_fb_get,
.set_inputs = ptp_ocp_sma_fb_set_inputs,
.set_output = ptp_ocp_sma_fb_set_output,
};
static int
ptp_ocp_fb_set_pins(struct ptp_ocp *bp)
{
struct ptp_pin_desc *config;
int i;
config = kcalloc(4, sizeof(*config), GFP_KERNEL);
if (!config)
return -ENOMEM;
for (i = 0; i < 4; i++) {
sprintf(config[i].name, "sma%d", i + 1);
config[i].index = i;
}
bp->ptp_info.n_pins = 4;
bp->ptp_info.pin_config = config;
return 0;
}
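/* Decode the image version word: if the low 16 bits are zero this is the
 * firmware loader and the version lives in the upper half. Bit 15 then
 * distinguishes FPGA firmware (tag set) from SOM firmware, the low 15 bits
 * carry the version number, and the signal/frequency capabilities are
 * derived from it.
 */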
static void
ptp_ocp_fb_set_version(struct ptp_ocp *bp)
{
u64 cap = OCP_CAP_BASIC;
u32 version;
version = ioread32(&bp->image->version);
/* if lower 16 bits are empty, this is the fw loader. */
if ((version & 0xffff) == 0) {
version = version >> 16;
bp->fw_loader = true;
}
bp->fw_tag = version >> 15;
bp->fw_version = version & 0x7fff;
if (bp->fw_tag) {
/* FPGA firmware */
if (version >= 5)
cap |= OCP_CAP_SIGNAL | OCP_CAP_FREQ;
} else {
/* SOM firmware */
if (version >= 19)
cap |= OCP_CAP_SIGNAL;
if (version >= 20)
cap |= OCP_CAP_FREQ;
}
bp->fw_cap = cap;
}
/* FB specific board initializers; last "resource" registered. */
static int
ptp_ocp_fb_board_init(struct ptp_ocp *bp, struct ocp_resource *r)
{
int err;
bp->flash_start = 1024 * 4096;
bp->eeprom_map = fb_eeprom_map;
bp->fw_version = ioread32(&bp->image->version);
bp->sma_op = &ocp_fb_sma_op;
ptp_ocp_fb_set_version(bp);
ptp_ocp_tod_init(bp);
ptp_ocp_nmea_out_init(bp);
ptp_ocp_sma_init(bp);
ptp_ocp_signal_init(bp);
err = ptp_ocp_attr_group_add(bp, fb_timecard_groups);
if (err)
return err;
err = ptp_ocp_fb_set_pins(bp);
if (err)
return err;
return ptp_ocp_init_clock(bp);
}
static bool
ptp_ocp_allow_irq(struct ptp_ocp *bp, struct ocp_resource *r)
{
bool allow = !r->irq_vec || r->irq_vec < bp->n_irqs;
if (!allow)
dev_err(&bp->pdev->dev, "irq %d out of range, skipping %s\n",
r->irq_vec, r->name);
return allow;
}
static int
ptp_ocp_register_resources(struct ptp_ocp *bp, kernel_ulong_t driver_data)
{
struct ocp_resource *r, *table;
int err = 0;
table = (struct ocp_resource *)driver_data;
for (r = table; r->setup; r++) {
if (!ptp_ocp_allow_irq(bp, r))
continue;
err = r->setup(bp, r);
if (err) {
dev_err(&bp->pdev->dev,
"Could not register %s: err %d\n",
r->name, err);
break;
}
}
return err;
}
static void
ptp_ocp_art_sma_init(struct ptp_ocp *bp)
{
u32 reg;
int i;
/* defaults */
bp->sma[0].mode = SMA_MODE_IN;
bp->sma[1].mode = SMA_MODE_IN;
bp->sma[2].mode = SMA_MODE_OUT;
bp->sma[3].mode = SMA_MODE_OUT;
bp->sma[0].default_fcn = 0x08; /* IN: 10Mhz */
bp->sma[1].default_fcn = 0x01; /* IN: PPS1 */
bp->sma[2].default_fcn = 0x10; /* OUT: 10Mhz */
bp->sma[3].default_fcn = 0x02; /* OUT: PHC */
/* If no SMA map, the pin functions and directions are fixed. */
if (!bp->art_sma) {
for (i = 0; i < 4; i++) {
bp->sma[i].fixed_fcn = true;
bp->sma[i].fixed_dir = true;
}
return;
}
for (i = 0; i < 4; i++) {
reg = ioread32(&bp->art_sma->map[i].gpio);
switch (reg & 0xff) {
case 0:
bp->sma[i].fixed_fcn = true;
bp->sma[i].fixed_dir = true;
break;
case 1:
case 8:
bp->sma[i].mode = SMA_MODE_IN;
break;
default:
bp->sma[i].mode = SMA_MODE_OUT;
break;
}
}
}
static u32
ptp_ocp_art_sma_get(struct ptp_ocp *bp, int sma_nr)
{
if (bp->sma[sma_nr - 1].fixed_fcn)
return bp->sma[sma_nr - 1].default_fcn;
return ioread32(&bp->art_sma->map[sma_nr - 1].gpio) & 0xff;
}
/* note: store 0 is considered invalid. */
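/* The upper 16 bits of each ART SMA register advertise which selector bits
 * the pin supports; a request for an unsupported function is rejected with
 * -EOPNOTSUPP.
 */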
static int
ptp_ocp_art_sma_set(struct ptp_ocp *bp, int sma_nr, u32 val)
{
unsigned long flags;
u32 __iomem *gpio;
int err = 0;
u32 reg;
val &= SMA_SELECT_MASK;
if (hweight32(val) > 1)
return -EINVAL;
gpio = &bp->art_sma->map[sma_nr - 1].gpio;
spin_lock_irqsave(&bp->lock, flags);
reg = ioread32(gpio);
if (((reg >> 16) & val) == 0) {
err = -EOPNOTSUPP;
} else {
reg = (reg & 0xff00) | (val & 0xff);
iowrite32(reg, gpio);
}
spin_unlock_irqrestore(&bp->lock, flags);
return err;
}
static const struct ocp_sma_op ocp_art_sma_op = {
.tbl = { ptp_ocp_art_sma_in, ptp_ocp_art_sma_out },
.init = ptp_ocp_art_sma_init,
.get = ptp_ocp_art_sma_get,
.set_inputs = ptp_ocp_art_sma_set,
.set_output = ptp_ocp_art_sma_set,
};
/* ART specific board initializers; last "resource" registered. */
static int
ptp_ocp_art_board_init(struct ptp_ocp *bp, struct ocp_resource *r)
{
int err;
bp->flash_start = 0x1000000;
bp->eeprom_map = art_eeprom_map;
bp->fw_cap = OCP_CAP_BASIC;
bp->fw_version = ioread32(&bp->reg->version);
bp->fw_tag = 2;
bp->sma_op = &ocp_art_sma_op;
/* Enable MAC serial port during initialisation */
iowrite32(1, &bp->board_config->mro50_serial_activate);
ptp_ocp_sma_init(bp);
err = ptp_ocp_attr_group_add(bp, art_timecard_groups);
if (err)
return err;
return ptp_ocp_init_clock(bp);
}
static ssize_t
ptp_ocp_show_output(const struct ocp_selector *tbl, u32 val, char *buf,
int def_val)
{
const char *name;
ssize_t count;
count = sysfs_emit(buf, "OUT: ");
name = ptp_ocp_select_name_from_val(tbl, val);
if (!name)
name = ptp_ocp_select_name_from_val(tbl, def_val);
count += sysfs_emit_at(buf, count, "%s\n", name);
return count;
}
static ssize_t
ptp_ocp_show_inputs(const struct ocp_selector *tbl, u32 val, char *buf,
int def_val)
{
const char *name;
ssize_t count;
int i;
count = sysfs_emit(buf, "IN: ");
for (i = 0; tbl[i].name; i++) {
if (val & tbl[i].value) {
name = tbl[i].name;
count += sysfs_emit_at(buf, count, "%s ", name);
}
}
if (!val && def_val >= 0) {
name = ptp_ocp_select_name_from_val(tbl, def_val);
count += sysfs_emit_at(buf, count, "%s ", name);
}
if (count)
count--;
count += sysfs_emit_at(buf, count, "\n");
return count;
}
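/* Parse an SMA selector string: an optional "IN:" or "OUT:" prefix picks
 * the direction (defaulting to the connector's current mode), and the
 * remaining space-separated names are looked up in that direction's table
 * and OR'd together. For illustration only, a write through the timecard
 * class device might look like:
 *   echo "IN: TS1 PPS1" > /sys/class/timecard/ocp0/sma1
 */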
static int
sma_parse_inputs(const struct ocp_selector * const tbl[], const char *buf,
enum ptp_ocp_sma_mode *mode)
{
int idx, count, dir;
char **argv;
int ret;
argv = argv_split(GFP_KERNEL, buf, &count);
if (!argv)
return -ENOMEM;
ret = -EINVAL;
if (!count)
goto out;
idx = 0;
dir = *mode == SMA_MODE_IN ? 0 : 1;
if (!strcasecmp("IN:", argv[0])) {
dir = 0;
idx++;
}
if (!strcasecmp("OUT:", argv[0])) {
dir = 1;
idx++;
}
*mode = dir == 0 ? SMA_MODE_IN : SMA_MODE_OUT;
ret = 0;
for (; idx < count; idx++)
ret |= ptp_ocp_select_val_from_name(tbl[dir], argv[idx]);
if (ret < 0)
ret = -EINVAL;
out:
argv_free(argv);
return ret;
}
static ssize_t
ptp_ocp_sma_show(struct ptp_ocp *bp, int sma_nr, char *buf,
int default_in_val, int default_out_val)
{
struct ptp_ocp_sma_connector *sma = &bp->sma[sma_nr - 1];
const struct ocp_selector * const *tbl;
u32 val;
tbl = bp->sma_op->tbl;
val = ptp_ocp_sma_get(bp, sma_nr) & SMA_SELECT_MASK;
if (sma->mode == SMA_MODE_IN) {
if (sma->disabled)
val = SMA_DISABLE;
return ptp_ocp_show_inputs(tbl[0], val, buf, default_in_val);
}
return ptp_ocp_show_output(tbl[1], val, buf, default_out_val);
}
static ssize_t
sma1_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct ptp_ocp *bp = dev_get_drvdata(dev);
return ptp_ocp_sma_show(bp, 1, buf, 0, 1);
}
static ssize_t
sma2_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct ptp_ocp *bp = dev_get_drvdata(dev);
return ptp_ocp_sma_show(bp, 2, buf, -1, 1);
}
static ssize_t
sma3_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct ptp_ocp *bp = dev_get_drvdata(dev);
return ptp_ocp_sma_show(bp, 3, buf, -1, 0);
}
static ssize_t
sma4_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct ptp_ocp *bp = dev_get_drvdata(dev);
return ptp_ocp_sma_show(bp, 4, buf, -1, 1);
}
static int
ptp_ocp_sma_store(struct ptp_ocp *bp, const char *buf, int sma_nr)
{
struct ptp_ocp_sma_connector *sma = &bp->sma[sma_nr - 1];
enum ptp_ocp_sma_mode mode;
int val;
mode = sma->mode;
val = sma_parse_inputs(bp->sma_op->tbl, buf, &mode);
if (val < 0)
return val;
if (sma->fixed_dir && (mode != sma->mode || val & SMA_DISABLE))
return -EOPNOTSUPP;
if (sma->fixed_fcn) {
if (val != sma->default_fcn)
return -EOPNOTSUPP;
return 0;
}
sma->disabled = !!(val & SMA_DISABLE);
if (mode != sma->mode) {
if (mode == SMA_MODE_IN)
ptp_ocp_sma_set_output(bp, sma_nr, 0);
else
ptp_ocp_sma_set_inputs(bp, sma_nr, 0);
sma->mode = mode;
}
if (!sma->fixed_dir)
val |= SMA_ENABLE; /* add enable bit */
if (sma->disabled)
val = 0;
if (mode == SMA_MODE_IN)
val = ptp_ocp_sma_set_inputs(bp, sma_nr, val);
else
val = ptp_ocp_sma_set_output(bp, sma_nr, val);
return val;
}
static ssize_t
sma1_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct ptp_ocp *bp = dev_get_drvdata(dev);
int err;
err = ptp_ocp_sma_store(bp, buf, 1);
return err ? err : count;
}
static ssize_t
sma2_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct ptp_ocp *bp = dev_get_drvdata(dev);
int err;
err = ptp_ocp_sma_store(bp, buf, 2);
return err ? err : count;
}
static ssize_t
sma3_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct ptp_ocp *bp = dev_get_drvdata(dev);
int err;
err = ptp_ocp_sma_store(bp, buf, 3);
return err ? err : count;
}
static ssize_t
sma4_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct ptp_ocp *bp = dev_get_drvdata(dev);
int err;
err = ptp_ocp_sma_store(bp, buf, 4);
return err ? err : count;
}
static DEVICE_ATTR_RW(sma1);
static DEVICE_ATTR_RW(sma2);
static DEVICE_ATTR_RW(sma3);
static DEVICE_ATTR_RW(sma4);
static ssize_t
available_sma_inputs_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ptp_ocp *bp = dev_get_drvdata(dev);
return ptp_ocp_select_table_show(bp->sma_op->tbl[0], buf);
}
static DEVICE_ATTR_RO(available_sma_inputs);
static ssize_t
available_sma_outputs_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ptp_ocp *bp = dev_get_drvdata(dev);
return ptp_ocp_select_table_show(bp->sma_op->tbl[1], buf);
}
static DEVICE_ATTR_RO(available_sma_outputs);
#define EXT_ATTR_RO(_group, _name, _val) \
struct dev_ext_attribute dev_attr_##_group##_val##_##_name = \
{ __ATTR_RO(_name), (void *)_val }
#define EXT_ATTR_RW(_group, _name, _val) \
struct dev_ext_attribute dev_attr_##_group##_val##_##_name = \
{ __ATTR_RW(_name), (void *)_val }
#define to_ext_attr(x) container_of(x, struct dev_ext_attribute, attr)
/* period [duty [phase [polarity]]] */
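/* Arguments are space separated: period (ns) is required, followed by
 * optional duty (percent), phase (ns) and polarity (bool); omitted fields
 * keep their previous values, and a period of 0 stops the generator.
 * An illustrative write through the timecard class device (path and values
 * shown as an example only):
 *   echo 1000000000 50 > /sys/class/timecard/ocp0/gen1/signal
 */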
static ssize_t
signal_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct dev_ext_attribute *ea = to_ext_attr(attr);
struct ptp_ocp *bp = dev_get_drvdata(dev);
struct ptp_ocp_signal s = { };
int gen = (uintptr_t)ea->var;
int argc, err;
char **argv;
argv = argv_split(GFP_KERNEL, buf, &argc);
if (!argv)
return -ENOMEM;
err = -EINVAL;
s.duty = bp->signal[gen].duty;
s.phase = bp->signal[gen].phase;
s.period = bp->signal[gen].period;
s.polarity = bp->signal[gen].polarity;
switch (argc) {
case 4:
argc--;
err = kstrtobool(argv[argc], &s.polarity);
if (err)
goto out;
fallthrough;
case 3:
argc--;
err = kstrtou64(argv[argc], 0, &s.phase);
if (err)
goto out;
fallthrough;
case 2:
argc--;
err = kstrtoint(argv[argc], 0, &s.duty);
if (err)
goto out;
fallthrough;
case 1:
argc--;
err = kstrtou64(argv[argc], 0, &s.period);
if (err)
goto out;
break;
default:
goto out;
}
err = ptp_ocp_signal_set(bp, gen, &s);
if (err)
goto out;
err = ptp_ocp_signal_enable(bp->signal_out[gen], gen, s.period != 0);
out:
argv_free(argv);
return err ? err : count;
}
static ssize_t
signal_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct dev_ext_attribute *ea = to_ext_attr(attr);
struct ptp_ocp *bp = dev_get_drvdata(dev);
struct ptp_ocp_signal *signal;
struct timespec64 ts;
ssize_t count;
int i;
i = (uintptr_t)ea->var;
signal = &bp->signal[i];
count = sysfs_emit(buf, "%llu %d %llu %d", signal->period,
signal->duty, signal->phase, signal->polarity);
ts = ktime_to_timespec64(signal->start);
count += sysfs_emit_at(buf, count, " %ptT TAI\n", &ts);
return count;
}
static EXT_ATTR_RW(signal, signal, 0);
static EXT_ATTR_RW(signal, signal, 1);
static EXT_ATTR_RW(signal, signal, 2);
static EXT_ATTR_RW(signal, signal, 3);
static ssize_t
duty_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct dev_ext_attribute *ea = to_ext_attr(attr);
struct ptp_ocp *bp = dev_get_drvdata(dev);
int i = (uintptr_t)ea->var;
return sysfs_emit(buf, "%d\n", bp->signal[i].duty);
}
static EXT_ATTR_RO(signal, duty, 0);
static EXT_ATTR_RO(signal, duty, 1);
static EXT_ATTR_RO(signal, duty, 2);
static EXT_ATTR_RO(signal, duty, 3);
static ssize_t
period_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct dev_ext_attribute *ea = to_ext_attr(attr);
struct ptp_ocp *bp = dev_get_drvdata(dev);
int i = (uintptr_t)ea->var;
return sysfs_emit(buf, "%llu\n", bp->signal[i].period);
}
static EXT_ATTR_RO(signal, period, 0);
static EXT_ATTR_RO(signal, period, 1);
static EXT_ATTR_RO(signal, period, 2);
static EXT_ATTR_RO(signal, period, 3);
static ssize_t
phase_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct dev_ext_attribute *ea = to_ext_attr(attr);
struct ptp_ocp *bp = dev_get_drvdata(dev);
int i = (uintptr_t)ea->var;
return sysfs_emit(buf, "%llu\n", bp->signal[i].phase);
}
static EXT_ATTR_RO(signal, phase, 0);
static EXT_ATTR_RO(signal, phase, 1);
static EXT_ATTR_RO(signal, phase, 2);
static EXT_ATTR_RO(signal, phase, 3);
static ssize_t
polarity_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct dev_ext_attribute *ea = to_ext_attr(attr);
struct ptp_ocp *bp = dev_get_drvdata(dev);
int i = (uintptr_t)ea->var;
return sysfs_emit(buf, "%d\n", bp->signal[i].polarity);
}
static EXT_ATTR_RO(signal, polarity, 0);
static EXT_ATTR_RO(signal, polarity, 1);
static EXT_ATTR_RO(signal, polarity, 2);
static EXT_ATTR_RO(signal, polarity, 3);
static ssize_t
running_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct dev_ext_attribute *ea = to_ext_attr(attr);
struct ptp_ocp *bp = dev_get_drvdata(dev);
int i = (uintptr_t)ea->var;
return sysfs_emit(buf, "%d\n", bp->signal[i].running);
}
static EXT_ATTR_RO(signal, running, 0);
static EXT_ATTR_RO(signal, running, 1);
static EXT_ATTR_RO(signal, running, 2);
static EXT_ATTR_RO(signal, running, 3);
static ssize_t
start_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct dev_ext_attribute *ea = to_ext_attr(attr);
struct ptp_ocp *bp = dev_get_drvdata(dev);
int i = (uintptr_t)ea->var;
struct timespec64 ts;
ts = ktime_to_timespec64(bp->signal[i].start);
return sysfs_emit(buf, "%llu.%lu\n", ts.tv_sec, ts.tv_nsec);
}
static EXT_ATTR_RO(signal, start, 0);
static EXT_ATTR_RO(signal, start, 1);
static EXT_ATTR_RO(signal, start, 2);
static EXT_ATTR_RO(signal, start, 3);
static ssize_t
seconds_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct dev_ext_attribute *ea = to_ext_attr(attr);
struct ptp_ocp *bp = dev_get_drvdata(dev);
int idx = (uintptr_t)ea->var;
u32 val;
int err;
err = kstrtou32(buf, 0, &val);
if (err)
return err;
if (val > 0xff)
return -EINVAL;
if (val)
val = (val << 8) | 0x1;
iowrite32(val, &bp->freq_in[idx]->ctrl);
return count;
}
static ssize_t
seconds_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct dev_ext_attribute *ea = to_ext_attr(attr);
struct ptp_ocp *bp = dev_get_drvdata(dev);
int idx = (uintptr_t)ea->var;
u32 val;
val = ioread32(&bp->freq_in[idx]->ctrl);
if (val & 1)
val = (val >> 8) & 0xff;
else
val = 0;
return sysfs_emit(buf, "%u\n", val);
}
static EXT_ATTR_RW(freq, seconds, 0);
static EXT_ATTR_RW(freq, seconds, 1);
static EXT_ATTR_RW(freq, seconds, 2);
static EXT_ATTR_RW(freq, seconds, 3);
static ssize_t
frequency_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct dev_ext_attribute *ea = to_ext_attr(attr);
struct ptp_ocp *bp = dev_get_drvdata(dev);
int idx = (uintptr_t)ea->var;
u32 val;
val = ioread32(&bp->freq_in[idx]->status);
if (val & FREQ_STATUS_ERROR)
return sysfs_emit(buf, "error\n");
if (val & FREQ_STATUS_OVERRUN)
return sysfs_emit(buf, "overrun\n");
if (val & FREQ_STATUS_VALID)
return sysfs_emit(buf, "%lu\n", val & FREQ_STATUS_MASK);
return 0;
}
static EXT_ATTR_RO(freq, frequency, 0);
static EXT_ATTR_RO(freq, frequency, 1);
static EXT_ATTR_RO(freq, frequency, 2);
static EXT_ATTR_RO(freq, frequency, 3);
static ssize_t
serialnum_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct ptp_ocp *bp = dev_get_drvdata(dev);
if (!bp->has_eeprom_data)
ptp_ocp_read_eeprom(bp);
return sysfs_emit(buf, "%pM\n", bp->serial);
}
static DEVICE_ATTR_RO(serialnum);
static ssize_t
gnss_sync_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct ptp_ocp *bp = dev_get_drvdata(dev);
ssize_t ret;
if (bp->gnss_lost)
ret = sysfs_emit(buf, "LOST @ %ptT\n", &bp->gnss_lost);
else
ret = sysfs_emit(buf, "SYNC\n");
return ret;
}
static DEVICE_ATTR_RO(gnss_sync);
static ssize_t
utc_tai_offset_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ptp_ocp *bp = dev_get_drvdata(dev);
return sysfs_emit(buf, "%d\n", bp->utc_tai_offset);
}
static ssize_t
utc_tai_offset_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct ptp_ocp *bp = dev_get_drvdata(dev);
int err;
u32 val;
err = kstrtou32(buf, 0, &val);
if (err)
return err;
ptp_ocp_utc_distribute(bp, val);
return count;
}
static DEVICE_ATTR_RW(utc_tai_offset);
static ssize_t
ts_window_adjust_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ptp_ocp *bp = dev_get_drvdata(dev);
return sysfs_emit(buf, "%d\n", bp->ts_window_adjust);
}
static ssize_t
ts_window_adjust_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct ptp_ocp *bp = dev_get_drvdata(dev);
int err;
u32 val;
err = kstrtou32(buf, 0, &val);
if (err)
return err;
bp->ts_window_adjust = val;
return count;
}
static DEVICE_ATTR_RW(ts_window_adjust);
static ssize_t
irig_b_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct ptp_ocp *bp = dev_get_drvdata(dev);
u32 val;
val = ioread32(&bp->irig_out->ctrl);
val = (val >> 16) & 0x07;
return sysfs_emit(buf, "%d\n", val);
}
static ssize_t
irig_b_mode_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct ptp_ocp *bp = dev_get_drvdata(dev);
unsigned long flags;
int err;
u32 reg;
u8 val;
err = kstrtou8(buf, 0, &val);
if (err)
return err;
if (val > 7)
return -EINVAL;
reg = ((val & 0x7) << 16);
spin_lock_irqsave(&bp->lock, flags);
iowrite32(0, &bp->irig_out->ctrl); /* disable */
iowrite32(reg, &bp->irig_out->ctrl); /* change mode */
iowrite32(reg | IRIG_M_CTRL_ENABLE, &bp->irig_out->ctrl);
spin_unlock_irqrestore(&bp->lock, flags);
return count;
}
static DEVICE_ATTR_RW(irig_b_mode);
static ssize_t
clock_source_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct ptp_ocp *bp = dev_get_drvdata(dev);
const char *p;
u32 select;
select = ioread32(&bp->reg->select);
p = ptp_ocp_select_name_from_val(ptp_ocp_clock, select >> 16);
return sysfs_emit(buf, "%s\n", p);
}
static ssize_t
clock_source_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct ptp_ocp *bp = dev_get_drvdata(dev);
unsigned long flags;
int val;
val = ptp_ocp_select_val_from_name(ptp_ocp_clock, buf);
if (val < 0)
return val;
spin_lock_irqsave(&bp->lock, flags);
iowrite32(val, &bp->reg->select);
spin_unlock_irqrestore(&bp->lock, flags);
return count;
}
static DEVICE_ATTR_RW(clock_source);
static ssize_t
available_clock_sources_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return ptp_ocp_select_table_show(ptp_ocp_clock, buf);
}
static DEVICE_ATTR_RO(available_clock_sources);
static ssize_t
clock_status_drift_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ptp_ocp *bp = dev_get_drvdata(dev);
u32 val;
int res;
val = ioread32(&bp->reg->status_drift);
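/* The drift register is sign-magnitude encoded: bit 31 carries the sign, the low 31 bits the magnitude. */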
res = (val & ~INT_MAX) ? -1 : 1;
res *= (val & INT_MAX);
return sysfs_emit(buf, "%d\n", res);
}
static DEVICE_ATTR_RO(clock_status_drift);
static ssize_t
clock_status_offset_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ptp_ocp *bp = dev_get_drvdata(dev);
u32 val;
int res;
val = ioread32(&bp->reg->status_offset);
res = (val & ~INT_MAX) ? -1 : 1;
res *= (val & INT_MAX);
return sysfs_emit(buf, "%d\n", res);
}
static DEVICE_ATTR_RO(clock_status_offset);
static ssize_t
tod_correction_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ptp_ocp *bp = dev_get_drvdata(dev);
u32 val;
int res;
val = ioread32(&bp->tod->adj_sec);
res = (val & ~INT_MAX) ? -1 : 1;
res *= (val & INT_MAX);
return sysfs_emit(buf, "%d\n", res);
}
static ssize_t
tod_correction_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct ptp_ocp *bp = dev_get_drvdata(dev);
unsigned long flags;
int err, res;
u32 val = 0;
err = kstrtos32(buf, 0, &res);
if (err)
return err;
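/* The adjustment register is sign-magnitude encoded: bit 31 marks a negative correction. */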
if (res < 0) {
res *= -1;
val |= BIT(31);
}
val |= res;
spin_lock_irqsave(&bp->lock, flags);
iowrite32(val, &bp->tod->adj_sec);
spin_unlock_irqrestore(&bp->lock, flags);
return count;
}
static DEVICE_ATTR_RW(tod_correction);
#define _DEVICE_SIGNAL_GROUP_ATTRS(_nr) \
static struct attribute *fb_timecard_signal##_nr##_attrs[] = { \
&dev_attr_signal##_nr##_signal.attr.attr, \
&dev_attr_signal##_nr##_duty.attr.attr, \
&dev_attr_signal##_nr##_phase.attr.attr, \
&dev_attr_signal##_nr##_period.attr.attr, \
&dev_attr_signal##_nr##_polarity.attr.attr, \
&dev_attr_signal##_nr##_running.attr.attr, \
&dev_attr_signal##_nr##_start.attr.attr, \
NULL, \
}
#define DEVICE_SIGNAL_GROUP(_name, _nr) \
_DEVICE_SIGNAL_GROUP_ATTRS(_nr); \
static const struct attribute_group \
fb_timecard_signal##_nr##_group = { \
.name = #_name, \
.attrs = fb_timecard_signal##_nr##_attrs, \
}
DEVICE_SIGNAL_GROUP(gen1, 0);
DEVICE_SIGNAL_GROUP(gen2, 1);
DEVICE_SIGNAL_GROUP(gen3, 2);
DEVICE_SIGNAL_GROUP(gen4, 3);
#define _DEVICE_FREQ_GROUP_ATTRS(_nr) \
static struct attribute *fb_timecard_freq##_nr##_attrs[] = { \
&dev_attr_freq##_nr##_seconds.attr.attr, \
&dev_attr_freq##_nr##_frequency.attr.attr, \
NULL, \
}
#define DEVICE_FREQ_GROUP(_name, _nr) \
_DEVICE_FREQ_GROUP_ATTRS(_nr); \
static const struct attribute_group \
fb_timecard_freq##_nr##_group = { \
.name = #_name, \
.attrs = fb_timecard_freq##_nr##_attrs, \
}
DEVICE_FREQ_GROUP(freq1, 0);
DEVICE_FREQ_GROUP(freq2, 1);
DEVICE_FREQ_GROUP(freq3, 2);
DEVICE_FREQ_GROUP(freq4, 3);
static ssize_t
disciplining_config_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct ptp_ocp *bp = dev_get_drvdata(kobj_to_dev(kobj));
size_t size = OCP_ART_CONFIG_SIZE;
struct nvmem_device *nvmem;
ssize_t err;
nvmem = ptp_ocp_nvmem_device_get(bp, NULL);
if (IS_ERR(nvmem))
return PTR_ERR(nvmem);
if (off > size) {
err = 0;
goto out;
}
if (off + count > size)
count = size - off;
// the configuration is in the very beginning of the EEPROM
err = nvmem_device_read(nvmem, off, count, buf);
if (err != count) {
err = -EFAULT;
goto out;
}
out:
ptp_ocp_nvmem_device_put(&nvmem);
return err;
}
static ssize_t
disciplining_config_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct ptp_ocp *bp = dev_get_drvdata(kobj_to_dev(kobj));
struct nvmem_device *nvmem;
ssize_t err;
/* Allow write of the whole area only */
if (off || count != OCP_ART_CONFIG_SIZE)
return -EFAULT;
nvmem = ptp_ocp_nvmem_device_get(bp, NULL);
if (IS_ERR(nvmem))
return PTR_ERR(nvmem);
err = nvmem_device_write(nvmem, 0x00, count, buf);
if (err != count)
err = -EFAULT;
ptp_ocp_nvmem_device_put(&nvmem);
return err;
}
static BIN_ATTR_RW(disciplining_config, OCP_ART_CONFIG_SIZE);
static ssize_t
temperature_table_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct ptp_ocp *bp = dev_get_drvdata(kobj_to_dev(kobj));
size_t size = OCP_ART_TEMP_TABLE_SIZE;
struct nvmem_device *nvmem;
ssize_t err;
nvmem = ptp_ocp_nvmem_device_get(bp, NULL);
if (IS_ERR(nvmem))
return PTR_ERR(nvmem);
if (off > size) {
err = 0;
goto out;
}
if (off + count > size)
count = size - off;
// the temperature table is stored at offset 0x90 in the EEPROM
err = nvmem_device_read(nvmem, 0x90 + off, count, buf);
if (err != count) {
err = -EFAULT;
goto out;
}
out:
ptp_ocp_nvmem_device_put(&nvmem);
return err;
}
static ssize_t
temperature_table_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct ptp_ocp *bp = dev_get_drvdata(kobj_to_dev(kobj));
struct nvmem_device *nvmem;
ssize_t err;
/* Allow write of the whole area only */
if (off || count != OCP_ART_TEMP_TABLE_SIZE)
return -EFAULT;
nvmem = ptp_ocp_nvmem_device_get(bp, NULL);
if (IS_ERR(nvmem))
return PTR_ERR(nvmem);
err = nvmem_device_write(nvmem, 0x90, count, buf);
if (err != count)
err = -EFAULT;
ptp_ocp_nvmem_device_put(&nvmem);
return err;
}
static BIN_ATTR_RW(temperature_table, OCP_ART_TEMP_TABLE_SIZE);
static struct attribute *fb_timecard_attrs[] = {
&dev_attr_serialnum.attr,
&dev_attr_gnss_sync.attr,
&dev_attr_clock_source.attr,
&dev_attr_available_clock_sources.attr,
&dev_attr_sma1.attr,
&dev_attr_sma2.attr,
&dev_attr_sma3.attr,
&dev_attr_sma4.attr,
&dev_attr_available_sma_inputs.attr,
&dev_attr_available_sma_outputs.attr,
&dev_attr_clock_status_drift.attr,
&dev_attr_clock_status_offset.attr,
&dev_attr_irig_b_mode.attr,
&dev_attr_utc_tai_offset.attr,
&dev_attr_ts_window_adjust.attr,
&dev_attr_tod_correction.attr,
NULL,
};
static const struct attribute_group fb_timecard_group = {
.attrs = fb_timecard_attrs,
};
static const struct ocp_attr_group fb_timecard_groups[] = {
{ .cap = OCP_CAP_BASIC, .group = &fb_timecard_group },
{ .cap = OCP_CAP_SIGNAL, .group = &fb_timecard_signal0_group },
{ .cap = OCP_CAP_SIGNAL, .group = &fb_timecard_signal1_group },
{ .cap = OCP_CAP_SIGNAL, .group = &fb_timecard_signal2_group },
{ .cap = OCP_CAP_SIGNAL, .group = &fb_timecard_signal3_group },
{ .cap = OCP_CAP_FREQ, .group = &fb_timecard_freq0_group },
{ .cap = OCP_CAP_FREQ, .group = &fb_timecard_freq1_group },
{ .cap = OCP_CAP_FREQ, .group = &fb_timecard_freq2_group },
{ .cap = OCP_CAP_FREQ, .group = &fb_timecard_freq3_group },
{ },
};
static struct attribute *art_timecard_attrs[] = {
&dev_attr_serialnum.attr,
&dev_attr_clock_source.attr,
&dev_attr_available_clock_sources.attr,
&dev_attr_utc_tai_offset.attr,
&dev_attr_ts_window_adjust.attr,
&dev_attr_sma1.attr,
&dev_attr_sma2.attr,
&dev_attr_sma3.attr,
&dev_attr_sma4.attr,
&dev_attr_available_sma_inputs.attr,
&dev_attr_available_sma_outputs.attr,
NULL,
};
static struct bin_attribute *bin_art_timecard_attrs[] = {
&bin_attr_disciplining_config,
&bin_attr_temperature_table,
NULL,
};
static const struct attribute_group art_timecard_group = {
.attrs = art_timecard_attrs,
.bin_attrs = bin_art_timecard_attrs,
};
static const struct ocp_attr_group art_timecard_groups[] = {
{ .cap = OCP_CAP_BASIC, .group = &art_timecard_group },
{ },
};
static void
gpio_input_map(char *buf, struct ptp_ocp *bp, u16 map[][2], u16 bit,
const char *def)
{
int i;
for (i = 0; i < 4; i++) {
if (bp->sma[i].mode != SMA_MODE_IN)
continue;
if (map[i][0] & (1 << bit)) {
sprintf(buf, "sma%d", i + 1);
return;
}
}
if (!def)
def = "----";
strcpy(buf, def);
}
static void
gpio_output_map(char *buf, struct ptp_ocp *bp, u16 map[][2], u16 bit)
{
char *ans = buf;
int i;
strcpy(ans, "----");
for (i = 0; i < 4; i++) {
if (bp->sma[i].mode != SMA_MODE_OUT)
continue;
if (map[i][1] & (1 << bit))
ans += sprintf(ans, "sma%d ", i + 1);
}
}
static void
_signal_summary_show(struct seq_file *s, struct ptp_ocp *bp, int nr)
{
struct signal_reg __iomem *reg = bp->signal_out[nr]->mem;
struct ptp_ocp_signal *signal = &bp->signal[nr];
char label[8];
bool on;
u32 val;
if (!signal)
return;
on = signal->running;
sprintf(label, "GEN%d", nr + 1);
seq_printf(s, "%7s: %s, period:%llu duty:%d%% phase:%llu pol:%d",
label, on ? " ON" : "OFF",
signal->period, signal->duty, signal->phase,
signal->polarity);
val = ioread32(®->enable);
seq_printf(s, " [%x", val);
val = ioread32(®->status);
seq_printf(s, " %x]", val);
seq_printf(s, " start:%llu\n", signal->start);
}
static void
_frequency_summary_show(struct seq_file *s, int nr,
struct frequency_reg __iomem *reg)
{
char label[8];
bool on;
u32 val;
if (!reg)
return;
sprintf(label, "FREQ%d", nr + 1);
val = ioread32(®->ctrl);
on = val & 1;
val = (val >> 8) & 0xff;
seq_printf(s, "%7s: %s, sec:%u",
label,
on ? " ON" : "OFF",
val);
val = ioread32(®->status);
if (val & FREQ_STATUS_ERROR)
seq_printf(s, ", error");
if (val & FREQ_STATUS_OVERRUN)
seq_printf(s, ", overrun");
if (val & FREQ_STATUS_VALID)
seq_printf(s, ", freq %lu Hz", val & FREQ_STATUS_MASK);
seq_printf(s, " reg:%x\n", val);
}
static int
ptp_ocp_summary_show(struct seq_file *s, void *data)
{
struct device *dev = s->private;
struct ptp_system_timestamp sts;
struct ts_reg __iomem *ts_reg;
char *buf, *src, *mac_src;
struct timespec64 ts;
struct ptp_ocp *bp;
u16 sma_val[4][2];
u32 ctrl, val;
bool on, map;
int i;
buf = (char *)__get_free_page(GFP_KERNEL);
if (!buf)
return -ENOMEM;
bp = dev_get_drvdata(dev);
seq_printf(s, "%7s: /dev/ptp%d\n", "PTP", ptp_clock_index(bp->ptp));
if (bp->gnss_port.line != -1)
seq_printf(s, "%7s: /dev/ttyS%d\n", "GNSS1",
bp->gnss_port.line);
if (bp->gnss2_port.line != -1)
seq_printf(s, "%7s: /dev/ttyS%d\n", "GNSS2",
bp->gnss2_port.line);
if (bp->mac_port.line != -1)
seq_printf(s, "%7s: /dev/ttyS%d\n", "MAC", bp->mac_port.line);
if (bp->nmea_port.line != -1)
seq_printf(s, "%7s: /dev/ttyS%d\n", "NMEA", bp->nmea_port.line);
memset(sma_val, 0xff, sizeof(sma_val));
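/* Each selector register packs two 16-bit SMA maps; sma_val[i][0] holds the input map and sma_val[i][1] the output map for sma(i+1). */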
if (bp->sma_map1) {
u32 reg;
reg = ioread32(&bp->sma_map1->gpio1);
sma_val[0][0] = reg & 0xffff;
sma_val[1][0] = reg >> 16;
reg = ioread32(&bp->sma_map1->gpio2);
sma_val[2][1] = reg & 0xffff;
sma_val[3][1] = reg >> 16;
reg = ioread32(&bp->sma_map2->gpio1);
sma_val[2][0] = reg & 0xffff;
sma_val[3][0] = reg >> 16;
reg = ioread32(&bp->sma_map2->gpio2);
sma_val[0][1] = reg & 0xffff;
sma_val[1][1] = reg >> 16;
}
sma1_show(dev, NULL, buf);
seq_printf(s, " sma1: %04x,%04x %s",
sma_val[0][0], sma_val[0][1], buf);
sma2_show(dev, NULL, buf);
seq_printf(s, " sma2: %04x,%04x %s",
sma_val[1][0], sma_val[1][1], buf);
sma3_show(dev, NULL, buf);
seq_printf(s, " sma3: %04x,%04x %s",
sma_val[2][0], sma_val[2][1], buf);
sma4_show(dev, NULL, buf);
seq_printf(s, " sma4: %04x,%04x %s",
sma_val[3][0], sma_val[3][1], buf);
if (bp->ts0) {
ts_reg = bp->ts0->mem;
on = ioread32(&ts_reg->enable);
src = "GNSS1";
seq_printf(s, "%7s: %s, src: %s\n", "TS0",
on ? " ON" : "OFF", src);
}
if (bp->ts1) {
ts_reg = bp->ts1->mem;
on = ioread32(&ts_reg->enable);
gpio_input_map(buf, bp, sma_val, 2, NULL);
seq_printf(s, "%7s: %s, src: %s\n", "TS1",
on ? " ON" : "OFF", buf);
}
if (bp->ts2) {
ts_reg = bp->ts2->mem;
on = ioread32(&ts_reg->enable);
gpio_input_map(buf, bp, sma_val, 3, NULL);
seq_printf(s, "%7s: %s, src: %s\n", "TS2",
on ? " ON" : "OFF", buf);
}
if (bp->ts3) {
ts_reg = bp->ts3->mem;
on = ioread32(&ts_reg->enable);
gpio_input_map(buf, bp, sma_val, 6, NULL);
seq_printf(s, "%7s: %s, src: %s\n", "TS3",
on ? " ON" : "OFF", buf);
}
if (bp->ts4) {
ts_reg = bp->ts4->mem;
on = ioread32(&ts_reg->enable);
gpio_input_map(buf, bp, sma_val, 7, NULL);
seq_printf(s, "%7s: %s, src: %s\n", "TS4",
on ? " ON" : "OFF", buf);
}
if (bp->pps) {
ts_reg = bp->pps->mem;
src = "PHC";
on = ioread32(&ts_reg->enable);
map = !!(bp->pps_req_map & OCP_REQ_TIMESTAMP);
seq_printf(s, "%7s: %s, src: %s\n", "TS5",
on && map ? " ON" : "OFF", src);
map = !!(bp->pps_req_map & OCP_REQ_PPS);
seq_printf(s, "%7s: %s, src: %s\n", "PPS",
on && map ? " ON" : "OFF", src);
}
if (bp->fw_cap & OCP_CAP_SIGNAL)
for (i = 0; i < 4; i++)
_signal_summary_show(s, bp, i);
if (bp->fw_cap & OCP_CAP_FREQ)
for (i = 0; i < 4; i++)
_frequency_summary_show(s, i, bp->freq_in[i]);
if (bp->irig_out) {
ctrl = ioread32(&bp->irig_out->ctrl);
on = ctrl & IRIG_M_CTRL_ENABLE;
val = ioread32(&bp->irig_out->status);
gpio_output_map(buf, bp, sma_val, 4);
seq_printf(s, "%7s: %s, error: %d, mode %d, out: %s\n", "IRIG",
on ? " ON" : "OFF", val, (ctrl >> 16), buf);
}
if (bp->irig_in) {
on = ioread32(&bp->irig_in->ctrl) & IRIG_S_CTRL_ENABLE;
val = ioread32(&bp->irig_in->status);
gpio_input_map(buf, bp, sma_val, 4, NULL);
seq_printf(s, "%7s: %s, error: %d, src: %s\n", "IRIG in",
on ? " ON" : "OFF", val, buf);
}
if (bp->dcf_out) {
on = ioread32(&bp->dcf_out->ctrl) & DCF_M_CTRL_ENABLE;
val = ioread32(&bp->dcf_out->status);
gpio_output_map(buf, bp, sma_val, 5);
seq_printf(s, "%7s: %s, error: %d, out: %s\n", "DCF",
on ? " ON" : "OFF", val, buf);
}
if (bp->dcf_in) {
on = ioread32(&bp->dcf_in->ctrl) & DCF_S_CTRL_ENABLE;
val = ioread32(&bp->dcf_in->status);
gpio_input_map(buf, bp, sma_val, 5, NULL);
seq_printf(s, "%7s: %s, error: %d, src: %s\n", "DCF in",
on ? " ON" : "OFF", val, buf);
}
if (bp->nmea_out) {
on = ioread32(&bp->nmea_out->ctrl) & 1;
val = ioread32(&bp->nmea_out->status);
seq_printf(s, "%7s: %s, error: %d\n", "NMEA",
on ? " ON" : "OFF", val);
}
/* compute src for PPS1, used below. */
if (bp->pps_select) {
val = ioread32(&bp->pps_select->gpio1);
src = &buf[80];
mac_src = "GNSS1";
if (val & 0x01) {
gpio_input_map(src, bp, sma_val, 0, NULL);
mac_src = src;
} else if (val & 0x02) {
src = "MAC";
} else if (val & 0x04) {
src = "GNSS1";
} else {
src = "----";
mac_src = src;
}
} else {
src = "?";
mac_src = src;
}
seq_printf(s, "MAC PPS1 src: %s\n", mac_src);
gpio_input_map(buf, bp, sma_val, 1, "GNSS2");
seq_printf(s, "MAC PPS2 src: %s\n", buf);
/* assumes automatic switchover/selection */
val = ioread32(&bp->reg->select);
switch (val >> 16) {
case 0:
sprintf(buf, "----");
break;
case 2:
sprintf(buf, "IRIG");
break;
case 3:
sprintf(buf, "%s via PPS1", src);
break;
case 6:
sprintf(buf, "DCF");
break;
default:
strcpy(buf, "unknown");
break;
}
val = ioread32(&bp->reg->status);
seq_printf(s, "%7s: %s, state: %s\n", "PHC src", buf,
val & OCP_STATUS_IN_SYNC ? "sync" : "unsynced");
if (!ptp_ocp_gettimex(&bp->ptp_info, &ts, &sts)) {
struct timespec64 sys_ts;
s64 pre_ns, post_ns, ns;
pre_ns = timespec64_to_ns(&sts.pre_ts);
post_ns = timespec64_to_ns(&sts.post_ts);
ns = (pre_ns + post_ns) / 2;
ns += (s64)bp->utc_tai_offset * NSEC_PER_SEC;
sys_ts = ns_to_timespec64(ns);
seq_printf(s, "%7s: %lld.%ld == %ptT TAI\n", "PHC",
ts.tv_sec, ts.tv_nsec, &ts);
seq_printf(s, "%7s: %lld.%ld == %ptT UTC offset %d\n", "SYS",
sys_ts.tv_sec, sys_ts.tv_nsec, &sys_ts,
bp->utc_tai_offset);
seq_printf(s, "%7s: PHC:SYS offset: %lld window: %lld\n", "",
timespec64_to_ns(&ts) - ns,
post_ns - pre_ns);
}
free_page((unsigned long)buf);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(ptp_ocp_summary);
static int
ptp_ocp_tod_status_show(struct seq_file *s, void *data)
{
struct device *dev = s->private;
struct ptp_ocp *bp;
u32 val;
int idx;
bp = dev_get_drvdata(dev);
val = ioread32(&bp->tod->ctrl);
if (!(val & TOD_CTRL_ENABLE)) {
seq_printf(s, "TOD Slave disabled\n");
return 0;
}
seq_printf(s, "TOD Slave enabled, Control Register 0x%08X\n", val);
idx = val & TOD_CTRL_PROTOCOL ? 4 : 0;
idx += (val >> 16) & 3;
seq_printf(s, "Protocol %s\n", ptp_ocp_tod_proto_name(idx));
idx = (val >> TOD_CTRL_GNSS_SHIFT) & TOD_CTRL_GNSS_MASK;
seq_printf(s, "GNSS %s\n", ptp_ocp_tod_gnss_name(idx));
val = ioread32(&bp->tod->version);
seq_printf(s, "TOD Version %d.%d.%d\n",
val >> 24, (val >> 16) & 0xff, val & 0xffff);
val = ioread32(&bp->tod->status);
seq_printf(s, "Status register: 0x%08X\n", val);
val = ioread32(&bp->tod->adj_sec);
idx = (val & ~INT_MAX) ? -1 : 1;
idx *= (val & INT_MAX);
seq_printf(s, "Correction seconds: %d\n", idx);
val = ioread32(&bp->tod->utc_status);
seq_printf(s, "UTC status register: 0x%08X\n", val);
seq_printf(s, "UTC offset: %ld valid:%d\n",
val & TOD_STATUS_UTC_MASK, val & TOD_STATUS_UTC_VALID ? 1 : 0);
seq_printf(s, "Leap second info valid:%d, Leap second announce %d\n",
val & TOD_STATUS_LEAP_VALID ? 1 : 0,
val & TOD_STATUS_LEAP_ANNOUNCE ? 1 : 0);
val = ioread32(&bp->tod->leap);
seq_printf(s, "Time to next leap second (in sec): %d\n", (s32) val);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(ptp_ocp_tod_status);
static struct dentry *ptp_ocp_debugfs_root;
static void
ptp_ocp_debugfs_add_device(struct ptp_ocp *bp)
{
struct dentry *d;
d = debugfs_create_dir(dev_name(&bp->dev), ptp_ocp_debugfs_root);
bp->debug_root = d;
debugfs_create_file("summary", 0444, bp->debug_root,
&bp->dev, &ptp_ocp_summary_fops);
if (bp->tod)
debugfs_create_file("tod_status", 0444, bp->debug_root,
&bp->dev, &ptp_ocp_tod_status_fops);
}
static void
ptp_ocp_debugfs_remove_device(struct ptp_ocp *bp)
{
debugfs_remove_recursive(bp->debug_root);
}
static void
ptp_ocp_debugfs_init(void)
{
ptp_ocp_debugfs_root = debugfs_create_dir("timecard", NULL);
}
static void
ptp_ocp_debugfs_fini(void)
{
debugfs_remove_recursive(ptp_ocp_debugfs_root);
}
static void
ptp_ocp_dev_release(struct device *dev)
{
struct ptp_ocp *bp = dev_get_drvdata(dev);
mutex_lock(&ptp_ocp_lock);
idr_remove(&ptp_ocp_idr, bp->id);
mutex_unlock(&ptp_ocp_lock);
}
static int
ptp_ocp_device_init(struct ptp_ocp *bp, struct pci_dev *pdev)
{
int err;
mutex_lock(&ptp_ocp_lock);
err = idr_alloc(&ptp_ocp_idr, bp, 0, 0, GFP_KERNEL);
mutex_unlock(&ptp_ocp_lock);
if (err < 0) {
dev_err(&pdev->dev, "idr_alloc failed: %d\n", err);
return err;
}
bp->id = err;
bp->ptp_info = ptp_ocp_clock_info;
spin_lock_init(&bp->lock);
bp->gnss_port.line = -1;
bp->gnss2_port.line = -1;
bp->mac_port.line = -1;
bp->nmea_port.line = -1;
bp->pdev = pdev;
device_initialize(&bp->dev);
dev_set_name(&bp->dev, "ocp%d", bp->id);
bp->dev.class = &timecard_class;
bp->dev.parent = &pdev->dev;
bp->dev.release = ptp_ocp_dev_release;
dev_set_drvdata(&bp->dev, bp);
err = device_add(&bp->dev);
if (err) {
dev_err(&bp->dev, "device add failed: %d\n", err);
goto out;
}
pci_set_drvdata(pdev, bp);
return 0;
out:
ptp_ocp_dev_release(&bp->dev);
put_device(&bp->dev);
return err;
}
static void
ptp_ocp_symlink(struct ptp_ocp *bp, struct device *child, const char *link)
{
struct device *dev = &bp->dev;
if (sysfs_create_link(&dev->kobj, &child->kobj, link))
dev_err(dev, "%s symlink failed\n", link);
}
static void
ptp_ocp_link_child(struct ptp_ocp *bp, const char *name, const char *link)
{
struct device *dev, *child;
dev = &bp->pdev->dev;
child = device_find_child_by_name(dev, name);
if (!child) {
dev_err(dev, "Could not find device %s\n", name);
return;
}
ptp_ocp_symlink(bp, child, link);
put_device(child);
}
static int
ptp_ocp_complete(struct ptp_ocp *bp)
{
struct pps_device *pps;
char buf[32];
if (bp->gnss_port.line != -1) {
sprintf(buf, "ttyS%d", bp->gnss_port.line);
ptp_ocp_link_child(bp, buf, "ttyGNSS");
}
if (bp->gnss2_port.line != -1) {
sprintf(buf, "ttyS%d", bp->gnss2_port.line);
ptp_ocp_link_child(bp, buf, "ttyGNSS2");
}
if (bp->mac_port.line != -1) {
sprintf(buf, "ttyS%d", bp->mac_port.line);
ptp_ocp_link_child(bp, buf, "ttyMAC");
}
if (bp->nmea_port.line != -1) {
sprintf(buf, "ttyS%d", bp->nmea_port.line);
ptp_ocp_link_child(bp, buf, "ttyNMEA");
}
sprintf(buf, "ptp%d", ptp_clock_index(bp->ptp));
ptp_ocp_link_child(bp, buf, "ptp");
pps = pps_lookup_dev(bp->ptp);
if (pps)
ptp_ocp_symlink(bp, pps->dev, "pps");
ptp_ocp_debugfs_add_device(bp);
return 0;
}
static void
ptp_ocp_phc_info(struct ptp_ocp *bp)
{
struct timespec64 ts;
u32 version, select;
bool sync;
version = ioread32(&bp->reg->version);
select = ioread32(&bp->reg->select);
dev_info(&bp->pdev->dev, "Version %d.%d.%d, clock %s, device ptp%d\n",
version >> 24, (version >> 16) & 0xff, version & 0xffff,
ptp_ocp_select_name_from_val(ptp_ocp_clock, select >> 16),
ptp_clock_index(bp->ptp));
sync = ioread32(&bp->reg->status) & OCP_STATUS_IN_SYNC;
if (!ptp_ocp_gettimex(&bp->ptp_info, &ts, NULL))
dev_info(&bp->pdev->dev, "Time: %lld.%ld, %s\n",
ts.tv_sec, ts.tv_nsec,
sync ? "in-sync" : "UNSYNCED");
}
static void
ptp_ocp_serial_info(struct device *dev, const char *name, int port, int baud)
{
if (port != -1)
dev_info(dev, "%5s: /dev/ttyS%-2d @ %6d\n", name, port, baud);
}
static void
ptp_ocp_info(struct ptp_ocp *bp)
{
static int nmea_baud[] = {
1200, 2400, 4800, 9600, 19200, 38400,
57600, 115200, 230400, 460800, 921600,
1000000, 2000000
};
struct device *dev = &bp->pdev->dev;
u32 reg;
ptp_ocp_phc_info(bp);
ptp_ocp_serial_info(dev, "GNSS", bp->gnss_port.line,
bp->gnss_port.baud);
ptp_ocp_serial_info(dev, "GNSS2", bp->gnss2_port.line,
bp->gnss2_port.baud);
ptp_ocp_serial_info(dev, "MAC", bp->mac_port.line, bp->mac_port.baud);
if (bp->nmea_out && bp->nmea_port.line != -1) {
bp->nmea_port.baud = -1;
reg = ioread32(&bp->nmea_out->uart_baud);
if (reg < ARRAY_SIZE(nmea_baud))
bp->nmea_port.baud = nmea_baud[reg];
ptp_ocp_serial_info(dev, "NMEA", bp->nmea_port.line,
bp->nmea_port.baud);
}
}
static void
ptp_ocp_detach_sysfs(struct ptp_ocp *bp)
{
struct device *dev = &bp->dev;
sysfs_remove_link(&dev->kobj, "ttyGNSS");
sysfs_remove_link(&dev->kobj, "ttyGNSS2");
sysfs_remove_link(&dev->kobj, "ttyMAC");
sysfs_remove_link(&dev->kobj, "ptp");
sysfs_remove_link(&dev->kobj, "pps");
}
static void
ptp_ocp_detach(struct ptp_ocp *bp)
{
int i;
ptp_ocp_debugfs_remove_device(bp);
ptp_ocp_detach_sysfs(bp);
ptp_ocp_attr_group_del(bp);
if (timer_pending(&bp->watchdog))
del_timer_sync(&bp->watchdog);
if (bp->ts0)
ptp_ocp_unregister_ext(bp->ts0);
if (bp->ts1)
ptp_ocp_unregister_ext(bp->ts1);
if (bp->ts2)
ptp_ocp_unregister_ext(bp->ts2);
if (bp->ts3)
ptp_ocp_unregister_ext(bp->ts3);
if (bp->ts4)
ptp_ocp_unregister_ext(bp->ts4);
if (bp->pps)
ptp_ocp_unregister_ext(bp->pps);
for (i = 0; i < 4; i++)
if (bp->signal_out[i])
ptp_ocp_unregister_ext(bp->signal_out[i]);
if (bp->gnss_port.line != -1)
serial8250_unregister_port(bp->gnss_port.line);
if (bp->gnss2_port.line != -1)
serial8250_unregister_port(bp->gnss2_port.line);
if (bp->mac_port.line != -1)
serial8250_unregister_port(bp->mac_port.line);
if (bp->nmea_port.line != -1)
serial8250_unregister_port(bp->nmea_port.line);
platform_device_unregister(bp->spi_flash);
platform_device_unregister(bp->i2c_ctrl);
if (bp->i2c_clk)
clk_hw_unregister_fixed_rate(bp->i2c_clk);
if (bp->n_irqs)
pci_free_irq_vectors(bp->pdev);
if (bp->ptp)
ptp_clock_unregister(bp->ptp);
kfree(bp->ptp_info.pin_config);
device_unregister(&bp->dev);
}
static int
ptp_ocp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct devlink *devlink;
struct ptp_ocp *bp;
int err;
devlink = devlink_alloc(&ptp_ocp_devlink_ops, sizeof(*bp), &pdev->dev);
if (!devlink) {
dev_err(&pdev->dev, "devlink_alloc failed\n");
return -ENOMEM;
}
err = pci_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "pci_enable_device\n");
goto out_free;
}
bp = devlink_priv(devlink);
err = ptp_ocp_device_init(bp, pdev);
if (err)
goto out_disable;
/* Compat mode.
* Older FPGA firmware only returns 2 IRQs.
* Allow this - if not all of the IRQs are returned, skip the
* extra devices and just register the clock.
*/
err = pci_alloc_irq_vectors(pdev, 1, 17, PCI_IRQ_MSI | PCI_IRQ_MSIX);
if (err < 0) {
dev_err(&pdev->dev, "alloc_irq_vectors err: %d\n", err);
goto out;
}
bp->n_irqs = err;
pci_set_master(pdev);
err = ptp_ocp_register_resources(bp, id->driver_data);
if (err)
goto out;
bp->ptp = ptp_clock_register(&bp->ptp_info, &pdev->dev);
if (IS_ERR(bp->ptp)) {
err = PTR_ERR(bp->ptp);
dev_err(&pdev->dev, "ptp_clock_register: %d\n", err);
bp->ptp = NULL;
goto out;
}
err = ptp_ocp_complete(bp);
if (err)
goto out;
ptp_ocp_info(bp);
devlink_register(devlink);
return 0;
out:
ptp_ocp_detach(bp);
out_disable:
pci_disable_device(pdev);
out_free:
devlink_free(devlink);
return err;
}
static void
ptp_ocp_remove(struct pci_dev *pdev)
{
struct ptp_ocp *bp = pci_get_drvdata(pdev);
struct devlink *devlink = priv_to_devlink(bp);
devlink_unregister(devlink);
ptp_ocp_detach(bp);
pci_disable_device(pdev);
devlink_free(devlink);
}
static struct pci_driver ptp_ocp_driver = {
.name = KBUILD_MODNAME,
.id_table = ptp_ocp_pcidev_id,
.probe = ptp_ocp_probe,
.remove = ptp_ocp_remove,
};
static int
ptp_ocp_i2c_notifier_call(struct notifier_block *nb,
unsigned long action, void *data)
{
struct device *dev, *child = data;
struct ptp_ocp *bp;
bool add;
switch (action) {
case BUS_NOTIFY_ADD_DEVICE:
case BUS_NOTIFY_DEL_DEVICE:
add = action == BUS_NOTIFY_ADD_DEVICE;
break;
default:
return 0;
}
if (!i2c_verify_adapter(child))
return 0;
dev = child;
while ((dev = dev->parent))
if (dev->driver && !strcmp(dev->driver->name, KBUILD_MODNAME))
goto found;
return 0;
found:
bp = dev_get_drvdata(dev);
if (add)
ptp_ocp_symlink(bp, child, "i2c");
else
sysfs_remove_link(&bp->dev.kobj, "i2c");
return 0;
}
static struct notifier_block ptp_ocp_i2c_notifier = {
.notifier_call = ptp_ocp_i2c_notifier_call,
};
static int __init
ptp_ocp_init(void)
{
const char *what;
int err;
ptp_ocp_debugfs_init();
what = "timecard class";
err = class_register(&timecard_class);
if (err)
goto out;
what = "i2c notifier";
err = bus_register_notifier(&i2c_bus_type, &ptp_ocp_i2c_notifier);
if (err)
goto out_notifier;
what = "ptp_ocp driver";
err = pci_register_driver(&ptp_ocp_driver);
if (err)
goto out_register;
return 0;
out_register:
bus_unregister_notifier(&i2c_bus_type, &ptp_ocp_i2c_notifier);
out_notifier:
class_unregister(&timecard_class);
out:
ptp_ocp_debugfs_fini();
pr_err(KBUILD_MODNAME ": failed to register %s: %d\n", what, err);
return err;
}
static void __exit
ptp_ocp_fini(void)
{
bus_unregister_notifier(&i2c_bus_type, &ptp_ocp_i2c_notifier);
pci_unregister_driver(&ptp_ocp_driver);
class_unregister(&timecard_class);
ptp_ocp_debugfs_fini();
}
module_init(ptp_ocp_init);
module_exit(ptp_ocp_fini);
MODULE_DESCRIPTION("OpenCompute TimeCard driver");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/ptp/ptp_ocp.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* PTP 1588 clock for Freescale QorIQ 1588 timer
*
* Copyright (C) 2010 OMICRON electronics GmbH
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/device.h>
#include <linux/hrtimer.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/timex.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/fsl/ptp_qoriq.h>
/*
* Register access functions
*/
/* Caller must hold ptp_qoriq->lock. */
static u64 tmr_cnt_read(struct ptp_qoriq *ptp_qoriq)
{
struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
u64 ns;
u32 lo, hi;
lo = ptp_qoriq->read(®s->ctrl_regs->tmr_cnt_l);
hi = ptp_qoriq->read(®s->ctrl_regs->tmr_cnt_h);
ns = ((u64) hi) << 32;
ns |= lo;
return ns;
}
/* Caller must hold ptp_qoriq->lock. */
static void tmr_cnt_write(struct ptp_qoriq *ptp_qoriq, u64 ns)
{
struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
u32 hi = ns >> 32;
u32 lo = ns & 0xffffffff;
ptp_qoriq->write(®s->ctrl_regs->tmr_cnt_l, lo);
ptp_qoriq->write(®s->ctrl_regs->tmr_cnt_h, hi);
}
static u64 tmr_offset_read(struct ptp_qoriq *ptp_qoriq)
{
struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
u32 lo, hi;
u64 ns;
lo = ptp_qoriq->read(®s->ctrl_regs->tmroff_l);
hi = ptp_qoriq->read(®s->ctrl_regs->tmroff_h);
ns = ((u64) hi) << 32;
ns |= lo;
return ns;
}
static void tmr_offset_write(struct ptp_qoriq *ptp_qoriq, u64 delta_ns)
{
struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
u32 lo = delta_ns & 0xffffffff;
u32 hi = delta_ns >> 32;
ptp_qoriq->write(®s->ctrl_regs->tmroff_l, lo);
ptp_qoriq->write(®s->ctrl_regs->tmroff_h, hi);
}
/* Caller must hold ptp_qoriq->lock. */
static void set_alarm(struct ptp_qoriq *ptp_qoriq)
{
struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
u64 ns;
u32 lo, hi;
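/* Pick the next whole-second boundary that lies 0.5-1.5 s in the future, then back it off by one timer clock period. */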
ns = tmr_cnt_read(ptp_qoriq) + tmr_offset_read(ptp_qoriq)
+ 1500000000ULL;
ns = div_u64(ns, 1000000000UL) * 1000000000ULL;
ns -= ptp_qoriq->tclk_period;
hi = ns >> 32;
lo = ns & 0xffffffff;
ptp_qoriq->write(®s->alarm_regs->tmr_alarm1_l, lo);
ptp_qoriq->write(®s->alarm_regs->tmr_alarm1_h, hi);
}
/* Caller must hold ptp_qoriq->lock. */
static void set_fipers(struct ptp_qoriq *ptp_qoriq)
{
struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
set_alarm(ptp_qoriq);
ptp_qoriq->write(®s->fiper_regs->tmr_fiper1, ptp_qoriq->tmr_fiper1);
ptp_qoriq->write(®s->fiper_regs->tmr_fiper2, ptp_qoriq->tmr_fiper2);
if (ptp_qoriq->fiper3_support)
ptp_qoriq->write(®s->fiper_regs->tmr_fiper3,
ptp_qoriq->tmr_fiper3);
}
int extts_clean_up(struct ptp_qoriq *ptp_qoriq, int index, bool update_event)
{
struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
struct ptp_clock_event event;
void __iomem *reg_etts_l;
void __iomem *reg_etts_h;
u32 valid, lo, hi;
switch (index) {
case 0:
valid = ETS1_VLD;
reg_etts_l = ®s->etts_regs->tmr_etts1_l;
reg_etts_h = ®s->etts_regs->tmr_etts1_h;
break;
case 1:
valid = ETS2_VLD;
reg_etts_l = ®s->etts_regs->tmr_etts2_l;
reg_etts_h = ®s->etts_regs->tmr_etts2_h;
break;
default:
return -EINVAL;
}
event.type = PTP_CLOCK_EXTTS;
event.index = index;
if (ptp_qoriq->extts_fifo_support)
if (!(ptp_qoriq->read(®s->ctrl_regs->tmr_stat) & valid))
return 0;
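/* Read (and optionally report) the timestamp; with FIFO support, keep draining while the valid bit stays set. */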
do {
lo = ptp_qoriq->read(reg_etts_l);
hi = ptp_qoriq->read(reg_etts_h);
if (update_event) {
event.timestamp = ((u64) hi) << 32;
event.timestamp |= lo;
ptp_clock_event(ptp_qoriq->clock, &event);
}
if (!ptp_qoriq->extts_fifo_support)
break;
} while (ptp_qoriq->read(®s->ctrl_regs->tmr_stat) & valid);
return 0;
}
EXPORT_SYMBOL_GPL(extts_clean_up);
/*
* Interrupt service routine
*/
irqreturn_t ptp_qoriq_isr(int irq, void *priv)
{
struct ptp_qoriq *ptp_qoriq = priv;
struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
struct ptp_clock_event event;
u32 ack = 0, mask, val, irqs;
spin_lock(&ptp_qoriq->lock);
val = ptp_qoriq->read(®s->ctrl_regs->tmr_tevent);
mask = ptp_qoriq->read(®s->ctrl_regs->tmr_temask);
spin_unlock(&ptp_qoriq->lock);
irqs = val & mask;
if (irqs & ETS1) {
ack |= ETS1;
extts_clean_up(ptp_qoriq, 0, true);
}
if (irqs & ETS2) {
ack |= ETS2;
extts_clean_up(ptp_qoriq, 1, true);
}
if (irqs & PP1) {
ack |= PP1;
event.type = PTP_CLOCK_PPS;
ptp_clock_event(ptp_qoriq->clock, &event);
}
if (ack) {
ptp_qoriq->write(®s->ctrl_regs->tmr_tevent, ack);
return IRQ_HANDLED;
} else
return IRQ_NONE;
}
EXPORT_SYMBOL_GPL(ptp_qoriq_isr);
/*
* PTP clock operations
*/
int ptp_qoriq_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
u64 adj, diff;
u32 tmr_add;
int neg_adj = 0;
struct ptp_qoriq *ptp_qoriq = container_of(ptp, struct ptp_qoriq, caps);
struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
if (scaled_ppm < 0) {
neg_adj = 1;
scaled_ppm = -scaled_ppm;
}
tmr_add = ptp_qoriq->tmr_add;
adj = tmr_add;
/*
* Calculate diff and round() to the nearest integer
*
* diff = adj * (ppb / 1000000000)
* = adj * scaled_ppm / 65536000000
*/
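/* Illustrative example (not part of the original comment): scaled_ppm has a 16-bit fractional part, so scaled_ppm == 65536 requests a 1 ppm adjustment and diff works out to roughly tmr_add / 1000000. */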
diff = mul_u64_u64_div_u64(adj, scaled_ppm, 32768000000);
diff = DIV64_U64_ROUND_UP(diff, 2);
tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff;
ptp_qoriq->write(®s->ctrl_regs->tmr_add, tmr_add);
return 0;
}
EXPORT_SYMBOL_GPL(ptp_qoriq_adjfine);
int ptp_qoriq_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct ptp_qoriq *ptp_qoriq = container_of(ptp, struct ptp_qoriq, caps);
s64 now, curr_delta;
unsigned long flags;
spin_lock_irqsave(&ptp_qoriq->lock, flags);
/* On LS1021A, eTSEC2 and eTSEC3 do not take into account the TMR_OFF
* adjustment
*/
if (ptp_qoriq->etsec) {
now = tmr_cnt_read(ptp_qoriq);
now += delta;
tmr_cnt_write(ptp_qoriq, now);
} else {
curr_delta = tmr_offset_read(ptp_qoriq);
curr_delta += delta;
tmr_offset_write(ptp_qoriq, curr_delta);
}
set_fipers(ptp_qoriq);
spin_unlock_irqrestore(&ptp_qoriq->lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(ptp_qoriq_adjtime);
int ptp_qoriq_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
u64 ns;
unsigned long flags;
struct ptp_qoriq *ptp_qoriq = container_of(ptp, struct ptp_qoriq, caps);
spin_lock_irqsave(&ptp_qoriq->lock, flags);
ns = tmr_cnt_read(ptp_qoriq) + tmr_offset_read(ptp_qoriq);
spin_unlock_irqrestore(&ptp_qoriq->lock, flags);
*ts = ns_to_timespec64(ns);
return 0;
}
EXPORT_SYMBOL_GPL(ptp_qoriq_gettime);
int ptp_qoriq_settime(struct ptp_clock_info *ptp,
const struct timespec64 *ts)
{
u64 ns;
unsigned long flags;
struct ptp_qoriq *ptp_qoriq = container_of(ptp, struct ptp_qoriq, caps);
ns = timespec64_to_ns(ts);
spin_lock_irqsave(&ptp_qoriq->lock, flags);
tmr_offset_write(ptp_qoriq, 0);
tmr_cnt_write(ptp_qoriq, ns);
set_fipers(ptp_qoriq);
spin_unlock_irqrestore(&ptp_qoriq->lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(ptp_qoriq_settime);
int ptp_qoriq_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
struct ptp_qoriq *ptp_qoriq = container_of(ptp, struct ptp_qoriq, caps);
struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
unsigned long flags;
u32 bit, mask = 0;
switch (rq->type) {
case PTP_CLK_REQ_EXTTS:
switch (rq->extts.index) {
case 0:
bit = ETS1EN;
break;
case 1:
bit = ETS2EN;
break;
default:
return -EINVAL;
}
if (on)
extts_clean_up(ptp_qoriq, rq->extts.index, false);
break;
case PTP_CLK_REQ_PPS:
bit = PP1EN;
break;
default:
return -EOPNOTSUPP;
}
spin_lock_irqsave(&ptp_qoriq->lock, flags);
mask = ptp_qoriq->read(®s->ctrl_regs->tmr_temask);
if (on) {
mask |= bit;
ptp_qoriq->write(®s->ctrl_regs->tmr_tevent, bit);
} else {
mask &= ~bit;
}
ptp_qoriq->write(®s->ctrl_regs->tmr_temask, mask);
spin_unlock_irqrestore(&ptp_qoriq->lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(ptp_qoriq_enable);
static const struct ptp_clock_info ptp_qoriq_caps = {
.owner = THIS_MODULE,
.name = "qoriq ptp clock",
.max_adj = 512000,
.n_alarm = 0,
.n_ext_ts = N_EXT_TS,
.n_per_out = 0,
.n_pins = 0,
.pps = 1,
.adjfine = ptp_qoriq_adjfine,
.adjtime = ptp_qoriq_adjtime,
.gettime64 = ptp_qoriq_gettime,
.settime64 = ptp_qoriq_settime,
.enable = ptp_qoriq_enable,
};
/**
* ptp_qoriq_nominal_freq - calculate nominal frequency according to
* reference clock frequency
*
* @clk_src: reference clock frequency
*
* The nominal frequency is the desired clock frequency.
* It should be less than the reference clock frequency.
* It should be a factor of 1000MHz.
*
* Return the nominal frequency
*/
static u32 ptp_qoriq_nominal_freq(u32 clk_src)
{
u32 remainder = 0;
clk_src /= 1000000;
remainder = clk_src % 100;
if (remainder) {
clk_src -= remainder;
clk_src += 100;
}
do {
clk_src -= 100;
} while (1000 % clk_src);
return clk_src * 1000000;
}
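/* Illustrative example (assumed values): a 600 MHz reference clock yields a 500 MHz nominal frequency - 600 is already a multiple of 100, and the do/while loop stops at 500 because 1000 % 500 == 0. */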
/**
* ptp_qoriq_auto_config - calculate a set of default configurations
*
* @ptp_qoriq: pointer to ptp_qoriq
* @node: pointer to device_node
*
* If the device tree properties below are not provided, this function is
* called to calculate a set of default configurations for them.
* "fsl,tclk-period"
* "fsl,tmr-prsc"
* "fsl,tmr-add"
* "fsl,tmr-fiper1"
* "fsl,tmr-fiper2"
* "fsl,tmr-fiper3" (required only for DPAA2 and ENETC hardware)
* "fsl,max-adj"
*
* Return 0 on success
*/
static int ptp_qoriq_auto_config(struct ptp_qoriq *ptp_qoriq,
struct device_node *node)
{
struct clk *clk;
u64 freq_comp;
u64 max_adj;
u32 nominal_freq;
u32 remainder = 0;
u32 clk_src = 0;
ptp_qoriq->cksel = DEFAULT_CKSEL;
clk = of_clk_get(node, 0);
if (!IS_ERR(clk)) {
clk_src = clk_get_rate(clk);
clk_put(clk);
}
if (clk_src <= 100000000UL) {
pr_err("error reference clock value, or lower than 100MHz\n");
return -EINVAL;
}
nominal_freq = ptp_qoriq_nominal_freq(clk_src);
if (!nominal_freq)
return -EINVAL;
ptp_qoriq->tclk_period = 1000000000UL / nominal_freq;
ptp_qoriq->tmr_prsc = DEFAULT_TMR_PRSC;
/* Calculate initial frequency compensation value for TMR_ADD register.
* freq_comp = ceil(2^32 / freq_ratio)
* freq_ratio = reference_clock_freq / nominal_freq
*/
freq_comp = ((u64)1 << 32) * nominal_freq;
freq_comp = div_u64_rem(freq_comp, clk_src, &remainder);
if (remainder)
freq_comp++;
ptp_qoriq->tmr_add = freq_comp;
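/* Illustrative example (assumed values): with a 600 MHz reference and a 500 MHz nominal clock, freq_comp = ceil(2^32 * 500 / 600) = 0xd5555556. */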
ptp_qoriq->tmr_fiper1 = DEFAULT_FIPER1_PERIOD - ptp_qoriq->tclk_period;
ptp_qoriq->tmr_fiper2 = DEFAULT_FIPER2_PERIOD - ptp_qoriq->tclk_period;
ptp_qoriq->tmr_fiper3 = DEFAULT_FIPER3_PERIOD - ptp_qoriq->tclk_period;
/* max_adj = 1000000000 * (freq_ratio - 1.0) - 1
* freq_ratio = reference_clock_freq / nominal_freq
*/
max_adj = 1000000000ULL * (clk_src - nominal_freq);
max_adj = div_u64(max_adj, nominal_freq) - 1;
ptp_qoriq->caps.max_adj = max_adj;
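/* Illustrative example (assumed values): with a 600 MHz reference and a 500 MHz nominal clock, max_adj = 1000000000 * (600 - 500) / 500 - 1 = 199999999 ppb. */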
return 0;
}
int ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base,
const struct ptp_clock_info *caps)
{
struct device_node *node = ptp_qoriq->dev->of_node;
struct ptp_qoriq_registers *regs;
struct timespec64 now;
unsigned long flags;
u32 tmr_ctrl;
if (!node)
return -ENODEV;
ptp_qoriq->base = base;
ptp_qoriq->caps = *caps;
if (of_property_read_u32(node, "fsl,cksel", &ptp_qoriq->cksel))
ptp_qoriq->cksel = DEFAULT_CKSEL;
if (of_property_read_bool(node, "fsl,extts-fifo"))
ptp_qoriq->extts_fifo_support = true;
else
ptp_qoriq->extts_fifo_support = false;
if (of_device_is_compatible(node, "fsl,dpaa2-ptp") ||
of_device_is_compatible(node, "fsl,enetc-ptp"))
ptp_qoriq->fiper3_support = true;
if (of_property_read_u32(node,
"fsl,tclk-period", &ptp_qoriq->tclk_period) ||
of_property_read_u32(node,
"fsl,tmr-prsc", &ptp_qoriq->tmr_prsc) ||
of_property_read_u32(node,
"fsl,tmr-add", &ptp_qoriq->tmr_add) ||
of_property_read_u32(node,
"fsl,tmr-fiper1", &ptp_qoriq->tmr_fiper1) ||
of_property_read_u32(node,
"fsl,tmr-fiper2", &ptp_qoriq->tmr_fiper2) ||
of_property_read_u32(node,
"fsl,max-adj", &ptp_qoriq->caps.max_adj) ||
(ptp_qoriq->fiper3_support &&
of_property_read_u32(node, "fsl,tmr-fiper3",
&ptp_qoriq->tmr_fiper3))) {
pr_warn("device tree node missing required elements, try automatic configuration\n");
if (ptp_qoriq_auto_config(ptp_qoriq, node))
return -ENODEV;
}
if (of_property_read_bool(node, "little-endian")) {
ptp_qoriq->read = qoriq_read_le;
ptp_qoriq->write = qoriq_write_le;
} else {
ptp_qoriq->read = qoriq_read_be;
ptp_qoriq->write = qoriq_write_be;
}
/* The eTSEC uses a different memory map than DPAA/ENETC */
if (of_device_is_compatible(node, "fsl,etsec-ptp")) {
ptp_qoriq->etsec = true;
ptp_qoriq->regs.ctrl_regs = base + ETSEC_CTRL_REGS_OFFSET;
ptp_qoriq->regs.alarm_regs = base + ETSEC_ALARM_REGS_OFFSET;
ptp_qoriq->regs.fiper_regs = base + ETSEC_FIPER_REGS_OFFSET;
ptp_qoriq->regs.etts_regs = base + ETSEC_ETTS_REGS_OFFSET;
} else {
ptp_qoriq->regs.ctrl_regs = base + CTRL_REGS_OFFSET;
ptp_qoriq->regs.alarm_regs = base + ALARM_REGS_OFFSET;
ptp_qoriq->regs.fiper_regs = base + FIPER_REGS_OFFSET;
ptp_qoriq->regs.etts_regs = base + ETTS_REGS_OFFSET;
}
spin_lock_init(&ptp_qoriq->lock);
ktime_get_real_ts64(&now);
ptp_qoriq_settime(&ptp_qoriq->caps, &now);
tmr_ctrl =
(ptp_qoriq->tclk_period & TCLK_PERIOD_MASK) << TCLK_PERIOD_SHIFT |
(ptp_qoriq->cksel & CKSEL_MASK) << CKSEL_SHIFT;
spin_lock_irqsave(&ptp_qoriq->lock, flags);
regs = &ptp_qoriq->regs;
ptp_qoriq->write(®s->ctrl_regs->tmr_ctrl, tmr_ctrl);
ptp_qoriq->write(®s->ctrl_regs->tmr_add, ptp_qoriq->tmr_add);
ptp_qoriq->write(®s->ctrl_regs->tmr_prsc, ptp_qoriq->tmr_prsc);
ptp_qoriq->write(®s->fiper_regs->tmr_fiper1, ptp_qoriq->tmr_fiper1);
ptp_qoriq->write(®s->fiper_regs->tmr_fiper2, ptp_qoriq->tmr_fiper2);
if (ptp_qoriq->fiper3_support)
ptp_qoriq->write(®s->fiper_regs->tmr_fiper3,
ptp_qoriq->tmr_fiper3);
set_alarm(ptp_qoriq);
ptp_qoriq->write(®s->ctrl_regs->tmr_ctrl,
tmr_ctrl|FIPERST|RTPE|TE|FRD);
spin_unlock_irqrestore(&ptp_qoriq->lock, flags);
ptp_qoriq->clock = ptp_clock_register(&ptp_qoriq->caps, ptp_qoriq->dev);
if (IS_ERR(ptp_qoriq->clock))
return PTR_ERR(ptp_qoriq->clock);
ptp_qoriq->phc_index = ptp_clock_index(ptp_qoriq->clock);
ptp_qoriq_create_debugfs(ptp_qoriq);
return 0;
}
EXPORT_SYMBOL_GPL(ptp_qoriq_init);
void ptp_qoriq_free(struct ptp_qoriq *ptp_qoriq)
{
struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
ptp_qoriq->write(®s->ctrl_regs->tmr_temask, 0);
ptp_qoriq->write(®s->ctrl_regs->tmr_ctrl, 0);
ptp_qoriq_remove_debugfs(ptp_qoriq);
ptp_clock_unregister(ptp_qoriq->clock);
iounmap(ptp_qoriq->base);
free_irq(ptp_qoriq->irq, ptp_qoriq);
}
EXPORT_SYMBOL_GPL(ptp_qoriq_free);
static int ptp_qoriq_probe(struct platform_device *dev)
{
struct ptp_qoriq *ptp_qoriq;
int err = -ENOMEM;
void __iomem *base;
ptp_qoriq = kzalloc(sizeof(*ptp_qoriq), GFP_KERNEL);
if (!ptp_qoriq)
goto no_memory;
ptp_qoriq->dev = &dev->dev;
err = -ENODEV;
ptp_qoriq->irq = platform_get_irq(dev, 0);
if (ptp_qoriq->irq < 0) {
pr_err("irq not in device tree\n");
goto no_node;
}
if (request_irq(ptp_qoriq->irq, ptp_qoriq_isr, IRQF_SHARED,
DRIVER, ptp_qoriq)) {
pr_err("request_irq failed\n");
goto no_node;
}
ptp_qoriq->rsrc = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (!ptp_qoriq->rsrc) {
pr_err("no resource\n");
goto no_resource;
}
if (request_resource(&iomem_resource, ptp_qoriq->rsrc)) {
pr_err("resource busy\n");
goto no_resource;
}
base = ioremap(ptp_qoriq->rsrc->start,
resource_size(ptp_qoriq->rsrc));
if (!base) {
pr_err("ioremap ptp registers failed\n");
goto no_ioremap;
}
err = ptp_qoriq_init(ptp_qoriq, base, &ptp_qoriq_caps);
if (err)
goto no_clock;
platform_set_drvdata(dev, ptp_qoriq);
return 0;
no_clock:
iounmap(base);
no_ioremap:
release_resource(ptp_qoriq->rsrc);
no_resource:
free_irq(ptp_qoriq->irq, ptp_qoriq);
no_node:
kfree(ptp_qoriq);
no_memory:
return err;
}
static int ptp_qoriq_remove(struct platform_device *dev)
{
struct ptp_qoriq *ptp_qoriq = platform_get_drvdata(dev);
ptp_qoriq_free(ptp_qoriq);
release_resource(ptp_qoriq->rsrc);
kfree(ptp_qoriq);
return 0;
}
static const struct of_device_id match_table[] = {
{ .compatible = "fsl,etsec-ptp" },
{ .compatible = "fsl,fman-ptp-timer" },
{},
};
MODULE_DEVICE_TABLE(of, match_table);
static struct platform_driver ptp_qoriq_driver = {
.driver = {
.name = "ptp_qoriq",
.of_match_table = match_table,
},
.probe = ptp_qoriq_probe,
.remove = ptp_qoriq_remove,
};
module_platform_driver(ptp_qoriq_driver);
MODULE_AUTHOR("Richard Cochran <[email protected]>");
MODULE_DESCRIPTION("PTP clock for Freescale QorIQ 1588 timer");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/ptp/ptp_qoriq.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* PTP 1588 clock support - sysfs interface.
*
* Copyright (C) 2010 OMICRON electronics GmbH
* Copyright 2021 NXP
*/
#include <linux/capability.h>
#include <linux/slab.h>
#include "ptp_private.h"
static ssize_t clock_name_show(struct device *dev,
struct device_attribute *attr, char *page)
{
struct ptp_clock *ptp = dev_get_drvdata(dev);
return sysfs_emit(page, "%s\n", ptp->info->name);
}
static DEVICE_ATTR_RO(clock_name);
static ssize_t max_phase_adjustment_show(struct device *dev,
struct device_attribute *attr,
char *page)
{
struct ptp_clock *ptp = dev_get_drvdata(dev);
return snprintf(page, PAGE_SIZE - 1, "%d\n",
ptp->info->getmaxphase(ptp->info));
}
static DEVICE_ATTR_RO(max_phase_adjustment);
#define PTP_SHOW_INT(name, var) \
static ssize_t var##_show(struct device *dev, \
struct device_attribute *attr, char *page) \
{ \
struct ptp_clock *ptp = dev_get_drvdata(dev); \
return snprintf(page, PAGE_SIZE-1, "%d\n", ptp->info->var); \
} \
static DEVICE_ATTR(name, 0444, var##_show, NULL);
PTP_SHOW_INT(max_adjustment, max_adj);
PTP_SHOW_INT(n_alarms, n_alarm);
PTP_SHOW_INT(n_external_timestamps, n_ext_ts);
PTP_SHOW_INT(n_periodic_outputs, n_per_out);
PTP_SHOW_INT(n_programmable_pins, n_pins);
PTP_SHOW_INT(pps_available, pps);
static ssize_t extts_enable_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct ptp_clock *ptp = dev_get_drvdata(dev);
struct ptp_clock_info *ops = ptp->info;
struct ptp_clock_request req = { .type = PTP_CLK_REQ_EXTTS };
int cnt, enable;
int err = -EINVAL;
cnt = sscanf(buf, "%u %d", &req.extts.index, &enable);
if (cnt != 2)
goto out;
if (req.extts.index >= ops->n_ext_ts)
goto out;
err = ops->enable(ops, &req, enable ? 1 : 0);
if (err)
goto out;
return count;
out:
return err;
}
static DEVICE_ATTR(extts_enable, 0220, NULL, extts_enable_store);
static ssize_t extts_fifo_show(struct device *dev,
struct device_attribute *attr, char *page)
{
struct ptp_clock *ptp = dev_get_drvdata(dev);
struct timestamp_event_queue *queue = &ptp->tsevq;
struct ptp_extts_event event;
unsigned long flags;
size_t qcnt;
int cnt = 0;
memset(&event, 0, sizeof(event));
if (mutex_lock_interruptible(&ptp->tsevq_mux))
return -ERESTARTSYS;
spin_lock_irqsave(&queue->lock, flags);
qcnt = queue_cnt(queue);
if (qcnt) {
event = queue->buf[queue->head];
queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;
}
spin_unlock_irqrestore(&queue->lock, flags);
if (!qcnt)
goto out;
cnt = snprintf(page, PAGE_SIZE, "%u %lld %u\n",
event.index, event.t.sec, event.t.nsec);
out:
mutex_unlock(&ptp->tsevq_mux);
return cnt;
}
static DEVICE_ATTR(fifo, 0444, extts_fifo_show, NULL);
static ssize_t period_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct ptp_clock *ptp = dev_get_drvdata(dev);
struct ptp_clock_info *ops = ptp->info;
struct ptp_clock_request req = { .type = PTP_CLK_REQ_PEROUT };
int cnt, enable, err = -EINVAL;
cnt = sscanf(buf, "%u %lld %u %lld %u", &req.perout.index,
&req.perout.start.sec, &req.perout.start.nsec,
&req.perout.period.sec, &req.perout.period.nsec);
if (cnt != 5)
goto out;
if (req.perout.index >= ops->n_per_out)
goto out;
enable = req.perout.period.sec || req.perout.period.nsec;
err = ops->enable(ops, &req, enable);
if (err)
goto out;
return count;
out:
return err;
}
static DEVICE_ATTR(period, 0220, NULL, period_store);
static ssize_t pps_enable_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct ptp_clock *ptp = dev_get_drvdata(dev);
struct ptp_clock_info *ops = ptp->info;
struct ptp_clock_request req = { .type = PTP_CLK_REQ_PPS };
int cnt, enable;
int err = -EINVAL;
if (!capable(CAP_SYS_TIME))
return -EPERM;
cnt = sscanf(buf, "%d", &enable);
if (cnt != 1)
goto out;
err = ops->enable(ops, &req, enable ? 1 : 0);
if (err)
goto out;
return count;
out:
return err;
}
static DEVICE_ATTR(pps_enable, 0220, NULL, pps_enable_store);
static int unregister_vclock(struct device *dev, void *data)
{
struct ptp_clock *ptp = dev_get_drvdata(dev);
struct ptp_clock_info *info = ptp->info;
struct ptp_vclock *vclock;
u32 *num = data;
vclock = info_to_vclock(info);
dev_info(dev->parent, "delete virtual clock ptp%d\n",
vclock->clock->index);
ptp_vclock_unregister(vclock);
(*num)--;
/* Returning non-zero only stops the child iteration early; it is not an error. */
if (*num == 0)
return -EINVAL;
return 0;
}
static ssize_t n_vclocks_show(struct device *dev,
struct device_attribute *attr, char *page)
{
struct ptp_clock *ptp = dev_get_drvdata(dev);
ssize_t size;
if (mutex_lock_interruptible(&ptp->n_vclocks_mux))
return -ERESTARTSYS;
size = snprintf(page, PAGE_SIZE - 1, "%u\n", ptp->n_vclocks);
mutex_unlock(&ptp->n_vclocks_mux);
return size;
}
static ssize_t n_vclocks_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct ptp_clock *ptp = dev_get_drvdata(dev);
struct ptp_vclock *vclock;
int err = -EINVAL;
u32 num, i;
if (kstrtou32(buf, 0, &num))
return err;
if (mutex_lock_interruptible(&ptp->n_vclocks_mux))
return -ERESTARTSYS;
if (num > ptp->max_vclocks) {
dev_err(dev, "max value is %d\n", ptp->max_vclocks);
goto out;
}
/* Need to create more vclocks */
if (num > ptp->n_vclocks) {
for (i = 0; i < num - ptp->n_vclocks; i++) {
vclock = ptp_vclock_register(ptp);
if (!vclock)
goto out;
*(ptp->vclock_index + ptp->n_vclocks + i) =
vclock->clock->index;
dev_info(dev, "new virtual clock ptp%d\n",
vclock->clock->index);
}
}
/* Need to delete vclocks */
if (num < ptp->n_vclocks) {
i = ptp->n_vclocks - num;
device_for_each_child_reverse(dev, &i,
unregister_vclock);
for (i = 1; i <= ptp->n_vclocks - num; i++)
*(ptp->vclock_index + ptp->n_vclocks - i) = -1;
}
/* Need to inform about changed physical clock behavior */
if (!ptp->has_cycles) {
if (num == 0)
dev_info(dev, "only physical clock in use now\n");
else
dev_info(dev, "guarantee physical clock free running\n");
}
ptp->n_vclocks = num;
mutex_unlock(&ptp->n_vclocks_mux);
return count;
out:
mutex_unlock(&ptp->n_vclocks_mux);
return err;
}
static DEVICE_ATTR_RW(n_vclocks);
static ssize_t max_vclocks_show(struct device *dev,
struct device_attribute *attr, char *page)
{
struct ptp_clock *ptp = dev_get_drvdata(dev);
ssize_t size;
size = snprintf(page, PAGE_SIZE - 1, "%u\n", ptp->max_vclocks);
return size;
}
static ssize_t max_vclocks_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct ptp_clock *ptp = dev_get_drvdata(dev);
unsigned int *vclock_index;
int err = -EINVAL;
size_t size;
u32 max;
if (kstrtou32(buf, 0, &max) || max == 0)
return -EINVAL;
if (max == ptp->max_vclocks)
return count;
if (mutex_lock_interruptible(&ptp->n_vclocks_mux))
return -ERESTARTSYS;
if (max < ptp->n_vclocks)
goto out;
size = sizeof(int) * max;
vclock_index = kzalloc(size, GFP_KERNEL);
if (!vclock_index) {
err = -ENOMEM;
goto out;
}
size = sizeof(int) * ptp->n_vclocks;
memcpy(vclock_index, ptp->vclock_index, size);
kfree(ptp->vclock_index);
ptp->vclock_index = vclock_index;
ptp->max_vclocks = max;
mutex_unlock(&ptp->n_vclocks_mux);
return count;
out:
mutex_unlock(&ptp->n_vclocks_mux);
return err;
}
static DEVICE_ATTR_RW(max_vclocks);
static struct attribute *ptp_attrs[] = {
&dev_attr_clock_name.attr,
&dev_attr_max_adjustment.attr,
&dev_attr_max_phase_adjustment.attr,
&dev_attr_n_alarms.attr,
&dev_attr_n_external_timestamps.attr,
&dev_attr_n_periodic_outputs.attr,
&dev_attr_n_programmable_pins.attr,
&dev_attr_pps_available.attr,
&dev_attr_extts_enable.attr,
&dev_attr_fifo.attr,
&dev_attr_period.attr,
&dev_attr_pps_enable.attr,
&dev_attr_n_vclocks.attr,
&dev_attr_max_vclocks.attr,
NULL
};
static umode_t ptp_is_attribute_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = kobj_to_dev(kobj);
struct ptp_clock *ptp = dev_get_drvdata(dev);
struct ptp_clock_info *info = ptp->info;
umode_t mode = attr->mode;
if (attr == &dev_attr_extts_enable.attr ||
attr == &dev_attr_fifo.attr) {
if (!info->n_ext_ts)
mode = 0;
} else if (attr == &dev_attr_period.attr) {
if (!info->n_per_out)
mode = 0;
} else if (attr == &dev_attr_pps_enable.attr) {
if (!info->pps)
mode = 0;
} else if (attr == &dev_attr_n_vclocks.attr ||
attr == &dev_attr_max_vclocks.attr) {
if (ptp->is_virtual_clock)
mode = 0;
} else if (attr == &dev_attr_max_phase_adjustment.attr) {
if (!info->adjphase || !info->getmaxphase)
mode = 0;
}
return mode;
}
static const struct attribute_group ptp_group = {
.is_visible = ptp_is_attribute_visible,
.attrs = ptp_attrs,
};
const struct attribute_group *ptp_groups[] = {
&ptp_group,
NULL
};
static int ptp_pin_name2index(struct ptp_clock *ptp, const char *name)
{
int i;
for (i = 0; i < ptp->info->n_pins; i++) {
if (!strcmp(ptp->info->pin_config[i].name, name))
return i;
}
return -1;
}
static ssize_t ptp_pin_show(struct device *dev, struct device_attribute *attr,
char *page)
{
struct ptp_clock *ptp = dev_get_drvdata(dev);
unsigned int func, chan;
int index;
index = ptp_pin_name2index(ptp, attr->attr.name);
if (index < 0)
return -EINVAL;
if (mutex_lock_interruptible(&ptp->pincfg_mux))
return -ERESTARTSYS;
func = ptp->info->pin_config[index].func;
chan = ptp->info->pin_config[index].chan;
mutex_unlock(&ptp->pincfg_mux);
return sysfs_emit(page, "%u %u\n", func, chan);
}
static ssize_t ptp_pin_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct ptp_clock *ptp = dev_get_drvdata(dev);
unsigned int func, chan;
int cnt, err, index;
cnt = sscanf(buf, "%u %u", &func, &chan);
if (cnt != 2)
return -EINVAL;
index = ptp_pin_name2index(ptp, attr->attr.name);
if (index < 0)
return -EINVAL;
if (mutex_lock_interruptible(&ptp->pincfg_mux))
return -ERESTARTSYS;
err = ptp_set_pinfunc(ptp, index, func, chan);
mutex_unlock(&ptp->pincfg_mux);
if (err)
return err;
return count;
}
int ptp_populate_pin_groups(struct ptp_clock *ptp)
{
struct ptp_clock_info *info = ptp->info;
int err = -ENOMEM, i, n_pins = info->n_pins;
if (!n_pins)
return 0;
ptp->pin_dev_attr = kcalloc(n_pins, sizeof(*ptp->pin_dev_attr),
GFP_KERNEL);
if (!ptp->pin_dev_attr)
goto no_dev_attr;
ptp->pin_attr = kcalloc(1 + n_pins, sizeof(*ptp->pin_attr), GFP_KERNEL);
if (!ptp->pin_attr)
goto no_pin_attr;
for (i = 0; i < n_pins; i++) {
struct device_attribute *da = &ptp->pin_dev_attr[i];
sysfs_attr_init(&da->attr);
da->attr.name = info->pin_config[i].name;
da->attr.mode = 0644;
da->show = ptp_pin_show;
da->store = ptp_pin_store;
ptp->pin_attr[i] = &da->attr;
}
ptp->pin_attr_group.name = "pins";
ptp->pin_attr_group.attrs = ptp->pin_attr;
ptp->pin_attr_groups[0] = &ptp->pin_attr_group;
return 0;
no_pin_attr:
kfree(ptp->pin_dev_attr);
no_dev_attr:
return err;
}
void ptp_cleanup_pin_groups(struct ptp_clock *ptp)
{
kfree(ptp->pin_attr);
kfree(ptp->pin_dev_attr);
}
|
linux-master
|
drivers/ptp/ptp_sysfs.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* PTP 1588 clock support
*
* Copyright (C) 2010 OMICRON electronics GmbH
*/
#include <linux/idr.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/posix-clock.h>
#include <linux/pps_kernel.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <uapi/linux/sched/types.h>
#include "ptp_private.h"
#define PTP_MAX_ALARMS 4
#define PTP_PPS_DEFAULTS (PPS_CAPTUREASSERT | PPS_OFFSETASSERT)
#define PTP_PPS_EVENT PPS_CAPTUREASSERT
#define PTP_PPS_MODE (PTP_PPS_DEFAULTS | PPS_CANWAIT | PPS_TSFMT_TSPEC)
struct class *ptp_class;
/* private globals */
static dev_t ptp_devt;
static DEFINE_IDA(ptp_clocks_map);
/* time stamp event queue operations */
static inline int queue_free(struct timestamp_event_queue *q)
{
return PTP_MAX_TIMESTAMPS - queue_cnt(q) - 1;
}
static void enqueue_external_timestamp(struct timestamp_event_queue *queue,
struct ptp_clock_event *src)
{
struct ptp_extts_event *dst;
unsigned long flags;
s64 seconds;
u32 remainder;
seconds = div_u64_rem(src->timestamp, 1000000000, &remainder);
spin_lock_irqsave(&queue->lock, flags);
dst = &queue->buf[queue->tail];
dst->index = src->index;
dst->t.sec = seconds;
dst->t.nsec = remainder;
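/* If the queue is already full, drop the oldest event to make room for the new one. */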
if (!queue_free(queue))
queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;
queue->tail = (queue->tail + 1) % PTP_MAX_TIMESTAMPS;
spin_unlock_irqrestore(&queue->lock, flags);
}
/* posix clock implementation */
static int ptp_clock_getres(struct posix_clock *pc, struct timespec64 *tp)
{
tp->tv_sec = 0;
tp->tv_nsec = 1;
return 0;
}
static int ptp_clock_settime(struct posix_clock *pc, const struct timespec64 *tp)
{
struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
if (ptp_clock_freerun(ptp)) {
pr_err("ptp: physical clock is free running\n");
return -EBUSY;
}
return ptp->info->settime64(ptp->info, tp);
}
static int ptp_clock_gettime(struct posix_clock *pc, struct timespec64 *tp)
{
struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
int err;
if (ptp->info->gettimex64)
err = ptp->info->gettimex64(ptp->info, tp, NULL);
else
err = ptp->info->gettime64(ptp->info, tp);
return err;
}
static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx)
{
struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
struct ptp_clock_info *ops;
int err = -EOPNOTSUPP;
if (ptp_clock_freerun(ptp)) {
pr_err("ptp: physical clock is free running\n");
return -EBUSY;
}
ops = ptp->info;
if (tx->modes & ADJ_SETOFFSET) {
struct timespec64 ts;
ktime_t kt;
s64 delta;
ts.tv_sec = tx->time.tv_sec;
ts.tv_nsec = tx->time.tv_usec;
if (!(tx->modes & ADJ_NANO))
ts.tv_nsec *= 1000;
if ((unsigned long) ts.tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
kt = timespec64_to_ktime(ts);
delta = ktime_to_ns(kt);
err = ops->adjtime(ops, delta);
} else if (tx->modes & ADJ_FREQUENCY) {
long ppb = scaled_ppm_to_ppb(tx->freq);
if (ppb > ops->max_adj || ppb < -ops->max_adj)
return -ERANGE;
err = ops->adjfine(ops, tx->freq);
ptp->dialed_frequency = tx->freq;
} else if (tx->modes & ADJ_OFFSET) {
if (ops->adjphase) {
s32 max_phase_adj = ops->getmaxphase(ops);
s32 offset = tx->offset;
if (!(tx->modes & ADJ_NANO))
offset *= NSEC_PER_USEC;
if (offset > max_phase_adj || offset < -max_phase_adj)
return -ERANGE;
err = ops->adjphase(ops, offset);
}
} else if (tx->modes == 0) {
tx->freq = ptp->dialed_frequency;
err = 0;
}
return err;
}
static struct posix_clock_operations ptp_clock_ops = {
.owner = THIS_MODULE,
.clock_adjtime = ptp_clock_adjtime,
.clock_gettime = ptp_clock_gettime,
.clock_getres = ptp_clock_getres,
.clock_settime = ptp_clock_settime,
.ioctl = ptp_ioctl,
.open = ptp_open,
.poll = ptp_poll,
.read = ptp_read,
};
static void ptp_clock_release(struct device *dev)
{
struct ptp_clock *ptp = container_of(dev, struct ptp_clock, dev);
ptp_cleanup_pin_groups(ptp);
kfree(ptp->vclock_index);
mutex_destroy(&ptp->tsevq_mux);
mutex_destroy(&ptp->pincfg_mux);
mutex_destroy(&ptp->n_vclocks_mux);
ida_free(&ptp_clocks_map, ptp->index);
kfree(ptp);
}
static int ptp_getcycles64(struct ptp_clock_info *info, struct timespec64 *ts)
{
if (info->getcyclesx64)
return info->getcyclesx64(info, ts, NULL);
else
return info->gettime64(info, ts);
}
static void ptp_aux_kworker(struct kthread_work *work)
{
struct ptp_clock *ptp = container_of(work, struct ptp_clock,
aux_work.work);
struct ptp_clock_info *info = ptp->info;
long delay;
delay = info->do_aux_work(info);
if (delay >= 0)
kthread_queue_delayed_work(ptp->kworker, &ptp->aux_work, delay);
}
/* public interface */
struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
struct device *parent)
{
struct ptp_clock *ptp;
int err = 0, index, major = MAJOR(ptp_devt);
size_t size;
if (info->n_alarm > PTP_MAX_ALARMS)
return ERR_PTR(-EINVAL);
/* Initialize a clock structure. */
err = -ENOMEM;
ptp = kzalloc(sizeof(struct ptp_clock), GFP_KERNEL);
if (ptp == NULL)
goto no_memory;
index = ida_alloc_max(&ptp_clocks_map, MINORMASK, GFP_KERNEL);
if (index < 0) {
err = index;
goto no_slot;
}
ptp->clock.ops = ptp_clock_ops;
ptp->info = info;
ptp->devid = MKDEV(major, index);
ptp->index = index;
spin_lock_init(&ptp->tsevq.lock);
mutex_init(&ptp->tsevq_mux);
mutex_init(&ptp->pincfg_mux);
mutex_init(&ptp->n_vclocks_mux);
init_waitqueue_head(&ptp->tsev_wq);
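/*
 * Added note: prefer the driver's free-running cycle counter when it provides
 * one; otherwise fall back to the time-of-day callbacks so cycle reads still
 * return something sensible.
 */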
if (ptp->info->getcycles64 || ptp->info->getcyclesx64) {
ptp->has_cycles = true;
if (!ptp->info->getcycles64 && ptp->info->getcyclesx64)
ptp->info->getcycles64 = ptp_getcycles64;
} else {
/* Free running cycle counter not supported, use time. */
ptp->info->getcycles64 = ptp_getcycles64;
if (ptp->info->gettimex64)
ptp->info->getcyclesx64 = ptp->info->gettimex64;
if (ptp->info->getcrosststamp)
ptp->info->getcrosscycles = ptp->info->getcrosststamp;
}
if (ptp->info->do_aux_work) {
kthread_init_delayed_work(&ptp->aux_work, ptp_aux_kworker);
ptp->kworker = kthread_create_worker(0, "ptp%d", ptp->index);
if (IS_ERR(ptp->kworker)) {
err = PTR_ERR(ptp->kworker);
pr_err("failed to create ptp aux_worker %d\n", err);
goto kworker_err;
}
}
/* PTP virtual clock is being registered under physical clock */
if (parent && parent->class && parent->class->name &&
strcmp(parent->class->name, "ptp") == 0)
ptp->is_virtual_clock = true;
if (!ptp->is_virtual_clock) {
ptp->max_vclocks = PTP_DEFAULT_MAX_VCLOCKS;
size = sizeof(int) * ptp->max_vclocks;
ptp->vclock_index = kzalloc(size, GFP_KERNEL);
if (!ptp->vclock_index) {
err = -ENOMEM;
goto no_mem_for_vclocks;
}
}
err = ptp_populate_pin_groups(ptp);
if (err)
goto no_pin_groups;
/* Register a new PPS source. */
if (info->pps) {
struct pps_source_info pps;
memset(&pps, 0, sizeof(pps));
snprintf(pps.name, PPS_MAX_NAME_LEN, "ptp%d", index);
pps.mode = PTP_PPS_MODE;
pps.owner = info->owner;
ptp->pps_source = pps_register_source(&pps, PTP_PPS_DEFAULTS);
if (IS_ERR(ptp->pps_source)) {
err = PTR_ERR(ptp->pps_source);
pr_err("failed to register pps source\n");
goto no_pps;
}
ptp->pps_source->lookup_cookie = ptp;
}
/* Initialize a new device of our class in our clock structure. */
device_initialize(&ptp->dev);
ptp->dev.devt = ptp->devid;
ptp->dev.class = ptp_class;
ptp->dev.parent = parent;
ptp->dev.groups = ptp->pin_attr_groups;
ptp->dev.release = ptp_clock_release;
dev_set_drvdata(&ptp->dev, ptp);
dev_set_name(&ptp->dev, "ptp%d", ptp->index);
/* Create a posix clock and link it to the device. */
err = posix_clock_register(&ptp->clock, &ptp->dev);
if (err) {
if (ptp->pps_source)
pps_unregister_source(ptp->pps_source);
if (ptp->kworker)
kthread_destroy_worker(ptp->kworker);
put_device(&ptp->dev);
pr_err("failed to create posix clock\n");
return ERR_PTR(err);
}
return ptp;
no_pps:
ptp_cleanup_pin_groups(ptp);
no_pin_groups:
kfree(ptp->vclock_index);
no_mem_for_vclocks:
if (ptp->kworker)
kthread_destroy_worker(ptp->kworker);
kworker_err:
mutex_destroy(&ptp->tsevq_mux);
mutex_destroy(&ptp->pincfg_mux);
mutex_destroy(&ptp->n_vclocks_mux);
ida_free(&ptp_clocks_map, index);
no_slot:
kfree(ptp);
no_memory:
return ERR_PTR(err);
}
EXPORT_SYMBOL(ptp_clock_register);
static int unregister_vclock(struct device *dev, void *data)
{
struct ptp_clock *ptp = dev_get_drvdata(dev);
ptp_vclock_unregister(info_to_vclock(ptp->info));
return 0;
}
int ptp_clock_unregister(struct ptp_clock *ptp)
{
if (ptp_vclock_in_use(ptp)) {
device_for_each_child(&ptp->dev, NULL, unregister_vclock);
}
ptp->defunct = 1;
wake_up_interruptible(&ptp->tsev_wq);
if (ptp->kworker) {
kthread_cancel_delayed_work_sync(&ptp->aux_work);
kthread_destroy_worker(ptp->kworker);
}
/* Release the clock's resources. */
if (ptp->pps_source)
pps_unregister_source(ptp->pps_source);
posix_clock_unregister(&ptp->clock);
return 0;
}
EXPORT_SYMBOL(ptp_clock_unregister);
void ptp_clock_event(struct ptp_clock *ptp, struct ptp_clock_event *event)
{
struct pps_event_time evt;
switch (event->type) {
case PTP_CLOCK_ALARM:
break;
case PTP_CLOCK_EXTTS:
enqueue_external_timestamp(&ptp->tsevq, event);
wake_up_interruptible(&ptp->tsev_wq);
break;
case PTP_CLOCK_PPS:
pps_get_ts(&evt);
pps_event(ptp->pps_source, &evt, PTP_PPS_EVENT, NULL);
break;
case PTP_CLOCK_PPSUSR:
pps_event(ptp->pps_source, &event->pps_times,
PTP_PPS_EVENT, NULL);
break;
}
}
EXPORT_SYMBOL(ptp_clock_event);
int ptp_clock_index(struct ptp_clock *ptp)
{
return ptp->index;
}
EXPORT_SYMBOL(ptp_clock_index);
int ptp_find_pin(struct ptp_clock *ptp,
enum ptp_pin_function func, unsigned int chan)
{
struct ptp_pin_desc *pin = NULL;
int i;
for (i = 0; i < ptp->info->n_pins; i++) {
if (ptp->info->pin_config[i].func == func &&
ptp->info->pin_config[i].chan == chan) {
pin = &ptp->info->pin_config[i];
break;
}
}
return pin ? i : -1;
}
EXPORT_SYMBOL(ptp_find_pin);
int ptp_find_pin_unlocked(struct ptp_clock *ptp,
enum ptp_pin_function func, unsigned int chan)
{
int result;
mutex_lock(&ptp->pincfg_mux);
result = ptp_find_pin(ptp, func, chan);
mutex_unlock(&ptp->pincfg_mux);
return result;
}
EXPORT_SYMBOL(ptp_find_pin_unlocked);
int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay)
{
return kthread_mod_delayed_work(ptp->kworker, &ptp->aux_work, delay);
}
EXPORT_SYMBOL(ptp_schedule_worker);
void ptp_cancel_worker_sync(struct ptp_clock *ptp)
{
kthread_cancel_delayed_work_sync(&ptp->aux_work);
}
EXPORT_SYMBOL(ptp_cancel_worker_sync);
/* module operations */
static void __exit ptp_exit(void)
{
class_destroy(ptp_class);
unregister_chrdev_region(ptp_devt, MINORMASK + 1);
ida_destroy(&ptp_clocks_map);
}
static int __init ptp_init(void)
{
int err;
ptp_class = class_create("ptp");
if (IS_ERR(ptp_class)) {
pr_err("ptp: failed to allocate class\n");
return PTR_ERR(ptp_class);
}
err = alloc_chrdev_region(&ptp_devt, 0, MINORMASK + 1, "ptp");
if (err < 0) {
pr_err("ptp: failed to allocate device region\n");
goto no_region;
}
ptp_class->dev_groups = ptp_groups;
pr_info("PTP clock support registered\n");
return 0;
no_region:
class_destroy(ptp_class);
return err;
}
subsys_initcall(ptp_init);
module_exit(ptp_exit);
MODULE_AUTHOR("Richard Cochran <[email protected]>");
MODULE_DESCRIPTION("PTP clocks support");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/ptp/ptp_clock.c
|
// SPDX-License-Identifier: GPL-2.0
//
// Copyright (C) 2018 Integrated Device Technology, Inc
//
#define pr_fmt(fmt) "IDT_82p33xxx: " fmt
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/timekeeping.h>
#include <linux/bitops.h>
#include <linux/of.h>
#include <linux/mfd/rsmu.h>
#include <linux/mfd/idt82p33_reg.h>
#include "ptp_private.h"
#include "ptp_idt82p33.h"
MODULE_DESCRIPTION("Driver for IDT 82p33xxx clock devices");
MODULE_AUTHOR("IDT support-1588 <[email protected]>");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FW_FILENAME);
#define EXTTS_PERIOD_MS (95)
/* Module Parameters */
static u32 phase_snap_threshold = SNAP_THRESHOLD_NS;
module_param(phase_snap_threshold, uint, 0);
MODULE_PARM_DESC(phase_snap_threshold,
"threshold (10000ns by default) below which adjtime would use double dco");
static char *firmware;
module_param(firmware, charp, 0);
static struct ptp_pin_desc pin_config[MAX_PHC_PLL][MAX_TRIG_CLK];
static inline int idt82p33_read(struct idt82p33 *idt82p33, u16 regaddr,
u8 *buf, u16 count)
{
return regmap_bulk_read(idt82p33->regmap, regaddr, buf, count);
}
static inline int idt82p33_write(struct idt82p33 *idt82p33, u16 regaddr,
u8 *buf, u16 count)
{
return regmap_bulk_write(idt82p33->regmap, regaddr, buf, count);
}
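/*
 * Added note: ToD values are exchanged as TOD_BYTE_COUNT little-endian bytes;
 * the low four bytes hold the nanoseconds field and the remaining bytes hold
 * the seconds field, as the two converters below show.
 */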
static void idt82p33_byte_array_to_timespec(struct timespec64 *ts,
u8 buf[TOD_BYTE_COUNT])
{
time64_t sec;
s32 nsec;
u8 i;
nsec = buf[3];
for (i = 0; i < 3; i++) {
nsec <<= 8;
nsec |= buf[2 - i];
}
sec = buf[9];
for (i = 0; i < 5; i++) {
sec <<= 8;
sec |= buf[8 - i];
}
ts->tv_sec = sec;
ts->tv_nsec = nsec;
}
static void idt82p33_timespec_to_byte_array(struct timespec64 const *ts,
u8 buf[TOD_BYTE_COUNT])
{
time64_t sec;
s32 nsec;
u8 i;
nsec = ts->tv_nsec;
sec = ts->tv_sec;
for (i = 0; i < 4; i++) {
buf[i] = nsec & 0xff;
nsec >>= 8;
}
for (i = 4; i < TOD_BYTE_COUNT; i++) {
buf[i] = sec & 0xff;
sec >>= 8;
}
}
static int idt82p33_dpll_set_mode(struct idt82p33_channel *channel,
enum pll_mode mode)
{
struct idt82p33 *idt82p33 = channel->idt82p33;
u8 dpll_mode;
int err;
if (channel->pll_mode == mode)
return 0;
err = idt82p33_read(idt82p33, channel->dpll_mode_cnfg,
&dpll_mode, sizeof(dpll_mode));
if (err)
return err;
dpll_mode &= ~(PLL_MODE_MASK << PLL_MODE_SHIFT);
dpll_mode |= (mode << PLL_MODE_SHIFT);
err = idt82p33_write(idt82p33, channel->dpll_mode_cnfg,
&dpll_mode, sizeof(dpll_mode));
if (err)
return err;
channel->pll_mode = mode;
return 0;
}
static int idt82p33_set_tod_trigger(struct idt82p33_channel *channel,
u8 trigger, bool write)
{
struct idt82p33 *idt82p33 = channel->idt82p33;
int err;
u8 cfg;
if (trigger > WR_TRIG_SEL_MAX)
return -EINVAL;
err = idt82p33_read(idt82p33, channel->dpll_tod_trigger,
&cfg, sizeof(cfg));
if (err)
return err;
if (write)
trigger = (trigger << WRITE_TRIGGER_SHIFT) |
(cfg & READ_TRIGGER_MASK);
else
trigger = (trigger << READ_TRIGGER_SHIFT) |
(cfg & WRITE_TRIGGER_MASK);
return idt82p33_write(idt82p33, channel->dpll_tod_trigger,
&trigger, sizeof(trigger));
}
static int idt82p33_get_extts(struct idt82p33_channel *channel,
struct timespec64 *ts)
{
struct idt82p33 *idt82p33 = channel->idt82p33;
u8 buf[TOD_BYTE_COUNT];
int err;
err = idt82p33_read(idt82p33, channel->dpll_tod_sts, buf, sizeof(buf));
if (err)
return err;
/* Since the trigger does not clear itself, we have to poll tod_sts */
if (memcmp(buf, channel->extts_tod_sts, TOD_BYTE_COUNT) == 0)
return -EAGAIN;
memcpy(channel->extts_tod_sts, buf, TOD_BYTE_COUNT);
idt82p33_byte_array_to_timespec(ts, buf);
if (channel->discard_next_extts) {
channel->discard_next_extts = false;
return -EAGAIN;
}
return 0;
}
static int map_ref_to_tod_trig_sel(int ref, u8 *trigger)
{
int err = 0;
switch (ref) {
case 0:
*trigger = HW_TOD_TRIG_SEL_IN12;
break;
case 1:
*trigger = HW_TOD_TRIG_SEL_IN13;
break;
case 2:
*trigger = HW_TOD_TRIG_SEL_IN14;
break;
default:
err = -EINVAL;
}
return err;
}
static bool is_one_shot(u8 mask)
{
/* Treat single bit PLL masks as continuous trigger */
if ((mask == 1) || (mask == 2))
return false;
else
return true;
}
static int arm_tod_read_with_trigger(struct idt82p33_channel *channel, u8 trigger)
{
struct idt82p33 *idt82p33 = channel->idt82p33;
u8 buf[TOD_BYTE_COUNT];
int err;
/* Remember the current tod_sts before setting the trigger */
err = idt82p33_read(idt82p33, channel->dpll_tod_sts, buf, sizeof(buf));
if (err)
return err;
memcpy(channel->extts_tod_sts, buf, TOD_BYTE_COUNT);
err = idt82p33_set_tod_trigger(channel, trigger, false);
if (err)
dev_err(idt82p33->dev, "%s: err = %d", __func__, err);
return err;
}
static int idt82p33_extts_enable(struct idt82p33_channel *channel,
struct ptp_clock_request *rq, int on)
{
u8 index = rq->extts.index;
struct idt82p33 *idt82p33;
u8 mask = 1 << index;
int err = 0;
u8 old_mask;
u8 trigger;
int ref;
idt82p33 = channel->idt82p33;
old_mask = idt82p33->extts_mask;
/* Reject requests with unsupported flags */
if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
PTP_RISING_EDGE |
PTP_FALLING_EDGE |
PTP_STRICT_FLAGS))
return -EOPNOTSUPP;
/* Reject requests to enable time stamping on falling edge */
if ((rq->extts.flags & PTP_ENABLE_FEATURE) &&
(rq->extts.flags & PTP_FALLING_EDGE))
return -EOPNOTSUPP;
if (index >= MAX_PHC_PLL)
return -EINVAL;
if (on) {
/* Return if it was already enabled */
if (idt82p33->extts_mask & mask)
return 0;
/* Use the pin configured for the channel */
ref = ptp_find_pin(channel->ptp_clock, PTP_PF_EXTTS, channel->plln);
if (ref < 0) {
dev_err(idt82p33->dev, "%s: No valid pin found for Pll%d!\n",
__func__, channel->plln);
return -EBUSY;
}
err = map_ref_to_tod_trig_sel(ref, &trigger);
if (err) {
dev_err(idt82p33->dev,
"%s: Unsupported ref %d!\n", __func__, ref);
return err;
}
err = arm_tod_read_with_trigger(&idt82p33->channel[index], trigger);
if (err == 0) {
idt82p33->extts_mask |= mask;
idt82p33->channel[index].tod_trigger = trigger;
idt82p33->event_channel[index] = channel;
idt82p33->extts_single_shot = is_one_shot(idt82p33->extts_mask);
if (old_mask)
return 0;
schedule_delayed_work(&idt82p33->extts_work,
msecs_to_jiffies(EXTTS_PERIOD_MS));
}
} else {
idt82p33->extts_mask &= ~mask;
idt82p33->extts_single_shot = is_one_shot(idt82p33->extts_mask);
if (idt82p33->extts_mask == 0)
cancel_delayed_work(&idt82p33->extts_work);
}
return err;
}
static int idt82p33_extts_check_channel(struct idt82p33 *idt82p33, u8 todn)
{
struct idt82p33_channel *event_channel;
struct ptp_clock_event event;
struct timespec64 ts;
int err;
err = idt82p33_get_extts(&idt82p33->channel[todn], &ts);
if (err == 0) {
event_channel = idt82p33->event_channel[todn];
event.type = PTP_CLOCK_EXTTS;
event.index = todn;
event.timestamp = timespec64_to_ns(&ts);
ptp_clock_event(event_channel->ptp_clock,
&event);
}
return err;
}
static u8 idt82p33_extts_enable_mask(struct idt82p33_channel *channel,
u8 extts_mask, bool enable)
{
struct idt82p33 *idt82p33 = channel->idt82p33;
u8 trigger = channel->tod_trigger;
u8 mask;
int err;
int i;
if (extts_mask == 0)
return 0;
if (!enable)
cancel_delayed_work_sync(&idt82p33->extts_work);
for (i = 0; i < MAX_PHC_PLL; i++) {
mask = 1 << i;
if ((extts_mask & mask) == 0)
continue;
if (enable) {
err = arm_tod_read_with_trigger(&idt82p33->channel[i], trigger);
if (err)
dev_err(idt82p33->dev,
"%s: Arm ToD read trigger failed, err = %d",
__func__, err);
} else {
err = idt82p33_extts_check_channel(idt82p33, i);
if (err == 0 && idt82p33->extts_single_shot)
/* trigger happened so we won't re-enable it */
extts_mask &= ~mask;
}
}
if (enable)
schedule_delayed_work(&idt82p33->extts_work,
msecs_to_jiffies(EXTTS_PERIOD_MS));
return extts_mask;
}
static int _idt82p33_gettime(struct idt82p33_channel *channel,
struct timespec64 *ts)
{
struct idt82p33 *idt82p33 = channel->idt82p33;
u8 old_mask = idt82p33->extts_mask;
u8 buf[TOD_BYTE_COUNT];
u8 new_mask = 0;
int err;
/* Disable extts */
if (old_mask)
new_mask = idt82p33_extts_enable_mask(channel, old_mask, false);
err = idt82p33_set_tod_trigger(channel, HW_TOD_RD_TRIG_SEL_LSB_TOD_STS,
false);
if (err)
return err;
channel->discard_next_extts = true;
if (idt82p33->calculate_overhead_flag)
idt82p33->start_time = ktime_get_raw();
err = idt82p33_read(idt82p33, channel->dpll_tod_sts, buf, sizeof(buf));
if (err)
return err;
/* Re-enable extts */
if (new_mask)
idt82p33_extts_enable_mask(channel, new_mask, true);
idt82p33_byte_array_to_timespec(ts, buf);
return 0;
}
/*
* TOD Trigger:
* Bits[7:4] Write 0x9, MSB write
* Bits[3:0] Read 0x9, LSB read
*/
static int _idt82p33_settime(struct idt82p33_channel *channel,
struct timespec64 const *ts)
{
struct idt82p33 *idt82p33 = channel->idt82p33;
struct timespec64 local_ts = *ts;
char buf[TOD_BYTE_COUNT];
s64 dynamic_overhead_ns;
int err;
u8 i;
err = idt82p33_set_tod_trigger(channel, HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG,
true);
if (err)
return err;
channel->discard_next_extts = true;
if (idt82p33->calculate_overhead_flag) {
dynamic_overhead_ns = ktime_to_ns(ktime_get_raw())
- ktime_to_ns(idt82p33->start_time);
timespec64_add_ns(&local_ts, dynamic_overhead_ns);
idt82p33->calculate_overhead_flag = 0;
}
idt82p33_timespec_to_byte_array(&local_ts, buf);
/*
* Store the new time value.
*/
for (i = 0; i < TOD_BYTE_COUNT; i++) {
err = idt82p33_write(idt82p33, channel->dpll_tod_cnfg + i,
&buf[i], sizeof(buf[i]));
if (err)
return err;
}
return err;
}
static int _idt82p33_adjtime_immediate(struct idt82p33_channel *channel,
s64 delta_ns)
{
struct idt82p33 *idt82p33 = channel->idt82p33;
struct timespec64 ts;
s64 now_ns;
int err;
idt82p33->calculate_overhead_flag = 1;
err = _idt82p33_gettime(channel, &ts);
if (err)
return err;
now_ns = timespec64_to_ns(&ts);
now_ns += delta_ns + idt82p33->tod_write_overhead_ns;
ts = ns_to_timespec64(now_ns);
err = _idt82p33_settime(channel, &ts);
return err;
}
static int _idt82p33_adjtime_internal_triggered(struct idt82p33_channel *channel,
s64 delta_ns)
{
struct idt82p33 *idt82p33 = channel->idt82p33;
char buf[TOD_BYTE_COUNT];
struct timespec64 ts;
const u8 delay_ns = 32;
s32 remainder;
s64 ns;
int err;
err = _idt82p33_gettime(channel, &ts);
if (err)
return err;
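/*
 * Added note: the value written below is loaded on the internal 1 PPS trigger
 * (HW_TOD_TRIG_SEL_TOD_PPS), so program the time that the next safe 1 PPS edge
 * will represent: the upcoming whole second plus the requested delta and a
 * small write latency (delay_ns).
 */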
if (ts.tv_nsec > (NSEC_PER_SEC - 5 * NSEC_PER_MSEC)) {
/* Too close to the next trigger; wait it out and target the one after */
mdelay(6);
ns = (ts.tv_sec + 2) * NSEC_PER_SEC + delta_ns + delay_ns;
} else {
ns = (ts.tv_sec + 1) * NSEC_PER_SEC + delta_ns + delay_ns;
}
ts = ns_to_timespec64(ns);
idt82p33_timespec_to_byte_array(&ts, buf);
/*
* Store the new time value.
*/
err = idt82p33_write(idt82p33, channel->dpll_tod_cnfg, buf, sizeof(buf));
if (err)
return err;
/* Schedule to implement the workaround in one second */
(void)div_s64_rem(delta_ns, NSEC_PER_SEC, &remainder);
if (remainder != 0)
schedule_delayed_work(&channel->adjtime_work, HZ);
return idt82p33_set_tod_trigger(channel, HW_TOD_TRIG_SEL_TOD_PPS, true);
}
static void idt82p33_adjtime_workaround(struct work_struct *work)
{
struct idt82p33_channel *channel = container_of(work,
struct idt82p33_channel,
adjtime_work.work);
struct idt82p33 *idt82p33 = channel->idt82p33;
mutex_lock(idt82p33->lock);
/* Workaround for TOD-to-output alignment issue */
_idt82p33_adjtime_internal_triggered(channel, 0);
mutex_unlock(idt82p33->lock);
}
static int _idt82p33_adjfine(struct idt82p33_channel *channel, long scaled_ppm)
{
struct idt82p33 *idt82p33 = channel->idt82p33;
unsigned char buf[5] = {0};
int err, i;
s64 fcw;
/*
* Frequency Control Word unit is: 1.6861512 * 10^-10 ppm
*
* adjfreq:
* ppb * 10^14
* FCW = -----------
* 16861512
*
* adjfine:
* scaled_ppm * 5^12 * 10^5
* FCW = ------------------------
* 16861512 * 2^4
*/
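/*
 * Added derivation of the constants used below:
 * 5^12 * 10^5 = 2^5 * 5^17 and 16861512 * 2^4 = 2^5 * 8430756, so
 * FCW = scaled_ppm * 5^17 / 8430756 = scaled_ppm * 762939453125 / 8430756,
 * which is exactly what the two statements below compute.
 */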
fcw = scaled_ppm * 762939453125ULL;
fcw = div_s64(fcw, 8430756LL);
for (i = 0; i < 5; i++) {
buf[i] = fcw & 0xff;
fcw >>= 8;
}
err = idt82p33_dpll_set_mode(channel, PLL_MODE_DCO);
if (err)
return err;
err = idt82p33_write(idt82p33, channel->dpll_freq_cnfg,
buf, sizeof(buf));
return err;
}
/* ppb = scaled_ppm * 125 / 2^13 */
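/*
 * Added derivation: scaled_ppm carries 16 fractional bits of ppm, so
 * ppb = scaled_ppm * 1000 / 2^16 = scaled_ppm * 125 / 2^13; inverting gives
 * scaled_ppm = ppb * 2^13 / 125, which is what this helper computes.
 */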
static s32 idt82p33_ddco_scaled_ppm(long current_ppm, s32 ddco_ppb)
{
s64 scaled_ppm = div_s64(((s64)ddco_ppb << 13), 125);
s64 max_scaled_ppm = div_s64(((s64)DCO_MAX_PPB << 13), 125);
current_ppm += scaled_ppm;
if (current_ppm > max_scaled_ppm)
current_ppm = max_scaled_ppm;
else if (current_ppm < -max_scaled_ppm)
current_ppm = -max_scaled_ppm;
return (s32)current_ppm;
}
static int idt82p33_stop_ddco(struct idt82p33_channel *channel)
{
int err;
err = _idt82p33_adjfine(channel, channel->current_freq);
if (err)
return err;
channel->ddco = false;
return 0;
}
static int idt82p33_start_ddco(struct idt82p33_channel *channel, s32 delta_ns)
{
s32 current_ppm = channel->current_freq;
u32 duration_ms = MSEC_PER_SEC;
s32 ppb;
int err;
/* If the ToD correction is below DDCO_THRESHOLD_NS, skip it:
* the error introduced by the ToD adjustment procedure would be bigger
* than the required ToD correction.
*/
if (abs(delta_ns) < DDCO_THRESHOLD_NS)
return 0;
/* For most cases, keep ddco duration 1 second */
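/*
 * Added note: delta_ns is applied as a frequency offset of 'ppb' for
 * 'duration_ms'. Halving the offset while doubling the duration keeps the
 * total correction (ppb * duration) constant, so the loop below only makes the
 * offset fit within DCO_MAX_PPB.
 */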
ppb = delta_ns;
while (abs(ppb) > DCO_MAX_PPB) {
duration_ms *= 2;
ppb /= 2;
}
err = _idt82p33_adjfine(channel,
idt82p33_ddco_scaled_ppm(current_ppm, ppb));
if (err)
return err;
/* schedule the worker to cancel ddco */
ptp_schedule_worker(channel->ptp_clock,
msecs_to_jiffies(duration_ms) - 1);
channel->ddco = true;
return 0;
}
static int idt82p33_measure_one_byte_write_overhead(
struct idt82p33_channel *channel, s64 *overhead_ns)
{
struct idt82p33 *idt82p33 = channel->idt82p33;
ktime_t start, stop;
u8 trigger = 0;
s64 total_ns;
int err;
u8 i;
total_ns = 0;
*overhead_ns = 0;
for (i = 0; i < MAX_MEASURMENT_COUNT; i++) {
start = ktime_get_raw();
err = idt82p33_write(idt82p33, channel->dpll_tod_trigger,
&trigger, sizeof(trigger));
stop = ktime_get_raw();
if (err)
return err;
total_ns += ktime_to_ns(stop) - ktime_to_ns(start);
}
*overhead_ns = div_s64(total_ns, MAX_MEASURMENT_COUNT);
return err;
}
static int idt82p33_measure_one_byte_read_overhead(
struct idt82p33_channel *channel, s64 *overhead_ns)
{
struct idt82p33 *idt82p33 = channel->idt82p33;
ktime_t start, stop;
u8 trigger = 0;
s64 total_ns;
int err;
u8 i;
total_ns = 0;
*overhead_ns = 0;
for (i = 0; i < MAX_MEASURMENT_COUNT; i++) {
start = ktime_get_raw();
err = idt82p33_read(idt82p33, channel->dpll_tod_trigger,
&trigger, sizeof(trigger));
stop = ktime_get_raw();
if (err)
return err;
total_ns += ktime_to_ns(stop) - ktime_to_ns(start);
}
*overhead_ns = div_s64(total_ns, MAX_MEASURMENT_COUNT);
return err;
}
static int idt82p33_measure_tod_write_9_byte_overhead(
struct idt82p33_channel *channel)
{
struct idt82p33 *idt82p33 = channel->idt82p33;
u8 buf[TOD_BYTE_COUNT];
ktime_t start, stop;
s64 total_ns;
int err = 0;
u8 i, j;
total_ns = 0;
idt82p33->tod_write_overhead_ns = 0;
for (i = 0; i < MAX_MEASURMENT_COUNT; i++) {
start = ktime_get_raw();
/* Need one less byte for applicable overhead */
for (j = 0; j < (TOD_BYTE_COUNT - 1); j++) {
err = idt82p33_write(idt82p33,
channel->dpll_tod_cnfg + i,
&buf[i], sizeof(buf[i]));
if (err)
return err;
}
stop = ktime_get_raw();
total_ns += ktime_to_ns(stop) - ktime_to_ns(start);
}
idt82p33->tod_write_overhead_ns = div_s64(total_ns,
MAX_MEASURMENT_COUNT);
return err;
}
static int idt82p33_measure_settime_gettime_gap_overhead(
struct idt82p33_channel *channel, s64 *overhead_ns)
{
struct timespec64 ts1 = {0, 0};
struct timespec64 ts2;
int err;
*overhead_ns = 0;
err = _idt82p33_settime(channel, &ts1);
if (err)
return err;
err = _idt82p33_gettime(channel, &ts2);
if (!err)
*overhead_ns = timespec64_to_ns(&ts2) - timespec64_to_ns(&ts1);
return err;
}
static int idt82p33_measure_tod_write_overhead(struct idt82p33_channel *channel)
{
s64 trailing_overhead_ns, one_byte_write_ns, gap_ns, one_byte_read_ns;
struct idt82p33 *idt82p33 = channel->idt82p33;
int err;
idt82p33->tod_write_overhead_ns = 0;
err = idt82p33_measure_settime_gettime_gap_overhead(channel, &gap_ns);
if (err) {
dev_err(idt82p33->dev,
"Failed in %s with err %d!\n", __func__, err);
return err;
}
err = idt82p33_measure_one_byte_write_overhead(channel,
&one_byte_write_ns);
if (err)
return err;
err = idt82p33_measure_one_byte_read_overhead(channel,
&one_byte_read_ns);
if (err)
return err;
err = idt82p33_measure_tod_write_9_byte_overhead(channel);
if (err)
return err;
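/*
 * Added note: subtract the per-byte trigger-access costs measured above from
 * the settime/gettime gap, and use the remainder (the overhead trailing the
 * ToD write itself) to trim the measured 9-byte write overhead.
 */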
trailing_overhead_ns = gap_ns - 2 * one_byte_write_ns
- one_byte_read_ns;
idt82p33->tod_write_overhead_ns -= trailing_overhead_ns;
return err;
}
static int idt82p33_check_and_set_masks(struct idt82p33 *idt82p33,
u8 page,
u8 offset,
u8 val)
{
int err = 0;
if (page == PLLMASK_ADDR_HI && offset == PLLMASK_ADDR_LO) {
if ((val & 0xfc) || !(val & 0x3)) {
dev_err(idt82p33->dev,
"Invalid PLL mask 0x%x\n", val);
err = -EINVAL;
} else {
idt82p33->pll_mask = val;
}
} else if (page == PLL0_OUTMASK_ADDR_HI &&
offset == PLL0_OUTMASK_ADDR_LO) {
idt82p33->channel[0].output_mask = val;
} else if (page == PLL1_OUTMASK_ADDR_HI &&
offset == PLL1_OUTMASK_ADDR_LO) {
idt82p33->channel[1].output_mask = val;
}
return err;
}
static void idt82p33_display_masks(struct idt82p33 *idt82p33)
{
u8 mask, i;
dev_info(idt82p33->dev,
"pllmask = 0x%02x\n", idt82p33->pll_mask);
for (i = 0; i < MAX_PHC_PLL; i++) {
mask = 1 << i;
if (mask & idt82p33->pll_mask)
dev_info(idt82p33->dev,
"PLL%d output_mask = 0x%04x\n",
i, idt82p33->channel[i].output_mask);
}
}
static int idt82p33_sync_tod(struct idt82p33_channel *channel, bool enable)
{
struct idt82p33 *idt82p33 = channel->idt82p33;
u8 sync_cnfg;
int err;
err = idt82p33_read(idt82p33, channel->dpll_sync_cnfg,
&sync_cnfg, sizeof(sync_cnfg));
if (err)
return err;
sync_cnfg &= ~SYNC_TOD;
if (enable)
sync_cnfg |= SYNC_TOD;
return idt82p33_write(idt82p33, channel->dpll_sync_cnfg,
&sync_cnfg, sizeof(sync_cnfg));
}
static long idt82p33_work_handler(struct ptp_clock_info *ptp)
{
struct idt82p33_channel *channel =
container_of(ptp, struct idt82p33_channel, caps);
struct idt82p33 *idt82p33 = channel->idt82p33;
mutex_lock(idt82p33->lock);
(void)idt82p33_stop_ddco(channel);
mutex_unlock(idt82p33->lock);
/* Return a negative value here to not reschedule */
return -1;
}
static int idt82p33_output_enable(struct idt82p33_channel *channel,
bool enable, unsigned int outn)
{
struct idt82p33 *idt82p33 = channel->idt82p33;
int err;
u8 val;
err = idt82p33_read(idt82p33, OUT_MUX_CNFG(outn), &val, sizeof(val));
if (err)
return err;
if (enable)
val &= ~SQUELCH_ENABLE;
else
val |= SQUELCH_ENABLE;
return idt82p33_write(idt82p33, OUT_MUX_CNFG(outn), &val, sizeof(val));
}
static int idt82p33_perout_enable(struct idt82p33_channel *channel,
bool enable,
struct ptp_perout_request *perout)
{
/* Enable/disable individual output instead */
return idt82p33_output_enable(channel, enable, perout->index);
}
static int idt82p33_enable_tod(struct idt82p33_channel *channel)
{
struct idt82p33 *idt82p33 = channel->idt82p33;
struct timespec64 ts = {0, 0};
int err;
err = idt82p33_measure_tod_write_overhead(channel);
if (err) {
dev_err(idt82p33->dev,
"Failed in %s with err %d!\n", __func__, err);
return err;
}
err = _idt82p33_settime(channel, &ts);
if (err)
return err;
return idt82p33_sync_tod(channel, true);
}
static void idt82p33_ptp_clock_unregister_all(struct idt82p33 *idt82p33)
{
struct idt82p33_channel *channel;
u8 i;
for (i = 0; i < MAX_PHC_PLL; i++) {
channel = &idt82p33->channel[i];
cancel_delayed_work_sync(&channel->adjtime_work);
if (channel->ptp_clock)
ptp_clock_unregister(channel->ptp_clock);
}
}
static int idt82p33_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
struct idt82p33_channel *channel =
container_of(ptp, struct idt82p33_channel, caps);
struct idt82p33 *idt82p33 = channel->idt82p33;
int err = -EOPNOTSUPP;
mutex_lock(idt82p33->lock);
switch (rq->type) {
case PTP_CLK_REQ_PEROUT:
if (!on)
err = idt82p33_perout_enable(channel, false,
&rq->perout);
/* Only accept a 1-PPS aligned to the second. */
else if (rq->perout.start.nsec || rq->perout.period.sec != 1 ||
rq->perout.period.nsec)
err = -ERANGE;
else
err = idt82p33_perout_enable(channel, true,
&rq->perout);
break;
case PTP_CLK_REQ_EXTTS:
err = idt82p33_extts_enable(channel, rq, on);
break;
default:
break;
}
mutex_unlock(idt82p33->lock);
if (err)
dev_err(idt82p33->dev,
"Failed in %s with err %d!\n", __func__, err);
return err;
}
static s32 idt82p33_getmaxphase(__always_unused struct ptp_clock_info *ptp)
{
return WRITE_PHASE_OFFSET_LIMIT;
}
static int idt82p33_adjwritephase(struct ptp_clock_info *ptp, s32 offset_ns)
{
struct idt82p33_channel *channel =
container_of(ptp, struct idt82p33_channel, caps);
struct idt82p33 *idt82p33 = channel->idt82p33;
s64 offset_regval;
u8 val[4] = {0};
int err;
/* Convert from phaseoffset_fs to register value */
offset_regval = div_s64((s64)(-offset_ns) * 1000000000ll,
IDT_T0DPLL_PHASE_RESOL);
val[0] = offset_regval & 0xFF;
val[1] = (offset_regval >> 8) & 0xFF;
val[2] = (offset_regval >> 16) & 0xFF;
val[3] = (offset_regval >> 24) & 0x1F;
val[3] |= PH_OFFSET_EN;
mutex_lock(idt82p33->lock);
err = idt82p33_dpll_set_mode(channel, PLL_MODE_WPH);
if (err) {
dev_err(idt82p33->dev,
"Failed in %s with err %d!\n", __func__, err);
goto out;
}
err = idt82p33_write(idt82p33, channel->dpll_phase_cnfg, val,
sizeof(val));
out:
mutex_unlock(idt82p33->lock);
return err;
}
static int idt82p33_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct idt82p33_channel *channel =
container_of(ptp, struct idt82p33_channel, caps);
struct idt82p33 *idt82p33 = channel->idt82p33;
int err;
if (channel->ddco)
return 0;
if (scaled_ppm == channel->current_freq)
return 0;
mutex_lock(idt82p33->lock);
err = _idt82p33_adjfine(channel, scaled_ppm);
if (err == 0)
channel->current_freq = scaled_ppm;
mutex_unlock(idt82p33->lock);
if (err)
dev_err(idt82p33->dev,
"Failed in %s with err %d!\n", __func__, err);
return err;
}
static int idt82p33_adjtime(struct ptp_clock_info *ptp, s64 delta_ns)
{
struct idt82p33_channel *channel =
container_of(ptp, struct idt82p33_channel, caps);
struct idt82p33 *idt82p33 = channel->idt82p33;
int err;
if (channel->ddco)
return -EBUSY;
mutex_lock(idt82p33->lock);
if (abs(delta_ns) < phase_snap_threshold) {
err = idt82p33_start_ddco(channel, delta_ns);
mutex_unlock(idt82p33->lock);
return err;
}
/* Use more accurate internal 1pps triggered write first */
err = _idt82p33_adjtime_internal_triggered(channel, delta_ns);
if (err && delta_ns > IMMEDIATE_SNAP_THRESHOLD_NS)
err = _idt82p33_adjtime_immediate(channel, delta_ns);
mutex_unlock(idt82p33->lock);
if (err)
dev_err(idt82p33->dev,
"Failed in %s with err %d!\n", __func__, err);
return err;
}
static int idt82p33_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
struct idt82p33_channel *channel =
container_of(ptp, struct idt82p33_channel, caps);
struct idt82p33 *idt82p33 = channel->idt82p33;
int err;
mutex_lock(idt82p33->lock);
err = _idt82p33_gettime(channel, ts);
mutex_unlock(idt82p33->lock);
if (err)
dev_err(idt82p33->dev,
"Failed in %s with err %d!\n", __func__, err);
return err;
}
static int idt82p33_settime(struct ptp_clock_info *ptp,
const struct timespec64 *ts)
{
struct idt82p33_channel *channel =
container_of(ptp, struct idt82p33_channel, caps);
struct idt82p33 *idt82p33 = channel->idt82p33;
int err;
mutex_lock(idt82p33->lock);
err = _idt82p33_settime(channel, ts);
mutex_unlock(idt82p33->lock);
if (err)
dev_err(idt82p33->dev,
"Failed in %s with err %d!\n", __func__, err);
return err;
}
static int idt82p33_channel_init(struct idt82p33 *idt82p33, u32 index)
{
struct idt82p33_channel *channel = &idt82p33->channel[index];
switch (index) {
case 0:
channel->dpll_tod_cnfg = DPLL1_TOD_CNFG;
channel->dpll_tod_trigger = DPLL1_TOD_TRIGGER;
channel->dpll_tod_sts = DPLL1_TOD_STS;
channel->dpll_mode_cnfg = DPLL1_OPERATING_MODE_CNFG;
channel->dpll_freq_cnfg = DPLL1_HOLDOVER_FREQ_CNFG;
channel->dpll_phase_cnfg = DPLL1_PHASE_OFFSET_CNFG;
channel->dpll_sync_cnfg = DPLL1_SYNC_EDGE_CNFG;
channel->dpll_input_mode_cnfg = DPLL1_INPUT_MODE_CNFG;
break;
case 1:
channel->dpll_tod_cnfg = DPLL2_TOD_CNFG;
channel->dpll_tod_trigger = DPLL2_TOD_TRIGGER;
channel->dpll_tod_sts = DPLL2_TOD_STS;
channel->dpll_mode_cnfg = DPLL2_OPERATING_MODE_CNFG;
channel->dpll_freq_cnfg = DPLL2_HOLDOVER_FREQ_CNFG;
channel->dpll_phase_cnfg = DPLL2_PHASE_OFFSET_CNFG;
channel->dpll_sync_cnfg = DPLL2_SYNC_EDGE_CNFG;
channel->dpll_input_mode_cnfg = DPLL2_INPUT_MODE_CNFG;
break;
default:
return -EINVAL;
}
channel->plln = index;
channel->current_freq = 0;
channel->idt82p33 = idt82p33;
INIT_DELAYED_WORK(&channel->adjtime_work, idt82p33_adjtime_workaround);
return 0;
}
static int idt82p33_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
enum ptp_pin_function func, unsigned int chan)
{
switch (func) {
case PTP_PF_NONE:
case PTP_PF_EXTTS:
break;
case PTP_PF_PEROUT:
case PTP_PF_PHYSYNC:
return -1;
}
return 0;
}
static void idt82p33_caps_init(u32 index, struct ptp_clock_info *caps,
struct ptp_pin_desc *pin_cfg, u8 max_pins)
{
struct ptp_pin_desc *ppd;
int i;
caps->owner = THIS_MODULE;
caps->max_adj = DCO_MAX_PPB;
caps->n_per_out = MAX_PER_OUT;
caps->n_ext_ts = MAX_PHC_PLL;
caps->n_pins = max_pins;
caps->adjphase = idt82p33_adjwritephase;
caps->getmaxphase = idt82p33_getmaxphase;
caps->adjfine = idt82p33_adjfine;
caps->adjtime = idt82p33_adjtime;
caps->gettime64 = idt82p33_gettime;
caps->settime64 = idt82p33_settime;
caps->enable = idt82p33_enable;
caps->verify = idt82p33_verify_pin;
caps->do_aux_work = idt82p33_work_handler;
snprintf(caps->name, sizeof(caps->name), "IDT 82P33 PLL%u", index);
caps->pin_config = pin_cfg;
for (i = 0; i < max_pins; ++i) {
ppd = &pin_cfg[i];
ppd->index = i;
ppd->func = PTP_PF_NONE;
ppd->chan = index;
snprintf(ppd->name, sizeof(ppd->name), "in%d", 12 + i);
}
}
static int idt82p33_enable_channel(struct idt82p33 *idt82p33, u32 index)
{
struct idt82p33_channel *channel;
int err;
if (index >= MAX_PHC_PLL)
return -EINVAL;
channel = &idt82p33->channel[index];
err = idt82p33_channel_init(idt82p33, index);
if (err) {
dev_err(idt82p33->dev,
"Channel_init failed in %s with err %d!\n",
__func__, err);
return err;
}
idt82p33_caps_init(index, &channel->caps,
pin_config[index], MAX_TRIG_CLK);
channel->ptp_clock = ptp_clock_register(&channel->caps, NULL);
if (IS_ERR(channel->ptp_clock)) {
err = PTR_ERR(channel->ptp_clock);
channel->ptp_clock = NULL;
return err;
}
if (!channel->ptp_clock)
return -ENOTSUPP;
err = idt82p33_dpll_set_mode(channel, PLL_MODE_DCO);
if (err) {
dev_err(idt82p33->dev,
"Dpll_set_mode failed in %s with err %d!\n",
__func__, err);
return err;
}
err = idt82p33_enable_tod(channel);
if (err) {
dev_err(idt82p33->dev,
"Enable_tod failed in %s with err %d!\n",
__func__, err);
return err;
}
dev_info(idt82p33->dev, "PLL%d registered as ptp%d\n",
index, channel->ptp_clock->index);
return 0;
}
static int idt82p33_reset(struct idt82p33 *idt82p33, bool cold)
{
int err;
u8 cfg = SOFT_RESET_EN;
if (cold)
goto cold_reset;
err = idt82p33_read(idt82p33, REG_SOFT_RESET, &cfg, sizeof(cfg));
if (err) {
dev_err(idt82p33->dev,
"Soft reset failed with err %d!\n", err);
return err;
}
cfg |= SOFT_RESET_EN;
cold_reset:
err = idt82p33_write(idt82p33, REG_SOFT_RESET, &cfg, sizeof(cfg));
if (err)
dev_err(idt82p33->dev,
"Cold reset failed with err %d!\n", err);
return err;
}
static int idt82p33_load_firmware(struct idt82p33 *idt82p33)
{
char fname[128] = FW_FILENAME;
const struct firmware *fw;
struct idt82p33_fwrc *rec;
u8 loaddr, page, val;
int err;
s32 len;
if (firmware) /* module parameter */
snprintf(fname, sizeof(fname), "%s", firmware);
dev_info(idt82p33->dev, "requesting firmware '%s'\n", fname);
err = request_firmware(&fw, fname, idt82p33->dev);
if (err) {
dev_err(idt82p33->dev,
"Failed in %s with err %d!\n", __func__, err);
return err;
}
dev_dbg(idt82p33->dev, "firmware size %zu bytes\n", fw->size);
rec = (struct idt82p33_fwrc *) fw->data;
for (len = fw->size; len > 0; len -= sizeof(*rec)) {
if (rec->reserved) {
dev_err(idt82p33->dev,
"bad firmware, reserved field non-zero\n");
err = -EINVAL;
} else {
val = rec->value;
loaddr = rec->loaddr;
page = rec->hiaddr;
rec++;
err = idt82p33_check_and_set_masks(idt82p33, page,
loaddr, val);
}
if (err == 0) {
/* Page size 128, last 4 bytes of page skipped */
if (loaddr > 0x7b)
continue;
err = idt82p33_write(idt82p33, REG_ADDR(page, loaddr),
&val, sizeof(val));
}
if (err)
goto out;
}
idt82p33_display_masks(idt82p33);
out:
release_firmware(fw);
return err;
}
static void idt82p33_extts_check(struct work_struct *work)
{
struct idt82p33 *idt82p33 = container_of(work, struct idt82p33,
extts_work.work);
struct idt82p33_channel *channel;
int err;
u8 mask;
int i;
if (idt82p33->extts_mask == 0)
return;
mutex_lock(idt82p33->lock);
for (i = 0; i < MAX_PHC_PLL; i++) {
mask = 1 << i;
if ((idt82p33->extts_mask & mask) == 0)
continue;
err = idt82p33_extts_check_channel(idt82p33, i);
if (err == 0) {
/* A one-shot trigger has fired and been consumed; clear the mask so it is not polled again */
if (idt82p33->extts_single_shot) {
idt82p33->extts_mask &= ~mask;
} else {
/* Re-arm */
channel = &idt82p33->channel[i];
arm_tod_read_with_trigger(channel, channel->tod_trigger);
}
}
}
if (idt82p33->extts_mask)
schedule_delayed_work(&idt82p33->extts_work,
msecs_to_jiffies(EXTTS_PERIOD_MS));
mutex_unlock(idt82p33->lock);
}
static int idt82p33_probe(struct platform_device *pdev)
{
struct rsmu_ddata *ddata = dev_get_drvdata(pdev->dev.parent);
struct idt82p33 *idt82p33;
int err;
u8 i;
idt82p33 = devm_kzalloc(&pdev->dev,
sizeof(struct idt82p33), GFP_KERNEL);
if (!idt82p33)
return -ENOMEM;
idt82p33->dev = &pdev->dev;
idt82p33->mfd = pdev->dev.parent;
idt82p33->lock = &ddata->lock;
idt82p33->regmap = ddata->regmap;
idt82p33->tod_write_overhead_ns = 0;
idt82p33->calculate_overhead_flag = 0;
idt82p33->pll_mask = DEFAULT_PLL_MASK;
idt82p33->channel[0].output_mask = DEFAULT_OUTPUT_MASK_PLL0;
idt82p33->channel[1].output_mask = DEFAULT_OUTPUT_MASK_PLL1;
idt82p33->extts_mask = 0;
INIT_DELAYED_WORK(&idt82p33->extts_work, idt82p33_extts_check);
mutex_lock(idt82p33->lock);
/* cold reset before loading firmware */
idt82p33_reset(idt82p33, true);
err = idt82p33_load_firmware(idt82p33);
if (err)
dev_warn(idt82p33->dev,
"loading firmware failed with %d\n", err);
/* soft reset after loading firmware */
idt82p33_reset(idt82p33, false);
if (idt82p33->pll_mask) {
for (i = 0; i < MAX_PHC_PLL; i++) {
if (idt82p33->pll_mask & (1 << i))
err = idt82p33_enable_channel(idt82p33, i);
else
err = idt82p33_channel_init(idt82p33, i);
if (err) {
dev_err(idt82p33->dev,
"Failed in %s with err %d!\n",
__func__, err);
break;
}
}
} else {
dev_err(idt82p33->dev,
"no PLLs flagged as PHCs, nothing to do\n");
err = -ENODEV;
}
mutex_unlock(idt82p33->lock);
if (err) {
idt82p33_ptp_clock_unregister_all(idt82p33);
return err;
}
platform_set_drvdata(pdev, idt82p33);
return 0;
}
static int idt82p33_remove(struct platform_device *pdev)
{
struct idt82p33 *idt82p33 = platform_get_drvdata(pdev);
cancel_delayed_work_sync(&idt82p33->extts_work);
idt82p33_ptp_clock_unregister_all(idt82p33);
return 0;
}
static struct platform_driver idt82p33_driver = {
.driver = {
.name = "82p33x1x-phc",
},
.probe = idt82p33_probe,
.remove = idt82p33_remove,
};
module_platform_driver(idt82p33_driver);
|
linux-master
|
drivers/ptp/ptp_idt82p33.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Virtual PTP 1588 clock for use with KVM guests
*
* Copyright (C) 2017 Red Hat Inc.
*/
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/ptp_kvm.h>
#include <uapi/linux/kvm_para.h>
#include <asm/kvm_para.h>
#include <uapi/asm/kvm_para.h>
#include <linux/ptp_clock_kernel.h>
struct kvm_ptp_clock {
struct ptp_clock *ptp_clock;
struct ptp_clock_info caps;
};
static DEFINE_SPINLOCK(kvm_ptp_lock);
static int ptp_kvm_get_time_fn(ktime_t *device_time,
struct system_counterval_t *system_counter,
void *ctx)
{
long ret;
u64 cycle;
struct timespec64 tspec;
struct clocksource *cs;
spin_lock(&kvm_ptp_lock);
preempt_disable_notrace();
ret = kvm_arch_ptp_get_crosststamp(&cycle, &tspec, &cs);
if (ret) {
spin_unlock(&kvm_ptp_lock);
preempt_enable_notrace();
return ret;
}
preempt_enable_notrace();
system_counter->cycles = cycle;
system_counter->cs = cs;
*device_time = timespec64_to_ktime(tspec);
spin_unlock(&kvm_ptp_lock);
return 0;
}
static int ptp_kvm_getcrosststamp(struct ptp_clock_info *ptp,
struct system_device_crosststamp *xtstamp)
{
return get_device_system_crosststamp(ptp_kvm_get_time_fn, NULL,
NULL, xtstamp);
}
/*
* PTP clock operations
*/
static int ptp_kvm_adjfine(struct ptp_clock_info *ptp, long delta)
{
return -EOPNOTSUPP;
}
static int ptp_kvm_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
return -EOPNOTSUPP;
}
static int ptp_kvm_settime(struct ptp_clock_info *ptp,
const struct timespec64 *ts)
{
return -EOPNOTSUPP;
}
static int ptp_kvm_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
long ret;
struct timespec64 tspec;
spin_lock(&kvm_ptp_lock);
ret = kvm_arch_ptp_get_clock(&tspec);
if (ret) {
spin_unlock(&kvm_ptp_lock);
return ret;
}
spin_unlock(&kvm_ptp_lock);
memcpy(ts, &tspec, sizeof(struct timespec64));
return 0;
}
static int ptp_kvm_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
return -EOPNOTSUPP;
}
static const struct ptp_clock_info ptp_kvm_caps = {
.owner = THIS_MODULE,
.name = "KVM virtual PTP",
.max_adj = 0,
.n_ext_ts = 0,
.n_pins = 0,
.pps = 0,
.adjfine = ptp_kvm_adjfine,
.adjtime = ptp_kvm_adjtime,
.gettime64 = ptp_kvm_gettime,
.settime64 = ptp_kvm_settime,
.enable = ptp_kvm_enable,
.getcrosststamp = ptp_kvm_getcrosststamp,
};
/* module operations */
static struct kvm_ptp_clock kvm_ptp_clock;
static void __exit ptp_kvm_exit(void)
{
ptp_clock_unregister(kvm_ptp_clock.ptp_clock);
kvm_arch_ptp_exit();
}
static int __init ptp_kvm_init(void)
{
long ret;
ret = kvm_arch_ptp_init();
if (ret) {
if (ret != -EOPNOTSUPP)
pr_err("fail to initialize ptp_kvm");
return ret;
}
kvm_ptp_clock.caps = ptp_kvm_caps;
kvm_ptp_clock.ptp_clock = ptp_clock_register(&kvm_ptp_clock.caps, NULL);
return PTR_ERR_OR_ZERO(kvm_ptp_clock.ptp_clock);
}
module_init(ptp_kvm_init);
module_exit(ptp_kvm_exit);
MODULE_AUTHOR("Marcelo Tosatti <[email protected]>");
MODULE_DESCRIPTION("PTP clock using KVMCLOCK");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/ptp/ptp_kvm_common.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* DFL device driver for Time-of-Day (ToD) private feature
*
* Copyright (C) 2023 Intel Corporation
*/
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dfl.h>
#include <linux/gcd.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/spinlock.h>
#include <linux/units.h>
#define FME_FEATURE_ID_TOD 0x22
/* ToD clock register space. */
#define TOD_CLK_FREQ 0x038
/*
* The ToD timestamp registers must be read in the order TOD_NANOSEC, TOD_SECONDSL
* and then TOD_SECONDSH, because the hardware takes a snapshot whenever the
* TOD_NANOSEC register is read.
*
* The ToD IP requires writing the registers in the reverse order of the read
* sequence. The new timestamp takes effect when the TOD_NANOSEC register is
* written, so the write sequence is TOD_SECONDSH, TOD_SECONDSL and then TOD_NANOSEC.
*/
#define TOD_SECONDSH 0x100
#define TOD_SECONDSL 0x104
#define TOD_NANOSEC 0x108
#define TOD_PERIOD 0x110
#define TOD_ADJUST_PERIOD 0x114
#define TOD_ADJUST_COUNT 0x118
#define TOD_DRIFT_ADJUST 0x11c
#define TOD_DRIFT_ADJUST_RATE 0x120
#define PERIOD_FRAC_OFFSET 16
#define SECONDS_MSB GENMASK_ULL(47, 32)
#define SECONDS_LSB GENMASK_ULL(31, 0)
#define TOD_SECONDSH_SEC_MSB GENMASK_ULL(15, 0)
#define CAL_SECONDS(m, l) ((FIELD_GET(TOD_SECONDSH_SEC_MSB, (m)) << 32) | (l))
#define TOD_PERIOD_MASK GENMASK_ULL(19, 0)
#define TOD_PERIOD_MAX FIELD_MAX(TOD_PERIOD_MASK)
#define TOD_PERIOD_MIN 0
#define TOD_DRIFT_ADJUST_MASK GENMASK_ULL(15, 0)
#define TOD_DRIFT_ADJUST_FNS_MAX FIELD_MAX(TOD_DRIFT_ADJUST_MASK)
#define TOD_DRIFT_ADJUST_RATE_MAX TOD_DRIFT_ADJUST_FNS_MAX
#define TOD_ADJUST_COUNT_MASK GENMASK_ULL(19, 0)
#define TOD_ADJUST_COUNT_MAX FIELD_MAX(TOD_ADJUST_COUNT_MASK)
#define TOD_ADJUST_INTERVAL_US 10
#define TOD_ADJUST_MS \
(((TOD_PERIOD_MAX >> 16) + 1) * (TOD_ADJUST_COUNT_MAX + 1))
#define TOD_ADJUST_MS_MAX (TOD_ADJUST_MS / MICRO)
#define TOD_ADJUST_MAX_US (TOD_ADJUST_MS_MAX * USEC_PER_MSEC)
#define TOD_MAX_ADJ (500 * MEGA)
struct dfl_tod {
struct ptp_clock_info ptp_clock_ops;
struct device *dev;
struct ptp_clock *ptp_clock;
/* ToD Clock address space */
void __iomem *tod_ctrl;
/* ToD clock registers protection */
spinlock_t tod_lock;
};
/*
* A fine ToD HW clock offset adjustment. To perform the fine offset adjustment, the
* adjust_period and adjust_count arguments are used to update the TOD_ADJUST_PERIOD
* and TOD_ADJUST_COUNT registers in hardware. The dt->tod_lock spinlock must be
* held when calling this function.
*/
static int fine_adjust_tod_clock(struct dfl_tod *dt, u32 adjust_period,
u32 adjust_count)
{
void __iomem *base = dt->tod_ctrl;
u32 val;
writel(adjust_period, base + TOD_ADJUST_PERIOD);
writel(adjust_count, base + TOD_ADJUST_COUNT);
/* Wait for present offset adjustment update to complete */
return readl_poll_timeout_atomic(base + TOD_ADJUST_COUNT, val, !val, TOD_ADJUST_INTERVAL_US,
TOD_ADJUST_MAX_US);
}
/*
* A coarse ToD HW clock offset adjustment. The coarse adjustment is performed by
* adding or subtracting the delta value to or from the current ToD HW clock time.
*/
static int coarse_adjust_tod_clock(struct dfl_tod *dt, s64 delta)
{
u32 seconds_msb, seconds_lsb, nanosec;
void __iomem *base = dt->tod_ctrl;
u64 seconds, now;
if (delta == 0)
return 0;
nanosec = readl(base + TOD_NANOSEC);
seconds_lsb = readl(base + TOD_SECONDSL);
seconds_msb = readl(base + TOD_SECONDSH);
/* Calculate new time */
seconds = CAL_SECONDS(seconds_msb, seconds_lsb);
now = seconds * NSEC_PER_SEC + nanosec + delta;
seconds = div_u64_rem(now, NSEC_PER_SEC, &nanosec);
seconds_msb = FIELD_GET(SECONDS_MSB, seconds);
seconds_lsb = FIELD_GET(SECONDS_LSB, seconds);
writel(seconds_msb, base + TOD_SECONDSH);
writel(seconds_lsb, base + TOD_SECONDSL);
writel(nanosec, base + TOD_NANOSEC);
return 0;
}
static int dfl_tod_adjust_fine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct dfl_tod *dt = container_of(ptp, struct dfl_tod, ptp_clock_ops);
u32 tod_period, tod_rem, tod_drift_adjust_fns, tod_drift_adjust_rate;
void __iomem *base = dt->tod_ctrl;
unsigned long flags, rate;
u64 ppb;
/* Get the clock rate from clock frequency register offset */
rate = readl(base + TOD_CLK_FREQ);
/* add GIGA as nominal ppb */
ppb = scaled_ppm_to_ppb(scaled_ppm) + GIGA;
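/*
 * Added note: TOD_PERIOD holds the ToD increment per clock cycle in nanoseconds
 * as a fixed-point value with PERIOD_FRAC_OFFSET (16) fractional bits, i.e.
 * (10^9 + offset in ppb) / rate scaled by 2^16, so the ToD advances by 'ppb'
 * nanoseconds per second of the reference clock. The division remainder is
 * handled by the drift adjustment below.
 */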
tod_period = div_u64_rem(ppb << PERIOD_FRAC_OFFSET, rate, &tod_rem);
if (tod_period > TOD_PERIOD_MAX)
return -ERANGE;
/*
* The ToD drift is adjusted periodically by adding a drift_adjust_fns
* correction value every drift_adjust_rate clock cycles.
*/
tod_drift_adjust_fns = tod_rem / gcd(tod_rem, rate);
tod_drift_adjust_rate = rate / gcd(tod_rem, rate);
while ((tod_drift_adjust_fns > TOD_DRIFT_ADJUST_FNS_MAX) ||
(tod_drift_adjust_rate > TOD_DRIFT_ADJUST_RATE_MAX)) {
tod_drift_adjust_fns >>= 1;
tod_drift_adjust_rate >>= 1;
}
if (tod_drift_adjust_fns == 0)
tod_drift_adjust_rate = 0;
spin_lock_irqsave(&dt->tod_lock, flags);
writel(tod_period, base + TOD_PERIOD);
writel(0, base + TOD_ADJUST_PERIOD);
writel(0, base + TOD_ADJUST_COUNT);
writel(tod_drift_adjust_fns, base + TOD_DRIFT_ADJUST);
writel(tod_drift_adjust_rate, base + TOD_DRIFT_ADJUST_RATE);
spin_unlock_irqrestore(&dt->tod_lock, flags);
return 0;
}
static int dfl_tod_adjust_time(struct ptp_clock_info *ptp, s64 delta)
{
struct dfl_tod *dt = container_of(ptp, struct dfl_tod, ptp_clock_ops);
u32 period, diff, rem, rem_period, adj_period;
void __iomem *base = dt->tod_ctrl;
unsigned long flags;
bool neg_adj;
u64 count;
int ret;
neg_adj = delta < 0;
if (neg_adj)
delta = -delta;
spin_lock_irqsave(&dt->tod_lock, flags);
/*
* Get the maximum possible value of the Period register offset
* adjustment in nanoseconds scale. This depends on the current
* Period register setting and the maximum and minimum possible
* values of the Period register.
*/
period = readl(base + TOD_PERIOD);
if (neg_adj) {
diff = (period - TOD_PERIOD_MIN) >> PERIOD_FRAC_OFFSET;
adj_period = period - (diff << PERIOD_FRAC_OFFSET);
count = div_u64_rem(delta, diff, &rem);
rem_period = period - (rem << PERIOD_FRAC_OFFSET);
} else {
diff = (TOD_PERIOD_MAX - period) >> PERIOD_FRAC_OFFSET;
adj_period = period + (diff << PERIOD_FRAC_OFFSET);
count = div_u64_rem(delta, diff, &rem);
rem_period = period + (rem << PERIOD_FRAC_OFFSET);
}
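/*
 * Added note: each cycle run at adj_period moves the ToD by 'diff' ns relative
 * to the nominal period, so 'count' such cycles absorb most of the delta; one
 * extra cycle at rem_period covers the remaining 'rem' ns.
 */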
ret = 0;
if (count > TOD_ADJUST_COUNT_MAX) {
ret = coarse_adjust_tod_clock(dt, delta);
} else {
/* Adjust the period by count cycles to adjust the time */
if (count)
ret = fine_adjust_tod_clock(dt, adj_period, count);
/* If there is a remainder, adjust the period for an additional cycle */
if (rem)
ret = fine_adjust_tod_clock(dt, rem_period, 1);
}
spin_unlock_irqrestore(&dt->tod_lock, flags);
return ret;
}
static int dfl_tod_get_timex(struct ptp_clock_info *ptp, struct timespec64 *ts,
struct ptp_system_timestamp *sts)
{
struct dfl_tod *dt = container_of(ptp, struct dfl_tod, ptp_clock_ops);
u32 seconds_msb, seconds_lsb, nanosec;
void __iomem *base = dt->tod_ctrl;
unsigned long flags;
u64 seconds;
spin_lock_irqsave(&dt->tod_lock, flags);
ptp_read_system_prets(sts);
nanosec = readl(base + TOD_NANOSEC);
seconds_lsb = readl(base + TOD_SECONDSL);
seconds_msb = readl(base + TOD_SECONDSH);
ptp_read_system_postts(sts);
spin_unlock_irqrestore(&dt->tod_lock, flags);
seconds = CAL_SECONDS(seconds_msb, seconds_lsb);
ts->tv_nsec = nanosec;
ts->tv_sec = seconds;
return 0;
}
static int dfl_tod_set_time(struct ptp_clock_info *ptp,
const struct timespec64 *ts)
{
struct dfl_tod *dt = container_of(ptp, struct dfl_tod, ptp_clock_ops);
u32 seconds_msb = FIELD_GET(SECONDS_MSB, ts->tv_sec);
u32 seconds_lsb = FIELD_GET(SECONDS_LSB, ts->tv_sec);
u32 nanosec = FIELD_GET(SECONDS_LSB, ts->tv_nsec);
void __iomem *base = dt->tod_ctrl;
unsigned long flags;
spin_lock_irqsave(&dt->tod_lock, flags);
writel(seconds_msb, base + TOD_SECONDSH);
writel(seconds_lsb, base + TOD_SECONDSL);
writel(nanosec, base + TOD_NANOSEC);
spin_unlock_irqrestore(&dt->tod_lock, flags);
return 0;
}
static struct ptp_clock_info dfl_tod_clock_ops = {
.owner = THIS_MODULE,
.name = "dfl_tod",
.max_adj = TOD_MAX_ADJ,
.adjfine = dfl_tod_adjust_fine,
.adjtime = dfl_tod_adjust_time,
.gettimex64 = dfl_tod_get_timex,
.settime64 = dfl_tod_set_time,
};
static int dfl_tod_probe(struct dfl_device *ddev)
{
struct device *dev = &ddev->dev;
struct dfl_tod *dt;
dt = devm_kzalloc(dev, sizeof(*dt), GFP_KERNEL);
if (!dt)
return -ENOMEM;
dt->tod_ctrl = devm_ioremap_resource(dev, &ddev->mmio_res);
if (IS_ERR(dt->tod_ctrl))
return PTR_ERR(dt->tod_ctrl);
dt->dev = dev;
spin_lock_init(&dt->tod_lock);
dev_set_drvdata(dev, dt);
dt->ptp_clock_ops = dfl_tod_clock_ops;
dt->ptp_clock = ptp_clock_register(&dt->ptp_clock_ops, dev);
if (IS_ERR(dt->ptp_clock))
return dev_err_probe(dt->dev, PTR_ERR(dt->ptp_clock),
"Unable to register PTP clock\n");
return 0;
}
static void dfl_tod_remove(struct dfl_device *ddev)
{
struct dfl_tod *dt = dev_get_drvdata(&ddev->dev);
ptp_clock_unregister(dt->ptp_clock);
}
static const struct dfl_device_id dfl_tod_ids[] = {
{ FME_ID, FME_FEATURE_ID_TOD },
{ }
};
MODULE_DEVICE_TABLE(dfl, dfl_tod_ids);
static struct dfl_driver dfl_tod_driver = {
.drv = {
.name = "dfl-tod",
},
.id_table = dfl_tod_ids,
.probe = dfl_tod_probe,
.remove = dfl_tod_remove,
};
module_dfl_driver(dfl_tod_driver);
MODULE_DESCRIPTION("FPGA DFL ToD driver");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/ptp/ptp_dfl_tod.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2023 NXP
*
* Mock-up PTP Hardware Clock driver for virtual network devices
*
* Create a PTP clock which offers PTP time manipulation operations
* using a timecounter/cyclecounter on top of CLOCK_MONOTONIC_RAW.
*/
#include <linux/ptp_clock_kernel.h>
#include <linux/ptp_mock.h>
#include <linux/timecounter.h>
/* Clamp scaled_ppm between -2,097,152,000 and 2,097,152,000,
* and thus "adj" between -68,719,476 and 68,719,476
*/
#define MOCK_PHC_MAX_ADJ_PPB 32000000
/* Timestamps from ktime_get_raw() have 1 ns resolution, so the scale factor
* (MULT >> SHIFT) needs to be 1. Pick SHIFT as 31 bits, which translates
* MULT(freq 0) into 0x80000000.
*/
#define MOCK_PHC_CC_SHIFT 31
#define MOCK_PHC_CC_MULT (1 << MOCK_PHC_CC_SHIFT)
#define MOCK_PHC_FADJ_SHIFT 9
#define MOCK_PHC_FADJ_DENOMINATOR 15625ULL
/* The largest cycle_delta that timecounter_read_delta() can handle without a
* 64-bit overflow during the multiplication with cc->mult, given the max "adj"
* we permit, is ~8.3 seconds. Make sure readouts are more frequent than that.
*/
#define MOCK_PHC_REFRESH_INTERVAL (HZ * 5)
#define info_to_phc(d) container_of((d), struct mock_phc, info)
struct mock_phc {
struct ptp_clock_info info;
struct ptp_clock *clock;
struct timecounter tc;
struct cyclecounter cc;
spinlock_t lock;
};
static u64 mock_phc_cc_read(const struct cyclecounter *cc)
{
return ktime_get_raw_ns();
}
static int mock_phc_adjfine(struct ptp_clock_info *info, long scaled_ppm)
{
struct mock_phc *phc = info_to_phc(info);
s64 adj;
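/*
 * Added derivation: the mult offset is MULT * (scaled_ppm / 2^16) / 10^6
 * = scaled_ppm * 2^31 / (2^16 * 10^6) = scaled_ppm * 2^15 / 10^6
 * = scaled_ppm * 2^9 / 15625, hence MOCK_PHC_FADJ_SHIFT and
 * MOCK_PHC_FADJ_DENOMINATOR above.
 */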
adj = (s64)scaled_ppm << MOCK_PHC_FADJ_SHIFT;
adj = div_s64(adj, MOCK_PHC_FADJ_DENOMINATOR);
spin_lock(&phc->lock);
timecounter_read(&phc->tc);
phc->cc.mult = MOCK_PHC_CC_MULT + adj;
spin_unlock(&phc->lock);
return 0;
}
static int mock_phc_adjtime(struct ptp_clock_info *info, s64 delta)
{
struct mock_phc *phc = info_to_phc(info);
spin_lock(&phc->lock);
timecounter_adjtime(&phc->tc, delta);
spin_unlock(&phc->lock);
return 0;
}
static int mock_phc_settime64(struct ptp_clock_info *info,
const struct timespec64 *ts)
{
struct mock_phc *phc = info_to_phc(info);
u64 ns = timespec64_to_ns(ts);
spin_lock(&phc->lock);
timecounter_init(&phc->tc, &phc->cc, ns);
spin_unlock(&phc->lock);
return 0;
}
static int mock_phc_gettime64(struct ptp_clock_info *info, struct timespec64 *ts)
{
struct mock_phc *phc = info_to_phc(info);
u64 ns;
spin_lock(&phc->lock);
ns = timecounter_read(&phc->tc);
spin_unlock(&phc->lock);
*ts = ns_to_timespec64(ns);
return 0;
}
static long mock_phc_refresh(struct ptp_clock_info *info)
{
struct timespec64 ts;
mock_phc_gettime64(info, &ts);
return MOCK_PHC_REFRESH_INTERVAL;
}
int mock_phc_index(struct mock_phc *phc)
{
return ptp_clock_index(phc->clock);
}
EXPORT_SYMBOL_GPL(mock_phc_index);
struct mock_phc *mock_phc_create(struct device *dev)
{
struct mock_phc *phc;
int err;
phc = kzalloc(sizeof(*phc), GFP_KERNEL);
if (!phc) {
err = -ENOMEM;
goto out;
}
phc->info = (struct ptp_clock_info) {
.owner = THIS_MODULE,
.name = "Mock-up PTP clock",
.max_adj = MOCK_PHC_MAX_ADJ_PPB,
.adjfine = mock_phc_adjfine,
.adjtime = mock_phc_adjtime,
.gettime64 = mock_phc_gettime64,
.settime64 = mock_phc_settime64,
.do_aux_work = mock_phc_refresh,
};
phc->cc = (struct cyclecounter) {
.read = mock_phc_cc_read,
.mask = CYCLECOUNTER_MASK(64),
.mult = MOCK_PHC_CC_MULT,
.shift = MOCK_PHC_CC_SHIFT,
};
spin_lock_init(&phc->lock);
timecounter_init(&phc->tc, &phc->cc, 0);
phc->clock = ptp_clock_register(&phc->info, dev);
if (IS_ERR(phc->clock)) {
err = PTR_ERR(phc->clock);
goto out_free_phc;
}
ptp_schedule_worker(phc->clock, MOCK_PHC_REFRESH_INTERVAL);
return phc;
out_free_phc:
kfree(phc);
out:
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(mock_phc_create);
void mock_phc_destroy(struct mock_phc *phc)
{
ptp_clock_unregister(phc->clock);
kfree(phc);
}
EXPORT_SYMBOL_GPL(mock_phc_destroy);
MODULE_DESCRIPTION("Mock-up PTP Hardware Clock driver");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/ptp/ptp_mock.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Virtual PTP 1588 clock for use with KVM guests
* Copyright (C) 2019 ARM Ltd.
* All Rights Reserved
*/
#include <linux/arm-smccc.h>
#include <linux/ptp_kvm.h>
#include <asm/arch_timer.h>
#include <asm/hypervisor.h>
int kvm_arch_ptp_init(void)
{
int ret;
ret = kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_PTP);
if (ret <= 0)
return -EOPNOTSUPP;
return 0;
}
void kvm_arch_ptp_exit(void)
{
}
int kvm_arch_ptp_get_clock(struct timespec64 *ts)
{
return kvm_arch_ptp_get_crosststamp(NULL, ts, NULL);
}
|
linux-master
|
drivers/ptp/ptp_kvm_arm.c
|
// SPDX-License-Identifier: GPL-2.0+
/*
* PTP hardware clock driver for the IDT ClockMatrix(TM) family of timing and
* synchronization devices.
*
* Copyright (C) 2019 Integrated Device Technology, Inc., a Renesas Company.
*/
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/timekeeping.h>
#include <linux/string.h>
#include <linux/of.h>
#include <linux/mfd/rsmu.h>
#include <linux/mfd/idt8a340_reg.h>
#include <asm/unaligned.h>
#include "ptp_private.h"
#include "ptp_clockmatrix.h"
MODULE_DESCRIPTION("Driver for IDT ClockMatrix(TM) family");
MODULE_AUTHOR("Richard Cochran <[email protected]>");
MODULE_AUTHOR("IDT support-1588 <[email protected]>");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
/*
* The name of the firmware file to be loaded; it overrides any
* automatic selection.
*/
static char *firmware;
module_param(firmware, charp, 0);
#define SETTIME_CORRECTION (0)
#define EXTTS_PERIOD_MS (95)
static int _idtcm_adjfine(struct idtcm_channel *channel, long scaled_ppm);
static inline int idtcm_read(struct idtcm *idtcm,
u16 module,
u16 regaddr,
u8 *buf,
u16 count)
{
return regmap_bulk_read(idtcm->regmap, module + regaddr, buf, count);
}
static inline int idtcm_write(struct idtcm *idtcm,
u16 module,
u16 regaddr,
u8 *buf,
u16 count)
{
return regmap_bulk_write(idtcm->regmap, module + regaddr, buf, count);
}
static int contains_full_configuration(struct idtcm *idtcm,
const struct firmware *fw)
{
struct idtcm_fwrc *rec = (struct idtcm_fwrc *)fw->data;
u16 scratch = IDTCM_FW_REG(idtcm->fw_ver, V520, SCRATCH);
s32 full_count;
s32 count = 0;
u16 regaddr;
u8 loaddr;
s32 len;
/* 4 page-offset bytes are skipped at the end of every 0x80-byte page */
full_count = (scratch - GPIO_USER_CONTROL) -
((scratch >> 7) - (GPIO_USER_CONTROL >> 7)) * 4;
/* If the firmware contains a 'full configuration', SM_RESET can be used
 * to ensure proper configuration.
 *
 * A full configuration is defined as one covering every programmable
 * byte within the configuration range, excluding the page offset
 * address bytes.
 */
for (len = fw->size; len > 0; len -= sizeof(*rec)) {
regaddr = rec->hiaddr << 8;
regaddr |= rec->loaddr;
loaddr = rec->loaddr;
rec++;
/* Top (status registers) and bottom are read-only */
if (regaddr < GPIO_USER_CONTROL || regaddr >= scratch)
continue;
/* Page size 128, last 4 bytes of page skipped */
if ((loaddr > 0x7b && loaddr <= 0x7f) || loaddr > 0xfb)
continue;
count++;
}
return (count >= full_count);
}
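/*
 * Worked example of the full_count calculation above, using hypothetical
 * register addresses (the real GPIO_USER_CONTROL/SCRATCH values come from
 * idt8a340_reg.h): if the writable window ran from 0x100 to 0x300, it would
 * span 0x200 = 512 bytes and cross (0x300 >> 7) - (0x100 >> 7) = 4 page
 * boundaries, so 4 * 4 = 16 page-offset bytes are excluded and
 * full_count = 512 - 16 = 496 programmable bytes.
 */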
static int char_array_to_timespec(u8 *buf,
u8 count,
struct timespec64 *ts)
{
u8 i;
u64 nsec;
time64_t sec;
if (count < TOD_BYTE_COUNT)
return 1;
/* Sub-nanoseconds are in buf[0]. */
nsec = buf[4];
for (i = 0; i < 3; i++) {
nsec <<= 8;
nsec |= buf[3 - i];
}
sec = buf[10];
for (i = 0; i < 5; i++) {
sec <<= 8;
sec |= buf[9 - i];
}
ts->tv_sec = sec;
ts->tv_nsec = nsec;
return 0;
}
static int timespec_to_char_array(struct timespec64 const *ts,
u8 *buf,
u8 count)
{
u8 i;
s32 nsec;
time64_t sec;
if (count < TOD_BYTE_COUNT)
return 1;
nsec = ts->tv_nsec;
sec = ts->tv_sec;
/* Sub-nanoseconds are in buf[0]. */
buf[0] = 0;
for (i = 1; i < 5; i++) {
buf[i] = nsec & 0xff;
nsec >>= 8;
}
for (i = 5; i < TOD_BYTE_COUNT; i++) {
buf[i] = sec & 0xff;
sec >>= 8;
}
return 0;
}
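/*
 * The two helpers above share the same ToD byte layout (TOD_BYTE_COUNT,
 * 11 bytes as read back in char_array_to_timespec): buf[0] holds
 * sub-nanoseconds, buf[1..4] the 32-bit nanosecond field in little-endian
 * order, and buf[5..10] the 48-bit seconds field, also little-endian.
 * For example (illustrative values), ts = { .tv_sec = 2, .tv_nsec = 5 }
 * encodes to
 * { 0x00, 0x05, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00 }.
 */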
static int idtcm_strverscmp(const char *version1, const char *version2)
{
u8 ver1[3], ver2[3];
int i;
if (sscanf(version1, "%hhu.%hhu.%hhu",
&ver1[0], &ver1[1], &ver1[2]) != 3)
return -1;
if (sscanf(version2, "%hhu.%hhu.%hhu",
&ver2[0], &ver2[1], &ver2[2]) != 3)
return -1;
for (i = 0; i < 3; i++) {
if (ver1[i] > ver2[i])
return 1;
if (ver1[i] < ver2[i])
return -1;
}
return 0;
}
static enum fw_version idtcm_fw_version(const char *version)
{
enum fw_version ver = V_DEFAULT;
if (idtcm_strverscmp(version, "4.8.7") >= 0)
ver = V487;
if (idtcm_strverscmp(version, "5.2.0") >= 0)
ver = V520;
return ver;
}
static int clear_boot_status(struct idtcm *idtcm)
{
u8 buf[4] = {0};
return idtcm_write(idtcm, GENERAL_STATUS, BOOT_STATUS, buf, sizeof(buf));
}
static int read_boot_status(struct idtcm *idtcm, u32 *status)
{
int err;
u8 buf[4] = {0};
err = idtcm_read(idtcm, GENERAL_STATUS, BOOT_STATUS, buf, sizeof(buf));
*status = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
return err;
}
static int wait_for_boot_status_ready(struct idtcm *idtcm)
{
u32 status = 0;
u8 i = 30; /* 30 * 100ms = 3s */
int err;
do {
err = read_boot_status(idtcm, &status);
if (err)
return err;
if (status == 0xA0)
return 0;
msleep(100);
i--;
} while (i);
dev_warn(idtcm->dev, "%s timed out", __func__);
return -EBUSY;
}
static int arm_tod_read_trig_sel_refclk(struct idtcm_channel *channel, u8 ref)
{
struct idtcm *idtcm = channel->idtcm;
u16 tod_read_cmd = IDTCM_FW_REG(idtcm->fw_ver, V520, TOD_READ_SECONDARY_CMD);
u8 val = 0;
int err;
val &= ~(WR_REF_INDEX_MASK << WR_REF_INDEX_SHIFT);
val |= (ref << WR_REF_INDEX_SHIFT);
err = idtcm_write(idtcm, channel->tod_read_secondary,
TOD_READ_SECONDARY_SEL_CFG_0, &val, sizeof(val));
if (err)
return err;
val = 0 | (SCSR_TOD_READ_TRIG_SEL_REFCLK << TOD_READ_TRIGGER_SHIFT);
err = idtcm_write(idtcm, channel->tod_read_secondary, tod_read_cmd,
&val, sizeof(val));
if (err)
dev_err(idtcm->dev, "%s: err = %d", __func__, err);
return err;
}
static bool is_single_shot(u8 mask)
{
/* Treat single bit ToD masks as continuous trigger */
return !(mask <= 8 && is_power_of_2(mask));
}
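/*
 * Illustration of is_single_shot(): a mask with exactly one of the four ToD
 * bits set (0x1, 0x2, 0x4 or 0x8) is a power of two and <= 8, so the
 * function returns false and the external timestamp keeps being re-armed
 * (continuous mode). A mask such as 0x5 (ToD0 and ToD2) returns true, so
 * each trigger fires once and its bit is cleared from extts_mask.
 */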
static int idtcm_extts_enable(struct idtcm_channel *channel,
struct ptp_clock_request *rq, int on)
{
u8 index = rq->extts.index;
struct idtcm *idtcm;
u8 mask = 1 << index;
int err = 0;
u8 old_mask;
int ref;
idtcm = channel->idtcm;
old_mask = idtcm->extts_mask;
/* Reject requests with unsupported flags */
if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
PTP_RISING_EDGE |
PTP_FALLING_EDGE |
PTP_STRICT_FLAGS))
return -EOPNOTSUPP;
/* Reject requests to enable time stamping on falling edge */
if ((rq->extts.flags & PTP_ENABLE_FEATURE) &&
(rq->extts.flags & PTP_FALLING_EDGE))
return -EOPNOTSUPP;
if (index >= MAX_TOD)
return -EINVAL;
if (on) {
/* Support triggering more than one of TOD_0/1/2/3 by the same pin */
/* Use the pin configured for the channel */
ref = ptp_find_pin(channel->ptp_clock, PTP_PF_EXTTS, channel->tod);
if (ref < 0) {
dev_err(idtcm->dev, "%s: No valid pin found for TOD%d!\n",
__func__, channel->tod);
return -EBUSY;
}
err = arm_tod_read_trig_sel_refclk(&idtcm->channel[index], ref);
if (err == 0) {
idtcm->extts_mask |= mask;
idtcm->event_channel[index] = channel;
idtcm->channel[index].refn = ref;
idtcm->extts_single_shot = is_single_shot(idtcm->extts_mask);
if (old_mask)
return 0;
schedule_delayed_work(&idtcm->extts_work,
msecs_to_jiffies(EXTTS_PERIOD_MS));
}
} else {
idtcm->extts_mask &= ~mask;
idtcm->extts_single_shot = is_single_shot(idtcm->extts_mask);
if (idtcm->extts_mask == 0)
cancel_delayed_work(&idtcm->extts_work);
}
return err;
}
static int read_sys_apll_status(struct idtcm *idtcm, u8 *status)
{
return idtcm_read(idtcm, STATUS, DPLL_SYS_APLL_STATUS, status,
sizeof(u8));
}
static int read_sys_dpll_status(struct idtcm *idtcm, u8 *status)
{
return idtcm_read(idtcm, STATUS, DPLL_SYS_STATUS, status, sizeof(u8));
}
static int wait_for_sys_apll_dpll_lock(struct idtcm *idtcm)
{
unsigned long timeout = jiffies + msecs_to_jiffies(LOCK_TIMEOUT_MS);
u8 apll = 0;
u8 dpll = 0;
int err;
do {
err = read_sys_apll_status(idtcm, &apll);
if (err)
return err;
err = read_sys_dpll_status(idtcm, &dpll);
if (err)
return err;
apll &= SYS_APLL_LOSS_LOCK_LIVE_MASK;
dpll &= DPLL_SYS_STATE_MASK;
if (apll == SYS_APLL_LOSS_LOCK_LIVE_LOCKED &&
dpll == DPLL_STATE_LOCKED) {
return 0;
} else if (dpll == DPLL_STATE_FREERUN ||
dpll == DPLL_STATE_HOLDOVER ||
dpll == DPLL_STATE_OPEN_LOOP) {
dev_warn(idtcm->dev,
"No wait state: DPLL_SYS_STATE %d", dpll);
return -EPERM;
}
msleep(LOCK_POLL_INTERVAL_MS);
} while (time_is_after_jiffies(timeout));
dev_warn(idtcm->dev,
"%d ms lock timeout: SYS APLL Loss Lock %d SYS DPLL state %d",
LOCK_TIMEOUT_MS, apll, dpll);
return -ETIME;
}
static void wait_for_chip_ready(struct idtcm *idtcm)
{
if (wait_for_boot_status_ready(idtcm))
dev_warn(idtcm->dev, "BOOT_STATUS != 0xA0");
if (wait_for_sys_apll_dpll_lock(idtcm))
dev_warn(idtcm->dev,
"Continuing while SYS APLL/DPLL is not locked");
}
static int _idtcm_gettime_triggered(struct idtcm_channel *channel,
struct timespec64 *ts)
{
struct idtcm *idtcm = channel->idtcm;
u16 tod_read_cmd = IDTCM_FW_REG(idtcm->fw_ver, V520, TOD_READ_SECONDARY_CMD);
u8 buf[TOD_BYTE_COUNT];
u8 trigger;
int err;
err = idtcm_read(idtcm, channel->tod_read_secondary,
tod_read_cmd, &trigger, sizeof(trigger));
if (err)
return err;
if (trigger & TOD_READ_TRIGGER_MASK)
return -EBUSY;
err = idtcm_read(idtcm, channel->tod_read_secondary,
TOD_READ_SECONDARY_BASE, buf, sizeof(buf));
if (err)
return err;
return char_array_to_timespec(buf, sizeof(buf), ts);
}
static int _idtcm_gettime(struct idtcm_channel *channel,
struct timespec64 *ts, u8 timeout)
{
struct idtcm *idtcm = channel->idtcm;
u16 tod_read_cmd = IDTCM_FW_REG(idtcm->fw_ver, V520, TOD_READ_PRIMARY_CMD);
u8 buf[TOD_BYTE_COUNT];
u8 trigger;
int err;
/* Wait for the trigger to clear (become 0) */
do {
if (timeout-- == 0)
return -EIO;
if (idtcm->calculate_overhead_flag)
idtcm->start_time = ktime_get_raw();
err = idtcm_read(idtcm, channel->tod_read_primary,
tod_read_cmd, &trigger,
sizeof(trigger));
if (err)
return err;
} while (trigger & TOD_READ_TRIGGER_MASK);
err = idtcm_read(idtcm, channel->tod_read_primary,
TOD_READ_PRIMARY_BASE, buf, sizeof(buf));
if (err)
return err;
err = char_array_to_timespec(buf, sizeof(buf), ts);
return err;
}
static int idtcm_extts_check_channel(struct idtcm *idtcm, u8 todn)
{
struct idtcm_channel *ptp_channel, *extts_channel;
struct ptp_clock_event event;
struct timespec64 ts;
u32 dco_delay = 0;
int err;
extts_channel = &idtcm->channel[todn];
ptp_channel = idtcm->event_channel[todn];
if (extts_channel == ptp_channel)
dco_delay = ptp_channel->dco_delay;
err = _idtcm_gettime_triggered(extts_channel, &ts);
if (err)
return err;
/* Triggered - save timestamp */
event.type = PTP_CLOCK_EXTTS;
event.index = todn;
event.timestamp = timespec64_to_ns(&ts) - dco_delay;
ptp_clock_event(ptp_channel->ptp_clock, &event);
return err;
}
static int _idtcm_gettime_immediate(struct idtcm_channel *channel,
struct timespec64 *ts)
{
struct idtcm *idtcm = channel->idtcm;
u16 tod_read_cmd = IDTCM_FW_REG(idtcm->fw_ver, V520, TOD_READ_PRIMARY_CMD);
u8 val = (SCSR_TOD_READ_TRIG_SEL_IMMEDIATE << TOD_READ_TRIGGER_SHIFT);
int err;
err = idtcm_write(idtcm, channel->tod_read_primary,
tod_read_cmd, &val, sizeof(val));
if (err)
return err;
return _idtcm_gettime(channel, ts, 10);
}
static int _sync_pll_output(struct idtcm *idtcm,
u8 pll,
u8 sync_src,
u8 qn,
u8 qn_plus_1)
{
int err;
u8 val;
u16 sync_ctrl0;
u16 sync_ctrl1;
u8 temp;
if (qn == 0 && qn_plus_1 == 0)
return 0;
switch (pll) {
case 0:
sync_ctrl0 = HW_Q0_Q1_CH_SYNC_CTRL_0;
sync_ctrl1 = HW_Q0_Q1_CH_SYNC_CTRL_1;
break;
case 1:
sync_ctrl0 = HW_Q2_Q3_CH_SYNC_CTRL_0;
sync_ctrl1 = HW_Q2_Q3_CH_SYNC_CTRL_1;
break;
case 2:
sync_ctrl0 = HW_Q4_Q5_CH_SYNC_CTRL_0;
sync_ctrl1 = HW_Q4_Q5_CH_SYNC_CTRL_1;
break;
case 3:
sync_ctrl0 = HW_Q6_Q7_CH_SYNC_CTRL_0;
sync_ctrl1 = HW_Q6_Q7_CH_SYNC_CTRL_1;
break;
case 4:
sync_ctrl0 = HW_Q8_CH_SYNC_CTRL_0;
sync_ctrl1 = HW_Q8_CH_SYNC_CTRL_1;
break;
case 5:
sync_ctrl0 = HW_Q9_CH_SYNC_CTRL_0;
sync_ctrl1 = HW_Q9_CH_SYNC_CTRL_1;
break;
case 6:
sync_ctrl0 = HW_Q10_CH_SYNC_CTRL_0;
sync_ctrl1 = HW_Q10_CH_SYNC_CTRL_1;
break;
case 7:
sync_ctrl0 = HW_Q11_CH_SYNC_CTRL_0;
sync_ctrl1 = HW_Q11_CH_SYNC_CTRL_1;
break;
default:
return -EINVAL;
}
val = SYNCTRL1_MASTER_SYNC_RST;
/* Place master sync in reset */
err = idtcm_write(idtcm, 0, sync_ctrl1, &val, sizeof(val));
if (err)
return err;
err = idtcm_write(idtcm, 0, sync_ctrl0, &sync_src, sizeof(sync_src));
if (err)
return err;
/* Set sync trigger mask */
val |= SYNCTRL1_FBDIV_FRAME_SYNC_TRIG | SYNCTRL1_FBDIV_SYNC_TRIG;
if (qn)
val |= SYNCTRL1_Q0_DIV_SYNC_TRIG;
if (qn_plus_1)
val |= SYNCTRL1_Q1_DIV_SYNC_TRIG;
err = idtcm_write(idtcm, 0, sync_ctrl1, &val, sizeof(val));
if (err)
return err;
/* PLL5 can have OUT8 as second additional output. */
if (pll == 5 && qn_plus_1 != 0) {
err = idtcm_read(idtcm, 0, HW_Q8_CTRL_SPARE,
&temp, sizeof(temp));
if (err)
return err;
temp &= ~(Q9_TO_Q8_SYNC_TRIG);
err = idtcm_write(idtcm, 0, HW_Q8_CTRL_SPARE,
&temp, sizeof(temp));
if (err)
return err;
temp |= Q9_TO_Q8_SYNC_TRIG;
err = idtcm_write(idtcm, 0, HW_Q8_CTRL_SPARE,
&temp, sizeof(temp));
if (err)
return err;
}
/* PLL6 can have OUT11 as second additional output. */
if (pll == 6 && qn_plus_1 != 0) {
err = idtcm_read(idtcm, 0, HW_Q11_CTRL_SPARE,
&temp, sizeof(temp));
if (err)
return err;
temp &= ~(Q10_TO_Q11_SYNC_TRIG);
err = idtcm_write(idtcm, 0, HW_Q11_CTRL_SPARE,
&temp, sizeof(temp));
if (err)
return err;
temp |= Q10_TO_Q11_SYNC_TRIG;
err = idtcm_write(idtcm, 0, HW_Q11_CTRL_SPARE,
&temp, sizeof(temp));
if (err)
return err;
}
/* Place master sync out of reset */
val &= ~(SYNCTRL1_MASTER_SYNC_RST);
err = idtcm_write(idtcm, 0, sync_ctrl1, &val, sizeof(val));
return err;
}
static int idtcm_sync_pps_output(struct idtcm_channel *channel)
{
struct idtcm *idtcm = channel->idtcm;
u8 pll;
u8 qn;
u8 qn_plus_1;
int err = 0;
u8 out8_mux = 0;
u8 out11_mux = 0;
u8 temp;
u16 output_mask = channel->output_mask;
err = idtcm_read(idtcm, 0, HW_Q8_CTRL_SPARE,
&temp, sizeof(temp));
if (err)
return err;
if ((temp & Q9_TO_Q8_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK) ==
Q9_TO_Q8_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK)
out8_mux = 1;
err = idtcm_read(idtcm, 0, HW_Q11_CTRL_SPARE,
&temp, sizeof(temp));
if (err)
return err;
if ((temp & Q10_TO_Q11_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK) ==
Q10_TO_Q11_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK)
out11_mux = 1;
for (pll = 0; pll < 8; pll++) {
qn = 0;
qn_plus_1 = 0;
if (pll < 4) {
/* First 4 pll has 2 outputs */
qn = output_mask & 0x1;
output_mask = output_mask >> 1;
qn_plus_1 = output_mask & 0x1;
output_mask = output_mask >> 1;
} else if (pll == 4) {
if (out8_mux == 0) {
qn = output_mask & 0x1;
output_mask = output_mask >> 1;
}
} else if (pll == 5) {
if (out8_mux) {
qn_plus_1 = output_mask & 0x1;
output_mask = output_mask >> 1;
}
qn = output_mask & 0x1;
output_mask = output_mask >> 1;
} else if (pll == 6) {
qn = output_mask & 0x1;
output_mask = output_mask >> 1;
if (out11_mux) {
qn_plus_1 = output_mask & 0x1;
output_mask = output_mask >> 1;
}
} else if (pll == 7) {
if (out11_mux == 0) {
qn = output_mask & 0x1;
output_mask = output_mask >> 1;
}
}
if (qn != 0 || qn_plus_1 != 0)
err = _sync_pll_output(idtcm, pll, channel->sync_src,
qn, qn_plus_1);
if (err)
return err;
}
return err;
}
static int _idtcm_set_dpll_hw_tod(struct idtcm_channel *channel,
struct timespec64 const *ts,
enum hw_tod_write_trig_sel wr_trig)
{
struct idtcm *idtcm = channel->idtcm;
u8 buf[TOD_BYTE_COUNT];
u8 cmd;
int err;
struct timespec64 local_ts = *ts;
s64 total_overhead_ns;
/* Configure HW TOD write trigger. */
err = idtcm_read(idtcm, channel->hw_dpll_n, HW_DPLL_TOD_CTRL_1,
&cmd, sizeof(cmd));
if (err)
return err;
cmd &= ~(0x0f);
cmd |= wr_trig | 0x08;
err = idtcm_write(idtcm, channel->hw_dpll_n, HW_DPLL_TOD_CTRL_1,
&cmd, sizeof(cmd));
if (err)
return err;
if (wr_trig != HW_TOD_WR_TRIG_SEL_MSB) {
err = timespec_to_char_array(&local_ts, buf, sizeof(buf));
if (err)
return err;
err = idtcm_write(idtcm, channel->hw_dpll_n,
HW_DPLL_TOD_OVR__0, buf, sizeof(buf));
if (err)
return err;
}
/* ARM HW TOD write trigger. */
cmd &= ~(0x08);
err = idtcm_write(idtcm, channel->hw_dpll_n, HW_DPLL_TOD_CTRL_1,
&cmd, sizeof(cmd));
if (wr_trig == HW_TOD_WR_TRIG_SEL_MSB) {
if (idtcm->calculate_overhead_flag) {
/* Assumption: I2C @ 400 kHz */
ktime_t diff = ktime_sub(ktime_get_raw(),
idtcm->start_time);
total_overhead_ns = ktime_to_ns(diff)
+ idtcm->tod_write_overhead_ns
+ SETTIME_CORRECTION;
timespec64_add_ns(&local_ts, total_overhead_ns);
idtcm->calculate_overhead_flag = 0;
}
err = timespec_to_char_array(&local_ts, buf, sizeof(buf));
if (err)
return err;
err = idtcm_write(idtcm, channel->hw_dpll_n,
HW_DPLL_TOD_OVR__0, buf, sizeof(buf));
}
return err;
}
static int _idtcm_set_dpll_scsr_tod(struct idtcm_channel *channel,
struct timespec64 const *ts,
enum scsr_tod_write_trig_sel wr_trig,
enum scsr_tod_write_type_sel wr_type)
{
struct idtcm *idtcm = channel->idtcm;
unsigned char buf[TOD_BYTE_COUNT], cmd;
struct timespec64 local_ts = *ts;
int err, count = 0;
timespec64_add_ns(&local_ts, SETTIME_CORRECTION);
err = timespec_to_char_array(&local_ts, buf, sizeof(buf));
if (err)
return err;
err = idtcm_write(idtcm, channel->tod_write, TOD_WRITE,
buf, sizeof(buf));
if (err)
return err;
/* Trigger the write operation. */
err = idtcm_read(idtcm, channel->tod_write, TOD_WRITE_CMD,
&cmd, sizeof(cmd));
if (err)
return err;
cmd &= ~(TOD_WRITE_SELECTION_MASK << TOD_WRITE_SELECTION_SHIFT);
cmd &= ~(TOD_WRITE_TYPE_MASK << TOD_WRITE_TYPE_SHIFT);
cmd |= (wr_trig << TOD_WRITE_SELECTION_SHIFT);
cmd |= (wr_type << TOD_WRITE_TYPE_SHIFT);
err = idtcm_write(idtcm, channel->tod_write, TOD_WRITE_CMD,
&cmd, sizeof(cmd));
if (err)
return err;
/* Wait for the operation to complete. */
while (1) {
/* pps trigger takes up to 1 sec to complete */
if (wr_trig == SCSR_TOD_WR_TRIG_SEL_TODPPS)
msleep(50);
err = idtcm_read(idtcm, channel->tod_write, TOD_WRITE_CMD,
&cmd, sizeof(cmd));
if (err)
return err;
if ((cmd & TOD_WRITE_SELECTION_MASK) == 0)
break;
if (++count > 20) {
dev_err(idtcm->dev,
"Timed out waiting for the write counter");
return -EIO;
}
}
return 0;
}
static int get_output_base_addr(enum fw_version ver, u8 outn)
{
int base;
switch (outn) {
case 0:
base = IDTCM_FW_REG(ver, V520, OUTPUT_0);
break;
case 1:
base = IDTCM_FW_REG(ver, V520, OUTPUT_1);
break;
case 2:
base = IDTCM_FW_REG(ver, V520, OUTPUT_2);
break;
case 3:
base = IDTCM_FW_REG(ver, V520, OUTPUT_3);
break;
case 4:
base = IDTCM_FW_REG(ver, V520, OUTPUT_4);
break;
case 5:
base = IDTCM_FW_REG(ver, V520, OUTPUT_5);
break;
case 6:
base = IDTCM_FW_REG(ver, V520, OUTPUT_6);
break;
case 7:
base = IDTCM_FW_REG(ver, V520, OUTPUT_7);
break;
case 8:
base = IDTCM_FW_REG(ver, V520, OUTPUT_8);
break;
case 9:
base = IDTCM_FW_REG(ver, V520, OUTPUT_9);
break;
case 10:
base = IDTCM_FW_REG(ver, V520, OUTPUT_10);
break;
case 11:
base = IDTCM_FW_REG(ver, V520, OUTPUT_11);
break;
default:
base = -EINVAL;
}
return base;
}
static int _idtcm_settime_deprecated(struct idtcm_channel *channel,
struct timespec64 const *ts)
{
struct idtcm *idtcm = channel->idtcm;
int err;
err = _idtcm_set_dpll_hw_tod(channel, ts, HW_TOD_WR_TRIG_SEL_MSB);
if (err) {
dev_err(idtcm->dev,
"%s: Set HW ToD failed", __func__);
return err;
}
return idtcm_sync_pps_output(channel);
}
static int _idtcm_settime(struct idtcm_channel *channel,
struct timespec64 const *ts,
enum scsr_tod_write_type_sel wr_type)
{
return _idtcm_set_dpll_scsr_tod(channel, ts,
SCSR_TOD_WR_TRIG_SEL_IMMEDIATE,
wr_type);
}
static int idtcm_set_phase_pull_in_offset(struct idtcm_channel *channel,
s32 offset_ns)
{
int err;
int i;
struct idtcm *idtcm = channel->idtcm;
u8 buf[4];
for (i = 0; i < 4; i++) {
buf[i] = 0xff & (offset_ns);
offset_ns >>= 8;
}
err = idtcm_write(idtcm, channel->dpll_phase_pull_in, PULL_IN_OFFSET,
buf, sizeof(buf));
return err;
}
static int idtcm_set_phase_pull_in_slope_limit(struct idtcm_channel *channel,
u32 max_ffo_ppb)
{
int err;
u8 i;
struct idtcm *idtcm = channel->idtcm;
u8 buf[3];
if (max_ffo_ppb & 0xff000000)
max_ffo_ppb = 0;
for (i = 0; i < 3; i++) {
buf[i] = 0xff & (max_ffo_ppb);
max_ffo_ppb >>= 8;
}
err = idtcm_write(idtcm, channel->dpll_phase_pull_in,
PULL_IN_SLOPE_LIMIT, buf, sizeof(buf));
return err;
}
static int idtcm_start_phase_pull_in(struct idtcm_channel *channel)
{
int err;
struct idtcm *idtcm = channel->idtcm;
u8 buf;
err = idtcm_read(idtcm, channel->dpll_phase_pull_in, PULL_IN_CTRL,
&buf, sizeof(buf));
if (err)
return err;
if (buf == 0) {
buf = 0x01;
err = idtcm_write(idtcm, channel->dpll_phase_pull_in,
PULL_IN_CTRL, &buf, sizeof(buf));
} else {
err = -EBUSY;
}
return err;
}
static int do_phase_pull_in_fw(struct idtcm_channel *channel,
s32 offset_ns,
u32 max_ffo_ppb)
{
int err;
err = idtcm_set_phase_pull_in_offset(channel, -offset_ns);
if (err)
return err;
err = idtcm_set_phase_pull_in_slope_limit(channel, max_ffo_ppb);
if (err)
return err;
err = idtcm_start_phase_pull_in(channel);
return err;
}
static int set_tod_write_overhead(struct idtcm_channel *channel)
{
struct idtcm *idtcm = channel->idtcm;
s64 current_ns = 0;
s64 lowest_ns = 0;
int err;
u8 i;
ktime_t start;
ktime_t stop;
ktime_t diff;
char buf[TOD_BYTE_COUNT] = {0};
/* Set page offset */
idtcm_write(idtcm, channel->hw_dpll_n, HW_DPLL_TOD_OVR__0,
buf, sizeof(buf));
for (i = 0; i < TOD_WRITE_OVERHEAD_COUNT_MAX; i++) {
start = ktime_get_raw();
err = idtcm_write(idtcm, channel->hw_dpll_n,
HW_DPLL_TOD_OVR__0, buf, sizeof(buf));
if (err)
return err;
stop = ktime_get_raw();
diff = ktime_sub(stop, start);
current_ns = ktime_to_ns(diff);
if (i == 0) {
lowest_ns = current_ns;
} else {
if (current_ns < lowest_ns)
lowest_ns = current_ns;
}
}
idtcm->tod_write_overhead_ns = lowest_ns;
return err;
}
static int _idtcm_adjtime_deprecated(struct idtcm_channel *channel, s64 delta)
{
int err;
struct idtcm *idtcm = channel->idtcm;
struct timespec64 ts;
s64 now;
if (abs(delta) < PHASE_PULL_IN_THRESHOLD_NS_DEPRECATED) {
err = channel->do_phase_pull_in(channel, delta, 0);
} else {
idtcm->calculate_overhead_flag = 1;
err = set_tod_write_overhead(channel);
if (err)
return err;
err = _idtcm_gettime_immediate(channel, &ts);
if (err)
return err;
now = timespec64_to_ns(&ts);
now += delta;
ts = ns_to_timespec64(now);
err = _idtcm_settime_deprecated(channel, &ts);
}
return err;
}
static int idtcm_state_machine_reset(struct idtcm *idtcm)
{
u8 byte = SM_RESET_CMD;
u32 status = 0;
int err;
u8 i;
clear_boot_status(idtcm);
err = idtcm_write(idtcm, RESET_CTRL,
IDTCM_FW_REG(idtcm->fw_ver, V520, SM_RESET),
&byte, sizeof(byte));
if (!err) {
for (i = 0; i < 30; i++) {
msleep_interruptible(100);
read_boot_status(idtcm, &status);
if (status == 0xA0) {
dev_dbg(idtcm->dev,
"SM_RESET completed in %d ms", i * 100);
break;
}
}
if (!status)
dev_err(idtcm->dev,
"Timed out waiting for CM_RESET to complete");
}
return err;
}
static int idtcm_read_hw_rev_id(struct idtcm *idtcm, u8 *hw_rev_id)
{
return idtcm_read(idtcm, HW_REVISION, REV_ID, hw_rev_id, sizeof(u8));
}
static int idtcm_read_product_id(struct idtcm *idtcm, u16 *product_id)
{
int err;
u8 buf[2] = {0};
err = idtcm_read(idtcm, GENERAL_STATUS, PRODUCT_ID, buf, sizeof(buf));
*product_id = (buf[1] << 8) | buf[0];
return err;
}
static int idtcm_read_major_release(struct idtcm *idtcm, u8 *major)
{
int err;
u8 buf = 0;
err = idtcm_read(idtcm, GENERAL_STATUS, MAJ_REL, &buf, sizeof(buf));
*major = buf >> 1;
return err;
}
static int idtcm_read_minor_release(struct idtcm *idtcm, u8 *minor)
{
return idtcm_read(idtcm, GENERAL_STATUS, MIN_REL, minor, sizeof(u8));
}
static int idtcm_read_hotfix_release(struct idtcm *idtcm, u8 *hotfix)
{
return idtcm_read(idtcm,
GENERAL_STATUS,
HOTFIX_REL,
hotfix,
sizeof(u8));
}
static int idtcm_read_otp_scsr_config_select(struct idtcm *idtcm,
u8 *config_select)
{
return idtcm_read(idtcm, GENERAL_STATUS, OTP_SCSR_CONFIG_SELECT,
config_select, sizeof(u8));
}
static int set_pll_output_mask(struct idtcm *idtcm, u16 addr, u8 val)
{
int err = 0;
switch (addr) {
case TOD0_OUT_ALIGN_MASK_ADDR:
SET_U16_LSB(idtcm->channel[0].output_mask, val);
break;
case TOD0_OUT_ALIGN_MASK_ADDR + 1:
SET_U16_MSB(idtcm->channel[0].output_mask, val);
break;
case TOD1_OUT_ALIGN_MASK_ADDR:
SET_U16_LSB(idtcm->channel[1].output_mask, val);
break;
case TOD1_OUT_ALIGN_MASK_ADDR + 1:
SET_U16_MSB(idtcm->channel[1].output_mask, val);
break;
case TOD2_OUT_ALIGN_MASK_ADDR:
SET_U16_LSB(idtcm->channel[2].output_mask, val);
break;
case TOD2_OUT_ALIGN_MASK_ADDR + 1:
SET_U16_MSB(idtcm->channel[2].output_mask, val);
break;
case TOD3_OUT_ALIGN_MASK_ADDR:
SET_U16_LSB(idtcm->channel[3].output_mask, val);
break;
case TOD3_OUT_ALIGN_MASK_ADDR + 1:
SET_U16_MSB(idtcm->channel[3].output_mask, val);
break;
default:
err = -EFAULT; /* Bad address */
break;
}
return err;
}
static int set_tod_ptp_pll(struct idtcm *idtcm, u8 index, u8 pll)
{
if (index >= MAX_TOD) {
dev_err(idtcm->dev, "ToD%d not supported", index);
return -EINVAL;
}
if (pll >= MAX_PLL) {
dev_err(idtcm->dev, "Pll%d not supported", pll);
return -EINVAL;
}
idtcm->channel[index].pll = pll;
return 0;
}
static int check_and_set_masks(struct idtcm *idtcm,
u16 regaddr,
u8 val)
{
int err = 0;
switch (regaddr) {
case TOD_MASK_ADDR:
if ((val & 0xf0) || !(val & 0x0f)) {
dev_err(idtcm->dev, "Invalid TOD mask 0x%02x", val);
err = -EINVAL;
} else {
idtcm->tod_mask = val;
}
break;
case TOD0_PTP_PLL_ADDR:
err = set_tod_ptp_pll(idtcm, 0, val);
break;
case TOD1_PTP_PLL_ADDR:
err = set_tod_ptp_pll(idtcm, 1, val);
break;
case TOD2_PTP_PLL_ADDR:
err = set_tod_ptp_pll(idtcm, 2, val);
break;
case TOD3_PTP_PLL_ADDR:
err = set_tod_ptp_pll(idtcm, 3, val);
break;
default:
err = set_pll_output_mask(idtcm, regaddr, val);
break;
}
return err;
}
static void display_pll_and_masks(struct idtcm *idtcm)
{
u8 i;
u8 mask;
dev_dbg(idtcm->dev, "tod_mask = 0x%02x", idtcm->tod_mask);
for (i = 0; i < MAX_TOD; i++) {
mask = 1 << i;
if (mask & idtcm->tod_mask)
dev_dbg(idtcm->dev,
"TOD%d pll = %d output_mask = 0x%04x",
i, idtcm->channel[i].pll,
idtcm->channel[i].output_mask);
}
}
static int idtcm_load_firmware(struct idtcm *idtcm,
struct device *dev)
{
u16 scratch = IDTCM_FW_REG(idtcm->fw_ver, V520, SCRATCH);
char fname[128] = FW_FILENAME;
const struct firmware *fw;
struct idtcm_fwrc *rec;
u32 regaddr;
int err;
s32 len;
u8 val;
u8 loaddr;
if (firmware) /* module parameter */
snprintf(fname, sizeof(fname), "%s", firmware);
dev_info(idtcm->dev, "requesting firmware '%s'", fname);
err = request_firmware(&fw, fname, dev);
if (err) {
dev_err(idtcm->dev,
"Failed at line %d in %s!", __LINE__, __func__);
return err;
}
dev_dbg(idtcm->dev, "firmware size %zu bytes", fw->size);
rec = (struct idtcm_fwrc *) fw->data;
if (contains_full_configuration(idtcm, fw))
idtcm_state_machine_reset(idtcm);
for (len = fw->size; len > 0; len -= sizeof(*rec)) {
if (rec->reserved) {
dev_err(idtcm->dev,
"bad firmware, reserved field non-zero");
err = -EINVAL;
} else {
regaddr = rec->hiaddr << 8;
regaddr |= rec->loaddr;
val = rec->value;
loaddr = rec->loaddr;
rec++;
err = check_and_set_masks(idtcm, regaddr, val);
}
if (err != -EINVAL) {
err = 0;
/* Top (status registers) and bottom are read-only */
if (regaddr < GPIO_USER_CONTROL || regaddr >= scratch)
continue;
/* Page size 128, last 4 bytes of page skipped */
if ((loaddr > 0x7b && loaddr <= 0x7f) || loaddr > 0xfb)
continue;
err = idtcm_write(idtcm, regaddr, 0, &val, sizeof(val));
}
if (err)
goto out;
}
display_pll_and_masks(idtcm);
out:
release_firmware(fw);
return err;
}
static int idtcm_output_enable(struct idtcm_channel *channel,
bool enable, unsigned int outn)
{
struct idtcm *idtcm = channel->idtcm;
int base;
int err;
u8 val;
base = get_output_base_addr(idtcm->fw_ver, outn);
if (!(base > 0)) {
dev_err(idtcm->dev,
"%s - Unsupported out%d", __func__, outn);
return base;
}
err = idtcm_read(idtcm, (u16)base, OUT_CTRL_1, &val, sizeof(val));
if (err)
return err;
if (enable)
val |= SQUELCH_DISABLE;
else
val &= ~SQUELCH_DISABLE;
return idtcm_write(idtcm, (u16)base, OUT_CTRL_1, &val, sizeof(val));
}
static int idtcm_perout_enable(struct idtcm_channel *channel,
struct ptp_perout_request *perout,
bool enable)
{
struct idtcm *idtcm = channel->idtcm;
struct timespec64 ts = {0, 0};
int err;
err = idtcm_output_enable(channel, enable, perout->index);
if (err) {
dev_err(idtcm->dev, "Unable to set output enable");
return err;
}
/* Align output to internal 1 PPS */
return _idtcm_settime(channel, &ts, SCSR_TOD_WR_TYPE_SEL_DELTA_PLUS);
}
static int idtcm_get_pll_mode(struct idtcm_channel *channel,
enum pll_mode *mode)
{
struct idtcm *idtcm = channel->idtcm;
int err;
u8 dpll_mode;
err = idtcm_read(idtcm, channel->dpll_n,
IDTCM_FW_REG(idtcm->fw_ver, V520, DPLL_MODE),
&dpll_mode, sizeof(dpll_mode));
if (err)
return err;
*mode = (dpll_mode >> PLL_MODE_SHIFT) & PLL_MODE_MASK;
return 0;
}
static int idtcm_set_pll_mode(struct idtcm_channel *channel,
enum pll_mode mode)
{
struct idtcm *idtcm = channel->idtcm;
int err;
u8 dpll_mode;
err = idtcm_read(idtcm, channel->dpll_n,
IDTCM_FW_REG(idtcm->fw_ver, V520, DPLL_MODE),
&dpll_mode, sizeof(dpll_mode));
if (err)
return err;
dpll_mode &= ~(PLL_MODE_MASK << PLL_MODE_SHIFT);
dpll_mode |= (mode << PLL_MODE_SHIFT);
err = idtcm_write(idtcm, channel->dpll_n,
IDTCM_FW_REG(idtcm->fw_ver, V520, DPLL_MODE),
&dpll_mode, sizeof(dpll_mode));
return err;
}
static int idtcm_get_manual_reference(struct idtcm_channel *channel,
enum manual_reference *ref)
{
struct idtcm *idtcm = channel->idtcm;
u8 dpll_manu_ref_cfg;
int err;
err = idtcm_read(idtcm, channel->dpll_ctrl_n,
DPLL_CTRL_DPLL_MANU_REF_CFG,
&dpll_manu_ref_cfg, sizeof(dpll_manu_ref_cfg));
if (err)
return err;
dpll_manu_ref_cfg &= (MANUAL_REFERENCE_MASK << MANUAL_REFERENCE_SHIFT);
*ref = dpll_manu_ref_cfg >> MANUAL_REFERENCE_SHIFT;
return 0;
}
static int idtcm_set_manual_reference(struct idtcm_channel *channel,
enum manual_reference ref)
{
struct idtcm *idtcm = channel->idtcm;
u8 dpll_manu_ref_cfg;
int err;
err = idtcm_read(idtcm, channel->dpll_ctrl_n,
DPLL_CTRL_DPLL_MANU_REF_CFG,
&dpll_manu_ref_cfg, sizeof(dpll_manu_ref_cfg));
if (err)
return err;
dpll_manu_ref_cfg &= ~(MANUAL_REFERENCE_MASK << MANUAL_REFERENCE_SHIFT);
dpll_manu_ref_cfg |= (ref << MANUAL_REFERENCE_SHIFT);
err = idtcm_write(idtcm, channel->dpll_ctrl_n,
DPLL_CTRL_DPLL_MANU_REF_CFG,
&dpll_manu_ref_cfg, sizeof(dpll_manu_ref_cfg));
return err;
}
static int configure_dpll_mode_write_frequency(struct idtcm_channel *channel)
{
struct idtcm *idtcm = channel->idtcm;
int err;
err = idtcm_set_pll_mode(channel, PLL_MODE_WRITE_FREQUENCY);
if (err)
dev_err(idtcm->dev, "Failed to set pll mode to write frequency");
else
channel->mode = PTP_PLL_MODE_WRITE_FREQUENCY;
return err;
}
static int configure_dpll_mode_write_phase(struct idtcm_channel *channel)
{
struct idtcm *idtcm = channel->idtcm;
int err;
err = idtcm_set_pll_mode(channel, PLL_MODE_WRITE_PHASE);
if (err)
dev_err(idtcm->dev, "Failed to set pll mode to write phase");
else
channel->mode = PTP_PLL_MODE_WRITE_PHASE;
return err;
}
static int configure_manual_reference_write_frequency(struct idtcm_channel *channel)
{
struct idtcm *idtcm = channel->idtcm;
int err;
err = idtcm_set_manual_reference(channel, MANU_REF_WRITE_FREQUENCY);
if (err)
dev_err(idtcm->dev, "Failed to set manual reference to write frequency");
else
channel->mode = PTP_PLL_MODE_WRITE_FREQUENCY;
return err;
}
static int configure_manual_reference_write_phase(struct idtcm_channel *channel)
{
struct idtcm *idtcm = channel->idtcm;
int err;
err = idtcm_set_manual_reference(channel, MANU_REF_WRITE_PHASE);
if (err)
dev_err(idtcm->dev, "Failed to set manual reference to write phase");
else
channel->mode = PTP_PLL_MODE_WRITE_PHASE;
return err;
}
static int idtcm_stop_phase_pull_in(struct idtcm_channel *channel)
{
int err;
err = _idtcm_adjfine(channel, channel->current_freq_scaled_ppm);
if (err)
return err;
channel->phase_pull_in = false;
return 0;
}
static long idtcm_work_handler(struct ptp_clock_info *ptp)
{
struct idtcm_channel *channel = container_of(ptp, struct idtcm_channel, caps);
struct idtcm *idtcm = channel->idtcm;
mutex_lock(idtcm->lock);
(void)idtcm_stop_phase_pull_in(channel);
mutex_unlock(idtcm->lock);
/* Return a negative value here to not reschedule */
return -1;
}
static s32 phase_pull_in_scaled_ppm(s32 current_ppm, s32 phase_pull_in_ppb)
{
/* ppb = scaled_ppm * 125 / 2^13 */
/* scaled_ppm = ppb * 2^13 / 125 */
s64 max_scaled_ppm = div_s64((s64)PHASE_PULL_IN_MAX_PPB << 13, 125);
s64 scaled_ppm = div_s64((s64)phase_pull_in_ppb << 13, 125);
current_ppm += scaled_ppm;
if (current_ppm > max_scaled_ppm)
current_ppm = max_scaled_ppm;
else if (current_ppm < -max_scaled_ppm)
current_ppm = -max_scaled_ppm;
return current_ppm;
}
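/*
 * Worked example for the conversion above: a phase pull-in rate of
 * 1000 ppb (1 ppm) maps to scaled_ppm = 1000 * 2^13 / 125 = 65536, i.e.
 * exactly 1 ppm in the kernel's 16.16 scaled_ppm representation. The
 * result is then clamped to +/- PHASE_PULL_IN_MAX_PPB expressed in the
 * same units.
 */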
static int do_phase_pull_in_sw(struct idtcm_channel *channel,
s32 delta_ns,
u32 max_ffo_ppb)
{
s32 current_ppm = channel->current_freq_scaled_ppm;
u32 duration_ms = MSEC_PER_SEC;
s32 delta_ppm;
s32 ppb;
int err;
/* If the ToD correction is less than PHASE_PULL_IN_MIN_THRESHOLD_NS,
 * skip it: the error introduced by the ToD adjustment procedure would
 * be bigger than the required ToD correction.
 */
if (abs(delta_ns) < PHASE_PULL_IN_MIN_THRESHOLD_NS)
return 0;
if (max_ffo_ppb == 0)
max_ffo_ppb = PHASE_PULL_IN_MAX_PPB;
/* For most cases, keep the phase pull-in duration at 1 second */
ppb = delta_ns;
while (abs(ppb) > max_ffo_ppb) {
duration_ms *= 2;
ppb /= 2;
}
delta_ppm = phase_pull_in_scaled_ppm(current_ppm, ppb);
err = _idtcm_adjfine(channel, delta_ppm);
if (err)
return err;
/* schedule the worker to cancel phase pull-in */
ptp_schedule_worker(channel->ptp_clock,
msecs_to_jiffies(duration_ms) - 1);
channel->phase_pull_in = true;
return 0;
}
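/*
 * Worked example for do_phase_pull_in_sw() (illustrative values): with
 * delta_ns = 8000000 (an 8 ms correction) and max_ffo_ppb = 1000000, the
 * loop doubles duration_ms and halves ppb three times, ending with
 * ppb = 1000000 and duration_ms = 8000. The DCO then runs 1000 ppm fast
 * (or slow) for 8 seconds, which absorbs exactly the 8 ms offset before
 * the worker restores current_freq_scaled_ppm.
 */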
static int initialize_operating_mode_with_manual_reference(struct idtcm_channel *channel,
enum manual_reference ref)
{
struct idtcm *idtcm = channel->idtcm;
channel->mode = PTP_PLL_MODE_UNSUPPORTED;
channel->configure_write_frequency = configure_manual_reference_write_frequency;
channel->configure_write_phase = configure_manual_reference_write_phase;
channel->do_phase_pull_in = do_phase_pull_in_sw;
switch (ref) {
case MANU_REF_WRITE_PHASE:
channel->mode = PTP_PLL_MODE_WRITE_PHASE;
break;
case MANU_REF_WRITE_FREQUENCY:
channel->mode = PTP_PLL_MODE_WRITE_FREQUENCY;
break;
default:
dev_warn(idtcm->dev,
"Unsupported MANUAL_REFERENCE: 0x%02x", ref);
}
return 0;
}
static int initialize_operating_mode_with_pll_mode(struct idtcm_channel *channel,
enum pll_mode mode)
{
struct idtcm *idtcm = channel->idtcm;
int err = 0;
channel->mode = PTP_PLL_MODE_UNSUPPORTED;
channel->configure_write_frequency = configure_dpll_mode_write_frequency;
channel->configure_write_phase = configure_dpll_mode_write_phase;
channel->do_phase_pull_in = do_phase_pull_in_fw;
switch (mode) {
case PLL_MODE_WRITE_PHASE:
channel->mode = PTP_PLL_MODE_WRITE_PHASE;
break;
case PLL_MODE_WRITE_FREQUENCY:
channel->mode = PTP_PLL_MODE_WRITE_FREQUENCY;
break;
default:
dev_err(idtcm->dev,
"Unsupported PLL_MODE: 0x%02x", mode);
err = -EINVAL;
}
return err;
}
static int initialize_dco_operating_mode(struct idtcm_channel *channel)
{
enum manual_reference ref = MANU_REF_XO_DPLL;
enum pll_mode mode = PLL_MODE_DISABLED;
struct idtcm *idtcm = channel->idtcm;
int err;
channel->mode = PTP_PLL_MODE_UNSUPPORTED;
err = idtcm_get_pll_mode(channel, &mode);
if (err) {
dev_err(idtcm->dev, "Unable to read pll mode!");
return err;
}
if (mode == PLL_MODE_PLL) {
err = idtcm_get_manual_reference(channel, &ref);
if (err) {
dev_err(idtcm->dev, "Unable to read manual reference!");
return err;
}
err = initialize_operating_mode_with_manual_reference(channel, ref);
} else {
err = initialize_operating_mode_with_pll_mode(channel, mode);
}
if (channel->mode == PTP_PLL_MODE_WRITE_PHASE)
channel->configure_write_frequency(channel);
return err;
}
/* PTP Hardware Clock interface */
/*
 * Maximum absolute value for the write phase offset, in nanoseconds.
 *
 * The destination is a signed 32-bit register with a resolution of 50 ps:
 *
 * 0x7fffffff * 50 = 2147483647 * 50 = 107374182350 ps
 * which truncates to 107374182 ns.
 */
static s32 idtcm_getmaxphase(struct ptp_clock_info *ptp __always_unused)
{
return MAX_ABS_WRITE_PHASE_NANOSECONDS;
}
/*
* Internal function for implementing support for write phase offset
*
* @channel: channel
* @delta_ns: delta in nanoseconds
*/
static int _idtcm_adjphase(struct idtcm_channel *channel, s32 delta_ns)
{
struct idtcm *idtcm = channel->idtcm;
int err;
u8 i;
u8 buf[4] = {0};
s32 phase_50ps;
if (channel->mode != PTP_PLL_MODE_WRITE_PHASE) {
err = channel->configure_write_phase(channel);
if (err)
return err;
}
phase_50ps = div_s64((s64)delta_ns * 1000, 50);
for (i = 0; i < 4; i++) {
buf[i] = phase_50ps & 0xff;
phase_50ps >>= 8;
}
err = idtcm_write(idtcm, channel->dpll_phase, DPLL_WR_PHASE,
buf, sizeof(buf));
return err;
}
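/*
 * Worked example for _idtcm_adjphase() (illustrative values): delta_ns = 100
 * gives phase_50ps = 100 * 1000 / 50 = 2000 units of 50 ps, which is written
 * to DPLL_WR_PHASE little-endian as { 0xd0, 0x07, 0x00, 0x00 }.
 */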
static int _idtcm_adjfine(struct idtcm_channel *channel, long scaled_ppm)
{
struct idtcm *idtcm = channel->idtcm;
u8 i;
int err;
u8 buf[6] = {0};
s64 fcw;
if (channel->mode != PTP_PLL_MODE_WRITE_FREQUENCY) {
err = channel->configure_write_frequency(channel);
if (err)
return err;
}
/*
* Frequency Control Word unit is: 1.11 * 10^-10 ppm
*
* adjfreq:
* ppb * 10^9
* FCW = ----------
* 111
*
* adjfine:
* ppm_16 * 5^12
* FCW = -------------
* 111 * 2^4
*/
/* 2 ^ -53 = 1.1102230246251565404236316680908e-16, the FCW unit expressed as a plain frequency ratio */
fcw = scaled_ppm * 244140625ULL;
fcw = div_s64(fcw, 1776);
for (i = 0; i < 6; i++) {
buf[i] = fcw & 0xff;
fcw >>= 8;
}
err = idtcm_write(idtcm, channel->dpll_freq, DPLL_WR_FREQ,
buf, sizeof(buf));
return err;
}
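/*
 * Worked example for the FCW computation above (illustrative values): with
 * scaled_ppm = 65536 (1 ppm), fcw = 65536 * 5^12 / (111 * 2^4)
 * = 16000000000000 / 1776 ~= 9009009009, i.e. 1 ppm divided by the
 * 1.11 * 10^-10 ppm FCW unit, written to DPLL_WR_FREQ as a 48-bit
 * little-endian value.
 */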
static int idtcm_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
struct idtcm_channel *channel = container_of(ptp, struct idtcm_channel, caps);
struct idtcm *idtcm = channel->idtcm;
int err;
mutex_lock(idtcm->lock);
err = _idtcm_gettime_immediate(channel, ts);
mutex_unlock(idtcm->lock);
if (err)
dev_err(idtcm->dev, "Failed at line %d in %s!",
__LINE__, __func__);
return err;
}
static int idtcm_settime_deprecated(struct ptp_clock_info *ptp,
const struct timespec64 *ts)
{
struct idtcm_channel *channel = container_of(ptp, struct idtcm_channel, caps);
struct idtcm *idtcm = channel->idtcm;
int err;
mutex_lock(idtcm->lock);
err = _idtcm_settime_deprecated(channel, ts);
mutex_unlock(idtcm->lock);
if (err)
dev_err(idtcm->dev,
"Failed at line %d in %s!", __LINE__, __func__);
return err;
}
static int idtcm_settime(struct ptp_clock_info *ptp,
const struct timespec64 *ts)
{
struct idtcm_channel *channel = container_of(ptp, struct idtcm_channel, caps);
struct idtcm *idtcm = channel->idtcm;
int err;
mutex_lock(idtcm->lock);
err = _idtcm_settime(channel, ts, SCSR_TOD_WR_TYPE_SEL_ABSOLUTE);
mutex_unlock(idtcm->lock);
if (err)
dev_err(idtcm->dev,
"Failed at line %d in %s!", __LINE__, __func__);
return err;
}
static int idtcm_adjtime_deprecated(struct ptp_clock_info *ptp, s64 delta)
{
struct idtcm_channel *channel = container_of(ptp, struct idtcm_channel, caps);
struct idtcm *idtcm = channel->idtcm;
int err;
mutex_lock(idtcm->lock);
err = _idtcm_adjtime_deprecated(channel, delta);
mutex_unlock(idtcm->lock);
if (err)
dev_err(idtcm->dev,
"Failed at line %d in %s!", __LINE__, __func__);
return err;
}
static int idtcm_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct idtcm_channel *channel = container_of(ptp, struct idtcm_channel, caps);
struct idtcm *idtcm = channel->idtcm;
struct timespec64 ts;
enum scsr_tod_write_type_sel type;
int err;
if (channel->phase_pull_in)
return -EBUSY;
mutex_lock(idtcm->lock);
if (abs(delta) < PHASE_PULL_IN_THRESHOLD_NS) {
err = channel->do_phase_pull_in(channel, delta, 0);
} else {
if (delta >= 0) {
ts = ns_to_timespec64(delta);
type = SCSR_TOD_WR_TYPE_SEL_DELTA_PLUS;
} else {
ts = ns_to_timespec64(-delta);
type = SCSR_TOD_WR_TYPE_SEL_DELTA_MINUS;
}
err = _idtcm_settime(channel, &ts, type);
}
mutex_unlock(idtcm->lock);
if (err)
dev_err(idtcm->dev,
"Failed at line %d in %s!", __LINE__, __func__);
return err;
}
static int idtcm_adjphase(struct ptp_clock_info *ptp, s32 delta)
{
struct idtcm_channel *channel = container_of(ptp, struct idtcm_channel, caps);
struct idtcm *idtcm = channel->idtcm;
int err;
mutex_lock(idtcm->lock);
err = _idtcm_adjphase(channel, delta);
mutex_unlock(idtcm->lock);
if (err)
dev_err(idtcm->dev,
"Failed at line %d in %s!", __LINE__, __func__);
return err;
}
static int idtcm_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct idtcm_channel *channel = container_of(ptp, struct idtcm_channel, caps);
struct idtcm *idtcm = channel->idtcm;
int err;
if (channel->phase_pull_in)
return 0;
if (scaled_ppm == channel->current_freq_scaled_ppm)
return 0;
mutex_lock(idtcm->lock);
err = _idtcm_adjfine(channel, scaled_ppm);
mutex_unlock(idtcm->lock);
if (err)
dev_err(idtcm->dev,
"Failed at line %d in %s!", __LINE__, __func__);
else
channel->current_freq_scaled_ppm = scaled_ppm;
return err;
}
static int idtcm_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
struct idtcm_channel *channel = container_of(ptp, struct idtcm_channel, caps);
struct idtcm *idtcm = channel->idtcm;
int err = -EOPNOTSUPP;
mutex_lock(idtcm->lock);
switch (rq->type) {
case PTP_CLK_REQ_PEROUT:
if (!on)
err = idtcm_perout_enable(channel, &rq->perout, false);
/* Only accept a 1-PPS aligned to the second. */
else if (rq->perout.start.nsec || rq->perout.period.sec != 1 ||
rq->perout.period.nsec)
err = -ERANGE;
else
err = idtcm_perout_enable(channel, &rq->perout, true);
break;
case PTP_CLK_REQ_EXTTS:
err = idtcm_extts_enable(channel, rq, on);
break;
default:
break;
}
mutex_unlock(idtcm->lock);
if (err)
dev_err(channel->idtcm->dev,
"Failed in %s with err %d!", __func__, err);
return err;
}
static int idtcm_enable_tod(struct idtcm_channel *channel)
{
struct idtcm *idtcm = channel->idtcm;
struct timespec64 ts = {0, 0};
u16 tod_cfg = IDTCM_FW_REG(idtcm->fw_ver, V520, TOD_CFG);
u8 cfg;
int err;
/*
* Start the TOD clock ticking.
*/
err = idtcm_read(idtcm, channel->tod_n, tod_cfg, &cfg, sizeof(cfg));
if (err)
return err;
cfg |= TOD_ENABLE;
err = idtcm_write(idtcm, channel->tod_n, tod_cfg, &cfg, sizeof(cfg));
if (err)
return err;
if (idtcm->fw_ver < V487)
return _idtcm_settime_deprecated(channel, &ts);
else
return _idtcm_settime(channel, &ts,
SCSR_TOD_WR_TYPE_SEL_ABSOLUTE);
}
static void idtcm_set_version_info(struct idtcm *idtcm)
{
u8 major;
u8 minor;
u8 hotfix;
u16 product_id;
u8 hw_rev_id;
u8 config_select;
idtcm_read_major_release(idtcm, &major);
idtcm_read_minor_release(idtcm, &minor);
idtcm_read_hotfix_release(idtcm, &hotfix);
idtcm_read_product_id(idtcm, &product_id);
idtcm_read_hw_rev_id(idtcm, &hw_rev_id);
idtcm_read_otp_scsr_config_select(idtcm, &config_select);
snprintf(idtcm->version, sizeof(idtcm->version), "%u.%u.%u",
major, minor, hotfix);
idtcm->fw_ver = idtcm_fw_version(idtcm->version);
dev_info(idtcm->dev,
"%d.%d.%d, Id: 0x%04x HW Rev: %d OTP Config Select: %d",
major, minor, hotfix,
product_id, hw_rev_id, config_select);
}
static int idtcm_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
enum ptp_pin_function func, unsigned int chan)
{
switch (func) {
case PTP_PF_NONE:
case PTP_PF_EXTTS:
break;
case PTP_PF_PEROUT:
case PTP_PF_PHYSYNC:
return -1;
}
return 0;
}
static struct ptp_pin_desc pin_config[MAX_TOD][MAX_REF_CLK];
static const struct ptp_clock_info idtcm_caps = {
.owner = THIS_MODULE,
.max_adj = 244000,
.n_per_out = 12,
.n_ext_ts = MAX_TOD,
.n_pins = MAX_REF_CLK,
.adjphase = &idtcm_adjphase,
.getmaxphase = &idtcm_getmaxphase,
.adjfine = &idtcm_adjfine,
.adjtime = &idtcm_adjtime,
.gettime64 = &idtcm_gettime,
.settime64 = &idtcm_settime,
.enable = &idtcm_enable,
.verify = &idtcm_verify_pin,
.do_aux_work = &idtcm_work_handler,
};
static const struct ptp_clock_info idtcm_caps_deprecated = {
.owner = THIS_MODULE,
.max_adj = 244000,
.n_per_out = 12,
.n_ext_ts = MAX_TOD,
.n_pins = MAX_REF_CLK,
.adjphase = &idtcm_adjphase,
.getmaxphase = &idtcm_getmaxphase,
.adjfine = &idtcm_adjfine,
.adjtime = &idtcm_adjtime_deprecated,
.gettime64 = &idtcm_gettime,
.settime64 = &idtcm_settime_deprecated,
.enable = &idtcm_enable,
.verify = &idtcm_verify_pin,
.do_aux_work = &idtcm_work_handler,
};
static int configure_channel_pll(struct idtcm_channel *channel)
{
struct idtcm *idtcm = channel->idtcm;
int err = 0;
switch (channel->pll) {
case 0:
channel->dpll_freq = DPLL_FREQ_0;
channel->dpll_n = DPLL_0;
channel->hw_dpll_n = HW_DPLL_0;
channel->dpll_phase = DPLL_PHASE_0;
channel->dpll_ctrl_n = DPLL_CTRL_0;
channel->dpll_phase_pull_in = DPLL_PHASE_PULL_IN_0;
break;
case 1:
channel->dpll_freq = DPLL_FREQ_1;
channel->dpll_n = DPLL_1;
channel->hw_dpll_n = HW_DPLL_1;
channel->dpll_phase = DPLL_PHASE_1;
channel->dpll_ctrl_n = DPLL_CTRL_1;
channel->dpll_phase_pull_in = DPLL_PHASE_PULL_IN_1;
break;
case 2:
channel->dpll_freq = DPLL_FREQ_2;
channel->dpll_n = IDTCM_FW_REG(idtcm->fw_ver, V520, DPLL_2);
channel->hw_dpll_n = HW_DPLL_2;
channel->dpll_phase = DPLL_PHASE_2;
channel->dpll_ctrl_n = DPLL_CTRL_2;
channel->dpll_phase_pull_in = DPLL_PHASE_PULL_IN_2;
break;
case 3:
channel->dpll_freq = DPLL_FREQ_3;
channel->dpll_n = DPLL_3;
channel->hw_dpll_n = HW_DPLL_3;
channel->dpll_phase = DPLL_PHASE_3;
channel->dpll_ctrl_n = DPLL_CTRL_3;
channel->dpll_phase_pull_in = DPLL_PHASE_PULL_IN_3;
break;
case 4:
channel->dpll_freq = DPLL_FREQ_4;
channel->dpll_n = IDTCM_FW_REG(idtcm->fw_ver, V520, DPLL_4);
channel->hw_dpll_n = HW_DPLL_4;
channel->dpll_phase = DPLL_PHASE_4;
channel->dpll_ctrl_n = DPLL_CTRL_4;
channel->dpll_phase_pull_in = DPLL_PHASE_PULL_IN_4;
break;
case 5:
channel->dpll_freq = DPLL_FREQ_5;
channel->dpll_n = DPLL_5;
channel->hw_dpll_n = HW_DPLL_5;
channel->dpll_phase = DPLL_PHASE_5;
channel->dpll_ctrl_n = DPLL_CTRL_5;
channel->dpll_phase_pull_in = DPLL_PHASE_PULL_IN_5;
break;
case 6:
channel->dpll_freq = DPLL_FREQ_6;
channel->dpll_n = IDTCM_FW_REG(idtcm->fw_ver, V520, DPLL_6);
channel->hw_dpll_n = HW_DPLL_6;
channel->dpll_phase = DPLL_PHASE_6;
channel->dpll_ctrl_n = DPLL_CTRL_6;
channel->dpll_phase_pull_in = DPLL_PHASE_PULL_IN_6;
break;
case 7:
channel->dpll_freq = DPLL_FREQ_7;
channel->dpll_n = DPLL_7;
channel->hw_dpll_n = HW_DPLL_7;
channel->dpll_phase = DPLL_PHASE_7;
channel->dpll_ctrl_n = DPLL_CTRL_7;
channel->dpll_phase_pull_in = DPLL_PHASE_PULL_IN_7;
break;
default:
err = -EINVAL;
}
return err;
}
/*
* Compensate for the PTP DCO input-to-output delay.
* This delay is 18 FOD cycles.
*/
static u32 idtcm_get_dco_delay(struct idtcm_channel *channel)
{
struct idtcm *idtcm = channel->idtcm;
u8 mbuf[8] = {0};
u8 nbuf[2] = {0};
u32 fodFreq;
int err;
u64 m;
u16 n;
err = idtcm_read(idtcm, channel->dpll_ctrl_n,
DPLL_CTRL_DPLL_FOD_FREQ, mbuf, 6);
if (err)
return 0;
err = idtcm_read(idtcm, channel->dpll_ctrl_n,
DPLL_CTRL_DPLL_FOD_FREQ + 6, nbuf, 2);
if (err)
return 0;
m = get_unaligned_le64(mbuf);
n = get_unaligned_le16(nbuf);
if (n == 0)
n = 1;
fodFreq = (u32)div_u64(m, n);
if (fodFreq >= 500000000)
return (u32)div_u64(18 * (u64)NSEC_PER_SEC, fodFreq);
return 0;
}
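/*
 * Worked example for idtcm_get_dco_delay() (illustrative values): an FOD
 * frequency of 1 GHz gives a delay of 18 * 10^9 / 10^9 = 18 ns, while at
 * 500 MHz the 18 cycles amount to 36 ns. FOD frequencies below 500 MHz
 * (or read errors) fall back to a reported delay of 0.
 */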
static int configure_channel_tod(struct idtcm_channel *channel, u32 index)
{
enum fw_version fw_ver = channel->idtcm->fw_ver;
/* Set tod addresses */
switch (index) {
case 0:
channel->tod_read_primary = IDTCM_FW_REG(fw_ver, V520, TOD_READ_PRIMARY_0);
channel->tod_read_secondary = IDTCM_FW_REG(fw_ver, V520, TOD_READ_SECONDARY_0);
channel->tod_write = IDTCM_FW_REG(fw_ver, V520, TOD_WRITE_0);
channel->tod_n = IDTCM_FW_REG(fw_ver, V520, TOD_0);
channel->sync_src = SYNC_SOURCE_DPLL0_TOD_PPS;
break;
case 1:
channel->tod_read_primary = IDTCM_FW_REG(fw_ver, V520, TOD_READ_PRIMARY_1);
channel->tod_read_secondary = IDTCM_FW_REG(fw_ver, V520, TOD_READ_SECONDARY_1);
channel->tod_write = IDTCM_FW_REG(fw_ver, V520, TOD_WRITE_1);
channel->tod_n = IDTCM_FW_REG(fw_ver, V520, TOD_1);
channel->sync_src = SYNC_SOURCE_DPLL1_TOD_PPS;
break;
case 2:
channel->tod_read_primary = IDTCM_FW_REG(fw_ver, V520, TOD_READ_PRIMARY_2);
channel->tod_read_secondary = IDTCM_FW_REG(fw_ver, V520, TOD_READ_SECONDARY_2);
channel->tod_write = IDTCM_FW_REG(fw_ver, V520, TOD_WRITE_2);
channel->tod_n = IDTCM_FW_REG(fw_ver, V520, TOD_2);
channel->sync_src = SYNC_SOURCE_DPLL2_TOD_PPS;
break;
case 3:
channel->tod_read_primary = IDTCM_FW_REG(fw_ver, V520, TOD_READ_PRIMARY_3);
channel->tod_read_secondary = IDTCM_FW_REG(fw_ver, V520, TOD_READ_SECONDARY_3);
channel->tod_write = IDTCM_FW_REG(fw_ver, V520, TOD_WRITE_3);
channel->tod_n = IDTCM_FW_REG(fw_ver, V520, TOD_3);
channel->sync_src = SYNC_SOURCE_DPLL3_TOD_PPS;
break;
default:
return -EINVAL;
}
return 0;
}
static int idtcm_enable_channel(struct idtcm *idtcm, u32 index)
{
struct idtcm_channel *channel;
int err;
int i;
if (!(index < MAX_TOD))
return -EINVAL;
channel = &idtcm->channel[index];
channel->idtcm = idtcm;
channel->current_freq_scaled_ppm = 0;
/* Set pll addresses */
err = configure_channel_pll(channel);
if (err)
return err;
/* Set tod addresses */
err = configure_channel_tod(channel, index);
if (err)
return err;
if (idtcm->fw_ver < V487)
channel->caps = idtcm_caps_deprecated;
else
channel->caps = idtcm_caps;
snprintf(channel->caps.name, sizeof(channel->caps.name),
"IDT CM TOD%u", index);
channel->caps.pin_config = pin_config[index];
for (i = 0; i < channel->caps.n_pins; ++i) {
struct ptp_pin_desc *ppd = &channel->caps.pin_config[i];
snprintf(ppd->name, sizeof(ppd->name), "input_ref%d", i);
ppd->index = i;
ppd->func = PTP_PF_NONE;
ppd->chan = index;
}
err = initialize_dco_operating_mode(channel);
if (err)
return err;
err = idtcm_enable_tod(channel);
if (err) {
dev_err(idtcm->dev,
"Failed at line %d in %s!", __LINE__, __func__);
return err;
}
channel->dco_delay = idtcm_get_dco_delay(channel);
channel->ptp_clock = ptp_clock_register(&channel->caps, NULL);
if (IS_ERR(channel->ptp_clock)) {
err = PTR_ERR(channel->ptp_clock);
channel->ptp_clock = NULL;
return err;
}
if (!channel->ptp_clock)
return -ENOTSUPP;
dev_info(idtcm->dev, "PLL%d registered as ptp%d",
index, channel->ptp_clock->index);
return 0;
}
static int idtcm_enable_extts_channel(struct idtcm *idtcm, u32 index)
{
struct idtcm_channel *channel;
int err;
if (!(index < MAX_TOD))
return -EINVAL;
channel = &idtcm->channel[index];
channel->idtcm = idtcm;
/* Set tod addresses */
err = configure_channel_tod(channel, index);
if (err)
return err;
return 0;
}
static void idtcm_extts_check(struct work_struct *work)
{
struct idtcm *idtcm = container_of(work, struct idtcm, extts_work.work);
struct idtcm_channel *channel;
u8 mask;
int err;
int i;
if (idtcm->extts_mask == 0)
return;
mutex_lock(idtcm->lock);
for (i = 0; i < MAX_TOD; i++) {
mask = 1 << i;
if ((idtcm->extts_mask & mask) == 0)
continue;
err = idtcm_extts_check_channel(idtcm, i);
if (err == 0) {
/* trigger clears itself, so clear the mask */
if (idtcm->extts_single_shot) {
idtcm->extts_mask &= ~mask;
} else {
/* Re-arm */
channel = &idtcm->channel[i];
arm_tod_read_trig_sel_refclk(channel, channel->refn);
}
}
}
if (idtcm->extts_mask)
schedule_delayed_work(&idtcm->extts_work,
msecs_to_jiffies(EXTTS_PERIOD_MS));
mutex_unlock(idtcm->lock);
}
static void ptp_clock_unregister_all(struct idtcm *idtcm)
{
u8 i;
struct idtcm_channel *channel;
for (i = 0; i < MAX_TOD; i++) {
channel = &idtcm->channel[i];
if (channel->ptp_clock)
ptp_clock_unregister(channel->ptp_clock);
}
}
static void set_default_masks(struct idtcm *idtcm)
{
idtcm->tod_mask = DEFAULT_TOD_MASK;
idtcm->extts_mask = 0;
idtcm->channel[0].tod = 0;
idtcm->channel[1].tod = 1;
idtcm->channel[2].tod = 2;
idtcm->channel[3].tod = 3;
idtcm->channel[0].pll = DEFAULT_TOD0_PTP_PLL;
idtcm->channel[1].pll = DEFAULT_TOD1_PTP_PLL;
idtcm->channel[2].pll = DEFAULT_TOD2_PTP_PLL;
idtcm->channel[3].pll = DEFAULT_TOD3_PTP_PLL;
idtcm->channel[0].output_mask = DEFAULT_OUTPUT_MASK_PLL0;
idtcm->channel[1].output_mask = DEFAULT_OUTPUT_MASK_PLL1;
idtcm->channel[2].output_mask = DEFAULT_OUTPUT_MASK_PLL2;
idtcm->channel[3].output_mask = DEFAULT_OUTPUT_MASK_PLL3;
}
static int idtcm_probe(struct platform_device *pdev)
{
struct rsmu_ddata *ddata = dev_get_drvdata(pdev->dev.parent);
struct idtcm *idtcm;
int err;
u8 i;
idtcm = devm_kzalloc(&pdev->dev, sizeof(struct idtcm), GFP_KERNEL);
if (!idtcm)
return -ENOMEM;
idtcm->dev = &pdev->dev;
idtcm->mfd = pdev->dev.parent;
idtcm->lock = &ddata->lock;
idtcm->regmap = ddata->regmap;
idtcm->calculate_overhead_flag = 0;
INIT_DELAYED_WORK(&idtcm->extts_work, idtcm_extts_check);
set_default_masks(idtcm);
mutex_lock(idtcm->lock);
idtcm_set_version_info(idtcm);
err = idtcm_load_firmware(idtcm, &pdev->dev);
if (err)
dev_warn(idtcm->dev, "loading firmware failed with %d", err);
wait_for_chip_ready(idtcm);
if (idtcm->tod_mask) {
for (i = 0; i < MAX_TOD; i++) {
if (idtcm->tod_mask & (1 << i))
err = idtcm_enable_channel(idtcm, i);
else
err = idtcm_enable_extts_channel(idtcm, i);
if (err) {
dev_err(idtcm->dev,
"idtcm_enable_channel %d failed!", i);
break;
}
}
} else {
dev_err(idtcm->dev,
"no PLLs flagged as PHCs, nothing to do");
err = -ENODEV;
}
mutex_unlock(idtcm->lock);
if (err) {
ptp_clock_unregister_all(idtcm);
return err;
}
platform_set_drvdata(pdev, idtcm);
return 0;
}
static int idtcm_remove(struct platform_device *pdev)
{
struct idtcm *idtcm = platform_get_drvdata(pdev);
idtcm->extts_mask = 0;
ptp_clock_unregister_all(idtcm);
cancel_delayed_work_sync(&idtcm->extts_work);
return 0;
}
static struct platform_driver idtcm_driver = {
.driver = {
.name = "8a3400x-phc",
},
.probe = idtcm_probe,
.remove = idtcm_remove,
};
module_platform_driver(idtcm_driver);
|
linux-master
|
drivers/ptp/ptp_clockmatrix.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Virtual PTP 1588 clock for use with KVM guests
*
* Copyright (C) 2017 Red Hat Inc.
*/
#include <linux/device.h>
#include <linux/kernel.h>
#include <asm/pvclock.h>
#include <asm/kvmclock.h>
#include <linux/module.h>
#include <uapi/asm/kvm_para.h>
#include <uapi/linux/kvm_para.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/ptp_kvm.h>
#include <linux/set_memory.h>
static phys_addr_t clock_pair_gpa;
static struct kvm_clock_pairing clock_pair_glbl;
static struct kvm_clock_pairing *clock_pair;
int kvm_arch_ptp_init(void)
{
struct page *p;
long ret;
if (!kvm_para_available())
return -ENODEV;
if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
p = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!p)
return -ENOMEM;
clock_pair = page_address(p);
ret = set_memory_decrypted((unsigned long)clock_pair, 1);
if (ret) {
__free_page(p);
clock_pair = NULL;
goto nofree;
}
} else {
clock_pair = &clock_pair_glbl;
}
clock_pair_gpa = slow_virt_to_phys(clock_pair);
if (!pvclock_get_pvti_cpu0_va()) {
ret = -ENODEV;
goto err;
}
ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, clock_pair_gpa,
KVM_CLOCK_PAIRING_WALLCLOCK);
if (ret == -KVM_ENOSYS) {
ret = -ENODEV;
goto err;
}
return ret;
err:
kvm_arch_ptp_exit();
nofree:
return ret;
}
void kvm_arch_ptp_exit(void)
{
if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
WARN_ON(set_memory_encrypted((unsigned long)clock_pair, 1));
free_page((unsigned long)clock_pair);
clock_pair = NULL;
}
}
int kvm_arch_ptp_get_clock(struct timespec64 *ts)
{
long ret;
ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING,
clock_pair_gpa,
KVM_CLOCK_PAIRING_WALLCLOCK);
if (ret != 0) {
pr_err_ratelimited("clock offset hypercall ret %lu\n", ret);
return -EOPNOTSUPP;
}
ts->tv_sec = clock_pair->sec;
ts->tv_nsec = clock_pair->nsec;
return 0;
}
int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *tspec,
struct clocksource **cs)
{
struct pvclock_vcpu_time_info *src;
unsigned int version;
long ret;
src = this_cpu_pvti();
do {
/*
 * We are using a TSC value read in the host's
 * kvm_hc_clock_pairing handling.
 * So any changes to tsc_to_system_mul
 * and tsc_shift or any other pvclock
 * data invalidate that measurement.
 */
version = pvclock_read_begin(src);
ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING,
clock_pair_gpa,
KVM_CLOCK_PAIRING_WALLCLOCK);
if (ret != 0) {
pr_err_ratelimited("clock pairing hypercall ret %lu\n", ret);
return -EOPNOTSUPP;
}
tspec->tv_sec = clock_pair->sec;
tspec->tv_nsec = clock_pair->nsec;
*cycle = __pvclock_read_cycles(src, clock_pair->tsc);
} while (pvclock_read_retry(src, version));
*cs = &kvm_clock;
return 0;
}
|
linux-master
|
drivers/ptp/ptp_kvm_x86.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* PTP virtual clock driver
*
* Copyright 2021 NXP
*/
#include <linux/slab.h>
#include <linux/hashtable.h>
#include "ptp_private.h"
#define PTP_VCLOCK_CC_SHIFT 31
#define PTP_VCLOCK_CC_MULT (1 << PTP_VCLOCK_CC_SHIFT)
#define PTP_VCLOCK_FADJ_SHIFT 9
#define PTP_VCLOCK_FADJ_DENOMINATOR 15625ULL
#define PTP_VCLOCK_REFRESH_INTERVAL (HZ * 2)
/* protects vclock_hash addition/deletion */
static DEFINE_SPINLOCK(vclock_hash_lock);
static DEFINE_READ_MOSTLY_HASHTABLE(vclock_hash, 8);
static void ptp_vclock_hash_add(struct ptp_vclock *vclock)
{
spin_lock(&vclock_hash_lock);
hlist_add_head_rcu(&vclock->vclock_hash_node,
&vclock_hash[vclock->clock->index % HASH_SIZE(vclock_hash)]);
spin_unlock(&vclock_hash_lock);
}
static void ptp_vclock_hash_del(struct ptp_vclock *vclock)
{
spin_lock(&vclock_hash_lock);
hlist_del_init_rcu(&vclock->vclock_hash_node);
spin_unlock(&vclock_hash_lock);
synchronize_rcu();
}
static int ptp_vclock_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct ptp_vclock *vclock = info_to_vclock(ptp);
s64 adj;
adj = (s64)scaled_ppm << PTP_VCLOCK_FADJ_SHIFT;
adj = div_s64(adj, PTP_VCLOCK_FADJ_DENOMINATOR);
if (mutex_lock_interruptible(&vclock->lock))
return -EINTR;
timecounter_read(&vclock->tc);
vclock->cc.mult = PTP_VCLOCK_CC_MULT + adj;
mutex_unlock(&vclock->lock);
return 0;
}
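/*
 * Worked example for ptp_vclock_adjfine() (illustrative values): with
 * scaled_ppm = 65536 (1 ppm), adj = (65536 << 9) / 15625 = 2147, so the
 * cyclecounter mult becomes 2^31 + 2147, i.e. roughly 2147 / 2^31 ~= 1 ppm
 * faster than the parent clock.
 */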
static int ptp_vclock_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct ptp_vclock *vclock = info_to_vclock(ptp);
if (mutex_lock_interruptible(&vclock->lock))
return -EINTR;
timecounter_adjtime(&vclock->tc, delta);
mutex_unlock(&vclock->lock);
return 0;
}
static int ptp_vclock_gettime(struct ptp_clock_info *ptp,
struct timespec64 *ts)
{
struct ptp_vclock *vclock = info_to_vclock(ptp);
u64 ns;
if (mutex_lock_interruptible(&vclock->lock))
return -EINTR;
ns = timecounter_read(&vclock->tc);
mutex_unlock(&vclock->lock);
*ts = ns_to_timespec64(ns);
return 0;
}
static int ptp_vclock_gettimex(struct ptp_clock_info *ptp,
struct timespec64 *ts,
struct ptp_system_timestamp *sts)
{
struct ptp_vclock *vclock = info_to_vclock(ptp);
struct ptp_clock *pptp = vclock->pclock;
struct timespec64 pts;
int err;
u64 ns;
err = pptp->info->getcyclesx64(pptp->info, &pts, sts);
if (err)
return err;
if (mutex_lock_interruptible(&vclock->lock))
return -EINTR;
ns = timecounter_cyc2time(&vclock->tc, timespec64_to_ns(&pts));
mutex_unlock(&vclock->lock);
*ts = ns_to_timespec64(ns);
return 0;
}
static int ptp_vclock_settime(struct ptp_clock_info *ptp,
const struct timespec64 *ts)
{
struct ptp_vclock *vclock = info_to_vclock(ptp);
u64 ns = timespec64_to_ns(ts);
if (mutex_lock_interruptible(&vclock->lock))
return -EINTR;
timecounter_init(&vclock->tc, &vclock->cc, ns);
mutex_unlock(&vclock->lock);
return 0;
}
static int ptp_vclock_getcrosststamp(struct ptp_clock_info *ptp,
struct system_device_crosststamp *xtstamp)
{
struct ptp_vclock *vclock = info_to_vclock(ptp);
struct ptp_clock *pptp = vclock->pclock;
int err;
u64 ns;
err = pptp->info->getcrosscycles(pptp->info, xtstamp);
if (err)
return err;
if (mutex_lock_interruptible(&vclock->lock))
return -EINTR;
ns = timecounter_cyc2time(&vclock->tc, ktime_to_ns(xtstamp->device));
mutex_unlock(&vclock->lock);
xtstamp->device = ns_to_ktime(ns);
return 0;
}
static long ptp_vclock_refresh(struct ptp_clock_info *ptp)
{
struct ptp_vclock *vclock = info_to_vclock(ptp);
struct timespec64 ts;
ptp_vclock_gettime(&vclock->info, &ts);
return PTP_VCLOCK_REFRESH_INTERVAL;
}
static const struct ptp_clock_info ptp_vclock_info = {
.owner = THIS_MODULE,
.name = "ptp virtual clock",
.max_adj = 500000000,
.adjfine = ptp_vclock_adjfine,
.adjtime = ptp_vclock_adjtime,
.settime64 = ptp_vclock_settime,
.do_aux_work = ptp_vclock_refresh,
};
static u64 ptp_vclock_read(const struct cyclecounter *cc)
{
struct ptp_vclock *vclock = cc_to_vclock(cc);
struct ptp_clock *ptp = vclock->pclock;
struct timespec64 ts = {};
ptp->info->getcycles64(ptp->info, &ts);
return timespec64_to_ns(&ts);
}
static const struct cyclecounter ptp_vclock_cc = {
.read = ptp_vclock_read,
.mask = CYCLECOUNTER_MASK(32),
.mult = PTP_VCLOCK_CC_MULT,
.shift = PTP_VCLOCK_CC_SHIFT,
};
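/*
* Illustrative note (not from the original driver): with mult = 2^31 and
* shift = 31 the timecounter maps cycles to nanoseconds 1:1, so the vclock
* initially tracks the parent clock's getcycles64() value unchanged; only
* ptp_vclock_adjfine() nudges cc.mult away from PTP_VCLOCK_CC_MULT to slew
* the virtual clock. The 2 s refresh worker keeps successive reads well
* inside the 32-bit cycle mask (about 4.29 s worth of nanoseconds).
*/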
struct ptp_vclock *ptp_vclock_register(struct ptp_clock *pclock)
{
struct ptp_vclock *vclock;
vclock = kzalloc(sizeof(*vclock), GFP_KERNEL);
if (!vclock)
return NULL;
vclock->pclock = pclock;
vclock->info = ptp_vclock_info;
if (pclock->info->getcyclesx64)
vclock->info.gettimex64 = ptp_vclock_gettimex;
else
vclock->info.gettime64 = ptp_vclock_gettime;
if (pclock->info->getcrosscycles)
vclock->info.getcrosststamp = ptp_vclock_getcrosststamp;
vclock->cc = ptp_vclock_cc;
snprintf(vclock->info.name, PTP_CLOCK_NAME_LEN, "ptp%d_virt",
pclock->index);
INIT_HLIST_NODE(&vclock->vclock_hash_node);
mutex_init(&vclock->lock);
vclock->clock = ptp_clock_register(&vclock->info, &pclock->dev);
if (IS_ERR_OR_NULL(vclock->clock)) {
kfree(vclock);
return NULL;
}
timecounter_init(&vclock->tc, &vclock->cc, 0);
ptp_schedule_worker(vclock->clock, PTP_VCLOCK_REFRESH_INTERVAL);
ptp_vclock_hash_add(vclock);
return vclock;
}
void ptp_vclock_unregister(struct ptp_vclock *vclock)
{
ptp_vclock_hash_del(vclock);
ptp_clock_unregister(vclock->clock);
kfree(vclock);
}
#if IS_BUILTIN(CONFIG_PTP_1588_CLOCK)
int ptp_get_vclocks_index(int pclock_index, int **vclock_index)
{
char name[PTP_CLOCK_NAME_LEN] = "";
struct ptp_clock *ptp;
struct device *dev;
int num = 0;
if (pclock_index < 0)
return num;
snprintf(name, PTP_CLOCK_NAME_LEN, "ptp%d", pclock_index);
dev = class_find_device_by_name(ptp_class, name);
if (!dev)
return num;
ptp = dev_get_drvdata(dev);
if (mutex_lock_interruptible(&ptp->n_vclocks_mux)) {
put_device(dev);
return num;
}
*vclock_index = kzalloc(sizeof(int) * ptp->n_vclocks, GFP_KERNEL);
if (!(*vclock_index))
goto out;
memcpy(*vclock_index, ptp->vclock_index, sizeof(int) * ptp->n_vclocks);
num = ptp->n_vclocks;
out:
mutex_unlock(&ptp->n_vclocks_mux);
put_device(dev);
return num;
}
EXPORT_SYMBOL(ptp_get_vclocks_index);
ktime_t ptp_convert_timestamp(const ktime_t *hwtstamp, int vclock_index)
{
unsigned int hash = vclock_index % HASH_SIZE(vclock_hash);
struct ptp_vclock *vclock;
u64 ns;
u64 vclock_ns = 0;
ns = ktime_to_ns(*hwtstamp);
rcu_read_lock();
hlist_for_each_entry_rcu(vclock, &vclock_hash[hash], vclock_hash_node) {
if (vclock->clock->index != vclock_index)
continue;
if (mutex_lock_interruptible(&vclock->lock))
break;
vclock_ns = timecounter_cyc2time(&vclock->tc, ns);
mutex_unlock(&vclock->lock);
break;
}
rcu_read_unlock();
return ns_to_ktime(vclock_ns);
}
EXPORT_SYMBOL(ptp_convert_timestamp);
#endif
|
linux-master
|
drivers/ptp/ptp_vclock.c
|
// SPDX-License-Identifier: GPL-2.0-only
// Copyright 2017 Broadcom
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/types.h>
#define DTE_NCO_LOW_TIME_REG 0x00
#define DTE_NCO_TIME_REG 0x04
#define DTE_NCO_OVERFLOW_REG 0x08
#define DTE_NCO_INC_REG 0x0c
#define DTE_NCO_SUM2_MASK 0xffffffff
#define DTE_NCO_SUM2_SHIFT 4ULL
#define DTE_NCO_SUM3_MASK 0xff
#define DTE_NCO_SUM3_SHIFT 36ULL
#define DTE_NCO_SUM3_WR_SHIFT 8
#define DTE_NCO_TS_WRAP_MASK 0xfff
#define DTE_NCO_TS_WRAP_LSHIFT 32
#define DTE_NCO_INC_DEFAULT 0x80000000
#define DTE_NUM_REGS_TO_RESTORE 4
/* Full wrap around is 44bits in ns (~4.887 hrs) */
#define DTE_WRAP_AROUND_NSEC_SHIFT 44
/* 44 bits NCO */
#define DTE_NCO_MAX_NS 0xFFFFFFFFFFFLL
/* 125MHz with 3.29 reg cfg */
#define DTE_PPB_ADJ(ppb) (u32)(div64_u64((((u64)abs(ppb) * BIT(28)) +\
62500000ULL), 125000000ULL))
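/*
* Worked example for DTE_PPB_ADJ() (illustrative, not from the original
* driver): the macro rounds ppb * 2^28 / 125000000, which equals
* ppb * 2^31 / 10^9, i.e. the NCO increment delta needed for a fractional
* rate change of ppb parts per billion relative to DTE_NCO_INC_DEFAULT
* (0x80000000 = 2^31). For ppb = 1000,
* (1000 * 2^28 + 62500000) / 125000000 = 2147, which is about 2^31 * 10^-6.
*/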
/* ptp dte priv structure */
struct ptp_dte {
void __iomem *regs;
struct ptp_clock *ptp_clk;
struct ptp_clock_info caps;
struct device *dev;
u32 ts_ovf_last;
u32 ts_wrap_cnt;
spinlock_t lock;
u32 reg_val[DTE_NUM_REGS_TO_RESTORE];
};
static void dte_write_nco(void __iomem *regs, s64 ns)
{
u32 sum2, sum3;
sum2 = (u32)((ns >> DTE_NCO_SUM2_SHIFT) & DTE_NCO_SUM2_MASK);
/* compensate for ignoring sum1 */
if (sum2 != DTE_NCO_SUM2_MASK)
sum2++;
/* to write sum3, bits [15:8] need to be written */
sum3 = (u32)(((ns >> DTE_NCO_SUM3_SHIFT) & DTE_NCO_SUM3_MASK) <<
DTE_NCO_SUM3_WR_SHIFT);
writel(0, (regs + DTE_NCO_LOW_TIME_REG));
writel(sum2, (regs + DTE_NCO_TIME_REG));
writel(sum3, (regs + DTE_NCO_OVERFLOW_REG));
}
static s64 dte_read_nco(void __iomem *regs)
{
u32 sum2, sum3;
s64 ns;
/*
* ignoring sum1 (4 bits) gives a 16ns resolution, which
* works due to the async register read.
*/
sum3 = readl(regs + DTE_NCO_OVERFLOW_REG) & DTE_NCO_SUM3_MASK;
sum2 = readl(regs + DTE_NCO_TIME_REG);
ns = ((s64)sum3 << DTE_NCO_SUM3_SHIFT) |
((s64)sum2 << DTE_NCO_SUM2_SHIFT);
return ns;
}
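/*
* Bit layout recap with a worked example (illustrative only): the 44-bit
* nanosecond count is split as sum1 = ns[3:0] (ignored here), sum2 =
* ns[35:4] and sum3 = ns[43:36]. For ns = 0x7F123456789, sum3 = 0x7F and
* sum2 = 0x12345678, so dte_read_nco() reconstructs 0x7F123456780, i.e.
* the low nibble is lost, giving the 16 ns resolution mentioned above.
*/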
static void dte_write_nco_delta(struct ptp_dte *ptp_dte, s64 delta)
{
s64 ns;
ns = dte_read_nco(ptp_dte->regs);
/* handle wraparound conditions */
if ((delta < 0) && (abs(delta) > ns)) {
if (ptp_dte->ts_wrap_cnt) {
ns += DTE_NCO_MAX_NS + delta;
ptp_dte->ts_wrap_cnt--;
} else {
ns = 0;
}
} else {
ns += delta;
if (ns > DTE_NCO_MAX_NS) {
ptp_dte->ts_wrap_cnt++;
ns -= DTE_NCO_MAX_NS;
}
}
dte_write_nco(ptp_dte->regs, ns);
ptp_dte->ts_ovf_last = (ns >> DTE_NCO_TS_WRAP_LSHIFT) &
DTE_NCO_TS_WRAP_MASK;
}
static s64 dte_read_nco_with_ovf(struct ptp_dte *ptp_dte)
{
u32 ts_ovf;
s64 ns = 0;
ns = dte_read_nco(ptp_dte->regs);
/* Timestamp overflow: 8 LSB bits of sum3, 4 MSB bits of sum2 */
ts_ovf = (ns >> DTE_NCO_TS_WRAP_LSHIFT) & DTE_NCO_TS_WRAP_MASK;
/* Check for wrap around */
if (ts_ovf < ptp_dte->ts_ovf_last)
ptp_dte->ts_wrap_cnt++;
ptp_dte->ts_ovf_last = ts_ovf;
/* adjust for wraparounds */
ns += (s64)(BIT_ULL(DTE_WRAP_AROUND_NSEC_SHIFT) * ptp_dte->ts_wrap_cnt);
return ns;
}
static int ptp_dte_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
u32 nco_incr;
unsigned long flags;
struct ptp_dte *ptp_dte = container_of(ptp, struct ptp_dte, caps);
if (abs(ppb) > ptp_dte->caps.max_adj) {
dev_err(ptp_dte->dev, "ppb adj too big\n");
return -EINVAL;
}
if (ppb < 0)
nco_incr = DTE_NCO_INC_DEFAULT - DTE_PPB_ADJ(ppb);
else
nco_incr = DTE_NCO_INC_DEFAULT + DTE_PPB_ADJ(ppb);
spin_lock_irqsave(&ptp_dte->lock, flags);
writel(nco_incr, ptp_dte->regs + DTE_NCO_INC_REG);
spin_unlock_irqrestore(&ptp_dte->lock, flags);
return 0;
}
static int ptp_dte_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
unsigned long flags;
struct ptp_dte *ptp_dte = container_of(ptp, struct ptp_dte, caps);
spin_lock_irqsave(&ptp_dte->lock, flags);
dte_write_nco_delta(ptp_dte, delta);
spin_unlock_irqrestore(&ptp_dte->lock, flags);
return 0;
}
static int ptp_dte_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
unsigned long flags;
struct ptp_dte *ptp_dte = container_of(ptp, struct ptp_dte, caps);
spin_lock_irqsave(&ptp_dte->lock, flags);
*ts = ns_to_timespec64(dte_read_nco_with_ovf(ptp_dte));
spin_unlock_irqrestore(&ptp_dte->lock, flags);
return 0;
}
static int ptp_dte_settime(struct ptp_clock_info *ptp,
const struct timespec64 *ts)
{
unsigned long flags;
struct ptp_dte *ptp_dte = container_of(ptp, struct ptp_dte, caps);
spin_lock_irqsave(&ptp_dte->lock, flags);
/* Disable nco increment */
writel(0, ptp_dte->regs + DTE_NCO_INC_REG);
dte_write_nco(ptp_dte->regs, timespec64_to_ns(ts));
/* reset overflow and wrap counter */
ptp_dte->ts_ovf_last = 0;
ptp_dte->ts_wrap_cnt = 0;
/* Enable nco increment */
writel(DTE_NCO_INC_DEFAULT, ptp_dte->regs + DTE_NCO_INC_REG);
spin_unlock_irqrestore(&ptp_dte->lock, flags);
return 0;
}
static int ptp_dte_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
return -EOPNOTSUPP;
}
static const struct ptp_clock_info ptp_dte_caps = {
.owner = THIS_MODULE,
.name = "DTE PTP timer",
.max_adj = 50000000,
.n_ext_ts = 0,
.n_pins = 0,
.pps = 0,
.adjfine = ptp_dte_adjfine,
.adjtime = ptp_dte_adjtime,
.gettime64 = ptp_dte_gettime,
.settime64 = ptp_dte_settime,
.enable = ptp_dte_enable,
};
static int ptp_dte_probe(struct platform_device *pdev)
{
struct ptp_dte *ptp_dte;
struct device *dev = &pdev->dev;
ptp_dte = devm_kzalloc(dev, sizeof(struct ptp_dte), GFP_KERNEL);
if (!ptp_dte)
return -ENOMEM;
ptp_dte->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ptp_dte->regs))
return PTR_ERR(ptp_dte->regs);
spin_lock_init(&ptp_dte->lock);
ptp_dte->dev = dev;
ptp_dte->caps = ptp_dte_caps;
ptp_dte->ptp_clk = ptp_clock_register(&ptp_dte->caps, &pdev->dev);
if (IS_ERR(ptp_dte->ptp_clk)) {
dev_err(dev,
"%s: Failed to register ptp clock\n", __func__);
return PTR_ERR(ptp_dte->ptp_clk);
}
platform_set_drvdata(pdev, ptp_dte);
dev_info(dev, "ptp clk probe done\n");
return 0;
}
static int ptp_dte_remove(struct platform_device *pdev)
{
struct ptp_dte *ptp_dte = platform_get_drvdata(pdev);
u8 i;
ptp_clock_unregister(ptp_dte->ptp_clk);
for (i = 0; i < DTE_NUM_REGS_TO_RESTORE; i++)
writel(0, ptp_dte->regs + (i * sizeof(u32)));
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int ptp_dte_suspend(struct device *dev)
{
struct ptp_dte *ptp_dte = dev_get_drvdata(dev);
u8 i;
for (i = 0; i < DTE_NUM_REGS_TO_RESTORE; i++) {
ptp_dte->reg_val[i] =
readl(ptp_dte->regs + (i * sizeof(u32)));
}
/* disable the nco */
writel(0, ptp_dte->regs + DTE_NCO_INC_REG);
return 0;
}
static int ptp_dte_resume(struct device *dev)
{
struct ptp_dte *ptp_dte = dev_get_drvdata(dev);
u8 i;
for (i = 0; i < DTE_NUM_REGS_TO_RESTORE; i++) {
if ((i * sizeof(u32)) != DTE_NCO_OVERFLOW_REG)
writel(ptp_dte->reg_val[i],
(ptp_dte->regs + (i * sizeof(u32))));
else
writel(((ptp_dte->reg_val[i] &
DTE_NCO_SUM3_MASK) << DTE_NCO_SUM3_WR_SHIFT),
(ptp_dte->regs + (i * sizeof(u32))));
}
return 0;
}
static const struct dev_pm_ops ptp_dte_pm_ops = {
.suspend = ptp_dte_suspend,
.resume = ptp_dte_resume
};
#define PTP_DTE_PM_OPS (&ptp_dte_pm_ops)
#else
#define PTP_DTE_PM_OPS NULL
#endif
static const struct of_device_id ptp_dte_of_match[] = {
{ .compatible = "brcm,ptp-dte", },
{},
};
MODULE_DEVICE_TABLE(of, ptp_dte_of_match);
static struct platform_driver ptp_dte_driver = {
.driver = {
.name = "ptp-dte",
.pm = PTP_DTE_PM_OPS,
.of_match_table = ptp_dte_of_match,
},
.probe = ptp_dte_probe,
.remove = ptp_dte_remove,
};
module_platform_driver(ptp_dte_driver);
MODULE_AUTHOR("Broadcom");
MODULE_DESCRIPTION("Broadcom DTE PTP Clock driver");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/ptp/ptp_dte.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* PTP 1588 clock support - character device implementation.
*
* Copyright (C) 2010 OMICRON electronics GmbH
*/
#include <linux/module.h>
#include <linux/posix-clock.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/timekeeping.h>
#include <linux/nospec.h>
#include "ptp_private.h"
static int ptp_disable_pinfunc(struct ptp_clock_info *ops,
enum ptp_pin_function func, unsigned int chan)
{
struct ptp_clock_request rq;
int err = 0;
memset(&rq, 0, sizeof(rq));
switch (func) {
case PTP_PF_NONE:
break;
case PTP_PF_EXTTS:
rq.type = PTP_CLK_REQ_EXTTS;
rq.extts.index = chan;
err = ops->enable(ops, &rq, 0);
break;
case PTP_PF_PEROUT:
rq.type = PTP_CLK_REQ_PEROUT;
rq.perout.index = chan;
err = ops->enable(ops, &rq, 0);
break;
case PTP_PF_PHYSYNC:
break;
default:
return -EINVAL;
}
return err;
}
int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
enum ptp_pin_function func, unsigned int chan)
{
struct ptp_clock_info *info = ptp->info;
struct ptp_pin_desc *pin1 = NULL, *pin2 = &info->pin_config[pin];
unsigned int i;
/* Check to see if any other pin previously had this function. */
for (i = 0; i < info->n_pins; i++) {
if (info->pin_config[i].func == func &&
info->pin_config[i].chan == chan) {
pin1 = &info->pin_config[i];
break;
}
}
if (pin1 && i == pin)
return 0;
/* Check the desired function and channel. */
switch (func) {
case PTP_PF_NONE:
break;
case PTP_PF_EXTTS:
if (chan >= info->n_ext_ts)
return -EINVAL;
break;
case PTP_PF_PEROUT:
if (chan >= info->n_per_out)
return -EINVAL;
break;
case PTP_PF_PHYSYNC:
if (chan != 0)
return -EINVAL;
break;
default:
return -EINVAL;
}
if (info->verify(info, pin, func, chan)) {
pr_err("driver cannot use function %u on pin %u\n", func, chan);
return -EOPNOTSUPP;
}
/* Disable whatever function was previously assigned. */
if (pin1) {
ptp_disable_pinfunc(info, func, chan);
pin1->func = PTP_PF_NONE;
pin1->chan = 0;
}
ptp_disable_pinfunc(info, pin2->func, pin2->chan);
pin2->func = func;
pin2->chan = chan;
return 0;
}
int ptp_open(struct posix_clock *pc, fmode_t fmode)
{
return 0;
}
long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
{
struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
struct ptp_sys_offset_extended *extoff = NULL;
struct ptp_sys_offset_precise precise_offset;
struct system_device_crosststamp xtstamp;
struct ptp_clock_info *ops = ptp->info;
struct ptp_sys_offset *sysoff = NULL;
struct ptp_system_timestamp sts;
struct ptp_clock_request req;
struct ptp_clock_caps caps;
struct ptp_clock_time *pct;
unsigned int i, pin_index;
struct ptp_pin_desc pd;
struct timespec64 ts;
int enable, err = 0;
switch (cmd) {
case PTP_CLOCK_GETCAPS:
case PTP_CLOCK_GETCAPS2:
memset(&caps, 0, sizeof(caps));
caps.max_adj = ptp->info->max_adj;
caps.n_alarm = ptp->info->n_alarm;
caps.n_ext_ts = ptp->info->n_ext_ts;
caps.n_per_out = ptp->info->n_per_out;
caps.pps = ptp->info->pps;
caps.n_pins = ptp->info->n_pins;
caps.cross_timestamping = ptp->info->getcrosststamp != NULL;
caps.adjust_phase = ptp->info->adjphase != NULL &&
ptp->info->getmaxphase != NULL;
if (caps.adjust_phase)
caps.max_phase_adj = ptp->info->getmaxphase(ptp->info);
if (copy_to_user((void __user *)arg, &caps, sizeof(caps)))
err = -EFAULT;
break;
case PTP_EXTTS_REQUEST:
case PTP_EXTTS_REQUEST2:
memset(&req, 0, sizeof(req));
if (copy_from_user(&req.extts, (void __user *)arg,
sizeof(req.extts))) {
err = -EFAULT;
break;
}
if (cmd == PTP_EXTTS_REQUEST2) {
/* Tell the drivers to check the flags carefully. */
req.extts.flags |= PTP_STRICT_FLAGS;
/* Make sure no reserved bit is set. */
if ((req.extts.flags & ~PTP_EXTTS_VALID_FLAGS) ||
req.extts.rsv[0] || req.extts.rsv[1]) {
err = -EINVAL;
break;
}
/* Ensure one of the rising/falling edge bits is set. */
if ((req.extts.flags & PTP_ENABLE_FEATURE) &&
(req.extts.flags & PTP_EXTTS_EDGES) == 0) {
err = -EINVAL;
break;
}
} else if (cmd == PTP_EXTTS_REQUEST) {
req.extts.flags &= PTP_EXTTS_V1_VALID_FLAGS;
req.extts.rsv[0] = 0;
req.extts.rsv[1] = 0;
}
if (req.extts.index >= ops->n_ext_ts) {
err = -EINVAL;
break;
}
req.type = PTP_CLK_REQ_EXTTS;
enable = req.extts.flags & PTP_ENABLE_FEATURE ? 1 : 0;
if (mutex_lock_interruptible(&ptp->pincfg_mux))
return -ERESTARTSYS;
err = ops->enable(ops, &req, enable);
mutex_unlock(&ptp->pincfg_mux);
break;
case PTP_PEROUT_REQUEST:
case PTP_PEROUT_REQUEST2:
memset(&req, 0, sizeof(req));
if (copy_from_user(&req.perout, (void __user *)arg,
sizeof(req.perout))) {
err = -EFAULT;
break;
}
if (cmd == PTP_PEROUT_REQUEST2) {
struct ptp_perout_request *perout = &req.perout;
if (perout->flags & ~PTP_PEROUT_VALID_FLAGS) {
err = -EINVAL;
break;
}
/*
* The "on" field has undefined meaning if
* PTP_PEROUT_DUTY_CYCLE isn't set; we must still treat
* it as reserved, and it must be set to zero.
*/
if (!(perout->flags & PTP_PEROUT_DUTY_CYCLE) &&
(perout->rsv[0] || perout->rsv[1] ||
perout->rsv[2] || perout->rsv[3])) {
err = -EINVAL;
break;
}
if (perout->flags & PTP_PEROUT_DUTY_CYCLE) {
/* The duty cycle must be subunitary. */
if (perout->on.sec > perout->period.sec ||
(perout->on.sec == perout->period.sec &&
perout->on.nsec > perout->period.nsec)) {
err = -ERANGE;
break;
}
}
if (perout->flags & PTP_PEROUT_PHASE) {
/*
* The phase should be specified modulo the
* period, therefore anything equal or larger
* than 1 period is invalid.
*/
if (perout->phase.sec > perout->period.sec ||
(perout->phase.sec == perout->period.sec &&
perout->phase.nsec >= perout->period.nsec)) {
err = -ERANGE;
break;
}
}
} else if (cmd == PTP_PEROUT_REQUEST) {
req.perout.flags &= PTP_PEROUT_V1_VALID_FLAGS;
req.perout.rsv[0] = 0;
req.perout.rsv[1] = 0;
req.perout.rsv[2] = 0;
req.perout.rsv[3] = 0;
}
if (req.perout.index >= ops->n_per_out) {
err = -EINVAL;
break;
}
req.type = PTP_CLK_REQ_PEROUT;
enable = req.perout.period.sec || req.perout.period.nsec;
if (mutex_lock_interruptible(&ptp->pincfg_mux))
return -ERESTARTSYS;
err = ops->enable(ops, &req, enable);
mutex_unlock(&ptp->pincfg_mux);
break;
case PTP_ENABLE_PPS:
case PTP_ENABLE_PPS2:
memset(&req, 0, sizeof(req));
if (!capable(CAP_SYS_TIME))
return -EPERM;
req.type = PTP_CLK_REQ_PPS;
enable = arg ? 1 : 0;
if (mutex_lock_interruptible(&ptp->pincfg_mux))
return -ERESTARTSYS;
err = ops->enable(ops, &req, enable);
mutex_unlock(&ptp->pincfg_mux);
break;
case PTP_SYS_OFFSET_PRECISE:
case PTP_SYS_OFFSET_PRECISE2:
if (!ptp->info->getcrosststamp) {
err = -EOPNOTSUPP;
break;
}
err = ptp->info->getcrosststamp(ptp->info, &xtstamp);
if (err)
break;
memset(&precise_offset, 0, sizeof(precise_offset));
ts = ktime_to_timespec64(xtstamp.device);
precise_offset.device.sec = ts.tv_sec;
precise_offset.device.nsec = ts.tv_nsec;
ts = ktime_to_timespec64(xtstamp.sys_realtime);
precise_offset.sys_realtime.sec = ts.tv_sec;
precise_offset.sys_realtime.nsec = ts.tv_nsec;
ts = ktime_to_timespec64(xtstamp.sys_monoraw);
precise_offset.sys_monoraw.sec = ts.tv_sec;
precise_offset.sys_monoraw.nsec = ts.tv_nsec;
if (copy_to_user((void __user *)arg, &precise_offset,
sizeof(precise_offset)))
err = -EFAULT;
break;
case PTP_SYS_OFFSET_EXTENDED:
case PTP_SYS_OFFSET_EXTENDED2:
if (!ptp->info->gettimex64) {
err = -EOPNOTSUPP;
break;
}
extoff = memdup_user((void __user *)arg, sizeof(*extoff));
if (IS_ERR(extoff)) {
err = PTR_ERR(extoff);
extoff = NULL;
break;
}
if (extoff->n_samples > PTP_MAX_SAMPLES
|| extoff->rsv[0] || extoff->rsv[1] || extoff->rsv[2]) {
err = -EINVAL;
break;
}
for (i = 0; i < extoff->n_samples; i++) {
err = ptp->info->gettimex64(ptp->info, &ts, &sts);
if (err)
goto out;
extoff->ts[i][0].sec = sts.pre_ts.tv_sec;
extoff->ts[i][0].nsec = sts.pre_ts.tv_nsec;
extoff->ts[i][1].sec = ts.tv_sec;
extoff->ts[i][1].nsec = ts.tv_nsec;
extoff->ts[i][2].sec = sts.post_ts.tv_sec;
extoff->ts[i][2].nsec = sts.post_ts.tv_nsec;
}
if (copy_to_user((void __user *)arg, extoff, sizeof(*extoff)))
err = -EFAULT;
break;
case PTP_SYS_OFFSET:
case PTP_SYS_OFFSET2:
sysoff = memdup_user((void __user *)arg, sizeof(*sysoff));
if (IS_ERR(sysoff)) {
err = PTR_ERR(sysoff);
sysoff = NULL;
break;
}
if (sysoff->n_samples > PTP_MAX_SAMPLES) {
err = -EINVAL;
break;
}
pct = &sysoff->ts[0];
for (i = 0; i < sysoff->n_samples; i++) {
ktime_get_real_ts64(&ts);
pct->sec = ts.tv_sec;
pct->nsec = ts.tv_nsec;
pct++;
if (ops->gettimex64)
err = ops->gettimex64(ops, &ts, NULL);
else
err = ops->gettime64(ops, &ts);
if (err)
goto out;
pct->sec = ts.tv_sec;
pct->nsec = ts.tv_nsec;
pct++;
}
ktime_get_real_ts64(&ts);
pct->sec = ts.tv_sec;
pct->nsec = ts.tv_nsec;
if (copy_to_user((void __user *)arg, sysoff, sizeof(*sysoff)))
err = -EFAULT;
break;
case PTP_PIN_GETFUNC:
case PTP_PIN_GETFUNC2:
if (copy_from_user(&pd, (void __user *)arg, sizeof(pd))) {
err = -EFAULT;
break;
}
if ((pd.rsv[0] || pd.rsv[1] || pd.rsv[2]
|| pd.rsv[3] || pd.rsv[4])
&& cmd == PTP_PIN_GETFUNC2) {
err = -EINVAL;
break;
} else if (cmd == PTP_PIN_GETFUNC) {
pd.rsv[0] = 0;
pd.rsv[1] = 0;
pd.rsv[2] = 0;
pd.rsv[3] = 0;
pd.rsv[4] = 0;
}
pin_index = pd.index;
if (pin_index >= ops->n_pins) {
err = -EINVAL;
break;
}
pin_index = array_index_nospec(pin_index, ops->n_pins);
if (mutex_lock_interruptible(&ptp->pincfg_mux))
return -ERESTARTSYS;
pd = ops->pin_config[pin_index];
mutex_unlock(&ptp->pincfg_mux);
if (!err && copy_to_user((void __user *)arg, &pd, sizeof(pd)))
err = -EFAULT;
break;
case PTP_PIN_SETFUNC:
case PTP_PIN_SETFUNC2:
if (copy_from_user(&pd, (void __user *)arg, sizeof(pd))) {
err = -EFAULT;
break;
}
if ((pd.rsv[0] || pd.rsv[1] || pd.rsv[2]
|| pd.rsv[3] || pd.rsv[4])
&& cmd == PTP_PIN_SETFUNC2) {
err = -EINVAL;
break;
} else if (cmd == PTP_PIN_SETFUNC) {
pd.rsv[0] = 0;
pd.rsv[1] = 0;
pd.rsv[2] = 0;
pd.rsv[3] = 0;
pd.rsv[4] = 0;
}
pin_index = pd.index;
if (pin_index >= ops->n_pins) {
err = -EINVAL;
break;
}
pin_index = array_index_nospec(pin_index, ops->n_pins);
if (mutex_lock_interruptible(&ptp->pincfg_mux))
return -ERESTARTSYS;
err = ptp_set_pinfunc(ptp, pin_index, pd.func, pd.chan);
mutex_unlock(&ptp->pincfg_mux);
break;
default:
err = -ENOTTY;
break;
}
out:
kfree(extoff);
kfree(sysoff);
return err;
}
__poll_t ptp_poll(struct posix_clock *pc, struct file *fp, poll_table *wait)
{
struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
poll_wait(fp, &ptp->tsev_wq, wait);
return queue_cnt(&ptp->tsevq) ? EPOLLIN : 0;
}
#define EXTTS_BUFSIZE (PTP_BUF_TIMESTAMPS * sizeof(struct ptp_extts_event))
ssize_t ptp_read(struct posix_clock *pc,
uint rdflags, char __user *buf, size_t cnt)
{
struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
struct timestamp_event_queue *queue = &ptp->tsevq;
struct ptp_extts_event *event;
unsigned long flags;
size_t qcnt, i;
int result;
if (cnt % sizeof(struct ptp_extts_event) != 0)
return -EINVAL;
if (cnt > EXTTS_BUFSIZE)
cnt = EXTTS_BUFSIZE;
cnt = cnt / sizeof(struct ptp_extts_event);
if (mutex_lock_interruptible(&ptp->tsevq_mux))
return -ERESTARTSYS;
if (wait_event_interruptible(ptp->tsev_wq,
ptp->defunct || queue_cnt(queue))) {
mutex_unlock(&ptp->tsevq_mux);
return -ERESTARTSYS;
}
if (ptp->defunct) {
mutex_unlock(&ptp->tsevq_mux);
return -ENODEV;
}
event = kmalloc(EXTTS_BUFSIZE, GFP_KERNEL);
if (!event) {
mutex_unlock(&ptp->tsevq_mux);
return -ENOMEM;
}
spin_lock_irqsave(&queue->lock, flags);
qcnt = queue_cnt(queue);
if (cnt > qcnt)
cnt = qcnt;
for (i = 0; i < cnt; i++) {
event[i] = queue->buf[queue->head];
queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;
}
spin_unlock_irqrestore(&queue->lock, flags);
cnt = cnt * sizeof(struct ptp_extts_event);
mutex_unlock(&ptp->tsevq_mux);
result = cnt;
if (copy_to_user(buf, event, cnt))
result = -EFAULT;
kfree(event);
return result;
}
|
linux-master
|
drivers/ptp/ptp_chardev.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* PTP 1588 clock using the EG20T PCH
*
* Copyright (C) 2010 OMICRON electronics GmbH
* Copyright (C) 2011-2012 LAPIS SEMICONDUCTOR Co., LTD.
*
* This code was derived from the IXP46X driver.
*/
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/ptp_pch.h>
#include <linux/slab.h>
#define STATION_ADDR_LEN 20
#define PCI_DEVICE_ID_PCH_1588 0x8819
#define IO_MEM_BAR 1
#define DEFAULT_ADDEND 0xA0000000
#define TICKS_NS_SHIFT 5
#define N_EXT_TS 2
enum pch_status {
PCH_SUCCESS,
PCH_INVALIDPARAM,
PCH_NOTIMESTAMP,
PCH_INTERRUPTMODEINUSE,
PCH_FAILED,
PCH_UNSUPPORTED,
};
/*
* struct pch_ts_regs - IEEE 1588 registers
*/
struct pch_ts_regs {
u32 control;
u32 event;
u32 addend;
u32 accum;
u32 test;
u32 ts_compare;
u32 rsystime_lo;
u32 rsystime_hi;
u32 systime_lo;
u32 systime_hi;
u32 trgt_lo;
u32 trgt_hi;
u32 asms_lo;
u32 asms_hi;
u32 amms_lo;
u32 amms_hi;
u32 ch_control;
u32 ch_event;
u32 tx_snap_lo;
u32 tx_snap_hi;
u32 rx_snap_lo;
u32 rx_snap_hi;
u32 src_uuid_lo;
u32 src_uuid_hi;
u32 can_status;
u32 can_snap_lo;
u32 can_snap_hi;
u32 ts_sel;
u32 ts_st[6];
u32 reserve1[14];
u32 stl_max_set_en;
u32 stl_max_set;
u32 reserve2[13];
u32 srst;
};
#define PCH_TSC_RESET (1 << 0)
#define PCH_TSC_TTM_MASK (1 << 1)
#define PCH_TSC_ASMS_MASK (1 << 2)
#define PCH_TSC_AMMS_MASK (1 << 3)
#define PCH_TSC_PPSM_MASK (1 << 4)
#define PCH_TSE_TTIPEND (1 << 1)
#define PCH_TSE_SNS (1 << 2)
#define PCH_TSE_SNM (1 << 3)
#define PCH_TSE_PPS (1 << 4)
#define PCH_CC_MM (1 << 0)
#define PCH_CC_TA (1 << 1)
#define PCH_CC_MODE_SHIFT 16
#define PCH_CC_MODE_MASK 0x001F0000
#define PCH_CC_VERSION (1 << 31)
#define PCH_CE_TXS (1 << 0)
#define PCH_CE_RXS (1 << 1)
#define PCH_CE_OVR (1 << 0)
#define PCH_CE_VAL (1 << 1)
#define PCH_ECS_ETH (1 << 0)
#define PCH_ECS_CAN (1 << 1)
#define PCH_IEEE1588_ETH (1 << 0)
#define PCH_IEEE1588_CAN (1 << 1)
/*
* struct pch_dev - Driver private data
*/
struct pch_dev {
struct pch_ts_regs __iomem *regs;
struct ptp_clock *ptp_clock;
struct ptp_clock_info caps;
int exts0_enabled;
int exts1_enabled;
u32 irq;
struct pci_dev *pdev;
spinlock_t register_lock;
};
/*
* struct pch_params - 1588 module parameter
*/
struct pch_params {
u8 station[STATION_ADDR_LEN];
};
/* structure to hold the module parameters */
static struct pch_params pch_param = {
"00:00:00:00:00:00"
};
/*
* Register access functions
*/
static inline void pch_eth_enable_set(struct pch_dev *chip)
{
u32 val;
/* SET the eth_enable bit */
val = ioread32(&chip->regs->ts_sel) | (PCH_ECS_ETH);
iowrite32(val, (&chip->regs->ts_sel));
}
static u64 pch_systime_read(struct pch_ts_regs __iomem *regs)
{
u64 ns;
ns = ioread64_lo_hi(&regs->systime_lo);
return ns << TICKS_NS_SHIFT;
}
static void pch_systime_write(struct pch_ts_regs __iomem *regs, u64 ns)
{
iowrite64_lo_hi(ns >> TICKS_NS_SHIFT, &regs->systime_lo);
}
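/*
* Illustrative note: the systime registers count in units of
* 2^TICKS_NS_SHIFT = 32 ns, so the read path shifts the raw 64-bit value
* left by 5 to report nanoseconds and the write path shifts right by 5.
*/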
static inline void pch_block_reset(struct pch_dev *chip)
{
u32 val;
/* Reset Hardware Assist block */
val = ioread32(&chip->regs->control) | PCH_TSC_RESET;
iowrite32(val, (&chip->regs->control));
val = val & ~PCH_TSC_RESET;
iowrite32(val, (&chip->regs->control));
}
void pch_ch_control_write(struct pci_dev *pdev, u32 val)
{
struct pch_dev *chip = pci_get_drvdata(pdev);
iowrite32(val, (&chip->regs->ch_control));
}
EXPORT_SYMBOL(pch_ch_control_write);
u32 pch_ch_event_read(struct pci_dev *pdev)
{
struct pch_dev *chip = pci_get_drvdata(pdev);
u32 val;
val = ioread32(&chip->regs->ch_event);
return val;
}
EXPORT_SYMBOL(pch_ch_event_read);
void pch_ch_event_write(struct pci_dev *pdev, u32 val)
{
struct pch_dev *chip = pci_get_drvdata(pdev);
iowrite32(val, (&chip->regs->ch_event));
}
EXPORT_SYMBOL(pch_ch_event_write);
u32 pch_src_uuid_lo_read(struct pci_dev *pdev)
{
struct pch_dev *chip = pci_get_drvdata(pdev);
u32 val;
val = ioread32(&chip->regs->src_uuid_lo);
return val;
}
EXPORT_SYMBOL(pch_src_uuid_lo_read);
u32 pch_src_uuid_hi_read(struct pci_dev *pdev)
{
struct pch_dev *chip = pci_get_drvdata(pdev);
u32 val;
val = ioread32(&chip->regs->src_uuid_hi);
return val;
}
EXPORT_SYMBOL(pch_src_uuid_hi_read);
u64 pch_rx_snap_read(struct pci_dev *pdev)
{
struct pch_dev *chip = pci_get_drvdata(pdev);
u64 ns;
ns = ioread64_lo_hi(&chip->regs->rx_snap_lo);
return ns << TICKS_NS_SHIFT;
}
EXPORT_SYMBOL(pch_rx_snap_read);
u64 pch_tx_snap_read(struct pci_dev *pdev)
{
struct pch_dev *chip = pci_get_drvdata(pdev);
u64 ns;
ns = ioread64_lo_hi(&chip->regs->tx_snap_lo);
return ns << TICKS_NS_SHIFT;
}
EXPORT_SYMBOL(pch_tx_snap_read);
/*
* This function enables all 64 bits in the system time registers
* [high & low]. It is a workaround for the non-continuous value in
* the SystemTime register.
*/
static void pch_set_system_time_count(struct pch_dev *chip)
{
iowrite32(0x01, &chip->regs->stl_max_set_en);
iowrite32(0xFFFFFFFF, &chip->regs->stl_max_set);
iowrite32(0x00, &chip->regs->stl_max_set_en);
}
static void pch_reset(struct pch_dev *chip)
{
/* Reset Hardware Assist */
pch_block_reset(chip);
/* enable all 32 bits in system time registers */
pch_set_system_time_count(chip);
}
/**
* pch_set_station_address() - This API sets the station address used by
* IEEE 1588 hardware when looking at PTP
* traffic on the ethernet interface
* @addr: string containing the colon-separated station address to be used.
* @pdev: PCI device.
*/
int pch_set_station_address(u8 *addr, struct pci_dev *pdev)
{
struct pch_dev *chip = pci_get_drvdata(pdev);
bool valid;
u64 mac;
/* Verify the parameter */
if ((chip->regs == NULL) || addr == (u8 *)NULL) {
dev_err(&pdev->dev,
"invalid params returning PCH_INVALIDPARAM\n");
return PCH_INVALIDPARAM;
}
valid = mac_pton(addr, (u8 *)&mac);
if (!valid) {
dev_err(&pdev->dev, "invalid params returning PCH_INVALIDPARAM\n");
return PCH_INVALIDPARAM;
}
dev_dbg(&pdev->dev, "invoking pch_station_set\n");
iowrite64_lo_hi(mac, &chip->regs->ts_st);
return 0;
}
EXPORT_SYMBOL(pch_set_station_address);
/*
* Interrupt service routine
*/
static irqreturn_t isr(int irq, void *priv)
{
struct pch_dev *pch_dev = priv;
struct pch_ts_regs __iomem *regs = pch_dev->regs;
struct ptp_clock_event event;
u32 ack = 0, val;
val = ioread32(&regs->event);
if (val & PCH_TSE_SNS) {
ack |= PCH_TSE_SNS;
if (pch_dev->exts0_enabled) {
event.type = PTP_CLOCK_EXTTS;
event.index = 0;
event.timestamp = ioread64_hi_lo(&regs->asms_hi);
event.timestamp <<= TICKS_NS_SHIFT;
ptp_clock_event(pch_dev->ptp_clock, &event);
}
}
if (val & PCH_TSE_SNM) {
ack |= PCH_TSE_SNM;
if (pch_dev->exts1_enabled) {
event.type = PTP_CLOCK_EXTTS;
event.index = 1;
event.timestamp = ioread64_hi_lo(&regs->asms_hi);
event.timestamp <<= TICKS_NS_SHIFT;
ptp_clock_event(pch_dev->ptp_clock, &event);
}
}
if (val & PCH_TSE_TTIPEND)
ack |= PCH_TSE_TTIPEND; /* this bit seems to be always set */
if (ack) {
iowrite32(ack, &regs->event);
return IRQ_HANDLED;
} else
return IRQ_NONE;
}
/*
* PTP clock operations
*/
static int ptp_pch_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
u32 addend;
struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps);
struct pch_ts_regs __iomem *regs = pch_dev->regs;
addend = adjust_by_scaled_ppm(DEFAULT_ADDEND, scaled_ppm);
iowrite32(addend, &regs->addend);
return 0;
}
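/*
* Worked example (illustrative; assumes adjust_by_scaled_ppm() scales its
* base by scaled_ppm / 2^16 ppm): for scaled_ppm = 65536 (1 ppm) the addend
* grows by roughly DEFAULT_ADDEND / 10^6 = 0xA0000000 / 10^6, about 2684,
* so the accumulator overflows about one part per million faster and the
* clock runs 1 ppm fast.
*/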
static int ptp_pch_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
s64 now;
unsigned long flags;
struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps);
struct pch_ts_regs __iomem *regs = pch_dev->regs;
spin_lock_irqsave(&pch_dev->register_lock, flags);
now = pch_systime_read(regs);
now += delta;
pch_systime_write(regs, now);
spin_unlock_irqrestore(&pch_dev->register_lock, flags);
return 0;
}
static int ptp_pch_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
u64 ns;
unsigned long flags;
struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps);
struct pch_ts_regs __iomem *regs = pch_dev->regs;
spin_lock_irqsave(&pch_dev->register_lock, flags);
ns = pch_systime_read(regs);
spin_unlock_irqrestore(&pch_dev->register_lock, flags);
*ts = ns_to_timespec64(ns);
return 0;
}
static int ptp_pch_settime(struct ptp_clock_info *ptp,
const struct timespec64 *ts)
{
u64 ns;
unsigned long flags;
struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps);
struct pch_ts_regs __iomem *regs = pch_dev->regs;
ns = timespec64_to_ns(ts);
spin_lock_irqsave(&pch_dev->register_lock, flags);
pch_systime_write(regs, ns);
spin_unlock_irqrestore(&pch_dev->register_lock, flags);
return 0;
}
static int ptp_pch_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps);
switch (rq->type) {
case PTP_CLK_REQ_EXTTS:
switch (rq->extts.index) {
case 0:
pch_dev->exts0_enabled = on ? 1 : 0;
break;
case 1:
pch_dev->exts1_enabled = on ? 1 : 0;
break;
default:
return -EINVAL;
}
return 0;
default:
break;
}
return -EOPNOTSUPP;
}
static const struct ptp_clock_info ptp_pch_caps = {
.owner = THIS_MODULE,
.name = "PCH timer",
.max_adj = 50000000,
.n_ext_ts = N_EXT_TS,
.n_pins = 0,
.pps = 0,
.adjfine = ptp_pch_adjfine,
.adjtime = ptp_pch_adjtime,
.gettime64 = ptp_pch_gettime,
.settime64 = ptp_pch_settime,
.enable = ptp_pch_enable,
};
static void pch_remove(struct pci_dev *pdev)
{
struct pch_dev *chip = pci_get_drvdata(pdev);
free_irq(pdev->irq, chip);
ptp_clock_unregister(chip->ptp_clock);
}
static s32
pch_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
s32 ret;
unsigned long flags;
struct pch_dev *chip;
chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
if (chip == NULL)
return -ENOMEM;
/* enable the 1588 pci device */
ret = pcim_enable_device(pdev);
if (ret != 0) {
dev_err(&pdev->dev, "could not enable the pci device\n");
return ret;
}
ret = pcim_iomap_regions(pdev, BIT(IO_MEM_BAR), "1588_regs");
if (ret) {
dev_err(&pdev->dev, "could not locate IO memory address\n");
return ret;
}
/* get the virtual address to the 1588 registers */
chip->regs = pcim_iomap_table(pdev)[IO_MEM_BAR];
chip->caps = ptp_pch_caps;
chip->ptp_clock = ptp_clock_register(&chip->caps, &pdev->dev);
if (IS_ERR(chip->ptp_clock))
return PTR_ERR(chip->ptp_clock);
spin_lock_init(&chip->register_lock);
ret = request_irq(pdev->irq, &isr, IRQF_SHARED, KBUILD_MODNAME, chip);
if (ret != 0) {
dev_err(&pdev->dev, "failed to get irq %d\n", pdev->irq);
goto err_req_irq;
}
/* indicate success */
chip->irq = pdev->irq;
chip->pdev = pdev;
pci_set_drvdata(pdev, chip);
spin_lock_irqsave(&chip->register_lock, flags);
/* reset the ieee1588 h/w */
pch_reset(chip);
iowrite32(DEFAULT_ADDEND, &chip->regs->addend);
iowrite64_lo_hi(1, &chip->regs->trgt_lo);
iowrite32(PCH_TSE_TTIPEND, &chip->regs->event);
pch_eth_enable_set(chip);
if (strcmp(pch_param.station, "00:00:00:00:00:00") != 0) {
if (pch_set_station_address(pch_param.station, pdev) != 0) {
dev_err(&pdev->dev,
"Invalid station address parameter\n"
"Module loaded but station address not set correctly\n"
);
}
}
spin_unlock_irqrestore(&chip->register_lock, flags);
return 0;
err_req_irq:
ptp_clock_unregister(chip->ptp_clock);
dev_err(&pdev->dev, "probe failed(ret=0x%x)\n", ret);
return ret;
}
static const struct pci_device_id pch_ieee1588_pcidev_id[] = {
{
.vendor = PCI_VENDOR_ID_INTEL,
.device = PCI_DEVICE_ID_PCH_1588
},
{0}
};
MODULE_DEVICE_TABLE(pci, pch_ieee1588_pcidev_id);
static struct pci_driver pch_driver = {
.name = KBUILD_MODNAME,
.id_table = pch_ieee1588_pcidev_id,
.probe = pch_probe,
.remove = pch_remove,
};
module_pci_driver(pch_driver);
module_param_string(station,
pch_param.station, sizeof(pch_param.station), 0444);
MODULE_PARM_DESC(station,
"IEEE 1588 station address to use - colon separated hex values");
MODULE_AUTHOR("LAPIS SEMICONDUCTOR, <[email protected]>");
MODULE_DESCRIPTION("PTP clock using the EG20T timer");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/ptp/ptp_pch.c
|
// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
* Copyright (C) 2020 VMware, Inc., Palo Alto, CA., USA
*
* PTP clock driver for VMware precision clock virtual device.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptp_clock_kernel.h>
#include <asm/hypervisor.h>
#include <asm/vmware.h>
#define VMWARE_MAGIC 0x564D5868
#define VMWARE_CMD_PCLK(nr) ((nr << 16) | 97)
#define VMWARE_CMD_PCLK_GETTIME VMWARE_CMD_PCLK(0)
static struct acpi_device *ptp_vmw_acpi_device;
static struct ptp_clock *ptp_vmw_clock;
static int ptp_vmw_pclk_read(u64 *ns)
{
u32 ret, nsec_hi, nsec_lo, unused1, unused2, unused3;
asm volatile (VMWARE_HYPERCALL :
"=a"(ret), "=b"(nsec_hi), "=c"(nsec_lo), "=d"(unused1),
"=S"(unused2), "=D"(unused3) :
"a"(VMWARE_MAGIC), "b"(0),
"c"(VMWARE_CMD_PCLK_GETTIME), "d"(0) :
"memory");
if (ret == 0)
*ns = ((u64)nsec_hi << 32) | nsec_lo;
return ret;
}
/*
* PTP clock ops.
*/
static int ptp_vmw_adjtime(struct ptp_clock_info *info, s64 delta)
{
return -EOPNOTSUPP;
}
static int ptp_vmw_adjfine(struct ptp_clock_info *info, long delta)
{
return -EOPNOTSUPP;
}
static int ptp_vmw_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
{
u64 ns;
if (ptp_vmw_pclk_read(&ns) != 0)
return -EIO;
*ts = ns_to_timespec64(ns);
return 0;
}
static int ptp_vmw_settime(struct ptp_clock_info *info,
const struct timespec64 *ts)
{
return -EOPNOTSUPP;
}
static int ptp_vmw_enable(struct ptp_clock_info *info,
struct ptp_clock_request *request, int on)
{
return -EOPNOTSUPP;
}
static struct ptp_clock_info ptp_vmw_clock_info = {
.owner = THIS_MODULE,
.name = "ptp_vmw",
.max_adj = 0,
.adjtime = ptp_vmw_adjtime,
.adjfine = ptp_vmw_adjfine,
.gettime64 = ptp_vmw_gettime,
.settime64 = ptp_vmw_settime,
.enable = ptp_vmw_enable,
};
/*
* ACPI driver ops for VMware "precision clock" virtual device.
*/
static int ptp_vmw_acpi_add(struct acpi_device *device)
{
ptp_vmw_clock = ptp_clock_register(&ptp_vmw_clock_info, NULL);
if (IS_ERR(ptp_vmw_clock)) {
pr_err("failed to register ptp clock\n");
return PTR_ERR(ptp_vmw_clock);
}
ptp_vmw_acpi_device = device;
return 0;
}
static void ptp_vmw_acpi_remove(struct acpi_device *device)
{
ptp_clock_unregister(ptp_vmw_clock);
}
static const struct acpi_device_id ptp_vmw_acpi_device_ids[] = {
{ "VMW0005", 0 },
{ "", 0 },
};
MODULE_DEVICE_TABLE(acpi, ptp_vmw_acpi_device_ids);
static struct acpi_driver ptp_vmw_acpi_driver = {
.name = "ptp_vmw",
.ids = ptp_vmw_acpi_device_ids,
.ops = {
.add = ptp_vmw_acpi_add,
.remove = ptp_vmw_acpi_remove
},
.owner = THIS_MODULE
};
static int __init ptp_vmw_init(void)
{
if (x86_hyper_type != X86_HYPER_VMWARE)
return -1;
return acpi_bus_register_driver(&ptp_vmw_acpi_driver);
}
static void __exit ptp_vmw_exit(void)
{
acpi_bus_unregister_driver(&ptp_vmw_acpi_driver);
}
module_init(ptp_vmw_init);
module_exit(ptp_vmw_exit);
MODULE_DESCRIPTION("VMware virtual PTP clock driver");
MODULE_AUTHOR("VMware, Inc.");
MODULE_LICENSE("Dual BSD/GPL");
|
linux-master
|
drivers/ptp/ptp_vmw.c
|
// SPDX-License-Identifier: GPL-2.0+
/* Copyright 2019 NXP
*/
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/fsl/ptp_qoriq.h>
static int ptp_qoriq_fiper1_lpbk_get(void *data, u64 *val)
{
struct ptp_qoriq *ptp_qoriq = data;
struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
u32 ctrl;
ctrl = ptp_qoriq->read(&regs->ctrl_regs->tmr_ctrl);
*val = ctrl & PP1L ? 1 : 0;
return 0;
}
static int ptp_qoriq_fiper1_lpbk_set(void *data, u64 val)
{
struct ptp_qoriq *ptp_qoriq = data;
struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
u32 ctrl;
ctrl = ptp_qoriq->read(&regs->ctrl_regs->tmr_ctrl);
if (val == 0)
ctrl &= ~PP1L;
else
ctrl |= PP1L;
ptp_qoriq->write(&regs->ctrl_regs->tmr_ctrl, ctrl);
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(ptp_qoriq_fiper1_fops, ptp_qoriq_fiper1_lpbk_get,
ptp_qoriq_fiper1_lpbk_set, "%llu\n");
static int ptp_qoriq_fiper2_lpbk_get(void *data, u64 *val)
{
struct ptp_qoriq *ptp_qoriq = data;
struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
u32 ctrl;
ctrl = ptp_qoriq->read(&regs->ctrl_regs->tmr_ctrl);
*val = ctrl & PP2L ? 1 : 0;
return 0;
}
static int ptp_qoriq_fiper2_lpbk_set(void *data, u64 val)
{
struct ptp_qoriq *ptp_qoriq = data;
struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
u32 ctrl;
ctrl = ptp_qoriq->read(&regs->ctrl_regs->tmr_ctrl);
if (val == 0)
ctrl &= ~PP2L;
else
ctrl |= PP2L;
ptp_qoriq->write(&regs->ctrl_regs->tmr_ctrl, ctrl);
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(ptp_qoriq_fiper2_fops, ptp_qoriq_fiper2_lpbk_get,
ptp_qoriq_fiper2_lpbk_set, "%llu\n");
void ptp_qoriq_create_debugfs(struct ptp_qoriq *ptp_qoriq)
{
struct dentry *root;
root = debugfs_create_dir(dev_name(ptp_qoriq->dev), NULL);
if (IS_ERR(root))
return;
if (!root)
goto err_root;
ptp_qoriq->debugfs_root = root;
if (!debugfs_create_file_unsafe("fiper1-loopback", 0600, root,
ptp_qoriq, &ptp_qoriq_fiper1_fops))
goto err_node;
if (!debugfs_create_file_unsafe("fiper2-loopback", 0600, root,
ptp_qoriq, &ptp_qoriq_fiper2_fops))
goto err_node;
return;
err_node:
debugfs_remove_recursive(root);
ptp_qoriq->debugfs_root = NULL;
err_root:
dev_err(ptp_qoriq->dev, "failed to initialize debugfs\n");
}
void ptp_qoriq_remove_debugfs(struct ptp_qoriq *ptp_qoriq)
{
debugfs_remove_recursive(ptp_qoriq->debugfs_root);
ptp_qoriq->debugfs_root = NULL;
}
|
linux-master
|
drivers/ptp/ptp_qoriq_debugfs.c
|
// SPDX-License-Identifier: GPL-2.0
//
// Copyright (C) 2018 MOSER-BAER AG
//
#define pr_fmt(fmt) "InES_PTP: " fmt
#include <linux/ethtool.h>
#include <linux/export.h>
#include <linux/if_vlan.h>
#include <linux/mii_timestamper.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/ptp_classify.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/stddef.h>
MODULE_DESCRIPTION("Driver for the ZHAW InES PTP time stamping IP core");
MODULE_AUTHOR("Richard Cochran <[email protected]>");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
/* GLOBAL register */
#define MCAST_MAC_SELECT_SHIFT 2
#define MCAST_MAC_SELECT_MASK 0x3
#define IO_RESET BIT(1)
#define PTP_RESET BIT(0)
/* VERSION register */
#define IF_MAJOR_VER_SHIFT 12
#define IF_MAJOR_VER_MASK 0xf
#define IF_MINOR_VER_SHIFT 8
#define IF_MINOR_VER_MASK 0xf
#define FPGA_MAJOR_VER_SHIFT 4
#define FPGA_MAJOR_VER_MASK 0xf
#define FPGA_MINOR_VER_SHIFT 0
#define FPGA_MINOR_VER_MASK 0xf
/* INT_STAT register */
#define RX_INTR_STATUS_3 BIT(5)
#define RX_INTR_STATUS_2 BIT(4)
#define RX_INTR_STATUS_1 BIT(3)
#define TX_INTR_STATUS_3 BIT(2)
#define TX_INTR_STATUS_2 BIT(1)
#define TX_INTR_STATUS_1 BIT(0)
/* INT_MSK register */
#define RX_INTR_MASK_3 BIT(5)
#define RX_INTR_MASK_2 BIT(4)
#define RX_INTR_MASK_1 BIT(3)
#define TX_INTR_MASK_3 BIT(2)
#define TX_INTR_MASK_2 BIT(1)
#define TX_INTR_MASK_1 BIT(0)
/* BUF_STAT register */
#define RX_FIFO_NE_3 BIT(5)
#define RX_FIFO_NE_2 BIT(4)
#define RX_FIFO_NE_1 BIT(3)
#define TX_FIFO_NE_3 BIT(2)
#define TX_FIFO_NE_2 BIT(1)
#define TX_FIFO_NE_1 BIT(0)
/* PORT_CONF register */
#define CM_ONE_STEP BIT(6)
#define PHY_SPEED_SHIFT 4
#define PHY_SPEED_MASK 0x3
#define P2P_DELAY_WR_POS_SHIFT 2
#define P2P_DELAY_WR_POS_MASK 0x3
#define PTP_MODE_SHIFT 0
#define PTP_MODE_MASK 0x3
/* TS_STAT_TX register */
#define TS_ENABLE BIT(15)
#define DATA_READ_POS_SHIFT 8
#define DATA_READ_POS_MASK 0x1f
#define DISCARDED_EVENTS_SHIFT 4
#define DISCARDED_EVENTS_MASK 0xf
#define INES_N_PORTS 3
#define INES_REGISTER_SIZE 0x80
#define INES_PORT_OFFSET 0x20
#define INES_PORT_SIZE 0x20
#define INES_FIFO_DEPTH 90
#define INES_MAX_EVENTS 100
#define BC_PTP_V1 0
#define BC_PTP_V2 1
#define TC_E2E_PTP_V2 2
#define TC_P2P_PTP_V2 3
#define PHY_SPEED_10 0
#define PHY_SPEED_100 1
#define PHY_SPEED_1000 2
#define PORT_CONF \
((PHY_SPEED_1000 << PHY_SPEED_SHIFT) | (BC_PTP_V2 << PTP_MODE_SHIFT))
#define ines_read32(s, r) __raw_readl((void __iomem *)&s->regs->r)
#define ines_write32(s, v, r) __raw_writel(v, (void __iomem *)&s->regs->r)
#define MESSAGE_TYPE_SYNC 1
#define MESSAGE_TYPE_P_DELAY_REQ 2
#define MESSAGE_TYPE_P_DELAY_RESP 3
#define MESSAGE_TYPE_DELAY_REQ 4
static LIST_HEAD(ines_clocks);
static DEFINE_MUTEX(ines_clocks_lock);
struct ines_global_regs {
u32 id;
u32 test;
u32 global;
u32 version;
u32 test2;
u32 int_stat;
u32 int_msk;
u32 buf_stat;
};
struct ines_port_registers {
u32 port_conf;
u32 p_delay;
u32 ts_stat_tx;
u32 ts_stat_rx;
u32 ts_tx;
u32 ts_rx;
};
struct ines_timestamp {
struct list_head list;
unsigned long tmo;
u16 tag;
u64 sec;
u64 nsec;
u64 clkid;
u16 portnum;
u16 seqid;
};
struct ines_port {
struct ines_port_registers *regs;
struct mii_timestamper mii_ts;
struct ines_clock *clock;
bool rxts_enabled;
bool txts_enabled;
unsigned int index;
struct delayed_work ts_work;
/* lock protects event list and tx_skb */
spinlock_t lock;
struct sk_buff *tx_skb;
struct list_head events;
struct list_head pool;
struct ines_timestamp pool_data[INES_MAX_EVENTS];
};
struct ines_clock {
struct ines_port port[INES_N_PORTS];
struct ines_global_regs __iomem *regs;
void __iomem *base;
struct device_node *node;
struct device *dev;
struct list_head list;
};
static bool ines_match(struct sk_buff *skb, unsigned int ptp_class,
struct ines_timestamp *ts, struct device *dev);
static int ines_rxfifo_read(struct ines_port *port);
static u64 ines_rxts64(struct ines_port *port, unsigned int words);
static bool ines_timestamp_expired(struct ines_timestamp *ts);
static u64 ines_txts64(struct ines_port *port, unsigned int words);
static void ines_txtstamp_work(struct work_struct *work);
static bool is_sync_pdelay_resp(struct sk_buff *skb, int type);
static u8 tag_to_msgtype(u8 tag);
static void ines_clock_cleanup(struct ines_clock *clock)
{
struct ines_port *port;
int i;
for (i = 0; i < INES_N_PORTS; i++) {
port = &clock->port[i];
cancel_delayed_work_sync(&port->ts_work);
}
}
static int ines_clock_init(struct ines_clock *clock, struct device *device,
void __iomem *addr)
{
struct device_node *node = device->of_node;
unsigned long port_addr;
struct ines_port *port;
int i, j;
INIT_LIST_HEAD(&clock->list);
clock->node = node;
clock->dev = device;
clock->base = addr;
clock->regs = clock->base;
for (i = 0; i < INES_N_PORTS; i++) {
port = &clock->port[i];
port_addr = (unsigned long) clock->base +
INES_PORT_OFFSET + i * INES_PORT_SIZE;
port->regs = (struct ines_port_registers *) port_addr;
port->clock = clock;
port->index = i;
INIT_DELAYED_WORK(&port->ts_work, ines_txtstamp_work);
spin_lock_init(&port->lock);
INIT_LIST_HEAD(&port->events);
INIT_LIST_HEAD(&port->pool);
for (j = 0; j < INES_MAX_EVENTS; j++)
list_add(&port->pool_data[j].list, &port->pool);
}
ines_write32(clock, 0xBEEF, test);
ines_write32(clock, 0xBEEF, test2);
dev_dbg(device, "ID 0x%x\n", ines_read32(clock, id));
dev_dbg(device, "TEST 0x%x\n", ines_read32(clock, test));
dev_dbg(device, "VERSION 0x%x\n", ines_read32(clock, version));
dev_dbg(device, "TEST2 0x%x\n", ines_read32(clock, test2));
for (i = 0; i < INES_N_PORTS; i++) {
port = &clock->port[i];
ines_write32(port, PORT_CONF, port_conf);
}
return 0;
}
static struct ines_port *ines_find_port(struct device_node *node, u32 index)
{
struct ines_port *port = NULL;
struct ines_clock *clock;
struct list_head *this;
mutex_lock(&ines_clocks_lock);
list_for_each(this, &ines_clocks) {
clock = list_entry(this, struct ines_clock, list);
if (clock->node == node) {
port = &clock->port[index];
break;
}
}
mutex_unlock(&ines_clocks_lock);
return port;
}
static u64 ines_find_rxts(struct ines_port *port, struct sk_buff *skb, int type)
{
struct list_head *this, *next;
struct ines_timestamp *ts;
unsigned long flags;
u64 ns = 0;
if (type == PTP_CLASS_NONE)
return 0;
spin_lock_irqsave(&port->lock, flags);
ines_rxfifo_read(port);
list_for_each_safe(this, next, &port->events) {
ts = list_entry(this, struct ines_timestamp, list);
if (ines_timestamp_expired(ts)) {
list_del_init(&ts->list);
list_add(&ts->list, &port->pool);
continue;
}
if (ines_match(skb, type, ts, port->clock->dev)) {
ns = ts->sec * 1000000000ULL + ts->nsec;
list_del_init(&ts->list);
list_add(&ts->list, &port->pool);
break;
}
}
spin_unlock_irqrestore(&port->lock, flags);
return ns;
}
static u64 ines_find_txts(struct ines_port *port, struct sk_buff *skb)
{
unsigned int class = ptp_classify_raw(skb), i;
u32 data_rd_pos, buf_stat, mask, ts_stat_tx;
struct ines_timestamp ts;
unsigned long flags;
u64 ns = 0;
mask = TX_FIFO_NE_1 << port->index;
spin_lock_irqsave(&port->lock, flags);
for (i = 0; i < INES_FIFO_DEPTH; i++) {
buf_stat = ines_read32(port->clock, buf_stat);
if (!(buf_stat & mask)) {
dev_dbg(port->clock->dev,
"Tx timestamp FIFO unexpectedly empty\n");
break;
}
ts_stat_tx = ines_read32(port, ts_stat_tx);
data_rd_pos = (ts_stat_tx >> DATA_READ_POS_SHIFT) &
DATA_READ_POS_MASK;
if (data_rd_pos) {
dev_err(port->clock->dev,
"unexpected Tx read pos %u\n", data_rd_pos);
break;
}
ts.tag = ines_read32(port, ts_tx);
ts.sec = ines_txts64(port, 3);
ts.nsec = ines_txts64(port, 2);
ts.clkid = ines_txts64(port, 4);
ts.portnum = ines_read32(port, ts_tx);
ts.seqid = ines_read32(port, ts_tx);
if (ines_match(skb, class, &ts, port->clock->dev)) {
ns = ts.sec * 1000000000ULL + ts.nsec;
break;
}
}
spin_unlock_irqrestore(&port->lock, flags);
return ns;
}
static int ines_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
{
struct ines_port *port = container_of(mii_ts, struct ines_port, mii_ts);
u32 cm_one_step = 0, port_conf, ts_stat_rx, ts_stat_tx;
struct hwtstamp_config cfg;
unsigned long flags;
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
return -EFAULT;
switch (cfg.tx_type) {
case HWTSTAMP_TX_OFF:
ts_stat_tx = 0;
break;
case HWTSTAMP_TX_ON:
ts_stat_tx = TS_ENABLE;
break;
case HWTSTAMP_TX_ONESTEP_P2P:
ts_stat_tx = TS_ENABLE;
cm_one_step = CM_ONE_STEP;
break;
default:
return -ERANGE;
}
switch (cfg.rx_filter) {
case HWTSTAMP_FILTER_NONE:
ts_stat_rx = 0;
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
return -ERANGE;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
ts_stat_rx = TS_ENABLE;
cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
default:
return -ERANGE;
}
spin_lock_irqsave(&port->lock, flags);
port_conf = ines_read32(port, port_conf);
port_conf &= ~CM_ONE_STEP;
port_conf |= cm_one_step;
ines_write32(port, port_conf, port_conf);
ines_write32(port, ts_stat_rx, ts_stat_rx);
ines_write32(port, ts_stat_tx, ts_stat_tx);
port->rxts_enabled = ts_stat_rx == TS_ENABLE;
port->txts_enabled = ts_stat_tx == TS_ENABLE;
spin_unlock_irqrestore(&port->lock, flags);
return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
static void ines_link_state(struct mii_timestamper *mii_ts,
struct phy_device *phydev)
{
struct ines_port *port = container_of(mii_ts, struct ines_port, mii_ts);
u32 port_conf, speed_conf;
unsigned long flags;
switch (phydev->speed) {
case SPEED_10:
speed_conf = PHY_SPEED_10 << PHY_SPEED_SHIFT;
break;
case SPEED_100:
speed_conf = PHY_SPEED_100 << PHY_SPEED_SHIFT;
break;
case SPEED_1000:
speed_conf = PHY_SPEED_1000 << PHY_SPEED_SHIFT;
break;
default:
dev_err(port->clock->dev, "bad speed: %d\n", phydev->speed);
return;
}
spin_lock_irqsave(&port->lock, flags);
port_conf = ines_read32(port, port_conf);
port_conf &= ~(0x3 << PHY_SPEED_SHIFT);
port_conf |= speed_conf;
ines_write32(port, port_conf, port_conf);
spin_unlock_irqrestore(&port->lock, flags);
}
static bool ines_match(struct sk_buff *skb, unsigned int ptp_class,
struct ines_timestamp *ts, struct device *dev)
{
struct ptp_header *hdr;
u16 portn, seqid;
u8 msgtype;
u64 clkid;
if (unlikely(ptp_class & PTP_CLASS_V1))
return false;
hdr = ptp_parse_header(skb, ptp_class);
if (!hdr)
return false;
msgtype = ptp_get_msgtype(hdr, ptp_class);
clkid = be64_to_cpup((__be64 *)&hdr->source_port_identity.clock_identity.id[0]);
portn = be16_to_cpu(hdr->source_port_identity.port_number);
seqid = be16_to_cpu(hdr->sequence_id);
if (tag_to_msgtype(ts->tag & 0x7) != msgtype) {
dev_dbg(dev, "msgtype mismatch ts %hhu != skb %hhu\n",
tag_to_msgtype(ts->tag & 0x7), msgtype);
return false;
}
if (ts->clkid != clkid) {
dev_dbg(dev, "clkid mismatch ts %llx != skb %llx\n",
ts->clkid, clkid);
return false;
}
if (ts->portnum != portn) {
dev_dbg(dev, "portn mismatch ts %hu != skb %hu\n",
ts->portnum, portn);
return false;
}
if (ts->seqid != seqid) {
dev_dbg(dev, "seqid mismatch ts %hu != skb %hu\n",
ts->seqid, seqid);
return false;
}
return true;
}
static bool ines_rxtstamp(struct mii_timestamper *mii_ts,
struct sk_buff *skb, int type)
{
struct ines_port *port = container_of(mii_ts, struct ines_port, mii_ts);
struct skb_shared_hwtstamps *ssh;
u64 ns;
if (!port->rxts_enabled)
return false;
ns = ines_find_rxts(port, skb, type);
if (!ns)
return false;
ssh = skb_hwtstamps(skb);
ssh->hwtstamp = ns_to_ktime(ns);
netif_rx(skb);
return true;
}
static int ines_rxfifo_read(struct ines_port *port)
{
u32 data_rd_pos, buf_stat, mask, ts_stat_rx;
struct ines_timestamp *ts;
unsigned int i;
mask = RX_FIFO_NE_1 << port->index;
for (i = 0; i < INES_FIFO_DEPTH; i++) {
if (list_empty(&port->pool)) {
dev_err(port->clock->dev, "event pool is empty\n");
return -1;
}
buf_stat = ines_read32(port->clock, buf_stat);
if (!(buf_stat & mask))
break;
ts_stat_rx = ines_read32(port, ts_stat_rx);
data_rd_pos = (ts_stat_rx >> DATA_READ_POS_SHIFT) &
DATA_READ_POS_MASK;
if (data_rd_pos) {
dev_err(port->clock->dev, "unexpected Rx read pos %u\n",
data_rd_pos);
break;
}
ts = list_first_entry(&port->pool, struct ines_timestamp, list);
ts->tmo = jiffies + HZ;
ts->tag = ines_read32(port, ts_rx);
ts->sec = ines_rxts64(port, 3);
ts->nsec = ines_rxts64(port, 2);
ts->clkid = ines_rxts64(port, 4);
ts->portnum = ines_read32(port, ts_rx);
ts->seqid = ines_read32(port, ts_rx);
list_del_init(&ts->list);
list_add_tail(&ts->list, &port->events);
}
return 0;
}
static u64 ines_rxts64(struct ines_port *port, unsigned int words)
{
unsigned int i;
u64 result;
u16 word;
word = ines_read32(port, ts_rx);
result = word;
words--;
for (i = 0; i < words; i++) {
word = ines_read32(port, ts_rx);
result <<= 16;
result |= word;
}
return result;
}
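/*
* Worked example (illustrative only): the FIFO delivers 16-bit words, most
* significant first. Reading three words 0x0001, 0x2345, 0x6789 therefore
* yields 0x000123456789; the callers above use 3 words for the 48-bit
* seconds field, 2 for nanoseconds and 4 for the clock identity.
*/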
static bool ines_timestamp_expired(struct ines_timestamp *ts)
{
return time_after(jiffies, ts->tmo);
}
static int ines_ts_info(struct mii_timestamper *mii_ts,
struct ethtool_ts_info *info)
{
info->so_timestamping =
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RX_SOFTWARE |
SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
info->phc_index = -1;
info->tx_types =
(1 << HWTSTAMP_TX_OFF) |
(1 << HWTSTAMP_TX_ON) |
(1 << HWTSTAMP_TX_ONESTEP_P2P);
info->rx_filters =
(1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
return 0;
}
static u64 ines_txts64(struct ines_port *port, unsigned int words)
{
unsigned int i;
u64 result;
u16 word;
word = ines_read32(port, ts_tx);
result = word;
words--;
for (i = 0; i < words; i++) {
word = ines_read32(port, ts_tx);
result <<= 16;
result |= word;
}
return result;
}
static bool ines_txts_onestep(struct ines_port *port, struct sk_buff *skb, int type)
{
unsigned long flags;
u32 port_conf;
spin_lock_irqsave(&port->lock, flags);
port_conf = ines_read32(port, port_conf);
spin_unlock_irqrestore(&port->lock, flags);
if (port_conf & CM_ONE_STEP)
return is_sync_pdelay_resp(skb, type);
return false;
}
static void ines_txtstamp(struct mii_timestamper *mii_ts,
struct sk_buff *skb, int type)
{
struct ines_port *port = container_of(mii_ts, struct ines_port, mii_ts);
struct sk_buff *old_skb = NULL;
unsigned long flags;
if (!port->txts_enabled || ines_txts_onestep(port, skb, type)) {
kfree_skb(skb);
return;
}
spin_lock_irqsave(&port->lock, flags);
if (port->tx_skb)
old_skb = port->tx_skb;
port->tx_skb = skb;
spin_unlock_irqrestore(&port->lock, flags);
kfree_skb(old_skb);
schedule_delayed_work(&port->ts_work, 1);
}
static void ines_txtstamp_work(struct work_struct *work)
{
struct ines_port *port =
container_of(work, struct ines_port, ts_work.work);
struct skb_shared_hwtstamps ssh;
struct sk_buff *skb;
unsigned long flags;
u64 ns;
spin_lock_irqsave(&port->lock, flags);
skb = port->tx_skb;
port->tx_skb = NULL;
spin_unlock_irqrestore(&port->lock, flags);
ns = ines_find_txts(port, skb);
if (!ns) {
kfree_skb(skb);
return;
}
ssh.hwtstamp = ns_to_ktime(ns);
skb_complete_tx_timestamp(skb, &ssh);
}
static bool is_sync_pdelay_resp(struct sk_buff *skb, int type)
{
struct ptp_header *hdr;
u8 msgtype;
hdr = ptp_parse_header(skb, type);
if (!hdr)
return false;
msgtype = ptp_get_msgtype(hdr, type);
switch (msgtype) {
case PTP_MSGTYPE_SYNC:
case PTP_MSGTYPE_PDELAY_RESP:
return true;
default:
return false;
}
}
static u8 tag_to_msgtype(u8 tag)
{
switch (tag) {
case MESSAGE_TYPE_SYNC:
return PTP_MSGTYPE_SYNC;
case MESSAGE_TYPE_P_DELAY_REQ:
return PTP_MSGTYPE_PDELAY_REQ;
case MESSAGE_TYPE_P_DELAY_RESP:
return PTP_MSGTYPE_PDELAY_RESP;
case MESSAGE_TYPE_DELAY_REQ:
return PTP_MSGTYPE_DELAY_REQ;
}
return 0xf;
}
static struct mii_timestamper *ines_ptp_probe_channel(struct device *device,
unsigned int index)
{
struct device_node *node = device->of_node;
struct ines_port *port;
if (index > INES_N_PORTS - 1) {
dev_err(device, "bad port index %u\n", index);
return ERR_PTR(-EINVAL);
}
port = ines_find_port(node, index);
if (!port) {
dev_err(device, "missing port index %u\n", index);
return ERR_PTR(-ENODEV);
}
port->mii_ts.rxtstamp = ines_rxtstamp;
port->mii_ts.txtstamp = ines_txtstamp;
port->mii_ts.hwtstamp = ines_hwtstamp;
port->mii_ts.link_state = ines_link_state;
port->mii_ts.ts_info = ines_ts_info;
return &port->mii_ts;
}
static void ines_ptp_release_channel(struct device *device,
struct mii_timestamper *mii_ts)
{
}
static struct mii_timestamping_ctrl ines_ctrl = {
.probe_channel = ines_ptp_probe_channel,
.release_channel = ines_ptp_release_channel,
};
static int ines_ptp_ctrl_probe(struct platform_device *pld)
{
struct ines_clock *clock;
void __iomem *addr;
int err = 0;
addr = devm_platform_ioremap_resource(pld, 0);
if (IS_ERR(addr)) {
err = PTR_ERR(addr);
goto out;
}
clock = kzalloc(sizeof(*clock), GFP_KERNEL);
if (!clock) {
err = -ENOMEM;
goto out;
}
if (ines_clock_init(clock, &pld->dev, addr)) {
kfree(clock);
err = -ENOMEM;
goto out;
}
err = register_mii_tstamp_controller(&pld->dev, &ines_ctrl);
if (err) {
kfree(clock);
goto out;
}
mutex_lock(&ines_clocks_lock);
list_add_tail(&clock->list, &ines_clocks);
mutex_unlock(&ines_clocks_lock);
dev_set_drvdata(&pld->dev, clock);
out:
return err;
}
static int ines_ptp_ctrl_remove(struct platform_device *pld)
{
struct ines_clock *clock = dev_get_drvdata(&pld->dev);
unregister_mii_tstamp_controller(&pld->dev);
mutex_lock(&ines_clocks_lock);
list_del(&clock->list);
mutex_unlock(&ines_clocks_lock);
ines_clock_cleanup(clock);
kfree(clock);
return 0;
}
static const struct of_device_id ines_ptp_ctrl_of_match[] = {
{ .compatible = "ines,ptp-ctrl" },
{ }
};
MODULE_DEVICE_TABLE(of, ines_ptp_ctrl_of_match);
static struct platform_driver ines_ptp_ctrl_driver = {
.probe = ines_ptp_ctrl_probe,
.remove = ines_ptp_ctrl_remove,
.driver = {
.name = "ines_ptp_ctrl",
.of_match_table = ines_ptp_ctrl_of_match,
},
};
module_platform_driver(ines_ptp_ctrl_driver);
|
linux-master
|
drivers/ptp/ptp_ines.c
|
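The hardware timestamping capabilities reported by ines_ts_info() above map directly onto the SIOCSHWTSTAMP ioctl used from user space. The following is a minimal, hypothetical user-space sketch (the interface name "eth0" and the choice of one-step P2P transmit timestamping are placeholders) showing how an application would request the advertised HWTSTAMP_TX_ONESTEP_P2P / HWTSTAMP_FILTER_PTP_V2_EVENT combination:

/* hwts_demo.c - illustrative user-space sketch, not part of the driver */
#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct hwtstamp_config cfg = { 0 };
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	cfg.tx_type = HWTSTAMP_TX_ONESTEP_P2P;        /* one of the advertised tx_types */
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; /* the advertised rx filter */
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);  /* placeholder interface name */
	ifr.ifr_data = (char *)&cfg;
	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	else
		printf("tx_type=%d rx_filter=%d accepted\n", cfg.tx_type, cfg.rx_filter);
	close(fd);
	return 0;
}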
// SPDX-License-Identifier: GPL-2.0-only
/*
* TI K3 DSP Remote Processor(s) driver
*
* Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
* Suman Anna <[email protected]>
*/
#include <linux/io.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_reserved_mem.h>
#include <linux/omap-mailbox.h>
#include <linux/platform_device.h>
#include <linux/remoteproc.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include "omap_remoteproc.h"
#include "remoteproc_internal.h"
#include "ti_sci_proc.h"
#define KEYSTONE_RPROC_LOCAL_ADDRESS_MASK (SZ_16M - 1)
/**
* struct k3_dsp_mem - internal memory structure
* @cpu_addr: MPU virtual address of the memory region
* @bus_addr: Bus address used to access the memory region
* @dev_addr: Device address of the memory region from DSP view
* @size: Size of the memory region
*/
struct k3_dsp_mem {
void __iomem *cpu_addr;
phys_addr_t bus_addr;
u32 dev_addr;
size_t size;
};
/**
* struct k3_dsp_mem_data - memory definitions for a DSP
* @name: name for this memory entry
* @dev_addr: device address for the memory entry
*/
struct k3_dsp_mem_data {
const char *name;
const u32 dev_addr;
};
/**
* struct k3_dsp_dev_data - device data structure for a DSP
* @mems: pointer to memory definitions for a DSP
* @num_mems: number of memory regions in @mems
* @boot_align_addr: boot vector address alignment granularity
* @uses_lreset: flag to denote the need for local reset management
*/
struct k3_dsp_dev_data {
const struct k3_dsp_mem_data *mems;
u32 num_mems;
u32 boot_align_addr;
bool uses_lreset;
};
/**
* struct k3_dsp_rproc - k3 DSP remote processor driver structure
* @dev: cached device pointer
* @rproc: remoteproc device handle
* @mem: internal memory regions data
* @num_mems: number of internal memory regions
* @rmem: reserved memory regions data
* @num_rmems: number of reserved memory regions
* @reset: reset control handle
* @data: pointer to DSP-specific device data
* @tsp: TI-SCI processor control handle
* @ti_sci: TI-SCI handle
* @ti_sci_id: TI-SCI device identifier
* @mbox: mailbox channel handle
* @client: mailbox client to request the mailbox channel
*/
struct k3_dsp_rproc {
struct device *dev;
struct rproc *rproc;
struct k3_dsp_mem *mem;
int num_mems;
struct k3_dsp_mem *rmem;
int num_rmems;
struct reset_control *reset;
const struct k3_dsp_dev_data *data;
struct ti_sci_proc *tsp;
const struct ti_sci_handle *ti_sci;
u32 ti_sci_id;
struct mbox_chan *mbox;
struct mbox_client client;
};
/**
* k3_dsp_rproc_mbox_callback() - inbound mailbox message handler
* @client: mailbox client pointer used for requesting the mailbox channel
* @data: mailbox payload
*
* This handler is invoked by the OMAP mailbox driver whenever a mailbox
* message is received. Usually, the mailbox payload simply contains
* the index of the virtqueue that is kicked by the remote processor,
* and we let remoteproc core handle it.
*
* In addition to virtqueue indices, we also have some out-of-band values
* that indicate different events. Those values are deliberately very
* large so they don't coincide with virtqueue indices.
*/
static void k3_dsp_rproc_mbox_callback(struct mbox_client *client, void *data)
{
struct k3_dsp_rproc *kproc = container_of(client, struct k3_dsp_rproc,
client);
struct device *dev = kproc->rproc->dev.parent;
const char *name = kproc->rproc->name;
u32 msg = omap_mbox_message(data);
dev_dbg(dev, "mbox msg: 0x%x\n", msg);
switch (msg) {
case RP_MBOX_CRASH:
/*
* remoteproc detected an exception, but error recovery is not
* supported. So, just log this for now
*/
dev_err(dev, "K3 DSP rproc %s crashed\n", name);
break;
case RP_MBOX_ECHO_REPLY:
dev_info(dev, "received echo reply from %s\n", name);
break;
default:
/* silently handle all other valid messages */
if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG)
return;
if (msg > kproc->rproc->max_notifyid) {
dev_dbg(dev, "dropping unknown message 0x%x", msg);
return;
}
/* msg contains the index of the triggered vring */
if (rproc_vq_interrupt(kproc->rproc, msg) == IRQ_NONE)
dev_dbg(dev, "no message was found in vqid %d\n", msg);
}
}
/*
* Kick the remote processor to notify it about pending unprocessed messages.
* The vqid argument is not used and is inconsequential, as the kick is
* performed through a simulated GPIO (a bit in an IPC interrupt-triggering
* register); the remote processor is expected to process both its Tx and Rx
* virtqueues.
*/
static void k3_dsp_rproc_kick(struct rproc *rproc, int vqid)
{
struct k3_dsp_rproc *kproc = rproc->priv;
struct device *dev = rproc->dev.parent;
mbox_msg_t msg = (mbox_msg_t)vqid;
int ret;
/* send the index of the triggered virtqueue in the mailbox payload */
ret = mbox_send_message(kproc->mbox, (void *)msg);
if (ret < 0)
dev_err(dev, "failed to send mailbox message, status = %d\n",
ret);
}
/* Put the DSP processor into reset */
static int k3_dsp_rproc_reset(struct k3_dsp_rproc *kproc)
{
struct device *dev = kproc->dev;
int ret;
ret = reset_control_assert(kproc->reset);
if (ret) {
dev_err(dev, "local-reset assert failed, ret = %d\n", ret);
return ret;
}
if (kproc->data->uses_lreset)
return ret;
ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
kproc->ti_sci_id);
if (ret) {
dev_err(dev, "module-reset assert failed, ret = %d\n", ret);
if (reset_control_deassert(kproc->reset))
dev_warn(dev, "local-reset deassert back failed\n");
}
return ret;
}
/* Release the DSP processor from reset */
static int k3_dsp_rproc_release(struct k3_dsp_rproc *kproc)
{
struct device *dev = kproc->dev;
int ret;
if (kproc->data->uses_lreset)
goto lreset;
ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
kproc->ti_sci_id);
if (ret) {
dev_err(dev, "module-reset deassert failed, ret = %d\n", ret);
return ret;
}
lreset:
ret = reset_control_deassert(kproc->reset);
if (ret) {
dev_err(dev, "local-reset deassert failed, ret = %d\n", ret);
if (kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
kproc->ti_sci_id))
dev_warn(dev, "module-reset assert back failed\n");
}
return ret;
}
static int k3_dsp_rproc_request_mbox(struct rproc *rproc)
{
struct k3_dsp_rproc *kproc = rproc->priv;
struct mbox_client *client = &kproc->client;
struct device *dev = kproc->dev;
int ret;
client->dev = dev;
client->tx_done = NULL;
client->rx_callback = k3_dsp_rproc_mbox_callback;
client->tx_block = false;
client->knows_txdone = false;
kproc->mbox = mbox_request_channel(client, 0);
if (IS_ERR(kproc->mbox)) {
ret = -EBUSY;
dev_err(dev, "mbox_request_channel failed: %ld\n",
PTR_ERR(kproc->mbox));
return ret;
}
/*
* Ping the remote processor; this is only a sanity check for now and
* has no functional effect whatsoever.
*
* Note that the reply will _not_ arrive immediately: this message
* will wait in the mailbox fifo until the remote processor is booted.
*/
ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
if (ret < 0) {
dev_err(dev, "mbox_send_message failed: %d\n", ret);
mbox_free_channel(kproc->mbox);
return ret;
}
return 0;
}
/*
* The C66x DSP cores have a local reset that affects only the CPU, and a
* generic module reset that powers on the device and allows the DSP internal
* memories to be accessed while the local reset is asserted. This function is
* used to release the global reset on C66x DSPs to allow loading into the DSP
* internal RAMs. The .prepare() ops is invoked by remoteproc core before any
* firmware loading, and is followed by the .start() ops after loading to
* actually let the C66x DSP cores run. This callback is invoked only in
* remoteproc mode.
*/
static int k3_dsp_rproc_prepare(struct rproc *rproc)
{
struct k3_dsp_rproc *kproc = rproc->priv;
struct device *dev = kproc->dev;
int ret;
ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
kproc->ti_sci_id);
if (ret)
dev_err(dev, "module-reset deassert failed, cannot enable internal RAM loading, ret = %d\n",
ret);
return ret;
}
/*
* This function implements the .unprepare() ops and performs the complementary
* operations to that of the .prepare() ops. The function is used to assert the
* global reset on applicable C66x cores. This completes the second portion of
* powering down the C66x DSP cores. The cores themselves are only halted in the
* .stop() callback through the local reset, and the .unprepare() ops is invoked
* by the remoteproc core after the remoteproc is stopped to balance the global
* reset. This callback is invoked only in remoteproc mode.
*/
static int k3_dsp_rproc_unprepare(struct rproc *rproc)
{
struct k3_dsp_rproc *kproc = rproc->priv;
struct device *dev = kproc->dev;
int ret;
ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
kproc->ti_sci_id);
if (ret)
dev_err(dev, "module-reset assert failed, ret = %d\n", ret);
return ret;
}
/*
* Power up the DSP remote processor.
*
* This function will be invoked only after the firmware for this rproc
* was loaded, parsed successfully, and all of its resource requirements
* were met. This callback is invoked only in remoteproc mode.
*/
static int k3_dsp_rproc_start(struct rproc *rproc)
{
struct k3_dsp_rproc *kproc = rproc->priv;
struct device *dev = kproc->dev;
u32 boot_addr;
int ret;
ret = k3_dsp_rproc_request_mbox(rproc);
if (ret)
return ret;
boot_addr = rproc->bootaddr;
if (boot_addr & (kproc->data->boot_align_addr - 1)) {
dev_err(dev, "invalid boot address 0x%x, must be aligned on a 0x%x boundary\n",
boot_addr, kproc->data->boot_align_addr);
ret = -EINVAL;
goto put_mbox;
}
dev_err(dev, "booting DSP core using boot addr = 0x%x\n", boot_addr);
ret = ti_sci_proc_set_config(kproc->tsp, boot_addr, 0, 0);
if (ret)
goto put_mbox;
ret = k3_dsp_rproc_release(kproc);
if (ret)
goto put_mbox;
return 0;
put_mbox:
mbox_free_channel(kproc->mbox);
return ret;
}
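/*
* Illustrative note (added for clarity): the boot-address check above relies
* on boot_align_addr being a power of two. For example, with the C66x value
* of SZ_1K a (hypothetical) boot address of 0x9e000000 passes, while
* 0x9e000200 is rejected with -EINVAL.
*/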
/*
* Stop the DSP remote processor.
*
* This function puts the DSP processor into reset, and finishes processing
* of any pending messages. This callback is invoked only in remoteproc mode.
*/
static int k3_dsp_rproc_stop(struct rproc *rproc)
{
struct k3_dsp_rproc *kproc = rproc->priv;
mbox_free_channel(kproc->mbox);
k3_dsp_rproc_reset(kproc);
return 0;
}
/*
* Attach to a running DSP remote processor (IPC-only mode)
*
* This rproc attach callback only needs to request the mailbox; the remote
* processor is already booted, so there is no need to issue any TI-SCI
* commands to boot the DSP core. This callback is invoked only in IPC-only
* mode.
*/
static int k3_dsp_rproc_attach(struct rproc *rproc)
{
struct k3_dsp_rproc *kproc = rproc->priv;
struct device *dev = kproc->dev;
int ret;
ret = k3_dsp_rproc_request_mbox(rproc);
if (ret)
return ret;
dev_info(dev, "DSP initialized in IPC-only mode\n");
return 0;
}
/*
* Detach from a running DSP remote processor (IPC-only mode)
*
* This rproc detach callback performs the opposite operation to the attach
* callback and only needs to release the mailbox; the DSP core is not stopped
* and will continue to run its booted firmware. This callback is invoked only
* in IPC-only mode.
*/
static int k3_dsp_rproc_detach(struct rproc *rproc)
{
struct k3_dsp_rproc *kproc = rproc->priv;
struct device *dev = kproc->dev;
mbox_free_channel(kproc->mbox);
dev_info(dev, "DSP deinitialized in IPC-only mode\n");
return 0;
}
/*
* This function implements the .get_loaded_rsc_table() callback and is used
* to provide the resource table for a booted DSP in IPC-only mode. The K3 DSP
* firmwares follow a design-by-contract approach and are expected to have the
* resource table at the base of the DDR region reserved for firmware usage.
* This provides flexibility for the remote processor to be booted by different
* bootloaders that may or may not have the ability to publish the resource table
* address and size through a DT property. This callback is invoked only in
* IPC-only mode.
*/
static struct resource_table *k3_dsp_get_loaded_rsc_table(struct rproc *rproc,
size_t *rsc_table_sz)
{
struct k3_dsp_rproc *kproc = rproc->priv;
struct device *dev = kproc->dev;
if (!kproc->rmem[0].cpu_addr) {
dev_err(dev, "memory-region #1 does not exist, loaded rsc table can't be found");
return ERR_PTR(-ENOMEM);
}
/*
* NOTE: The resource table size is currently hard-coded to a maximum
* of 256 bytes. The most common resource table usage for K3 firmwares
* is to only have the vdev resource entry and an optional trace entry.
* The exact size could be computed based on resource table address, but
* the hard-coded value suffices to support the IPC-only mode.
*/
*rsc_table_sz = 256;
return (struct resource_table *)kproc->rmem[0].cpu_addr;
}
/*
* Custom function to translate a DSP device address (internal RAMs only) to a
* kernel virtual address. The DSPs can access their RAMs at either an internal
* address visible only from a DSP, or at the SoC-level bus address. Both these
* addresses need to be looked through for translation. The translated addresses
* can be used either by the remoteproc core for loading (when using kernel
* remoteproc loader), or by any rpmsg bus drivers.
*/
static void *k3_dsp_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct k3_dsp_rproc *kproc = rproc->priv;
void __iomem *va = NULL;
phys_addr_t bus_addr;
u32 dev_addr, offset;
size_t size;
int i;
if (len == 0)
return NULL;
for (i = 0; i < kproc->num_mems; i++) {
bus_addr = kproc->mem[i].bus_addr;
dev_addr = kproc->mem[i].dev_addr;
size = kproc->mem[i].size;
if (da < KEYSTONE_RPROC_LOCAL_ADDRESS_MASK) {
/* handle DSP-view addresses */
if (da >= dev_addr &&
((da + len) <= (dev_addr + size))) {
offset = da - dev_addr;
va = kproc->mem[i].cpu_addr + offset;
return (__force void *)va;
}
} else {
/* handle SoC-view addresses */
if (da >= bus_addr &&
(da + len) <= (bus_addr + size)) {
offset = da - bus_addr;
va = kproc->mem[i].cpu_addr + offset;
return (__force void *)va;
}
}
}
/* handle static DDR reserved memory regions */
for (i = 0; i < kproc->num_rmems; i++) {
dev_addr = kproc->rmem[i].dev_addr;
size = kproc->rmem[i].size;
if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
offset = da - dev_addr;
va = kproc->rmem[i].cpu_addr + offset;
return (__force void *)va;
}
}
return NULL;
}
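/*
* Illustrative example (added, not in the original driver): with the C66x
* "l2sram" entry defined below (dev_addr 0x800000), a DSP-view da of 0x800100
* resolves to cpu_addr + 0x100 via the first branch, while the same word
* addressed through its SoC-level bus address takes the second branch; both
* yield the same kernel virtual address.
*/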
static const struct rproc_ops k3_dsp_rproc_ops = {
.start = k3_dsp_rproc_start,
.stop = k3_dsp_rproc_stop,
.kick = k3_dsp_rproc_kick,
.da_to_va = k3_dsp_rproc_da_to_va,
};
static int k3_dsp_rproc_of_get_memories(struct platform_device *pdev,
struct k3_dsp_rproc *kproc)
{
const struct k3_dsp_dev_data *data = kproc->data;
struct device *dev = &pdev->dev;
struct resource *res;
int num_mems = 0;
int i;
num_mems = kproc->data->num_mems;
kproc->mem = devm_kcalloc(kproc->dev, num_mems,
sizeof(*kproc->mem), GFP_KERNEL);
if (!kproc->mem)
return -ENOMEM;
for (i = 0; i < num_mems; i++) {
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
data->mems[i].name);
if (!res) {
dev_err(dev, "found no memory resource for %s\n",
data->mems[i].name);
return -EINVAL;
}
if (!devm_request_mem_region(dev, res->start,
resource_size(res),
dev_name(dev))) {
dev_err(dev, "could not request %s region for resource\n",
data->mems[i].name);
return -EBUSY;
}
kproc->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
resource_size(res));
if (!kproc->mem[i].cpu_addr) {
dev_err(dev, "failed to map %s memory\n",
data->mems[i].name);
return -ENOMEM;
}
kproc->mem[i].bus_addr = res->start;
kproc->mem[i].dev_addr = data->mems[i].dev_addr;
kproc->mem[i].size = resource_size(res);
dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %pK da 0x%x\n",
data->mems[i].name, &kproc->mem[i].bus_addr,
kproc->mem[i].size, kproc->mem[i].cpu_addr,
kproc->mem[i].dev_addr);
}
kproc->num_mems = num_mems;
return 0;
}
static int k3_dsp_reserved_mem_init(struct k3_dsp_rproc *kproc)
{
struct device *dev = kproc->dev;
struct device_node *np = dev->of_node;
struct device_node *rmem_np;
struct reserved_mem *rmem;
int num_rmems;
int ret, i;
num_rmems = of_property_count_elems_of_size(np, "memory-region",
sizeof(phandle));
if (num_rmems <= 0) {
dev_err(dev, "device does not reserved memory regions, ret = %d\n",
num_rmems);
return -EINVAL;
}
if (num_rmems < 2) {
dev_err(dev, "device needs at least two memory regions to be defined, num = %d\n",
num_rmems);
return -EINVAL;
}
/* use reserved memory region 0 for vring DMA allocations */
ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
if (ret) {
dev_err(dev, "device cannot initialize DMA pool, ret = %d\n",
ret);
return ret;
}
num_rmems--;
kproc->rmem = kcalloc(num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
if (!kproc->rmem) {
ret = -ENOMEM;
goto release_rmem;
}
/* use remaining reserved memory regions for static carveouts */
for (i = 0; i < num_rmems; i++) {
rmem_np = of_parse_phandle(np, "memory-region", i + 1);
if (!rmem_np) {
ret = -EINVAL;
goto unmap_rmem;
}
rmem = of_reserved_mem_lookup(rmem_np);
if (!rmem) {
of_node_put(rmem_np);
ret = -EINVAL;
goto unmap_rmem;
}
of_node_put(rmem_np);
kproc->rmem[i].bus_addr = rmem->base;
/* 64-bit address regions currently not supported */
kproc->rmem[i].dev_addr = (u32)rmem->base;
kproc->rmem[i].size = rmem->size;
kproc->rmem[i].cpu_addr = ioremap_wc(rmem->base, rmem->size);
if (!kproc->rmem[i].cpu_addr) {
dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
i + 1, &rmem->base, &rmem->size);
ret = -ENOMEM;
goto unmap_rmem;
}
dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
i + 1, &kproc->rmem[i].bus_addr,
kproc->rmem[i].size, kproc->rmem[i].cpu_addr,
kproc->rmem[i].dev_addr);
}
kproc->num_rmems = num_rmems;
return 0;
unmap_rmem:
for (i--; i >= 0; i--)
iounmap(kproc->rmem[i].cpu_addr);
kfree(kproc->rmem);
release_rmem:
of_reserved_mem_device_release(kproc->dev);
return ret;
}
static void k3_dsp_reserved_mem_exit(struct k3_dsp_rproc *kproc)
{
int i;
for (i = 0; i < kproc->num_rmems; i++)
iounmap(kproc->rmem[i].cpu_addr);
kfree(kproc->rmem);
of_reserved_mem_device_release(kproc->dev);
}
static
struct ti_sci_proc *k3_dsp_rproc_of_get_tsp(struct device *dev,
const struct ti_sci_handle *sci)
{
struct ti_sci_proc *tsp;
u32 temp[2];
int ret;
ret = of_property_read_u32_array(dev->of_node, "ti,sci-proc-ids",
temp, 2);
if (ret < 0)
return ERR_PTR(ret);
tsp = kzalloc(sizeof(*tsp), GFP_KERNEL);
if (!tsp)
return ERR_PTR(-ENOMEM);
tsp->dev = dev;
tsp->sci = sci;
tsp->ops = &sci->ops.proc_ops;
tsp->proc_id = temp[0];
tsp->host_id = temp[1];
return tsp;
}
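/*
* Note (added for clarity): the "ti,sci-proc-ids" DT property read above is
* expected to hold two cells consumed as <proc_id host_id>, in that order.
*/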
static int k3_dsp_rproc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
const struct k3_dsp_dev_data *data;
struct k3_dsp_rproc *kproc;
struct rproc *rproc;
const char *fw_name;
bool p_state = false;
int ret = 0;
int ret1;
data = of_device_get_match_data(dev);
if (!data)
return -ENODEV;
ret = rproc_of_parse_firmware(dev, 0, &fw_name);
if (ret) {
dev_err(dev, "failed to parse firmware-name property, ret = %d\n",
ret);
return ret;
}
rproc = rproc_alloc(dev, dev_name(dev), &k3_dsp_rproc_ops, fw_name,
sizeof(*kproc));
if (!rproc)
return -ENOMEM;
rproc->has_iommu = false;
rproc->recovery_disabled = true;
if (data->uses_lreset) {
rproc->ops->prepare = k3_dsp_rproc_prepare;
rproc->ops->unprepare = k3_dsp_rproc_unprepare;
}
kproc = rproc->priv;
kproc->rproc = rproc;
kproc->dev = dev;
kproc->data = data;
kproc->ti_sci = ti_sci_get_by_phandle(np, "ti,sci");
if (IS_ERR(kproc->ti_sci)) {
ret = PTR_ERR(kproc->ti_sci);
if (ret != -EPROBE_DEFER) {
dev_err(dev, "failed to get ti-sci handle, ret = %d\n",
ret);
}
kproc->ti_sci = NULL;
goto free_rproc;
}
ret = of_property_read_u32(np, "ti,sci-dev-id", &kproc->ti_sci_id);
if (ret) {
dev_err(dev, "missing 'ti,sci-dev-id' property\n");
goto put_sci;
}
kproc->reset = devm_reset_control_get_exclusive(dev, NULL);
if (IS_ERR(kproc->reset)) {
ret = PTR_ERR(kproc->reset);
dev_err(dev, "failed to get reset, status = %d\n", ret);
goto put_sci;
}
kproc->tsp = k3_dsp_rproc_of_get_tsp(dev, kproc->ti_sci);
if (IS_ERR(kproc->tsp)) {
dev_err(dev, "failed to construct ti-sci proc control, ret = %d\n",
ret);
ret = PTR_ERR(kproc->tsp);
goto put_sci;
}
ret = ti_sci_proc_request(kproc->tsp);
if (ret < 0) {
dev_err(dev, "ti_sci_proc_request failed, ret = %d\n", ret);
goto free_tsp;
}
ret = k3_dsp_rproc_of_get_memories(pdev, kproc);
if (ret)
goto release_tsp;
ret = k3_dsp_reserved_mem_init(kproc);
if (ret) {
dev_err(dev, "reserved memory init failed, ret = %d\n", ret);
goto release_tsp;
}
ret = kproc->ti_sci->ops.dev_ops.is_on(kproc->ti_sci, kproc->ti_sci_id,
NULL, &p_state);
if (ret) {
dev_err(dev, "failed to get initial state, mode cannot be determined, ret = %d\n",
ret);
goto release_mem;
}
/* configure J721E devices for either remoteproc or IPC-only mode */
if (p_state) {
dev_info(dev, "configured DSP for IPC-only mode\n");
rproc->state = RPROC_DETACHED;
/* override rproc ops with only required IPC-only mode ops */
rproc->ops->prepare = NULL;
rproc->ops->unprepare = NULL;
rproc->ops->start = NULL;
rproc->ops->stop = NULL;
rproc->ops->attach = k3_dsp_rproc_attach;
rproc->ops->detach = k3_dsp_rproc_detach;
rproc->ops->get_loaded_rsc_table = k3_dsp_get_loaded_rsc_table;
} else {
dev_info(dev, "configured DSP for remoteproc mode\n");
/*
* ensure the DSP local reset is asserted so that the DSP
* doesn't execute bogus code in .prepare() when the module
* reset is released.
*/
if (data->uses_lreset) {
ret = reset_control_status(kproc->reset);
if (ret < 0) {
dev_err(dev, "failed to get reset status, status = %d\n",
ret);
goto release_mem;
} else if (ret == 0) {
dev_warn(dev, "local reset is deasserted for device\n");
k3_dsp_rproc_reset(kproc);
}
}
}
ret = rproc_add(rproc);
if (ret) {
dev_err(dev, "failed to add register device with remoteproc core, status = %d\n",
ret);
goto release_mem;
}
platform_set_drvdata(pdev, kproc);
return 0;
release_mem:
k3_dsp_reserved_mem_exit(kproc);
release_tsp:
ret1 = ti_sci_proc_release(kproc->tsp);
if (ret1)
dev_err(dev, "failed to release proc, ret = %d\n", ret1);
free_tsp:
kfree(kproc->tsp);
put_sci:
ret1 = ti_sci_put_handle(kproc->ti_sci);
if (ret1)
dev_err(dev, "failed to put ti_sci handle, ret = %d\n", ret1);
free_rproc:
rproc_free(rproc);
return ret;
}
static int k3_dsp_rproc_remove(struct platform_device *pdev)
{
struct k3_dsp_rproc *kproc = platform_get_drvdata(pdev);
struct rproc *rproc = kproc->rproc;
struct device *dev = &pdev->dev;
int ret;
if (rproc->state == RPROC_ATTACHED) {
ret = rproc_detach(rproc);
if (ret) {
dev_err(dev, "failed to detach proc, ret = %d\n", ret);
return ret;
}
}
rproc_del(kproc->rproc);
ret = ti_sci_proc_release(kproc->tsp);
if (ret)
dev_err(dev, "failed to release proc, ret = %d\n", ret);
kfree(kproc->tsp);
ret = ti_sci_put_handle(kproc->ti_sci);
if (ret)
dev_err(dev, "failed to put ti_sci handle, ret = %d\n", ret);
k3_dsp_reserved_mem_exit(kproc);
rproc_free(kproc->rproc);
return 0;
}
static const struct k3_dsp_mem_data c66_mems[] = {
{ .name = "l2sram", .dev_addr = 0x800000 },
{ .name = "l1pram", .dev_addr = 0xe00000 },
{ .name = "l1dram", .dev_addr = 0xf00000 },
};
/* C71x cores only have an L1P cache; there are no L1P SRAMs */
static const struct k3_dsp_mem_data c71_mems[] = {
{ .name = "l2sram", .dev_addr = 0x800000 },
{ .name = "l1dram", .dev_addr = 0xe00000 },
};
static const struct k3_dsp_mem_data c7xv_mems[] = {
{ .name = "l2sram", .dev_addr = 0x800000 },
};
static const struct k3_dsp_dev_data c66_data = {
.mems = c66_mems,
.num_mems = ARRAY_SIZE(c66_mems),
.boot_align_addr = SZ_1K,
.uses_lreset = true,
};
static const struct k3_dsp_dev_data c71_data = {
.mems = c71_mems,
.num_mems = ARRAY_SIZE(c71_mems),
.boot_align_addr = SZ_2M,
.uses_lreset = false,
};
static const struct k3_dsp_dev_data c7xv_data = {
.mems = c7xv_mems,
.num_mems = ARRAY_SIZE(c7xv_mems),
.boot_align_addr = SZ_2M,
.uses_lreset = false,
};
static const struct of_device_id k3_dsp_of_match[] = {
{ .compatible = "ti,j721e-c66-dsp", .data = &c66_data, },
{ .compatible = "ti,j721e-c71-dsp", .data = &c71_data, },
{ .compatible = "ti,j721s2-c71-dsp", .data = &c71_data, },
{ .compatible = "ti,am62a-c7xv-dsp", .data = &c7xv_data, },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, k3_dsp_of_match);
static struct platform_driver k3_dsp_rproc_driver = {
.probe = k3_dsp_rproc_probe,
.remove = k3_dsp_rproc_remove,
.driver = {
.name = "k3-dsp-rproc",
.of_match_table = k3_dsp_of_match,
},
};
module_platform_driver(k3_dsp_rproc_driver);
MODULE_AUTHOR("Suman Anna <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI K3 DSP Remoteproc driver");
|
linux-master
|
drivers/remoteproc/ti_k3_dsp_remoteproc.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Character device interface driver for Remoteproc framework.
*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*/
#include <linux/cdev.h>
#include <linux/compat.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/remoteproc.h>
#include <linux/uaccess.h>
#include <uapi/linux/remoteproc_cdev.h>
#include "remoteproc_internal.h"
#define NUM_RPROC_DEVICES 64
static dev_t rproc_major;
static ssize_t rproc_cdev_write(struct file *filp, const char __user *buf, size_t len, loff_t *pos)
{
struct rproc *rproc = container_of(filp->f_inode->i_cdev, struct rproc, cdev);
int ret = 0;
char cmd[10];
if (!len || len > sizeof(cmd))
return -EINVAL;
ret = copy_from_user(cmd, buf, len);
if (ret)
return -EFAULT;
if (!strncmp(cmd, "start", len)) {
ret = rproc_boot(rproc);
} else if (!strncmp(cmd, "stop", len)) {
ret = rproc_shutdown(rproc);
} else if (!strncmp(cmd, "detach", len)) {
ret = rproc_detach(rproc);
} else {
dev_err(&rproc->dev, "Unrecognized option\n");
ret = -EINVAL;
}
return ret ? ret : len;
}
static long rproc_device_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
struct rproc *rproc = container_of(filp->f_inode->i_cdev, struct rproc, cdev);
void __user *argp = (void __user *)arg;
s32 param;
switch (ioctl) {
case RPROC_SET_SHUTDOWN_ON_RELEASE:
if (copy_from_user(&param, argp, sizeof(s32)))
return -EFAULT;
rproc->cdev_put_on_release = !!param;
break;
case RPROC_GET_SHUTDOWN_ON_RELEASE:
param = (s32)rproc->cdev_put_on_release;
if (copy_to_user(argp, &param, sizeof(s32)))
return -EFAULT;
break;
default:
dev_err(&rproc->dev, "Unsupported ioctl\n");
return -EINVAL;
}
return 0;
}
static int rproc_cdev_release(struct inode *inode, struct file *filp)
{
struct rproc *rproc = container_of(inode->i_cdev, struct rproc, cdev);
int ret = 0;
if (!rproc->cdev_put_on_release)
return 0;
if (rproc->state == RPROC_RUNNING)
rproc_shutdown(rproc);
else if (rproc->state == RPROC_ATTACHED)
ret = rproc_detach(rproc);
return ret;
}
static const struct file_operations rproc_fops = {
.write = rproc_cdev_write,
.unlocked_ioctl = rproc_device_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.release = rproc_cdev_release,
};
int rproc_char_device_add(struct rproc *rproc)
{
int ret;
cdev_init(&rproc->cdev, &rproc_fops);
rproc->cdev.owner = THIS_MODULE;
rproc->dev.devt = MKDEV(MAJOR(rproc_major), rproc->index);
cdev_set_parent(&rproc->cdev, &rproc->dev.kobj);
ret = cdev_add(&rproc->cdev, rproc->dev.devt, 1);
if (ret < 0)
dev_err(&rproc->dev, "Failed to add char dev for %s\n", rproc->name);
return ret;
}
void rproc_char_device_remove(struct rproc *rproc)
{
cdev_del(&rproc->cdev);
}
void __init rproc_init_cdev(void)
{
int ret;
ret = alloc_chrdev_region(&rproc_major, 0, NUM_RPROC_DEVICES, "remoteproc");
if (ret < 0)
pr_err("Failed to alloc rproc_cdev region, err %d\n", ret);
}
|
linux-master
|
drivers/remoteproc/remoteproc_cdev.c
|
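A minimal user-space sketch of driving this character device interface; the node name /dev/remoteproc0 is a typical but not guaranteed path, and error handling is abbreviated. It exercises the "start" command parsed by rproc_cdev_write() and the RPROC_SET_SHUTDOWN_ON_RELEASE ioctl handled above:

/* rproc_cdev_demo.c - illustrative user-space sketch, not part of the driver */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/remoteproc_cdev.h>

int main(void)
{
	int fd = open("/dev/remoteproc0", O_RDWR); /* node name/index may vary */
	int one = 1;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* ask the kernel to shut down/detach the rproc when this fd is released */
	if (ioctl(fd, RPROC_SET_SHUTDOWN_ON_RELEASE, &one) < 0)
		perror("RPROC_SET_SHUTDOWN_ON_RELEASE");
	/* boot the remote processor, mirroring the "start" branch above */
	if (write(fd, "start", strlen("start")) < 0)
		perror("write start");
	close(fd);
	return 0;
}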
// SPDX-License-Identifier: GPL-2.0-only
/*
* TI AMx3 Wakeup M3 Remote Processor driver
*
* Copyright (C) 2014-2015 Texas Instruments, Inc.
*
* Dave Gerlach <[email protected]>
* Suman Anna <[email protected]>
*/
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/remoteproc.h>
#include <linux/reset.h>
#include <linux/platform_data/wkup_m3.h>
#include "remoteproc_internal.h"
#define WKUPM3_MEM_MAX 2
/**
* struct wkup_m3_mem - WkupM3 internal memory structure
* @cpu_addr: MPU virtual address of the memory region
* @bus_addr: Bus address used to access the memory region
* @dev_addr: Device address from Wakeup M3 view
* @size: Size of the memory region
*/
struct wkup_m3_mem {
void __iomem *cpu_addr;
phys_addr_t bus_addr;
u32 dev_addr;
size_t size;
};
/**
* struct wkup_m3_rproc - WkupM3 remote processor state
* @rproc: rproc handle
* @pdev: pointer to platform device
* @mem: WkupM3 memory information
* @rsts: reset control
*/
struct wkup_m3_rproc {
struct rproc *rproc;
struct platform_device *pdev;
struct wkup_m3_mem mem[WKUPM3_MEM_MAX];
struct reset_control *rsts;
};
static int wkup_m3_rproc_start(struct rproc *rproc)
{
struct wkup_m3_rproc *wkupm3 = rproc->priv;
struct platform_device *pdev = wkupm3->pdev;
struct device *dev = &pdev->dev;
struct wkup_m3_platform_data *pdata = dev_get_platdata(dev);
int error = 0;
error = reset_control_deassert(wkupm3->rsts);
if (!wkupm3->rsts && pdata->deassert_reset(pdev, pdata->reset_name)) {
dev_err(dev, "Unable to reset wkup_m3!\n");
error = -ENODEV;
}
return error;
}
static int wkup_m3_rproc_stop(struct rproc *rproc)
{
struct wkup_m3_rproc *wkupm3 = rproc->priv;
struct platform_device *pdev = wkupm3->pdev;
struct device *dev = &pdev->dev;
struct wkup_m3_platform_data *pdata = dev_get_platdata(dev);
int error = 0;
error = reset_control_assert(wkupm3->rsts);
if (!wkupm3->rsts && pdata->assert_reset(pdev, pdata->reset_name)) {
dev_err(dev, "Unable to assert reset of wkup_m3!\n");
error = -ENODEV;
}
return error;
}
static void *wkup_m3_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct wkup_m3_rproc *wkupm3 = rproc->priv;
void *va = NULL;
int i;
u32 offset;
if (len == 0)
return NULL;
for (i = 0; i < WKUPM3_MEM_MAX; i++) {
if (da >= wkupm3->mem[i].dev_addr && da + len <=
wkupm3->mem[i].dev_addr + wkupm3->mem[i].size) {
offset = da - wkupm3->mem[i].dev_addr;
/* __force to make sparse happy with type conversion */
va = (__force void *)(wkupm3->mem[i].cpu_addr + offset);
break;
}
}
return va;
}
static const struct rproc_ops wkup_m3_rproc_ops = {
.start = wkup_m3_rproc_start,
.stop = wkup_m3_rproc_stop,
.da_to_va = wkup_m3_rproc_da_to_va,
};
static const struct of_device_id wkup_m3_rproc_of_match[] = {
{ .compatible = "ti,am3352-wkup-m3", },
{ .compatible = "ti,am4372-wkup-m3", },
{},
};
MODULE_DEVICE_TABLE(of, wkup_m3_rproc_of_match);
static int wkup_m3_rproc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct wkup_m3_platform_data *pdata = dev->platform_data;
/* umem always needs to be processed first */
const char *mem_names[WKUPM3_MEM_MAX] = { "umem", "dmem" };
struct wkup_m3_rproc *wkupm3;
const char *fw_name;
struct rproc *rproc;
struct resource *res;
const __be32 *addrp;
u32 l4_offset = 0;
u64 size;
int ret;
int i;
ret = of_property_read_string(dev->of_node, "ti,pm-firmware",
&fw_name);
if (ret) {
dev_err(dev, "No firmware filename given\n");
return -ENODEV;
}
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "pm_runtime_get_sync() failed\n");
goto err;
}
rproc = rproc_alloc(dev, "wkup_m3", &wkup_m3_rproc_ops,
fw_name, sizeof(*wkupm3));
if (!rproc) {
ret = -ENOMEM;
goto err;
}
rproc->auto_boot = false;
rproc->sysfs_read_only = true;
wkupm3 = rproc->priv;
wkupm3->rproc = rproc;
wkupm3->pdev = pdev;
wkupm3->rsts = devm_reset_control_get_optional_shared(dev, "rstctrl");
if (IS_ERR(wkupm3->rsts))
return PTR_ERR(wkupm3->rsts);
if (!wkupm3->rsts) {
if (!(pdata && pdata->deassert_reset && pdata->assert_reset &&
pdata->reset_name)) {
dev_err(dev, "Platform data missing!\n");
ret = -ENODEV;
goto err_put_rproc;
}
}
for (i = 0; i < ARRAY_SIZE(mem_names); i++) {
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
mem_names[i]);
wkupm3->mem[i].cpu_addr = devm_ioremap_resource(dev, res);
if (IS_ERR(wkupm3->mem[i].cpu_addr)) {
dev_err(&pdev->dev, "devm_ioremap_resource failed for resource %d\n",
i);
ret = PTR_ERR(wkupm3->mem[i].cpu_addr);
goto err_put_rproc;
}
wkupm3->mem[i].bus_addr = res->start;
wkupm3->mem[i].size = resource_size(res);
addrp = of_get_address(dev->of_node, i, &size, NULL);
/*
* The wkupm3 has umem at address 0 in its view, so the device
* addresses for each memory region are computed as relative
* offsets of the bus address for umem, which therefore needs to be
* processed first.
*/
if (!strcmp(mem_names[i], "umem"))
l4_offset = be32_to_cpu(*addrp);
wkupm3->mem[i].dev_addr = be32_to_cpu(*addrp) - l4_offset;
}
dev_set_drvdata(dev, rproc);
ret = rproc_add(rproc);
if (ret) {
dev_err(dev, "rproc_add failed\n");
goto err_put_rproc;
}
return 0;
err_put_rproc:
rproc_free(rproc);
err:
pm_runtime_put_noidle(dev);
pm_runtime_disable(dev);
return ret;
}
static void wkup_m3_rproc_remove(struct platform_device *pdev)
{
struct rproc *rproc = platform_get_drvdata(pdev);
rproc_del(rproc);
rproc_free(rproc);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
}
#ifdef CONFIG_PM
static int wkup_m3_rpm_suspend(struct device *dev)
{
return -EBUSY;
}
static int wkup_m3_rpm_resume(struct device *dev)
{
return 0;
}
#endif
static const struct dev_pm_ops wkup_m3_rproc_pm_ops = {
SET_RUNTIME_PM_OPS(wkup_m3_rpm_suspend, wkup_m3_rpm_resume, NULL)
};
static struct platform_driver wkup_m3_rproc_driver = {
.probe = wkup_m3_rproc_probe,
.remove_new = wkup_m3_rproc_remove,
.driver = {
.name = "wkup_m3_rproc",
.of_match_table = wkup_m3_rproc_of_match,
.pm = &wkup_m3_rproc_pm_ops,
},
};
module_platform_driver(wkup_m3_rproc_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI Wakeup M3 remote processor control driver");
MODULE_AUTHOR("Dave Gerlach <[email protected]>");
|
linux-master
|
drivers/remoteproc/wkup_m3_rproc.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2017, Linaro Ltd.
*/
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/remoteproc/qcom_rproc.h>
#include <linux/rpmsg.h>
#include "qcom_common.h"
static BLOCKING_NOTIFIER_HEAD(sysmon_notifiers);
struct qcom_sysmon {
struct rproc_subdev subdev;
struct rproc *rproc;
int state;
struct mutex state_lock;
struct list_head node;
const char *name;
int shutdown_irq;
int ssctl_version;
int ssctl_instance;
struct notifier_block nb;
struct device *dev;
struct rpmsg_endpoint *ept;
struct completion comp;
struct completion ind_comp;
struct completion shutdown_comp;
struct completion ssctl_comp;
struct mutex lock;
bool ssr_ack;
bool shutdown_acked;
struct qmi_handle qmi;
struct sockaddr_qrtr ssctl;
};
enum {
SSCTL_SSR_EVENT_BEFORE_POWERUP,
SSCTL_SSR_EVENT_AFTER_POWERUP,
SSCTL_SSR_EVENT_BEFORE_SHUTDOWN,
SSCTL_SSR_EVENT_AFTER_SHUTDOWN,
};
static const char * const sysmon_state_string[] = {
[SSCTL_SSR_EVENT_BEFORE_POWERUP] = "before_powerup",
[SSCTL_SSR_EVENT_AFTER_POWERUP] = "after_powerup",
[SSCTL_SSR_EVENT_BEFORE_SHUTDOWN] = "before_shutdown",
[SSCTL_SSR_EVENT_AFTER_SHUTDOWN] = "after_shutdown",
};
struct sysmon_event {
const char *subsys_name;
u32 ssr_event;
};
static DEFINE_MUTEX(sysmon_lock);
static LIST_HEAD(sysmon_list);
/**
* sysmon_send_event() - send notification of other remote's SSR event
* @sysmon: sysmon context
* @event: sysmon event context
*/
static void sysmon_send_event(struct qcom_sysmon *sysmon,
const struct sysmon_event *event)
{
char req[50];
int len;
int ret;
len = snprintf(req, sizeof(req), "ssr:%s:%s", event->subsys_name,
sysmon_state_string[event->ssr_event]);
if (len >= sizeof(req))
return;
mutex_lock(&sysmon->lock);
reinit_completion(&sysmon->comp);
sysmon->ssr_ack = false;
ret = rpmsg_send(sysmon->ept, req, len);
if (ret < 0) {
dev_err(sysmon->dev, "failed to send sysmon event\n");
goto out_unlock;
}
ret = wait_for_completion_timeout(&sysmon->comp,
msecs_to_jiffies(5000));
if (!ret) {
dev_err(sysmon->dev, "timeout waiting for sysmon ack\n");
goto out_unlock;
}
if (!sysmon->ssr_ack)
dev_err(sysmon->dev, "unexpected response to sysmon event\n");
out_unlock:
mutex_unlock(&sysmon->lock);
}
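/*
* Illustrative note (added, not in the original driver): for a hypothetical
* subsystem named "adsp" going down, the request built above is the literal
* string "ssr:adsp:before_shutdown", and the peer acknowledges it with
* "ssr:ack" (see sysmon_callback()).
*/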
/**
* sysmon_request_shutdown() - request graceful shutdown of remote
* @sysmon: sysmon context
*
* Return: boolean indicator of the remote processor acking the request
*/
static bool sysmon_request_shutdown(struct qcom_sysmon *sysmon)
{
char *req = "ssr:shutdown";
bool acked = false;
int ret;
mutex_lock(&sysmon->lock);
reinit_completion(&sysmon->comp);
sysmon->ssr_ack = false;
ret = rpmsg_send(sysmon->ept, req, strlen(req) + 1);
if (ret < 0) {
dev_err(sysmon->dev, "send sysmon shutdown request failed\n");
goto out_unlock;
}
ret = wait_for_completion_timeout(&sysmon->comp,
msecs_to_jiffies(5000));
if (!ret) {
dev_err(sysmon->dev, "timeout waiting for sysmon ack\n");
goto out_unlock;
}
if (!sysmon->ssr_ack)
dev_err(sysmon->dev,
"unexpected response to sysmon shutdown request\n");
else
acked = true;
out_unlock:
mutex_unlock(&sysmon->lock);
return acked;
}
static int sysmon_callback(struct rpmsg_device *rpdev, void *data, int count,
void *priv, u32 addr)
{
struct qcom_sysmon *sysmon = priv;
const char *ssr_ack = "ssr:ack";
const int ssr_ack_len = strlen(ssr_ack) + 1;
if (!sysmon)
return -EINVAL;
if (count >= ssr_ack_len && !memcmp(data, ssr_ack, ssr_ack_len))
sysmon->ssr_ack = true;
complete(&sysmon->comp);
return 0;
}
#define SSCTL_SHUTDOWN_REQ 0x21
#define SSCTL_SHUTDOWN_READY_IND 0x21
#define SSCTL_SUBSYS_EVENT_REQ 0x23
#define SSCTL_MAX_MSG_LEN 7
#define SSCTL_SUBSYS_NAME_LENGTH 15
enum {
SSCTL_SSR_EVENT_FORCED,
SSCTL_SSR_EVENT_GRACEFUL,
};
struct ssctl_shutdown_resp {
struct qmi_response_type_v01 resp;
};
static const struct qmi_elem_info ssctl_shutdown_resp_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct ssctl_shutdown_resp, resp),
.ei_array = qmi_response_type_v01_ei,
},
{}
};
struct ssctl_subsys_event_req {
u8 subsys_name_len;
char subsys_name[SSCTL_SUBSYS_NAME_LENGTH];
u32 event;
u8 evt_driven_valid;
u32 evt_driven;
};
static const struct qmi_elem_info ssctl_subsys_event_req_ei[] = {
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
.elem_size = sizeof(uint8_t),
.array_type = NO_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct ssctl_subsys_event_req,
subsys_name_len),
.ei_array = NULL,
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = SSCTL_SUBSYS_NAME_LENGTH,
.elem_size = sizeof(char),
.array_type = VAR_LEN_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct ssctl_subsys_event_req,
subsys_name),
.ei_array = NULL,
},
{
.data_type = QMI_SIGNED_4_BYTE_ENUM,
.elem_len = 1,
.elem_size = sizeof(uint32_t),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct ssctl_subsys_event_req,
event),
.ei_array = NULL,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(uint8_t),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct ssctl_subsys_event_req,
evt_driven_valid),
.ei_array = NULL,
},
{
.data_type = QMI_SIGNED_4_BYTE_ENUM,
.elem_len = 1,
.elem_size = sizeof(uint32_t),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct ssctl_subsys_event_req,
evt_driven),
.ei_array = NULL,
},
{}
};
struct ssctl_subsys_event_resp {
struct qmi_response_type_v01 resp;
};
static const struct qmi_elem_info ssctl_subsys_event_resp_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct ssctl_subsys_event_resp,
resp),
.ei_array = qmi_response_type_v01_ei,
},
{}
};
static const struct qmi_elem_info ssctl_shutdown_ind_ei[] = {
{}
};
static void sysmon_ind_cb(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
struct qmi_txn *txn, const void *data)
{
struct qcom_sysmon *sysmon = container_of(qmi, struct qcom_sysmon, qmi);
complete(&sysmon->ind_comp);
}
static const struct qmi_msg_handler qmi_indication_handler[] = {
{
.type = QMI_INDICATION,
.msg_id = SSCTL_SHUTDOWN_READY_IND,
.ei = ssctl_shutdown_ind_ei,
.decoded_size = 0,
.fn = sysmon_ind_cb
},
{}
};
static bool ssctl_request_shutdown_wait(struct qcom_sysmon *sysmon)
{
int ret;
ret = wait_for_completion_timeout(&sysmon->shutdown_comp, 10 * HZ);
if (ret)
return true;
ret = try_wait_for_completion(&sysmon->ind_comp);
if (ret)
return true;
dev_err(sysmon->dev, "timeout waiting for shutdown ack\n");
return false;
}
/**
* ssctl_request_shutdown() - request shutdown via SSCTL QMI service
* @sysmon: sysmon context
*
* Return: boolean indicator of the remote processor acking the request
*/
static bool ssctl_request_shutdown(struct qcom_sysmon *sysmon)
{
struct ssctl_shutdown_resp resp;
struct qmi_txn txn;
bool acked = false;
int ret;
reinit_completion(&sysmon->ind_comp);
reinit_completion(&sysmon->shutdown_comp);
ret = qmi_txn_init(&sysmon->qmi, &txn, ssctl_shutdown_resp_ei, &resp);
if (ret < 0) {
dev_err(sysmon->dev, "failed to allocate QMI txn\n");
return false;
}
ret = qmi_send_request(&sysmon->qmi, &sysmon->ssctl, &txn,
SSCTL_SHUTDOWN_REQ, 0, NULL, NULL);
if (ret < 0) {
dev_err(sysmon->dev, "failed to send shutdown request\n");
qmi_txn_cancel(&txn);
return false;
}
ret = qmi_txn_wait(&txn, 5 * HZ);
if (ret < 0) {
dev_err(sysmon->dev, "timeout waiting for shutdown response\n");
} else if (resp.resp.result) {
dev_err(sysmon->dev, "shutdown request rejected\n");
} else {
dev_dbg(sysmon->dev, "shutdown request completed\n");
acked = true;
}
if (sysmon->shutdown_irq > 0)
return ssctl_request_shutdown_wait(sysmon);
return acked;
}
/**
* ssctl_send_event() - send notification of other remote's SSR event
* @sysmon: sysmon context
* @event: sysmon event context
*/
static void ssctl_send_event(struct qcom_sysmon *sysmon,
const struct sysmon_event *event)
{
struct ssctl_subsys_event_resp resp;
struct ssctl_subsys_event_req req;
struct qmi_txn txn;
int ret;
memset(&resp, 0, sizeof(resp));
ret = qmi_txn_init(&sysmon->qmi, &txn, ssctl_subsys_event_resp_ei, &resp);
if (ret < 0) {
dev_err(sysmon->dev, "failed to allocate QMI txn\n");
return;
}
memset(&req, 0, sizeof(req));
strscpy(req.subsys_name, event->subsys_name, sizeof(req.subsys_name));
req.subsys_name_len = strlen(req.subsys_name);
req.event = event->ssr_event;
req.evt_driven_valid = true;
req.evt_driven = SSCTL_SSR_EVENT_FORCED;
ret = qmi_send_request(&sysmon->qmi, &sysmon->ssctl, &txn,
SSCTL_SUBSYS_EVENT_REQ, 40,
ssctl_subsys_event_req_ei, &req);
if (ret < 0) {
dev_err(sysmon->dev, "failed to send subsystem event\n");
qmi_txn_cancel(&txn);
return;
}
ret = qmi_txn_wait(&txn, 5 * HZ);
if (ret < 0)
dev_err(sysmon->dev, "timeout waiting for subsystem event response\n");
else if (resp.resp.result)
dev_err(sysmon->dev, "subsystem event rejected\n");
else
dev_dbg(sysmon->dev, "subsystem event accepted\n");
}
/**
* ssctl_new_server() - QMI callback indicating a new service
* @qmi: QMI handle
* @svc: service information
*
* Return: 0 if we're interested in this service, -EINVAL otherwise.
*/
static int ssctl_new_server(struct qmi_handle *qmi, struct qmi_service *svc)
{
struct qcom_sysmon *sysmon = container_of(qmi, struct qcom_sysmon, qmi);
switch (svc->version) {
case 1:
if (svc->instance != 0)
return -EINVAL;
if (strcmp(sysmon->name, "modem"))
return -EINVAL;
break;
case 2:
if (svc->instance != sysmon->ssctl_instance)
return -EINVAL;
break;
default:
return -EINVAL;
}
sysmon->ssctl_version = svc->version;
sysmon->ssctl.sq_family = AF_QIPCRTR;
sysmon->ssctl.sq_node = svc->node;
sysmon->ssctl.sq_port = svc->port;
svc->priv = sysmon;
complete(&sysmon->ssctl_comp);
return 0;
}
/**
* ssctl_del_server() - QMI callback indicating that @svc is removed
* @qmi: QMI handle
* @svc: service information
*/
static void ssctl_del_server(struct qmi_handle *qmi, struct qmi_service *svc)
{
struct qcom_sysmon *sysmon = svc->priv;
sysmon->ssctl_version = 0;
}
static const struct qmi_ops ssctl_ops = {
.new_server = ssctl_new_server,
.del_server = ssctl_del_server,
};
static int sysmon_prepare(struct rproc_subdev *subdev)
{
struct qcom_sysmon *sysmon = container_of(subdev, struct qcom_sysmon,
subdev);
struct sysmon_event event = {
.subsys_name = sysmon->name,
.ssr_event = SSCTL_SSR_EVENT_BEFORE_POWERUP
};
mutex_lock(&sysmon->state_lock);
sysmon->state = SSCTL_SSR_EVENT_BEFORE_POWERUP;
blocking_notifier_call_chain(&sysmon_notifiers, 0, (void *)&event);
mutex_unlock(&sysmon->state_lock);
return 0;
}
/**
* sysmon_start() - start callback for the sysmon remoteproc subdevice
* @subdev: instance of the sysmon subdevice
*
* Inform all the listeners of sysmon notifications that the rproc associated
* to @subdev has booted up. The rproc that booted up also needs to know
* which rprocs are already up and running, so send start notifications
* on behalf of all the online rprocs.
*/
static int sysmon_start(struct rproc_subdev *subdev)
{
struct qcom_sysmon *sysmon = container_of(subdev, struct qcom_sysmon,
subdev);
struct qcom_sysmon *target;
struct sysmon_event event = {
.subsys_name = sysmon->name,
.ssr_event = SSCTL_SSR_EVENT_AFTER_POWERUP
};
reinit_completion(&sysmon->ssctl_comp);
mutex_lock(&sysmon->state_lock);
sysmon->state = SSCTL_SSR_EVENT_AFTER_POWERUP;
blocking_notifier_call_chain(&sysmon_notifiers, 0, (void *)&event);
mutex_unlock(&sysmon->state_lock);
mutex_lock(&sysmon_lock);
list_for_each_entry(target, &sysmon_list, node) {
mutex_lock(&target->state_lock);
if (target == sysmon || target->state != SSCTL_SSR_EVENT_AFTER_POWERUP) {
mutex_unlock(&target->state_lock);
continue;
}
event.subsys_name = target->name;
event.ssr_event = target->state;
if (sysmon->ssctl_version == 2)
ssctl_send_event(sysmon, &event);
else if (sysmon->ept)
sysmon_send_event(sysmon, &event);
mutex_unlock(&target->state_lock);
}
mutex_unlock(&sysmon_lock);
return 0;
}
static void sysmon_stop(struct rproc_subdev *subdev, bool crashed)
{
struct qcom_sysmon *sysmon = container_of(subdev, struct qcom_sysmon, subdev);
struct sysmon_event event = {
.subsys_name = sysmon->name,
.ssr_event = SSCTL_SSR_EVENT_BEFORE_SHUTDOWN
};
sysmon->shutdown_acked = false;
mutex_lock(&sysmon->state_lock);
sysmon->state = SSCTL_SSR_EVENT_BEFORE_SHUTDOWN;
blocking_notifier_call_chain(&sysmon_notifiers, 0, (void *)&event);
mutex_unlock(&sysmon->state_lock);
/* Don't request graceful shutdown if we've crashed */
if (crashed)
return;
if (sysmon->ssctl_instance) {
if (!wait_for_completion_timeout(&sysmon->ssctl_comp, HZ / 2))
dev_err(sysmon->dev, "timeout waiting for ssctl service\n");
}
if (sysmon->ssctl_version)
sysmon->shutdown_acked = ssctl_request_shutdown(sysmon);
else if (sysmon->ept)
sysmon->shutdown_acked = sysmon_request_shutdown(sysmon);
}
static void sysmon_unprepare(struct rproc_subdev *subdev)
{
struct qcom_sysmon *sysmon = container_of(subdev, struct qcom_sysmon,
subdev);
struct sysmon_event event = {
.subsys_name = sysmon->name,
.ssr_event = SSCTL_SSR_EVENT_AFTER_SHUTDOWN
};
mutex_lock(&sysmon->state_lock);
sysmon->state = SSCTL_SSR_EVENT_AFTER_SHUTDOWN;
blocking_notifier_call_chain(&sysmon_notifiers, 0, (void *)&event);
mutex_unlock(&sysmon->state_lock);
}
/**
* sysmon_notify() - notify sysmon target of another's SSR
* @nb: notifier_block associated with sysmon instance
* @event: unused
* @data: SSR identifier of the remote that is going down
*/
static int sysmon_notify(struct notifier_block *nb, unsigned long event,
void *data)
{
struct qcom_sysmon *sysmon = container_of(nb, struct qcom_sysmon, nb);
struct sysmon_event *sysmon_event = data;
/* Skip non-running rprocs and the originating instance */
if (sysmon->state != SSCTL_SSR_EVENT_AFTER_POWERUP ||
!strcmp(sysmon_event->subsys_name, sysmon->name)) {
dev_dbg(sysmon->dev, "not notifying %s\n", sysmon->name);
return NOTIFY_DONE;
}
/* Only SSCTL version 2 supports SSR events */
if (sysmon->ssctl_version == 2)
ssctl_send_event(sysmon, sysmon_event);
else if (sysmon->ept)
sysmon_send_event(sysmon, sysmon_event);
return NOTIFY_DONE;
}
static irqreturn_t sysmon_shutdown_interrupt(int irq, void *data)
{
struct qcom_sysmon *sysmon = data;
complete(&sysmon->shutdown_comp);
return IRQ_HANDLED;
}
/**
* qcom_add_sysmon_subdev() - create a sysmon subdev for the given remoteproc
* @rproc: rproc context to associate the subdev with
* @name: name of this subdev, to use in SSR
* @ssctl_instance: instance id of the ssctl QMI service
*
* Return: A new qcom_sysmon object, or NULL on failure
*/
struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc,
const char *name,
int ssctl_instance)
{
struct qcom_sysmon *sysmon;
int ret;
sysmon = kzalloc(sizeof(*sysmon), GFP_KERNEL);
if (!sysmon)
return ERR_PTR(-ENOMEM);
sysmon->dev = rproc->dev.parent;
sysmon->rproc = rproc;
sysmon->name = name;
sysmon->ssctl_instance = ssctl_instance;
init_completion(&sysmon->comp);
init_completion(&sysmon->ind_comp);
init_completion(&sysmon->shutdown_comp);
init_completion(&sysmon->ssctl_comp);
mutex_init(&sysmon->lock);
mutex_init(&sysmon->state_lock);
sysmon->shutdown_irq = of_irq_get_byname(sysmon->dev->of_node,
"shutdown-ack");
if (sysmon->shutdown_irq < 0) {
if (sysmon->shutdown_irq != -ENODATA) {
dev_err(sysmon->dev,
"failed to retrieve shutdown-ack IRQ\n");
ret = sysmon->shutdown_irq;
kfree(sysmon);
return ERR_PTR(ret);
}
} else {
ret = devm_request_threaded_irq(sysmon->dev,
sysmon->shutdown_irq,
NULL, sysmon_shutdown_interrupt,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
"q6v5 shutdown-ack", sysmon);
if (ret) {
dev_err(sysmon->dev,
"failed to acquire shutdown-ack IRQ\n");
kfree(sysmon);
return ERR_PTR(ret);
}
}
ret = qmi_handle_init(&sysmon->qmi, SSCTL_MAX_MSG_LEN, &ssctl_ops,
qmi_indication_handler);
if (ret < 0) {
dev_err(sysmon->dev, "failed to initialize qmi handle\n");
kfree(sysmon);
return ERR_PTR(ret);
}
qmi_add_lookup(&sysmon->qmi, 43, 0, 0);
sysmon->subdev.prepare = sysmon_prepare;
sysmon->subdev.start = sysmon_start;
sysmon->subdev.stop = sysmon_stop;
sysmon->subdev.unprepare = sysmon_unprepare;
rproc_add_subdev(rproc, &sysmon->subdev);
sysmon->nb.notifier_call = sysmon_notify;
blocking_notifier_chain_register(&sysmon_notifiers, &sysmon->nb);
mutex_lock(&sysmon_lock);
list_add(&sysmon->node, &sysmon_list);
mutex_unlock(&sysmon_lock);
return sysmon;
}
EXPORT_SYMBOL_GPL(qcom_add_sysmon_subdev);
/**
* qcom_remove_sysmon_subdev() - release a qcom_sysmon
* @sysmon: sysmon context, as retrieved by qcom_add_sysmon_subdev()
*/
void qcom_remove_sysmon_subdev(struct qcom_sysmon *sysmon)
{
if (!sysmon)
return;
mutex_lock(&sysmon_lock);
list_del(&sysmon->node);
mutex_unlock(&sysmon_lock);
blocking_notifier_chain_unregister(&sysmon_notifiers, &sysmon->nb);
rproc_remove_subdev(sysmon->rproc, &sysmon->subdev);
qmi_handle_release(&sysmon->qmi);
kfree(sysmon);
}
EXPORT_SYMBOL_GPL(qcom_remove_sysmon_subdev);
/**
* qcom_sysmon_shutdown_acked() - query the success of the last shutdown
* @sysmon: sysmon context
*
* When sysmon is used to request a graceful shutdown of the remote processor
* this can be used by the remoteproc driver to query the success, in order to
* know if it should fall back to other means of requesting a shutdown.
*
* Return: boolean indicator of the success of the last shutdown request
*/
bool qcom_sysmon_shutdown_acked(struct qcom_sysmon *sysmon)
{
return sysmon && sysmon->shutdown_acked;
}
EXPORT_SYMBOL_GPL(qcom_sysmon_shutdown_acked);
/**
* sysmon_probe() - probe sys_mon channel
* @rpdev: rpmsg device handle
*
* Find the sysmon context associated with the ancestor remoteproc and associate
* this rpmsg device with that sysmon context.
*
* Return: 0 on success, negative errno on failure.
*/
static int sysmon_probe(struct rpmsg_device *rpdev)
{
struct qcom_sysmon *sysmon;
struct rproc *rproc;
rproc = rproc_get_by_child(&rpdev->dev);
if (!rproc) {
dev_err(&rpdev->dev, "sysmon device not child of rproc\n");
return -EINVAL;
}
mutex_lock(&sysmon_lock);
list_for_each_entry(sysmon, &sysmon_list, node) {
if (sysmon->rproc == rproc)
goto found;
}
mutex_unlock(&sysmon_lock);
dev_err(&rpdev->dev, "no sysmon associated with parent rproc\n");
return -EINVAL;
found:
mutex_unlock(&sysmon_lock);
rpdev->ept->priv = sysmon;
sysmon->ept = rpdev->ept;
return 0;
}
/**
* sysmon_remove() - sys_mon channel remove handler
* @rpdev: rpmsg device handle
*
* Disassociate the rpmsg device with the sysmon instance.
*/
static void sysmon_remove(struct rpmsg_device *rpdev)
{
struct qcom_sysmon *sysmon = rpdev->ept->priv;
sysmon->ept = NULL;
}
static const struct rpmsg_device_id sysmon_match[] = {
{ "sys_mon" },
{}
};
static struct rpmsg_driver sysmon_driver = {
.probe = sysmon_probe,
.remove = sysmon_remove,
.callback = sysmon_callback,
.id_table = sysmon_match,
.drv = {
.name = "qcom_sysmon",
},
};
module_rpmsg_driver(sysmon_driver);
MODULE_DESCRIPTION("Qualcomm sysmon driver");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/remoteproc/qcom_sysmon.c
|
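The exported qcom_add_sysmon_subdev(), qcom_remove_sysmon_subdev() and qcom_sysmon_shutdown_acked() helpers above are intended to be called from a Qualcomm remoteproc platform driver. A minimal sketch of that wiring follows; the driver structure, the "adsp" SSR name and the SSCTL instance value are hypothetical placeholders:

#include <linux/err.h>
#include <linux/printk.h>
#include <linux/remoteproc.h>
#include "qcom_common.h"	/* declares the sysmon subdev helpers */

struct my_qcom_rproc {			/* hypothetical driver state */
	struct rproc *rproc;
	struct qcom_sysmon *sysmon;
};

static int my_add_sysmon(struct my_qcom_rproc *p)
{
	/* "adsp" and the instance id are placeholders for the real values */
	p->sysmon = qcom_add_sysmon_subdev(p->rproc, "adsp", 0x1);
	return IS_ERR(p->sysmon) ? PTR_ERR(p->sysmon) : 0;
}

static void my_after_stop(struct my_qcom_rproc *p)
{
	/* decide whether a harder reset is needed after a graceful stop */
	if (!qcom_sysmon_shutdown_acked(p->sysmon))
		pr_warn("remote did not ack graceful shutdown\n");
}

static void my_remove_sysmon(struct my_qcom_rproc *p)
{
	qcom_remove_sysmon_subdev(p->sysmon);
}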
// SPDX-License-Identifier: GPL-2.0-only
/*
* Remote Processor Framework
*/
#include <linux/remoteproc.h>
#include <linux/slab.h>
#include "remoteproc_internal.h"
#define to_rproc(d) container_of(d, struct rproc, dev)
static ssize_t recovery_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct rproc *rproc = to_rproc(dev);
return sysfs_emit(buf, "%s", rproc->recovery_disabled ? "disabled\n" : "enabled\n");
}
/*
* By writing to the 'recovery' sysfs entry, we control the behavior of the
* recovery mechanism dynamically. The default value of this entry is "enabled".
*
* The 'recovery' sysfs entry supports these commands:
*
* enabled: When enabled, the remote processor will be automatically
* recovered whenever it crashes. Moreover, if the remote
* processor crashes while recovery is disabled, it will
* be automatically recovered too as soon as recovery is enabled.
*
* disabled: When disabled, a remote processor will remain in a crashed
* state if it crashes. This is useful for debugging purposes;
* without it, debugging a crash is substantially harder.
*
* recover: This command will trigger an immediate recovery if the
* remote processor is in a crashed state, without changing
* or checking the recovery state (enabled/disabled).
* This is useful during debugging sessions, when one expects
* additional crashes to happen after enabling recovery. In this
* case, enabling recovery will make it hard to debug subsequent
* crashes, so it's recommended to keep recovery disabled, and
* instead use the "recover" command as needed.
*/
static ssize_t recovery_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct rproc *rproc = to_rproc(dev);
if (sysfs_streq(buf, "enabled")) {
/* change the flag and begin the recovery process if needed */
rproc->recovery_disabled = false;
rproc_trigger_recovery(rproc);
} else if (sysfs_streq(buf, "disabled")) {
rproc->recovery_disabled = true;
} else if (sysfs_streq(buf, "recover")) {
/* begin the recovery process without changing the flag */
rproc_trigger_recovery(rproc);
} else {
return -EINVAL;
}
return count;
}
static DEVICE_ATTR_RW(recovery);
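/*
* Typical usage from user space (illustrative; the instance number and paths
* depend on the system):
*
*   echo disabled > /sys/class/remoteproc/remoteproc0/recovery
*   ...debug the crashed firmware...
*   echo recover > /sys/class/remoteproc/remoteproc0/recovery
*/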
/*
* A coredump-configuration-to-string lookup table, for exposing a
* human readable configuration via sysfs. Always keep in sync with
* enum rproc_coredump_mechanism
*/
static const char * const rproc_coredump_str[] = {
[RPROC_COREDUMP_DISABLED] = "disabled",
[RPROC_COREDUMP_ENABLED] = "enabled",
[RPROC_COREDUMP_INLINE] = "inline",
};
/* Expose the current coredump configuration via sysfs */
static ssize_t coredump_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct rproc *rproc = to_rproc(dev);
return sysfs_emit(buf, "%s\n", rproc_coredump_str[rproc->dump_conf]);
}
/*
* By writing to the 'coredump' sysfs entry, we control the behavior of the
* coredump mechanism dynamically. The default value of this entry is "disabled".
*
* The 'coredump' sysfs entry supports these commands:
*
* disabled: This is the default coredump mechanism. Recovery will proceed
* without collecting any dump.
*
* enabled: When the remote processor crashes, the entire coredump will be
* copied to a separate buffer and exposed to userspace.
*
* inline: The coredump will not be copied to a separate buffer and the
* recovery process will have to wait until data is read by
* userspace. But this avoid usage of extra memory.
*/
static ssize_t coredump_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct rproc *rproc = to_rproc(dev);
if (rproc->state == RPROC_CRASHED) {
dev_err(&rproc->dev, "can't change coredump configuration\n");
return -EBUSY;
}
if (sysfs_streq(buf, "disabled")) {
rproc->dump_conf = RPROC_COREDUMP_DISABLED;
} else if (sysfs_streq(buf, "enabled")) {
rproc->dump_conf = RPROC_COREDUMP_ENABLED;
} else if (sysfs_streq(buf, "inline")) {
rproc->dump_conf = RPROC_COREDUMP_INLINE;
} else {
dev_err(&rproc->dev, "Invalid coredump configuration\n");
return -EINVAL;
}
return count;
}
static DEVICE_ATTR_RW(coredump);
/* Expose the loaded / running firmware name via sysfs */
static ssize_t firmware_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct rproc *rproc = to_rproc(dev);
const char *firmware = rproc->firmware;
/*
* If the remote processor has been started by an external
 * entity we have no idea of what image it is running. As such,
 * simply display a generic string rather than rproc->firmware.
*/
if (rproc->state == RPROC_ATTACHED)
firmware = "unknown";
return sprintf(buf, "%s\n", firmware);
}
/* Change firmware name via sysfs */
static ssize_t firmware_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct rproc *rproc = to_rproc(dev);
int err;
err = rproc_set_firmware(rproc, buf);
return err ? err : count;
}
static DEVICE_ATTR_RW(firmware);
/*
* A state-to-string lookup table, for exposing a human readable state
* via sysfs. Always keep in sync with enum rproc_state
*/
static const char * const rproc_state_string[] = {
[RPROC_OFFLINE] = "offline",
[RPROC_SUSPENDED] = "suspended",
[RPROC_RUNNING] = "running",
[RPROC_CRASHED] = "crashed",
[RPROC_DELETED] = "deleted",
[RPROC_ATTACHED] = "attached",
[RPROC_DETACHED] = "detached",
[RPROC_LAST] = "invalid",
};
/* Expose the state of the remote processor via sysfs */
static ssize_t state_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct rproc *rproc = to_rproc(dev);
unsigned int state;
state = rproc->state > RPROC_LAST ? RPROC_LAST : rproc->state;
return sprintf(buf, "%s\n", rproc_state_string[state]);
}
/* Change remote processor state via sysfs */
static ssize_t state_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct rproc *rproc = to_rproc(dev);
int ret = 0;
if (sysfs_streq(buf, "start")) {
ret = rproc_boot(rproc);
if (ret)
dev_err(&rproc->dev, "Boot failed: %d\n", ret);
} else if (sysfs_streq(buf, "stop")) {
ret = rproc_shutdown(rproc);
} else if (sysfs_streq(buf, "detach")) {
ret = rproc_detach(rproc);
} else {
dev_err(&rproc->dev, "Unrecognised option: %s\n", buf);
ret = -EINVAL;
}
return ret ? ret : count;
}
static DEVICE_ATTR_RW(state);
/* Expose the name of the remote processor via sysfs */
static ssize_t name_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct rproc *rproc = to_rproc(dev);
return sprintf(buf, "%s\n", rproc->name);
}
static DEVICE_ATTR_RO(name);
static umode_t rproc_is_visible(struct kobject *kobj, struct attribute *attr,
int n)
{
struct device *dev = kobj_to_dev(kobj);
struct rproc *rproc = to_rproc(dev);
umode_t mode = attr->mode;
if (rproc->sysfs_read_only && (attr == &dev_attr_recovery.attr ||
attr == &dev_attr_firmware.attr ||
attr == &dev_attr_state.attr ||
attr == &dev_attr_coredump.attr))
mode = 0444;
return mode;
}
static struct attribute *rproc_attrs[] = {
&dev_attr_coredump.attr,
&dev_attr_recovery.attr,
&dev_attr_firmware.attr,
&dev_attr_state.attr,
&dev_attr_name.attr,
NULL
};
static const struct attribute_group rproc_devgroup = {
.attrs = rproc_attrs,
.is_visible = rproc_is_visible,
};
static const struct attribute_group *rproc_devgroups[] = {
&rproc_devgroup,
NULL
};
struct class rproc_class = {
.name = "remoteproc",
.dev_groups = rproc_devgroups,
};
int __init rproc_init_sysfs(void)
{
/* create remoteproc device class for sysfs */
int err = class_register(&rproc_class);
if (err)
pr_err("remoteproc: unable to register class\n");
return err;
}
void __exit rproc_exit_sysfs(void)
{
class_unregister(&rproc_class);
}
|
linux-master
|
drivers/remoteproc/remoteproc_sysfs.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Remote Processor Framework ELF loader
*
* Copyright (C) 2011 Texas Instruments, Inc.
* Copyright (C) 2011 Google, Inc.
*
* Ohad Ben-Cohen <[email protected]>
* Brian Swetland <[email protected]>
* Mark Grosen <[email protected]>
* Fernando Guzman Lugo <[email protected]>
* Suman Anna <[email protected]>
* Robert Tivy <[email protected]>
* Armando Uribe De Leon <[email protected]>
* Sjur Brændeland <[email protected]>
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/remoteproc.h>
#include <linux/elf.h>
#include "remoteproc_internal.h"
#include "remoteproc_elf_helpers.h"
/**
* rproc_elf_sanity_check() - Sanity Check for ELF32/ELF64 firmware image
* @rproc: the remote processor handle
* @fw: the ELF firmware image
*
 * Make sure this fw image is sane (i.e. a valid ELF32/ELF64 file).
*
* Return: 0 on success and -EINVAL upon any failure
*/
int rproc_elf_sanity_check(struct rproc *rproc, const struct firmware *fw)
{
const char *name = rproc->firmware;
struct device *dev = &rproc->dev;
/*
 * ELF32 and ELF64 files begin with the same identification bytes. Thus,
 * to simplify header parsing, we can use struct elf32_hdr for both
 * elf64 and elf32.
*/
struct elf32_hdr *ehdr;
u32 elf_shdr_get_size;
u64 phoff, shoff;
char class;
u16 phnum;
if (!fw) {
dev_err(dev, "failed to load %s\n", name);
return -EINVAL;
}
if (fw->size < sizeof(struct elf32_hdr)) {
dev_err(dev, "Image is too small\n");
return -EINVAL;
}
ehdr = (struct elf32_hdr *)fw->data;
if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
dev_err(dev, "Image is corrupted (bad magic)\n");
return -EINVAL;
}
class = ehdr->e_ident[EI_CLASS];
if (class != ELFCLASS32 && class != ELFCLASS64) {
dev_err(dev, "Unsupported class: %d\n", class);
return -EINVAL;
}
if (class == ELFCLASS64 && fw->size < sizeof(struct elf64_hdr)) {
dev_err(dev, "elf64 header is too small\n");
return -EINVAL;
}
/* We assume the firmware has the same endianness as the host */
# ifdef __LITTLE_ENDIAN
if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB) {
# else /* BIG ENDIAN */
if (ehdr->e_ident[EI_DATA] != ELFDATA2MSB) {
# endif
dev_err(dev, "Unsupported firmware endianness\n");
return -EINVAL;
}
phoff = elf_hdr_get_e_phoff(class, fw->data);
shoff = elf_hdr_get_e_shoff(class, fw->data);
phnum = elf_hdr_get_e_phnum(class, fw->data);
elf_shdr_get_size = elf_size_of_shdr(class);
if (fw->size < shoff + elf_shdr_get_size) {
dev_err(dev, "Image is too small\n");
return -EINVAL;
}
if (phnum == 0) {
dev_err(dev, "No loadable segments\n");
return -EINVAL;
}
if (phoff > fw->size) {
dev_err(dev, "Firmware size is too small\n");
return -EINVAL;
}
dev_dbg(dev, "Firmware is an elf%d file\n",
class == ELFCLASS32 ? 32 : 64);
return 0;
}
EXPORT_SYMBOL(rproc_elf_sanity_check);
/**
* rproc_elf_get_boot_addr() - Get rproc's boot address.
* @rproc: the remote processor handle
* @fw: the ELF firmware image
*
* Note that the boot address is not a configurable property of all remote
* processors. Some will always boot at a specific hard-coded address.
*
* Return: entry point address of the ELF image
*
*/
u64 rproc_elf_get_boot_addr(struct rproc *rproc, const struct firmware *fw)
{
return elf_hdr_get_e_entry(fw_elf_get_class(fw), fw->data);
}
EXPORT_SYMBOL(rproc_elf_get_boot_addr);
/**
* rproc_elf_load_segments() - load firmware segments to memory
* @rproc: remote processor which will be booted using these fw segments
* @fw: the ELF firmware image
*
* This function loads the firmware segments to memory, where the remote
* processor expects them.
*
* Some remote processors will expect their code and data to be placed
* in specific device addresses, and can't have them dynamically assigned.
*
* We currently support only those kind of remote processors, and expect
* the program header's paddr member to contain those addresses. We then go
* through the physically contiguous "carveout" memory regions which we
* allocated (and mapped) earlier on behalf of the remote processor,
* and "translate" device address to kernel addresses, so we can copy the
* segments where they are expected.
*
* Currently we only support remote processors that required carveout
* allocations and got them mapped onto their iommus. Some processors
* might be different: they might not have iommus, and would prefer to
* directly allocate memory for every segment/resource. This is not yet
* supported, though.
*
* Return: 0 on success and an appropriate error code otherwise
*/
int rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
{
struct device *dev = &rproc->dev;
const void *ehdr, *phdr;
int i, ret = 0;
u16 phnum;
const u8 *elf_data = fw->data;
u8 class = fw_elf_get_class(fw);
u32 elf_phdr_get_size = elf_size_of_phdr(class);
ehdr = elf_data;
phnum = elf_hdr_get_e_phnum(class, ehdr);
phdr = elf_data + elf_hdr_get_e_phoff(class, ehdr);
/* go through the available ELF segments */
for (i = 0; i < phnum; i++, phdr += elf_phdr_get_size) {
u64 da = elf_phdr_get_p_paddr(class, phdr);
u64 memsz = elf_phdr_get_p_memsz(class, phdr);
u64 filesz = elf_phdr_get_p_filesz(class, phdr);
u64 offset = elf_phdr_get_p_offset(class, phdr);
u32 type = elf_phdr_get_p_type(class, phdr);
bool is_iomem = false;
void *ptr;
if (type != PT_LOAD || !memsz)
continue;
dev_dbg(dev, "phdr: type %d da 0x%llx memsz 0x%llx filesz 0x%llx\n",
type, da, memsz, filesz);
if (filesz > memsz) {
dev_err(dev, "bad phdr filesz 0x%llx memsz 0x%llx\n",
filesz, memsz);
ret = -EINVAL;
break;
}
if (offset + filesz > fw->size) {
dev_err(dev, "truncated fw: need 0x%llx avail 0x%zx\n",
offset + filesz, fw->size);
ret = -EINVAL;
break;
}
if (!rproc_u64_fit_in_size_t(memsz)) {
dev_err(dev, "size (%llx) does not fit in size_t type\n",
memsz);
ret = -EOVERFLOW;
break;
}
/* grab the kernel address for this device address */
ptr = rproc_da_to_va(rproc, da, memsz, &is_iomem);
if (!ptr) {
dev_err(dev, "bad phdr da 0x%llx mem 0x%llx\n", da,
memsz);
ret = -EINVAL;
break;
}
/* put the segment where the remote processor expects it */
if (filesz) {
if (is_iomem)
memcpy_toio((void __iomem *)ptr, elf_data + offset, filesz);
else
memcpy(ptr, elf_data + offset, filesz);
}
/*
* Zero out remaining memory for this segment.
*
 * This isn't strictly required since dma_alloc_coherent already
 * did this for us. Albeit harmless, we may consider removing
 * this.
*/
if (memsz > filesz) {
if (is_iomem)
memset_io((void __iomem *)(ptr + filesz), 0, memsz - filesz);
else
memset(ptr + filesz, 0, memsz - filesz);
}
}
return ret;
}
EXPORT_SYMBOL(rproc_elf_load_segments);
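/*
 * Worked example (editorial addition): assume a single PT_LOAD segment with
 * p_paddr (da) = 0x80000000, p_filesz = 0x1000 and p_memsz = 0x4000, and a
 * carveout that rproc_da_to_va() resolves to the kernel address 'ptr'. The
 * loop above then performs, in effect:
 *
 *	memcpy(ptr, elf_data + offset, 0x1000);	   // file-backed bytes
 *	memset(ptr + 0x1000, 0, 0x4000 - 0x1000);  // .bss-style tail
 *
 * The numbers are illustrative only; real firmwares typically carry several
 * PT_LOAD segments, one per carveout region.
 */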
static const void *
find_table(struct device *dev, const struct firmware *fw)
{
const void *shdr, *name_table_shdr;
int i;
const char *name_table;
struct resource_table *table = NULL;
const u8 *elf_data = (void *)fw->data;
u8 class = fw_elf_get_class(fw);
size_t fw_size = fw->size;
const void *ehdr = elf_data;
u16 shnum = elf_hdr_get_e_shnum(class, ehdr);
u32 elf_shdr_get_size = elf_size_of_shdr(class);
u16 shstrndx = elf_hdr_get_e_shstrndx(class, ehdr);
/* look for the resource table and handle it */
/* First, get the section header according to the elf class */
shdr = elf_data + elf_hdr_get_e_shoff(class, ehdr);
/* Compute name table section header entry in shdr array */
name_table_shdr = shdr + (shstrndx * elf_shdr_get_size);
/* Finally, compute the name table section address in elf */
name_table = elf_data + elf_shdr_get_sh_offset(class, name_table_shdr);
for (i = 0; i < shnum; i++, shdr += elf_shdr_get_size) {
u64 size = elf_shdr_get_sh_size(class, shdr);
u64 offset = elf_shdr_get_sh_offset(class, shdr);
u32 name = elf_shdr_get_sh_name(class, shdr);
if (strcmp(name_table + name, ".resource_table"))
continue;
table = (struct resource_table *)(elf_data + offset);
/* make sure we have the entire table */
if (offset + size > fw_size || offset + size < size) {
dev_err(dev, "resource table truncated\n");
return NULL;
}
/* make sure table has at least the header */
if (sizeof(struct resource_table) > size) {
dev_err(dev, "header-less resource table\n");
return NULL;
}
/* we don't support any version beyond the first */
if (table->ver != 1) {
dev_err(dev, "unsupported fw ver: %d\n", table->ver);
return NULL;
}
/* make sure reserved bytes are zeroes */
if (table->reserved[0] || table->reserved[1]) {
dev_err(dev, "non zero reserved bytes\n");
return NULL;
}
/* make sure the offsets array isn't truncated */
if (struct_size(table, offset, table->num) > size) {
dev_err(dev, "resource table incomplete\n");
return NULL;
}
return shdr;
}
return NULL;
}
/**
* rproc_elf_load_rsc_table() - load the resource table
* @rproc: the rproc handle
* @fw: the ELF firmware image
*
 * This function finds the resource table inside the remote processor's
 * firmware, loads it into @cached_table and updates @table_ptr.
*
* Return: 0 on success, negative errno on failure.
*/
int rproc_elf_load_rsc_table(struct rproc *rproc, const struct firmware *fw)
{
const void *shdr;
struct device *dev = &rproc->dev;
struct resource_table *table = NULL;
const u8 *elf_data = fw->data;
size_t tablesz;
u8 class = fw_elf_get_class(fw);
u64 sh_offset;
shdr = find_table(dev, fw);
if (!shdr)
return -EINVAL;
sh_offset = elf_shdr_get_sh_offset(class, shdr);
table = (struct resource_table *)(elf_data + sh_offset);
tablesz = elf_shdr_get_sh_size(class, shdr);
/*
* Create a copy of the resource table. When a virtio device starts
* and calls vring_new_virtqueue() the address of the allocated vring
* will be stored in the cached_table. Before the device is started,
* cached_table will be copied into device memory.
*/
rproc->cached_table = kmemdup(table, tablesz, GFP_KERNEL);
if (!rproc->cached_table)
return -ENOMEM;
rproc->table_ptr = rproc->cached_table;
rproc->table_sz = tablesz;
return 0;
}
EXPORT_SYMBOL(rproc_elf_load_rsc_table);
/**
* rproc_elf_find_loaded_rsc_table() - find the loaded resource table
* @rproc: the rproc handle
* @fw: the ELF firmware image
*
* This function finds the location of the loaded resource table. Don't
* call this function if the table wasn't loaded yet - it's a bug if you do.
*
* Return: pointer to the resource table if it is found or NULL otherwise.
* If the table wasn't loaded yet the result is unspecified.
*/
struct resource_table *rproc_elf_find_loaded_rsc_table(struct rproc *rproc,
const struct firmware *fw)
{
const void *shdr;
u64 sh_addr, sh_size;
u8 class = fw_elf_get_class(fw);
struct device *dev = &rproc->dev;
shdr = find_table(&rproc->dev, fw);
if (!shdr)
return NULL;
sh_addr = elf_shdr_get_sh_addr(class, shdr);
sh_size = elf_shdr_get_sh_size(class, shdr);
if (!rproc_u64_fit_in_size_t(sh_size)) {
dev_err(dev, "size (%llx) does not fit in size_t type\n",
sh_size);
return NULL;
}
return rproc_da_to_va(rproc, sh_addr, sh_size, NULL);
}
EXPORT_SYMBOL(rproc_elf_find_loaded_rsc_table);
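/*
 * Usage sketch (editorial addition): platform drivers normally wire these
 * ELF helpers straight into their rproc_ops instead of open-coding a loader.
 * The field set below is an illustrative minimum, not a complete ops table,
 * and example_start/example_stop are hypothetical driver callbacks:
 *
 *	static const struct rproc_ops example_rproc_ops = {
 *		.load			= rproc_elf_load_segments,
 *		.parse_fw		= rproc_elf_load_rsc_table,
 *		.find_loaded_rsc_table	= rproc_elf_find_loaded_rsc_table,
 *		.sanity_check		= rproc_elf_sanity_check,
 *		.get_boot_addr		= rproc_elf_get_boot_addr,
 *		.start			= example_start,
 *		.stop			= example_stop,
 *	};
 *
 * st_remoteproc.c below uses exactly this pattern, adding only its own
 * start/stop/kick/parse_fw callbacks.
 */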
|
linux-master
|
drivers/remoteproc/remoteproc_elf_loader.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* ST's Remote Processor Control Driver
*
* Copyright (C) 2015 STMicroelectronics - All Rights Reserved
*
* Author: Ludovic Barre <[email protected]>
*/
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/remoteproc.h>
#include <linux/reset.h>
#include "remoteproc_internal.h"
#define ST_RPROC_VQ0 0
#define ST_RPROC_VQ1 1
#define ST_RPROC_MAX_VRING 2
#define MBOX_RX 0
#define MBOX_TX 1
#define MBOX_MAX 2
struct st_rproc_config {
bool sw_reset;
bool pwr_reset;
unsigned long bootaddr_mask;
};
struct st_rproc {
struct st_rproc_config *config;
struct reset_control *sw_reset;
struct reset_control *pwr_reset;
struct clk *clk;
u32 clk_rate;
struct regmap *boot_base;
u32 boot_offset;
struct mbox_chan *mbox_chan[ST_RPROC_MAX_VRING * MBOX_MAX];
struct mbox_client mbox_client_vq0;
struct mbox_client mbox_client_vq1;
};
static void st_rproc_mbox_callback(struct device *dev, u32 msg)
{
struct rproc *rproc = dev_get_drvdata(dev);
if (rproc_vq_interrupt(rproc, msg) == IRQ_NONE)
dev_dbg(dev, "no message was found in vqid %d\n", msg);
}
static
void st_rproc_mbox_callback_vq0(struct mbox_client *mbox_client, void *data)
{
st_rproc_mbox_callback(mbox_client->dev, 0);
}
static
void st_rproc_mbox_callback_vq1(struct mbox_client *mbox_client, void *data)
{
st_rproc_mbox_callback(mbox_client->dev, 1);
}
static void st_rproc_kick(struct rproc *rproc, int vqid)
{
struct st_rproc *ddata = rproc->priv;
struct device *dev = rproc->dev.parent;
int ret;
/* send the index of the triggered virtqueue in the mailbox payload */
if (WARN_ON(vqid >= ST_RPROC_MAX_VRING))
return;
ret = mbox_send_message(ddata->mbox_chan[vqid * MBOX_MAX + MBOX_TX],
(void *)&vqid);
if (ret < 0)
dev_err(dev, "failed to send message via mbox: %d\n", ret);
}
static int st_rproc_mem_alloc(struct rproc *rproc,
struct rproc_mem_entry *mem)
{
struct device *dev = rproc->dev.parent;
void *va;
va = ioremap_wc(mem->dma, mem->len);
if (!va) {
dev_err(dev, "Unable to map memory region: %pa+%zx\n",
&mem->dma, mem->len);
return -ENOMEM;
}
/* Update memory entry va */
mem->va = va;
return 0;
}
static int st_rproc_mem_release(struct rproc *rproc,
struct rproc_mem_entry *mem)
{
iounmap(mem->va);
return 0;
}
static int st_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
{
struct device *dev = rproc->dev.parent;
struct device_node *np = dev->of_node;
struct rproc_mem_entry *mem;
struct reserved_mem *rmem;
struct of_phandle_iterator it;
int index = 0;
of_phandle_iterator_init(&it, np, "memory-region", NULL, 0);
while (of_phandle_iterator_next(&it) == 0) {
rmem = of_reserved_mem_lookup(it.node);
if (!rmem) {
of_node_put(it.node);
dev_err(dev, "unable to acquire memory-region\n");
return -EINVAL;
}
/* No need to map vdev buffer */
if (strcmp(it.node->name, "vdev0buffer")) {
/* Register memory region */
mem = rproc_mem_entry_init(dev, NULL,
(dma_addr_t)rmem->base,
rmem->size, rmem->base,
st_rproc_mem_alloc,
st_rproc_mem_release,
it.node->name);
} else {
/* Register reserved memory for vdev buffer allocation */
mem = rproc_of_resm_mem_entry_init(dev, index,
rmem->size,
rmem->base,
it.node->name);
}
if (!mem) {
of_node_put(it.node);
return -ENOMEM;
}
rproc_add_carveout(rproc, mem);
index++;
}
return rproc_elf_load_rsc_table(rproc, fw);
}
static int st_rproc_start(struct rproc *rproc)
{
struct st_rproc *ddata = rproc->priv;
int err;
regmap_update_bits(ddata->boot_base, ddata->boot_offset,
ddata->config->bootaddr_mask, rproc->bootaddr);
err = clk_enable(ddata->clk);
if (err) {
dev_err(&rproc->dev, "Failed to enable clock\n");
return err;
}
if (ddata->config->sw_reset) {
err = reset_control_deassert(ddata->sw_reset);
if (err) {
dev_err(&rproc->dev, "Failed to deassert S/W Reset\n");
goto sw_reset_fail;
}
}
if (ddata->config->pwr_reset) {
err = reset_control_deassert(ddata->pwr_reset);
if (err) {
dev_err(&rproc->dev, "Failed to deassert Power Reset\n");
goto pwr_reset_fail;
}
}
dev_info(&rproc->dev, "Started from 0x%llx\n", rproc->bootaddr);
return 0;
pwr_reset_fail:
if (ddata->config->pwr_reset)
reset_control_assert(ddata->sw_reset);
sw_reset_fail:
clk_disable(ddata->clk);
return err;
}
static int st_rproc_stop(struct rproc *rproc)
{
struct st_rproc *ddata = rproc->priv;
int sw_err = 0, pwr_err = 0;
if (ddata->config->sw_reset) {
sw_err = reset_control_assert(ddata->sw_reset);
if (sw_err)
dev_err(&rproc->dev, "Failed to assert S/W Reset\n");
}
if (ddata->config->pwr_reset) {
pwr_err = reset_control_assert(ddata->pwr_reset);
if (pwr_err)
dev_err(&rproc->dev, "Failed to assert Power Reset\n");
}
clk_disable(ddata->clk);
return sw_err ?: pwr_err;
}
static const struct rproc_ops st_rproc_ops = {
.kick = st_rproc_kick,
.start = st_rproc_start,
.stop = st_rproc_stop,
.parse_fw = st_rproc_parse_fw,
.load = rproc_elf_load_segments,
.find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
.sanity_check = rproc_elf_sanity_check,
.get_boot_addr = rproc_elf_get_boot_addr,
};
/*
* Fetch state of the processor: 0 is off, 1 is on.
*/
static int st_rproc_state(struct platform_device *pdev)
{
struct rproc *rproc = platform_get_drvdata(pdev);
struct st_rproc *ddata = rproc->priv;
int reset_sw = 0, reset_pwr = 0;
if (ddata->config->sw_reset)
reset_sw = reset_control_status(ddata->sw_reset);
if (ddata->config->pwr_reset)
reset_pwr = reset_control_status(ddata->pwr_reset);
if (reset_sw < 0 || reset_pwr < 0)
return -EINVAL;
return !reset_sw && !reset_pwr;
}
static const struct st_rproc_config st40_rproc_cfg = {
.sw_reset = true,
.pwr_reset = true,
.bootaddr_mask = GENMASK(28, 1),
};
static const struct st_rproc_config st231_rproc_cfg = {
.sw_reset = true,
.pwr_reset = false,
.bootaddr_mask = GENMASK(31, 6),
};
static const struct of_device_id st_rproc_match[] = {
{ .compatible = "st,st40-rproc", .data = &st40_rproc_cfg },
{ .compatible = "st,st231-rproc", .data = &st231_rproc_cfg },
{},
};
MODULE_DEVICE_TABLE(of, st_rproc_match);
static int st_rproc_parse_dt(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rproc *rproc = platform_get_drvdata(pdev);
struct st_rproc *ddata = rproc->priv;
struct device_node *np = dev->of_node;
int err;
if (ddata->config->sw_reset) {
ddata->sw_reset = devm_reset_control_get_exclusive(dev,
"sw_reset");
if (IS_ERR(ddata->sw_reset)) {
dev_err(dev, "Failed to get S/W Reset\n");
return PTR_ERR(ddata->sw_reset);
}
}
if (ddata->config->pwr_reset) {
ddata->pwr_reset = devm_reset_control_get_exclusive(dev,
"pwr_reset");
if (IS_ERR(ddata->pwr_reset)) {
dev_err(dev, "Failed to get Power Reset\n");
return PTR_ERR(ddata->pwr_reset);
}
}
ddata->clk = devm_clk_get(dev, NULL);
if (IS_ERR(ddata->clk)) {
dev_err(dev, "Failed to get clock\n");
return PTR_ERR(ddata->clk);
}
err = of_property_read_u32(np, "clock-frequency", &ddata->clk_rate);
if (err) {
dev_err(dev, "failed to get clock frequency\n");
return err;
}
ddata->boot_base = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
if (IS_ERR(ddata->boot_base)) {
dev_err(dev, "Boot base not found\n");
return PTR_ERR(ddata->boot_base);
}
err = of_property_read_u32_index(np, "st,syscfg", 1,
&ddata->boot_offset);
if (err) {
dev_err(dev, "Boot offset not found\n");
return -EINVAL;
}
err = clk_prepare(ddata->clk);
if (err)
dev_err(dev, "failed to get clock\n");
return err;
}
static int st_rproc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct of_device_id *match;
struct st_rproc *ddata;
struct device_node *np = dev->of_node;
struct rproc *rproc;
struct mbox_chan *chan;
int enabled;
int ret, i;
match = of_match_device(st_rproc_match, dev);
if (!match || !match->data) {
dev_err(dev, "No device match found\n");
return -ENODEV;
}
rproc = rproc_alloc(dev, np->name, &st_rproc_ops, NULL, sizeof(*ddata));
if (!rproc)
return -ENOMEM;
rproc->has_iommu = false;
ddata = rproc->priv;
ddata->config = (struct st_rproc_config *)match->data;
platform_set_drvdata(pdev, rproc);
ret = st_rproc_parse_dt(pdev);
if (ret)
goto free_rproc;
enabled = st_rproc_state(pdev);
if (enabled < 0) {
ret = enabled;
goto free_clk;
}
if (enabled) {
atomic_inc(&rproc->power);
rproc->state = RPROC_RUNNING;
} else {
clk_set_rate(ddata->clk, ddata->clk_rate);
}
if (of_property_present(np, "mbox-names")) {
ddata->mbox_client_vq0.dev = dev;
ddata->mbox_client_vq0.tx_done = NULL;
ddata->mbox_client_vq0.tx_block = false;
ddata->mbox_client_vq0.knows_txdone = false;
ddata->mbox_client_vq0.rx_callback = st_rproc_mbox_callback_vq0;
ddata->mbox_client_vq1.dev = dev;
ddata->mbox_client_vq1.tx_done = NULL;
ddata->mbox_client_vq1.tx_block = false;
ddata->mbox_client_vq1.knows_txdone = false;
ddata->mbox_client_vq1.rx_callback = st_rproc_mbox_callback_vq1;
/*
 * A co-processor can also be controlled without an IPC mechanism:
 * this driver can be used without mbox and rpmsg.
*/
chan = mbox_request_channel_byname(&ddata->mbox_client_vq0, "vq0_rx");
if (IS_ERR(chan)) {
dev_err(&rproc->dev, "failed to request mbox chan 0\n");
ret = PTR_ERR(chan);
goto free_clk;
}
ddata->mbox_chan[ST_RPROC_VQ0 * MBOX_MAX + MBOX_RX] = chan;
chan = mbox_request_channel_byname(&ddata->mbox_client_vq0, "vq0_tx");
if (IS_ERR(chan)) {
dev_err(&rproc->dev, "failed to request mbox chan 0\n");
ret = PTR_ERR(chan);
goto free_mbox;
}
ddata->mbox_chan[ST_RPROC_VQ0 * MBOX_MAX + MBOX_TX] = chan;
chan = mbox_request_channel_byname(&ddata->mbox_client_vq1, "vq1_rx");
if (IS_ERR(chan)) {
dev_err(&rproc->dev, "failed to request mbox chan 1\n");
ret = PTR_ERR(chan);
goto free_mbox;
}
ddata->mbox_chan[ST_RPROC_VQ1 * MBOX_MAX + MBOX_RX] = chan;
chan = mbox_request_channel_byname(&ddata->mbox_client_vq1, "vq1_tx");
if (IS_ERR(chan)) {
dev_err(&rproc->dev, "failed to request mbox chan 1\n");
ret = PTR_ERR(chan);
goto free_mbox;
}
ddata->mbox_chan[ST_RPROC_VQ1 * MBOX_MAX + MBOX_TX] = chan;
}
ret = rproc_add(rproc);
if (ret)
goto free_mbox;
return 0;
free_mbox:
for (i = 0; i < ST_RPROC_MAX_VRING * MBOX_MAX; i++)
mbox_free_channel(ddata->mbox_chan[i]);
free_clk:
clk_unprepare(ddata->clk);
free_rproc:
rproc_free(rproc);
return ret;
}
static void st_rproc_remove(struct platform_device *pdev)
{
struct rproc *rproc = platform_get_drvdata(pdev);
struct st_rproc *ddata = rproc->priv;
int i;
rproc_del(rproc);
clk_disable_unprepare(ddata->clk);
for (i = 0; i < ST_RPROC_MAX_VRING * MBOX_MAX; i++)
mbox_free_channel(ddata->mbox_chan[i]);
rproc_free(rproc);
}
static struct platform_driver st_rproc_driver = {
.probe = st_rproc_probe,
.remove_new = st_rproc_remove,
.driver = {
.name = "st-rproc",
.of_match_table = of_match_ptr(st_rproc_match),
},
};
module_platform_driver(st_rproc_driver);
MODULE_DESCRIPTION("ST Remote Processor Control Driver");
MODULE_AUTHOR("Ludovic Barre <[email protected]>");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/remoteproc/st_remoteproc.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Qualcomm Technology Inc. ADSP Peripheral Image Loader for SDM845.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/remoteproc.h>
#include <linux/reset.h>
#include <linux/soc/qcom/mdt_loader.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>
#include "qcom_common.h"
#include "qcom_pil_info.h"
#include "qcom_q6v5.h"
#include "remoteproc_internal.h"
/* time out value */
#define ACK_TIMEOUT 1000
#define ACK_TIMEOUT_US 1000000
#define BOOT_FSM_TIMEOUT 10000
/* mask values */
#define EVB_MASK GENMASK(27, 4)
/* QDSP6SS register offsets */
#define RST_EVB_REG 0x10
#define CORE_START_REG 0x400
#define BOOT_CMD_REG 0x404
#define BOOT_STATUS_REG 0x408
#define RET_CFG_REG 0x1C
/* TCSR register offsets */
#define LPASS_MASTER_IDLE_REG 0x8
#define LPASS_HALTACK_REG 0x4
#define LPASS_PWR_ON_REG 0x10
#define LPASS_HALTREQ_REG 0x0
#define SID_MASK_DEFAULT 0xF
#define QDSP6SS_XO_CBCR 0x38
#define QDSP6SS_CORE_CBCR 0x20
#define QDSP6SS_SLEEP_CBCR 0x3c
#define QCOM_Q6V5_RPROC_PROXY_PD_MAX 3
#define LPASS_BOOT_CORE_START BIT(0)
#define LPASS_BOOT_CMD_START BIT(0)
#define LPASS_EFUSE_Q6SS_EVB_SEL 0x0
struct adsp_pil_data {
int crash_reason_smem;
const char *firmware_name;
const char *ssr_name;
const char *sysmon_name;
int ssctl_id;
bool is_wpss;
bool has_iommu;
bool auto_boot;
const char **clk_ids;
int num_clks;
const char **proxy_pd_names;
const char *load_state;
};
struct qcom_adsp {
struct device *dev;
struct rproc *rproc;
struct qcom_q6v5 q6v5;
struct clk *xo;
int num_clks;
struct clk_bulk_data *clks;
void __iomem *qdsp6ss_base;
void __iomem *lpass_efuse;
struct reset_control *pdc_sync_reset;
struct reset_control *restart;
struct regmap *halt_map;
unsigned int halt_lpass;
int crash_reason_smem;
const char *info_name;
struct completion start_done;
struct completion stop_done;
phys_addr_t mem_phys;
phys_addr_t mem_reloc;
void *mem_region;
size_t mem_size;
bool has_iommu;
struct device *proxy_pds[QCOM_Q6V5_RPROC_PROXY_PD_MAX];
size_t proxy_pd_count;
struct qcom_rproc_glink glink_subdev;
struct qcom_rproc_ssr ssr_subdev;
struct qcom_sysmon *sysmon;
int (*shutdown)(struct qcom_adsp *adsp);
};
static int qcom_rproc_pds_attach(struct device *dev, struct qcom_adsp *adsp,
const char **pd_names)
{
struct device **devs = adsp->proxy_pds;
size_t num_pds = 0;
int ret;
int i;
if (!pd_names)
return 0;
/* Handle single power domain */
if (dev->pm_domain) {
devs[0] = dev;
pm_runtime_enable(dev);
return 1;
}
while (pd_names[num_pds])
num_pds++;
if (num_pds > ARRAY_SIZE(adsp->proxy_pds))
return -E2BIG;
for (i = 0; i < num_pds; i++) {
devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
if (IS_ERR_OR_NULL(devs[i])) {
ret = PTR_ERR(devs[i]) ? : -ENODATA;
goto unroll_attach;
}
}
return num_pds;
unroll_attach:
for (i--; i >= 0; i--)
dev_pm_domain_detach(devs[i], false);
return ret;
}
static void qcom_rproc_pds_detach(struct qcom_adsp *adsp, struct device **pds,
size_t pd_count)
{
struct device *dev = adsp->dev;
int i;
/* Handle single power domain */
if (dev->pm_domain && pd_count) {
pm_runtime_disable(dev);
return;
}
for (i = 0; i < pd_count; i++)
dev_pm_domain_detach(pds[i], false);
}
static int qcom_rproc_pds_enable(struct qcom_adsp *adsp, struct device **pds,
size_t pd_count)
{
int ret;
int i;
for (i = 0; i < pd_count; i++) {
dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
ret = pm_runtime_resume_and_get(pds[i]);
if (ret < 0) {
dev_pm_genpd_set_performance_state(pds[i], 0);
goto unroll_pd_votes;
}
}
return 0;
unroll_pd_votes:
for (i--; i >= 0; i--) {
dev_pm_genpd_set_performance_state(pds[i], 0);
pm_runtime_put(pds[i]);
}
return ret;
}
static void qcom_rproc_pds_disable(struct qcom_adsp *adsp, struct device **pds,
size_t pd_count)
{
int i;
for (i = 0; i < pd_count; i++) {
dev_pm_genpd_set_performance_state(pds[i], 0);
pm_runtime_put(pds[i]);
}
}
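/*
 * Usage sketch (editorial addition): the four helpers above are used as a
 * matched set. A typical flow, assuming a successful probe, looks like:
 *
 *	ret = qcom_rproc_pds_attach(dev, adsp, desc->proxy_pd_names);
 *	if (ret < 0)
 *		return ret;
 *	adsp->proxy_pd_count = ret;	// number of attached domains
 *
 *	// around remote processor start ... stop/handover:
 *	qcom_rproc_pds_enable(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
 *	...
 *	qcom_rproc_pds_disable(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
 *
 *	// on remove or probe failure:
 *	qcom_rproc_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
 *
 * adsp_probe(), adsp_start() and qcom_adsp_pil_handover() below follow this
 * pattern.
 */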
static int qcom_wpss_shutdown(struct qcom_adsp *adsp)
{
unsigned int val;
regmap_write(adsp->halt_map, adsp->halt_lpass + LPASS_HALTREQ_REG, 1);
/* Wait for halt ACK from QDSP6 */
regmap_read_poll_timeout(adsp->halt_map,
adsp->halt_lpass + LPASS_HALTACK_REG, val,
val, 1000, ACK_TIMEOUT_US);
/* Assert the WPSS PDC Reset */
reset_control_assert(adsp->pdc_sync_reset);
/* Place the WPSS processor into reset */
reset_control_assert(adsp->restart);
/* wait after asserting subsystem restart from AOSS */
usleep_range(200, 205);
/* Remove the WPSS reset */
reset_control_deassert(adsp->restart);
/* De-assert the WPSS PDC Reset */
reset_control_deassert(adsp->pdc_sync_reset);
usleep_range(100, 105);
clk_bulk_disable_unprepare(adsp->num_clks, adsp->clks);
regmap_write(adsp->halt_map, adsp->halt_lpass + LPASS_HALTREQ_REG, 0);
/* Wait for halt ACK from QDSP6 */
regmap_read_poll_timeout(adsp->halt_map,
adsp->halt_lpass + LPASS_HALTACK_REG, val,
!val, 1000, ACK_TIMEOUT_US);
return 0;
}
static int qcom_adsp_shutdown(struct qcom_adsp *adsp)
{
unsigned long timeout;
unsigned int val;
int ret;
/* Reset the retention logic */
val = readl(adsp->qdsp6ss_base + RET_CFG_REG);
val |= 0x1;
writel(val, adsp->qdsp6ss_base + RET_CFG_REG);
clk_bulk_disable_unprepare(adsp->num_clks, adsp->clks);
/* QDSP6 master port needs to be explicitly halted */
ret = regmap_read(adsp->halt_map,
adsp->halt_lpass + LPASS_PWR_ON_REG, &val);
if (ret || !val)
goto reset;
ret = regmap_read(adsp->halt_map,
adsp->halt_lpass + LPASS_MASTER_IDLE_REG,
&val);
if (ret || val)
goto reset;
regmap_write(adsp->halt_map,
adsp->halt_lpass + LPASS_HALTREQ_REG, 1);
/* Wait for halt ACK from QDSP6 */
timeout = jiffies + msecs_to_jiffies(ACK_TIMEOUT);
for (;;) {
ret = regmap_read(adsp->halt_map,
adsp->halt_lpass + LPASS_HALTACK_REG, &val);
if (ret || val || time_after(jiffies, timeout))
break;
usleep_range(1000, 1100);
}
ret = regmap_read(adsp->halt_map,
adsp->halt_lpass + LPASS_MASTER_IDLE_REG, &val);
if (ret || !val)
dev_err(adsp->dev, "port failed halt\n");
reset:
/* Assert the LPASS PDC Reset */
reset_control_assert(adsp->pdc_sync_reset);
/* Place the LPASS processor into reset */
reset_control_assert(adsp->restart);
/* wait after asserting subsystem restart from AOSS */
usleep_range(200, 300);
/* Clear the halt request for the AXIM and AHBM for Q6 */
regmap_write(adsp->halt_map, adsp->halt_lpass + LPASS_HALTREQ_REG, 0);
/* De-assert the LPASS PDC Reset */
reset_control_deassert(adsp->pdc_sync_reset);
/* Remove the LPASS reset */
reset_control_deassert(adsp->restart);
/* wait after de-asserting subsystem restart from AOSS */
usleep_range(200, 300);
return 0;
}
static int adsp_load(struct rproc *rproc, const struct firmware *fw)
{
struct qcom_adsp *adsp = rproc->priv;
int ret;
ret = qcom_mdt_load_no_init(adsp->dev, fw, rproc->firmware, 0,
adsp->mem_region, adsp->mem_phys,
adsp->mem_size, &adsp->mem_reloc);
if (ret)
return ret;
qcom_pil_info_store(adsp->info_name, adsp->mem_phys, adsp->mem_size);
return 0;
}
static void adsp_unmap_carveout(struct rproc *rproc)
{
struct qcom_adsp *adsp = rproc->priv;
if (adsp->has_iommu)
iommu_unmap(rproc->domain, adsp->mem_phys, adsp->mem_size);
}
static int adsp_map_carveout(struct rproc *rproc)
{
struct qcom_adsp *adsp = rproc->priv;
struct of_phandle_args args;
long long sid;
unsigned long iova;
int ret;
if (!adsp->has_iommu)
return 0;
if (!rproc->domain)
return -EINVAL;
ret = of_parse_phandle_with_args(adsp->dev->of_node, "iommus", "#iommu-cells", 0, &args);
if (ret < 0)
return ret;
sid = args.args[0] & SID_MASK_DEFAULT;
/* Add SID configuration for ADSP Firmware to SMMU */
iova = adsp->mem_phys | (sid << 32);
ret = iommu_map(rproc->domain, iova, adsp->mem_phys,
adsp->mem_size, IOMMU_READ | IOMMU_WRITE,
GFP_KERNEL);
if (ret) {
dev_err(adsp->dev, "Unable to map ADSP Physical Memory\n");
return ret;
}
return 0;
}
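/*
 * Worked example (editorial addition): with an illustrative carveout at
 * mem_phys = 0x86700000 and an "iommus" first cell of 0x1804, the SID is
 * masked to 0x1804 & SID_MASK_DEFAULT = 0x4, and the mapping is created at
 * iova = 0x86700000 | (0x4ULL << 32) = 0x486700000, i.e. the SID lands in
 * bits [35:32] of the IOVA while the low bits stay identity-mapped to the
 * carveout. All numeric values here are assumptions for illustration.
 */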
static int adsp_start(struct rproc *rproc)
{
struct qcom_adsp *adsp = rproc->priv;
int ret;
unsigned int val;
ret = qcom_q6v5_prepare(&adsp->q6v5);
if (ret)
return ret;
ret = adsp_map_carveout(rproc);
if (ret) {
dev_err(adsp->dev, "ADSP smmu mapping failed\n");
goto disable_irqs;
}
ret = clk_prepare_enable(adsp->xo);
if (ret)
goto adsp_smmu_unmap;
ret = qcom_rproc_pds_enable(adsp, adsp->proxy_pds,
adsp->proxy_pd_count);
if (ret < 0)
goto disable_xo_clk;
ret = clk_bulk_prepare_enable(adsp->num_clks, adsp->clks);
if (ret) {
dev_err(adsp->dev, "adsp clk_enable failed\n");
goto disable_power_domain;
}
/* Enable the XO clock */
writel(1, adsp->qdsp6ss_base + QDSP6SS_XO_CBCR);
/* Enable the QDSP6SS sleep clock */
writel(1, adsp->qdsp6ss_base + QDSP6SS_SLEEP_CBCR);
/* Enable the QDSP6 core clock */
writel(1, adsp->qdsp6ss_base + QDSP6SS_CORE_CBCR);
/* Program boot address */
writel(adsp->mem_phys >> 4, adsp->qdsp6ss_base + RST_EVB_REG);
if (adsp->lpass_efuse)
writel(LPASS_EFUSE_Q6SS_EVB_SEL, adsp->lpass_efuse);
/* De-assert QDSP6 stop core. QDSP6 will execute after out of reset */
writel(LPASS_BOOT_CORE_START, adsp->qdsp6ss_base + CORE_START_REG);
/* Trigger boot FSM to start QDSP6 */
writel(LPASS_BOOT_CMD_START, adsp->qdsp6ss_base + BOOT_CMD_REG);
/* Wait for core to come out of reset */
ret = readl_poll_timeout(adsp->qdsp6ss_base + BOOT_STATUS_REG,
val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
if (ret) {
dev_err(adsp->dev, "failed to bootup adsp\n");
goto disable_adsp_clks;
}
ret = qcom_q6v5_wait_for_start(&adsp->q6v5, msecs_to_jiffies(5 * HZ));
if (ret == -ETIMEDOUT) {
dev_err(adsp->dev, "start timed out\n");
goto disable_adsp_clks;
}
return 0;
disable_adsp_clks:
clk_bulk_disable_unprepare(adsp->num_clks, adsp->clks);
disable_power_domain:
qcom_rproc_pds_disable(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
disable_xo_clk:
clk_disable_unprepare(adsp->xo);
adsp_smmu_unmap:
adsp_unmap_carveout(rproc);
disable_irqs:
qcom_q6v5_unprepare(&adsp->q6v5);
return ret;
}
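/*
 * Worked example (editorial addition): the boot vector written to
 * RST_EVB_REG above is the carveout physical address shifted right by four
 * bits. For an assumed carveout at mem_phys = 0x86700000 this writes
 * 0x08670000, i.e. the register takes the address in units of 16 bytes.
 * The BOOT_STATUS_REG poll then waits up to BOOT_FSM_TIMEOUT microseconds
 * for bit 0 to indicate that the boot FSM has released the core.
 */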
static void qcom_adsp_pil_handover(struct qcom_q6v5 *q6v5)
{
struct qcom_adsp *adsp = container_of(q6v5, struct qcom_adsp, q6v5);
clk_disable_unprepare(adsp->xo);
qcom_rproc_pds_disable(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
}
static int adsp_stop(struct rproc *rproc)
{
struct qcom_adsp *adsp = rproc->priv;
int handover;
int ret;
ret = qcom_q6v5_request_stop(&adsp->q6v5, adsp->sysmon);
if (ret == -ETIMEDOUT)
dev_err(adsp->dev, "timed out on wait\n");
ret = adsp->shutdown(adsp);
if (ret)
dev_err(adsp->dev, "failed to shutdown: %d\n", ret);
adsp_unmap_carveout(rproc);
handover = qcom_q6v5_unprepare(&adsp->q6v5);
if (handover)
qcom_adsp_pil_handover(&adsp->q6v5);
return ret;
}
static void *adsp_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct qcom_adsp *adsp = rproc->priv;
int offset;
offset = da - adsp->mem_reloc;
if (offset < 0 || offset + len > adsp->mem_size)
return NULL;
return adsp->mem_region + offset;
}
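/*
 * Worked example (editorial addition): with mem_reloc = 0x86700000 and
 * mem_size = 0x1a00000 (illustrative values), a request for da = 0x86701000
 * and len = 0x100 yields offset = 0x1000, passes the bounds check and
 * returns mem_region + 0x1000. A da below mem_reloc, or a range running past
 * the carveout, returns NULL and the caller treats the address as unmapped.
 */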
static int adsp_parse_firmware(struct rproc *rproc, const struct firmware *fw)
{
struct qcom_adsp *adsp = rproc->priv;
int ret;
ret = qcom_register_dump_segments(rproc, fw);
if (ret) {
dev_err(&rproc->dev, "Error in registering dump segments\n");
return ret;
}
if (adsp->has_iommu) {
ret = rproc_elf_load_rsc_table(rproc, fw);
if (ret) {
dev_err(&rproc->dev, "Error in loading resource table\n");
return ret;
}
}
return 0;
}
static unsigned long adsp_panic(struct rproc *rproc)
{
struct qcom_adsp *adsp = rproc->priv;
return qcom_q6v5_panic(&adsp->q6v5);
}
static const struct rproc_ops adsp_ops = {
.start = adsp_start,
.stop = adsp_stop,
.da_to_va = adsp_da_to_va,
.parse_fw = adsp_parse_firmware,
.load = adsp_load,
.panic = adsp_panic,
};
static int adsp_init_clock(struct qcom_adsp *adsp, const char **clk_ids)
{
int num_clks = 0;
int i, ret;
adsp->xo = devm_clk_get(adsp->dev, "xo");
if (IS_ERR(adsp->xo)) {
ret = PTR_ERR(adsp->xo);
if (ret != -EPROBE_DEFER)
dev_err(adsp->dev, "failed to get xo clock");
return ret;
}
for (i = 0; clk_ids[i]; i++)
num_clks++;
adsp->num_clks = num_clks;
adsp->clks = devm_kcalloc(adsp->dev, adsp->num_clks,
sizeof(*adsp->clks), GFP_KERNEL);
if (!adsp->clks)
return -ENOMEM;
for (i = 0; i < adsp->num_clks; i++)
adsp->clks[i].id = clk_ids[i];
return devm_clk_bulk_get(adsp->dev, adsp->num_clks, adsp->clks);
}
static int adsp_init_reset(struct qcom_adsp *adsp)
{
adsp->pdc_sync_reset = devm_reset_control_get_optional_exclusive(adsp->dev,
"pdc_sync");
if (IS_ERR(adsp->pdc_sync_reset)) {
dev_err(adsp->dev, "failed to acquire pdc_sync reset\n");
return PTR_ERR(adsp->pdc_sync_reset);
}
adsp->restart = devm_reset_control_get_optional_exclusive(adsp->dev, "restart");
/* Fall back to the old "cc_lpass" if "restart" is absent */
if (!adsp->restart)
adsp->restart = devm_reset_control_get_exclusive(adsp->dev, "cc_lpass");
if (IS_ERR(adsp->restart)) {
dev_err(adsp->dev, "failed to acquire restart\n");
return PTR_ERR(adsp->restart);
}
return 0;
}
static int adsp_init_mmio(struct qcom_adsp *adsp,
struct platform_device *pdev)
{
struct resource *efuse_region;
struct device_node *syscon;
int ret;
adsp->qdsp6ss_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(adsp->qdsp6ss_base)) {
dev_err(adsp->dev, "failed to map QDSP6SS registers\n");
return PTR_ERR(adsp->qdsp6ss_base);
}
efuse_region = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!efuse_region) {
adsp->lpass_efuse = NULL;
dev_dbg(adsp->dev, "failed to get efuse memory region\n");
} else {
adsp->lpass_efuse = devm_ioremap_resource(&pdev->dev, efuse_region);
if (IS_ERR(adsp->lpass_efuse)) {
dev_err(adsp->dev, "failed to map efuse registers\n");
return PTR_ERR(adsp->lpass_efuse);
}
}
syscon = of_parse_phandle(pdev->dev.of_node, "qcom,halt-regs", 0);
if (!syscon) {
dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
return -EINVAL;
}
adsp->halt_map = syscon_node_to_regmap(syscon);
of_node_put(syscon);
if (IS_ERR(adsp->halt_map))
return PTR_ERR(adsp->halt_map);
ret = of_property_read_u32_index(pdev->dev.of_node, "qcom,halt-regs",
1, &adsp->halt_lpass);
if (ret < 0) {
dev_err(&pdev->dev, "no offset in syscon\n");
return ret;
}
return 0;
}
static int adsp_alloc_memory_region(struct qcom_adsp *adsp)
{
struct reserved_mem *rmem = NULL;
struct device_node *node;
node = of_parse_phandle(adsp->dev->of_node, "memory-region", 0);
if (node)
rmem = of_reserved_mem_lookup(node);
of_node_put(node);
if (!rmem) {
dev_err(adsp->dev, "unable to resolve memory-region\n");
return -EINVAL;
}
adsp->mem_phys = adsp->mem_reloc = rmem->base;
adsp->mem_size = rmem->size;
adsp->mem_region = devm_ioremap_wc(adsp->dev,
adsp->mem_phys, adsp->mem_size);
if (!adsp->mem_region) {
dev_err(adsp->dev, "unable to map memory region: %pa+%zx\n",
&rmem->base, adsp->mem_size);
return -EBUSY;
}
return 0;
}
static int adsp_probe(struct platform_device *pdev)
{
const struct adsp_pil_data *desc;
const char *firmware_name;
struct qcom_adsp *adsp;
struct rproc *rproc;
int ret;
desc = of_device_get_match_data(&pdev->dev);
if (!desc)
return -EINVAL;
firmware_name = desc->firmware_name;
ret = of_property_read_string(pdev->dev.of_node, "firmware-name",
&firmware_name);
if (ret < 0 && ret != -EINVAL) {
dev_err(&pdev->dev, "unable to read firmware-name\n");
return ret;
}
rproc = rproc_alloc(&pdev->dev, pdev->name, &adsp_ops,
firmware_name, sizeof(*adsp));
if (!rproc) {
dev_err(&pdev->dev, "unable to allocate remoteproc\n");
return -ENOMEM;
}
rproc->auto_boot = desc->auto_boot;
rproc->has_iommu = desc->has_iommu;
rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
adsp = rproc->priv;
adsp->dev = &pdev->dev;
adsp->rproc = rproc;
adsp->info_name = desc->sysmon_name;
adsp->has_iommu = desc->has_iommu;
platform_set_drvdata(pdev, adsp);
if (desc->is_wpss)
adsp->shutdown = qcom_wpss_shutdown;
else
adsp->shutdown = qcom_adsp_shutdown;
ret = adsp_alloc_memory_region(adsp);
if (ret)
goto free_rproc;
ret = adsp_init_clock(adsp, desc->clk_ids);
if (ret)
goto free_rproc;
ret = qcom_rproc_pds_attach(adsp->dev, adsp,
desc->proxy_pd_names);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to attach proxy power domains\n");
goto free_rproc;
}
adsp->proxy_pd_count = ret;
ret = adsp_init_reset(adsp);
if (ret)
goto disable_pm;
ret = adsp_init_mmio(adsp, pdev);
if (ret)
goto disable_pm;
ret = qcom_q6v5_init(&adsp->q6v5, pdev, rproc, desc->crash_reason_smem,
desc->load_state, qcom_adsp_pil_handover);
if (ret)
goto disable_pm;
qcom_add_glink_subdev(rproc, &adsp->glink_subdev, desc->ssr_name);
qcom_add_ssr_subdev(rproc, &adsp->ssr_subdev, desc->ssr_name);
adsp->sysmon = qcom_add_sysmon_subdev(rproc,
desc->sysmon_name,
desc->ssctl_id);
if (IS_ERR(adsp->sysmon)) {
ret = PTR_ERR(adsp->sysmon);
goto disable_pm;
}
ret = rproc_add(rproc);
if (ret)
goto disable_pm;
return 0;
disable_pm:
qcom_rproc_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
free_rproc:
rproc_free(rproc);
return ret;
}
static void adsp_remove(struct platform_device *pdev)
{
struct qcom_adsp *adsp = platform_get_drvdata(pdev);
rproc_del(adsp->rproc);
qcom_q6v5_deinit(&adsp->q6v5);
qcom_remove_glink_subdev(adsp->rproc, &adsp->glink_subdev);
qcom_remove_sysmon_subdev(adsp->sysmon);
qcom_remove_ssr_subdev(adsp->rproc, &adsp->ssr_subdev);
qcom_rproc_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
rproc_free(adsp->rproc);
}
static const struct adsp_pil_data adsp_resource_init = {
.crash_reason_smem = 423,
.firmware_name = "adsp.mdt",
.ssr_name = "lpass",
.sysmon_name = "adsp",
.ssctl_id = 0x14,
.is_wpss = false,
.auto_boot = true,
.clk_ids = (const char*[]) {
"sway_cbcr", "lpass_ahbs_aon_cbcr", "lpass_ahbm_aon_cbcr",
"qdsp6ss_xo", "qdsp6ss_sleep", "qdsp6ss_core", NULL
},
.num_clks = 7,
.proxy_pd_names = (const char*[]) {
"cx", NULL
},
};
static const struct adsp_pil_data adsp_sc7280_resource_init = {
.crash_reason_smem = 423,
.firmware_name = "adsp.pbn",
.load_state = "adsp",
.ssr_name = "lpass",
.sysmon_name = "adsp",
.ssctl_id = 0x14,
.has_iommu = true,
.auto_boot = true,
.clk_ids = (const char*[]) {
"gcc_cfg_noc_lpass", NULL
},
.num_clks = 1,
};
static const struct adsp_pil_data cdsp_resource_init = {
.crash_reason_smem = 601,
.firmware_name = "cdsp.mdt",
.ssr_name = "cdsp",
.sysmon_name = "cdsp",
.ssctl_id = 0x17,
.is_wpss = false,
.auto_boot = true,
.clk_ids = (const char*[]) {
"sway", "tbu", "bimc", "ahb_aon", "q6ss_slave", "q6ss_master",
"q6_axim", NULL
},
.num_clks = 7,
.proxy_pd_names = (const char*[]) {
"cx", NULL
},
};
static const struct adsp_pil_data wpss_resource_init = {
.crash_reason_smem = 626,
.firmware_name = "wpss.mdt",
.ssr_name = "wpss",
.sysmon_name = "wpss",
.ssctl_id = 0x19,
.is_wpss = true,
.auto_boot = false,
.load_state = "wpss",
.clk_ids = (const char*[]) {
"ahb_bdg", "ahb", "rscp", NULL
},
.num_clks = 3,
.proxy_pd_names = (const char*[]) {
"cx", "mx", NULL
},
};
static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,qcs404-cdsp-pil", .data = &cdsp_resource_init },
{ .compatible = "qcom,sc7280-adsp-pil", .data = &adsp_sc7280_resource_init },
{ .compatible = "qcom,sc7280-wpss-pil", .data = &wpss_resource_init },
{ .compatible = "qcom,sdm845-adsp-pil", .data = &adsp_resource_init },
{ },
};
MODULE_DEVICE_TABLE(of, adsp_of_match);
static struct platform_driver adsp_pil_driver = {
.probe = adsp_probe,
.remove_new = adsp_remove,
.driver = {
.name = "qcom_q6v5_adsp",
.of_match_table = adsp_of_match,
},
};
module_platform_driver(adsp_pil_driver);
MODULE_DESCRIPTION("QTI SDM845 ADSP Peripheral Image Loader");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/remoteproc/qcom_q6v5_adsp.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Qualcomm Peripheral Image Loader for Q6V5
*
* Copyright (C) 2016-2018 Linaro Ltd.
* Copyright (C) 2014 Sony Mobile Communications AB
* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/soc/qcom/qcom_aoss.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>
#include <linux/remoteproc.h>
#include "qcom_common.h"
#include "qcom_q6v5.h"
#define Q6V5_LOAD_STATE_MSG_LEN 64
#define Q6V5_PANIC_DELAY_MS 200
static int q6v5_load_state_toggle(struct qcom_q6v5 *q6v5, bool enable)
{
int ret;
if (!q6v5->qmp)
return 0;
ret = qmp_send(q6v5->qmp, "{class: image, res: load_state, name: %s, val: %s}",
q6v5->load_state, enable ? "on" : "off");
if (ret)
dev_err(q6v5->dev, "failed to toggle load state\n");
return ret;
}
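/*
 * Worked example (editorial addition): for an assumed load_state string of
 * "adsp", the qmp_send() above emits
 *
 *	{class: image, res: load_state, name: adsp, val: on}
 *
 * when the remote is being brought up, and the same message with "val: off"
 * when it is stopped, which is how the Always-On Subsystem (AOSS) is told
 * whether the image is currently loaded.
 */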
/**
* qcom_q6v5_prepare() - reinitialize the qcom_q6v5 context before start
* @q6v5: reference to qcom_q6v5 context to be reinitialized
*
* Return: 0 on success, negative errno on failure
*/
int qcom_q6v5_prepare(struct qcom_q6v5 *q6v5)
{
int ret;
ret = icc_set_bw(q6v5->path, 0, UINT_MAX);
if (ret < 0) {
dev_err(q6v5->dev, "failed to set bandwidth request\n");
return ret;
}
ret = q6v5_load_state_toggle(q6v5, true);
if (ret) {
icc_set_bw(q6v5->path, 0, 0);
return ret;
}
reinit_completion(&q6v5->start_done);
reinit_completion(&q6v5->stop_done);
q6v5->running = true;
q6v5->handover_issued = false;
enable_irq(q6v5->handover_irq);
return 0;
}
EXPORT_SYMBOL_GPL(qcom_q6v5_prepare);
/**
* qcom_q6v5_unprepare() - unprepare the qcom_q6v5 context after stop
* @q6v5: reference to qcom_q6v5 context to be unprepared
*
* Return: 0 on success, 1 if handover hasn't yet been called
*/
int qcom_q6v5_unprepare(struct qcom_q6v5 *q6v5)
{
disable_irq(q6v5->handover_irq);
q6v5_load_state_toggle(q6v5, false);
/* Disable interconnect vote, in case handover never happened */
icc_set_bw(q6v5->path, 0, 0);
return !q6v5->handover_issued;
}
EXPORT_SYMBOL_GPL(qcom_q6v5_unprepare);
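/*
 * Usage sketch (editorial addition): qcom_q6v5_prepare() and
 * qcom_q6v5_unprepare() bracket a start/stop cycle in the PIL drivers that
 * embed this helper. Roughly, with error handling trimmed and names assumed
 * from qcom_q6v5_adsp.c earlier in this collection:
 *
 *	ret = qcom_q6v5_prepare(&adsp->q6v5);
 *	// ... clock, power domain and reset sequencing, release the core ...
 *	ret = qcom_q6v5_wait_for_start(&adsp->q6v5, msecs_to_jiffies(5000));
 *
 *	// and on the way down:
 *	qcom_q6v5_request_stop(&adsp->q6v5, adsp->sysmon);
 *	// ... driver-specific shutdown ...
 *	if (qcom_q6v5_unprepare(&adsp->q6v5))
 *		qcom_adsp_pil_handover(&adsp->q6v5);	// handover never fired
 */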
static irqreturn_t q6v5_wdog_interrupt(int irq, void *data)
{
struct qcom_q6v5 *q6v5 = data;
size_t len;
char *msg;
/* Sometimes the stop triggers a watchdog rather than a stop-ack */
if (!q6v5->running) {
complete(&q6v5->stop_done);
return IRQ_HANDLED;
}
msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, q6v5->crash_reason, &len);
if (!IS_ERR(msg) && len > 0 && msg[0])
dev_err(q6v5->dev, "watchdog received: %s\n", msg);
else
dev_err(q6v5->dev, "watchdog without message\n");
q6v5->running = false;
rproc_report_crash(q6v5->rproc, RPROC_WATCHDOG);
return IRQ_HANDLED;
}
static irqreturn_t q6v5_fatal_interrupt(int irq, void *data)
{
struct qcom_q6v5 *q6v5 = data;
size_t len;
char *msg;
if (!q6v5->running)
return IRQ_HANDLED;
msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, q6v5->crash_reason, &len);
if (!IS_ERR(msg) && len > 0 && msg[0])
dev_err(q6v5->dev, "fatal error received: %s\n", msg);
else
dev_err(q6v5->dev, "fatal error without message\n");
q6v5->running = false;
rproc_report_crash(q6v5->rproc, RPROC_FATAL_ERROR);
return IRQ_HANDLED;
}
static irqreturn_t q6v5_ready_interrupt(int irq, void *data)
{
struct qcom_q6v5 *q6v5 = data;
complete(&q6v5->start_done);
return IRQ_HANDLED;
}
/**
* qcom_q6v5_wait_for_start() - wait for remote processor start signal
* @q6v5: reference to qcom_q6v5 context
* @timeout: timeout to wait for the event, in jiffies
*
* qcom_q6v5_unprepare() should not be called when this function fails.
*
* Return: 0 on success, -ETIMEDOUT on timeout
*/
int qcom_q6v5_wait_for_start(struct qcom_q6v5 *q6v5, int timeout)
{
int ret;
ret = wait_for_completion_timeout(&q6v5->start_done, timeout);
if (!ret)
disable_irq(q6v5->handover_irq);
return !ret ? -ETIMEDOUT : 0;
}
EXPORT_SYMBOL_GPL(qcom_q6v5_wait_for_start);
static irqreturn_t q6v5_handover_interrupt(int irq, void *data)
{
struct qcom_q6v5 *q6v5 = data;
if (q6v5->handover)
q6v5->handover(q6v5);
icc_set_bw(q6v5->path, 0, 0);
q6v5->handover_issued = true;
return IRQ_HANDLED;
}
static irqreturn_t q6v5_stop_interrupt(int irq, void *data)
{
struct qcom_q6v5 *q6v5 = data;
complete(&q6v5->stop_done);
return IRQ_HANDLED;
}
/**
* qcom_q6v5_request_stop() - request the remote processor to stop
* @q6v5: reference to qcom_q6v5 context
* @sysmon: reference to the remote's sysmon instance, or NULL
*
* Return: 0 on success, negative errno on failure
*/
int qcom_q6v5_request_stop(struct qcom_q6v5 *q6v5, struct qcom_sysmon *sysmon)
{
int ret;
q6v5->running = false;
/* Don't perform SMP2P dance if remote isn't running */
if (q6v5->rproc->state != RPROC_RUNNING || qcom_sysmon_shutdown_acked(sysmon))
return 0;
qcom_smem_state_update_bits(q6v5->state,
BIT(q6v5->stop_bit), BIT(q6v5->stop_bit));
ret = wait_for_completion_timeout(&q6v5->stop_done, 5 * HZ);
qcom_smem_state_update_bits(q6v5->state, BIT(q6v5->stop_bit), 0);
return ret == 0 ? -ETIMEDOUT : 0;
}
EXPORT_SYMBOL_GPL(qcom_q6v5_request_stop);
/**
* qcom_q6v5_panic() - panic handler to invoke a stop on the remote
* @q6v5: reference to qcom_q6v5 context
*
* Set the stop bit and sleep in order to allow the remote processor to flush
* its caches etc for post mortem debugging.
*
* Return: 200ms
*/
unsigned long qcom_q6v5_panic(struct qcom_q6v5 *q6v5)
{
qcom_smem_state_update_bits(q6v5->state,
BIT(q6v5->stop_bit), BIT(q6v5->stop_bit));
return Q6V5_PANIC_DELAY_MS;
}
EXPORT_SYMBOL_GPL(qcom_q6v5_panic);
/**
* qcom_q6v5_init() - initializer of the q6v5 common struct
* @q6v5: handle to be initialized
* @pdev: platform_device reference for acquiring resources
* @rproc: associated remoteproc instance
* @crash_reason: SMEM id for crash reason string, or 0 if none
* @load_state: load state resource string
* @handover: function to be called when proxy resources should be released
*
* Return: 0 on success, negative errno on failure
*/
int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
struct rproc *rproc, int crash_reason, const char *load_state,
void (*handover)(struct qcom_q6v5 *q6v5))
{
int ret;
q6v5->rproc = rproc;
q6v5->dev = &pdev->dev;
q6v5->crash_reason = crash_reason;
q6v5->handover = handover;
init_completion(&q6v5->start_done);
init_completion(&q6v5->stop_done);
q6v5->wdog_irq = platform_get_irq_byname(pdev, "wdog");
if (q6v5->wdog_irq < 0)
return q6v5->wdog_irq;
ret = devm_request_threaded_irq(&pdev->dev, q6v5->wdog_irq,
NULL, q6v5_wdog_interrupt,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
"q6v5 wdog", q6v5);
if (ret) {
dev_err(&pdev->dev, "failed to acquire wdog IRQ\n");
return ret;
}
q6v5->fatal_irq = platform_get_irq_byname(pdev, "fatal");
if (q6v5->fatal_irq < 0)
return q6v5->fatal_irq;
ret = devm_request_threaded_irq(&pdev->dev, q6v5->fatal_irq,
NULL, q6v5_fatal_interrupt,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
"q6v5 fatal", q6v5);
if (ret) {
dev_err(&pdev->dev, "failed to acquire fatal IRQ\n");
return ret;
}
q6v5->ready_irq = platform_get_irq_byname(pdev, "ready");
if (q6v5->ready_irq < 0)
return q6v5->ready_irq;
ret = devm_request_threaded_irq(&pdev->dev, q6v5->ready_irq,
NULL, q6v5_ready_interrupt,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
"q6v5 ready", q6v5);
if (ret) {
dev_err(&pdev->dev, "failed to acquire ready IRQ\n");
return ret;
}
q6v5->handover_irq = platform_get_irq_byname(pdev, "handover");
if (q6v5->handover_irq < 0)
return q6v5->handover_irq;
ret = devm_request_threaded_irq(&pdev->dev, q6v5->handover_irq,
NULL, q6v5_handover_interrupt,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
"q6v5 handover", q6v5);
if (ret) {
dev_err(&pdev->dev, "failed to acquire handover IRQ\n");
return ret;
}
disable_irq(q6v5->handover_irq);
q6v5->stop_irq = platform_get_irq_byname(pdev, "stop-ack");
if (q6v5->stop_irq < 0)
return q6v5->stop_irq;
ret = devm_request_threaded_irq(&pdev->dev, q6v5->stop_irq,
NULL, q6v5_stop_interrupt,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
"q6v5 stop", q6v5);
if (ret) {
dev_err(&pdev->dev, "failed to acquire stop-ack IRQ\n");
return ret;
}
q6v5->state = devm_qcom_smem_state_get(&pdev->dev, "stop", &q6v5->stop_bit);
if (IS_ERR(q6v5->state)) {
dev_err(&pdev->dev, "failed to acquire stop state\n");
return PTR_ERR(q6v5->state);
}
q6v5->load_state = devm_kstrdup_const(&pdev->dev, load_state, GFP_KERNEL);
q6v5->qmp = qmp_get(&pdev->dev);
if (IS_ERR(q6v5->qmp)) {
if (PTR_ERR(q6v5->qmp) != -ENODEV)
return dev_err_probe(&pdev->dev, PTR_ERR(q6v5->qmp),
"failed to acquire load state\n");
q6v5->qmp = NULL;
} else if (!q6v5->load_state) {
if (!load_state)
dev_err(&pdev->dev, "load state resource string empty\n");
qmp_put(q6v5->qmp);
return load_state ? -ENOMEM : -EINVAL;
}
q6v5->path = devm_of_icc_get(&pdev->dev, NULL);
if (IS_ERR(q6v5->path))
return dev_err_probe(&pdev->dev, PTR_ERR(q6v5->path),
"failed to acquire interconnect path\n");
return 0;
}
EXPORT_SYMBOL_GPL(qcom_q6v5_init);
/**
* qcom_q6v5_deinit() - deinitialize the q6v5 common struct
* @q6v5: reference to qcom_q6v5 context to be deinitialized
*/
void qcom_q6v5_deinit(struct qcom_q6v5 *q6v5)
{
qmp_put(q6v5->qmp);
}
EXPORT_SYMBOL_GPL(qcom_q6v5_deinit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm Peripheral Image Loader for Q6V5");
|
linux-master
|
drivers/remoteproc/qcom_q6v5.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* PRU-ICSS remoteproc driver for various TI SoCs
*
* Copyright (C) 2014-2022 Texas Instruments Incorporated - https://www.ti.com/
*
* Author(s):
* Suman Anna <[email protected]>
* Andrew F. Davis <[email protected]>
* Grzegorz Jaszczyk <[email protected]> for Texas Instruments
* Puranjay Mohan <[email protected]>
* Md Danish Anwar <[email protected]>
*/
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/remoteproc/pruss.h>
#include <linux/pruss_driver.h>
#include <linux/remoteproc.h>
#include "remoteproc_internal.h"
#include "remoteproc_elf_helpers.h"
#include "pru_rproc.h"
/* PRU_ICSS_PRU_CTRL registers */
#define PRU_CTRL_CTRL 0x0000
#define PRU_CTRL_STS 0x0004
#define PRU_CTRL_WAKEUP_EN 0x0008
#define PRU_CTRL_CYCLE 0x000C
#define PRU_CTRL_STALL 0x0010
#define PRU_CTRL_CTBIR0 0x0020
#define PRU_CTRL_CTBIR1 0x0024
#define PRU_CTRL_CTPPR0 0x0028
#define PRU_CTRL_CTPPR1 0x002C
/* CTRL register bit-fields */
#define CTRL_CTRL_SOFT_RST_N BIT(0)
#define CTRL_CTRL_EN BIT(1)
#define CTRL_CTRL_SLEEPING BIT(2)
#define CTRL_CTRL_CTR_EN BIT(3)
#define CTRL_CTRL_SINGLE_STEP BIT(8)
#define CTRL_CTRL_RUNSTATE BIT(15)
/* PRU_ICSS_PRU_DEBUG registers */
#define PRU_DEBUG_GPREG(x) (0x0000 + (x) * 4)
#define PRU_DEBUG_CT_REG(x) (0x0080 + (x) * 4)
/* PRU/RTU/Tx_PRU Core IRAM address masks */
#define PRU_IRAM_ADDR_MASK 0x3ffff
#define PRU0_IRAM_ADDR_MASK 0x34000
#define PRU1_IRAM_ADDR_MASK 0x38000
#define RTU0_IRAM_ADDR_MASK 0x4000
#define RTU1_IRAM_ADDR_MASK 0x6000
#define TX_PRU0_IRAM_ADDR_MASK 0xa000
#define TX_PRU1_IRAM_ADDR_MASK 0xc000
/* PRU device addresses for various type of PRU RAMs */
#define PRU_IRAM_DA 0 /* Instruction RAM */
#define PRU_PDRAM_DA 0 /* Primary Data RAM */
#define PRU_SDRAM_DA 0x2000 /* Secondary Data RAM */
#define PRU_SHRDRAM_DA 0x10000 /* Shared Data RAM */
#define MAX_PRU_SYS_EVENTS 160
/**
* enum pru_iomem - PRU core memory/register range identifiers
*
* @PRU_IOMEM_IRAM: PRU Instruction RAM range
* @PRU_IOMEM_CTRL: PRU Control register range
* @PRU_IOMEM_DEBUG: PRU Debug register range
* @PRU_IOMEM_MAX: just keep this one at the end
*/
enum pru_iomem {
PRU_IOMEM_IRAM = 0,
PRU_IOMEM_CTRL,
PRU_IOMEM_DEBUG,
PRU_IOMEM_MAX,
};
/**
* struct pru_private_data - device data for a PRU core
* @type: type of the PRU core (PRU, RTU, Tx_PRU)
* @is_k3: flag used to identify the need for special load handling
*/
struct pru_private_data {
enum pru_type type;
unsigned int is_k3 : 1;
};
/**
* struct pru_rproc - PRU remoteproc structure
* @id: id of the PRU core within the PRUSS
* @dev: PRU core device pointer
* @pruss: back-reference to parent PRUSS structure
* @rproc: remoteproc pointer for this PRU core
* @data: PRU core specific data
* @mem_regions: data for each of the PRU memory regions
* @client_np: client device node
* @lock: mutex to protect client usage
* @fw_name: name of firmware image used during loading
* @mapped_irq: virtual interrupt numbers of created fw specific mapping
* @pru_interrupt_map: pointer to interrupt mapping description (firmware)
* @pru_interrupt_map_sz: pru_interrupt_map size
* @rmw_lock: lock for read, modify, write operations on registers
* @dbg_single_step: debug state variable to set PRU into single step mode
* @dbg_continuous: debug state variable to restore PRU execution mode
* @evt_count: number of mapped events
* @gpmux_save: saved value for gpmux config
*/
struct pru_rproc {
int id;
struct device *dev;
struct pruss *pruss;
struct rproc *rproc;
const struct pru_private_data *data;
struct pruss_mem_region mem_regions[PRU_IOMEM_MAX];
struct device_node *client_np;
struct mutex lock;
const char *fw_name;
unsigned int *mapped_irq;
struct pru_irq_rsc *pru_interrupt_map;
size_t pru_interrupt_map_sz;
spinlock_t rmw_lock;
u32 dbg_single_step;
u32 dbg_continuous;
u8 evt_count;
u8 gpmux_save;
};
static inline u32 pru_control_read_reg(struct pru_rproc *pru, unsigned int reg)
{
return readl_relaxed(pru->mem_regions[PRU_IOMEM_CTRL].va + reg);
}
static inline
void pru_control_write_reg(struct pru_rproc *pru, unsigned int reg, u32 val)
{
writel_relaxed(val, pru->mem_regions[PRU_IOMEM_CTRL].va + reg);
}
static inline
void pru_control_set_reg(struct pru_rproc *pru, unsigned int reg,
u32 mask, u32 set)
{
u32 val;
unsigned long flags;
spin_lock_irqsave(&pru->rmw_lock, flags);
val = pru_control_read_reg(pru, reg);
val &= ~mask;
val |= (set & mask);
pru_control_write_reg(pru, reg, val);
spin_unlock_irqrestore(&pru->rmw_lock, flags);
}
/**
* pru_rproc_set_firmware() - set firmware for a PRU core
* @rproc: the rproc instance of the PRU
* @fw_name: the new firmware name, or NULL if default is desired
*
* Return: 0 on success, or errno in error case.
*/
static int pru_rproc_set_firmware(struct rproc *rproc, const char *fw_name)
{
struct pru_rproc *pru = rproc->priv;
if (!fw_name)
fw_name = pru->fw_name;
return rproc_set_firmware(rproc, fw_name);
}
static struct rproc *__pru_rproc_get(struct device_node *np, int index)
{
struct rproc *rproc;
phandle rproc_phandle;
int ret;
ret = of_property_read_u32_index(np, "ti,prus", index, &rproc_phandle);
if (ret)
return ERR_PTR(ret);
rproc = rproc_get_by_phandle(rproc_phandle);
if (!rproc) {
ret = -EPROBE_DEFER;
return ERR_PTR(ret);
}
/* make sure it is PRU rproc */
if (!is_pru_rproc(rproc->dev.parent)) {
rproc_put(rproc);
return ERR_PTR(-ENODEV);
}
return rproc;
}
/**
* pru_rproc_get() - get the PRU rproc instance from a device node
* @np: the user/client device node
* @index: index to use for the ti,prus property
* @pru_id: optional pointer to return the PRU remoteproc processor id
*
* This function looks through a client device node's "ti,prus" property at
* index @index and returns the rproc handle for a valid PRU remote processor if
* found. The function allows only one user to own the PRU rproc resource at a
* time. The caller must call pru_rproc_put() when done using the rproc; this
* is not required if the function returns a failure.
*
* When the optional @pru_id pointer is passed, the PRU remoteproc processor id
* is returned.
*
* Return: rproc handle on success, and an ERR_PTR on failure using one
* of the following error values
* -ENODEV if device is not found
* -EBUSY if PRU is already acquired by anyone
* -EPROBE_DEFER if PRU device is not probed yet
*/
struct rproc *pru_rproc_get(struct device_node *np, int index,
enum pruss_pru_id *pru_id)
{
struct rproc *rproc;
struct pru_rproc *pru;
struct device *dev;
const char *fw_name;
int ret;
u32 mux;
rproc = __pru_rproc_get(np, index);
if (IS_ERR(rproc))
return rproc;
pru = rproc->priv;
dev = &rproc->dev;
mutex_lock(&pru->lock);
if (pru->client_np) {
mutex_unlock(&pru->lock);
ret = -EBUSY;
goto err_no_rproc_handle;
}
pru->client_np = np;
rproc->sysfs_read_only = true;
mutex_unlock(&pru->lock);
if (pru_id)
*pru_id = pru->id;
ret = pruss_cfg_get_gpmux(pru->pruss, pru->id, &pru->gpmux_save);
if (ret) {
dev_err(dev, "failed to get cfg gpmux: %d\n", ret);
goto err;
}
/* An error here is acceptable for backward compatibility */
ret = of_property_read_u32_index(np, "ti,pruss-gp-mux-sel", index,
&mux);
if (!ret) {
ret = pruss_cfg_set_gpmux(pru->pruss, pru->id, mux);
if (ret) {
dev_err(dev, "failed to set cfg gpmux: %d\n", ret);
goto err;
}
}
ret = of_property_read_string_index(np, "firmware-name", index,
&fw_name);
if (!ret) {
ret = pru_rproc_set_firmware(rproc, fw_name);
if (ret) {
dev_err(dev, "failed to set firmware: %d\n", ret);
goto err;
}
}
return rproc;
err_no_rproc_handle:
rproc_put(rproc);
return ERR_PTR(ret);
err:
pru_rproc_put(rproc);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(pru_rproc_get);
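/*
* Illustrative sketch (not part of this driver): how a PRU client driver is
* expected to consume pru_rproc_get()/pru_rproc_put(). client_np is assumed
* to be the client's device node carrying a "ti,prus" phandle list; error
* handling is trimmed for brevity.
*
*    enum pruss_pru_id pru_id;
*    struct rproc *pru;
*    int ret;
*
*    pru = pru_rproc_get(client_np, 0, &pru_id);
*    if (IS_ERR(pru))
*        return PTR_ERR(pru);
*
*    ret = rproc_boot(pru);
*    if (ret) {
*        pru_rproc_put(pru);
*        return ret;
*    }
*
*    (... use the PRU ...)
*
*    rproc_shutdown(pru);
*    pru_rproc_put(pru);
*/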
/**
* pru_rproc_put() - release the PRU rproc resource
* @rproc: the rproc resource to release
*
* Releases the PRU rproc resource and makes it available to other
* users.
*/
void pru_rproc_put(struct rproc *rproc)
{
struct pru_rproc *pru;
if (IS_ERR_OR_NULL(rproc) || !is_pru_rproc(rproc->dev.parent))
return;
pru = rproc->priv;
pruss_cfg_set_gpmux(pru->pruss, pru->id, pru->gpmux_save);
pru_rproc_set_firmware(rproc, NULL);
mutex_lock(&pru->lock);
if (!pru->client_np) {
mutex_unlock(&pru->lock);
return;
}
pru->client_np = NULL;
rproc->sysfs_read_only = false;
mutex_unlock(&pru->lock);
rproc_put(rproc);
}
EXPORT_SYMBOL_GPL(pru_rproc_put);
/**
* pru_rproc_set_ctable() - set the constant table index for the PRU
* @rproc: the rproc instance of the PRU
* @c: constant table index to set
* @addr: physical address to set it to
*
* Return: 0 on success, or errno in error case.
*/
int pru_rproc_set_ctable(struct rproc *rproc, enum pru_ctable_idx c, u32 addr)
{
struct pru_rproc *pru = rproc->priv;
unsigned int reg;
u32 mask, set;
u16 idx;
u16 idx_mask;
if (IS_ERR_OR_NULL(rproc))
return -EINVAL;
if (!rproc->dev.parent || !is_pru_rproc(rproc->dev.parent))
return -ENODEV;
/* pointer is 16-bit and index is 8-bit so mask out the rest */
idx_mask = (c >= PRU_C28) ? 0xFFFF : 0xFF;
/* ctable uses bit 8 and upwards only */
idx = (addr >> 8) & idx_mask;
/* configurable ctable (i.e. C24) starts at PRU_CTRL_CTBIR0 */
reg = PRU_CTRL_CTBIR0 + 4 * (c >> 1);
mask = idx_mask << (16 * (c & 1));
set = idx << (16 * (c & 1));
pru_control_set_reg(pru, reg, mask, set);
return 0;
}
EXPORT_SYMBOL_GPL(pru_rproc_set_ctable);
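/*
* Worked example (illustrative only, assuming the usual enum ordering in
* which PRU_C24 maps to 0): a client calling
*
*    pru_rproc_set_ctable(pru, PRU_C28, 0x00100000);
*
* hits the third register pair (c == 4), so reg resolves to
* PRU_CTRL_CTBIR0 + 8, i.e. PRU_CTRL_CTPPR0, idx_mask is 0xFFFF and the
* programmed index is (0x00100000 >> 8) & 0xFFFF == 0x1000, written to the
* low halfword. An odd, low constant such as C25 (c == 1) would instead
* update the upper halfword of PRU_CTRL_CTBIR0 with an 8-bit index.
*/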
static inline u32 pru_debug_read_reg(struct pru_rproc *pru, unsigned int reg)
{
return readl_relaxed(pru->mem_regions[PRU_IOMEM_DEBUG].va + reg);
}
static int regs_show(struct seq_file *s, void *data)
{
struct rproc *rproc = s->private;
struct pru_rproc *pru = rproc->priv;
int i, nregs = 32;
u32 pru_sts;
int pru_is_running;
seq_puts(s, "============== Control Registers ==============\n");
seq_printf(s, "CTRL := 0x%08x\n",
pru_control_read_reg(pru, PRU_CTRL_CTRL));
pru_sts = pru_control_read_reg(pru, PRU_CTRL_STS);
seq_printf(s, "STS (PC) := 0x%08x (0x%08x)\n", pru_sts, pru_sts << 2);
seq_printf(s, "WAKEUP_EN := 0x%08x\n",
pru_control_read_reg(pru, PRU_CTRL_WAKEUP_EN));
seq_printf(s, "CYCLE := 0x%08x\n",
pru_control_read_reg(pru, PRU_CTRL_CYCLE));
seq_printf(s, "STALL := 0x%08x\n",
pru_control_read_reg(pru, PRU_CTRL_STALL));
seq_printf(s, "CTBIR0 := 0x%08x\n",
pru_control_read_reg(pru, PRU_CTRL_CTBIR0));
seq_printf(s, "CTBIR1 := 0x%08x\n",
pru_control_read_reg(pru, PRU_CTRL_CTBIR1));
seq_printf(s, "CTPPR0 := 0x%08x\n",
pru_control_read_reg(pru, PRU_CTRL_CTPPR0));
seq_printf(s, "CTPPR1 := 0x%08x\n",
pru_control_read_reg(pru, PRU_CTRL_CTPPR1));
seq_puts(s, "=============== Debug Registers ===============\n");
pru_is_running = pru_control_read_reg(pru, PRU_CTRL_CTRL) &
CTRL_CTRL_RUNSTATE;
if (pru_is_running) {
seq_puts(s, "PRU is executing, cannot print/access debug registers.\n");
return 0;
}
for (i = 0; i < nregs; i++) {
seq_printf(s, "GPREG%-2d := 0x%08x\tCT_REG%-2d := 0x%08x\n",
i, pru_debug_read_reg(pru, PRU_DEBUG_GPREG(i)),
i, pru_debug_read_reg(pru, PRU_DEBUG_CT_REG(i)));
}
return 0;
}
DEFINE_SHOW_ATTRIBUTE(regs);
/*
* Control PRU single-step mode
*
* This is a debug helper function used for controlling the single-step
* mode of the PRU. The PRU Debug registers are not accessible when the
* PRU is in RUNNING state.
*
* Writing a non-zero value sets the PRU into single-step mode irrespective
* of its previous state. The PRU's previous mode is saved only when
* single-step mode is first entered. Writing a zero value restores the PRU
* to its original mode.
*/
static int pru_rproc_debug_ss_set(void *data, u64 val)
{
struct rproc *rproc = data;
struct pru_rproc *pru = rproc->priv;
u32 reg_val;
val = val ? 1 : 0;
if (!val && !pru->dbg_single_step)
return 0;
reg_val = pru_control_read_reg(pru, PRU_CTRL_CTRL);
if (val && !pru->dbg_single_step)
pru->dbg_continuous = reg_val;
if (val)
reg_val |= CTRL_CTRL_SINGLE_STEP | CTRL_CTRL_EN;
else
reg_val = pru->dbg_continuous;
pru->dbg_single_step = val;
pru_control_write_reg(pru, PRU_CTRL_CTRL, reg_val);
return 0;
}
static int pru_rproc_debug_ss_get(void *data, u64 *val)
{
struct rproc *rproc = data;
struct pru_rproc *pru = rproc->priv;
*val = pru->dbg_single_step;
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(pru_rproc_debug_ss_fops, pru_rproc_debug_ss_get,
pru_rproc_debug_ss_set, "%llu\n");
/*
* Create PRU-specific debugfs entries
*
* The entries are created only if the parent remoteproc debugfs directory
* exists, and will be cleaned up by the remoteproc core.
*/
static void pru_rproc_create_debug_entries(struct rproc *rproc)
{
if (!rproc->dbg_dir)
return;
debugfs_create_file("regs", 0400, rproc->dbg_dir,
rproc, ®s_fops);
debugfs_create_file("single_step", 0600, rproc->dbg_dir,
rproc, &pru_rproc_debug_ss_fops);
}
static void pru_dispose_irq_mapping(struct pru_rproc *pru)
{
if (!pru->mapped_irq)
return;
while (pru->evt_count) {
pru->evt_count--;
if (pru->mapped_irq[pru->evt_count] > 0)
irq_dispose_mapping(pru->mapped_irq[pru->evt_count]);
}
kfree(pru->mapped_irq);
pru->mapped_irq = NULL;
}
/*
* Parse the custom PRU interrupt map resource and configure the INTC
* appropriately.
*/
static int pru_handle_intrmap(struct rproc *rproc)
{
struct device *dev = rproc->dev.parent;
struct pru_rproc *pru = rproc->priv;
struct pru_irq_rsc *rsc = pru->pru_interrupt_map;
struct irq_fwspec fwspec;
struct device_node *parent, *irq_parent;
int i, ret = 0;
/* not having pru_interrupt_map is not an error */
if (!rsc)
return 0;
/* currently supporting only type 0 */
if (rsc->type != 0) {
dev_err(dev, "unsupported rsc type: %d\n", rsc->type);
return -EINVAL;
}
if (rsc->num_evts > MAX_PRU_SYS_EVENTS)
return -EINVAL;
if (sizeof(*rsc) + rsc->num_evts * sizeof(struct pruss_int_map) !=
pru->pru_interrupt_map_sz)
return -EINVAL;
pru->evt_count = rsc->num_evts;
pru->mapped_irq = kcalloc(pru->evt_count, sizeof(unsigned int),
GFP_KERNEL);
if (!pru->mapped_irq) {
pru->evt_count = 0;
return -ENOMEM;
}
/*
* parse and fill in system event to interrupt channel and
* channel-to-host mapping. The interrupt controller to be used
* for these mappings for a given PRU remoteproc is always its
* corresponding sibling PRUSS INTC node.
*/
parent = of_get_parent(dev_of_node(pru->dev));
if (!parent) {
kfree(pru->mapped_irq);
pru->mapped_irq = NULL;
pru->evt_count = 0;
return -ENODEV;
}
irq_parent = of_get_child_by_name(parent, "interrupt-controller");
of_node_put(parent);
if (!irq_parent) {
kfree(pru->mapped_irq);
pru->mapped_irq = NULL;
pru->evt_count = 0;
return -ENODEV;
}
fwspec.fwnode = of_node_to_fwnode(irq_parent);
fwspec.param_count = 3;
for (i = 0; i < pru->evt_count; i++) {
fwspec.param[0] = rsc->pru_intc_map[i].event;
fwspec.param[1] = rsc->pru_intc_map[i].chnl;
fwspec.param[2] = rsc->pru_intc_map[i].host;
dev_dbg(dev, "mapping%d: event %d, chnl %d, host %d\n",
i, fwspec.param[0], fwspec.param[1], fwspec.param[2]);
pru->mapped_irq[i] = irq_create_fwspec_mapping(&fwspec);
if (!pru->mapped_irq[i]) {
dev_err(dev, "failed to get virq for fw mapping %d: event %d chnl %d host %d\n",
i, fwspec.param[0], fwspec.param[1],
fwspec.param[2]);
ret = -EINVAL;
goto map_fail;
}
}
of_node_put(irq_parent);
return ret;
map_fail:
pru_dispose_irq_mapping(pru);
of_node_put(irq_parent);
return ret;
}
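/*
* Illustrative sketch (not part of this driver): the shape of the data this
* parser expects from the firmware's .pru_irq_map section, i.e. a type-0
* header with num_evts entries followed by event/chnl/host triplets (struct
* pruss_int_map, as referenced above). Firmware normally emits this section
* from its own build system; the C initializer below is purely illustrative
* and the event numbers are hypothetical:
*
*    static const struct pruss_int_map my_map[] = {
*        { .event = 16, .chnl = 2, .host = 2 },
*        { .event = 17, .chnl = 3, .host = 3 },
*    };
*
* with the pru_irq_rsc header carrying .type = 0 and .num_evts = 2. Each
* triplet is turned into a Linux virq via irq_create_fwspec_mapping()
* against the sibling PRUSS INTC.
*/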
static int pru_rproc_start(struct rproc *rproc)
{
struct device *dev = &rproc->dev;
struct pru_rproc *pru = rproc->priv;
const char *names[PRU_TYPE_MAX] = { "PRU", "RTU", "Tx_PRU" };
u32 val;
int ret;
dev_dbg(dev, "starting %s%d: entry-point = 0x%llx\n",
names[pru->data->type], pru->id, (rproc->bootaddr >> 2));
ret = pru_handle_intrmap(rproc);
/*
* reset references to pru interrupt map - they will stop being valid
* after rproc_start returns
*/
pru->pru_interrupt_map = NULL;
pru->pru_interrupt_map_sz = 0;
if (ret)
return ret;
val = CTRL_CTRL_EN | ((rproc->bootaddr >> 2) << 16);
pru_control_write_reg(pru, PRU_CTRL_CTRL, val);
return 0;
}
static int pru_rproc_stop(struct rproc *rproc)
{
struct device *dev = &rproc->dev;
struct pru_rproc *pru = rproc->priv;
const char *names[PRU_TYPE_MAX] = { "PRU", "RTU", "Tx_PRU" };
u32 val;
dev_dbg(dev, "stopping %s%d\n", names[pru->data->type], pru->id);
val = pru_control_read_reg(pru, PRU_CTRL_CTRL);
val &= ~CTRL_CTRL_EN;
pru_control_write_reg(pru, PRU_CTRL_CTRL, val);
/* dispose irq mapping - new firmware can provide new mapping */
pru_dispose_irq_mapping(pru);
return 0;
}
/*
* Convert PRU device address (data spaces only) to kernel virtual address.
*
* Each PRU has access to all data memories within the PRUSS, accessible at
* different ranges. So, look through both its primary and secondary Data
* RAMs as well as any shared Data RAM to convert a PRU device address to
* kernel virtual address. Data RAM0 is primary Data RAM for PRU0 and Data
* RAM1 is primary Data RAM for PRU1.
*/
static void *pru_d_da_to_va(struct pru_rproc *pru, u32 da, size_t len)
{
struct pruss_mem_region dram0, dram1, shrd_ram;
struct pruss *pruss = pru->pruss;
u32 offset;
void *va = NULL;
if (len == 0)
return NULL;
dram0 = pruss->mem_regions[PRUSS_MEM_DRAM0];
dram1 = pruss->mem_regions[PRUSS_MEM_DRAM1];
/* PRU1 has its local RAM addresses reversed */
if (pru->id == PRUSS_PRU1)
swap(dram0, dram1);
shrd_ram = pruss->mem_regions[PRUSS_MEM_SHRD_RAM2];
if (da + len <= PRU_PDRAM_DA + dram0.size) {
offset = da - PRU_PDRAM_DA;
va = (__force void *)(dram0.va + offset);
} else if (da >= PRU_SDRAM_DA &&
da + len <= PRU_SDRAM_DA + dram1.size) {
offset = da - PRU_SDRAM_DA;
va = (__force void *)(dram1.va + offset);
} else if (da >= PRU_SHRDRAM_DA &&
da + len <= PRU_SHRDRAM_DA + shrd_ram.size) {
offset = da - PRU_SHRDRAM_DA;
va = (__force void *)(shrd_ram.va + offset);
}
return va;
}
/*
* Convert PRU device address (instruction space) to kernel virtual address.
*
* A PRU does not have a unified address space. Each PRU has its own private
* Instruction RAM, whose device address is identical to that of its primary
* Data RAM.
*/
static void *pru_i_da_to_va(struct pru_rproc *pru, u32 da, size_t len)
{
u32 offset;
void *va = NULL;
if (len == 0)
return NULL;
/*
* GNU binutils do not support multiple address spaces. The GNU
* linker's default linker script places IRAM at an arbitrarily high
* offset, in order to differentiate it from DRAM. Hence we need to
* strip the artificial offset in the IRAM addresses coming from the
* ELF file.
*
* The TI proprietary linker would never set those higher IRAM address
* bits anyway. PRU architecture limits the program counter to 16-bit
* word-address range. This in turn corresponds to 18-bit IRAM
* byte-address range for ELF.
*
* Two more bits are added just in case to make the final 20-bit mask.
* Idea is to have a safeguard in case TI decides to add banking
* in future SoCs.
*/
da &= 0xfffff;
if (da + len <= PRU_IRAM_DA + pru->mem_regions[PRU_IOMEM_IRAM].size) {
offset = da - PRU_IRAM_DA;
va = (__force void *)(pru->mem_regions[PRU_IOMEM_IRAM].va +
offset);
}
return va;
}
/*
* Provide address translations only for PRU Data RAMs through the remoteproc
* core for any PRU client drivers. The PRU Instruction RAM access is restricted
* only to the PRU loader code.
*/
static void *pru_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct pru_rproc *pru = rproc->priv;
return pru_d_da_to_va(pru, da, len);
}
/* PRU-specific address translator used by PRU loader. */
static void *pru_da_to_va(struct rproc *rproc, u64 da, size_t len, bool is_iram)
{
struct pru_rproc *pru = rproc->priv;
void *va;
if (is_iram)
va = pru_i_da_to_va(pru, da, len);
else
va = pru_d_da_to_va(pru, da, len);
return va;
}
static struct rproc_ops pru_rproc_ops = {
.start = pru_rproc_start,
.stop = pru_rproc_stop,
.da_to_va = pru_rproc_da_to_va,
};
/*
* Custom memory copy implementation for ICSSG PRU/RTU/Tx_PRU Cores
*
* The ICSSG PRU/RTU/Tx_PRU cores have a memory copying issue with IRAM
* memories, that is not seen on previous generation SoCs. The data is reflected
* properly in the IRAM memories only for integer (4-byte) copies. Any unaligned
* copies result in all the other pre-existing bytes zeroed out within that
* 4-byte boundary, thereby resulting in wrong text/code in the IRAMs. Also, the
* IRAM memory port interface does not allow any 8-byte copies (as commonly used
* by ARM64 memcpy implementation) and throws an exception. The DRAM memory
* ports do not show this behavior.
*/
static int pru_rproc_memcpy(void *dest, const void *src, size_t count)
{
const u32 *s = src;
u32 *d = dest;
size_t size = count / 4;
u32 *tmp_src = NULL;
/*
* TODO: relax limitation of 4-byte aligned dest addresses and copy
* sizes
*/
if ((long)dest % 4 || count % 4)
return -EINVAL;
/* src offsets in ELF firmware image can be non-aligned */
if ((long)src % 4) {
tmp_src = kmemdup(src, count, GFP_KERNEL);
if (!tmp_src)
return -ENOMEM;
s = tmp_src;
}
while (size--)
*d++ = *s++;
kfree(tmp_src);
return 0;
}
static int
pru_rproc_load_elf_segments(struct rproc *rproc, const struct firmware *fw)
{
struct pru_rproc *pru = rproc->priv;
struct device *dev = &rproc->dev;
struct elf32_hdr *ehdr;
struct elf32_phdr *phdr;
int i, ret = 0;
const u8 *elf_data = fw->data;
ehdr = (struct elf32_hdr *)elf_data;
phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff);
/* go through the available ELF segments */
for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
u32 da = phdr->p_paddr;
u32 memsz = phdr->p_memsz;
u32 filesz = phdr->p_filesz;
u32 offset = phdr->p_offset;
bool is_iram;
void *ptr;
if (phdr->p_type != PT_LOAD || !filesz)
continue;
dev_dbg(dev, "phdr: type %d da 0x%x memsz 0x%x filesz 0x%x\n",
phdr->p_type, da, memsz, filesz);
if (filesz > memsz) {
dev_err(dev, "bad phdr filesz 0x%x memsz 0x%x\n",
filesz, memsz);
ret = -EINVAL;
break;
}
if (offset + filesz > fw->size) {
dev_err(dev, "truncated fw: need 0x%x avail 0x%zx\n",
offset + filesz, fw->size);
ret = -EINVAL;
break;
}
/* grab the kernel address for this device address */
is_iram = phdr->p_flags & PF_X;
ptr = pru_da_to_va(rproc, da, memsz, is_iram);
if (!ptr) {
dev_err(dev, "bad phdr da 0x%x mem 0x%x\n", da, memsz);
ret = -EINVAL;
break;
}
if (pru->data->is_k3) {
ret = pru_rproc_memcpy(ptr, elf_data + phdr->p_offset,
filesz);
if (ret) {
dev_err(dev, "PRU memory copy failed for da 0x%x memsz 0x%x\n",
da, memsz);
break;
}
} else {
memcpy(ptr, elf_data + phdr->p_offset, filesz);
}
/* skip the memzero logic performed by remoteproc ELF loader */
}
return ret;
}
static const void *
pru_rproc_find_interrupt_map(struct device *dev, const struct firmware *fw)
{
struct elf32_shdr *shdr, *name_table_shdr;
const char *name_table;
const u8 *elf_data = fw->data;
struct elf32_hdr *ehdr = (struct elf32_hdr *)elf_data;
u16 shnum = ehdr->e_shnum;
u16 shstrndx = ehdr->e_shstrndx;
int i;
/* first, get the section header */
shdr = (struct elf32_shdr *)(elf_data + ehdr->e_shoff);
/* compute name table section header entry in shdr array */
name_table_shdr = shdr + shstrndx;
/* finally, compute the name table section address in elf */
name_table = elf_data + name_table_shdr->sh_offset;
for (i = 0; i < shnum; i++, shdr++) {
u32 size = shdr->sh_size;
u32 offset = shdr->sh_offset;
u32 name = shdr->sh_name;
if (strcmp(name_table + name, ".pru_irq_map"))
continue;
/* make sure we have the entire irq map */
if (offset + size > fw->size || offset + size < size) {
dev_err(dev, ".pru_irq_map section truncated\n");
return ERR_PTR(-EINVAL);
}
/* make sure irq map has at least the header */
if (sizeof(struct pru_irq_rsc) > size) {
dev_err(dev, "header-less .pru_irq_map section\n");
return ERR_PTR(-EINVAL);
}
return shdr;
}
dev_dbg(dev, "no .pru_irq_map section found for this fw\n");
return NULL;
}
/*
* Use a custom parse_fw callback function for dealing with PRU firmware
* specific sections.
*
* The firmware blob can contain optional ELF sections: a .resource_table
* section and a .pru_irq_map section. The latter contains the PRUSS interrupt
* mapping description, which needs to be set up before powering on the PRU
* core. To
* avoid RAM wastage this ELF section is not mapped to any ELF segment (by the
* firmware linker) and therefore is not loaded to PRU memory.
*/
static int pru_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
{
struct device *dev = &rproc->dev;
struct pru_rproc *pru = rproc->priv;
const u8 *elf_data = fw->data;
const void *shdr;
u8 class = fw_elf_get_class(fw);
u64 sh_offset;
int ret;
/* load optional rsc table */
ret = rproc_elf_load_rsc_table(rproc, fw);
if (ret == -EINVAL)
dev_dbg(&rproc->dev, "no resource table found for this fw\n");
else if (ret)
return ret;
/* find the .pru_irq_map section; not having it is not an error */
shdr = pru_rproc_find_interrupt_map(dev, fw);
if (IS_ERR(shdr))
return PTR_ERR(shdr);
if (!shdr)
return 0;
/* preserve pointer to the PRU interrupt map together with its size */
sh_offset = elf_shdr_get_sh_offset(class, shdr);
pru->pru_interrupt_map = (struct pru_irq_rsc *)(elf_data + sh_offset);
pru->pru_interrupt_map_sz = elf_shdr_get_sh_size(class, shdr);
return 0;
}
/*
* Compute PRU id based on the IRAM addresses. The PRU IRAMs are
* always at a particular offset within the PRUSS address space.
*/
static int pru_rproc_set_id(struct pru_rproc *pru)
{
int ret = 0;
switch (pru->mem_regions[PRU_IOMEM_IRAM].pa & PRU_IRAM_ADDR_MASK) {
case TX_PRU0_IRAM_ADDR_MASK:
fallthrough;
case RTU0_IRAM_ADDR_MASK:
fallthrough;
case PRU0_IRAM_ADDR_MASK:
pru->id = PRUSS_PRU0;
break;
case TX_PRU1_IRAM_ADDR_MASK:
fallthrough;
case RTU1_IRAM_ADDR_MASK:
fallthrough;
case PRU1_IRAM_ADDR_MASK:
pru->id = PRUSS_PRU1;
break;
default:
ret = -EINVAL;
}
return ret;
}
static int pru_rproc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct platform_device *ppdev = to_platform_device(dev->parent);
struct pru_rproc *pru;
const char *fw_name;
struct rproc *rproc = NULL;
struct resource *res;
int i, ret;
const struct pru_private_data *data;
const char *mem_names[PRU_IOMEM_MAX] = { "iram", "control", "debug" };
data = of_device_get_match_data(&pdev->dev);
if (!data)
return -ENODEV;
ret = of_property_read_string(np, "firmware-name", &fw_name);
if (ret) {
dev_err(dev, "unable to retrieve firmware-name %d\n", ret);
return ret;
}
rproc = devm_rproc_alloc(dev, pdev->name, &pru_rproc_ops, fw_name,
sizeof(*pru));
if (!rproc) {
dev_err(dev, "rproc_alloc failed\n");
return -ENOMEM;
}
/* use a custom load function to deal with PRU-specific quirks */
rproc->ops->load = pru_rproc_load_elf_segments;
/* use a custom parse function to deal with PRU-specific resources */
rproc->ops->parse_fw = pru_rproc_parse_fw;
/* error recovery is not supported for PRUs */
rproc->recovery_disabled = true;
/*
* rproc_add will auto-boot the processor normally, but this is not
* desired with PRU client driven boot-flow methodology. A PRU
* application/client driver will boot the corresponding PRU
* remote-processor as part of its state machine either through the
* remoteproc sysfs interface or through the equivalent kernel API.
*/
rproc->auto_boot = false;
pru = rproc->priv;
pru->dev = dev;
pru->data = data;
pru->pruss = platform_get_drvdata(ppdev);
pru->rproc = rproc;
pru->fw_name = fw_name;
pru->client_np = NULL;
spin_lock_init(&pru->rmw_lock);
mutex_init(&pru->lock);
for (i = 0; i < ARRAY_SIZE(mem_names); i++) {
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
mem_names[i]);
pru->mem_regions[i].va = devm_ioremap_resource(dev, res);
if (IS_ERR(pru->mem_regions[i].va)) {
dev_err(dev, "failed to parse and map memory resource %d %s\n",
i, mem_names[i]);
ret = PTR_ERR(pru->mem_regions[i].va);
return ret;
}
pru->mem_regions[i].pa = res->start;
pru->mem_regions[i].size = resource_size(res);
dev_dbg(dev, "memory %8s: pa %pa size 0x%zx va %pK\n",
mem_names[i], &pru->mem_regions[i].pa,
pru->mem_regions[i].size, pru->mem_regions[i].va);
}
ret = pru_rproc_set_id(pru);
if (ret < 0)
return ret;
platform_set_drvdata(pdev, rproc);
ret = devm_rproc_add(dev, pru->rproc);
if (ret) {
dev_err(dev, "rproc_add failed: %d\n", ret);
return ret;
}
pru_rproc_create_debug_entries(rproc);
dev_dbg(dev, "PRU rproc node %pOF probed successfully\n", np);
return 0;
}
static void pru_rproc_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rproc *rproc = platform_get_drvdata(pdev);
dev_dbg(dev, "%s: removing rproc %s\n", __func__, rproc->name);
}
static const struct pru_private_data pru_data = {
.type = PRU_TYPE_PRU,
};
static const struct pru_private_data k3_pru_data = {
.type = PRU_TYPE_PRU,
.is_k3 = 1,
};
static const struct pru_private_data k3_rtu_data = {
.type = PRU_TYPE_RTU,
.is_k3 = 1,
};
static const struct pru_private_data k3_tx_pru_data = {
.type = PRU_TYPE_TX_PRU,
.is_k3 = 1,
};
static const struct of_device_id pru_rproc_match[] = {
{ .compatible = "ti,am3356-pru", .data = &pru_data },
{ .compatible = "ti,am4376-pru", .data = &pru_data },
{ .compatible = "ti,am5728-pru", .data = &pru_data },
{ .compatible = "ti,am642-pru", .data = &k3_pru_data },
{ .compatible = "ti,am642-rtu", .data = &k3_rtu_data },
{ .compatible = "ti,am642-tx-pru", .data = &k3_tx_pru_data },
{ .compatible = "ti,k2g-pru", .data = &pru_data },
{ .compatible = "ti,am654-pru", .data = &k3_pru_data },
{ .compatible = "ti,am654-rtu", .data = &k3_rtu_data },
{ .compatible = "ti,am654-tx-pru", .data = &k3_tx_pru_data },
{ .compatible = "ti,j721e-pru", .data = &k3_pru_data },
{ .compatible = "ti,j721e-rtu", .data = &k3_rtu_data },
{ .compatible = "ti,j721e-tx-pru", .data = &k3_tx_pru_data },
{ .compatible = "ti,am625-pru", .data = &k3_pru_data },
{},
};
MODULE_DEVICE_TABLE(of, pru_rproc_match);
static struct platform_driver pru_rproc_driver = {
.driver = {
.name = PRU_RPROC_DRVNAME,
.of_match_table = pru_rproc_match,
.suppress_bind_attrs = true,
},
.probe = pru_rproc_probe,
.remove_new = pru_rproc_remove,
};
module_platform_driver(pru_rproc_driver);
MODULE_AUTHOR("Suman Anna <[email protected]>");
MODULE_AUTHOR("Andrew F. Davis <[email protected]>");
MODULE_AUTHOR("Grzegorz Jaszczyk <[email protected]>");
MODULE_AUTHOR("Puranjay Mohan <[email protected]>");
MODULE_AUTHOR("Md Danish Anwar <[email protected]>");
MODULE_DESCRIPTION("PRU-ICSS Remote Processor Driver");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/remoteproc/pru_rproc.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Remote Processor Framework
*
* Copyright (C) 2011 Texas Instruments, Inc.
* Copyright (C) 2011 Google, Inc.
*
* Ohad Ben-Cohen <[email protected]>
* Brian Swetland <[email protected]>
* Mark Grosen <[email protected]>
* Fernando Guzman Lugo <[email protected]>
* Suman Anna <[email protected]>
* Robert Tivy <[email protected]>
* Armando Uribe De Leon <[email protected]>
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/panic_notifier.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/string.h>
#include <linux/debugfs.h>
#include <linux/rculist.h>
#include <linux/remoteproc.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/elf.h>
#include <linux/crc32.h>
#include <linux/of_reserved_mem.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_ring.h>
#include <asm/byteorder.h>
#include <linux/platform_device.h>
#include "remoteproc_internal.h"
#define HIGH_BITS_MASK 0xFFFFFFFF00000000ULL
static DEFINE_MUTEX(rproc_list_mutex);
static LIST_HEAD(rproc_list);
static struct notifier_block rproc_panic_nb;
typedef int (*rproc_handle_resource_t)(struct rproc *rproc,
void *, int offset, int avail);
static int rproc_alloc_carveout(struct rproc *rproc,
struct rproc_mem_entry *mem);
static int rproc_release_carveout(struct rproc *rproc,
struct rproc_mem_entry *mem);
/* Unique indices for remoteproc devices */
static DEFINE_IDA(rproc_dev_index);
static struct workqueue_struct *rproc_recovery_wq;
static const char * const rproc_crash_names[] = {
[RPROC_MMUFAULT] = "mmufault",
[RPROC_WATCHDOG] = "watchdog",
[RPROC_FATAL_ERROR] = "fatal error",
};
/* translate rproc_crash_type to string */
static const char *rproc_crash_to_string(enum rproc_crash_type type)
{
if (type < ARRAY_SIZE(rproc_crash_names))
return rproc_crash_names[type];
return "unknown";
}
/*
* This is the IOMMU fault handler we register with the IOMMU API
* (when relevant; not all remote processors access memory through
* an IOMMU).
*
* IOMMU core will invoke this handler whenever the remote processor
* will try to access an unmapped device address.
*/
static int rproc_iommu_fault(struct iommu_domain *domain, struct device *dev,
unsigned long iova, int flags, void *token)
{
struct rproc *rproc = token;
dev_err(dev, "iommu fault: da 0x%lx flags 0x%x\n", iova, flags);
rproc_report_crash(rproc, RPROC_MMUFAULT);
/*
* Let the iommu core know we're not really handling this fault;
* we just used it as a recovery trigger.
*/
return -ENOSYS;
}
static int rproc_enable_iommu(struct rproc *rproc)
{
struct iommu_domain *domain;
struct device *dev = rproc->dev.parent;
int ret;
if (!rproc->has_iommu) {
dev_dbg(dev, "iommu not present\n");
return 0;
}
domain = iommu_domain_alloc(dev->bus);
if (!domain) {
dev_err(dev, "can't alloc iommu domain\n");
return -ENOMEM;
}
iommu_set_fault_handler(domain, rproc_iommu_fault, rproc);
ret = iommu_attach_device(domain, dev);
if (ret) {
dev_err(dev, "can't attach iommu device: %d\n", ret);
goto free_domain;
}
rproc->domain = domain;
return 0;
free_domain:
iommu_domain_free(domain);
return ret;
}
static void rproc_disable_iommu(struct rproc *rproc)
{
struct iommu_domain *domain = rproc->domain;
struct device *dev = rproc->dev.parent;
if (!domain)
return;
iommu_detach_device(domain, dev);
iommu_domain_free(domain);
}
phys_addr_t rproc_va_to_pa(void *cpu_addr)
{
/*
* Return the physical address according to the virtual address location:
* - in vmalloc: if the region is ioremapped or was allocated with dma_alloc_coherent()
* - in kernel: if the region was allocated from the generic DMA memory pool
*/
if (is_vmalloc_addr(cpu_addr)) {
return page_to_phys(vmalloc_to_page(cpu_addr)) +
offset_in_page(cpu_addr);
}
WARN_ON(!virt_addr_valid(cpu_addr));
return virt_to_phys(cpu_addr);
}
EXPORT_SYMBOL(rproc_va_to_pa);
/**
* rproc_da_to_va() - lookup the kernel virtual address for a remoteproc address
* @rproc: handle of a remote processor
* @da: remoteproc device address to translate
* @len: length of the memory region @da is pointing to
* @is_iomem: optional pointer filled in to indicate if @da is iomapped memory
*
* Some remote processors will ask us to allocate them physically contiguous
* memory regions (which we call "carveouts"), and map them to specific
* device addresses (which are hardcoded in the firmware). They may also have
* dedicated memory regions internal to the processors, and use them either
* exclusively or alongside carveouts.
*
* They may then ask us to copy objects into specific device addresses (e.g.
* code/data sections) or expose us certain symbols in other device address
* (e.g. their trace buffer).
*
* This function is a helper function with which we can go over the allocated
* carveouts and translate specific device addresses to kernel virtual addresses
* so we can access the referenced memory. This function also allows
* translations to be performed on the internal remoteproc memory regions
* through a platform implementation specific da_to_va op, if present.
*
* Note: phys_to_virt(iommu_iova_to_phys(rproc->domain, da)) will work too,
* but only on kernel direct mapped RAM memory. Instead, we're just using
* here the output of the DMA API for the carveouts, which should be more
* correct.
*
* Return: a valid kernel address on success or NULL on failure
*/
void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct rproc_mem_entry *carveout;
void *ptr = NULL;
if (rproc->ops->da_to_va) {
ptr = rproc->ops->da_to_va(rproc, da, len, is_iomem);
if (ptr)
goto out;
}
list_for_each_entry(carveout, &rproc->carveouts, node) {
int offset = da - carveout->da;
/* Verify that carveout is allocated */
if (!carveout->va)
continue;
/* try next carveout if da is too small */
if (offset < 0)
continue;
/* try next carveout if da is too large */
if (offset + len > carveout->len)
continue;
ptr = carveout->va + offset;
if (is_iomem)
*is_iomem = carveout->is_iomem;
break;
}
out:
return ptr;
}
EXPORT_SYMBOL(rproc_da_to_va);
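/*
* Illustrative sketch (not part of the core): a platform driver using this
* helper to access firmware-shared memory once the device address is known
* from the resource table. The device address, struct my_shared_hdr and hdr
* variable are hypothetical.
*
*    struct my_shared_hdr hdr;
*    void *va;
*
*    va = rproc_da_to_va(rproc, 0x9f000000, sizeof(hdr), NULL);
*    if (!va)
*        return -EINVAL;
*    memcpy(&hdr, va, sizeof(hdr));
*/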
/**
* rproc_find_carveout_by_name() - lookup the carveout region by a name
* @rproc: handle of a remote processor
* @name: carveout name to find (format string)
* @...: optional parameters matching @name string
*
* A platform driver can register pre-allocated carveouts (physically
* contiguous memory regions) before rproc firmware loading and the associated
* resource table analysis. These regions may be dedicated memory regions
* internal to the coprocessor or a specified DDR region with specific
* attributes.
*
* This function is a helper function with which we can go over the
* allocated carveouts and return associated region characteristics like
* coprocessor address, length or processor virtual address.
*
* Return: a valid pointer on carveout entry on success or NULL on failure.
*/
__printf(2, 3)
struct rproc_mem_entry *
rproc_find_carveout_by_name(struct rproc *rproc, const char *name, ...)
{
va_list args;
char _name[32];
struct rproc_mem_entry *carveout, *mem = NULL;
if (!name)
return NULL;
va_start(args, name);
vsnprintf(_name, sizeof(_name), name, args);
va_end(args);
list_for_each_entry(carveout, &rproc->carveouts, node) {
/* Compare carveout and requested names */
if (!strcmp(carveout->name, _name)) {
mem = carveout;
break;
}
}
return mem;
}
/**
* rproc_check_carveout_da() - Check specified carveout da configuration
* @rproc: handle of a remote processor
* @mem: pointer to the carveout to check
* @da: area device address
* @len: associated area size
*
* This function is a helper to verify that the requested device area (the
* pair da, len) is part of the specified carveout.
* If da is not set (defined as FW_RSC_ADDR_ANY), only the requested length is
* checked.
*
* Return: 0 if carveout matches request else error
*/
static int rproc_check_carveout_da(struct rproc *rproc,
struct rproc_mem_entry *mem, u32 da, u32 len)
{
struct device *dev = &rproc->dev;
int delta;
/* Check requested resource length */
if (len > mem->len) {
dev_err(dev, "Registered carveout doesn't fit len request\n");
return -EINVAL;
}
if (da != FW_RSC_ADDR_ANY && mem->da == FW_RSC_ADDR_ANY) {
/* Address doesn't match registered carveout configuration */
return -EINVAL;
} else if (da != FW_RSC_ADDR_ANY && mem->da != FW_RSC_ADDR_ANY) {
delta = da - mem->da;
/* Check requested resource belongs to registered carveout */
if (delta < 0) {
dev_err(dev,
"Registered carveout doesn't fit da request\n");
return -EINVAL;
}
if (delta + len > mem->len) {
dev_err(dev,
"Registered carveout doesn't fit len request\n");
return -EINVAL;
}
}
return 0;
}
int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
{
struct rproc *rproc = rvdev->rproc;
struct device *dev = &rproc->dev;
struct rproc_vring *rvring = &rvdev->vring[i];
struct fw_rsc_vdev *rsc;
int ret, notifyid;
struct rproc_mem_entry *mem;
size_t size;
/* actual size of vring (in bytes) */
size = PAGE_ALIGN(vring_size(rvring->num, rvring->align));
rsc = (void *)rproc->table_ptr + rvdev->rsc_offset;
/* Search for pre-registered carveout */
mem = rproc_find_carveout_by_name(rproc, "vdev%dvring%d", rvdev->index,
i);
if (mem) {
if (rproc_check_carveout_da(rproc, mem, rsc->vring[i].da, size))
return -ENOMEM;
} else {
/* Register carveout in list */
mem = rproc_mem_entry_init(dev, NULL, 0,
size, rsc->vring[i].da,
rproc_alloc_carveout,
rproc_release_carveout,
"vdev%dvring%d",
rvdev->index, i);
if (!mem) {
dev_err(dev, "Can't allocate memory entry structure\n");
return -ENOMEM;
}
rproc_add_carveout(rproc, mem);
}
/*
* Assign an rproc-wide unique index for this vring
* TODO: assign a notifyid for rvdev updates as well
* TODO: support predefined notifyids (via resource table)
*/
ret = idr_alloc(&rproc->notifyids, rvring, 0, 0, GFP_KERNEL);
if (ret < 0) {
dev_err(dev, "idr_alloc failed: %d\n", ret);
return ret;
}
notifyid = ret;
/* Potentially bump max_notifyid */
if (notifyid > rproc->max_notifyid)
rproc->max_notifyid = notifyid;
rvring->notifyid = notifyid;
/* Let the rproc know the notifyid of this vring.*/
rsc->vring[i].notifyid = notifyid;
return 0;
}
int
rproc_parse_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i)
{
struct rproc *rproc = rvdev->rproc;
struct device *dev = &rproc->dev;
struct fw_rsc_vdev_vring *vring = &rsc->vring[i];
struct rproc_vring *rvring = &rvdev->vring[i];
dev_dbg(dev, "vdev rsc: vring%d: da 0x%x, qsz %d, align %d\n",
i, vring->da, vring->num, vring->align);
/* verify queue size and vring alignment are sane */
if (!vring->num || !vring->align) {
dev_err(dev, "invalid qsz (%d) or alignment (%d)\n",
vring->num, vring->align);
return -EINVAL;
}
rvring->num = vring->num;
rvring->align = vring->align;
rvring->rvdev = rvdev;
return 0;
}
void rproc_free_vring(struct rproc_vring *rvring)
{
struct rproc *rproc = rvring->rvdev->rproc;
int idx = rvring - rvring->rvdev->vring;
struct fw_rsc_vdev *rsc;
idr_remove(&rproc->notifyids, rvring->notifyid);
/*
* At this point rproc_stop() has been called and the installed resource
* table in the remote processor memory may no longer be accessible. As
* such and as per rproc_stop(), rproc->table_ptr points to the cached
* resource table (rproc->cached_table). The cached resource table is
* only available when a remote processor has been booted by the
* remoteproc core, otherwise it is NULL.
*
* Based on the above, reset the virtio device section in the cached
* resource table only if there is one to work with.
*/
if (rproc->table_ptr) {
rsc = (void *)rproc->table_ptr + rvring->rvdev->rsc_offset;
rsc->vring[idx].da = 0;
rsc->vring[idx].notifyid = -1;
}
}
void rproc_add_rvdev(struct rproc *rproc, struct rproc_vdev *rvdev)
{
if (rvdev && rproc)
list_add_tail(&rvdev->node, &rproc->rvdevs);
}
void rproc_remove_rvdev(struct rproc_vdev *rvdev)
{
if (rvdev)
list_del(&rvdev->node);
}
/**
* rproc_handle_vdev() - handle a vdev fw resource
* @rproc: the remote processor
* @ptr: the vring resource descriptor
* @offset: offset of the resource entry
* @avail: size of available data (for sanity checking the image)
*
* This resource entry requests the host to statically register a virtio
* device (vdev), and setup everything needed to support it. It contains
* everything needed to make it possible: the virtio device id, virtio
* device features, vrings information, virtio config space, etc...
*
* Before registering the vdev, the vrings are allocated from non-cacheable
* physically contiguous memory. Currently we only support two vrings per
* remote processor (temporary limitation). We might also want to consider
* doing the vring allocation only later when ->find_vqs() is invoked, and
* then release them upon ->del_vqs().
*
* Note: @da is currently not really handled correctly: we dynamically
* allocate it using the DMA API, ignoring requested hard coded addresses,
* and we don't take care of any required IOMMU programming. This is all
* going to be taken care of when the generic iommu-based DMA API will be
* merged. Meanwhile, statically-addressed iommu-based firmware images should
* use RSC_DEVMEM resource entries to map their required @da to the physical
* address of their base CMA region (ouch, hacky!).
*
* Return: 0 on success, or an appropriate error code otherwise
*/
static int rproc_handle_vdev(struct rproc *rproc, void *ptr,
int offset, int avail)
{
struct fw_rsc_vdev *rsc = ptr;
struct device *dev = &rproc->dev;
struct rproc_vdev *rvdev;
size_t rsc_size;
struct rproc_vdev_data rvdev_data;
struct platform_device *pdev;
/* make sure resource isn't truncated */
rsc_size = struct_size(rsc, vring, rsc->num_of_vrings);
if (size_add(rsc_size, rsc->config_len) > avail) {
dev_err(dev, "vdev rsc is truncated\n");
return -EINVAL;
}
/* make sure reserved bytes are zeroes */
if (rsc->reserved[0] || rsc->reserved[1]) {
dev_err(dev, "vdev rsc has non zero reserved bytes\n");
return -EINVAL;
}
dev_dbg(dev, "vdev rsc: id %d, dfeatures 0x%x, cfg len %d, %d vrings\n",
rsc->id, rsc->dfeatures, rsc->config_len, rsc->num_of_vrings);
/* we currently support only two vrings per rvdev */
if (rsc->num_of_vrings > ARRAY_SIZE(rvdev->vring)) {
dev_err(dev, "too many vrings: %d\n", rsc->num_of_vrings);
return -EINVAL;
}
rvdev_data.id = rsc->id;
rvdev_data.index = rproc->nb_vdev++;
rvdev_data.rsc_offset = offset;
rvdev_data.rsc = rsc;
/*
* When there is more than one remote processor, rproc->nb_vdev is the same
* for each separate instance of "rproc". If rvdev_data.index were used as the
* device id, we would get duplication in sysfs, so we need to use
* PLATFORM_DEVID_AUTO to auto-select the device id.
*/
pdev = platform_device_register_data(dev, "rproc-virtio", PLATFORM_DEVID_AUTO, &rvdev_data,
sizeof(rvdev_data));
if (IS_ERR(pdev)) {
dev_err(dev, "failed to create rproc-virtio device\n");
return PTR_ERR(pdev);
}
return 0;
}
/**
* rproc_handle_trace() - handle a shared trace buffer resource
* @rproc: the remote processor
* @ptr: the trace resource descriptor
* @offset: offset of the resource entry
* @avail: size of available data (for sanity checking the image)
*
* In case the remote processor dumps trace logs into memory,
* export it via debugfs.
*
* Currently, the 'da' member of @rsc should contain the device address
* where the remote processor is dumping the traces. Later we could also
* support dynamically allocating this address using the generic
* DMA API (but currently there isn't a use case for that).
*
* Return: 0 on success, or an appropriate error code otherwise
*/
static int rproc_handle_trace(struct rproc *rproc, void *ptr,
int offset, int avail)
{
struct fw_rsc_trace *rsc = ptr;
struct rproc_debug_trace *trace;
struct device *dev = &rproc->dev;
char name[15];
if (sizeof(*rsc) > avail) {
dev_err(dev, "trace rsc is truncated\n");
return -EINVAL;
}
/* make sure reserved bytes are zeroes */
if (rsc->reserved) {
dev_err(dev, "trace rsc has non zero reserved bytes\n");
return -EINVAL;
}
trace = kzalloc(sizeof(*trace), GFP_KERNEL);
if (!trace)
return -ENOMEM;
/* set the trace buffer dma properties */
trace->trace_mem.len = rsc->len;
trace->trace_mem.da = rsc->da;
/* set pointer to the rproc device */
trace->rproc = rproc;
/* make sure snprintf always null terminates, even if truncating */
snprintf(name, sizeof(name), "trace%d", rproc->num_traces);
/* create the debugfs entry */
trace->tfile = rproc_create_trace_file(name, rproc, trace);
list_add_tail(&trace->node, &rproc->traces);
rproc->num_traces++;
dev_dbg(dev, "%s added: da 0x%x, len 0x%x\n",
name, rsc->da, rsc->len);
return 0;
}
/**
* rproc_handle_devmem() - handle devmem resource entry
* @rproc: remote processor handle
* @ptr: the devmem resource entry
* @offset: offset of the resource entry
* @avail: size of available data (for sanity checking the image)
*
* Remote processors commonly need to access certain on-chip peripherals.
*
* Some of these remote processors access memory via an iommu device,
* and might require us to configure their iommu before they can access
* the on-chip peripherals they need.
*
* This resource entry is a request to map such a peripheral device.
*
* These devmem entries will contain the physical address of the device in
* the 'pa' member. If a specific device address is expected, then 'da' will
* contain it (currently this is the only use case supported). 'len' will
* contain the size of the physical region we need to map.
*
* Currently we just "trust" those devmem entries to contain valid physical
* addresses, but this is going to change: we want the implementations to
* tell us ranges of physical addresses the firmware is allowed to request,
* and not allow firmwares to request access to physical addresses that
* are outside those ranges.
*
* Return: 0 on success, or an appropriate error code otherwise
*/
static int rproc_handle_devmem(struct rproc *rproc, void *ptr,
int offset, int avail)
{
struct fw_rsc_devmem *rsc = ptr;
struct rproc_mem_entry *mapping;
struct device *dev = &rproc->dev;
int ret;
/* no point in handling this resource without a valid iommu domain */
if (!rproc->domain)
return -EINVAL;
if (sizeof(*rsc) > avail) {
dev_err(dev, "devmem rsc is truncated\n");
return -EINVAL;
}
/* make sure reserved bytes are zeroes */
if (rsc->reserved) {
dev_err(dev, "devmem rsc has non zero reserved bytes\n");
return -EINVAL;
}
mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
if (!mapping)
return -ENOMEM;
ret = iommu_map(rproc->domain, rsc->da, rsc->pa, rsc->len, rsc->flags,
GFP_KERNEL);
if (ret) {
dev_err(dev, "failed to map devmem: %d\n", ret);
goto out;
}
/*
* We'll need this info later when we'll want to unmap everything
* (e.g. on shutdown).
*
* We can't trust the remote processor not to change the resource
* table, so we must maintain this info independently.
*/
mapping->da = rsc->da;
mapping->len = rsc->len;
list_add_tail(&mapping->node, &rproc->mappings);
dev_dbg(dev, "mapped devmem pa 0x%x, da 0x%x, len 0x%x\n",
rsc->pa, rsc->da, rsc->len);
return 0;
out:
kfree(mapping);
return ret;
}
/**
* rproc_alloc_carveout() - allocate the specified carveout
* @rproc: rproc handle
* @mem: the memory entry to allocate
*
* This function allocates the specified memory entry @mem using
* dma_alloc_coherent() as the default allocator.
*
* Return: 0 on success, or an appropriate error code otherwise
*/
static int rproc_alloc_carveout(struct rproc *rproc,
struct rproc_mem_entry *mem)
{
struct rproc_mem_entry *mapping = NULL;
struct device *dev = &rproc->dev;
dma_addr_t dma;
void *va;
int ret;
va = dma_alloc_coherent(dev->parent, mem->len, &dma, GFP_KERNEL);
if (!va) {
dev_err(dev->parent,
"failed to allocate dma memory: len 0x%zx\n",
mem->len);
return -ENOMEM;
}
dev_dbg(dev, "carveout va %pK, dma %pad, len 0x%zx\n",
va, &dma, mem->len);
if (mem->da != FW_RSC_ADDR_ANY && !rproc->domain) {
/*
* Check requested da is equal to dma address
* and print a warning message in case of misalignment.
* Don't stop rproc_start sequence as coprocessor may
* build pa to da translation on its side.
*/
if (mem->da != (u32)dma)
dev_warn(dev->parent,
"Allocated carveout doesn't fit device address request\n");
}
/*
* Ok, this is non-standard.
*
* Sometimes we can't rely on the generic iommu-based DMA API
* to dynamically allocate the device address and then set the IOMMU
* tables accordingly, because some remote processors might
* _require_ us to use hard coded device addresses that their
* firmware was compiled with.
*
* In this case, we must use the IOMMU API directly and map
* the memory to the device address as expected by the remote
* processor.
*
* Obviously such remote processor devices should not be configured
* to use the iommu-based DMA API: we expect 'dma' to contain the
* physical address in this case.
*/
if (mem->da != FW_RSC_ADDR_ANY && rproc->domain) {
mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
if (!mapping) {
ret = -ENOMEM;
goto dma_free;
}
ret = iommu_map(rproc->domain, mem->da, dma, mem->len,
mem->flags, GFP_KERNEL);
if (ret) {
dev_err(dev, "iommu_map failed: %d\n", ret);
goto free_mapping;
}
/*
* We'll need this info later when we'll want to unmap
* everything (e.g. on shutdown).
*
* We can't trust the remote processor not to change the
* resource table, so we must maintain this info independently.
*/
mapping->da = mem->da;
mapping->len = mem->len;
list_add_tail(&mapping->node, &rproc->mappings);
dev_dbg(dev, "carveout mapped 0x%x to %pad\n",
mem->da, &dma);
}
if (mem->da == FW_RSC_ADDR_ANY) {
/* Update device address as undefined by requester */
if ((u64)dma & HIGH_BITS_MASK)
dev_warn(dev, "DMA address cast in 32bit to fit resource table format\n");
mem->da = (u32)dma;
}
mem->dma = dma;
mem->va = va;
return 0;
free_mapping:
kfree(mapping);
dma_free:
dma_free_coherent(dev->parent, mem->len, va, dma);
return ret;
}
/**
* rproc_release_carveout() - release acquired carveout
* @rproc: rproc handle
* @mem: the memory entry to release
*
* This function releases the specified memory entry @mem that was allocated
* via rproc_alloc_carveout() by @rproc.
*
* Return: 0 on success, or an appropriate error code otherwise
*/
static int rproc_release_carveout(struct rproc *rproc,
struct rproc_mem_entry *mem)
{
struct device *dev = &rproc->dev;
/* clean up carveout allocations */
dma_free_coherent(dev->parent, mem->len, mem->va, mem->dma);
return 0;
}
/**
* rproc_handle_carveout() - handle phys contig memory allocation requests
* @rproc: rproc handle
* @ptr: the resource entry
* @offset: offset of the resource entry
* @avail: size of available data (for image validation)
*
* This function will handle firmware requests for allocation of physically
* contiguous memory regions.
*
* These request entries should come first in the firmware's resource table,
* as other firmware entries might request placing other data objects inside
* these memory regions (e.g. data/code segments, trace resource entries, ...).
*
* Allocating memory this way helps utilizing the reserved physical memory
* (e.g. CMA) more efficiently, and also minimizes the number of TLB entries
* needed to map it (in case @rproc is using an IOMMU). Reducing the TLB
* pressure is important; it may have a substantial impact on performance.
*
* Return: 0 on success, or an appropriate error code otherwise
*/
static int rproc_handle_carveout(struct rproc *rproc,
void *ptr, int offset, int avail)
{
struct fw_rsc_carveout *rsc = ptr;
struct rproc_mem_entry *carveout;
struct device *dev = &rproc->dev;
if (sizeof(*rsc) > avail) {
dev_err(dev, "carveout rsc is truncated\n");
return -EINVAL;
}
/* make sure reserved bytes are zeroes */
if (rsc->reserved) {
dev_err(dev, "carveout rsc has non zero reserved bytes\n");
return -EINVAL;
}
dev_dbg(dev, "carveout rsc: name: %s, da 0x%x, pa 0x%x, len 0x%x, flags 0x%x\n",
rsc->name, rsc->da, rsc->pa, rsc->len, rsc->flags);
/*
* Check whether the carveout rsc is already part of a registered carveout:
* search by name, then check the da and length
*/
carveout = rproc_find_carveout_by_name(rproc, rsc->name);
if (carveout) {
if (carveout->rsc_offset != FW_RSC_ADDR_ANY) {
dev_err(dev,
"Carveout already associated to resource table\n");
return -ENOMEM;
}
if (rproc_check_carveout_da(rproc, carveout, rsc->da, rsc->len))
return -ENOMEM;
/* Update memory carveout with resource table info */
carveout->rsc_offset = offset;
carveout->flags = rsc->flags;
return 0;
}
/* Register carveout in list */
carveout = rproc_mem_entry_init(dev, NULL, 0, rsc->len, rsc->da,
rproc_alloc_carveout,
rproc_release_carveout, rsc->name);
if (!carveout) {
dev_err(dev, "Can't allocate memory entry structure\n");
return -ENOMEM;
}
carveout->flags = rsc->flags;
carveout->rsc_offset = offset;
rproc_add_carveout(rproc, carveout);
return 0;
}
/**
* rproc_add_carveout() - register an allocated carveout region
* @rproc: rproc handle
* @mem: memory entry to register
*
* This function registers the specified memory entry in the @rproc carveouts
* list. The carveout should have been allocated before being registered.
*/
void rproc_add_carveout(struct rproc *rproc, struct rproc_mem_entry *mem)
{
list_add_tail(&mem->node, &rproc->carveouts);
}
EXPORT_SYMBOL(rproc_add_carveout);
/**
* rproc_mem_entry_init() - allocate and initialize rproc_mem_entry struct
* @dev: pointer to the device struct
* @va: virtual address
* @dma: dma address
* @len: memory carveout length
* @da: device address
* @alloc: memory carveout allocation function
* @release: memory carveout release function
* @name: carveout name
*
* This function allocates a rproc_mem_entry struct and fills it with the
* parameters provided by the client.
*
* Return: a valid pointer on success, or NULL on failure
*/
__printf(8, 9)
struct rproc_mem_entry *
rproc_mem_entry_init(struct device *dev,
void *va, dma_addr_t dma, size_t len, u32 da,
int (*alloc)(struct rproc *, struct rproc_mem_entry *),
int (*release)(struct rproc *, struct rproc_mem_entry *),
const char *name, ...)
{
struct rproc_mem_entry *mem;
va_list args;
mem = kzalloc(sizeof(*mem), GFP_KERNEL);
if (!mem)
return mem;
mem->va = va;
mem->dma = dma;
mem->da = da;
mem->len = len;
mem->alloc = alloc;
mem->release = release;
mem->rsc_offset = FW_RSC_ADDR_ANY;
mem->of_resm_idx = -1;
va_start(args, name);
vsnprintf(mem->name, sizeof(mem->name), name, args);
va_end(args);
return mem;
}
EXPORT_SYMBOL(rproc_mem_entry_init);
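/*
* Illustrative sketch (not part of the core): a platform driver registering
* a fixed carveout before rproc_add(), so that a matching "my_region" entry
* in the firmware resource table reuses it instead of triggering a dynamic
* allocation. The name, addresses and the my_alloc/my_release callbacks
* (with the alloc/release signature used above) are hypothetical.
*
*    struct rproc_mem_entry *mem;
*
*    mem = rproc_mem_entry_init(dev, NULL, 0, SZ_1M, 0x10000000,
*                               my_alloc, my_release, "my_region");
*    if (!mem)
*        return -ENOMEM;
*
*    rproc_add_carveout(rproc, mem);
*/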
/**
* rproc_of_resm_mem_entry_init() - allocate and initialize rproc_mem_entry struct
* from a reserved memory phandle
* @dev: pointer to the device struct
* @of_resm_idx: reserved memory phandle index in "memory-region"
* @len: memory carveout length
* @da: device address
* @name: carveout name
*
* This function allocates a rproc_mem_entry struct and fills it with the
* parameters provided by the client.
*
* Return: a valid pointer on success, or NULL on failure
*/
__printf(5, 6)
struct rproc_mem_entry *
rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, size_t len,
u32 da, const char *name, ...)
{
struct rproc_mem_entry *mem;
va_list args;
mem = kzalloc(sizeof(*mem), GFP_KERNEL);
if (!mem)
return mem;
mem->da = da;
mem->len = len;
mem->rsc_offset = FW_RSC_ADDR_ANY;
mem->of_resm_idx = of_resm_idx;
va_start(args, name);
vsnprintf(mem->name, sizeof(mem->name), name, args);
va_end(args);
return mem;
}
EXPORT_SYMBOL(rproc_of_resm_mem_entry_init);
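/*
* Illustrative sketch (not part of the core): pairing this helper with a
* "memory-region" reserved-memory reference, as several platform drivers do.
* of_reserved_mem_lookup() from linux/of_reserved_mem.h is assumed to be
* available; the index and carveout name are examples only.
*
*    struct device_node *node;
*    struct reserved_mem *rmem;
*    struct rproc_mem_entry *mem;
*
*    node = of_parse_phandle(dev->of_node, "memory-region", 0);
*    rmem = of_reserved_mem_lookup(node);
*    of_node_put(node);
*    if (!rmem)
*        return -EINVAL;
*
*    mem = rproc_of_resm_mem_entry_init(dev, 0, rmem->size, rmem->base,
*                                       "vdev0buffer");
*    if (!mem)
*        return -ENOMEM;
*    rproc_add_carveout(rproc, mem);
*/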
/**
* rproc_of_parse_firmware() - parse and return the firmware-name
* @dev: pointer to the device struct representing a rproc
* @index: index to use for the firmware-name retrieval
* @fw_name: pointer to a character string, in which the firmware
* name is returned on success and unmodified otherwise.
*
* This is an OF helper function that parses a device's DT node for
* the "firmware-name" property and returns the firmware name pointer
* in @fw_name on success.
*
* Return: 0 on success, or an appropriate failure.
*/
int rproc_of_parse_firmware(struct device *dev, int index, const char **fw_name)
{
int ret;
ret = of_property_read_string_index(dev->of_node, "firmware-name",
index, fw_name);
return ret ? ret : 0;
}
EXPORT_SYMBOL(rproc_of_parse_firmware);
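/*
* Illustrative sketch (not part of the core): typical use from a platform
* driver probe, feeding the DT-provided firmware name straight into
* devm_rproc_alloc(). my_rproc_ops and struct my_rproc_priv are hypothetical.
*
*    const char *fw_name;
*    struct rproc *rproc;
*    int ret;
*
*    ret = rproc_of_parse_firmware(dev, 0, &fw_name);
*    if (ret)
*        return dev_err_probe(dev, ret, "failed to parse firmware-name\n");
*
*    rproc = devm_rproc_alloc(dev, dev_name(dev), &my_rproc_ops, fw_name,
*                             sizeof(struct my_rproc_priv));
*    if (!rproc)
*        return -ENOMEM;
*/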
/*
* A lookup table for resource handlers. The indices are defined in
* enum fw_resource_type.
*/
static rproc_handle_resource_t rproc_loading_handlers[RSC_LAST] = {
[RSC_CARVEOUT] = rproc_handle_carveout,
[RSC_DEVMEM] = rproc_handle_devmem,
[RSC_TRACE] = rproc_handle_trace,
[RSC_VDEV] = rproc_handle_vdev,
};
/* handle firmware resource entries before booting the remote processor */
static int rproc_handle_resources(struct rproc *rproc,
rproc_handle_resource_t handlers[RSC_LAST])
{
struct device *dev = &rproc->dev;
rproc_handle_resource_t handler;
int ret = 0, i;
if (!rproc->table_ptr)
return 0;
for (i = 0; i < rproc->table_ptr->num; i++) {
int offset = rproc->table_ptr->offset[i];
struct fw_rsc_hdr *hdr = (void *)rproc->table_ptr + offset;
int avail = rproc->table_sz - offset - sizeof(*hdr);
void *rsc = (void *)hdr + sizeof(*hdr);
/* make sure table isn't truncated */
if (avail < 0) {
dev_err(dev, "rsc table is truncated\n");
return -EINVAL;
}
dev_dbg(dev, "rsc: type %d\n", hdr->type);
if (hdr->type >= RSC_VENDOR_START &&
hdr->type <= RSC_VENDOR_END) {
ret = rproc_handle_rsc(rproc, hdr->type, rsc,
offset + sizeof(*hdr), avail);
if (ret == RSC_HANDLED)
continue;
else if (ret < 0)
break;
dev_warn(dev, "unsupported vendor resource %d\n",
hdr->type);
continue;
}
if (hdr->type >= RSC_LAST) {
dev_warn(dev, "unsupported resource %d\n", hdr->type);
continue;
}
handler = handlers[hdr->type];
if (!handler)
continue;
ret = handler(rproc, rsc, offset + sizeof(*hdr), avail);
if (ret)
break;
}
return ret;
}
static int rproc_prepare_subdevices(struct rproc *rproc)
{
struct rproc_subdev *subdev;
int ret;
list_for_each_entry(subdev, &rproc->subdevs, node) {
if (subdev->prepare) {
ret = subdev->prepare(subdev);
if (ret)
goto unroll_preparation;
}
}
return 0;
unroll_preparation:
list_for_each_entry_continue_reverse(subdev, &rproc->subdevs, node) {
if (subdev->unprepare)
subdev->unprepare(subdev);
}
return ret;
}
static int rproc_start_subdevices(struct rproc *rproc)
{
struct rproc_subdev *subdev;
int ret;
list_for_each_entry(subdev, &rproc->subdevs, node) {
if (subdev->start) {
ret = subdev->start(subdev);
if (ret)
goto unroll_registration;
}
}
return 0;
unroll_registration:
list_for_each_entry_continue_reverse(subdev, &rproc->subdevs, node) {
if (subdev->stop)
subdev->stop(subdev, true);
}
return ret;
}
static void rproc_stop_subdevices(struct rproc *rproc, bool crashed)
{
struct rproc_subdev *subdev;
list_for_each_entry_reverse(subdev, &rproc->subdevs, node) {
if (subdev->stop)
subdev->stop(subdev, crashed);
}
}
static void rproc_unprepare_subdevices(struct rproc *rproc)
{
struct rproc_subdev *subdev;
list_for_each_entry_reverse(subdev, &rproc->subdevs, node) {
if (subdev->unprepare)
subdev->unprepare(subdev);
}
}
/**
* rproc_alloc_registered_carveouts() - allocate all carveouts registered
* in the list
* @rproc: the remote processor handle
*
* This function parses the registered carveout list, performs allocation
* if an alloc() op is registered, and updates the resource table information
* if rsc_offset is set.
*
* Return: 0 on success
*/
static int rproc_alloc_registered_carveouts(struct rproc *rproc)
{
struct rproc_mem_entry *entry, *tmp;
struct fw_rsc_carveout *rsc;
struct device *dev = &rproc->dev;
u64 pa;
int ret;
list_for_each_entry_safe(entry, tmp, &rproc->carveouts, node) {
if (entry->alloc) {
ret = entry->alloc(rproc, entry);
if (ret) {
dev_err(dev, "Unable to allocate carveout %s: %d\n",
entry->name, ret);
return -ENOMEM;
}
}
if (entry->rsc_offset != FW_RSC_ADDR_ANY) {
/* update resource table */
rsc = (void *)rproc->table_ptr + entry->rsc_offset;
/*
* Some remote processors might need to know the pa
* even though they are behind an IOMMU. E.g., OMAP4's
* remote M3 processor needs this so it can control
* on-chip hardware accelerators that are not behind
* the IOMMU, and therefore must know the pa.
*
* Generally we don't want to expose physical addresses
* if we don't have to (remote processors are generally
* _not_ trusted), so we might want to do this only for
* remote processors that _must_ have this (e.g. OMAP4's
* dual M3 subsystem).
*
* Non-IOMMU processors might also want to have this info.
* In this case, the device address and the physical address
* are the same.
*/
/* Use va if defined else dma to generate pa */
if (entry->va)
pa = (u64)rproc_va_to_pa(entry->va);
else
pa = (u64)entry->dma;
if (((u64)pa) & HIGH_BITS_MASK)
dev_warn(dev,
"Physical address cast in 32bit to fit resource table format\n");
rsc->pa = (u32)pa;
rsc->da = entry->da;
rsc->len = entry->len;
}
}
return 0;
}
/**
* rproc_resource_cleanup() - clean up and free all acquired resources
* @rproc: rproc handle
*
* This function will free all resources acquired for @rproc, and it
* is called whenever @rproc either shuts down or fails to boot.
*/
void rproc_resource_cleanup(struct rproc *rproc)
{
struct rproc_mem_entry *entry, *tmp;
struct rproc_debug_trace *trace, *ttmp;
struct rproc_vdev *rvdev, *rvtmp;
struct device *dev = &rproc->dev;
/* clean up debugfs trace entries */
list_for_each_entry_safe(trace, ttmp, &rproc->traces, node) {
rproc_remove_trace_file(trace->tfile);
rproc->num_traces--;
list_del(&trace->node);
kfree(trace);
}
/* clean up iommu mapping entries */
list_for_each_entry_safe(entry, tmp, &rproc->mappings, node) {
size_t unmapped;
unmapped = iommu_unmap(rproc->domain, entry->da, entry->len);
if (unmapped != entry->len) {
/* nothing much to do besides complaining */
dev_err(dev, "failed to unmap %zx/%zu\n", entry->len,
unmapped);
}
list_del(&entry->node);
kfree(entry);
}
/* clean up carveout allocations */
list_for_each_entry_safe(entry, tmp, &rproc->carveouts, node) {
if (entry->release)
entry->release(rproc, entry);
list_del(&entry->node);
kfree(entry);
}
/* clean up remote vdev entries */
list_for_each_entry_safe(rvdev, rvtmp, &rproc->rvdevs, node)
platform_device_unregister(rvdev->pdev);
rproc_coredump_cleanup(rproc);
}
EXPORT_SYMBOL(rproc_resource_cleanup);
static int rproc_start(struct rproc *rproc, const struct firmware *fw)
{
struct resource_table *loaded_table;
struct device *dev = &rproc->dev;
int ret;
/* load the ELF segments to memory */
ret = rproc_load_segments(rproc, fw);
if (ret) {
dev_err(dev, "Failed to load program segments: %d\n", ret);
return ret;
}
/*
* The starting device has been given the rproc->cached_table as the
* resource table. The address of the vring along with the other
* allocated resources (carveouts etc) is stored in cached_table.
* In order to pass this information to the remote device we must copy
* this information to device memory. We also update the table_ptr so
* that any subsequent changes will be applied to the loaded version.
*/
loaded_table = rproc_find_loaded_rsc_table(rproc, fw);
if (loaded_table) {
memcpy(loaded_table, rproc->cached_table, rproc->table_sz);
rproc->table_ptr = loaded_table;
}
ret = rproc_prepare_subdevices(rproc);
if (ret) {
dev_err(dev, "failed to prepare subdevices for %s: %d\n",
rproc->name, ret);
goto reset_table_ptr;
}
/* power up the remote processor */
ret = rproc->ops->start(rproc);
if (ret) {
dev_err(dev, "can't start rproc %s: %d\n", rproc->name, ret);
goto unprepare_subdevices;
}
/* Start any subdevices for the remote processor */
ret = rproc_start_subdevices(rproc);
if (ret) {
dev_err(dev, "failed to probe subdevices for %s: %d\n",
rproc->name, ret);
goto stop_rproc;
}
rproc->state = RPROC_RUNNING;
dev_info(dev, "remote processor %s is now up\n", rproc->name);
return 0;
stop_rproc:
rproc->ops->stop(rproc);
unprepare_subdevices:
rproc_unprepare_subdevices(rproc);
reset_table_ptr:
rproc->table_ptr = rproc->cached_table;
return ret;
}
static int __rproc_attach(struct rproc *rproc)
{
struct device *dev = &rproc->dev;
int ret;
ret = rproc_prepare_subdevices(rproc);
if (ret) {
dev_err(dev, "failed to prepare subdevices for %s: %d\n",
rproc->name, ret);
goto out;
}
/* Attach to the remote processor */
ret = rproc_attach_device(rproc);
if (ret) {
dev_err(dev, "can't attach to rproc %s: %d\n",
rproc->name, ret);
goto unprepare_subdevices;
}
/* Start any subdevices for the remote processor */
ret = rproc_start_subdevices(rproc);
if (ret) {
dev_err(dev, "failed to probe subdevices for %s: %d\n",
rproc->name, ret);
goto stop_rproc;
}
rproc->state = RPROC_ATTACHED;
dev_info(dev, "remote processor %s is now attached\n", rproc->name);
return 0;
stop_rproc:
rproc->ops->stop(rproc);
unprepare_subdevices:
rproc_unprepare_subdevices(rproc);
out:
return ret;
}
/*
* take a firmware and boot a remote processor with it.
*/
static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
{
struct device *dev = &rproc->dev;
const char *name = rproc->firmware;
int ret;
ret = rproc_fw_sanity_check(rproc, fw);
if (ret)
return ret;
dev_info(dev, "Booting fw image %s, size %zd\n", name, fw->size);
/*
* if enabling an IOMMU isn't relevant for this rproc, this is
* just a nop
*/
ret = rproc_enable_iommu(rproc);
if (ret) {
dev_err(dev, "can't enable iommu: %d\n", ret);
return ret;
}
/* Prepare rproc for firmware loading if needed */
ret = rproc_prepare_device(rproc);
if (ret) {
dev_err(dev, "can't prepare rproc %s: %d\n", rproc->name, ret);
goto disable_iommu;
}
rproc->bootaddr = rproc_get_boot_addr(rproc, fw);
/* Load resource table, core dump segment list etc from the firmware */
ret = rproc_parse_fw(rproc, fw);
if (ret)
goto unprepare_rproc;
/* reset max_notifyid */
rproc->max_notifyid = -1;
/* reset handled vdev */
rproc->nb_vdev = 0;
/* handle fw resources which are required to boot rproc */
ret = rproc_handle_resources(rproc, rproc_loading_handlers);
if (ret) {
dev_err(dev, "Failed to process resources: %d\n", ret);
goto clean_up_resources;
}
/* Allocate carveout resources associated to rproc */
ret = rproc_alloc_registered_carveouts(rproc);
if (ret) {
dev_err(dev, "Failed to allocate associated carveouts: %d\n",
ret);
goto clean_up_resources;
}
ret = rproc_start(rproc, fw);
if (ret)
goto clean_up_resources;
return 0;
clean_up_resources:
rproc_resource_cleanup(rproc);
kfree(rproc->cached_table);
rproc->cached_table = NULL;
rproc->table_ptr = NULL;
unprepare_rproc:
/* release HW resources if needed */
rproc_unprepare_device(rproc);
disable_iommu:
rproc_disable_iommu(rproc);
return ret;
}
static int rproc_set_rsc_table(struct rproc *rproc)
{
struct resource_table *table_ptr;
struct device *dev = &rproc->dev;
size_t table_sz;
int ret;
table_ptr = rproc_get_loaded_rsc_table(rproc, &table_sz);
if (!table_ptr) {
/* Not having a resource table is acceptable */
return 0;
}
if (IS_ERR(table_ptr)) {
ret = PTR_ERR(table_ptr);
dev_err(dev, "can't load resource table: %d\n", ret);
return ret;
}
/*
* If it is possible to detach the remote processor, keep an untouched
* copy of the resource table. That way we can start fresh again when
* the remote processor is re-attached, that is:
*
* DETACHED -> ATTACHED -> DETACHED -> ATTACHED
*
* Free'd in rproc_reset_rsc_table_on_detach() and
* rproc_reset_rsc_table_on_stop().
*/
if (rproc->ops->detach) {
rproc->clean_table = kmemdup(table_ptr, table_sz, GFP_KERNEL);
if (!rproc->clean_table)
return -ENOMEM;
} else {
rproc->clean_table = NULL;
}
rproc->cached_table = NULL;
rproc->table_ptr = table_ptr;
rproc->table_sz = table_sz;
return 0;
}
static int rproc_reset_rsc_table_on_detach(struct rproc *rproc)
{
struct resource_table *table_ptr;
/* A resource table was never retrieved, nothing to do here */
if (!rproc->table_ptr)
return 0;
/*
* If we made it to this point a clean_table _must_ have been
* allocated in rproc_set_rsc_table(). If one isn't present
* something went really wrong and we must complain.
*/
if (WARN_ON(!rproc->clean_table))
return -EINVAL;
/* Remember where the external entity installed the resource table */
table_ptr = rproc->table_ptr;
/*
* If we made it here the remote processor was started by another
* entity and a cache table doesn't exist. As such make a copy of
* the resource table currently used by the remote processor and
* use that for the rest of the shutdown process. The memory
* allocated here is free'd in rproc_detach().
*/
rproc->cached_table = kmemdup(rproc->table_ptr,
rproc->table_sz, GFP_KERNEL);
if (!rproc->cached_table)
return -ENOMEM;
/*
* Use a copy of the resource table for the remainder of the
* shutdown process.
*/
rproc->table_ptr = rproc->cached_table;
/*
* Reset the memory area where the firmware loaded the resource table
* to its original value. That way when we re-attach the remote
* processor the resource table is clean and ready to be used again.
*/
memcpy(table_ptr, rproc->clean_table, rproc->table_sz);
/*
* The clean resource table is no longer needed. Allocated in
* rproc_set_rsc_table().
*/
kfree(rproc->clean_table);
return 0;
}
static int rproc_reset_rsc_table_on_stop(struct rproc *rproc)
{
/* A resource table was never retrieved, nothing to do here */
if (!rproc->table_ptr)
return 0;
/*
* If a cache table exists the remote processor was started by
* the remoteproc core. That cache table should be used for
* the rest of the shutdown process.
*/
if (rproc->cached_table)
goto out;
/*
* If we made it here the remote processor was started by another
* entity and a cache table doesn't exist. As such make a copy of
* the resource table currently used by the remote processor and
* use that for the rest of the shutdown process. The memory
* allocated here is free'd in rproc_shutdown().
*/
rproc->cached_table = kmemdup(rproc->table_ptr,
rproc->table_sz, GFP_KERNEL);
if (!rproc->cached_table)
return -ENOMEM;
/*
* Since the remote processor is being switched off the clean table
* won't be needed. Allocated in rproc_set_rsc_table().
*/
kfree(rproc->clean_table);
out:
/*
* Use a copy of the resource table for the remainder of the
* shutdown process.
*/
rproc->table_ptr = rproc->cached_table;
return 0;
}
/*
* Attach to remote processor - similar to rproc_fw_boot() but without
* the steps that deal with the firmware image.
*/
static int rproc_attach(struct rproc *rproc)
{
struct device *dev = &rproc->dev;
int ret;
/*
* if enabling an IOMMU isn't relevant for this rproc, this is
* just a nop
*/
ret = rproc_enable_iommu(rproc);
if (ret) {
dev_err(dev, "can't enable iommu: %d\n", ret);
return ret;
}
/* Do anything that is needed to boot the remote processor */
ret = rproc_prepare_device(rproc);
if (ret) {
dev_err(dev, "can't prepare rproc %s: %d\n", rproc->name, ret);
goto disable_iommu;
}
ret = rproc_set_rsc_table(rproc);
if (ret) {
dev_err(dev, "can't load resource table: %d\n", ret);
goto unprepare_device;
}
/* reset max_notifyid */
rproc->max_notifyid = -1;
/* reset handled vdev */
rproc->nb_vdev = 0;
/*
* Handle firmware resources required to attach to a remote processor.
* Because we are attaching rather than booting the remote processor,
* we expect the platform driver to properly set rproc->table_ptr.
*/
ret = rproc_handle_resources(rproc, rproc_loading_handlers);
if (ret) {
dev_err(dev, "Failed to process resources: %d\n", ret);
goto unprepare_device;
}
/* Allocate carveout resources associated to rproc */
ret = rproc_alloc_registered_carveouts(rproc);
if (ret) {
dev_err(dev, "Failed to allocate associated carveouts: %d\n",
ret);
goto clean_up_resources;
}
ret = __rproc_attach(rproc);
if (ret)
goto clean_up_resources;
return 0;
clean_up_resources:
rproc_resource_cleanup(rproc);
unprepare_device:
/* release HW resources if needed */
rproc_unprepare_device(rproc);
disable_iommu:
rproc_disable_iommu(rproc);
return ret;
}
/*
* take a firmware and boot it up.
*
* Note: this function is called asynchronously upon registration of the
* remote processor (so we must wait until it completes before we try
* to unregister the device. One other option is just to use kref here;
* that might be cleaner).
*/
static void rproc_auto_boot_callback(const struct firmware *fw, void *context)
{
struct rproc *rproc = context;
rproc_boot(rproc);
release_firmware(fw);
}
static int rproc_trigger_auto_boot(struct rproc *rproc)
{
int ret;
/*
* Since the remote processor is in a detached state, it has already
* been booted by another entity. As such there is no point in waiting
* for a firmware image to be loaded, we can simply initiate the process
* of attaching to it immediately.
*/
if (rproc->state == RPROC_DETACHED)
return rproc_boot(rproc);
/*
* We're initiating asynchronous firmware loading, so we can be built into
* the kernel without hanging the boot process.
*/
ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
rproc->firmware, &rproc->dev, GFP_KERNEL,
rproc, rproc_auto_boot_callback);
if (ret < 0)
dev_err(&rproc->dev, "request_firmware_nowait err: %d\n", ret);
return ret;
}
static int rproc_stop(struct rproc *rproc, bool crashed)
{
struct device *dev = &rproc->dev;
int ret;
/* No need to continue if a stop() operation has not been provided */
if (!rproc->ops->stop)
return -EINVAL;
/* Stop any subdevices for the remote processor */
rproc_stop_subdevices(rproc, crashed);
/* the installed resource table is no longer accessible */
ret = rproc_reset_rsc_table_on_stop(rproc);
if (ret) {
dev_err(dev, "can't reset resource table: %d\n", ret);
return ret;
}
/* power off the remote processor */
ret = rproc->ops->stop(rproc);
if (ret) {
dev_err(dev, "can't stop rproc: %d\n", ret);
return ret;
}
rproc_unprepare_subdevices(rproc);
rproc->state = RPROC_OFFLINE;
dev_info(dev, "stopped remote processor %s\n", rproc->name);
return 0;
}
/*
* __rproc_detach(): Does the opposite of __rproc_attach()
*/
static int __rproc_detach(struct rproc *rproc)
{
struct device *dev = &rproc->dev;
int ret;
/* No need to continue if a detach() operation has not been provided */
if (!rproc->ops->detach)
return -EINVAL;
/* Stop any subdevices for the remote processor */
rproc_stop_subdevices(rproc, false);
/* the installed resource table is no longer accessible */
ret = rproc_reset_rsc_table_on_detach(rproc);
if (ret) {
dev_err(dev, "can't reset resource table: %d\n", ret);
return ret;
}
/* Tell the remote processor the core isn't available anymore */
ret = rproc->ops->detach(rproc);
if (ret) {
dev_err(dev, "can't detach from rproc: %d\n", ret);
return ret;
}
rproc_unprepare_subdevices(rproc);
rproc->state = RPROC_DETACHED;
dev_info(dev, "detached remote processor %s\n", rproc->name);
return 0;
}
static int rproc_attach_recovery(struct rproc *rproc)
{
int ret;
ret = __rproc_detach(rproc);
if (ret)
return ret;
return __rproc_attach(rproc);
}
static int rproc_boot_recovery(struct rproc *rproc)
{
const struct firmware *firmware_p;
struct device *dev = &rproc->dev;
int ret;
ret = rproc_stop(rproc, true);
if (ret)
return ret;
/* generate coredump */
rproc->ops->coredump(rproc);
/* load firmware */
ret = request_firmware(&firmware_p, rproc->firmware, dev);
if (ret < 0) {
dev_err(dev, "request_firmware failed: %d\n", ret);
return ret;
}
/* boot the remote processor up again */
ret = rproc_start(rproc, firmware_p);
release_firmware(firmware_p);
return ret;
}
/**
* rproc_trigger_recovery() - recover a remoteproc
* @rproc: the remote processor
*
* Recovery is done by resetting all the virtio devices; that way all the
* rpmsg drivers will be reset along with the remote processor, making the
* remoteproc functional again.
*
* This function can sleep, so it cannot be called from atomic context.
*
* Return: 0 on success or a negative value upon failure
*/
int rproc_trigger_recovery(struct rproc *rproc)
{
struct device *dev = &rproc->dev;
int ret;
ret = mutex_lock_interruptible(&rproc->lock);
if (ret)
return ret;
/* State could have changed before we got the mutex */
if (rproc->state != RPROC_CRASHED)
goto unlock_mutex;
dev_err(dev, "recovering %s\n", rproc->name);
if (rproc_has_feature(rproc, RPROC_FEAT_ATTACH_ON_RECOVERY))
ret = rproc_attach_recovery(rproc);
else
ret = rproc_boot_recovery(rproc);
unlock_mutex:
mutex_unlock(&rproc->lock);
return ret;
}
/**
* rproc_crash_handler_work() - handle a crash
* @work: work treating the crash
*
* This function needs to handle everything related to a crash, like cpu
* registers and stack dump, information to help to debug the fatal error, etc.
*/
static void rproc_crash_handler_work(struct work_struct *work)
{
struct rproc *rproc = container_of(work, struct rproc, crash_handler);
struct device *dev = &rproc->dev;
dev_dbg(dev, "enter %s\n", __func__);
mutex_lock(&rproc->lock);
if (rproc->state == RPROC_CRASHED) {
/* handle only the first crash detected */
mutex_unlock(&rproc->lock);
return;
}
if (rproc->state == RPROC_OFFLINE) {
/* Don't recover if the remote processor was stopped */
mutex_unlock(&rproc->lock);
goto out;
}
rproc->state = RPROC_CRASHED;
dev_err(dev, "handling crash #%u in %s\n", ++rproc->crash_cnt,
rproc->name);
mutex_unlock(&rproc->lock);
if (!rproc->recovery_disabled)
rproc_trigger_recovery(rproc);
out:
pm_relax(rproc->dev.parent);
}
/**
* rproc_boot() - boot a remote processor
* @rproc: handle of a remote processor
*
* Boot a remote processor (i.e. load its firmware, power it on, ...).
*
* If the remote processor is already powered on, this function immediately
* returns (successfully).
*
* Return: 0 on success, and an appropriate error value otherwise
*/
int rproc_boot(struct rproc *rproc)
{
const struct firmware *firmware_p;
struct device *dev;
int ret;
if (!rproc) {
pr_err("invalid rproc handle\n");
return -EINVAL;
}
dev = &rproc->dev;
ret = mutex_lock_interruptible(&rproc->lock);
if (ret) {
dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
return ret;
}
if (rproc->state == RPROC_DELETED) {
ret = -ENODEV;
dev_err(dev, "can't boot deleted rproc %s\n", rproc->name);
goto unlock_mutex;
}
/* skip the boot or attach process if rproc is already powered up */
if (atomic_inc_return(&rproc->power) > 1) {
ret = 0;
goto unlock_mutex;
}
if (rproc->state == RPROC_DETACHED) {
dev_info(dev, "attaching to %s\n", rproc->name);
ret = rproc_attach(rproc);
} else {
dev_info(dev, "powering up %s\n", rproc->name);
/* load firmware */
ret = request_firmware(&firmware_p, rproc->firmware, dev);
if (ret < 0) {
dev_err(dev, "request_firmware failed: %d\n", ret);
goto downref_rproc;
}
ret = rproc_fw_boot(rproc, firmware_p);
release_firmware(firmware_p);
}
downref_rproc:
if (ret)
atomic_dec(&rproc->power);
unlock_mutex:
mutex_unlock(&rproc->lock);
return ret;
}
EXPORT_SYMBOL(rproc_boot);
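/*
 * Illustrative sketch (not part of the original file): a client that holds an
 * rproc handle (for example one obtained with rproc_get_by_phandle(), below)
 * boots and later shuts down the remote processor with a balanced pair of
 * calls. "rproc" is a hypothetical, previously acquired handle:
 *
 *    int ret;
 *
 *    ret = rproc_boot(rproc);
 *    if (ret)
 *            return ret;
 *
 *    ... use the services exposed by the remote processor ...
 *
 *    rproc_shutdown(rproc);
 */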
/**
* rproc_shutdown() - power off the remote processor
* @rproc: the remote processor
*
* Power off a remote processor (previously booted with rproc_boot()).
*
* If @rproc is still being used by additional users, then
* this function will just decrement the power refcount and exit,
* without really powering off the device.
*
* Every call to rproc_boot() must (eventually) be accompanied by a call
* to rproc_shutdown(). Calling rproc_shutdown() redundantly is a bug.
*
* Notes:
* - we're not decrementing the rproc's refcount, only the power refcount,
* which means that the @rproc handle stays valid even after rproc_shutdown()
* returns, and users can still use it with a subsequent rproc_boot(), if
* needed.
*
* Return: 0 on success, and an appropriate error value otherwise
*/
int rproc_shutdown(struct rproc *rproc)
{
struct device *dev = &rproc->dev;
int ret = 0;
ret = mutex_lock_interruptible(&rproc->lock);
if (ret) {
dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
return ret;
}
if (rproc->state != RPROC_RUNNING &&
rproc->state != RPROC_ATTACHED) {
ret = -EINVAL;
goto out;
}
/* if the remote proc is still needed, bail out */
if (!atomic_dec_and_test(&rproc->power))
goto out;
ret = rproc_stop(rproc, false);
if (ret) {
atomic_inc(&rproc->power);
goto out;
}
/* clean up all acquired resources */
rproc_resource_cleanup(rproc);
/* release HW resources if needed */
rproc_unprepare_device(rproc);
rproc_disable_iommu(rproc);
/* Free the copy of the resource table */
kfree(rproc->cached_table);
rproc->cached_table = NULL;
rproc->table_ptr = NULL;
out:
mutex_unlock(&rproc->lock);
return ret;
}
EXPORT_SYMBOL(rproc_shutdown);
/**
* rproc_detach() - Detach the remote processor from the
* remoteproc core
*
* @rproc: the remote processor
*
* Detach a remote processor (previously attached via rproc_attach()).
*
* If @rproc is still being used by additional users, then
* this function will just decrement the power refcount and exit,
* without disconnecting the device.
*
* Function rproc_detach() calls __rproc_detach() in order to let a remote
* processor know that services provided by the application processor are
* no longer available. From there it should be possible to remove the
* platform driver and even power cycle the application processor (if the HW
* supports it) without needing to switch off the remote processor.
*
* Return: 0 on success, and an appropriate error value otherwise
*/
int rproc_detach(struct rproc *rproc)
{
struct device *dev = &rproc->dev;
int ret;
ret = mutex_lock_interruptible(&rproc->lock);
if (ret) {
dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
return ret;
}
if (rproc->state != RPROC_ATTACHED) {
ret = -EINVAL;
goto out;
}
/* if the remote proc is still needed, bail out */
if (!atomic_dec_and_test(&rproc->power)) {
ret = 0;
goto out;
}
ret = __rproc_detach(rproc);
if (ret) {
atomic_inc(&rproc->power);
goto out;
}
/* clean up all acquired resources */
rproc_resource_cleanup(rproc);
/* release HW resources if needed */
rproc_unprepare_device(rproc);
rproc_disable_iommu(rproc);
/* Free the copy of the resource table */
kfree(rproc->cached_table);
rproc->cached_table = NULL;
rproc->table_ptr = NULL;
out:
mutex_unlock(&rproc->lock);
return ret;
}
EXPORT_SYMBOL(rproc_detach);
/**
* rproc_get_by_phandle() - find a remote processor by phandle
* @phandle: phandle to the rproc
*
* Finds an rproc handle using the remote processor's phandle, and then
* returns a handle to the rproc.
*
* This function increments the remote processor's refcount, so always
* use rproc_put() to decrement it back once rproc isn't needed anymore.
*
* Return: rproc handle on success, and NULL on failure
*/
#ifdef CONFIG_OF
struct rproc *rproc_get_by_phandle(phandle phandle)
{
struct rproc *rproc = NULL, *r;
struct device_node *np;
np = of_find_node_by_phandle(phandle);
if (!np)
return NULL;
rcu_read_lock();
list_for_each_entry_rcu(r, &rproc_list, node) {
if (r->dev.parent && device_match_of_node(r->dev.parent, np)) {
/* prevent underlying implementation from being removed */
if (!try_module_get(r->dev.parent->driver->owner)) {
dev_err(&r->dev, "can't get owner\n");
break;
}
rproc = r;
get_device(&rproc->dev);
break;
}
}
rcu_read_unlock();
of_node_put(np);
return rproc;
}
#else
struct rproc *rproc_get_by_phandle(phandle phandle)
{
return NULL;
}
#endif
EXPORT_SYMBOL(rproc_get_by_phandle);
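/*
 * Illustrative sketch (not part of the original file): a consumer driver
 * usually reads the phandle from its own device tree node and must drop the
 * reference with rproc_put() when done. The "my-rproc" property name is
 * hypothetical:
 *
 *    struct rproc *rproc;
 *    phandle ph;
 *
 *    if (of_property_read_u32(dev->of_node, "my-rproc", &ph))
 *            return -EINVAL;
 *
 *    rproc = rproc_get_by_phandle(ph);
 *    if (!rproc)
 *            return -EPROBE_DEFER;
 *
 *    ... rproc_boot()/rproc_shutdown() as needed ...
 *
 *    rproc_put(rproc);
 */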
/**
* rproc_set_firmware() - assign a new firmware
* @rproc: rproc handle to which the new firmware is being assigned
* @fw_name: new firmware name to be assigned
*
* This function allows remoteproc drivers or clients to configure a custom
* firmware name that is different from the default name used during remoteproc
* registration. The function does not trigger a remote processor boot,
* only sets the firmware name used for a subsequent boot. This function
* should also be called only when the remote processor is offline.
*
* This allows either the userspace to configure a different name through
* sysfs or a kernel-level remoteproc or a remoteproc client driver to set
* a specific firmware when it is controlling the boot and shutdown of the
* remote processor.
*
* Return: 0 on success or a negative value upon failure
*/
int rproc_set_firmware(struct rproc *rproc, const char *fw_name)
{
struct device *dev;
int ret, len;
char *p;
if (!rproc || !fw_name)
return -EINVAL;
dev = rproc->dev.parent;
ret = mutex_lock_interruptible(&rproc->lock);
if (ret) {
dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
return -EINVAL;
}
if (rproc->state != RPROC_OFFLINE) {
dev_err(dev, "can't change firmware while running\n");
ret = -EBUSY;
goto out;
}
len = strcspn(fw_name, "\n");
if (!len) {
dev_err(dev, "can't provide empty string for firmware name\n");
ret = -EINVAL;
goto out;
}
p = kstrndup(fw_name, len, GFP_KERNEL);
if (!p) {
ret = -ENOMEM;
goto out;
}
kfree_const(rproc->firmware);
rproc->firmware = p;
out:
mutex_unlock(&rproc->lock);
return ret;
}
EXPORT_SYMBOL(rproc_set_firmware);
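/*
 * Illustrative sketch (not part of the original file): a client driver in
 * control of the remote processor's life cycle can switch images while the
 * rproc is offline and then boot it. The firmware file name is hypothetical:
 *
 *    ret = rproc_set_firmware(rproc, "dsp-test-image.elf");
 *    if (ret)
 *            return ret;
 *
 *    ret = rproc_boot(rproc);
 */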
static int rproc_validate(struct rproc *rproc)
{
switch (rproc->state) {
case RPROC_OFFLINE:
/*
* An offline processor without a start()
* function makes no sense.
*/
if (!rproc->ops->start)
return -EINVAL;
break;
case RPROC_DETACHED:
/*
* A remote processor in a detached state without an
* attach() function makes no sense.
*/
if (!rproc->ops->attach)
return -EINVAL;
/*
* When attaching to a remote processor the device memory
* is already available and as such there is no need to have a
* cached table.
*/
if (rproc->cached_table)
return -EINVAL;
break;
default:
/*
* When adding a remote processor, the state of the device
* can be offline or detached, nothing else.
*/
return -EINVAL;
}
return 0;
}
/**
* rproc_add() - register a remote processor
* @rproc: the remote processor handle to register
*
* Registers @rproc with the remoteproc framework, after it has been
* allocated with rproc_alloc().
*
* This is called by the platform-specific rproc implementation, whenever
* a new remote processor device is probed.
*
* Note: this function initiates an asynchronous firmware loading
* context, which will look for virtio devices supported by the rproc's
* firmware.
*
* If found, those virtio devices will be created and added, so as a result
* of registering this remote processor, additional virtio drivers might be
* probed.
*
* Return: 0 on success and an appropriate error code otherwise
*/
int rproc_add(struct rproc *rproc)
{
struct device *dev = &rproc->dev;
int ret;
ret = rproc_validate(rproc);
if (ret < 0)
return ret;
/* add char device for this remoteproc */
ret = rproc_char_device_add(rproc);
if (ret < 0)
return ret;
ret = device_add(dev);
if (ret < 0) {
put_device(dev);
goto rproc_remove_cdev;
}
dev_info(dev, "%s is available\n", rproc->name);
/* create debugfs entries */
rproc_create_debug_dir(rproc);
/* if rproc is marked always-on, request it to boot */
if (rproc->auto_boot) {
ret = rproc_trigger_auto_boot(rproc);
if (ret < 0)
goto rproc_remove_dev;
}
/* expose to rproc_get_by_phandle users */
mutex_lock(&rproc_list_mutex);
list_add_rcu(&rproc->node, &rproc_list);
mutex_unlock(&rproc_list_mutex);
return 0;
rproc_remove_dev:
rproc_delete_debug_dir(rproc);
device_del(dev);
rproc_remove_cdev:
rproc_char_device_remove(rproc);
return ret;
}
EXPORT_SYMBOL(rproc_add);
static void devm_rproc_remove(void *rproc)
{
rproc_del(rproc);
}
/**
* devm_rproc_add() - resource managed rproc_add()
* @dev: the underlying device
* @rproc: the remote processor handle to register
*
* This function performs like rproc_add() but the registered rproc device will
* automatically be removed on driver detach.
*
* Return: 0 on success, negative errno on failure
*/
int devm_rproc_add(struct device *dev, struct rproc *rproc)
{
int err;
err = rproc_add(rproc);
if (err)
return err;
return devm_add_action_or_reset(dev, devm_rproc_remove, rproc);
}
EXPORT_SYMBOL(devm_rproc_add);
/**
* rproc_type_release() - release a remote processor instance
* @dev: the rproc's device
*
* This function should _never_ be called directly.
*
* It will be called by the driver core when no one holds a valid pointer
* to @dev anymore.
*/
static void rproc_type_release(struct device *dev)
{
struct rproc *rproc = container_of(dev, struct rproc, dev);
dev_info(&rproc->dev, "releasing %s\n", rproc->name);
idr_destroy(&rproc->notifyids);
if (rproc->index >= 0)
ida_free(&rproc_dev_index, rproc->index);
kfree_const(rproc->firmware);
kfree_const(rproc->name);
kfree(rproc->ops);
kfree(rproc);
}
static const struct device_type rproc_type = {
.name = "remoteproc",
.release = rproc_type_release,
};
static int rproc_alloc_firmware(struct rproc *rproc,
const char *name, const char *firmware)
{
const char *p;
/*
* Allocate a firmware name if the caller gave us one to work
* with. Otherwise construct a new one using a default pattern.
*/
if (firmware)
p = kstrdup_const(firmware, GFP_KERNEL);
else
p = kasprintf(GFP_KERNEL, "rproc-%s-fw", name);
if (!p)
return -ENOMEM;
rproc->firmware = p;
return 0;
}
static int rproc_alloc_ops(struct rproc *rproc, const struct rproc_ops *ops)
{
rproc->ops = kmemdup(ops, sizeof(*ops), GFP_KERNEL);
if (!rproc->ops)
return -ENOMEM;
/* Default to rproc_coredump if no coredump function is specified */
if (!rproc->ops->coredump)
rproc->ops->coredump = rproc_coredump;
if (rproc->ops->load)
return 0;
/* Default to ELF loader if no load function is specified */
rproc->ops->load = rproc_elf_load_segments;
rproc->ops->parse_fw = rproc_elf_load_rsc_table;
rproc->ops->find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table;
rproc->ops->sanity_check = rproc_elf_sanity_check;
rproc->ops->get_boot_addr = rproc_elf_get_boot_addr;
return 0;
}
/**
* rproc_alloc() - allocate a remote processor handle
* @dev: the underlying device
* @name: name of this remote processor
* @ops: platform-specific handlers (mainly start/stop)
* @firmware: name of firmware file to load, can be NULL
* @len: length of private data needed by the rproc driver (in bytes)
*
* Allocates a new remote processor handle, but does not register
* it yet. If @firmware is NULL, a default name is used.
*
* This function should be used by rproc implementations during initialization
* of the remote processor.
*
* After creating an rproc handle using this function, and when ready,
* implementations should then call rproc_add() to complete
* the registration of the remote processor.
*
* Note: _never_ directly deallocate @rproc, even if it was not registered
* yet. Instead, when you need to unroll rproc_alloc(), use rproc_free().
*
* Return: new rproc pointer on success, and NULL on failure
*/
struct rproc *rproc_alloc(struct device *dev, const char *name,
const struct rproc_ops *ops,
const char *firmware, int len)
{
struct rproc *rproc;
if (!dev || !name || !ops)
return NULL;
rproc = kzalloc(sizeof(struct rproc) + len, GFP_KERNEL);
if (!rproc)
return NULL;
rproc->priv = &rproc[1];
rproc->auto_boot = true;
rproc->elf_class = ELFCLASSNONE;
rproc->elf_machine = EM_NONE;
device_initialize(&rproc->dev);
rproc->dev.parent = dev;
rproc->dev.type = &rproc_type;
rproc->dev.class = &rproc_class;
rproc->dev.driver_data = rproc;
idr_init(&rproc->notifyids);
rproc->name = kstrdup_const(name, GFP_KERNEL);
if (!rproc->name)
goto put_device;
if (rproc_alloc_firmware(rproc, name, firmware))
goto put_device;
if (rproc_alloc_ops(rproc, ops))
goto put_device;
/* Assign a unique device index and name */
rproc->index = ida_alloc(&rproc_dev_index, GFP_KERNEL);
if (rproc->index < 0) {
dev_err(dev, "ida_alloc failed: %d\n", rproc->index);
goto put_device;
}
dev_set_name(&rproc->dev, "remoteproc%d", rproc->index);
atomic_set(&rproc->power, 0);
mutex_init(&rproc->lock);
INIT_LIST_HEAD(&rproc->carveouts);
INIT_LIST_HEAD(&rproc->mappings);
INIT_LIST_HEAD(&rproc->traces);
INIT_LIST_HEAD(&rproc->rvdevs);
INIT_LIST_HEAD(&rproc->subdevs);
INIT_LIST_HEAD(&rproc->dump_segments);
INIT_WORK(&rproc->crash_handler, rproc_crash_handler_work);
rproc->state = RPROC_OFFLINE;
return rproc;
put_device:
put_device(&rproc->dev);
return NULL;
}
EXPORT_SYMBOL(rproc_alloc);
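/*
 * Illustrative sketch (not part of the original file): the minimum a platform
 * driver has to provide is a start/stop pair; ELF loading, sanity checking
 * and resource table handling fall back to the defaults installed by
 * rproc_alloc_ops() above. "my_rproc_start", "my_rproc_stop" and
 * "struct my_rproc_priv" are hypothetical:
 *
 *    static const struct rproc_ops my_rproc_ops = {
 *            .start = my_rproc_start,
 *            .stop = my_rproc_stop,
 *    };
 *
 *    rproc = rproc_alloc(&pdev->dev, "my-dsp", &my_rproc_ops,
 *                        "my-dsp-fw.elf", sizeof(struct my_rproc_priv));
 *    if (!rproc)
 *            return -ENOMEM;
 */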
/**
* rproc_free() - unroll rproc_alloc()
* @rproc: the remote processor handle
*
* This function decrements the rproc dev refcount.
*
* If no one holds any reference to rproc anymore, then its refcount would
* now drop to zero, and it would be freed.
*/
void rproc_free(struct rproc *rproc)
{
put_device(&rproc->dev);
}
EXPORT_SYMBOL(rproc_free);
/**
* rproc_put() - release rproc reference
* @rproc: the remote processor handle
*
* This function decrements the rproc dev refcount.
*
* If no one holds any reference to rproc anymore, then its refcount would
* now drop to zero, and it would be freed.
*/
void rproc_put(struct rproc *rproc)
{
module_put(rproc->dev.parent->driver->owner);
put_device(&rproc->dev);
}
EXPORT_SYMBOL(rproc_put);
/**
* rproc_del() - unregister a remote processor
* @rproc: rproc handle to unregister
*
* This function should be called when the platform specific rproc
* implementation decides to remove the rproc device. It should
* _only_ be called if a previous invocation of rproc_add()
* has completed successfully.
*
* After rproc_del() returns, @rproc isn't freed yet, because
* of the outstanding reference created by rproc_alloc. To decrement that
* one last refcount, one still needs to call rproc_free().
*
* Return: 0 on success and -EINVAL if @rproc isn't valid
*/
int rproc_del(struct rproc *rproc)
{
if (!rproc)
return -EINVAL;
/* TODO: make sure this works with rproc->power > 1 */
rproc_shutdown(rproc);
mutex_lock(&rproc->lock);
rproc->state = RPROC_DELETED;
mutex_unlock(&rproc->lock);
rproc_delete_debug_dir(rproc);
/* the rproc is downref'ed as soon as it's removed from the klist */
mutex_lock(&rproc_list_mutex);
list_del_rcu(&rproc->node);
mutex_unlock(&rproc_list_mutex);
/* Ensure that no readers of rproc_list are still active */
synchronize_rcu();
device_del(&rproc->dev);
rproc_char_device_remove(rproc);
return 0;
}
EXPORT_SYMBOL(rproc_del);
static void devm_rproc_free(struct device *dev, void *res)
{
rproc_free(*(struct rproc **)res);
}
/**
* devm_rproc_alloc() - resource managed rproc_alloc()
* @dev: the underlying device
* @name: name of this remote processor
* @ops: platform-specific handlers (mainly start/stop)
* @firmware: name of firmware file to load, can be NULL
* @len: length of private data needed by the rproc driver (in bytes)
*
* This function performs like rproc_alloc() but the acquired rproc device will
* automatically be released on driver detach.
*
* Return: new rproc instance, or NULL on failure
*/
struct rproc *devm_rproc_alloc(struct device *dev, const char *name,
const struct rproc_ops *ops,
const char *firmware, int len)
{
struct rproc **ptr, *rproc;
ptr = devres_alloc(devm_rproc_free, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return NULL;
rproc = rproc_alloc(dev, name, ops, firmware, len);
if (rproc) {
*ptr = rproc;
devres_add(dev, ptr);
} else {
devres_free(ptr);
}
return rproc;
}
EXPORT_SYMBOL(devm_rproc_alloc);
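/*
 * Illustrative sketch (not part of the original file): a typical probe()
 * pairs devm_rproc_alloc() with devm_rproc_add() so that both unregistration
 * and the final rproc_free() happen automatically on driver detach. The ops,
 * firmware name and private struct are hypothetical:
 *
 *    static int my_rproc_probe(struct platform_device *pdev)
 *    {
 *            struct rproc *rproc;
 *
 *            rproc = devm_rproc_alloc(&pdev->dev, "my-dsp", &my_rproc_ops,
 *                                     "my-dsp-fw.elf",
 *                                     sizeof(struct my_rproc_priv));
 *            if (!rproc)
 *                    return -ENOMEM;
 *
 *            platform_set_drvdata(pdev, rproc);
 *
 *            return devm_rproc_add(&pdev->dev, rproc);
 *    }
 */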
/**
* rproc_add_subdev() - add a subdevice to a remoteproc
* @rproc: rproc handle to add the subdevice to
* @subdev: subdev handle to register
*
* Caller is responsible for populating optional subdevice function pointers.
*/
void rproc_add_subdev(struct rproc *rproc, struct rproc_subdev *subdev)
{
list_add_tail(&subdev->node, &rproc->subdevs);
}
EXPORT_SYMBOL(rproc_add_subdev);
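/*
 * Illustrative sketch (not part of the original file): a driver hooks into
 * the boot and shutdown sequence by embedding a struct rproc_subdev and
 * filling in only the callbacks it needs; the callback names and the "my"
 * container are hypothetical:
 *
 *    my->subdev.prepare = my_subdev_prepare;
 *    my->subdev.start = my_subdev_start;
 *    my->subdev.stop = my_subdev_stop;
 *    my->subdev.unprepare = my_subdev_unprepare;
 *
 *    rproc_add_subdev(rproc, &my->subdev);
 *
 * The subdevice is removed again with rproc_remove_subdev() before "my" is
 * freed.
 */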
/**
* rproc_remove_subdev() - remove a subdevice from a remoteproc
* @rproc: rproc handle to remove the subdevice from
* @subdev: subdev handle, previously registered with rproc_add_subdev()
*/
void rproc_remove_subdev(struct rproc *rproc, struct rproc_subdev *subdev)
{
list_del(&subdev->node);
}
EXPORT_SYMBOL(rproc_remove_subdev);
/**
* rproc_get_by_child() - acquire rproc handle of @dev's ancestor
* @dev: child device to find ancestor of
*
* Return: the ancestor rproc instance, or NULL if not found
*/
struct rproc *rproc_get_by_child(struct device *dev)
{
for (dev = dev->parent; dev; dev = dev->parent) {
if (dev->type == &rproc_type)
return dev->driver_data;
}
return NULL;
}
EXPORT_SYMBOL(rproc_get_by_child);
/**
* rproc_report_crash() - rproc crash reporter function
* @rproc: remote processor
* @type: crash type
*
* This function must be called every time a crash is detected by the low-level
* drivers implementing a specific remoteproc. This should not be called from a
* non-remoteproc driver.
*
* This function can be called from atomic/interrupt context.
*/
void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type)
{
if (!rproc) {
pr_err("NULL rproc pointer\n");
return;
}
/* Prevent suspend while the remoteproc is being recovered */
pm_stay_awake(rproc->dev.parent);
dev_err(&rproc->dev, "crash detected in %s: type %s\n",
rproc->name, rproc_crash_to_string(type));
queue_work(rproc_recovery_wq, &rproc->crash_handler);
}
EXPORT_SYMBOL(rproc_report_crash);
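/*
 * Illustrative sketch (not part of the original file): since
 * rproc_report_crash() may be called from atomic context, a watchdog
 * interrupt handler can report the crash directly. "struct my_rproc_priv" is
 * a hypothetical driver-private structure:
 *
 *    static irqreturn_t my_wdog_interrupt(int irq, void *data)
 *    {
 *            struct my_rproc_priv *my = data;
 *
 *            rproc_report_crash(my->rproc, RPROC_WATCHDOG);
 *
 *            return IRQ_HANDLED;
 *    }
 */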
static int rproc_panic_handler(struct notifier_block *nb, unsigned long event,
void *ptr)
{
unsigned int longest = 0;
struct rproc *rproc;
unsigned int d;
rcu_read_lock();
list_for_each_entry_rcu(rproc, &rproc_list, node) {
if (!rproc->ops->panic)
continue;
if (rproc->state != RPROC_RUNNING &&
rproc->state != RPROC_ATTACHED)
continue;
d = rproc->ops->panic(rproc);
longest = max(longest, d);
}
rcu_read_unlock();
/*
* Delay for the longest requested duration before returning. This can
* be used by the remoteproc drivers to give the remote processor time
* to perform any requested operations (such as flushing caches), when
* it's not possible to signal the Linux side due to the panic.
*/
mdelay(longest);
return NOTIFY_DONE;
}
static void __init rproc_init_panic(void)
{
rproc_panic_nb.notifier_call = rproc_panic_handler;
atomic_notifier_chain_register(&panic_notifier_list, &rproc_panic_nb);
}
static void __exit rproc_exit_panic(void)
{
atomic_notifier_chain_unregister(&panic_notifier_list, &rproc_panic_nb);
}
static int __init remoteproc_init(void)
{
rproc_recovery_wq = alloc_workqueue("rproc_recovery_wq",
WQ_UNBOUND | WQ_FREEZABLE, 0);
if (!rproc_recovery_wq) {
pr_err("remoteproc: creation of rproc_recovery_wq failed\n");
return -ENOMEM;
}
rproc_init_sysfs();
rproc_init_debugfs();
rproc_init_cdev();
rproc_init_panic();
return 0;
}
subsys_initcall(remoteproc_init);
static void __exit remoteproc_exit(void)
{
ida_destroy(&rproc_dev_index);
if (!rproc_recovery_wq)
return;
rproc_exit_panic();
rproc_exit_debugfs();
rproc_exit_sysfs();
destroy_workqueue(rproc_recovery_wq);
}
module_exit(remoteproc_exit);
MODULE_DESCRIPTION("Generic Remote Processor Framework");
|
linux-master
|
drivers/remoteproc/remoteproc_core.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Qualcomm self-authenticating modem subsystem remoteproc driver
*
* Copyright (C) 2016 Linaro Ltd.
* Copyright (C) 2014 Sony Mobile Communications AB
* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/devcoredump.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_reserved_mem.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/remoteproc.h>
#include <linux/reset.h>
#include <linux/soc/qcom/mdt_loader.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
#include "remoteproc_internal.h"
#include "qcom_common.h"
#include "qcom_pil_info.h"
#include "qcom_q6v5.h"
#include <linux/firmware/qcom/qcom_scm.h>
#define MPSS_CRASH_REASON_SMEM 421
#define MBA_LOG_SIZE SZ_4K
#define MPSS_PAS_ID 5
/* RMB Status Register Values */
#define RMB_PBL_SUCCESS 0x1
#define RMB_MBA_XPU_UNLOCKED 0x1
#define RMB_MBA_XPU_UNLOCKED_SCRIBBLED 0x2
#define RMB_MBA_META_DATA_AUTH_SUCCESS 0x3
#define RMB_MBA_AUTH_COMPLETE 0x4
/* PBL/MBA interface registers */
#define RMB_MBA_IMAGE_REG 0x00
#define RMB_PBL_STATUS_REG 0x04
#define RMB_MBA_COMMAND_REG 0x08
#define RMB_MBA_STATUS_REG 0x0C
#define RMB_PMI_META_DATA_REG 0x10
#define RMB_PMI_CODE_START_REG 0x14
#define RMB_PMI_CODE_LENGTH_REG 0x18
#define RMB_MBA_MSS_STATUS 0x40
#define RMB_MBA_ALT_RESET 0x44
#define RMB_CMD_META_DATA_READY 0x1
#define RMB_CMD_LOAD_READY 0x2
/* QDSP6SS Register Offsets */
#define QDSP6SS_RESET_REG 0x014
#define QDSP6SS_GFMUX_CTL_REG 0x020
#define QDSP6SS_PWR_CTL_REG 0x030
#define QDSP6SS_MEM_PWR_CTL 0x0B0
#define QDSP6V6SS_MEM_PWR_CTL 0x034
#define QDSP6SS_STRAP_ACC 0x110
#define QDSP6V62SS_BHS_STATUS 0x0C4
/* AXI Halt Register Offsets */
#define AXI_HALTREQ_REG 0x0
#define AXI_HALTACK_REG 0x4
#define AXI_IDLE_REG 0x8
#define AXI_GATING_VALID_OVERRIDE BIT(0)
#define HALT_ACK_TIMEOUT_US 100000
/* QACCEPT Register Offsets */
#define QACCEPT_ACCEPT_REG 0x0
#define QACCEPT_ACTIVE_REG 0x4
#define QACCEPT_DENY_REG 0x8
#define QACCEPT_REQ_REG 0xC
#define QACCEPT_TIMEOUT_US 50
/* QDSP6SS_RESET */
#define Q6SS_STOP_CORE BIT(0)
#define Q6SS_CORE_ARES BIT(1)
#define Q6SS_BUS_ARES_ENABLE BIT(2)
/* QDSP6SS CBCR */
#define Q6SS_CBCR_CLKEN BIT(0)
#define Q6SS_CBCR_CLKOFF BIT(31)
#define Q6SS_CBCR_TIMEOUT_US 200
/* QDSP6SS_GFMUX_CTL */
#define Q6SS_CLK_ENABLE BIT(1)
/* QDSP6SS_PWR_CTL */
#define Q6SS_L2DATA_SLP_NRET_N_0 BIT(0)
#define Q6SS_L2DATA_SLP_NRET_N_1 BIT(1)
#define Q6SS_L2DATA_SLP_NRET_N_2 BIT(2)
#define Q6SS_L2TAG_SLP_NRET_N BIT(16)
#define Q6SS_ETB_SLP_NRET_N BIT(17)
#define Q6SS_L2DATA_STBY_N BIT(18)
#define Q6SS_SLP_RET_N BIT(19)
#define Q6SS_CLAMP_IO BIT(20)
#define QDSS_BHS_ON BIT(21)
#define QDSS_LDO_BYP BIT(22)
/* QDSP6v55 parameters */
#define QDSP6V55_MEM_BITS GENMASK(16, 8)
/* QDSP6v56 parameters */
#define QDSP6v56_LDO_BYP BIT(25)
#define QDSP6v56_BHS_ON BIT(24)
#define QDSP6v56_CLAMP_WL BIT(21)
#define QDSP6v56_CLAMP_QMC_MEM BIT(22)
#define QDSP6SS_XO_CBCR 0x0038
#define QDSP6SS_ACC_OVERRIDE_VAL 0x20
#define QDSP6v55_BHS_EN_REST_ACK BIT(0)
/* QDSP6v65 parameters */
#define QDSP6SS_CORE_CBCR 0x20
#define QDSP6SS_SLEEP 0x3C
#define QDSP6SS_BOOT_CORE_START 0x400
#define QDSP6SS_BOOT_CMD 0x404
#define BOOT_FSM_TIMEOUT 10000
#define BHS_CHECK_MAX_LOOPS 200
struct reg_info {
struct regulator *reg;
int uV;
int uA;
};
struct qcom_mss_reg_res {
const char *supply;
int uV;
int uA;
};
struct rproc_hexagon_res {
const char *hexagon_mba_image;
struct qcom_mss_reg_res *proxy_supply;
struct qcom_mss_reg_res *fallback_proxy_supply;
struct qcom_mss_reg_res *active_supply;
char **proxy_clk_names;
char **reset_clk_names;
char **active_clk_names;
char **proxy_pd_names;
int version;
bool need_mem_protection;
bool has_alt_reset;
bool has_mba_logs;
bool has_spare_reg;
bool has_qaccept_regs;
bool has_ext_cntl_regs;
bool has_vq6;
};
struct q6v5 {
struct device *dev;
struct rproc *rproc;
void __iomem *reg_base;
void __iomem *rmb_base;
struct regmap *halt_map;
struct regmap *conn_map;
u32 halt_q6;
u32 halt_modem;
u32 halt_nc;
u32 halt_vq6;
u32 conn_box;
u32 qaccept_mdm;
u32 qaccept_cx;
u32 qaccept_axi;
u32 axim1_clk_off;
u32 crypto_clk_off;
u32 force_clk_on;
u32 rscc_disable;
struct reset_control *mss_restart;
struct reset_control *pdc_reset;
struct qcom_q6v5 q6v5;
struct clk *active_clks[8];
struct clk *reset_clks[4];
struct clk *proxy_clks[4];
struct device *proxy_pds[3];
int active_clk_count;
int reset_clk_count;
int proxy_clk_count;
int proxy_pd_count;
struct reg_info active_regs[1];
struct reg_info proxy_regs[1];
struct reg_info fallback_proxy_regs[2];
int active_reg_count;
int proxy_reg_count;
int fallback_proxy_reg_count;
bool dump_mba_loaded;
size_t current_dump_size;
size_t total_dump_size;
phys_addr_t mba_phys;
size_t mba_size;
size_t dp_size;
phys_addr_t mdata_phys;
size_t mdata_size;
phys_addr_t mpss_phys;
phys_addr_t mpss_reloc;
size_t mpss_size;
struct qcom_rproc_glink glink_subdev;
struct qcom_rproc_subdev smd_subdev;
struct qcom_rproc_ssr ssr_subdev;
struct qcom_sysmon *sysmon;
struct platform_device *bam_dmux;
bool need_mem_protection;
bool has_alt_reset;
bool has_mba_logs;
bool has_spare_reg;
bool has_qaccept_regs;
bool has_ext_cntl_regs;
bool has_vq6;
u64 mpss_perm;
u64 mba_perm;
const char *hexagon_mdt_image;
int version;
};
enum {
MSS_MSM8909,
MSS_MSM8916,
MSS_MSM8953,
MSS_MSM8974,
MSS_MSM8996,
MSS_MSM8998,
MSS_SC7180,
MSS_SC7280,
MSS_SDM660,
MSS_SDM845,
};
static int q6v5_regulator_init(struct device *dev, struct reg_info *regs,
const struct qcom_mss_reg_res *reg_res)
{
int rc;
int i;
if (!reg_res)
return 0;
for (i = 0; reg_res[i].supply; i++) {
regs[i].reg = devm_regulator_get(dev, reg_res[i].supply);
if (IS_ERR(regs[i].reg)) {
rc = PTR_ERR(regs[i].reg);
if (rc != -EPROBE_DEFER)
dev_err(dev, "Failed to get %s regulator\n",
reg_res[i].supply);
return rc;
}
regs[i].uV = reg_res[i].uV;
regs[i].uA = reg_res[i].uA;
}
return i;
}
static int q6v5_regulator_enable(struct q6v5 *qproc,
struct reg_info *regs, int count)
{
int ret;
int i;
for (i = 0; i < count; i++) {
if (regs[i].uV > 0) {
ret = regulator_set_voltage(regs[i].reg,
regs[i].uV, INT_MAX);
if (ret) {
dev_err(qproc->dev,
"Failed to request voltage for %d.\n",
i);
goto err;
}
}
if (regs[i].uA > 0) {
ret = regulator_set_load(regs[i].reg,
regs[i].uA);
if (ret < 0) {
dev_err(qproc->dev,
"Failed to set regulator mode\n");
goto err;
}
}
ret = regulator_enable(regs[i].reg);
if (ret) {
dev_err(qproc->dev, "Regulator enable failed\n");
goto err;
}
}
return 0;
err:
for (; i >= 0; i--) {
if (regs[i].uV > 0)
regulator_set_voltage(regs[i].reg, 0, INT_MAX);
if (regs[i].uA > 0)
regulator_set_load(regs[i].reg, 0);
regulator_disable(regs[i].reg);
}
return ret;
}
static void q6v5_regulator_disable(struct q6v5 *qproc,
struct reg_info *regs, int count)
{
int i;
for (i = 0; i < count; i++) {
if (regs[i].uV > 0)
regulator_set_voltage(regs[i].reg, 0, INT_MAX);
if (regs[i].uA > 0)
regulator_set_load(regs[i].reg, 0);
regulator_disable(regs[i].reg);
}
}
static int q6v5_clk_enable(struct device *dev,
struct clk **clks, int count)
{
int rc;
int i;
for (i = 0; i < count; i++) {
rc = clk_prepare_enable(clks[i]);
if (rc) {
dev_err(dev, "Clock enable failed\n");
goto err;
}
}
return 0;
err:
for (i--; i >= 0; i--)
clk_disable_unprepare(clks[i]);
return rc;
}
static void q6v5_clk_disable(struct device *dev,
struct clk **clks, int count)
{
int i;
for (i = 0; i < count; i++)
clk_disable_unprepare(clks[i]);
}
static int q6v5_pds_enable(struct q6v5 *qproc, struct device **pds,
size_t pd_count)
{
int ret;
int i;
for (i = 0; i < pd_count; i++) {
dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
ret = pm_runtime_get_sync(pds[i]);
if (ret < 0) {
pm_runtime_put_noidle(pds[i]);
dev_pm_genpd_set_performance_state(pds[i], 0);
goto unroll_pd_votes;
}
}
return 0;
unroll_pd_votes:
for (i--; i >= 0; i--) {
dev_pm_genpd_set_performance_state(pds[i], 0);
pm_runtime_put(pds[i]);
}
return ret;
}
static void q6v5_pds_disable(struct q6v5 *qproc, struct device **pds,
size_t pd_count)
{
int i;
for (i = 0; i < pd_count; i++) {
dev_pm_genpd_set_performance_state(pds[i], 0);
pm_runtime_put(pds[i]);
}
}
static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, u64 *current_perm,
bool local, bool remote, phys_addr_t addr,
size_t size)
{
struct qcom_scm_vmperm next[2];
int perms = 0;
if (!qproc->need_mem_protection)
return 0;
if (local == !!(*current_perm & BIT(QCOM_SCM_VMID_HLOS)) &&
remote == !!(*current_perm & BIT(QCOM_SCM_VMID_MSS_MSA)))
return 0;
if (local) {
next[perms].vmid = QCOM_SCM_VMID_HLOS;
next[perms].perm = QCOM_SCM_PERM_RWX;
perms++;
}
if (remote) {
next[perms].vmid = QCOM_SCM_VMID_MSS_MSA;
next[perms].perm = QCOM_SCM_PERM_RW;
perms++;
}
return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K),
current_perm, next, perms);
}
static void q6v5_debug_policy_load(struct q6v5 *qproc, void *mba_region)
{
const struct firmware *dp_fw;
if (request_firmware_direct(&dp_fw, "msadp", qproc->dev))
return;
if (SZ_1M + dp_fw->size <= qproc->mba_size) {
memcpy(mba_region + SZ_1M, dp_fw->data, dp_fw->size);
qproc->dp_size = dp_fw->size;
}
release_firmware(dp_fw);
}
static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
{
struct q6v5 *qproc = rproc->priv;
void *mba_region;
/* MBA is restricted to a maximum size of 1M */
if (fw->size > qproc->mba_size || fw->size > SZ_1M) {
dev_err(qproc->dev, "MBA firmware load failed\n");
return -EINVAL;
}
mba_region = memremap(qproc->mba_phys, qproc->mba_size, MEMREMAP_WC);
if (!mba_region) {
dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
&qproc->mba_phys, qproc->mba_size);
return -EBUSY;
}
memcpy(mba_region, fw->data, fw->size);
q6v5_debug_policy_load(qproc, mba_region);
memunmap(mba_region);
return 0;
}
static int q6v5_reset_assert(struct q6v5 *qproc)
{
int ret;
if (qproc->has_alt_reset) {
reset_control_assert(qproc->pdc_reset);
ret = reset_control_reset(qproc->mss_restart);
reset_control_deassert(qproc->pdc_reset);
} else if (qproc->has_spare_reg) {
/*
* When the AXI pipeline is being reset with the Q6 modem partly
* operational, there is a possibility of the AXI valid signal
* glitching, leading to spurious transactions and Q6 hangs. A
* workaround is employed by asserting the AXI_GATING_VALID_OVERRIDE
* bit before triggering the Q6 MSS reset. AXI_GATING_VALID_OVERRIDE
* is withdrawn after the MSS assert, followed by an MSS deassert,
* while holding the PDC reset.
*/
reset_control_assert(qproc->pdc_reset);
regmap_update_bits(qproc->conn_map, qproc->conn_box,
AXI_GATING_VALID_OVERRIDE, 1);
reset_control_assert(qproc->mss_restart);
reset_control_deassert(qproc->pdc_reset);
regmap_update_bits(qproc->conn_map, qproc->conn_box,
AXI_GATING_VALID_OVERRIDE, 0);
ret = reset_control_deassert(qproc->mss_restart);
} else if (qproc->has_ext_cntl_regs) {
regmap_write(qproc->conn_map, qproc->rscc_disable, 0);
reset_control_assert(qproc->pdc_reset);
reset_control_assert(qproc->mss_restart);
reset_control_deassert(qproc->pdc_reset);
ret = reset_control_deassert(qproc->mss_restart);
} else {
ret = reset_control_assert(qproc->mss_restart);
}
return ret;
}
static int q6v5_reset_deassert(struct q6v5 *qproc)
{
int ret;
if (qproc->has_alt_reset) {
reset_control_assert(qproc->pdc_reset);
writel(1, qproc->rmb_base + RMB_MBA_ALT_RESET);
ret = reset_control_reset(qproc->mss_restart);
writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET);
reset_control_deassert(qproc->pdc_reset);
} else if (qproc->has_spare_reg || qproc->has_ext_cntl_regs) {
ret = reset_control_reset(qproc->mss_restart);
} else {
ret = reset_control_deassert(qproc->mss_restart);
}
return ret;
}
static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms)
{
unsigned long timeout;
s32 val;
timeout = jiffies + msecs_to_jiffies(ms);
for (;;) {
val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG);
if (val)
break;
if (time_after(jiffies, timeout))
return -ETIMEDOUT;
msleep(1);
}
return val;
}
static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms)
{
unsigned long timeout;
s32 val;
timeout = jiffies + msecs_to_jiffies(ms);
for (;;) {
val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
if (val < 0)
break;
if (!status && val)
break;
else if (status && val == status)
break;
if (time_after(jiffies, timeout))
return -ETIMEDOUT;
msleep(1);
}
return val;
}
static void q6v5_dump_mba_logs(struct q6v5 *qproc)
{
struct rproc *rproc = qproc->rproc;
void *data;
void *mba_region;
if (!qproc->has_mba_logs)
return;
if (q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false, qproc->mba_phys,
qproc->mba_size))
return;
mba_region = memremap(qproc->mba_phys, qproc->mba_size, MEMREMAP_WC);
if (!mba_region)
return;
data = vmalloc(MBA_LOG_SIZE);
if (data) {
memcpy(data, mba_region, MBA_LOG_SIZE);
dev_coredumpv(&rproc->dev, data, MBA_LOG_SIZE, GFP_KERNEL);
}
memunmap(mba_region);
}
static int q6v5proc_reset(struct q6v5 *qproc)
{
u32 val;
int ret;
int i;
if (qproc->version == MSS_SDM845) {
val = readl(qproc->reg_base + QDSP6SS_SLEEP);
val |= Q6SS_CBCR_CLKEN;
writel(val, qproc->reg_base + QDSP6SS_SLEEP);
ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
val, !(val & Q6SS_CBCR_CLKOFF), 1,
Q6SS_CBCR_TIMEOUT_US);
if (ret) {
dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
return -ETIMEDOUT;
}
/* De-assert QDSP6 stop core */
writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
/* Trigger boot FSM */
writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);
ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
if (ret) {
dev_err(qproc->dev, "Boot FSM failed to complete.\n");
/* Reset the modem so that boot FSM is in reset state */
q6v5_reset_deassert(qproc);
return ret;
}
goto pbl_wait;
} else if (qproc->version == MSS_SC7180 || qproc->version == MSS_SC7280) {
val = readl(qproc->reg_base + QDSP6SS_SLEEP);
val |= Q6SS_CBCR_CLKEN;
writel(val, qproc->reg_base + QDSP6SS_SLEEP);
ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
val, !(val & Q6SS_CBCR_CLKOFF), 1,
Q6SS_CBCR_TIMEOUT_US);
if (ret) {
dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
return -ETIMEDOUT;
}
/* Turn on the XO clock needed for PLL setup */
val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
val |= Q6SS_CBCR_CLKEN;
writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);
ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
val, !(val & Q6SS_CBCR_CLKOFF), 1,
Q6SS_CBCR_TIMEOUT_US);
if (ret) {
dev_err(qproc->dev, "QDSP6SS XO clock timed out\n");
return -ETIMEDOUT;
}
/* Configure Q6 core CBCR to auto-enable after reset sequence */
val = readl(qproc->reg_base + QDSP6SS_CORE_CBCR);
val |= Q6SS_CBCR_CLKEN;
writel(val, qproc->reg_base + QDSP6SS_CORE_CBCR);
/* De-assert the Q6 stop core signal */
writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
/* Wait for 10 us for any staggering logic to settle */
usleep_range(10, 20);
/* Trigger the boot FSM to start the Q6 out-of-reset sequence */
writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);
/* Poll the MSS_STATUS for FSM completion */
ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
if (ret) {
dev_err(qproc->dev, "Boot FSM failed to complete.\n");
/* Reset the modem so that boot FSM is in reset state */
q6v5_reset_deassert(qproc);
return ret;
}
goto pbl_wait;
} else if (qproc->version == MSS_MSM8909 ||
qproc->version == MSS_MSM8953 ||
qproc->version == MSS_MSM8996 ||
qproc->version == MSS_MSM8998 ||
qproc->version == MSS_SDM660) {
if (qproc->version != MSS_MSM8909 &&
qproc->version != MSS_MSM8953)
/* Override the ACC value if required */
writel(QDSP6SS_ACC_OVERRIDE_VAL,
qproc->reg_base + QDSP6SS_STRAP_ACC);
/* Assert resets, stop core */
val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
/* BHS requires the XO CBCR to be enabled */

val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
val |= Q6SS_CBCR_CLKEN;
writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);
/* Wait for the CLKOFF bit to go low, indicating the clock is enabled */
ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
val, !(val & Q6SS_CBCR_CLKOFF), 1,
Q6SS_CBCR_TIMEOUT_US);
if (ret) {
dev_err(qproc->dev,
"xo cbcr enabling timed out (rc:%d)\n", ret);
return ret;
}
/* Enable power block headswitch and wait for it to stabilize */
val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
val |= QDSP6v56_BHS_ON;
writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
udelay(1);
if (qproc->version == MSS_SDM660) {
ret = readl_relaxed_poll_timeout(qproc->reg_base + QDSP6V62SS_BHS_STATUS,
i, (i & QDSP6v55_BHS_EN_REST_ACK),
1, BHS_CHECK_MAX_LOOPS);
if (ret == -ETIMEDOUT) {
dev_err(qproc->dev, "BHS_EN_REST_ACK not set!\n");
return -ETIMEDOUT;
}
}
/* Put LDO in bypass mode */
val |= QDSP6v56_LDO_BYP;
writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
if (qproc->version != MSS_MSM8909) {
int mem_pwr_ctl;
/* Deassert QDSP6 compiler memory clamp */
val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
val &= ~QDSP6v56_CLAMP_QMC_MEM;
writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
/* Deassert memory peripheral sleep and L2 memory standby */
val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N;
writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
/* Turn on L1, L2, ETB and JU memories 1 at a time */
if (qproc->version == MSS_MSM8953 ||
qproc->version == MSS_MSM8996) {
mem_pwr_ctl = QDSP6SS_MEM_PWR_CTL;
i = 19;
} else {
/* MSS_MSM8998, MSS_SDM660 */
mem_pwr_ctl = QDSP6V6SS_MEM_PWR_CTL;
i = 28;
}
val = readl(qproc->reg_base + mem_pwr_ctl);
for (; i >= 0; i--) {
val |= BIT(i);
writel(val, qproc->reg_base + mem_pwr_ctl);
/*
* Read back the value to ensure the write is done, then
* wait for 1us for both memory peripheral and data
* array to turn on.
*/
val |= readl(qproc->reg_base + mem_pwr_ctl);
udelay(1);
}
} else {
/* Turn on memories */
val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
val |= Q6SS_SLP_RET_N | Q6SS_L2DATA_STBY_N |
Q6SS_ETB_SLP_NRET_N | QDSP6V55_MEM_BITS;
writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
/* Turn on L2 banks 1 at a time */
for (i = 0; i <= 7; i++) {
val |= BIT(i);
writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
}
}
/* Remove word line clamp */
val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
val &= ~QDSP6v56_CLAMP_WL;
writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
} else {
/* Assert resets, stop core */
val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
/* Enable power block headswitch and wait for it to stabilize */
val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
val |= QDSS_BHS_ON | QDSS_LDO_BYP;
writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
udelay(1);
/*
* Turn on memories. L2 banks should be done individually
* to minimize inrush current.
*/
val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
val |= Q6SS_L2DATA_SLP_NRET_N_2;
writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
val |= Q6SS_L2DATA_SLP_NRET_N_1;
writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
val |= Q6SS_L2DATA_SLP_NRET_N_0;
writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
}
/* Remove IO clamp */
val &= ~Q6SS_CLAMP_IO;
writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
/* Bring core out of reset */
val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
val &= ~Q6SS_CORE_ARES;
writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
/* Turn on core clock */
val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
val |= Q6SS_CLK_ENABLE;
writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
/* Start core execution */
val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
val &= ~Q6SS_STOP_CORE;
writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
pbl_wait:
/* Wait for PBL status */
ret = q6v5_rmb_pbl_wait(qproc, 1000);
if (ret == -ETIMEDOUT) {
dev_err(qproc->dev, "PBL boot timed out\n");
} else if (ret != RMB_PBL_SUCCESS) {
dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret);
ret = -EINVAL;
} else {
ret = 0;
}
return ret;
}
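/*
 * Rough summary of q6v5proc_reset() (derived from the code above, not a
 * datasheet): SDM845 and SC7180/SC7280 rely on the QDSP6SS boot FSM (enable
 * the sleep/XO clocks, release the stop-core signal, write BOOT_CMD and poll
 * RMB_MBA_MSS_STATUS); the MSM8909/8953/8996/8998/SDM660 family walks the BHS
 * power-up sequence (headswitch, LDO bypass, per-bank memory power-up, clamp
 * removal); all other versions use the legacy reset/clamp sequence.  Every
 * path then converges on waiting for the PBL status over the RMB.
 */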
static int q6v5proc_enable_qchannel(struct q6v5 *qproc, struct regmap *map, u32 offset)
{
unsigned int val;
int ret;
if (!qproc->has_qaccept_regs)
return 0;
if (qproc->has_ext_cntl_regs) {
regmap_write(qproc->conn_map, qproc->rscc_disable, 0);
regmap_write(qproc->conn_map, qproc->force_clk_on, 1);
ret = regmap_read_poll_timeout(qproc->halt_map, qproc->axim1_clk_off, val,
!val, 1, Q6SS_CBCR_TIMEOUT_US);
if (ret) {
dev_err(qproc->dev, "failed to enable axim1 clock\n");
return -ETIMEDOUT;
}
}
regmap_write(map, offset + QACCEPT_REQ_REG, 1);
/* Wait for accept */
ret = regmap_read_poll_timeout(map, offset + QACCEPT_ACCEPT_REG, val, val, 5,
QACCEPT_TIMEOUT_US);
if (ret) {
dev_err(qproc->dev, "qchannel enable failed\n");
return -ETIMEDOUT;
}
return 0;
}
static void q6v5proc_disable_qchannel(struct q6v5 *qproc, struct regmap *map, u32 offset)
{
int ret;
unsigned int val, retry;
unsigned int nretry = 10;
bool takedown_complete = false;
if (!qproc->has_qaccept_regs)
return;
while (!takedown_complete && nretry) {
nretry--;
/* Wait for active transactions to complete */
regmap_read_poll_timeout(map, offset + QACCEPT_ACTIVE_REG, val, !val, 5,
QACCEPT_TIMEOUT_US);
/* Request Q-channel transaction takedown */
regmap_write(map, offset + QACCEPT_REQ_REG, 0);
/*
* If the request is denied, reset the Q-channel takedown request,
* wait for active transactions to complete and retry takedown.
*/
retry = 10;
while (retry) {
usleep_range(5, 10);
retry--;
ret = regmap_read(map, offset + QACCEPT_DENY_REG, &val);
if (!ret && val) {
regmap_write(map, offset + QACCEPT_REQ_REG, 1);
break;
}
ret = regmap_read(map, offset + QACCEPT_ACCEPT_REG, &val);
if (!ret && !val) {
takedown_complete = true;
break;
}
}
if (!retry)
break;
}
/* Rely on mss_restart to clear out pending transactions on takedown failure */
if (!takedown_complete)
dev_err(qproc->dev, "qchannel takedown failed\n");
}
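/*
 * Q-channel handshake as used above (a sketch based on the register accesses,
 * not a formal protocol description): writing 1 to QACCEPT_REQ_REG requests
 * the connection and QACCEPT_ACCEPT_REG going high acknowledges it; takedown
 * writes 0 to QACCEPT_REQ_REG and waits for QACCEPT_ACCEPT_REG to clear, with
 * QACCEPT_DENY_REG signalling that the request must be re-asserted and the
 * takedown retried once outstanding transactions (QACCEPT_ACTIVE_REG) drain.
 */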
static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
struct regmap *halt_map,
u32 offset)
{
unsigned int val;
int ret;
/* Check if we're already idle */
ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
if (!ret && val)
return;
/* Assert halt request */
regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);
/* Wait for halt */
regmap_read_poll_timeout(halt_map, offset + AXI_HALTACK_REG, val,
val, 1000, HALT_ACK_TIMEOUT_US);
ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
if (ret || !val)
dev_err(qproc->dev, "port failed halt\n");
/* Clear halt request (port will remain halted until reset) */
regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
}
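/*
 * Note: the halt-ack poll above deliberately ignores a timeout; the final
 * AXI_IDLE_REG read decides whether the port actually reached idle, and the
 * halt request is cleared afterwards since the port stays halted until the
 * next reset anyway (per the comment above).
 */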
static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw,
const char *fw_name)
{
unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
dma_addr_t phys;
void *metadata;
u64 mdata_perm;
int xferop_ret;
size_t size;
void *ptr;
int ret;
metadata = qcom_mdt_read_metadata(fw, &size, fw_name, qproc->dev);
if (IS_ERR(metadata))
return PTR_ERR(metadata);
if (qproc->mdata_phys) {
if (size > qproc->mdata_size) {
ret = -EINVAL;
dev_err(qproc->dev, "metadata size outside memory range\n");
goto free_metadata;
}
phys = qproc->mdata_phys;
ptr = memremap(qproc->mdata_phys, size, MEMREMAP_WC);
if (!ptr) {
ret = -EBUSY;
dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
&qproc->mdata_phys, size);
goto free_metadata;
}
} else {
ptr = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs);
if (!ptr) {
ret = -ENOMEM;
dev_err(qproc->dev, "failed to allocate mdt buffer\n");
goto free_metadata;
}
}
memcpy(ptr, metadata, size);
if (qproc->mdata_phys)
memunmap(ptr);
/* Hypervisor mapping to access metadata by modem */
mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, false, true,
phys, size);
if (ret) {
dev_err(qproc->dev,
"assigning Q6 access to metadata failed: %d\n", ret);
ret = -EAGAIN;
goto free_dma_attrs;
}
writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG);
writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000);
if (ret == -ETIMEDOUT)
dev_err(qproc->dev, "MPSS header authentication timed out\n");
else if (ret < 0)
dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);
/* Metadata authentication done, remove modem access */
xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, true, false,
phys, size);
if (xferop_ret)
dev_warn(qproc->dev,
"mdt buffer not reclaimed system may become unstable\n");
free_dma_attrs:
if (!qproc->mdata_phys)
dma_free_attrs(qproc->dev, size, ptr, phys, dma_attrs);
free_metadata:
kfree(metadata);
return ret < 0 ? ret : 0;
}
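/*
 * Metadata authentication flow used above: the MDT metadata is staged either
 * in the dedicated metadata carveout (qproc->mdata_phys) or in a freshly
 * allocated contiguous DMA buffer, its ownership is transferred to the modem
 * VM via SCM, its physical address is posted in RMB_PMI_META_DATA_REG, and
 * RMB_CMD_META_DATA_READY asks the MBA to authenticate the headers.  The
 * buffer is reclaimed for the HLOS afterwards regardless of the outcome.
 */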
static bool q6v5_phdr_valid(const struct elf32_phdr *phdr)
{
if (phdr->p_type != PT_LOAD)
return false;
if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
return false;
if (!phdr->p_memsz)
return false;
return true;
}
static int q6v5_mba_load(struct q6v5 *qproc)
{
int ret;
int xfermemop_ret;
bool mba_load_err = false;
ret = qcom_q6v5_prepare(&qproc->q6v5);
if (ret)
return ret;
ret = q6v5_pds_enable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
if (ret < 0) {
dev_err(qproc->dev, "failed to enable proxy power domains\n");
goto disable_irqs;
}
ret = q6v5_regulator_enable(qproc, qproc->fallback_proxy_regs,
qproc->fallback_proxy_reg_count);
if (ret) {
dev_err(qproc->dev, "failed to enable fallback proxy supplies\n");
goto disable_proxy_pds;
}
ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
qproc->proxy_reg_count);
if (ret) {
dev_err(qproc->dev, "failed to enable proxy supplies\n");
goto disable_fallback_proxy_reg;
}
ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
qproc->proxy_clk_count);
if (ret) {
dev_err(qproc->dev, "failed to enable proxy clocks\n");
goto disable_proxy_reg;
}
ret = q6v5_regulator_enable(qproc, qproc->active_regs,
qproc->active_reg_count);
if (ret) {
dev_err(qproc->dev, "failed to enable supplies\n");
goto disable_proxy_clk;
}
ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks,
qproc->reset_clk_count);
if (ret) {
dev_err(qproc->dev, "failed to enable reset clocks\n");
goto disable_vdd;
}
ret = q6v5_reset_deassert(qproc);
if (ret) {
dev_err(qproc->dev, "failed to deassert mss restart\n");
goto disable_reset_clks;
}
ret = q6v5_clk_enable(qproc->dev, qproc->active_clks,
qproc->active_clk_count);
if (ret) {
dev_err(qproc->dev, "failed to enable clocks\n");
goto assert_reset;
}
ret = q6v5proc_enable_qchannel(qproc, qproc->halt_map, qproc->qaccept_axi);
if (ret) {
dev_err(qproc->dev, "failed to enable axi bridge\n");
goto disable_active_clks;
}
/*
* Some versions of the MBA firmware wipe the MPSS region at boot as well, so
* grant the Q6 access to this region.
*/
ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true,
qproc->mpss_phys, qproc->mpss_size);
if (ret) {
dev_err(qproc->dev, "assigning Q6 access to mpss memory failed: %d\n", ret);
goto disable_active_clks;
}
/* Assign MBA image access in DDR to q6 */
ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false, true,
qproc->mba_phys, qproc->mba_size);
if (ret) {
dev_err(qproc->dev,
"assigning Q6 access to mba memory failed: %d\n", ret);
goto disable_active_clks;
}
writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);
if (qproc->dp_size) {
writel(qproc->mba_phys + SZ_1M, qproc->rmb_base + RMB_PMI_CODE_START_REG);
writel(qproc->dp_size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
}
ret = q6v5proc_reset(qproc);
if (ret)
goto reclaim_mba;
if (qproc->has_mba_logs)
qcom_pil_info_store("mba", qproc->mba_phys, MBA_LOG_SIZE);
ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
if (ret == -ETIMEDOUT) {
dev_err(qproc->dev, "MBA boot timed out\n");
goto halt_axi_ports;
} else if (ret != RMB_MBA_XPU_UNLOCKED &&
ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
ret = -EINVAL;
goto halt_axi_ports;
}
qproc->dump_mba_loaded = true;
return 0;
halt_axi_ports:
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
if (qproc->has_vq6)
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_vq6);
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_mdm);
q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_cx);
q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_axi);
mba_load_err = true;
reclaim_mba:
xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
false, qproc->mba_phys,
qproc->mba_size);
if (xfermemop_ret) {
dev_err(qproc->dev,
"Failed to reclaim mba buffer, system may become unstable\n");
} else if (mba_load_err) {
q6v5_dump_mba_logs(qproc);
}
disable_active_clks:
q6v5_clk_disable(qproc->dev, qproc->active_clks,
qproc->active_clk_count);
assert_reset:
q6v5_reset_assert(qproc);
disable_reset_clks:
q6v5_clk_disable(qproc->dev, qproc->reset_clks,
qproc->reset_clk_count);
disable_vdd:
q6v5_regulator_disable(qproc, qproc->active_regs,
qproc->active_reg_count);
disable_proxy_clk:
q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
qproc->proxy_clk_count);
disable_proxy_reg:
q6v5_regulator_disable(qproc, qproc->proxy_regs,
qproc->proxy_reg_count);
disable_fallback_proxy_reg:
q6v5_regulator_disable(qproc, qproc->fallback_proxy_regs,
qproc->fallback_proxy_reg_count);
disable_proxy_pds:
q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
disable_irqs:
qcom_q6v5_unprepare(&qproc->q6v5);
return ret;
}
static void q6v5_mba_reclaim(struct q6v5 *qproc)
{
int ret;
u32 val;
qproc->dump_mba_loaded = false;
qproc->dp_size = 0;
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
if (qproc->has_vq6)
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_vq6);
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
if (qproc->version == MSS_MSM8996) {
/*
 * Assert the I/O and memory clamps to avoid a high MX current draw
 * during LPASS/MSS restart.
 */
val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL |
QDSP6v56_CLAMP_QMC_MEM;
writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
}
if (qproc->has_ext_cntl_regs) {
regmap_write(qproc->conn_map, qproc->rscc_disable, 1);
ret = regmap_read_poll_timeout(qproc->halt_map, qproc->axim1_clk_off, val,
!val, 1, Q6SS_CBCR_TIMEOUT_US);
if (ret)
dev_err(qproc->dev, "failed to enable axim1 clock\n");
ret = regmap_read_poll_timeout(qproc->halt_map, qproc->crypto_clk_off, val,
!val, 1, Q6SS_CBCR_TIMEOUT_US);
if (ret)
dev_err(qproc->dev, "failed to enable crypto clock\n");
}
q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_mdm);
q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_cx);
q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_axi);
q6v5_reset_assert(qproc);
q6v5_clk_disable(qproc->dev, qproc->reset_clks,
qproc->reset_clk_count);
q6v5_clk_disable(qproc->dev, qproc->active_clks,
qproc->active_clk_count);
q6v5_regulator_disable(qproc, qproc->active_regs,
qproc->active_reg_count);
/*
 * In a failure or coredump scenario where the MBA memory could not be
 * reclaimed earlier, reclaim it here.
 */
ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false,
qproc->mba_phys,
qproc->mba_size);
WARN_ON(ret);
ret = qcom_q6v5_unprepare(&qproc->q6v5);
if (ret) {
q6v5_pds_disable(qproc, qproc->proxy_pds,
qproc->proxy_pd_count);
q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
qproc->proxy_clk_count);
q6v5_regulator_disable(qproc, qproc->fallback_proxy_regs,
qproc->fallback_proxy_reg_count);
q6v5_regulator_disable(qproc, qproc->proxy_regs,
qproc->proxy_reg_count);
}
}
static int q6v5_reload_mba(struct rproc *rproc)
{
struct q6v5 *qproc = rproc->priv;
const struct firmware *fw;
int ret;
ret = request_firmware(&fw, rproc->firmware, qproc->dev);
if (ret < 0)
return ret;
q6v5_load(rproc, fw);
ret = q6v5_mba_load(qproc);
release_firmware(fw);
return ret;
}
static int q6v5_mpss_load(struct q6v5 *qproc)
{
const struct elf32_phdr *phdrs;
const struct elf32_phdr *phdr;
const struct firmware *seg_fw;
const struct firmware *fw;
struct elf32_hdr *ehdr;
phys_addr_t mpss_reloc;
phys_addr_t boot_addr;
phys_addr_t min_addr = PHYS_ADDR_MAX;
phys_addr_t max_addr = 0;
u32 code_length;
bool relocate = false;
char *fw_name;
size_t fw_name_len;
ssize_t offset;
size_t size = 0;
void *ptr;
int ret;
int i;
fw_name_len = strlen(qproc->hexagon_mdt_image);
if (fw_name_len <= 4)
return -EINVAL;
fw_name = kstrdup(qproc->hexagon_mdt_image, GFP_KERNEL);
if (!fw_name)
return -ENOMEM;
ret = request_firmware(&fw, fw_name, qproc->dev);
if (ret < 0) {
dev_err(qproc->dev, "unable to load %s\n", fw_name);
goto out;
}
/* Initialize the RMB validator */
writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
ret = q6v5_mpss_init_image(qproc, fw, qproc->hexagon_mdt_image);
if (ret)
goto release_firmware;
ehdr = (struct elf32_hdr *)fw->data;
phdrs = (struct elf32_phdr *)(ehdr + 1);
for (i = 0; i < ehdr->e_phnum; i++) {
phdr = &phdrs[i];
if (!q6v5_phdr_valid(phdr))
continue;
if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
relocate = true;
if (phdr->p_paddr < min_addr)
min_addr = phdr->p_paddr;
if (phdr->p_paddr + phdr->p_memsz > max_addr)
max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
}
if (qproc->version == MSS_MSM8953) {
ret = qcom_scm_pas_mem_setup(MPSS_PAS_ID, qproc->mpss_phys, qproc->mpss_size);
if (ret) {
dev_err(qproc->dev,
"setting up mpss memory failed: %d\n", ret);
goto release_firmware;
}
}
/*
* In case of a modem subsystem restart on secure devices, the modem
* memory can be reclaimed only after MBA is loaded.
*/
q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, false,
qproc->mpss_phys, qproc->mpss_size);
/* Share ownership between Linux and MSS, during segment loading */
ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, true,
qproc->mpss_phys, qproc->mpss_size);
if (ret) {
dev_err(qproc->dev,
"assigning Q6 access to mpss memory failed: %d\n", ret);
ret = -EAGAIN;
goto release_firmware;
}
mpss_reloc = relocate ? min_addr : qproc->mpss_phys;
qproc->mpss_reloc = mpss_reloc;
/* Load firmware segments */
for (i = 0; i < ehdr->e_phnum; i++) {
phdr = &phdrs[i];
if (!q6v5_phdr_valid(phdr))
continue;
offset = phdr->p_paddr - mpss_reloc;
if (offset < 0 || offset + phdr->p_memsz > qproc->mpss_size) {
dev_err(qproc->dev, "segment outside memory range\n");
ret = -EINVAL;
goto release_firmware;
}
if (phdr->p_filesz > phdr->p_memsz) {
dev_err(qproc->dev,
"refusing to load segment %d with p_filesz > p_memsz\n",
i);
ret = -EINVAL;
goto release_firmware;
}
ptr = memremap(qproc->mpss_phys + offset, phdr->p_memsz, MEMREMAP_WC);
if (!ptr) {
dev_err(qproc->dev,
"unable to map memory region: %pa+%zx-%x\n",
&qproc->mpss_phys, offset, phdr->p_memsz);
goto release_firmware;
}
if (phdr->p_filesz && phdr->p_offset < fw->size) {
/* Firmware is large enough to be non-split */
if (phdr->p_offset + phdr->p_filesz > fw->size) {
dev_err(qproc->dev,
"failed to load segment %d from truncated file %s\n",
i, fw_name);
ret = -EINVAL;
memunmap(ptr);
goto release_firmware;
}
memcpy(ptr, fw->data + phdr->p_offset, phdr->p_filesz);
} else if (phdr->p_filesz) {
/* Replace "xxx.xxx" with "xxx.bxx" */
sprintf(fw_name + fw_name_len - 3, "b%02d", i);
ret = request_firmware_into_buf(&seg_fw, fw_name, qproc->dev,
ptr, phdr->p_filesz);
if (ret) {
dev_err(qproc->dev, "failed to load %s\n", fw_name);
memunmap(ptr);
goto release_firmware;
}
if (seg_fw->size != phdr->p_filesz) {
dev_err(qproc->dev,
"failed to load segment %d from truncated file %s\n",
i, fw_name);
ret = -EINVAL;
release_firmware(seg_fw);
memunmap(ptr);
goto release_firmware;
}
release_firmware(seg_fw);
}
if (phdr->p_memsz > phdr->p_filesz) {
memset(ptr + phdr->p_filesz, 0,
phdr->p_memsz - phdr->p_filesz);
}
memunmap(ptr);
size += phdr->p_memsz;
code_length = readl(qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
if (!code_length) {
boot_addr = relocate ? qproc->mpss_phys : min_addr;
writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
}
writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
ret = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
if (ret < 0) {
dev_err(qproc->dev, "MPSS authentication failed: %d\n",
ret);
goto release_firmware;
}
}
/* Transfer ownership of modem ddr region to q6 */
ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true,
qproc->mpss_phys, qproc->mpss_size);
if (ret) {
dev_err(qproc->dev,
"assigning Q6 access to mpss memory failed: %d\n", ret);
ret = -EAGAIN;
goto release_firmware;
}
ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
if (ret == -ETIMEDOUT)
dev_err(qproc->dev, "MPSS authentication timed out\n");
else if (ret < 0)
dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret);
qcom_pil_info_store("modem", qproc->mpss_phys, qproc->mpss_size);
release_firmware:
release_firmware(fw);
out:
kfree(fw_name);
return ret < 0 ? ret : 0;
}
static void qcom_q6v5_dump_segment(struct rproc *rproc,
struct rproc_dump_segment *segment,
void *dest, size_t cp_offset, size_t size)
{
int ret = 0;
struct q6v5 *qproc = rproc->priv;
int offset = segment->da - qproc->mpss_reloc;
void *ptr = NULL;
/* Unlock mba before copying segments */
if (!qproc->dump_mba_loaded) {
ret = q6v5_reload_mba(rproc);
if (!ret) {
/* Reset ownership back to Linux to copy segments */
ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
true, false,
qproc->mpss_phys,
qproc->mpss_size);
}
}
if (!ret)
ptr = memremap(qproc->mpss_phys + offset + cp_offset, size, MEMREMAP_WC);
if (ptr) {
memcpy(dest, ptr, size);
memunmap(ptr);
} else {
memset(dest, 0xff, size);
}
qproc->current_dump_size += size;
/* Reclaim mba after copying segments */
if (qproc->current_dump_size == qproc->total_dump_size) {
if (qproc->dump_mba_loaded) {
/* Try to reset ownership back to Q6 */
q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
false, true,
qproc->mpss_phys,
qproc->mpss_size);
q6v5_mba_reclaim(qproc);
}
}
}
static int q6v5_start(struct rproc *rproc)
{
struct q6v5 *qproc = rproc->priv;
int xfermemop_ret;
int ret;
ret = q6v5_mba_load(qproc);
if (ret)
return ret;
dev_info(qproc->dev, "MBA booted with%s debug policy, loading mpss\n",
qproc->dp_size ? "" : "out");
ret = q6v5_mpss_load(qproc);
if (ret)
goto reclaim_mpss;
ret = qcom_q6v5_wait_for_start(&qproc->q6v5, msecs_to_jiffies(5000));
if (ret == -ETIMEDOUT) {
dev_err(qproc->dev, "start timed out\n");
goto reclaim_mpss;
}
xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
false, qproc->mba_phys,
qproc->mba_size);
if (xfermemop_ret)
dev_err(qproc->dev,
"Failed to reclaim mba buffer system may become unstable\n");
/* Reset Dump Segment Mask */
qproc->current_dump_size = 0;
return 0;
reclaim_mpss:
q6v5_mba_reclaim(qproc);
q6v5_dump_mba_logs(qproc);
return ret;
}
static int q6v5_stop(struct rproc *rproc)
{
struct q6v5 *qproc = rproc->priv;
int ret;
ret = qcom_q6v5_request_stop(&qproc->q6v5, qproc->sysmon);
if (ret == -ETIMEDOUT)
dev_err(qproc->dev, "timed out on wait\n");
q6v5_mba_reclaim(qproc);
return 0;
}
static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
const struct firmware *mba_fw)
{
const struct firmware *fw;
const struct elf32_phdr *phdrs;
const struct elf32_phdr *phdr;
const struct elf32_hdr *ehdr;
struct q6v5 *qproc = rproc->priv;
unsigned long i;
int ret;
ret = request_firmware(&fw, qproc->hexagon_mdt_image, qproc->dev);
if (ret < 0) {
dev_err(qproc->dev, "unable to load %s\n",
qproc->hexagon_mdt_image);
return ret;
}
rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
ehdr = (struct elf32_hdr *)fw->data;
phdrs = (struct elf32_phdr *)(ehdr + 1);
qproc->total_dump_size = 0;
for (i = 0; i < ehdr->e_phnum; i++) {
phdr = &phdrs[i];
if (!q6v5_phdr_valid(phdr))
continue;
ret = rproc_coredump_add_custom_segment(rproc, phdr->p_paddr,
phdr->p_memsz,
qcom_q6v5_dump_segment,
NULL);
if (ret)
break;
qproc->total_dump_size += phdr->p_memsz;
}
release_firmware(fw);
return ret;
}
static unsigned long q6v5_panic(struct rproc *rproc)
{
struct q6v5 *qproc = rproc->priv;
return qcom_q6v5_panic(&qproc->q6v5);
}
static const struct rproc_ops q6v5_ops = {
.start = q6v5_start,
.stop = q6v5_stop,
.parse_fw = qcom_q6v5_register_dump_segments,
.load = q6v5_load,
.panic = q6v5_panic,
};
static void qcom_msa_handover(struct qcom_q6v5 *q6v5)
{
struct q6v5 *qproc = container_of(q6v5, struct q6v5, q6v5);
q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
qproc->proxy_clk_count);
q6v5_regulator_disable(qproc, qproc->proxy_regs,
qproc->proxy_reg_count);
q6v5_regulator_disable(qproc, qproc->fallback_proxy_regs,
qproc->fallback_proxy_reg_count);
q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
}
static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
{
struct of_phandle_args args;
int halt_cell_cnt = 3;
int ret;
qproc->reg_base = devm_platform_ioremap_resource_byname(pdev, "qdsp6");
if (IS_ERR(qproc->reg_base))
return PTR_ERR(qproc->reg_base);
qproc->rmb_base = devm_platform_ioremap_resource_byname(pdev, "rmb");
if (IS_ERR(qproc->rmb_base))
return PTR_ERR(qproc->rmb_base);
if (qproc->has_vq6)
halt_cell_cnt++;
ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
"qcom,halt-regs", halt_cell_cnt, 0, &args);
if (ret < 0) {
dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
return -EINVAL;
}
qproc->halt_map = syscon_node_to_regmap(args.np);
of_node_put(args.np);
if (IS_ERR(qproc->halt_map))
return PTR_ERR(qproc->halt_map);
qproc->halt_q6 = args.args[0];
qproc->halt_modem = args.args[1];
qproc->halt_nc = args.args[2];
if (qproc->has_vq6)
qproc->halt_vq6 = args.args[3];
if (qproc->has_qaccept_regs) {
ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
"qcom,qaccept-regs",
3, 0, &args);
if (ret < 0) {
dev_err(&pdev->dev, "failed to parse qaccept-regs\n");
return -EINVAL;
}
qproc->qaccept_mdm = args.args[0];
qproc->qaccept_cx = args.args[1];
qproc->qaccept_axi = args.args[2];
}
if (qproc->has_ext_cntl_regs) {
ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
"qcom,ext-regs",
2, 0, &args);
if (ret < 0) {
dev_err(&pdev->dev, "failed to parse ext-regs index 0\n");
return -EINVAL;
}
qproc->conn_map = syscon_node_to_regmap(args.np);
of_node_put(args.np);
if (IS_ERR(qproc->conn_map))
return PTR_ERR(qproc->conn_map);
qproc->force_clk_on = args.args[0];
qproc->rscc_disable = args.args[1];
ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
"qcom,ext-regs",
2, 1, &args);
if (ret < 0) {
dev_err(&pdev->dev, "failed to parse ext-regs index 1\n");
return -EINVAL;
}
qproc->axim1_clk_off = args.args[0];
qproc->crypto_clk_off = args.args[1];
}
if (qproc->has_spare_reg) {
ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
"qcom,spare-regs",
1, 0, &args);
if (ret < 0) {
dev_err(&pdev->dev, "failed to parse spare-regs\n");
return -EINVAL;
}
qproc->conn_map = syscon_node_to_regmap(args.np);
of_node_put(args.np);
if (IS_ERR(qproc->conn_map))
return PTR_ERR(qproc->conn_map);
qproc->conn_box = args.args[0];
}
return 0;
}
static int q6v5_init_clocks(struct device *dev, struct clk **clks,
char **clk_names)
{
int i;
if (!clk_names)
return 0;
for (i = 0; clk_names[i]; i++) {
clks[i] = devm_clk_get(dev, clk_names[i]);
if (IS_ERR(clks[i])) {
int rc = PTR_ERR(clks[i]);
if (rc != -EPROBE_DEFER)
dev_err(dev, "Failed to get %s clock\n",
clk_names[i]);
return rc;
}
}
return i;
}
static int q6v5_pds_attach(struct device *dev, struct device **devs,
char **pd_names)
{
size_t num_pds = 0;
int ret;
int i;
if (!pd_names)
return 0;
while (pd_names[num_pds])
num_pds++;
for (i = 0; i < num_pds; i++) {
devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
if (IS_ERR_OR_NULL(devs[i])) {
ret = PTR_ERR(devs[i]) ? : -ENODATA;
goto unroll_attach;
}
}
return num_pds;
unroll_attach:
for (i--; i >= 0; i--)
dev_pm_domain_detach(devs[i], false);
return ret;
}
static void q6v5_pds_detach(struct q6v5 *qproc, struct device **pds,
size_t pd_count)
{
int i;
for (i = 0; i < pd_count; i++)
dev_pm_domain_detach(pds[i], false);
}
static int q6v5_init_reset(struct q6v5 *qproc)
{
qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev,
"mss_restart");
if (IS_ERR(qproc->mss_restart)) {
dev_err(qproc->dev, "failed to acquire mss restart\n");
return PTR_ERR(qproc->mss_restart);
}
if (qproc->has_alt_reset || qproc->has_spare_reg || qproc->has_ext_cntl_regs) {
qproc->pdc_reset = devm_reset_control_get_exclusive(qproc->dev,
"pdc_reset");
if (IS_ERR(qproc->pdc_reset)) {
dev_err(qproc->dev, "failed to acquire pdc reset\n");
return PTR_ERR(qproc->pdc_reset);
}
}
return 0;
}
static int q6v5_alloc_memory_region(struct q6v5 *qproc)
{
struct device_node *child;
struct reserved_mem *rmem;
struct device_node *node;
/*
* In the absence of mba/mpss sub-child nodes, extract the mba and mpss
* reserved memory regions from the device's memory-region property.
*/
child = of_get_child_by_name(qproc->dev->of_node, "mba");
if (!child) {
node = of_parse_phandle(qproc->dev->of_node,
"memory-region", 0);
} else {
node = of_parse_phandle(child, "memory-region", 0);
of_node_put(child);
}
if (!node) {
dev_err(qproc->dev, "no mba memory-region specified\n");
return -EINVAL;
}
rmem = of_reserved_mem_lookup(node);
of_node_put(node);
if (!rmem) {
dev_err(qproc->dev, "unable to resolve mba region\n");
return -EINVAL;
}
qproc->mba_phys = rmem->base;
qproc->mba_size = rmem->size;
if (!child) {
node = of_parse_phandle(qproc->dev->of_node,
"memory-region", 1);
} else {
child = of_get_child_by_name(qproc->dev->of_node, "mpss");
node = of_parse_phandle(child, "memory-region", 0);
of_node_put(child);
}
if (!node) {
dev_err(qproc->dev, "no mpss memory-region specified\n");
return -EINVAL;
}
rmem = of_reserved_mem_lookup(node);
of_node_put(node);
if (!rmem) {
dev_err(qproc->dev, "unable to resolve mpss region\n");
return -EINVAL;
}
qproc->mpss_phys = qproc->mpss_reloc = rmem->base;
qproc->mpss_size = rmem->size;
if (!child) {
node = of_parse_phandle(qproc->dev->of_node, "memory-region", 2);
} else {
child = of_get_child_by_name(qproc->dev->of_node, "metadata");
node = of_parse_phandle(child, "memory-region", 0);
of_node_put(child);
}
if (!node)
return 0;
rmem = of_reserved_mem_lookup(node);
if (!rmem) {
dev_err(qproc->dev, "unable to resolve metadata region\n");
return -EINVAL;
}
qproc->mdata_phys = rmem->base;
qproc->mdata_size = rmem->size;
return 0;
}
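/*
 * Device-tree conventions handled above: newer bindings describe "mba",
 * "mpss" and an optional "metadata" child node, each with its own
 * memory-region phandle, while older bindings list the same regions as
 * memory-region indices 0, 1 and (optionally) 2 on the parent node.  The
 * metadata carveout is optional in both schemes.
 */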
static int q6v5_probe(struct platform_device *pdev)
{
const struct rproc_hexagon_res *desc;
struct device_node *node;
struct q6v5 *qproc;
struct rproc *rproc;
const char *mba_image;
int ret;
desc = of_device_get_match_data(&pdev->dev);
if (!desc)
return -EINVAL;
if (desc->need_mem_protection && !qcom_scm_is_available())
return -EPROBE_DEFER;
mba_image = desc->hexagon_mba_image;
ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
0, &mba_image);
if (ret < 0 && ret != -EINVAL) {
dev_err(&pdev->dev, "unable to read mba firmware-name\n");
return ret;
}
rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
mba_image, sizeof(*qproc));
if (!rproc) {
dev_err(&pdev->dev, "failed to allocate rproc\n");
return -ENOMEM;
}
rproc->auto_boot = false;
rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
qproc = rproc->priv;
qproc->dev = &pdev->dev;
qproc->rproc = rproc;
qproc->hexagon_mdt_image = "modem.mdt";
ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
1, &qproc->hexagon_mdt_image);
if (ret < 0 && ret != -EINVAL) {
dev_err(&pdev->dev, "unable to read mpss firmware-name\n");
goto free_rproc;
}
platform_set_drvdata(pdev, qproc);
qproc->has_qaccept_regs = desc->has_qaccept_regs;
qproc->has_ext_cntl_regs = desc->has_ext_cntl_regs;
qproc->has_vq6 = desc->has_vq6;
qproc->has_spare_reg = desc->has_spare_reg;
ret = q6v5_init_mem(qproc, pdev);
if (ret)
goto free_rproc;
ret = q6v5_alloc_memory_region(qproc);
if (ret)
goto free_rproc;
ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks,
desc->proxy_clk_names);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to get proxy clocks.\n");
goto free_rproc;
}
qproc->proxy_clk_count = ret;
ret = q6v5_init_clocks(&pdev->dev, qproc->reset_clks,
desc->reset_clk_names);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to get reset clocks.\n");
goto free_rproc;
}
qproc->reset_clk_count = ret;
ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks,
desc->active_clk_names);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to get active clocks.\n");
goto free_rproc;
}
qproc->active_clk_count = ret;
ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs,
desc->proxy_supply);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to get proxy regulators.\n");
goto free_rproc;
}
qproc->proxy_reg_count = ret;
ret = q6v5_regulator_init(&pdev->dev, qproc->active_regs,
desc->active_supply);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to get active regulators.\n");
goto free_rproc;
}
qproc->active_reg_count = ret;
ret = q6v5_pds_attach(&pdev->dev, qproc->proxy_pds,
desc->proxy_pd_names);
/* Fallback to regulators for old device trees */
if (ret == -ENODATA && desc->fallback_proxy_supply) {
ret = q6v5_regulator_init(&pdev->dev,
qproc->fallback_proxy_regs,
desc->fallback_proxy_supply);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to get fallback proxy regulators.\n");
goto free_rproc;
}
qproc->fallback_proxy_reg_count = ret;
} else if (ret < 0) {
dev_err(&pdev->dev, "Failed to init power domains\n");
goto free_rproc;
} else {
qproc->proxy_pd_count = ret;
}
qproc->has_alt_reset = desc->has_alt_reset;
ret = q6v5_init_reset(qproc);
if (ret)
goto detach_proxy_pds;
qproc->version = desc->version;
qproc->need_mem_protection = desc->need_mem_protection;
qproc->has_mba_logs = desc->has_mba_logs;
ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM, "modem",
qcom_msa_handover);
if (ret)
goto detach_proxy_pds;
qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS);
qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS);
qcom_add_glink_subdev(rproc, &qproc->glink_subdev, "mpss");
qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");
qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12);
if (IS_ERR(qproc->sysmon)) {
ret = PTR_ERR(qproc->sysmon);
goto remove_subdevs;
}
ret = rproc_add(rproc);
if (ret)
goto remove_sysmon_subdev;
node = of_get_compatible_child(pdev->dev.of_node, "qcom,bam-dmux");
qproc->bam_dmux = of_platform_device_create(node, NULL, &pdev->dev);
of_node_put(node);
return 0;
remove_sysmon_subdev:
qcom_remove_sysmon_subdev(qproc->sysmon);
remove_subdevs:
qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);
detach_proxy_pds:
q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
free_rproc:
rproc_free(rproc);
return ret;
}
static void q6v5_remove(struct platform_device *pdev)
{
struct q6v5 *qproc = platform_get_drvdata(pdev);
struct rproc *rproc = qproc->rproc;
if (qproc->bam_dmux)
of_platform_device_destroy(&qproc->bam_dmux->dev, NULL);
rproc_del(rproc);
qcom_q6v5_deinit(&qproc->q6v5);
qcom_remove_sysmon_subdev(qproc->sysmon);
qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);
q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
rproc_free(rproc);
}
static const struct rproc_hexagon_res sc7180_mss = {
.hexagon_mba_image = "mba.mbn",
.proxy_clk_names = (char*[]){
"xo",
NULL
},
.reset_clk_names = (char*[]){
"iface",
"bus",
"snoc_axi",
NULL
},
.active_clk_names = (char*[]){
"mnoc_axi",
"nav",
NULL
},
.proxy_pd_names = (char*[]){
"cx",
"mx",
"mss",
NULL
},
.need_mem_protection = true,
.has_alt_reset = false,
.has_mba_logs = true,
.has_spare_reg = true,
.has_qaccept_regs = false,
.has_ext_cntl_regs = false,
.has_vq6 = false,
.version = MSS_SC7180,
};
static const struct rproc_hexagon_res sc7280_mss = {
.hexagon_mba_image = "mba.mbn",
.proxy_clk_names = (char*[]){
"xo",
"pka",
NULL
},
.active_clk_names = (char*[]){
"iface",
"offline",
"snoc_axi",
NULL
},
.proxy_pd_names = (char*[]){
"cx",
"mss",
NULL
},
.need_mem_protection = true,
.has_alt_reset = false,
.has_mba_logs = true,
.has_spare_reg = false,
.has_qaccept_regs = true,
.has_ext_cntl_regs = true,
.has_vq6 = true,
.version = MSS_SC7280,
};
static const struct rproc_hexagon_res sdm660_mss = {
.hexagon_mba_image = "mba.mbn",
.proxy_clk_names = (char*[]){
"xo",
"qdss",
"mem",
NULL
},
.active_clk_names = (char*[]){
"iface",
"bus",
"gpll0_mss",
"mnoc_axi",
"snoc_axi",
NULL
},
.proxy_pd_names = (char*[]){
"cx",
"mx",
NULL
},
.need_mem_protection = true,
.has_alt_reset = false,
.has_mba_logs = false,
.has_spare_reg = false,
.has_qaccept_regs = false,
.has_ext_cntl_regs = false,
.has_vq6 = false,
.version = MSS_SDM660,
};
static const struct rproc_hexagon_res sdm845_mss = {
.hexagon_mba_image = "mba.mbn",
.proxy_clk_names = (char*[]){
"xo",
"prng",
NULL
},
.reset_clk_names = (char*[]){
"iface",
"snoc_axi",
NULL
},
.active_clk_names = (char*[]){
"bus",
"mem",
"gpll0_mss",
"mnoc_axi",
NULL
},
.proxy_pd_names = (char*[]){
"cx",
"mx",
"mss",
NULL
},
.need_mem_protection = true,
.has_alt_reset = true,
.has_mba_logs = false,
.has_spare_reg = false,
.has_qaccept_regs = false,
.has_ext_cntl_regs = false,
.has_vq6 = false,
.version = MSS_SDM845,
};
static const struct rproc_hexagon_res msm8998_mss = {
.hexagon_mba_image = "mba.mbn",
.proxy_clk_names = (char*[]){
"xo",
"qdss",
"mem",
NULL
},
.active_clk_names = (char*[]){
"iface",
"bus",
"gpll0_mss",
"mnoc_axi",
"snoc_axi",
NULL
},
.proxy_pd_names = (char*[]){
"cx",
"mx",
NULL
},
.need_mem_protection = true,
.has_alt_reset = false,
.has_mba_logs = false,
.has_spare_reg = false,
.has_qaccept_regs = false,
.has_ext_cntl_regs = false,
.has_vq6 = false,
.version = MSS_MSM8998,
};
static const struct rproc_hexagon_res msm8996_mss = {
.hexagon_mba_image = "mba.mbn",
.proxy_supply = (struct qcom_mss_reg_res[]) {
{
.supply = "pll",
.uA = 100000,
},
{}
},
.proxy_clk_names = (char*[]){
"xo",
"pnoc",
"qdss",
NULL
},
.active_clk_names = (char*[]){
"iface",
"bus",
"mem",
"gpll0_mss",
"snoc_axi",
"mnoc_axi",
NULL
},
.proxy_pd_names = (char*[]){
"mx",
"cx",
NULL
},
.need_mem_protection = true,
.has_alt_reset = false,
.has_mba_logs = false,
.has_spare_reg = false,
.has_qaccept_regs = false,
.has_ext_cntl_regs = false,
.has_vq6 = false,
.version = MSS_MSM8996,
};
static const struct rproc_hexagon_res msm8909_mss = {
.hexagon_mba_image = "mba.mbn",
.proxy_supply = (struct qcom_mss_reg_res[]) {
{
.supply = "pll",
.uA = 100000,
},
{}
},
.proxy_clk_names = (char*[]){
"xo",
NULL
},
.active_clk_names = (char*[]){
"iface",
"bus",
"mem",
NULL
},
.proxy_pd_names = (char*[]){
"mx",
"cx",
NULL
},
.need_mem_protection = false,
.has_alt_reset = false,
.has_mba_logs = false,
.has_spare_reg = false,
.has_qaccept_regs = false,
.has_ext_cntl_regs = false,
.has_vq6 = false,
.version = MSS_MSM8909,
};
static const struct rproc_hexagon_res msm8916_mss = {
.hexagon_mba_image = "mba.mbn",
.proxy_supply = (struct qcom_mss_reg_res[]) {
{
.supply = "pll",
.uA = 100000,
},
{}
},
.fallback_proxy_supply = (struct qcom_mss_reg_res[]) {
{
.supply = "mx",
.uV = 1050000,
},
{
.supply = "cx",
.uA = 100000,
},
{}
},
.proxy_clk_names = (char*[]){
"xo",
NULL
},
.active_clk_names = (char*[]){
"iface",
"bus",
"mem",
NULL
},
.proxy_pd_names = (char*[]){
"mx",
"cx",
NULL
},
.need_mem_protection = false,
.has_alt_reset = false,
.has_mba_logs = false,
.has_spare_reg = false,
.has_qaccept_regs = false,
.has_ext_cntl_regs = false,
.has_vq6 = false,
.version = MSS_MSM8916,
};
static const struct rproc_hexagon_res msm8953_mss = {
.hexagon_mba_image = "mba.mbn",
.proxy_supply = (struct qcom_mss_reg_res[]) {
{
.supply = "pll",
.uA = 100000,
},
{}
},
.proxy_clk_names = (char*[]){
"xo",
NULL
},
.active_clk_names = (char*[]){
"iface",
"bus",
"mem",
NULL
},
.proxy_pd_names = (char*[]) {
"cx",
"mx",
"mss",
NULL
},
.need_mem_protection = false,
.has_alt_reset = false,
.has_mba_logs = false,
.has_spare_reg = false,
.has_qaccept_regs = false,
.has_ext_cntl_regs = false,
.has_vq6 = false,
.version = MSS_MSM8953,
};
static const struct rproc_hexagon_res msm8974_mss = {
.hexagon_mba_image = "mba.b00",
.proxy_supply = (struct qcom_mss_reg_res[]) {
{
.supply = "pll",
.uA = 100000,
},
{}
},
.fallback_proxy_supply = (struct qcom_mss_reg_res[]) {
{
.supply = "mx",
.uV = 1050000,
},
{
.supply = "cx",
.uA = 100000,
},
{}
},
.active_supply = (struct qcom_mss_reg_res[]) {
{
.supply = "mss",
.uV = 1050000,
.uA = 100000,
},
{}
},
.proxy_clk_names = (char*[]){
"xo",
NULL
},
.active_clk_names = (char*[]){
"iface",
"bus",
"mem",
NULL
},
.proxy_pd_names = (char*[]){
"mx",
"cx",
NULL
},
.need_mem_protection = false,
.has_alt_reset = false,
.has_mba_logs = false,
.has_spare_reg = false,
.has_qaccept_regs = false,
.has_ext_cntl_regs = false,
.has_vq6 = false,
.version = MSS_MSM8974,
};
static const struct of_device_id q6v5_of_match[] = {
{ .compatible = "qcom,q6v5-pil", .data = &msm8916_mss},
{ .compatible = "qcom,msm8909-mss-pil", .data = &msm8909_mss},
{ .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
{ .compatible = "qcom,msm8953-mss-pil", .data = &msm8953_mss},
{ .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
{ .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
{ .compatible = "qcom,msm8998-mss-pil", .data = &msm8998_mss},
{ .compatible = "qcom,sc7180-mss-pil", .data = &sc7180_mss},
{ .compatible = "qcom,sc7280-mss-pil", .data = &sc7280_mss},
{ .compatible = "qcom,sdm660-mss-pil", .data = &sdm660_mss},
{ .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss},
{ },
};
MODULE_DEVICE_TABLE(of, q6v5_of_match);
static struct platform_driver q6v5_driver = {
.probe = q6v5_probe,
.remove_new = q6v5_remove,
.driver = {
.name = "qcom-q6v5-mss",
.of_match_table = q6v5_of_match,
},
};
module_platform_driver(q6v5_driver);
MODULE_DESCRIPTION("Qualcomm Self-authenticating modem remoteproc driver");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/remoteproc/qcom_q6v5_mss.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2019-2020 Linaro Ltd.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_address.h>
#include "qcom_pil_info.h"
/*
* The PIL relocation information region is used to communicate memory regions
* occupied by co-processor firmware for post mortem crash analysis.
*
* It consists of an array of entries with an 8 byte textual identifier of the
* region followed by a 64 bit base address and 32 bit size, both little
* endian.
*/
#define PIL_RELOC_NAME_LEN 8
#define PIL_RELOC_ENTRY_SIZE (PIL_RELOC_NAME_LEN + sizeof(__le64) + sizeof(__le32))
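/*
 * Per-entry layout implied by the definitions above (20 bytes per entry;
 * offsets are an illustration, not an authoritative map of the IMEM region):
 *
 *   offset  0: char   name[8]   image identifier, padded with NULs
 *   offset  8: __le64 base      physical base address of the loaded image
 *   offset 16: __le32 size      size of the loaded image in bytes
 *
 * With a 20-byte stride the 64-bit base field is only 4-byte aligned on odd
 * entries, which is why qcom_pil_info_store() below writes it as two 32-bit
 * words.
 */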
struct pil_reloc {
void __iomem *base;
size_t num_entries;
};
static struct pil_reloc _reloc __read_mostly;
static DEFINE_MUTEX(pil_reloc_lock);
static int qcom_pil_info_init(void)
{
struct device_node *np;
struct resource imem;
void __iomem *base;
int ret;
/* Already initialized? */
if (_reloc.base)
return 0;
np = of_find_compatible_node(NULL, NULL, "qcom,pil-reloc-info");
if (!np)
return -ENOENT;
ret = of_address_to_resource(np, 0, &imem);
of_node_put(np);
if (ret < 0)
return ret;
base = ioremap(imem.start, resource_size(&imem));
if (!base) {
pr_err("failed to map PIL relocation info region\n");
return -ENOMEM;
}
memset_io(base, 0, resource_size(&imem));
_reloc.base = base;
_reloc.num_entries = (u32)resource_size(&imem) / PIL_RELOC_ENTRY_SIZE;
return 0;
}
/**
* qcom_pil_info_store() - store PIL information of image in IMEM
* @image: name of the image
* @base: base address of the loaded image
* @size: size of the loaded image
*
* Return: 0 on success, negative errno on failure
*/
int qcom_pil_info_store(const char *image, phys_addr_t base, size_t size)
{
char buf[PIL_RELOC_NAME_LEN];
void __iomem *entry;
int ret;
int i;
mutex_lock(&pil_reloc_lock);
ret = qcom_pil_info_init();
if (ret < 0) {
mutex_unlock(&pil_reloc_lock);
return ret;
}
for (i = 0; i < _reloc.num_entries; i++) {
entry = _reloc.base + i * PIL_RELOC_ENTRY_SIZE;
memcpy_fromio(buf, entry, PIL_RELOC_NAME_LEN);
/*
* An empty record means we didn't find it, given that the
* records are packed.
*/
if (!buf[0])
goto found_unused;
if (!strncmp(buf, image, PIL_RELOC_NAME_LEN))
goto found_existing;
}
pr_warn("insufficient PIL info slots\n");
mutex_unlock(&pil_reloc_lock);
return -ENOMEM;
found_unused:
memcpy_toio(entry, image, strnlen(image, PIL_RELOC_NAME_LEN));
found_existing:
/* Use two writel() as base is only aligned to 4 bytes on odd entries */
writel(base, entry + PIL_RELOC_NAME_LEN);
writel((u64)base >> 32, entry + PIL_RELOC_NAME_LEN + 4);
writel(size, entry + PIL_RELOC_NAME_LEN + sizeof(__le64));
mutex_unlock(&pil_reloc_lock);
return 0;
}
EXPORT_SYMBOL_GPL(qcom_pil_info_store);
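/*
 * Typical usage from a remoteproc driver (see qcom_q6v5_mss.c above); a
 * minimal sketch rather than a prescribed calling convention:
 *
 *   qcom_pil_info_store("mba", qproc->mba_phys, MBA_LOG_SIZE);
 *   qcom_pil_info_store("modem", qproc->mpss_phys, qproc->mpss_size);
 *
 * A repeated store for an existing image name updates that entry in place;
 * a new name takes the first unused slot.
 */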
static void __exit pil_reloc_exit(void)
{
mutex_lock(&pil_reloc_lock);
iounmap(_reloc.base);
_reloc.base = NULL;
mutex_unlock(&pil_reloc_lock);
}
module_exit(pil_reloc_exit);
MODULE_DESCRIPTION("Qualcomm PIL relocation info");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/remoteproc/qcom_pil_info.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Qualcomm Peripheral Image Loader helpers
*
* Copyright (C) 2016 Linaro Ltd
* Copyright (C) 2015 Sony Mobile Communications Inc
* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*/
#include <linux/firmware.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/remoteproc.h>
#include <linux/remoteproc/qcom_rproc.h>
#include <linux/rpmsg/qcom_glink.h>
#include <linux/rpmsg/qcom_smd.h>
#include <linux/slab.h>
#include <linux/soc/qcom/mdt_loader.h>
#include <linux/soc/qcom/smem.h>
#include "remoteproc_internal.h"
#include "qcom_common.h"
#define to_glink_subdev(d) container_of(d, struct qcom_rproc_glink, subdev)
#define to_smd_subdev(d) container_of(d, struct qcom_rproc_subdev, subdev)
#define to_ssr_subdev(d) container_of(d, struct qcom_rproc_ssr, subdev)
#define MAX_NUM_OF_SS 10
#define MAX_REGION_NAME_LENGTH 16
#define SBL_MINIDUMP_SMEM_ID 602
#define MINIDUMP_REGION_VALID ('V' << 24 | 'A' << 16 | 'L' << 8 | 'I' << 0)
#define MINIDUMP_SS_ENCR_DONE ('D' << 24 | 'O' << 16 | 'N' << 8 | 'E' << 0)
#define MINIDUMP_SS_ENABLED ('E' << 24 | 'N' << 16 | 'B' << 8 | 'L' << 0)
/**
* struct minidump_region - Minidump region
* @name : Name of the region to be dumped
* @seq_num : Used to differentiate regions with the same name.
* @valid : Set to 1 if this entry is to be dumped.
* @address : Physical address of region to be dumped
* @size : Size of the region
*/
struct minidump_region {
char name[MAX_REGION_NAME_LENGTH];
__le32 seq_num;
__le32 valid;
__le64 address;
__le64 size;
};
/**
* struct minidump_subsystem - Subsystem's SMEM Table of content
* @status : Subsystem toc init status
* @enabled : If set to 1, the subsystem's regions are copied during coredump
* @encryption_status: Encryption status for this subsystem
* @encryption_required : Whether the subsystem regions must be encrypted
* @region_count : Number of regions added in this subsystem toc
* @regions_baseptr : regions base pointer of the subsystem
*/
struct minidump_subsystem {
__le32 status;
__le32 enabled;
__le32 encryption_status;
__le32 encryption_required;
__le32 region_count;
__le64 regions_baseptr;
};
/**
* struct minidump_global_toc - Global Table of Content
* @status : Global Minidump init status
* @md_revision : Minidump revision
* @enabled : Minidump enable status
* @subsystems : Array of subsystems toc
*/
struct minidump_global_toc {
__le32 status;
__le32 md_revision;
__le32 enabled;
struct minidump_subsystem subsystems[MAX_NUM_OF_SS];
};
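/*
 * Lookup path used by qcom_minidump() below (a sketch of the SMEM layout
 * derived from these structures, not a formal specification):
 *
 *   SMEM item SBL_MINIDUMP_SMEM_ID
 *     -> struct minidump_global_toc
 *        -> subsystems[minidump_id]        (struct minidump_subsystem)
 *           -> regions_baseptr             physical address of
 *              struct minidump_region[region_count]
 */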
struct qcom_ssr_subsystem {
const char *name;
struct srcu_notifier_head notifier_list;
struct list_head list;
};
static LIST_HEAD(qcom_ssr_subsystem_list);
static DEFINE_MUTEX(qcom_ssr_subsys_lock);
static void qcom_minidump_cleanup(struct rproc *rproc)
{
struct rproc_dump_segment *entry, *tmp;
list_for_each_entry_safe(entry, tmp, &rproc->dump_segments, node) {
list_del(&entry->node);
kfree(entry->priv);
kfree(entry);
}
}
static int qcom_add_minidump_segments(struct rproc *rproc, struct minidump_subsystem *subsystem,
void (*rproc_dumpfn_t)(struct rproc *rproc, struct rproc_dump_segment *segment,
void *dest, size_t offset, size_t size))
{
struct minidump_region __iomem *ptr;
struct minidump_region region;
int seg_cnt, i;
dma_addr_t da;
size_t size;
char *name;
if (WARN_ON(!list_empty(&rproc->dump_segments))) {
dev_err(&rproc->dev, "dump segment list already populated\n");
return -EUCLEAN;
}
seg_cnt = le32_to_cpu(subsystem->region_count);
ptr = ioremap((unsigned long)le64_to_cpu(subsystem->regions_baseptr),
seg_cnt * sizeof(struct minidump_region));
if (!ptr)
return -EFAULT;
for (i = 0; i < seg_cnt; i++) {
memcpy_fromio(&region, ptr + i, sizeof(region));
if (le32_to_cpu(region.valid) == MINIDUMP_REGION_VALID) {
name = kstrndup(region.name, MAX_REGION_NAME_LENGTH - 1, GFP_KERNEL);
if (!name) {
iounmap(ptr);
return -ENOMEM;
}
da = le64_to_cpu(region.address);
size = le64_to_cpu(region.size);
rproc_coredump_add_custom_segment(rproc, da, size, rproc_dumpfn_t, name);
}
}
iounmap(ptr);
return 0;
}
void qcom_minidump(struct rproc *rproc, unsigned int minidump_id,
void (*rproc_dumpfn_t)(struct rproc *rproc,
struct rproc_dump_segment *segment, void *dest, size_t offset,
size_t size))
{
int ret;
struct minidump_subsystem *subsystem;
struct minidump_global_toc *toc;
/* Get the global minidump ToC */
toc = qcom_smem_get(QCOM_SMEM_HOST_ANY, SBL_MINIDUMP_SMEM_ID, NULL);
/* check if global table pointer exists and init is set */
if (IS_ERR(toc) || !toc->status) {
dev_err(&rproc->dev, "Minidump TOC not found in SMEM\n");
return;
}
/* Get subsystem table of contents using the minidump id */
subsystem = &toc->subsystems[minidump_id];
/*
* Collect minidump if SS ToC is valid and segment table
* is initialized in memory and encryption status is set.
*/
if (subsystem->regions_baseptr == 0 ||
le32_to_cpu(subsystem->status) != 1 ||
le32_to_cpu(subsystem->enabled) != MINIDUMP_SS_ENABLED) {
return rproc_coredump(rproc);
}
if (le32_to_cpu(subsystem->encryption_status) != MINIDUMP_SS_ENCR_DONE) {
dev_err(&rproc->dev, "Minidump not ready, skipping\n");
return;
}
/*
* Clear out the dump segments populated by parse_fw before
* re-populating them with minidump segments.
*/
rproc_coredump_cleanup(rproc);
ret = qcom_add_minidump_segments(rproc, subsystem, rproc_dumpfn_t);
if (ret) {
dev_err(&rproc->dev, "Failed with error: %d while adding minidump entries\n", ret);
goto clean_minidump;
}
rproc_coredump_using_sections(rproc);
clean_minidump:
qcom_minidump_cleanup(rproc);
}
EXPORT_SYMBOL_GPL(qcom_minidump);
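/*
 * Decision flow in qcom_minidump() above: if the subsystem ToC is absent or
 * not marked enabled/initialized, fall back to a regular rproc_coredump();
 * if the ToC is valid but encryption is not yet marked done, skip the dump
 * entirely; otherwise drop the segments registered by parse_fw and dump only
 * the minidump regions, using section-based coredump output.
 */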
static int glink_subdev_start(struct rproc_subdev *subdev)
{
struct qcom_rproc_glink *glink = to_glink_subdev(subdev);
glink->edge = qcom_glink_smem_register(glink->dev, glink->node);
return PTR_ERR_OR_ZERO(glink->edge);
}
static void glink_subdev_stop(struct rproc_subdev *subdev, bool crashed)
{
struct qcom_rproc_glink *glink = to_glink_subdev(subdev);
qcom_glink_smem_unregister(glink->edge);
glink->edge = NULL;
}
static void glink_subdev_unprepare(struct rproc_subdev *subdev)
{
struct qcom_rproc_glink *glink = to_glink_subdev(subdev);
qcom_glink_ssr_notify(glink->ssr_name);
}
/**
* qcom_add_glink_subdev() - try to add a GLINK subdevice to rproc
* @rproc: rproc handle to parent the subdevice
* @glink: reference to a GLINK subdev context
* @ssr_name: identifier of the associated remoteproc for ssr notifications
*/
void qcom_add_glink_subdev(struct rproc *rproc, struct qcom_rproc_glink *glink,
const char *ssr_name)
{
struct device *dev = &rproc->dev;
glink->node = of_get_child_by_name(dev->parent->of_node, "glink-edge");
if (!glink->node)
return;
glink->ssr_name = kstrdup_const(ssr_name, GFP_KERNEL);
if (!glink->ssr_name)
return;
glink->dev = dev;
glink->subdev.start = glink_subdev_start;
glink->subdev.stop = glink_subdev_stop;
glink->subdev.unprepare = glink_subdev_unprepare;
rproc_add_subdev(rproc, &glink->subdev);
}
EXPORT_SYMBOL_GPL(qcom_add_glink_subdev);
/**
* qcom_remove_glink_subdev() - remove a GLINK subdevice from rproc
* @rproc: rproc handle
* @glink: reference to a GLINK subdev context
*/
void qcom_remove_glink_subdev(struct rproc *rproc, struct qcom_rproc_glink *glink)
{
if (!glink->node)
return;
rproc_remove_subdev(rproc, &glink->subdev);
kfree_const(glink->ssr_name);
of_node_put(glink->node);
}
EXPORT_SYMBOL_GPL(qcom_remove_glink_subdev);
/**
* qcom_register_dump_segments() - register segments for coredump
* @rproc: remoteproc handle
* @fw: firmware header
*
* Register all segments of the ELF in the remoteproc coredump segment list
*
* Return: 0 on success, negative errno on failure.
*/
int qcom_register_dump_segments(struct rproc *rproc,
const struct firmware *fw)
{
const struct elf32_phdr *phdrs;
const struct elf32_phdr *phdr;
const struct elf32_hdr *ehdr;
int ret;
int i;
ehdr = (struct elf32_hdr *)fw->data;
phdrs = (struct elf32_phdr *)(ehdr + 1);
for (i = 0; i < ehdr->e_phnum; i++) {
phdr = &phdrs[i];
if (phdr->p_type != PT_LOAD)
continue;
if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
continue;
if (!phdr->p_memsz)
continue;
ret = rproc_coredump_add_segment(rproc, phdr->p_paddr,
phdr->p_memsz);
if (ret)
return ret;
}
return 0;
}
EXPORT_SYMBOL_GPL(qcom_register_dump_segments);
static int smd_subdev_start(struct rproc_subdev *subdev)
{
struct qcom_rproc_subdev *smd = to_smd_subdev(subdev);
smd->edge = qcom_smd_register_edge(smd->dev, smd->node);
return PTR_ERR_OR_ZERO(smd->edge);
}
static void smd_subdev_stop(struct rproc_subdev *subdev, bool crashed)
{
struct qcom_rproc_subdev *smd = to_smd_subdev(subdev);
qcom_smd_unregister_edge(smd->edge);
smd->edge = NULL;
}
/**
* qcom_add_smd_subdev() - try to add a SMD subdevice to rproc
* @rproc: rproc handle to parent the subdevice
* @smd: reference to a Qualcomm subdev context
*/
void qcom_add_smd_subdev(struct rproc *rproc, struct qcom_rproc_subdev *smd)
{
struct device *dev = &rproc->dev;
smd->node = of_get_child_by_name(dev->parent->of_node, "smd-edge");
if (!smd->node)
return;
smd->dev = dev;
smd->subdev.start = smd_subdev_start;
smd->subdev.stop = smd_subdev_stop;
rproc_add_subdev(rproc, &smd->subdev);
}
EXPORT_SYMBOL_GPL(qcom_add_smd_subdev);
/**
* qcom_remove_smd_subdev() - remove the smd subdevice from rproc
* @rproc: rproc handle
* @smd: the SMD subdevice to remove
*/
void qcom_remove_smd_subdev(struct rproc *rproc, struct qcom_rproc_subdev *smd)
{
if (!smd->node)
return;
rproc_remove_subdev(rproc, &smd->subdev);
of_node_put(smd->node);
}
EXPORT_SYMBOL_GPL(qcom_remove_smd_subdev);
static struct qcom_ssr_subsystem *qcom_ssr_get_subsys(const char *name)
{
struct qcom_ssr_subsystem *info;
mutex_lock(&qcom_ssr_subsys_lock);
/* Match in the global qcom_ssr_subsystem_list with name */
list_for_each_entry(info, &qcom_ssr_subsystem_list, list)
if (!strcmp(info->name, name))
goto out;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info) {
info = ERR_PTR(-ENOMEM);
goto out;
}
info->name = kstrdup_const(name, GFP_KERNEL);
srcu_init_notifier_head(&info->notifier_list);
/* Add to global notification list */
list_add_tail(&info->list, &qcom_ssr_subsystem_list);
out:
mutex_unlock(&qcom_ssr_subsys_lock);
return info;
}
/**
* qcom_register_ssr_notifier() - register SSR notification handler
* @name: Subsystem's SSR name
* @nb: notifier_block to be invoked upon subsystem's state change
*
* This registers the @nb notifier block as part of the notifier chain for a
* remoteproc associated with @name. The notifier block's callback
* will be invoked when the remote processor's SSR events occur
* (pre/post startup and pre/post shutdown).
*
* Return: a subsystem cookie on success, ERR_PTR on failure.
*/
void *qcom_register_ssr_notifier(const char *name, struct notifier_block *nb)
{
struct qcom_ssr_subsystem *info;
info = qcom_ssr_get_subsys(name);
if (IS_ERR(info))
return info;
srcu_notifier_chain_register(&info->notifier_list, nb);
return &info->notifier_list;
}
EXPORT_SYMBOL_GPL(qcom_register_ssr_notifier);
/**
* qcom_unregister_ssr_notifier() - unregister SSR notification handler
* @notify: subsystem cookie returned from qcom_register_ssr_notifier
* @nb: notifier_block to unregister
*
* This function will unregister the notifier from the particular notifier
* chain.
*
* Return: 0 on success, %ENOENT otherwise.
*/
int qcom_unregister_ssr_notifier(void *notify, struct notifier_block *nb)
{
return srcu_notifier_chain_unregister(notify, nb);
}
EXPORT_SYMBOL_GPL(qcom_unregister_ssr_notifier);
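/*
 * Illustrative sketch, not part of the original file: a minimal client of the
 * SSR notifier API above. The subsystem name "mpss" and all example_* names
 * are assumptions made purely for illustration.
 */
static int example_ssr_cb(struct notifier_block *nb, unsigned long action,
			  void *data)
{
	struct qcom_ssr_notify_data *notify_data = data;

	/* action is one of QCOM_SSR_BEFORE/AFTER_POWERUP/SHUTDOWN */
	pr_info("SSR event %lu for %s (crashed: %d)\n",
		action, notify_data->name, notify_data->crashed);

	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_ssr_cb,
};

static void *example_ssr_register(void)
{
	void *cookie;

	cookie = qcom_register_ssr_notifier("mpss", &example_nb);
	if (IS_ERR(cookie))
		return cookie;

	/* Later, typically in a remove() path: */
	/* qcom_unregister_ssr_notifier(cookie, &example_nb); */

	return cookie;
}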
static int ssr_notify_prepare(struct rproc_subdev *subdev)
{
struct qcom_rproc_ssr *ssr = to_ssr_subdev(subdev);
struct qcom_ssr_notify_data data = {
.name = ssr->info->name,
.crashed = false,
};
srcu_notifier_call_chain(&ssr->info->notifier_list,
QCOM_SSR_BEFORE_POWERUP, &data);
return 0;
}
static int ssr_notify_start(struct rproc_subdev *subdev)
{
struct qcom_rproc_ssr *ssr = to_ssr_subdev(subdev);
struct qcom_ssr_notify_data data = {
.name = ssr->info->name,
.crashed = false,
};
srcu_notifier_call_chain(&ssr->info->notifier_list,
QCOM_SSR_AFTER_POWERUP, &data);
return 0;
}
static void ssr_notify_stop(struct rproc_subdev *subdev, bool crashed)
{
struct qcom_rproc_ssr *ssr = to_ssr_subdev(subdev);
struct qcom_ssr_notify_data data = {
.name = ssr->info->name,
.crashed = crashed,
};
srcu_notifier_call_chain(&ssr->info->notifier_list,
QCOM_SSR_BEFORE_SHUTDOWN, &data);
}
static void ssr_notify_unprepare(struct rproc_subdev *subdev)
{
struct qcom_rproc_ssr *ssr = to_ssr_subdev(subdev);
struct qcom_ssr_notify_data data = {
.name = ssr->info->name,
.crashed = false,
};
srcu_notifier_call_chain(&ssr->info->notifier_list,
QCOM_SSR_AFTER_SHUTDOWN, &data);
}
/**
* qcom_add_ssr_subdev() - register subdevice as restart notification source
* @rproc: rproc handle
* @ssr: SSR subdevice handle
* @ssr_name: identifier to use for notifications originating from @rproc
*
* Once @ssr is registered with @rproc, SSR events will be sent to all
* registered listeners for the remoteproc when its SSR events occur
* (pre/post startup and pre/post shutdown).
*/
void qcom_add_ssr_subdev(struct rproc *rproc, struct qcom_rproc_ssr *ssr,
const char *ssr_name)
{
struct qcom_ssr_subsystem *info;
info = qcom_ssr_get_subsys(ssr_name);
if (IS_ERR(info)) {
dev_err(&rproc->dev, "Failed to add ssr subdevice\n");
return;
}
ssr->info = info;
ssr->subdev.prepare = ssr_notify_prepare;
ssr->subdev.start = ssr_notify_start;
ssr->subdev.stop = ssr_notify_stop;
ssr->subdev.unprepare = ssr_notify_unprepare;
rproc_add_subdev(rproc, &ssr->subdev);
}
EXPORT_SYMBOL_GPL(qcom_add_ssr_subdev);
/**
* qcom_remove_ssr_subdev() - remove subdevice as restart notification source
* @rproc: rproc handle
* @ssr: SSR subdevice handle
*/
void qcom_remove_ssr_subdev(struct rproc *rproc, struct qcom_rproc_ssr *ssr)
{
rproc_remove_subdev(rproc, &ssr->subdev);
ssr->info = NULL;
}
EXPORT_SYMBOL_GPL(qcom_remove_ssr_subdev);
MODULE_DESCRIPTION("Qualcomm Remoteproc helper driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/remoteproc/qcom_common.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2020 Martin Blumenstingl <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/remoteproc.h>
#include <linux/reset.h>
#include <linux/sizes.h>
#include "remoteproc_internal.h"
#define AO_REMAP_REG0 0x0
#define AO_REMAP_REG0_REMAP_AHB_SRAM_BITS_17_14_FOR_ARM_CPU GENMASK(3, 0)
#define AO_REMAP_REG1 0x4
#define AO_REMAP_REG1_MOVE_AHB_SRAM_TO_0X0_INSTEAD_OF_DDR BIT(4)
#define AO_REMAP_REG1_REMAP_AHB_SRAM_BITS_17_14_FOR_MEDIA_CPU GENMASK(3, 0)
#define AO_CPU_CNTL 0x0
#define AO_CPU_CNTL_AHB_SRAM_BITS_31_20 GENMASK(28, 16)
#define AO_CPU_CNTL_HALT BIT(9)
#define AO_CPU_CNTL_UNKNONWN BIT(8)
#define AO_CPU_CNTL_RUN BIT(0)
#define AO_CPU_STAT 0x4
#define AO_SECURE_REG0 0x0
#define AO_SECURE_REG0_AHB_SRAM_BITS_19_12 GENMASK(15, 8)
/* Only bits [31:20] and [17:14] are usable, all other bits must be zero */
#define MESON_AO_RPROC_SRAM_USABLE_BITS 0xfff3c000ULL
#define MESON_AO_RPROC_MEMORY_OFFSET 0x10000000
struct meson_mx_ao_arc_rproc_priv {
void __iomem *remap_base;
void __iomem *cpu_base;
unsigned long sram_va;
phys_addr_t sram_pa;
size_t sram_size;
struct gen_pool *sram_pool;
struct reset_control *arc_reset;
struct clk *arc_pclk;
struct regmap *secbus2_regmap;
};
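/* Power up the AO ARC core: program the SRAM remap, reset the core and let it run */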
static int meson_mx_ao_arc_rproc_start(struct rproc *rproc)
{
struct meson_mx_ao_arc_rproc_priv *priv = rproc->priv;
phys_addr_t translated_sram_addr;
u32 tmp;
int ret;
ret = clk_prepare_enable(priv->arc_pclk);
if (ret)
return ret;
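/* Expose the allocated SRAM to the ARC core via the AHB SRAM remap (address bits [17:14]) */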
tmp = FIELD_PREP(AO_REMAP_REG0_REMAP_AHB_SRAM_BITS_17_14_FOR_ARM_CPU,
priv->sram_pa >> 14);
writel(tmp, priv->remap_base + AO_REMAP_REG0);
/*
* The SRAM content as seen by the ARC core always starts at 0x0
* regardless of the value given here (this was discovered by trial and
* error). For SoCs older than Meson6 we probably have to set
* AO_REMAP_REG1_MOVE_AHB_SRAM_TO_0X0_INSTEAD_OF_DDR to achieve the
* same. (At least) For Meson8 and newer that bit must not be set.
*/
writel(0x0, priv->remap_base + AO_REMAP_REG1);
regmap_update_bits(priv->secbus2_regmap, AO_SECURE_REG0,
AO_SECURE_REG0_AHB_SRAM_BITS_19_12,
FIELD_PREP(AO_SECURE_REG0_AHB_SRAM_BITS_19_12,
priv->sram_pa >> 12));
ret = reset_control_reset(priv->arc_reset);
if (ret) {
clk_disable_unprepare(priv->arc_pclk);
return ret;
}
usleep_range(10, 100);
/*
* Convert from 0xd9000000 to 0xc9000000 as the vendor driver does.
* This only seems to be relevant for the AO_CPU_CNTL register. It is
* unknown why this is needed.
*/
translated_sram_addr = priv->sram_pa - MESON_AO_RPROC_MEMORY_OFFSET;
tmp = FIELD_PREP(AO_CPU_CNTL_AHB_SRAM_BITS_31_20,
translated_sram_addr >> 20);
tmp |= AO_CPU_CNTL_UNKNONWN | AO_CPU_CNTL_RUN;
writel(tmp, priv->cpu_base + AO_CPU_CNTL);
usleep_range(20, 200);
return 0;
}
static int meson_mx_ao_arc_rproc_stop(struct rproc *rproc)
{
struct meson_mx_ao_arc_rproc_priv *priv = rproc->priv;
writel(AO_CPU_CNTL_HALT, priv->cpu_base + AO_CPU_CNTL);
clk_disable_unprepare(priv->arc_pclk);
return 0;
}
static void *meson_mx_ao_arc_rproc_da_to_va(struct rproc *rproc, u64 da,
size_t len, bool *is_iomem)
{
struct meson_mx_ao_arc_rproc_priv *priv = rproc->priv;
/* The memory from the ARC core's perspective always starts at 0x0. */
if ((da + len) > priv->sram_size)
return NULL;
return (void *)priv->sram_va + da;
}
static struct rproc_ops meson_mx_ao_arc_rproc_ops = {
.start = meson_mx_ao_arc_rproc_start,
.stop = meson_mx_ao_arc_rproc_stop,
.da_to_va = meson_mx_ao_arc_rproc_da_to_va,
.get_boot_addr = rproc_elf_get_boot_addr,
.load = rproc_elf_load_segments,
.sanity_check = rproc_elf_sanity_check,
};
static int meson_mx_ao_arc_rproc_probe(struct platform_device *pdev)
{
struct meson_mx_ao_arc_rproc_priv *priv;
struct device *dev = &pdev->dev;
const char *fw_name = NULL;
struct rproc *rproc;
int ret;
device_property_read_string(dev, "firmware-name", &fw_name);
rproc = devm_rproc_alloc(dev, "meson-mx-ao-arc",
&meson_mx_ao_arc_rproc_ops, fw_name,
sizeof(*priv));
if (!rproc)
return -ENOMEM;
rproc->has_iommu = false;
priv = rproc->priv;
priv->sram_pool = of_gen_pool_get(dev->of_node, "sram", 0);
if (!priv->sram_pool) {
dev_err(dev, "Could not get SRAM pool\n");
return -ENODEV;
}
priv->sram_size = gen_pool_avail(priv->sram_pool);
priv->sram_va = gen_pool_alloc(priv->sram_pool, priv->sram_size);
if (!priv->sram_va) {
dev_err(dev, "Could not alloc memory in SRAM pool\n");
return -ENOMEM;
}
priv->sram_pa = gen_pool_virt_to_phys(priv->sram_pool, priv->sram_va);
if (priv->sram_pa & ~MESON_AO_RPROC_SRAM_USABLE_BITS) {
dev_err(dev, "SRAM address contains unusable bits\n");
ret = -EINVAL;
goto err_free_genpool;
}
priv->secbus2_regmap = syscon_regmap_lookup_by_phandle(dev->of_node,
"amlogic,secbus2");
if (IS_ERR(priv->secbus2_regmap)) {
dev_err(dev, "Failed to find SECBUS2 regmap\n");
ret = PTR_ERR(priv->secbus2_regmap);
goto err_free_genpool;
}
priv->remap_base = devm_platform_ioremap_resource_byname(pdev, "remap");
if (IS_ERR(priv->remap_base)) {
ret = PTR_ERR(priv->remap_base);
goto err_free_genpool;
}
priv->cpu_base = devm_platform_ioremap_resource_byname(pdev, "cpu");
if (IS_ERR(priv->cpu_base)) {
ret = PTR_ERR(priv->cpu_base);
goto err_free_genpool;
}
priv->arc_reset = devm_reset_control_get_exclusive(dev, NULL);
if (IS_ERR(priv->arc_reset)) {
dev_err(dev, "Failed to get ARC reset\n");
ret = PTR_ERR(priv->arc_reset);
goto err_free_genpool;
}
priv->arc_pclk = devm_clk_get(dev, NULL);
if (IS_ERR(priv->arc_pclk)) {
dev_err(dev, "Failed to get the ARC PCLK\n");
ret = PTR_ERR(priv->arc_pclk);
goto err_free_genpool;
}
platform_set_drvdata(pdev, rproc);
ret = rproc_add(rproc);
if (ret)
goto err_free_genpool;
return 0;
err_free_genpool:
gen_pool_free(priv->sram_pool, priv->sram_va, priv->sram_size);
return ret;
}
static void meson_mx_ao_arc_rproc_remove(struct platform_device *pdev)
{
struct rproc *rproc = platform_get_drvdata(pdev);
struct meson_mx_ao_arc_rproc_priv *priv = rproc->priv;
rproc_del(rproc);
gen_pool_free(priv->sram_pool, priv->sram_va, priv->sram_size);
}
static const struct of_device_id meson_mx_ao_arc_rproc_match[] = {
{ .compatible = "amlogic,meson8-ao-arc" },
{ .compatible = "amlogic,meson8b-ao-arc" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, meson_mx_ao_arc_rproc_match);
static struct platform_driver meson_mx_ao_arc_rproc_driver = {
.probe = meson_mx_ao_arc_rproc_probe,
.remove_new = meson_mx_ao_arc_rproc_remove,
.driver = {
.name = "meson-mx-ao-arc-rproc",
.of_match_table = meson_mx_ao_arc_rproc_match,
},
};
module_platform_driver(meson_mx_ao_arc_rproc_driver);
MODULE_DESCRIPTION("Amlogic Meson6/8/8b/8m2 AO ARC remote processor driver");
MODULE_AUTHOR("Martin Blumenstingl <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/remoteproc/meson_mx_ao_arc.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright 2021 NXP */
#include <dt-bindings/firmware/imx/rsrc.h>
#include <linux/arm-smccc.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/firmware.h>
#include <linux/firmware/imx/sci.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/remoteproc.h>
#include <linux/slab.h>
#include "imx_rproc.h"
#include "remoteproc_elf_helpers.h"
#include "remoteproc_internal.h"
#define DSP_RPROC_CLK_MAX 5
/*
* Module parameters
*/
static unsigned int no_mailboxes;
module_param_named(no_mailboxes, no_mailboxes, int, 0644);
MODULE_PARM_DESC(no_mailboxes,
"There is no mailbox between cores, so ignore remote proc reply after start, default is 0 (off).");
#define REMOTE_IS_READY BIT(0)
#define REMOTE_READY_WAIT_MAX_RETRIES 500
/* att flags */
/* DSP own area */
#define ATT_OWN BIT(31)
/* DSP instruction area */
#define ATT_IRAM BIT(30)
/* Definitions for i.MX8MP */
/* DAP registers */
#define IMX8M_DAP_DEBUG 0x28800000
#define IMX8M_DAP_DEBUG_SIZE (64 * 1024)
#define IMX8M_DAP_PWRCTL (0x4000 + 0x3020)
#define IMX8M_PWRCTL_CORERESET BIT(16)
/* DSP audio mix registers */
#define IMX8M_AudioDSP_REG0 0x100
#define IMX8M_AudioDSP_REG1 0x104
#define IMX8M_AudioDSP_REG2 0x108
#define IMX8M_AudioDSP_REG3 0x10c
#define IMX8M_AudioDSP_REG2_RUNSTALL BIT(5)
#define IMX8M_AudioDSP_REG2_PWAITMODE BIT(1)
/* Definitions for i.MX8ULP */
#define IMX8ULP_SIM_LPAV_REG_SYSCTRL0 0x8
#define IMX8ULP_SYSCTRL0_DSP_DBG_RST BIT(25)
#define IMX8ULP_SYSCTRL0_DSP_PLAT_CLK_EN BIT(19)
#define IMX8ULP_SYSCTRL0_DSP_PBCLK_EN BIT(18)
#define IMX8ULP_SYSCTRL0_DSP_CLK_EN BIT(17)
#define IMX8ULP_SYSCTRL0_DSP_RST BIT(16)
#define IMX8ULP_SYSCTRL0_DSP_OCD_HALT BIT(14)
#define IMX8ULP_SYSCTRL0_DSP_STALL BIT(13)
#define IMX8ULP_SIP_HIFI_XRDC 0xc200000e
/*
* enum - Predefined Mailbox Messages
*
* @RP_MBOX_SUSPEND_SYSTEM: system suspend request for the remote processor
*
* @RP_MBOX_SUSPEND_ACK: successful response from remote processor for a
* suspend request
*
* @RP_MBOX_RESUME_SYSTEM: system resume request for the remote processor
*
* @RP_MBOX_RESUME_ACK: successful response from remote processor for a
* resume request
*/
enum imx_dsp_rp_mbox_messages {
RP_MBOX_SUSPEND_SYSTEM = 0xFF11,
RP_MBOX_SUSPEND_ACK = 0xFF12,
RP_MBOX_RESUME_SYSTEM = 0xFF13,
RP_MBOX_RESUME_ACK = 0xFF14,
};
/**
* struct imx_dsp_rproc - DSP remote processor state
* @regmap: regmap handler
* @rproc: rproc handler
* @dsp_dcfg: device configuration pointer
* @clks: clocks needed by this device
* @cl: mailbox client to request the mailbox channel
* @cl_rxdb: mailbox client to request the mailbox channel for doorbell
* @tx_ch: mailbox tx channel handle
* @rx_ch: mailbox rx channel handle
* @rxdb_ch: mailbox rx doorbell channel handle
* @pd_dev: power domain device
* @pd_dev_link: power domain device link
* @ipc_handle: System Control Unit ipc handle
* @rproc_work: work for processing virtio interrupts
* @pm_comp: completion primitive to sync for suspend response
* @num_domains: power domain number
* @flags: control flags
*/
struct imx_dsp_rproc {
struct regmap *regmap;
struct rproc *rproc;
const struct imx_dsp_rproc_dcfg *dsp_dcfg;
struct clk_bulk_data clks[DSP_RPROC_CLK_MAX];
struct mbox_client cl;
struct mbox_client cl_rxdb;
struct mbox_chan *tx_ch;
struct mbox_chan *rx_ch;
struct mbox_chan *rxdb_ch;
struct device **pd_dev;
struct device_link **pd_dev_link;
struct imx_sc_ipc *ipc_handle;
struct work_struct rproc_work;
struct completion pm_comp;
int num_domains;
u32 flags;
};
/**
* struct imx_dsp_rproc_dcfg - DSP remote processor configuration
* @dcfg: imx_rproc_dcfg handler
* @reset: reset callback function
*/
struct imx_dsp_rproc_dcfg {
const struct imx_rproc_dcfg *dcfg;
int (*reset)(struct imx_dsp_rproc *priv);
};
static const struct imx_rproc_att imx_dsp_rproc_att_imx8qm[] = {
/* dev addr , sys addr , size , flags */
{ 0x596e8000, 0x556e8000, 0x00008000, ATT_OWN },
{ 0x596f0000, 0x556f0000, 0x00008000, ATT_OWN },
{ 0x596f8000, 0x556f8000, 0x00000800, ATT_OWN | ATT_IRAM},
{ 0x55700000, 0x55700000, 0x00070000, ATT_OWN },
/* DDR (Data) */
{ 0x80000000, 0x80000000, 0x60000000, 0},
};
static const struct imx_rproc_att imx_dsp_rproc_att_imx8qxp[] = {
/* dev addr , sys addr , size , flags */
{ 0x596e8000, 0x596e8000, 0x00008000, ATT_OWN },
{ 0x596f0000, 0x596f0000, 0x00008000, ATT_OWN },
{ 0x596f8000, 0x596f8000, 0x00000800, ATT_OWN | ATT_IRAM},
{ 0x59700000, 0x59700000, 0x00070000, ATT_OWN },
/* DDR (Data) */
{ 0x80000000, 0x80000000, 0x60000000, 0},
};
static const struct imx_rproc_att imx_dsp_rproc_att_imx8mp[] = {
/* dev addr , sys addr , size , flags */
{ 0x3b6e8000, 0x3b6e8000, 0x00008000, ATT_OWN },
{ 0x3b6f0000, 0x3b6f0000, 0x00008000, ATT_OWN },
{ 0x3b6f8000, 0x3b6f8000, 0x00000800, ATT_OWN | ATT_IRAM},
{ 0x3b700000, 0x3b700000, 0x00040000, ATT_OWN },
/* DDR (Data) */
{ 0x40000000, 0x40000000, 0x80000000, 0},
};
static const struct imx_rproc_att imx_dsp_rproc_att_imx8ulp[] = {
/* dev addr , sys addr , size , flags */
{ 0x21170000, 0x21170000, 0x00010000, ATT_OWN | ATT_IRAM},
{ 0x21180000, 0x21180000, 0x00010000, ATT_OWN },
/* DDR (Data) */
{ 0x0c000000, 0x80000000, 0x10000000, 0},
{ 0x30000000, 0x90000000, 0x10000000, 0},
};
/* Initialize the mailboxes between cores, if exists */
static int (*imx_dsp_rproc_mbox_init)(struct imx_dsp_rproc *priv);
/* Reset function for DSP on i.MX8MP */
static int imx8mp_dsp_reset(struct imx_dsp_rproc *priv)
{
void __iomem *dap = ioremap_wc(IMX8M_DAP_DEBUG, IMX8M_DAP_DEBUG_SIZE);
int pwrctl;
/* Put DSP into reset and stall */
pwrctl = readl(dap + IMX8M_DAP_PWRCTL);
pwrctl |= IMX8M_PWRCTL_CORERESET;
writel(pwrctl, dap + IMX8M_DAP_PWRCTL);
/* Keep reset asserted for 10 cycles */
usleep_range(1, 2);
regmap_update_bits(priv->regmap, IMX8M_AudioDSP_REG2,
IMX8M_AudioDSP_REG2_RUNSTALL,
IMX8M_AudioDSP_REG2_RUNSTALL);
/* Take the DSP out of reset and keep stalled for FW loading */
pwrctl = readl(dap + IMX8M_DAP_PWRCTL);
pwrctl &= ~IMX8M_PWRCTL_CORERESET;
writel(pwrctl, dap + IMX8M_DAP_PWRCTL);
iounmap(dap);
return 0;
}
/* Reset function for DSP on i.MX8ULP */
static int imx8ulp_dsp_reset(struct imx_dsp_rproc *priv)
{
struct arm_smccc_res res;
/* Put DSP into reset and stall */
regmap_update_bits(priv->regmap, IMX8ULP_SIM_LPAV_REG_SYSCTRL0,
IMX8ULP_SYSCTRL0_DSP_RST, IMX8ULP_SYSCTRL0_DSP_RST);
regmap_update_bits(priv->regmap, IMX8ULP_SIM_LPAV_REG_SYSCTRL0,
IMX8ULP_SYSCTRL0_DSP_STALL,
IMX8ULP_SYSCTRL0_DSP_STALL);
/* Configure resources of DSP through TFA */
arm_smccc_smc(IMX8ULP_SIP_HIFI_XRDC, 0, 0, 0, 0, 0, 0, 0, &res);
/* Take the DSP out of reset and keep stalled for FW loading */
regmap_update_bits(priv->regmap, IMX8ULP_SIM_LPAV_REG_SYSCTRL0,
IMX8ULP_SYSCTRL0_DSP_RST, 0);
regmap_update_bits(priv->regmap, IMX8ULP_SIM_LPAV_REG_SYSCTRL0,
IMX8ULP_SYSCTRL0_DSP_DBG_RST, 0);
return 0;
}
/* Specific configuration for i.MX8MP */
static const struct imx_rproc_dcfg dsp_rproc_cfg_imx8mp = {
.src_reg = IMX8M_AudioDSP_REG2,
.src_mask = IMX8M_AudioDSP_REG2_RUNSTALL,
.src_start = 0,
.src_stop = IMX8M_AudioDSP_REG2_RUNSTALL,
.att = imx_dsp_rproc_att_imx8mp,
.att_size = ARRAY_SIZE(imx_dsp_rproc_att_imx8mp),
.method = IMX_RPROC_MMIO,
};
static const struct imx_dsp_rproc_dcfg imx_dsp_rproc_cfg_imx8mp = {
.dcfg = &dsp_rproc_cfg_imx8mp,
.reset = imx8mp_dsp_reset,
};
/* Specific configuration for i.MX8ULP */
static const struct imx_rproc_dcfg dsp_rproc_cfg_imx8ulp = {
.src_reg = IMX8ULP_SIM_LPAV_REG_SYSCTRL0,
.src_mask = IMX8ULP_SYSCTRL0_DSP_STALL,
.src_start = 0,
.src_stop = IMX8ULP_SYSCTRL0_DSP_STALL,
.att = imx_dsp_rproc_att_imx8ulp,
.att_size = ARRAY_SIZE(imx_dsp_rproc_att_imx8ulp),
.method = IMX_RPROC_MMIO,
};
static const struct imx_dsp_rproc_dcfg imx_dsp_rproc_cfg_imx8ulp = {
.dcfg = &dsp_rproc_cfg_imx8ulp,
.reset = imx8ulp_dsp_reset,
};
/* Specific configuration for i.MX8QXP */
static const struct imx_rproc_dcfg dsp_rproc_cfg_imx8qxp = {
.att = imx_dsp_rproc_att_imx8qxp,
.att_size = ARRAY_SIZE(imx_dsp_rproc_att_imx8qxp),
.method = IMX_RPROC_SCU_API,
};
static const struct imx_dsp_rproc_dcfg imx_dsp_rproc_cfg_imx8qxp = {
.dcfg = &dsp_rproc_cfg_imx8qxp,
};
/* Specific configuration for i.MX8QM */
static const struct imx_rproc_dcfg dsp_rproc_cfg_imx8qm = {
.att = imx_dsp_rproc_att_imx8qm,
.att_size = ARRAY_SIZE(imx_dsp_rproc_att_imx8qm),
.method = IMX_RPROC_SCU_API,
};
static const struct imx_dsp_rproc_dcfg imx_dsp_rproc_cfg_imx8qm = {
.dcfg = &dsp_rproc_cfg_imx8qm,
};
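/* Poll until the rxdb doorbell callback has set REMOTE_IS_READY, or time out */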
static int imx_dsp_rproc_ready(struct rproc *rproc)
{
struct imx_dsp_rproc *priv = rproc->priv;
int i;
if (!priv->rxdb_ch)
return 0;
for (i = 0; i < REMOTE_READY_WAIT_MAX_RETRIES; i++) {
if (priv->flags & REMOTE_IS_READY)
return 0;
usleep_range(100, 200);
}
return -ETIMEDOUT;
}
/*
* Start function for rproc_ops
*
* There is a handshake for the start procedure: when the DSP starts, it
* sends a doorbell message to this driver, the REMOTE_IS_READY flag is
* set, and then the driver kicks a message to the DSP.
*/
static int imx_dsp_rproc_start(struct rproc *rproc)
{
struct imx_dsp_rproc *priv = rproc->priv;
const struct imx_dsp_rproc_dcfg *dsp_dcfg = priv->dsp_dcfg;
const struct imx_rproc_dcfg *dcfg = dsp_dcfg->dcfg;
struct device *dev = rproc->dev.parent;
int ret;
switch (dcfg->method) {
case IMX_RPROC_MMIO:
ret = regmap_update_bits(priv->regmap,
dcfg->src_reg,
dcfg->src_mask,
dcfg->src_start);
break;
case IMX_RPROC_SCU_API:
ret = imx_sc_pm_cpu_start(priv->ipc_handle,
IMX_SC_R_DSP,
true,
rproc->bootaddr);
break;
default:
return -EOPNOTSUPP;
}
if (ret)
dev_err(dev, "Failed to enable remote core!\n");
else
ret = imx_dsp_rproc_ready(rproc);
return ret;
}
/*
* Stop function for rproc_ops
* It clears the REMOTE_IS_READY flag.
*/
static int imx_dsp_rproc_stop(struct rproc *rproc)
{
struct imx_dsp_rproc *priv = rproc->priv;
const struct imx_dsp_rproc_dcfg *dsp_dcfg = priv->dsp_dcfg;
const struct imx_rproc_dcfg *dcfg = dsp_dcfg->dcfg;
struct device *dev = rproc->dev.parent;
int ret = 0;
if (rproc->state == RPROC_CRASHED) {
priv->flags &= ~REMOTE_IS_READY;
return 0;
}
switch (dcfg->method) {
case IMX_RPROC_MMIO:
ret = regmap_update_bits(priv->regmap, dcfg->src_reg, dcfg->src_mask,
dcfg->src_stop);
break;
case IMX_RPROC_SCU_API:
ret = imx_sc_pm_cpu_start(priv->ipc_handle,
IMX_SC_R_DSP,
false,
rproc->bootaddr);
break;
default:
return -EOPNOTSUPP;
}
if (ret)
dev_err(dev, "Failed to stop remote core\n");
else
priv->flags &= ~REMOTE_IS_READY;
return ret;
}
/**
* imx_dsp_rproc_sys_to_da() - internal memory translation helper
* @priv: private data pointer
* @sys: system address (DDR address)
* @len: length of the memory buffer
* @da: device address to translate
*
* Convert a system address (DDR address) to a device address (DSP address),
* since there may be a memory remap for the device.
*/
static int imx_dsp_rproc_sys_to_da(struct imx_dsp_rproc *priv, u64 sys,
size_t len, u64 *da)
{
const struct imx_dsp_rproc_dcfg *dsp_dcfg = priv->dsp_dcfg;
const struct imx_rproc_dcfg *dcfg = dsp_dcfg->dcfg;
int i;
/* Parse address translation table */
for (i = 0; i < dcfg->att_size; i++) {
const struct imx_rproc_att *att = &dcfg->att[i];
if (sys >= att->sa && sys + len <= att->sa + att->size) {
unsigned int offset = sys - att->sa;
*da = att->da + offset;
return 0;
}
}
return -ENOENT;
}
/* Main virtqueue message work function
*
* This function is executed upon scheduling of the i.MX DSP remoteproc
* driver's workqueue. The workqueue is scheduled by the mailbox rx
* handler.
*
* This work function processes both the Tx and Rx virtqueue indices on
* every invocation. The rproc_vq_interrupt function can detect if there
* are new unprocessed messages or not (returns IRQ_NONE vs IRQ_HANDLED),
* but there is no need to check for these return values. The index 0
* triggering will process all pending Rx buffers, and the index 1 triggering
* will process all newly available Tx buffers and will wakeup any potentially
* blocked senders.
*
* NOTE:
* The current logic is based on an inherent design assumption of supporting
* only 2 vrings, but this can be changed if needed.
*/
static void imx_dsp_rproc_vq_work(struct work_struct *work)
{
struct imx_dsp_rproc *priv = container_of(work, struct imx_dsp_rproc,
rproc_work);
struct rproc *rproc = priv->rproc;
mutex_lock(&rproc->lock);
if (rproc->state != RPROC_RUNNING)
goto unlock_mutex;
rproc_vq_interrupt(priv->rproc, 0);
rproc_vq_interrupt(priv->rproc, 1);
unlock_mutex:
mutex_unlock(&rproc->lock);
}
/**
* imx_dsp_rproc_rx_tx_callback() - inbound mailbox message handler
* @cl: mailbox client pointer used for requesting the mailbox channel
* @data: mailbox payload
*
* This handler is invoked by the mailbox driver whenever a mailbox
* message is received. Usually, the SUSPEND and RESUME related messages
* are handled in this function; other messages are handled by the remoteproc core.
*/
static void imx_dsp_rproc_rx_tx_callback(struct mbox_client *cl, void *data)
{
struct rproc *rproc = dev_get_drvdata(cl->dev);
struct imx_dsp_rproc *priv = rproc->priv;
struct device *dev = rproc->dev.parent;
u32 message = (u32)(*(u32 *)data);
dev_dbg(dev, "mbox msg: 0x%x\n", message);
switch (message) {
case RP_MBOX_SUSPEND_ACK:
complete(&priv->pm_comp);
break;
case RP_MBOX_RESUME_ACK:
complete(&priv->pm_comp);
break;
default:
schedule_work(&priv->rproc_work);
break;
}
}
/**
* imx_dsp_rproc_rxdb_callback() - inbound mailbox message handler
* @cl: mailbox client pointer used for requesting the mailbox channel
* @data: mailbox payload
*
* For the doorbell channel there is no message payload; just set the
* REMOTE_IS_READY flag.
*/
static void imx_dsp_rproc_rxdb_callback(struct mbox_client *cl, void *data)
{
struct rproc *rproc = dev_get_drvdata(cl->dev);
struct imx_dsp_rproc *priv = rproc->priv;
/* Remote is ready after firmware is loaded and running */
priv->flags |= REMOTE_IS_READY;
}
/**
* imx_dsp_rproc_mbox_alloc() - request mailbox channels
* @priv: private data pointer
*
* Request three mailbox channels (tx, rx, rxdb).
*/
static int imx_dsp_rproc_mbox_alloc(struct imx_dsp_rproc *priv)
{
struct device *dev = priv->rproc->dev.parent;
struct mbox_client *cl;
int ret;
if (!of_get_property(dev->of_node, "mbox-names", NULL))
return 0;
cl = &priv->cl;
cl->dev = dev;
cl->tx_block = true;
cl->tx_tout = 100;
cl->knows_txdone = false;
cl->rx_callback = imx_dsp_rproc_rx_tx_callback;
/* Channel for sending message */
priv->tx_ch = mbox_request_channel_byname(cl, "tx");
if (IS_ERR(priv->tx_ch)) {
ret = PTR_ERR(priv->tx_ch);
dev_dbg(cl->dev, "failed to request tx mailbox channel: %d\n",
ret);
return ret;
}
/* Channel for receiving message */
priv->rx_ch = mbox_request_channel_byname(cl, "rx");
if (IS_ERR(priv->rx_ch)) {
ret = PTR_ERR(priv->rx_ch);
dev_dbg(cl->dev, "failed to request rx mailbox channel: %d\n",
ret);
goto free_channel_tx;
}
cl = &priv->cl_rxdb;
cl->dev = dev;
cl->rx_callback = imx_dsp_rproc_rxdb_callback;
/*
* The RX doorbell is used to receive the ready signal from the remote
* side after the firmware is loaded.
*/
priv->rxdb_ch = mbox_request_channel_byname(cl, "rxdb");
if (IS_ERR(priv->rxdb_ch)) {
ret = PTR_ERR(priv->rxdb_ch);
dev_dbg(cl->dev, "failed to request mbox chan rxdb, ret %d\n",
ret);
goto free_channel_rx;
}
return 0;
free_channel_rx:
mbox_free_channel(priv->rx_ch);
free_channel_tx:
mbox_free_channel(priv->tx_ch);
return ret;
}
/*
* imx_dsp_rproc_mbox_no_alloc()
*
* Empty function for no mailbox between cores
*
* Always return 0
*/
static int imx_dsp_rproc_mbox_no_alloc(struct imx_dsp_rproc *priv)
{
return 0;
}
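/* Free all mailbox channels requested in imx_dsp_rproc_mbox_alloc() */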
static void imx_dsp_rproc_free_mbox(struct imx_dsp_rproc *priv)
{
mbox_free_channel(priv->tx_ch);
mbox_free_channel(priv->rx_ch);
mbox_free_channel(priv->rxdb_ch);
}
/**
* imx_dsp_rproc_add_carveout() - register carveout regions
* @priv: private data pointer
*
* This function registers the specified memory entries in the rproc carveouts
* list. The carveouts help map memory addresses for the DSP.
*/
static int imx_dsp_rproc_add_carveout(struct imx_dsp_rproc *priv)
{
const struct imx_dsp_rproc_dcfg *dsp_dcfg = priv->dsp_dcfg;
const struct imx_rproc_dcfg *dcfg = dsp_dcfg->dcfg;
struct rproc *rproc = priv->rproc;
struct device *dev = rproc->dev.parent;
struct device_node *np = dev->of_node;
struct of_phandle_iterator it;
struct rproc_mem_entry *mem;
struct reserved_mem *rmem;
void __iomem *cpu_addr;
int a;
u64 da;
/* Remap required addresses */
for (a = 0; a < dcfg->att_size; a++) {
const struct imx_rproc_att *att = &dcfg->att[a];
if (!(att->flags & ATT_OWN))
continue;
if (imx_dsp_rproc_sys_to_da(priv, att->sa, att->size, &da))
return -EINVAL;
cpu_addr = devm_ioremap_wc(dev, att->sa, att->size);
if (!cpu_addr) {
dev_err(dev, "failed to map memory %p\n", &att->sa);
return -ENOMEM;
}
/* Register memory region */
mem = rproc_mem_entry_init(dev, (void __force *)cpu_addr, (dma_addr_t)att->sa,
att->size, da, NULL, NULL, "dsp_mem");
if (mem)
rproc_coredump_add_segment(rproc, da, att->size);
else
return -ENOMEM;
rproc_add_carveout(rproc, mem);
}
of_phandle_iterator_init(&it, np, "memory-region", NULL, 0);
while (of_phandle_iterator_next(&it) == 0) {
/*
* Ignore the first memory region, which will be used as the vdev buffer.
* No extra handling is needed; rproc_add_virtio_dev() will handle it.
*/
if (!strcmp(it.node->name, "vdev0buffer"))
continue;
rmem = of_reserved_mem_lookup(it.node);
if (!rmem) {
of_node_put(it.node);
dev_err(dev, "unable to acquire memory-region\n");
return -EINVAL;
}
if (imx_dsp_rproc_sys_to_da(priv, rmem->base, rmem->size, &da)) {
of_node_put(it.node);
return -EINVAL;
}
cpu_addr = devm_ioremap_wc(dev, rmem->base, rmem->size);
if (!cpu_addr) {
of_node_put(it.node);
dev_err(dev, "failed to map memory %p\n", &rmem->base);
return -ENOMEM;
}
/* Register memory region */
mem = rproc_mem_entry_init(dev, (void __force *)cpu_addr, (dma_addr_t)rmem->base,
rmem->size, da, NULL, NULL, it.node->name);
if (mem) {
rproc_coredump_add_segment(rproc, da, rmem->size);
} else {
of_node_put(it.node);
return -ENOMEM;
}
rproc_add_carveout(rproc, mem);
}
return 0;
}
/* Prepare function for rproc_ops */
static int imx_dsp_rproc_prepare(struct rproc *rproc)
{
struct imx_dsp_rproc *priv = rproc->priv;
struct device *dev = rproc->dev.parent;
struct rproc_mem_entry *carveout;
int ret;
ret = imx_dsp_rproc_add_carveout(priv);
if (ret) {
dev_err(dev, "failed on imx_dsp_rproc_add_carveout\n");
return ret;
}
pm_runtime_get_sync(dev);
/*
* Clear the buffers after the pm runtime get, because the internal OCRAM is
* not accessible while power and clocks are disabled.
*/
list_for_each_entry(carveout, &rproc->carveouts, node) {
if (carveout->va)
memset(carveout->va, 0, carveout->len);
}
return 0;
}
/* Unprepare function for rproc_ops */
static int imx_dsp_rproc_unprepare(struct rproc *rproc)
{
pm_runtime_put_sync(rproc->dev.parent);
return 0;
}
/* Kick function for rproc_ops */
static void imx_dsp_rproc_kick(struct rproc *rproc, int vqid)
{
struct imx_dsp_rproc *priv = rproc->priv;
struct device *dev = rproc->dev.parent;
int err;
__u32 mmsg;
if (!priv->tx_ch) {
dev_err(dev, "No initialized mbox tx channel\n");
return;
}
/*
* Send the index of the triggered virtqueue as the MU payload, so the
* remote processor knows which virtqueue is used.
*/
mmsg = vqid;
err = mbox_send_message(priv->tx_ch, (void *)&mmsg);
if (err < 0)
dev_err(dev, "%s: failed (%d, err:%d)\n", __func__, vqid, err);
}
/*
* Custom memory copy implementation for i.MX DSP Cores
*
* The IRAM is part of the HiFi DSP.
* According to hw specs only 32-bits writes are allowed.
*/
static int imx_dsp_rproc_memcpy(void *dst, const void *src, size_t size)
{
void __iomem *dest = (void __iomem *)dst;
const u8 *src_byte = src;
const u32 *source = src;
u32 affected_mask;
int i, q, r;
u32 tmp;
/* destination must be 32bit aligned */
if (!IS_ALIGNED((uintptr_t)dest, 4))
return -EINVAL;
q = size / 4;
r = size % 4;
/* copy data in units of 32 bits at a time */
for (i = 0; i < q; i++)
writel(source[i], dest + i * 4);
if (r) {
affected_mask = GENMASK(8 * r, 0);
/*
* First read the 32-bit word at dest, then change only the affected
* bytes and write it back to dest.
* The unaffected bytes must not be changed.
*/
tmp = readl(dest + q * 4);
tmp &= ~affected_mask;
/* avoid reading after end of source */
for (i = 0; i < r; i++)
tmp |= (src_byte[q * 4 + i] << (8 * i));
writel(tmp, dest + q * 4);
}
return 0;
}
/*
* Custom memset implementation for i.MX DSP Cores
*
* The IRAM is part of the HiFi DSP.
* According to hw specs only 32-bits writes are allowed.
*/
static int imx_dsp_rproc_memset(void *addr, u8 value, size_t size)
{
void __iomem *tmp_dst = (void __iomem *)addr;
u32 tmp_val = value;
u32 affected_mask;
int q, r;
u32 tmp;
/* destination must be 32bit aligned */
if (!IS_ALIGNED((uintptr_t)addr, 4))
return -EINVAL;
tmp_val |= tmp_val << 8;
tmp_val |= tmp_val << 16;
q = size / 4;
r = size % 4;
while (q--)
writel(tmp_val, tmp_dst++);
if (r) {
affected_mask = GENMASK(8 * r, 0);
/*
* First read the 32-bit word at addr, then change only the affected
* bytes and write it back to addr.
* The unaffected bytes must not be changed.
*/
tmp = readl(tmp_dst);
tmp &= ~affected_mask;
tmp |= (tmp_val & affected_mask);
writel(tmp, tmp_dst);
}
return 0;
}
/*
* imx_dsp_rproc_elf_load_segments() - load firmware segments to memory
* @rproc: remote processor which will be booted using these fw segments
* @fw: the ELF firmware image
*
* This function loads the firmware segments to memory, where the remote
* processor expects them.
*
* Return: 0 on success and an appropriate error code otherwise
*/
static int imx_dsp_rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
{
struct device *dev = &rproc->dev;
const void *ehdr, *phdr;
int i, ret = 0;
u16 phnum;
const u8 *elf_data = fw->data;
u8 class = fw_elf_get_class(fw);
u32 elf_phdr_get_size = elf_size_of_phdr(class);
ehdr = elf_data;
phnum = elf_hdr_get_e_phnum(class, ehdr);
phdr = elf_data + elf_hdr_get_e_phoff(class, ehdr);
/* go through the available ELF segments */
for (i = 0; i < phnum; i++, phdr += elf_phdr_get_size) {
u64 da = elf_phdr_get_p_paddr(class, phdr);
u64 memsz = elf_phdr_get_p_memsz(class, phdr);
u64 filesz = elf_phdr_get_p_filesz(class, phdr);
u64 offset = elf_phdr_get_p_offset(class, phdr);
u32 type = elf_phdr_get_p_type(class, phdr);
void *ptr;
if (type != PT_LOAD || !memsz)
continue;
dev_dbg(dev, "phdr: type %d da 0x%llx memsz 0x%llx filesz 0x%llx\n",
type, da, memsz, filesz);
if (filesz > memsz) {
dev_err(dev, "bad phdr filesz 0x%llx memsz 0x%llx\n",
filesz, memsz);
ret = -EINVAL;
break;
}
if (offset + filesz > fw->size) {
dev_err(dev, "truncated fw: need 0x%llx avail 0x%zx\n",
offset + filesz, fw->size);
ret = -EINVAL;
break;
}
if (!rproc_u64_fit_in_size_t(memsz)) {
dev_err(dev, "size (%llx) does not fit in size_t type\n",
memsz);
ret = -EOVERFLOW;
break;
}
/* grab the kernel address for this device address */
ptr = rproc_da_to_va(rproc, da, memsz, NULL);
if (!ptr) {
dev_err(dev, "bad phdr da 0x%llx mem 0x%llx\n", da,
memsz);
ret = -EINVAL;
break;
}
/* put the segment where the remote processor expects it */
if (filesz) {
ret = imx_dsp_rproc_memcpy(ptr, elf_data + offset, filesz);
if (ret) {
dev_err(dev, "memory copy failed for da 0x%llx memsz 0x%llx\n",
da, memsz);
break;
}
}
/* zero out remaining memory for this segment */
if (memsz > filesz) {
ret = imx_dsp_rproc_memset(ptr + filesz, 0, memsz - filesz);
if (ret) {
dev_err(dev, "memset failed for da 0x%llx memsz 0x%llx\n",
da, memsz);
break;
}
}
}
return ret;
}
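/* parse_fw hook for rproc_ops: the resource table is optional for DSP firmware, so only warn if it is missing */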
static int imx_dsp_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
{
if (rproc_elf_load_rsc_table(rproc, fw))
dev_warn(&rproc->dev, "no resource table found for this firmware\n");
return 0;
}
static const struct rproc_ops imx_dsp_rproc_ops = {
.prepare = imx_dsp_rproc_prepare,
.unprepare = imx_dsp_rproc_unprepare,
.start = imx_dsp_rproc_start,
.stop = imx_dsp_rproc_stop,
.kick = imx_dsp_rproc_kick,
.load = imx_dsp_rproc_elf_load_segments,
.parse_fw = imx_dsp_rproc_parse_fw,
.sanity_check = rproc_elf_sanity_check,
.get_boot_addr = rproc_elf_get_boot_addr,
};
/**
* imx_dsp_attach_pm_domains() - attach the power domains
* @priv: private data pointer
*
* On i.MX8QM and i.MX8QXP multiple power domains are required, so the
* devices need to be linked.
*/
static int imx_dsp_attach_pm_domains(struct imx_dsp_rproc *priv)
{
struct device *dev = priv->rproc->dev.parent;
int ret, i;
priv->num_domains = of_count_phandle_with_args(dev->of_node,
"power-domains",
"#power-domain-cells");
/* If only one domain, then no need to link the device */
if (priv->num_domains <= 1)
return 0;
priv->pd_dev = devm_kmalloc_array(dev, priv->num_domains,
sizeof(*priv->pd_dev),
GFP_KERNEL);
if (!priv->pd_dev)
return -ENOMEM;
priv->pd_dev_link = devm_kmalloc_array(dev, priv->num_domains,
sizeof(*priv->pd_dev_link),
GFP_KERNEL);
if (!priv->pd_dev_link)
return -ENOMEM;
for (i = 0; i < priv->num_domains; i++) {
priv->pd_dev[i] = dev_pm_domain_attach_by_id(dev, i);
if (IS_ERR(priv->pd_dev[i])) {
ret = PTR_ERR(priv->pd_dev[i]);
goto detach_pm;
}
/*
* device_link_add() checks priv->pd_dev[i]; if it is
* NULL, the link creation fails.
*/
priv->pd_dev_link[i] = device_link_add(dev,
priv->pd_dev[i],
DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME);
if (!priv->pd_dev_link[i]) {
dev_pm_domain_detach(priv->pd_dev[i], false);
ret = -EINVAL;
goto detach_pm;
}
}
return 0;
detach_pm:
while (--i >= 0) {
device_link_del(priv->pd_dev_link[i]);
dev_pm_domain_detach(priv->pd_dev[i], false);
}
return ret;
}
static int imx_dsp_detach_pm_domains(struct imx_dsp_rproc *priv)
{
int i;
if (priv->num_domains <= 1)
return 0;
for (i = 0; i < priv->num_domains; i++) {
device_link_del(priv->pd_dev_link[i]);
dev_pm_domain_detach(priv->pd_dev[i], false);
}
return 0;
}
/**
* imx_dsp_rproc_detect_mode() - detect DSP control mode
* @priv: private data pointer
*
* Different platforms have different control methods for the DSP, depending
* on how the DSP is integrated in the platform.
*
* For i.MX8QXP and i.MX8QM, DSP should be started and stopped by System
* Control Unit.
* For i.MX8MP and i.MX8ULP, DSP should be started and stopped by system
* integration module.
*/
static int imx_dsp_rproc_detect_mode(struct imx_dsp_rproc *priv)
{
const struct imx_dsp_rproc_dcfg *dsp_dcfg = priv->dsp_dcfg;
struct device *dev = priv->rproc->dev.parent;
struct regmap *regmap;
int ret = 0;
switch (dsp_dcfg->dcfg->method) {
case IMX_RPROC_SCU_API:
ret = imx_scu_get_handle(&priv->ipc_handle);
if (ret)
return ret;
break;
case IMX_RPROC_MMIO:
regmap = syscon_regmap_lookup_by_phandle(dev->of_node, "fsl,dsp-ctrl");
if (IS_ERR(regmap)) {
dev_err(dev, "failed to find syscon\n");
return PTR_ERR(regmap);
}
priv->regmap = regmap;
break;
default:
ret = -EOPNOTSUPP;
break;
}
return ret;
}
static const char *imx_dsp_clks_names[DSP_RPROC_CLK_MAX] = {
/* DSP clocks */
"core", "ocram", "debug", "ipg", "mu",
};
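/* Fill the clk_bulk table with the DSP clock names and request them; all clocks are optional */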
static int imx_dsp_rproc_clk_get(struct imx_dsp_rproc *priv)
{
struct device *dev = priv->rproc->dev.parent;
struct clk_bulk_data *clks = priv->clks;
int i;
for (i = 0; i < DSP_RPROC_CLK_MAX; i++)
clks[i].id = imx_dsp_clks_names[i];
return devm_clk_bulk_get_optional(dev, DSP_RPROC_CLK_MAX, clks);
}
static int imx_dsp_rproc_probe(struct platform_device *pdev)
{
const struct imx_dsp_rproc_dcfg *dsp_dcfg;
struct device *dev = &pdev->dev;
struct imx_dsp_rproc *priv;
struct rproc *rproc;
const char *fw_name;
int ret;
dsp_dcfg = of_device_get_match_data(dev);
if (!dsp_dcfg)
return -ENODEV;
ret = rproc_of_parse_firmware(dev, 0, &fw_name);
if (ret) {
dev_err(dev, "failed to parse firmware-name property, ret = %d\n",
ret);
return ret;
}
rproc = rproc_alloc(dev, "imx-dsp-rproc", &imx_dsp_rproc_ops, fw_name,
sizeof(*priv));
if (!rproc)
return -ENOMEM;
priv = rproc->priv;
priv->rproc = rproc;
priv->dsp_dcfg = dsp_dcfg;
if (no_mailboxes)
imx_dsp_rproc_mbox_init = imx_dsp_rproc_mbox_no_alloc;
else
imx_dsp_rproc_mbox_init = imx_dsp_rproc_mbox_alloc;
dev_set_drvdata(dev, rproc);
INIT_WORK(&priv->rproc_work, imx_dsp_rproc_vq_work);
ret = imx_dsp_rproc_detect_mode(priv);
if (ret) {
dev_err(dev, "failed on imx_dsp_rproc_detect_mode\n");
goto err_put_rproc;
}
/* There are multiple power domains required by DSP on some platform */
ret = imx_dsp_attach_pm_domains(priv);
if (ret) {
dev_err(dev, "failed on imx_dsp_attach_pm_domains\n");
goto err_put_rproc;
}
/* Get clocks */
ret = imx_dsp_rproc_clk_get(priv);
if (ret) {
dev_err(dev, "failed on imx_dsp_rproc_clk_get\n");
goto err_detach_domains;
}
init_completion(&priv->pm_comp);
rproc->auto_boot = false;
ret = rproc_add(rproc);
if (ret) {
dev_err(dev, "rproc_add failed\n");
goto err_detach_domains;
}
pm_runtime_enable(dev);
return 0;
err_detach_domains:
imx_dsp_detach_pm_domains(priv);
err_put_rproc:
rproc_free(rproc);
return ret;
}
static void imx_dsp_rproc_remove(struct platform_device *pdev)
{
struct rproc *rproc = platform_get_drvdata(pdev);
struct imx_dsp_rproc *priv = rproc->priv;
pm_runtime_disable(&pdev->dev);
rproc_del(rproc);
imx_dsp_detach_pm_domains(priv);
rproc_free(rproc);
}
/* pm runtime functions */
static int imx_dsp_runtime_resume(struct device *dev)
{
struct rproc *rproc = dev_get_drvdata(dev);
struct imx_dsp_rproc *priv = rproc->priv;
const struct imx_dsp_rproc_dcfg *dsp_dcfg = priv->dsp_dcfg;
int ret;
/*
* There is a power domain attached to the mailbox; if the mailbox were set
* up in probe(), its power would always be enabled and could not be saved.
* So the mailbox setup is moved to runtime resume.
*/
ret = imx_dsp_rproc_mbox_init(priv);
if (ret) {
dev_err(dev, "failed on imx_dsp_rproc_mbox_init\n");
return ret;
}
ret = clk_bulk_prepare_enable(DSP_RPROC_CLK_MAX, priv->clks);
if (ret) {
dev_err(dev, "failed on clk_bulk_prepare_enable\n");
return ret;
}
/* Reset DSP if needed */
if (dsp_dcfg->reset)
dsp_dcfg->reset(priv);
return 0;
}
static int imx_dsp_runtime_suspend(struct device *dev)
{
struct rproc *rproc = dev_get_drvdata(dev);
struct imx_dsp_rproc *priv = rproc->priv;
clk_bulk_disable_unprepare(DSP_RPROC_CLK_MAX, priv->clks);
imx_dsp_rproc_free_mbox(priv);
return 0;
}
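/* Completion callback for request_firmware_nowait(), used at system resume to reload and restart the DSP */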
static void imx_dsp_load_firmware(const struct firmware *fw, void *context)
{
struct rproc *rproc = context;
int ret;
/*
* Same flow as the start procedure:
* load the ELF segments to memory first.
*/
ret = rproc_load_segments(rproc, fw);
if (ret)
goto out;
/* Start the remote processor */
ret = rproc->ops->start(rproc);
if (ret)
goto out;
rproc->ops->kick(rproc, 0);
out:
release_firmware(fw);
}
static int imx_dsp_suspend(struct device *dev)
{
struct rproc *rproc = dev_get_drvdata(dev);
struct imx_dsp_rproc *priv = rproc->priv;
__u32 mmsg = RP_MBOX_SUSPEND_SYSTEM;
int ret;
if (rproc->state != RPROC_RUNNING)
goto out;
reinit_completion(&priv->pm_comp);
/* Tell DSP that suspend is happening */
ret = mbox_send_message(priv->tx_ch, (void *)&mmsg);
if (ret < 0) {
dev_err(dev, "PM mbox_send_message failed: %d\n", ret);
return ret;
}
/*
* The DSP needs to save its context at suspend.
* Wait here for the DSP's response; only then can the power be disabled.
*/
if (!wait_for_completion_timeout(&priv->pm_comp, msecs_to_jiffies(100)))
return -EBUSY;
out:
/*
* The DSP power is disabled in suspend, so force pm runtime to be
* suspended; the power and clocks can then be re-enabled at the
* resume stage.
*/
return pm_runtime_force_suspend(dev);
}
static int imx_dsp_resume(struct device *dev)
{
struct rproc *rproc = dev_get_drvdata(dev);
int ret = 0;
ret = pm_runtime_force_resume(dev);
if (ret)
return ret;
if (rproc->state != RPROC_RUNNING)
return 0;
/*
* The DSP power is disabled at suspend, its memory is reset and the
* image segments are lost. So the firmware needs to be reloaded and the
* DSP restarted if it was in the running state.
*/
ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
rproc->firmware, dev, GFP_KERNEL,
rproc, imx_dsp_load_firmware);
if (ret < 0) {
dev_err(dev, "load firmware failed: %d\n", ret);
goto err;
}
return 0;
err:
pm_runtime_force_suspend(dev);
return ret;
}
static const struct dev_pm_ops imx_dsp_rproc_pm_ops = {
SYSTEM_SLEEP_PM_OPS(imx_dsp_suspend, imx_dsp_resume)
RUNTIME_PM_OPS(imx_dsp_runtime_suspend, imx_dsp_runtime_resume, NULL)
};
static const struct of_device_id imx_dsp_rproc_of_match[] = {
{ .compatible = "fsl,imx8qxp-hifi4", .data = &imx_dsp_rproc_cfg_imx8qxp },
{ .compatible = "fsl,imx8qm-hifi4", .data = &imx_dsp_rproc_cfg_imx8qm },
{ .compatible = "fsl,imx8mp-hifi4", .data = &imx_dsp_rproc_cfg_imx8mp },
{ .compatible = "fsl,imx8ulp-hifi4", .data = &imx_dsp_rproc_cfg_imx8ulp },
{},
};
MODULE_DEVICE_TABLE(of, imx_dsp_rproc_of_match);
static struct platform_driver imx_dsp_rproc_driver = {
.probe = imx_dsp_rproc_probe,
.remove_new = imx_dsp_rproc_remove,
.driver = {
.name = "imx-dsp-rproc",
.of_match_table = imx_dsp_rproc_of_match,
.pm = pm_ptr(&imx_dsp_rproc_pm_ops),
},
};
module_platform_driver(imx_dsp_rproc_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("i.MX HiFi Core Remote Processor Control Driver");
MODULE_AUTHOR("Shengjiu Wang <[email protected]>");
| linux-master | drivers/remoteproc/imx_dsp_rproc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Remote Processor Framework
*
* Copyright (C) 2011 Texas Instruments, Inc.
* Copyright (C) 2011 Google, Inc.
*
* Ohad Ben-Cohen <[email protected]>
* Mark Grosen <[email protected]>
* Brian Swetland <[email protected]>
* Fernando Guzman Lugo <[email protected]>
* Suman Anna <[email protected]>
* Robert Tivy <[email protected]>
* Armando Uribe De Leon <[email protected]>
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/kernel.h>
#include <linux/debugfs.h>
#include <linux/remoteproc.h>
#include <linux/device.h>
#include <linux/uaccess.h>
#include "remoteproc_internal.h"
/* remoteproc debugfs parent dir */
static struct dentry *rproc_dbg;
/*
* A coredump-configuration-to-string lookup table, for exposing a
* human readable configuration via debugfs. Always keep in sync with
* enum rproc_coredump_mechanism
*/
static const char * const rproc_coredump_str[] = {
[RPROC_COREDUMP_DISABLED] = "disabled",
[RPROC_COREDUMP_ENABLED] = "enabled",
[RPROC_COREDUMP_INLINE] = "inline",
};
/* Expose the current coredump configuration via debugfs */
static ssize_t rproc_coredump_read(struct file *filp, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct rproc *rproc = filp->private_data;
char buf[20];
int len;
len = scnprintf(buf, sizeof(buf), "%s\n",
rproc_coredump_str[rproc->dump_conf]);
return simple_read_from_buffer(userbuf, count, ppos, buf, len);
}
/*
* By writing to the 'coredump' debugfs entry, we control the behavior of the
* coredump mechanism dynamically. The default value of this entry is "disabled".
*
* The 'coredump' debugfs entry supports these commands:
*
* disabled: By default coredump collection is disabled. Recovery will
* proceed without collecting any dump.
*
* enabled: When the remoteproc crashes the entire coredump will be copied
* to a separate buffer and exposed to userspace.
*
* inline: The coredump will not be copied to a separate buffer and the
* recovery process will have to wait until data is read by
* userspace. But this avoids the use of extra memory.
*/
static ssize_t rproc_coredump_write(struct file *filp,
const char __user *user_buf, size_t count,
loff_t *ppos)
{
struct rproc *rproc = filp->private_data;
int ret, err = 0;
char buf[20];
if (count < 1 || count > sizeof(buf))
return -EINVAL;
ret = copy_from_user(buf, user_buf, count);
if (ret)
return -EFAULT;
/* remove end of line */
if (buf[count - 1] == '\n')
buf[count - 1] = '\0';
if (rproc->state == RPROC_CRASHED) {
dev_err(&rproc->dev, "can't change coredump configuration\n");
err = -EBUSY;
goto out;
}
if (!strncmp(buf, "disabled", count)) {
rproc->dump_conf = RPROC_COREDUMP_DISABLED;
} else if (!strncmp(buf, "enabled", count)) {
rproc->dump_conf = RPROC_COREDUMP_ENABLED;
} else if (!strncmp(buf, "inline", count)) {
rproc->dump_conf = RPROC_COREDUMP_INLINE;
} else {
dev_err(&rproc->dev, "Invalid coredump configuration\n");
err = -EINVAL;
}
out:
return err ? err : count;
}
static const struct file_operations rproc_coredump_fops = {
.read = rproc_coredump_read,
.write = rproc_coredump_write,
.open = simple_open,
.llseek = generic_file_llseek,
};
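/*
 * Illustrative usage from user space (assuming debugfs is mounted at the
 * default location and the instance is named "remoteproc0"):
 *
 *   echo inline > /sys/kernel/debug/remoteproc/remoteproc0/coredump
 *   cat /sys/kernel/debug/remoteproc/remoteproc0/coredump
 */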
/*
* Some remote processors may support dumping trace logs into a shared
* memory buffer. We expose this trace buffer using debugfs, so users
* can easily tell what's going on remotely.
*
* We will most probably improve the rproc tracing facilities later on,
* but this kind of lightweight and simple mechanism is always good to have,
* as it provides very early tracing with little to no dependencies at all.
*/
static ssize_t rproc_trace_read(struct file *filp, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct rproc_debug_trace *data = filp->private_data;
struct rproc_mem_entry *trace = &data->trace_mem;
void *va;
char buf[100];
int len;
va = rproc_da_to_va(data->rproc, trace->da, trace->len, NULL);
if (!va) {
len = scnprintf(buf, sizeof(buf), "Trace %s not available\n",
trace->name);
va = buf;
} else {
len = strnlen(va, trace->len);
}
return simple_read_from_buffer(userbuf, count, ppos, va, len);
}
static const struct file_operations trace_rproc_ops = {
.read = rproc_trace_read,
.open = simple_open,
.llseek = generic_file_llseek,
};
/* expose the name of the remote processor via debugfs */
static ssize_t rproc_name_read(struct file *filp, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct rproc *rproc = filp->private_data;
/* need room for the name, a newline and a terminating null */
char buf[100];
int i;
i = scnprintf(buf, sizeof(buf), "%.98s\n", rproc->name);
return simple_read_from_buffer(userbuf, count, ppos, buf, i);
}
static const struct file_operations rproc_name_ops = {
.read = rproc_name_read,
.open = simple_open,
.llseek = generic_file_llseek,
};
/* expose recovery flag via debugfs */
static ssize_t rproc_recovery_read(struct file *filp, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct rproc *rproc = filp->private_data;
char *buf = rproc->recovery_disabled ? "disabled\n" : "enabled\n";
return simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf));
}
/*
* By writing to the 'recovery' debugfs entry, we control the behavior of the
* recovery mechanism dynamically. The default value of this entry is "enabled".
*
* The 'recovery' debugfs entry supports these commands:
*
* enabled: When enabled, the remote processor will be automatically
* recovered whenever it crashes. Moreover, if the remote
* processor crashes while recovery is disabled, it will
* be automatically recovered too as soon as recovery is enabled.
*
* disabled: When disabled, a remote processor will remain in a crashed
* state if it crashes. This is useful for debugging purposes;
* without it, debugging a crash is substantially harder.
*
* recover: This function will trigger an immediate recovery if the
* remote processor is in a crashed state, without changing
* or checking the recovery state (enabled/disabled).
* This is useful during debugging sessions, when one expects
* additional crashes to happen after enabling recovery. In this
* case, enabling recovery will make it hard to debug subsequent
* crashes, so it's recommended to keep recovery disabled, and
* instead use the "recover" command as needed.
*/
static ssize_t
rproc_recovery_write(struct file *filp, const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct rproc *rproc = filp->private_data;
char buf[10];
int ret;
if (count < 1 || count > sizeof(buf))
return -EINVAL;
ret = copy_from_user(buf, user_buf, count);
if (ret)
return -EFAULT;
/* remove end of line */
if (buf[count - 1] == '\n')
buf[count - 1] = '\0';
if (!strncmp(buf, "enabled", count)) {
/* change the flag and begin the recovery process if needed */
rproc->recovery_disabled = false;
rproc_trigger_recovery(rproc);
} else if (!strncmp(buf, "disabled", count)) {
rproc->recovery_disabled = true;
} else if (!strncmp(buf, "recover", count)) {
/* begin the recovery process without changing the flag */
rproc_trigger_recovery(rproc);
} else {
return -EINVAL;
}
return count;
}
static const struct file_operations rproc_recovery_ops = {
.read = rproc_recovery_read,
.write = rproc_recovery_write,
.open = simple_open,
.llseek = generic_file_llseek,
};
/* expose the crash trigger via debugfs */
static ssize_t
rproc_crash_write(struct file *filp, const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct rproc *rproc = filp->private_data;
unsigned int type;
int ret;
ret = kstrtouint_from_user(user_buf, count, 0, &type);
if (ret < 0)
return ret;
rproc_report_crash(rproc, type);
return count;
}
static const struct file_operations rproc_crash_ops = {
.write = rproc_crash_write,
.open = simple_open,
.llseek = generic_file_llseek,
};
/* Expose resource table content via debugfs */
static int rproc_rsc_table_show(struct seq_file *seq, void *p)
{
static const char * const types[] = {"carveout", "devmem", "trace", "vdev"};
struct rproc *rproc = seq->private;
struct resource_table *table = rproc->table_ptr;
struct fw_rsc_carveout *c;
struct fw_rsc_devmem *d;
struct fw_rsc_trace *t;
struct fw_rsc_vdev *v;
int i, j;
if (!table) {
seq_puts(seq, "No resource table found\n");
return 0;
}
for (i = 0; i < table->num; i++) {
int offset = table->offset[i];
struct fw_rsc_hdr *hdr = (void *)table + offset;
void *rsc = (void *)hdr + sizeof(*hdr);
switch (hdr->type) {
case RSC_CARVEOUT:
c = rsc;
seq_printf(seq, "Entry %d is of type %s\n", i, types[hdr->type]);
seq_printf(seq, " Device Address 0x%x\n", c->da);
seq_printf(seq, " Physical Address 0x%x\n", c->pa);
seq_printf(seq, " Length 0x%x Bytes\n", c->len);
seq_printf(seq, " Flags 0x%x\n", c->flags);
seq_printf(seq, " Reserved (should be zero) [%d]\n", c->reserved);
seq_printf(seq, " Name %s\n\n", c->name);
break;
case RSC_DEVMEM:
d = rsc;
seq_printf(seq, "Entry %d is of type %s\n", i, types[hdr->type]);
seq_printf(seq, " Device Address 0x%x\n", d->da);
seq_printf(seq, " Physical Address 0x%x\n", d->pa);
seq_printf(seq, " Length 0x%x Bytes\n", d->len);
seq_printf(seq, " Flags 0x%x\n", d->flags);
seq_printf(seq, " Reserved (should be zero) [%d]\n", d->reserved);
seq_printf(seq, " Name %s\n\n", d->name);
break;
case RSC_TRACE:
t = rsc;
seq_printf(seq, "Entry %d is of type %s\n", i, types[hdr->type]);
seq_printf(seq, " Device Address 0x%x\n", t->da);
seq_printf(seq, " Length 0x%x Bytes\n", t->len);
seq_printf(seq, " Reserved (should be zero) [%d]\n", t->reserved);
seq_printf(seq, " Name %s\n\n", t->name);
break;
case RSC_VDEV:
v = rsc;
seq_printf(seq, "Entry %d is of type %s\n", i, types[hdr->type]);
seq_printf(seq, " ID %d\n", v->id);
seq_printf(seq, " Notify ID %d\n", v->notifyid);
seq_printf(seq, " Device features 0x%x\n", v->dfeatures);
seq_printf(seq, " Guest features 0x%x\n", v->gfeatures);
seq_printf(seq, " Config length 0x%x\n", v->config_len);
seq_printf(seq, " Status 0x%x\n", v->status);
seq_printf(seq, " Number of vrings %d\n", v->num_of_vrings);
seq_printf(seq, " Reserved (should be zero) [%d][%d]\n\n",
v->reserved[0], v->reserved[1]);
for (j = 0; j < v->num_of_vrings; j++) {
seq_printf(seq, " Vring %d\n", j);
seq_printf(seq, " Device Address 0x%x\n", v->vring[j].da);
seq_printf(seq, " Alignment %d\n", v->vring[j].align);
seq_printf(seq, " Number of buffers %d\n", v->vring[j].num);
seq_printf(seq, " Notify ID %d\n", v->vring[j].notifyid);
seq_printf(seq, " Physical Address 0x%x\n\n",
v->vring[j].pa);
}
break;
default:
seq_printf(seq, "Unknown resource type found: %d [hdr: %pK]\n",
hdr->type, hdr);
break;
}
}
return 0;
}
DEFINE_SHOW_ATTRIBUTE(rproc_rsc_table);
/* Expose carveout content via debugfs */
static int rproc_carveouts_show(struct seq_file *seq, void *p)
{
struct rproc *rproc = seq->private;
struct rproc_mem_entry *carveout;
list_for_each_entry(carveout, &rproc->carveouts, node) {
seq_puts(seq, "Carveout memory entry:\n");
seq_printf(seq, "\tName: %s\n", carveout->name);
seq_printf(seq, "\tVirtual address: %pK\n", carveout->va);
seq_printf(seq, "\tDMA address: %pad\n", &carveout->dma);
seq_printf(seq, "\tDevice address: 0x%x\n", carveout->da);
seq_printf(seq, "\tLength: 0x%zx Bytes\n\n", carveout->len);
}
return 0;
}
DEFINE_SHOW_ATTRIBUTE(rproc_carveouts);
void rproc_remove_trace_file(struct dentry *tfile)
{
debugfs_remove(tfile);
}
struct dentry *rproc_create_trace_file(const char *name, struct rproc *rproc,
struct rproc_debug_trace *trace)
{
return debugfs_create_file(name, 0400, rproc->dbg_dir, trace,
&trace_rproc_ops);
}
void rproc_delete_debug_dir(struct rproc *rproc)
{
debugfs_remove_recursive(rproc->dbg_dir);
}
void rproc_create_debug_dir(struct rproc *rproc)
{
struct device *dev = &rproc->dev;
if (!rproc_dbg)
return;
rproc->dbg_dir = debugfs_create_dir(dev_name(dev), rproc_dbg);
debugfs_create_file("name", 0400, rproc->dbg_dir,
rproc, &rproc_name_ops);
debugfs_create_file("recovery", 0600, rproc->dbg_dir,
rproc, &rproc_recovery_ops);
debugfs_create_file("crash", 0200, rproc->dbg_dir,
rproc, &rproc_crash_ops);
debugfs_create_file("resource_table", 0400, rproc->dbg_dir,
rproc, &rproc_rsc_table_fops);
debugfs_create_file("carveout_memories", 0400, rproc->dbg_dir,
rproc, &rproc_carveouts_fops);
debugfs_create_file("coredump", 0600, rproc->dbg_dir,
rproc, &rproc_coredump_fops);
}
void __init rproc_init_debugfs(void)
{
if (debugfs_initialized())
rproc_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL);
}
void __exit rproc_exit_debugfs(void)
{
debugfs_remove(rproc_dbg);
}
| linux-master | drivers/remoteproc/remoteproc_debugfs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ZynqMP R5 Remote Processor driver
*
*/
#include <dt-bindings/power/xlnx-zynqmp-power.h>
#include <linux/dma-mapping.h>
#include <linux/firmware/xlnx-zynqmp.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/mailbox/zynqmp-ipi-message.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/remoteproc.h>
#include "remoteproc_internal.h"
/* IPI buffer MAX length */
#define IPI_BUF_LEN_MAX 32U
/* RX mailbox client buffer max length */
#define MBOX_CLIENT_BUF_MAX (IPI_BUF_LEN_MAX + \
sizeof(struct zynqmp_ipi_message))
/*
* Settings for the RPU cluster mode, which
* reflect the possible values of the xlnx,cluster-mode DT property
*/
enum zynqmp_r5_cluster_mode {
SPLIT_MODE = 0, /* When cores run as separate processor */
LOCKSTEP_MODE = 1, /* cores execute the same code in lockstep, clk-for-clk */
SINGLE_CPU_MODE = 2, /* core0 is held in reset and only core1 runs */
};
/**
* struct mem_bank_data - Memory Bank description
*
* @addr: Start address of memory bank
* @size: Size of Memory bank
* @pm_domain_id: Power-domains id of memory bank for firmware to turn on/off
* @bank_name: name of the bank for remoteproc framework
*/
struct mem_bank_data {
phys_addr_t addr;
size_t size;
u32 pm_domain_id;
char *bank_name;
};
/**
* struct mbox_info
*
* @rx_mc_buf: to copy data from mailbox rx channel
* @tx_mc_buf: to copy data to mailbox tx channel
* @r5_core: this mailbox's corresponding r5_core pointer
* @mbox_work: schedule work after receiving data from mailbox
* @mbox_cl: mailbox client
* @tx_chan: mailbox tx channel
* @rx_chan: mailbox rx channel
*/
struct mbox_info {
unsigned char rx_mc_buf[MBOX_CLIENT_BUF_MAX];
unsigned char tx_mc_buf[MBOX_CLIENT_BUF_MAX];
struct zynqmp_r5_core *r5_core;
struct work_struct mbox_work;
struct mbox_client mbox_cl;
struct mbox_chan *tx_chan;
struct mbox_chan *rx_chan;
};
/*
* Hardcoded TCM bank values. This will be removed once TCM bindings are
* accepted for system-dt specifications and upstreamed in linux kernel
*/
static const struct mem_bank_data zynqmp_tcm_banks[] = {
{0xffe00000UL, 0x10000UL, PD_R5_0_ATCM, "atcm0"}, /* TCM 64KB each */
{0xffe20000UL, 0x10000UL, PD_R5_0_BTCM, "btcm0"},
{0xffe90000UL, 0x10000UL, PD_R5_1_ATCM, "atcm1"},
{0xffeb0000UL, 0x10000UL, PD_R5_1_BTCM, "btcm1"},
};
/**
 * struct zynqmp_r5_core - remoteproc data for a single R5 core
 *
 * @dev: device of RPU instance
 * @np: device node of RPU instance
 * @tcm_bank_count: number of TCM banks accessible to this RPU
* @tcm_banks: array of each TCM bank data
* @rproc: rproc handle
* @pm_domain_id: RPU CPU power domain id
* @ipi: pointer to mailbox information
*/
struct zynqmp_r5_core {
struct device *dev;
struct device_node *np;
int tcm_bank_count;
struct mem_bank_data **tcm_banks;
struct rproc *rproc;
u32 pm_domain_id;
struct mbox_info *ipi;
};
/**
 * struct zynqmp_r5_cluster - R5F subsystem (cluster) data
 *
 * @dev: r5f subsystem cluster device
* @mode: cluster mode of type zynqmp_r5_cluster_mode
* @core_count: number of r5 cores used for this cluster mode
* @r5_cores: Array of pointers pointing to r5 core
*/
struct zynqmp_r5_cluster {
struct device *dev;
enum zynqmp_r5_cluster_mode mode;
int core_count;
struct zynqmp_r5_core **r5_cores;
};
/**
* event_notified_idr_cb() - callback for vq_interrupt per notifyid
* @id: rproc->notify id
* @ptr: pointer to idr private data
* @data: data passed to idr_for_each callback
*
* Pass notification to remoteproc virtio
*
 * Return: 0. A return value is only needed to satisfy the idr_for_each()
 * callback prototype.
**/
static int event_notified_idr_cb(int id, void *ptr, void *data)
{
struct rproc *rproc = data;
if (rproc_vq_interrupt(rproc, id) == IRQ_NONE)
dev_dbg(&rproc->dev, "data not found for vqid=%d\n", id);
return 0;
}
/**
* handle_event_notified() - remoteproc notification work function
* @work: pointer to the work structure
*
 * It checks each registered remoteproc notify ID.
*/
static void handle_event_notified(struct work_struct *work)
{
struct mbox_info *ipi;
struct rproc *rproc;
ipi = container_of(work, struct mbox_info, mbox_work);
rproc = ipi->r5_core->rproc;
/*
	 * The IPI is used only as an interrupt. The RPU firmware may or may
	 * not write a notifyid when it triggers the IPI, so scan all the
	 * registered notifyids and find the one with a pending message.
	 * Even if the message from the firmware is NULL, try to get the vqid.
*/
idr_for_each(&rproc->notifyids, event_notified_idr_cb, rproc);
}
/**
* zynqmp_r5_mb_rx_cb() - receive channel mailbox callback
* @cl: mailbox client
* @msg: message pointer
*
 * Receive data from the IPI buffer, ack the interrupt and then
 * schedule the R5 notification work.
*/
static void zynqmp_r5_mb_rx_cb(struct mbox_client *cl, void *msg)
{
struct zynqmp_ipi_message *ipi_msg, *buf_msg;
struct mbox_info *ipi;
size_t len;
ipi = container_of(cl, struct mbox_info, mbox_cl);
/* copy data from ipi buffer to r5_core */
ipi_msg = (struct zynqmp_ipi_message *)msg;
buf_msg = (struct zynqmp_ipi_message *)ipi->rx_mc_buf;
len = ipi_msg->len;
if (len > IPI_BUF_LEN_MAX) {
dev_warn(cl->dev, "msg size exceeded than %d\n",
IPI_BUF_LEN_MAX);
len = IPI_BUF_LEN_MAX;
}
buf_msg->len = len;
memcpy(buf_msg->data, ipi_msg->data, len);
/* received and processed interrupt ack */
if (mbox_send_message(ipi->rx_chan, NULL) < 0)
dev_err(cl->dev, "ack failed to mbox rx_chan\n");
schedule_work(&ipi->mbox_work);
}
/**
 * zynqmp_r5_setup_mbox() - Set up mailbox channels for one R5 core
 *
 * @cdev: child node device
 *
 * Request the TX and RX mailbox channels and initialize the notification work.
 *
 * Return: pointer to mbox_info on success, NULL on failure
*/
static struct mbox_info *zynqmp_r5_setup_mbox(struct device *cdev)
{
struct mbox_client *mbox_cl;
struct mbox_info *ipi;
ipi = kzalloc(sizeof(*ipi), GFP_KERNEL);
if (!ipi)
return NULL;
mbox_cl = &ipi->mbox_cl;
mbox_cl->rx_callback = zynqmp_r5_mb_rx_cb;
mbox_cl->tx_block = false;
mbox_cl->knows_txdone = false;
mbox_cl->tx_done = NULL;
mbox_cl->dev = cdev;
/* Request TX and RX channels */
ipi->tx_chan = mbox_request_channel_byname(mbox_cl, "tx");
if (IS_ERR(ipi->tx_chan)) {
ipi->tx_chan = NULL;
kfree(ipi);
dev_warn(cdev, "mbox tx channel request failed\n");
return NULL;
}
ipi->rx_chan = mbox_request_channel_byname(mbox_cl, "rx");
if (IS_ERR(ipi->rx_chan)) {
mbox_free_channel(ipi->tx_chan);
ipi->rx_chan = NULL;
ipi->tx_chan = NULL;
kfree(ipi);
dev_warn(cdev, "mbox rx channel request failed\n");
return NULL;
}
INIT_WORK(&ipi->mbox_work, handle_event_notified);
return ipi;
}
static void zynqmp_r5_free_mbox(struct mbox_info *ipi)
{
if (!ipi)
return;
if (ipi->tx_chan) {
mbox_free_channel(ipi->tx_chan);
ipi->tx_chan = NULL;
}
if (ipi->rx_chan) {
mbox_free_channel(ipi->rx_chan);
ipi->rx_chan = NULL;
}
kfree(ipi);
}
/*
 * zynqmp_r5_rproc_kick() - kick the firmware if a mailbox is provided
* @rproc: r5 core's corresponding rproc structure
* @vqid: virtqueue ID
*/
static void zynqmp_r5_rproc_kick(struct rproc *rproc, int vqid)
{
struct zynqmp_r5_core *r5_core = rproc->priv;
struct device *dev = r5_core->dev;
struct zynqmp_ipi_message *mb_msg;
struct mbox_info *ipi;
int ret;
ipi = r5_core->ipi;
if (!ipi)
return;
mb_msg = (struct zynqmp_ipi_message *)ipi->tx_mc_buf;
memcpy(mb_msg->data, &vqid, sizeof(vqid));
mb_msg->len = sizeof(vqid);
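	/* The payload is just the virtqueue index for the remote side to process. */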
ret = mbox_send_message(ipi->tx_chan, mb_msg);
if (ret < 0)
dev_warn(dev, "failed to send message\n");
}
/*
* zynqmp_r5_set_mode()
*
* set RPU cluster and TCM operation mode
*
* @r5_core: pointer to zynqmp_r5_core type object
* @fw_reg_val: value expected by firmware to configure RPU cluster mode
* @tcm_mode: value expected by fw to configure TCM mode (lockstep or split)
*
* Return: 0 for success and < 0 for failure
*/
static int zynqmp_r5_set_mode(struct zynqmp_r5_core *r5_core,
enum rpu_oper_mode fw_reg_val,
enum rpu_tcm_comb tcm_mode)
{
int ret;
ret = zynqmp_pm_set_rpu_mode(r5_core->pm_domain_id, fw_reg_val);
if (ret < 0) {
dev_err(r5_core->dev, "failed to set RPU mode\n");
return ret;
}
ret = zynqmp_pm_set_tcm_config(r5_core->pm_domain_id, tcm_mode);
if (ret < 0)
dev_err(r5_core->dev, "failed to configure TCM\n");
return ret;
}
/*
* zynqmp_r5_rproc_start()
* @rproc: single R5 core's corresponding rproc instance
*
* Start R5 Core from designated boot address.
*
* return 0 on success, otherwise non-zero value on failure
*/
static int zynqmp_r5_rproc_start(struct rproc *rproc)
{
struct zynqmp_r5_core *r5_core = rproc->priv;
enum rpu_boot_mem bootmem;
int ret;
/*
* The exception vector pointers (EVP) refer to the base-address of
* exception vectors (for reset, IRQ, FIQ, etc). The reset-vector
* starts at the base-address and subsequent vectors are on 4-byte
* boundaries.
*
* Exception vectors can start either from 0x0000_0000 (LOVEC) or
* from 0xFFFF_0000 (HIVEC) which is mapped in the OCM (On-Chip Memory)
*
* Usually firmware will put Exception vectors at LOVEC.
*
	 * It is not recommended to change the exception vector.
* Changing the EVP to HIVEC will result in increased interrupt latency
* and jitter. Also, if the OCM is secured and the Cortex-R5F processor
* is non-secured, then the Cortex-R5F processor cannot access the
* HIVEC exception vectors in the OCM.
*/
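	/* Boot addresses at or above 0xFFFC0000 select HIVEC (OCM); lower addresses use LOVEC (TCM). */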
bootmem = (rproc->bootaddr >= 0xFFFC0000) ?
PM_RPU_BOOTMEM_HIVEC : PM_RPU_BOOTMEM_LOVEC;
dev_dbg(r5_core->dev, "RPU boot addr 0x%llx from %s.", rproc->bootaddr,
bootmem == PM_RPU_BOOTMEM_HIVEC ? "OCM" : "TCM");
ret = zynqmp_pm_request_wake(r5_core->pm_domain_id, 1,
bootmem, ZYNQMP_PM_REQUEST_ACK_NO);
if (ret)
dev_err(r5_core->dev,
"failed to start RPU = 0x%x\n", r5_core->pm_domain_id);
return ret;
}
/*
* zynqmp_r5_rproc_stop()
* @rproc: single R5 core's corresponding rproc instance
*
* Power down R5 Core.
*
* return 0 on success, otherwise non-zero value on failure
*/
static int zynqmp_r5_rproc_stop(struct rproc *rproc)
{
struct zynqmp_r5_core *r5_core = rproc->priv;
int ret;
ret = zynqmp_pm_force_pwrdwn(r5_core->pm_domain_id,
ZYNQMP_PM_REQUEST_ACK_BLOCKING);
if (ret)
dev_err(r5_core->dev, "failed to stop remoteproc RPU %d\n", ret);
return ret;
}
/*
* zynqmp_r5_mem_region_map()
* @rproc: single R5 core's corresponding rproc instance
* @mem: mem descriptor to map reserved memory-regions
*
* Callback to map va for memory-region's carveout.
*
* return 0 on success, otherwise non-zero value on failure
*/
static int zynqmp_r5_mem_region_map(struct rproc *rproc,
struct rproc_mem_entry *mem)
{
void __iomem *va;
va = ioremap_wc(mem->dma, mem->len);
if (IS_ERR_OR_NULL(va))
return -ENOMEM;
mem->va = (void *)va;
return 0;
}
/*
 * zynqmp_r5_mem_region_unmap()
* @rproc: single R5 core's corresponding rproc instance
* @mem: mem entry to unmap
*
* Unmap memory-region carveout
*
* return: always returns 0
*/
static int zynqmp_r5_mem_region_unmap(struct rproc *rproc,
struct rproc_mem_entry *mem)
{
iounmap((void __iomem *)mem->va);
return 0;
}
/*
* add_mem_regions_carveout()
* @rproc: single R5 core's corresponding rproc instance
*
* Construct rproc mem carveouts from memory-region property nodes
*
* return 0 on success, otherwise non-zero value on failure
*/
static int add_mem_regions_carveout(struct rproc *rproc)
{
struct rproc_mem_entry *rproc_mem;
struct zynqmp_r5_core *r5_core;
struct of_phandle_iterator it;
struct reserved_mem *rmem;
int i = 0;
r5_core = rproc->priv;
/* Register associated reserved memory regions */
of_phandle_iterator_init(&it, r5_core->np, "memory-region", NULL, 0);
while (of_phandle_iterator_next(&it) == 0) {
rmem = of_reserved_mem_lookup(it.node);
if (!rmem) {
of_node_put(it.node);
dev_err(&rproc->dev, "unable to acquire memory-region\n");
return -EINVAL;
}
if (!strcmp(it.node->name, "vdev0buffer")) {
/* Init reserved memory for vdev buffer */
rproc_mem = rproc_of_resm_mem_entry_init(&rproc->dev, i,
rmem->size,
rmem->base,
it.node->name);
} else {
/* Register associated reserved memory regions */
rproc_mem = rproc_mem_entry_init(&rproc->dev, NULL,
(dma_addr_t)rmem->base,
rmem->size, rmem->base,
zynqmp_r5_mem_region_map,
zynqmp_r5_mem_region_unmap,
it.node->name);
}
if (!rproc_mem) {
of_node_put(it.node);
return -ENOMEM;
}
rproc_add_carveout(rproc, rproc_mem);
dev_dbg(&rproc->dev, "reserved mem carveout %s addr=%llx, size=0x%llx",
it.node->name, rmem->base, rmem->size);
i++;
}
return 0;
}
/*
* tcm_mem_unmap()
* @rproc: single R5 core's corresponding rproc instance
* @mem: tcm mem entry to unmap
*
* Unmap TCM banks when powering down R5 core.
*
* return always 0
*/
static int tcm_mem_unmap(struct rproc *rproc, struct rproc_mem_entry *mem)
{
iounmap((void __iomem *)mem->va);
return 0;
}
/*
* tcm_mem_map()
* @rproc: single R5 core's corresponding rproc instance
* @mem: tcm memory entry descriptor
*
 * Given a TCM bank entry, set up the virtual address for the TCM bank
 * remoteproc carveout and handle the va-to-da address translation.
*
* return 0 on success, otherwise non-zero value on failure
*/
static int tcm_mem_map(struct rproc *rproc,
struct rproc_mem_entry *mem)
{
void __iomem *va;
va = ioremap_wc(mem->dma, mem->len);
if (IS_ERR_OR_NULL(va))
return -ENOMEM;
/* Update memory entry va */
mem->va = (void *)va;
/* clear TCMs */
memset_io(va, 0, mem->len);
/*
	 * The R5s expect their TCM banks to be at addresses 0x0 and 0x20000,
* while on the Linux side they are at 0xffexxxxx.
*
* Zero out the high 12 bits of the address. This will give
* expected values for TCM Banks 0A and 0B (0x0 and 0x20000).
*/
mem->da &= 0x000fffff;
/*
* TCM Banks 1A and 1B still have to be translated.
*
* Below handle these two banks' absolute addresses (0xffe90000 and
* 0xffeb0000) and convert to the expected relative addresses
* (0x0 and 0x20000).
*/
if (mem->da == 0x90000 || mem->da == 0xB0000)
mem->da -= 0x90000;
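	/*
	 * Worked example: bank "btcm1" at 0xffeb0000 becomes 0xb0000 after the
	 * mask above and 0x20000 after the subtraction, matching the device
	 * address the R5 expects for TCM bank 1B.
	 */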
/* if translated TCM bank address is not valid report error */
if (mem->da != 0x0 && mem->da != 0x20000) {
dev_err(&rproc->dev, "invalid TCM address: %x\n", mem->da);
return -EINVAL;
}
return 0;
}
/*
* add_tcm_carveout_split_mode()
* @rproc: single R5 core's corresponding rproc instance
*
* allocate and add remoteproc carveout for TCM memory in split mode
*
* return 0 on success, otherwise non-zero value on failure
*/
static int add_tcm_carveout_split_mode(struct rproc *rproc)
{
struct rproc_mem_entry *rproc_mem;
struct zynqmp_r5_core *r5_core;
int i, num_banks, ret;
phys_addr_t bank_addr;
struct device *dev;
u32 pm_domain_id;
size_t bank_size;
char *bank_name;
r5_core = rproc->priv;
dev = r5_core->dev;
num_banks = r5_core->tcm_bank_count;
/*
	 * Power on each 64 KB TCM bank, register its address space along with
	 * the map and unmap callbacks, and add the corresponding carveout.
*/
for (i = 0; i < num_banks; i++) {
bank_addr = r5_core->tcm_banks[i]->addr;
bank_name = r5_core->tcm_banks[i]->bank_name;
bank_size = r5_core->tcm_banks[i]->size;
pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
ret = zynqmp_pm_request_node(pm_domain_id,
ZYNQMP_PM_CAPABILITY_ACCESS, 0,
ZYNQMP_PM_REQUEST_ACK_BLOCKING);
if (ret < 0) {
dev_err(dev, "failed to turn on TCM 0x%x", pm_domain_id);
goto release_tcm_split;
}
dev_dbg(dev, "TCM carveout split mode %s addr=%llx, size=0x%lx",
bank_name, bank_addr, bank_size);
rproc_mem = rproc_mem_entry_init(dev, NULL, bank_addr,
bank_size, bank_addr,
tcm_mem_map, tcm_mem_unmap,
bank_name);
if (!rproc_mem) {
ret = -ENOMEM;
zynqmp_pm_release_node(pm_domain_id);
goto release_tcm_split;
}
rproc_add_carveout(rproc, rproc_mem);
}
return 0;
release_tcm_split:
	/* On failure, turn off all TCM banks that were turned on above */
for (i--; i >= 0; i--) {
pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
zynqmp_pm_release_node(pm_domain_id);
}
return ret;
}
/*
* add_tcm_carveout_lockstep_mode()
* @rproc: single R5 core's corresponding rproc instance
*
* allocate and add remoteproc carveout for TCM memory in lockstep mode
*
* return 0 on success, otherwise non-zero value on failure
*/
static int add_tcm_carveout_lockstep_mode(struct rproc *rproc)
{
struct rproc_mem_entry *rproc_mem;
struct zynqmp_r5_core *r5_core;
int i, num_banks, ret;
phys_addr_t bank_addr;
size_t bank_size = 0;
struct device *dev;
u32 pm_domain_id;
char *bank_name;
r5_core = rproc->priv;
dev = r5_core->dev;
/* Go through zynqmp banks for r5 node */
num_banks = r5_core->tcm_bank_count;
/*
	 * In lockstep mode the TCM banks form one contiguous memory block,
	 * but each bank still has to be powered on individually. Enable each
	 * bank in turn and accumulate the sizes to build a single contiguous
	 * memory region.
*/
bank_addr = r5_core->tcm_banks[0]->addr;
bank_name = r5_core->tcm_banks[0]->bank_name;
for (i = 0; i < num_banks; i++) {
bank_size += r5_core->tcm_banks[i]->size;
pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
/* Turn on each TCM bank individually */
ret = zynqmp_pm_request_node(pm_domain_id,
ZYNQMP_PM_CAPABILITY_ACCESS, 0,
ZYNQMP_PM_REQUEST_ACK_BLOCKING);
if (ret < 0) {
dev_err(dev, "failed to turn on TCM 0x%x", pm_domain_id);
goto release_tcm_lockstep;
}
}
dev_dbg(dev, "TCM add carveout lockstep mode %s addr=0x%llx, size=0x%lx",
bank_name, bank_addr, bank_size);
/* Register TCM address range, TCM map and unmap functions */
rproc_mem = rproc_mem_entry_init(dev, NULL, bank_addr,
bank_size, bank_addr,
tcm_mem_map, tcm_mem_unmap,
bank_name);
if (!rproc_mem) {
ret = -ENOMEM;
goto release_tcm_lockstep;
}
	/* Registration succeeded, add the carveout */
rproc_add_carveout(rproc, rproc_mem);
return 0;
release_tcm_lockstep:
	/* On failure, turn off all TCM banks that were turned on above */
for (i--; i >= 0; i--) {
pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
zynqmp_pm_release_node(pm_domain_id);
}
return ret;
}
/*
* add_tcm_banks()
* @rproc: single R5 core's corresponding rproc instance
*
* allocate and add remoteproc carveouts for TCM memory based on cluster mode
*
* return 0 on success, otherwise non-zero value on failure
*/
static int add_tcm_banks(struct rproc *rproc)
{
struct zynqmp_r5_cluster *cluster;
struct zynqmp_r5_core *r5_core;
struct device *dev;
r5_core = rproc->priv;
if (!r5_core)
return -EINVAL;
dev = r5_core->dev;
cluster = dev_get_drvdata(dev->parent);
if (!cluster) {
dev_err(dev->parent, "Invalid driver data\n");
return -EINVAL;
}
/*
	 * In lockstep mode the TCM banks form one contiguous 256 KB region.
	 * In split mode, each TCM bank is a separate, non-contiguous 64 KB bank.
* We add memory carveouts accordingly.
*/
if (cluster->mode == SPLIT_MODE)
return add_tcm_carveout_split_mode(rproc);
else if (cluster->mode == LOCKSTEP_MODE)
return add_tcm_carveout_lockstep_mode(rproc);
return -EINVAL;
}
/*
* zynqmp_r5_parse_fw()
* @rproc: single R5 core's corresponding rproc instance
* @fw: ptr to firmware to be loaded onto r5 core
*
* get resource table if available
*
* return 0 on success, otherwise non-zero value on failure
*/
static int zynqmp_r5_parse_fw(struct rproc *rproc, const struct firmware *fw)
{
int ret;
ret = rproc_elf_load_rsc_table(rproc, fw);
if (ret == -EINVAL) {
/*
		 * A resource table is only required for IPC. Its absence is not
		 * necessarily an error (for example, a bare-metal "hello world"
		 * R5 application), so simply inform the user and keep going.
*/
dev_info(&rproc->dev, "no resource table found.\n");
ret = 0;
}
return ret;
}
/**
 * zynqmp_r5_rproc_prepare()
 * adds carveouts for TCM banks and reserved memory regions
 *
 * @rproc: rproc instance
*
* Return: 0 for success else < 0 error code
*/
static int zynqmp_r5_rproc_prepare(struct rproc *rproc)
{
int ret;
ret = add_tcm_banks(rproc);
if (ret) {
dev_err(&rproc->dev, "failed to get TCM banks, err %d\n", ret);
return ret;
}
ret = add_mem_regions_carveout(rproc);
if (ret) {
dev_err(&rproc->dev, "failed to get reserve mem regions %d\n", ret);
return ret;
}
return 0;
}
/**
* zynqmp_r5_rproc_unprepare()
* Turns off TCM banks using power-domain id
*
 * @rproc: rproc instance
*
* Return: always 0
*/
static int zynqmp_r5_rproc_unprepare(struct rproc *rproc)
{
struct zynqmp_r5_core *r5_core;
u32 pm_domain_id;
int i;
r5_core = rproc->priv;
for (i = 0; i < r5_core->tcm_bank_count; i++) {
pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
if (zynqmp_pm_release_node(pm_domain_id))
dev_warn(r5_core->dev,
"can't turn off TCM bank 0x%x", pm_domain_id);
}
return 0;
}
static const struct rproc_ops zynqmp_r5_rproc_ops = {
.prepare = zynqmp_r5_rproc_prepare,
.unprepare = zynqmp_r5_rproc_unprepare,
.start = zynqmp_r5_rproc_start,
.stop = zynqmp_r5_rproc_stop,
.load = rproc_elf_load_segments,
.parse_fw = zynqmp_r5_parse_fw,
.find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
.sanity_check = rproc_elf_sanity_check,
.get_boot_addr = rproc_elf_get_boot_addr,
.kick = zynqmp_r5_rproc_kick,
};
/**
* zynqmp_r5_add_rproc_core()
 * Allocate and add a struct rproc object for one R5F core.
 * This is called once per R5F core.
 *
 * @cdev: device of the R5 core
*
* Return: zynqmp_r5_core object for success else error code pointer
*/
static struct zynqmp_r5_core *zynqmp_r5_add_rproc_core(struct device *cdev)
{
struct zynqmp_r5_core *r5_core;
struct rproc *r5_rproc;
int ret;
/* Set up DMA mask */
ret = dma_set_coherent_mask(cdev, DMA_BIT_MASK(32));
if (ret)
return ERR_PTR(ret);
/* Allocate remoteproc instance */
r5_rproc = rproc_alloc(cdev, dev_name(cdev),
&zynqmp_r5_rproc_ops,
NULL, sizeof(struct zynqmp_r5_core));
if (!r5_rproc) {
dev_err(cdev, "failed to allocate memory for rproc instance\n");
return ERR_PTR(-ENOMEM);
}
r5_rproc->auto_boot = false;
r5_core = r5_rproc->priv;
r5_core->dev = cdev;
r5_core->np = dev_of_node(cdev);
if (!r5_core->np) {
dev_err(cdev, "can't get device node for r5 core\n");
ret = -EINVAL;
goto free_rproc;
}
/* Add R5 remoteproc core */
ret = rproc_add(r5_rproc);
if (ret) {
dev_err(cdev, "failed to add r5 remoteproc\n");
goto free_rproc;
}
r5_core->rproc = r5_rproc;
return r5_core;
free_rproc:
rproc_free(r5_rproc);
return ERR_PTR(ret);
}
/**
* zynqmp_r5_get_tcm_node()
 * Ideally this function would parse the TCM nodes and store the information
 * in the r5_core instances. For now, hardcoded TCM information is used while
 * the TCM bindings for system-dt are still being developed.
*
* @cluster: pointer to zynqmp_r5_cluster type object
*
* Return: 0 for success and < 0 error code for failure.
*/
static int zynqmp_r5_get_tcm_node(struct zynqmp_r5_cluster *cluster)
{
struct device *dev = cluster->dev;
struct zynqmp_r5_core *r5_core;
int tcm_bank_count, tcm_node;
int i, j;
tcm_bank_count = ARRAY_SIZE(zynqmp_tcm_banks);
/* count per core tcm banks */
tcm_bank_count = tcm_bank_count / cluster->core_count;
/*
* r5 core 0 will use all of TCM banks in lockstep mode.
* In split mode, r5 core0 will use 128k and r5 core1 will use another
* 128k. Assign TCM banks to each core accordingly
*/
tcm_node = 0;
for (i = 0; i < cluster->core_count; i++) {
r5_core = cluster->r5_cores[i];
r5_core->tcm_banks = devm_kcalloc(dev, tcm_bank_count,
sizeof(struct mem_bank_data *),
GFP_KERNEL);
if (!r5_core->tcm_banks)
return -ENOMEM;
for (j = 0; j < tcm_bank_count; j++) {
/*
* Use pre-defined TCM reg values.
* Eventually this should be replaced by values
* parsed from dts.
*/
r5_core->tcm_banks[j] =
(struct mem_bank_data *)&zynqmp_tcm_banks[tcm_node];
tcm_node++;
}
r5_core->tcm_bank_count = tcm_bank_count;
}
return 0;
}
/*
* zynqmp_r5_core_init()
* Create and initialize zynqmp_r5_core type object
*
* @cluster: pointer to zynqmp_r5_cluster type object
* @fw_reg_val: value expected by firmware to configure RPU cluster mode
* @tcm_mode: value expected by fw to configure TCM mode (lockstep or split)
*
* Return: 0 for success and error code for failure.
*/
static int zynqmp_r5_core_init(struct zynqmp_r5_cluster *cluster,
enum rpu_oper_mode fw_reg_val,
enum rpu_tcm_comb tcm_mode)
{
struct device *dev = cluster->dev;
struct zynqmp_r5_core *r5_core;
int ret, i;
ret = zynqmp_r5_get_tcm_node(cluster);
if (ret < 0) {
dev_err(dev, "can't get tcm node, err %d\n", ret);
return ret;
}
for (i = 0; i < cluster->core_count; i++) {
r5_core = cluster->r5_cores[i];
/* Initialize r5 cores with power-domains parsed from dts */
ret = of_property_read_u32_index(r5_core->np, "power-domains",
1, &r5_core->pm_domain_id);
if (ret) {
dev_err(dev, "failed to get power-domains property\n");
return ret;
}
ret = zynqmp_r5_set_mode(r5_core, fw_reg_val, tcm_mode);
if (ret) {
dev_err(dev, "failed to set r5 cluster mode %d, err %d\n",
cluster->mode, ret);
return ret;
}
}
return 0;
}
/*
* zynqmp_r5_cluster_init()
* Create and initialize zynqmp_r5_cluster type object
*
* @cluster: pointer to zynqmp_r5_cluster type object
*
* Return: 0 for success and error code for failure.
*/
static int zynqmp_r5_cluster_init(struct zynqmp_r5_cluster *cluster)
{
enum zynqmp_r5_cluster_mode cluster_mode = LOCKSTEP_MODE;
struct device *dev = cluster->dev;
struct device_node *dev_node = dev_of_node(dev);
struct platform_device *child_pdev;
struct zynqmp_r5_core **r5_cores;
enum rpu_oper_mode fw_reg_val;
struct device **child_devs;
struct device_node *child;
enum rpu_tcm_comb tcm_mode;
int core_count, ret, i;
struct mbox_info *ipi;
ret = of_property_read_u32(dev_node, "xlnx,cluster-mode", &cluster_mode);
/*
	 * On success of_property_read_u32() returns 0; if the property is not
	 * defined it returns -EINVAL and the default LOCKSTEP mode is used.
	 * Any other negative error code is treated as a failure.
*/
if (ret != -EINVAL && ret != 0) {
dev_err(dev, "Invalid xlnx,cluster-mode property\n");
return ret;
}
/*
* For now driver only supports split mode and lockstep mode.
* fail driver probe if either of that is not set in dts.
*/
if (cluster_mode == LOCKSTEP_MODE) {
tcm_mode = PM_RPU_TCM_COMB;
fw_reg_val = PM_RPU_MODE_LOCKSTEP;
} else if (cluster_mode == SPLIT_MODE) {
tcm_mode = PM_RPU_TCM_SPLIT;
fw_reg_val = PM_RPU_MODE_SPLIT;
} else {
dev_err(dev, "driver does not support cluster mode %d\n", cluster_mode);
return -EINVAL;
}
/*
	 * The number of cores is determined by the number of child nodes of
	 * the r5f subsystem node in the device tree. Split mode expects two
	 * child nodes. In lockstep mode, if two child nodes are present only
	 * the first one is used as core0 and the core1 node is ignored.
*/
core_count = of_get_available_child_count(dev_node);
if (core_count == 0) {
dev_err(dev, "Invalid number of r5 cores %d", core_count);
return -EINVAL;
} else if (cluster_mode == SPLIT_MODE && core_count != 2) {
dev_err(dev, "Invalid number of r5 cores for split mode\n");
return -EINVAL;
} else if (cluster_mode == LOCKSTEP_MODE && core_count == 2) {
dev_warn(dev, "Only r5 core0 will be used\n");
core_count = 1;
}
child_devs = kcalloc(core_count, sizeof(struct device *), GFP_KERNEL);
if (!child_devs)
return -ENOMEM;
r5_cores = kcalloc(core_count,
sizeof(struct zynqmp_r5_core *), GFP_KERNEL);
if (!r5_cores) {
kfree(child_devs);
return -ENOMEM;
}
i = 0;
for_each_available_child_of_node(dev_node, child) {
child_pdev = of_find_device_by_node(child);
if (!child_pdev) {
of_node_put(child);
ret = -ENODEV;
goto release_r5_cores;
}
child_devs[i] = &child_pdev->dev;
/* create and add remoteproc instance of type struct rproc */
r5_cores[i] = zynqmp_r5_add_rproc_core(&child_pdev->dev);
if (IS_ERR(r5_cores[i])) {
of_node_put(child);
ret = PTR_ERR(r5_cores[i]);
r5_cores[i] = NULL;
goto release_r5_cores;
}
/*
* If mailbox nodes are disabled using "status" property then
* setting up mailbox channels will fail.
*/
ipi = zynqmp_r5_setup_mbox(&child_pdev->dev);
if (ipi) {
r5_cores[i]->ipi = ipi;
ipi->r5_core = r5_cores[i];
}
/*
* If two child nodes are available in dts in lockstep mode,
* then ignore second child node.
*/
if (cluster_mode == LOCKSTEP_MODE) {
of_node_put(child);
break;
}
i++;
}
cluster->mode = cluster_mode;
cluster->core_count = core_count;
cluster->r5_cores = r5_cores;
ret = zynqmp_r5_core_init(cluster, fw_reg_val, tcm_mode);
if (ret < 0) {
dev_err(dev, "failed to init r5 core err %d\n", ret);
cluster->core_count = 0;
cluster->r5_cores = NULL;
/*
		 * At this point rproc resources for each core are allocated;
		 * adjust the index to free them in reverse order.
*/
i = core_count - 1;
goto release_r5_cores;
}
kfree(child_devs);
return 0;
release_r5_cores:
while (i >= 0) {
put_device(child_devs[i]);
if (r5_cores[i]) {
zynqmp_r5_free_mbox(r5_cores[i]->ipi);
of_reserved_mem_device_release(r5_cores[i]->dev);
rproc_del(r5_cores[i]->rproc);
rproc_free(r5_cores[i]->rproc);
}
i--;
}
kfree(r5_cores);
kfree(child_devs);
return ret;
}
static void zynqmp_r5_cluster_exit(void *data)
{
struct platform_device *pdev = data;
struct zynqmp_r5_cluster *cluster;
struct zynqmp_r5_core *r5_core;
int i;
cluster = platform_get_drvdata(pdev);
if (!cluster)
return;
for (i = 0; i < cluster->core_count; i++) {
r5_core = cluster->r5_cores[i];
zynqmp_r5_free_mbox(r5_core->ipi);
of_reserved_mem_device_release(r5_core->dev);
put_device(r5_core->dev);
rproc_del(r5_core->rproc);
rproc_free(r5_core->rproc);
}
kfree(cluster->r5_cores);
kfree(cluster);
platform_set_drvdata(pdev, NULL);
}
/*
* zynqmp_r5_remoteproc_probe()
* parse device-tree, initialize hardware and allocate required resources
* and remoteproc ops
*
* @pdev: domain platform device for R5 cluster
*
* Return: 0 for success and < 0 for failure.
*/
static int zynqmp_r5_remoteproc_probe(struct platform_device *pdev)
{
struct zynqmp_r5_cluster *cluster;
struct device *dev = &pdev->dev;
int ret;
cluster = kzalloc(sizeof(*cluster), GFP_KERNEL);
if (!cluster)
return -ENOMEM;
cluster->dev = dev;
ret = devm_of_platform_populate(dev);
if (ret) {
dev_err_probe(dev, ret, "failed to populate platform dev\n");
kfree(cluster);
return ret;
}
/* wire in so each core can be cleaned up at driver remove */
platform_set_drvdata(pdev, cluster);
ret = zynqmp_r5_cluster_init(cluster);
if (ret) {
kfree(cluster);
platform_set_drvdata(pdev, NULL);
dev_err_probe(dev, ret, "Invalid r5f subsystem device tree\n");
return ret;
}
ret = devm_add_action_or_reset(dev, zynqmp_r5_cluster_exit, pdev);
if (ret)
return ret;
return 0;
}
/* Match table for OF platform binding */
static const struct of_device_id zynqmp_r5_remoteproc_match[] = {
{ .compatible = "xlnx,zynqmp-r5fss", },
{ /* end of list */ },
};
MODULE_DEVICE_TABLE(of, zynqmp_r5_remoteproc_match);
static struct platform_driver zynqmp_r5_remoteproc_driver = {
.probe = zynqmp_r5_remoteproc_probe,
.driver = {
.name = "zynqmp_r5_remoteproc",
.of_match_table = zynqmp_r5_remoteproc_match,
},
};
module_platform_driver(zynqmp_r5_remoteproc_driver);
MODULE_DESCRIPTION("Xilinx R5F remote processor driver");
MODULE_AUTHOR("Xilinx Inc.");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/remoteproc/xlnx_r5_remoteproc.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Qualcomm Wireless Connectivity Subsystem Iris driver
*
* Copyright (C) 2016 Linaro Ltd
* Copyright (C) 2014 Sony Mobile Communications AB
* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*/
#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include "qcom_wcnss.h"
struct qcom_iris {
struct device dev;
struct clk *xo_clk;
struct regulator_bulk_data *vregs;
size_t num_vregs;
};
struct iris_data {
const struct wcnss_vreg_info *vregs;
size_t num_vregs;
bool use_48mhz_xo;
};
static const struct iris_data wcn3620_data = {
.vregs = (struct wcnss_vreg_info[]) {
{ "vddxo", 1800000, 1800000, 10000 },
{ "vddrfa", 1300000, 1300000, 100000 },
{ "vddpa", 3300000, 3300000, 515000 },
{ "vdddig", 1800000, 1800000, 10000 },
},
.num_vregs = 4,
.use_48mhz_xo = false,
};
static const struct iris_data wcn3660_data = {
.vregs = (struct wcnss_vreg_info[]) {
{ "vddxo", 1800000, 1800000, 10000 },
{ "vddrfa", 1300000, 1300000, 100000 },
{ "vddpa", 2900000, 3000000, 515000 },
{ "vdddig", 1200000, 1225000, 10000 },
},
.num_vregs = 4,
.use_48mhz_xo = true,
};
static const struct iris_data wcn3680_data = {
.vregs = (struct wcnss_vreg_info[]) {
{ "vddxo", 1800000, 1800000, 10000 },
{ "vddrfa", 1300000, 1300000, 100000 },
{ "vddpa", 3300000, 3300000, 515000 },
{ "vdddig", 1800000, 1800000, 10000 },
},
.num_vregs = 4,
.use_48mhz_xo = true,
};
int qcom_iris_enable(struct qcom_iris *iris)
{
int ret;
ret = regulator_bulk_enable(iris->num_vregs, iris->vregs);
if (ret)
return ret;
ret = clk_prepare_enable(iris->xo_clk);
if (ret) {
dev_err(&iris->dev, "failed to enable xo clk\n");
goto disable_regulators;
}
return 0;
disable_regulators:
regulator_bulk_disable(iris->num_vregs, iris->vregs);
return ret;
}
void qcom_iris_disable(struct qcom_iris *iris)
{
clk_disable_unprepare(iris->xo_clk);
regulator_bulk_disable(iris->num_vregs, iris->vregs);
}
static const struct of_device_id iris_of_match[] = {
{ .compatible = "qcom,wcn3620", .data = &wcn3620_data },
{ .compatible = "qcom,wcn3660", .data = &wcn3660_data },
{ .compatible = "qcom,wcn3660b", .data = &wcn3680_data },
{ .compatible = "qcom,wcn3680", .data = &wcn3680_data },
{}
};
static void qcom_iris_release(struct device *dev)
{
struct qcom_iris *iris = container_of(dev, struct qcom_iris, dev);
of_node_put(iris->dev.of_node);
kfree(iris);
}
struct qcom_iris *qcom_iris_probe(struct device *parent, bool *use_48mhz_xo)
{
const struct of_device_id *match;
const struct iris_data *data;
struct device_node *of_node;
struct qcom_iris *iris;
int ret;
int i;
of_node = of_get_child_by_name(parent->of_node, "iris");
if (!of_node) {
dev_err(parent, "No child node \"iris\" found\n");
return ERR_PTR(-EINVAL);
}
iris = kzalloc(sizeof(*iris), GFP_KERNEL);
if (!iris) {
of_node_put(of_node);
return ERR_PTR(-ENOMEM);
}
device_initialize(&iris->dev);
iris->dev.parent = parent;
iris->dev.release = qcom_iris_release;
iris->dev.of_node = of_node;
dev_set_name(&iris->dev, "%s.iris", dev_name(parent));
ret = device_add(&iris->dev);
if (ret) {
put_device(&iris->dev);
return ERR_PTR(ret);
}
match = of_match_device(iris_of_match, &iris->dev);
if (!match) {
dev_err(&iris->dev, "no matching compatible for iris\n");
ret = -EINVAL;
goto err_device_del;
}
data = match->data;
iris->xo_clk = devm_clk_get(&iris->dev, "xo");
if (IS_ERR(iris->xo_clk)) {
ret = PTR_ERR(iris->xo_clk);
if (ret != -EPROBE_DEFER)
dev_err(&iris->dev, "failed to acquire xo clk\n");
goto err_device_del;
}
iris->num_vregs = data->num_vregs;
iris->vregs = devm_kcalloc(&iris->dev,
iris->num_vregs,
sizeof(struct regulator_bulk_data),
GFP_KERNEL);
if (!iris->vregs) {
ret = -ENOMEM;
goto err_device_del;
}
for (i = 0; i < iris->num_vregs; i++)
iris->vregs[i].supply = data->vregs[i].name;
ret = devm_regulator_bulk_get(&iris->dev, iris->num_vregs, iris->vregs);
if (ret) {
dev_err(&iris->dev, "failed to get regulators\n");
goto err_device_del;
}
for (i = 0; i < iris->num_vregs; i++) {
if (data->vregs[i].max_voltage)
regulator_set_voltage(iris->vregs[i].consumer,
data->vregs[i].min_voltage,
data->vregs[i].max_voltage);
if (data->vregs[i].load_uA)
regulator_set_load(iris->vregs[i].consumer,
data->vregs[i].load_uA);
}
*use_48mhz_xo = data->use_48mhz_xo;
return iris;
err_device_del:
device_del(&iris->dev);
return ERR_PTR(ret);
}
void qcom_iris_remove(struct qcom_iris *iris)
{
device_del(&iris->dev);
}
|
linux-master
|
drivers/remoteproc/qcom_wcnss_iris.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics 2018 - All Rights Reserved
* Authors: Ludovic Barre <[email protected]> for STMicroelectronics.
* Fabien Dessenne <[email protected]> for STMicroelectronics.
*/
#include <linux/arm-smccc.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mailbox_client.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
#include <linux/regmap.h>
#include <linux/remoteproc.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "remoteproc_internal.h"
#define HOLD_BOOT 0
#define RELEASE_BOOT 1
#define MBOX_NB_VQ 2
#define MBOX_NB_MBX 4
#define STM32_SMC_RCC 0x82001000
#define STM32_SMC_REG_WRITE 0x1
#define STM32_MBX_VQ0 "vq0"
#define STM32_MBX_VQ0_ID 0
#define STM32_MBX_VQ1 "vq1"
#define STM32_MBX_VQ1_ID 1
#define STM32_MBX_SHUTDOWN "shutdown"
#define STM32_MBX_DETACH "detach"
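/* Fixed size (in bytes) reserved for the resource table in the coprocessor firmware */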
#define RSC_TBL_SIZE 1024
#define M4_STATE_OFF 0
#define M4_STATE_INI 1
#define M4_STATE_CRUN 2
#define M4_STATE_CSTOP 3
#define M4_STATE_STANDBY 4
#define M4_STATE_CRASH 5
struct stm32_syscon {
struct regmap *map;
u32 reg;
u32 mask;
};
struct stm32_rproc_mem {
char name[20];
void __iomem *cpu_addr;
phys_addr_t bus_addr;
u32 dev_addr;
size_t size;
};
struct stm32_rproc_mem_ranges {
u32 dev_addr;
u32 bus_addr;
u32 size;
};
struct stm32_mbox {
const unsigned char name[10];
struct mbox_chan *chan;
struct mbox_client client;
struct work_struct vq_work;
int vq_id;
};
struct stm32_rproc {
struct reset_control *rst;
struct reset_control *hold_boot_rst;
struct stm32_syscon hold_boot;
struct stm32_syscon pdds;
struct stm32_syscon m4_state;
struct stm32_syscon rsctbl;
int wdg_irq;
u32 nb_rmems;
struct stm32_rproc_mem *rmems;
struct stm32_mbox mb[MBOX_NB_MBX];
struct workqueue_struct *workqueue;
bool hold_boot_smc;
void __iomem *rsc_va;
};
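/*
 * Translate a physical (bus) address into a coprocessor device address using
 * the memory ranges parsed from the "dma-ranges" property.
 */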
static int stm32_rproc_pa_to_da(struct rproc *rproc, phys_addr_t pa, u64 *da)
{
unsigned int i;
struct stm32_rproc *ddata = rproc->priv;
struct stm32_rproc_mem *p_mem;
for (i = 0; i < ddata->nb_rmems; i++) {
p_mem = &ddata->rmems[i];
if (pa < p_mem->bus_addr ||
pa >= p_mem->bus_addr + p_mem->size)
continue;
*da = pa - p_mem->bus_addr + p_mem->dev_addr;
dev_dbg(rproc->dev.parent, "pa %pa to da %llx\n", &pa, *da);
return 0;
}
return -EINVAL;
}
static int stm32_rproc_mem_alloc(struct rproc *rproc,
struct rproc_mem_entry *mem)
{
struct device *dev = rproc->dev.parent;
void *va;
dev_dbg(dev, "map memory: %pad+%zx\n", &mem->dma, mem->len);
va = ioremap_wc(mem->dma, mem->len);
if (IS_ERR_OR_NULL(va)) {
dev_err(dev, "Unable to map memory region: %pad+0x%zx\n",
&mem->dma, mem->len);
return -ENOMEM;
}
/* Update memory entry va */
mem->va = va;
return 0;
}
static int stm32_rproc_mem_release(struct rproc *rproc,
struct rproc_mem_entry *mem)
{
dev_dbg(rproc->dev.parent, "unmap memory: %pa\n", &mem->dma);
iounmap(mem->va);
return 0;
}
static int stm32_rproc_of_memory_translations(struct platform_device *pdev,
struct stm32_rproc *ddata)
{
struct device *parent, *dev = &pdev->dev;
struct device_node *np;
struct stm32_rproc_mem *p_mems;
struct stm32_rproc_mem_ranges *mem_range;
int cnt, array_size, i, ret = 0;
parent = dev->parent;
np = parent->of_node;
cnt = of_property_count_elems_of_size(np, "dma-ranges",
sizeof(*mem_range));
if (cnt <= 0) {
dev_err(dev, "%s: dma-ranges property not defined\n", __func__);
return -EINVAL;
}
p_mems = devm_kcalloc(dev, cnt, sizeof(*p_mems), GFP_KERNEL);
if (!p_mems)
return -ENOMEM;
mem_range = kcalloc(cnt, sizeof(*mem_range), GFP_KERNEL);
if (!mem_range)
return -ENOMEM;
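	/* "dma-ranges" is read as triplets of u32 cells: <device address, bus address, size> */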
array_size = cnt * sizeof(struct stm32_rproc_mem_ranges) / sizeof(u32);
ret = of_property_read_u32_array(np, "dma-ranges",
(u32 *)mem_range, array_size);
if (ret) {
dev_err(dev, "error while get dma-ranges property: %x\n", ret);
goto free_mem;
}
for (i = 0; i < cnt; i++) {
p_mems[i].bus_addr = mem_range[i].bus_addr;
p_mems[i].dev_addr = mem_range[i].dev_addr;
p_mems[i].size = mem_range[i].size;
dev_dbg(dev, "memory range[%i]: da %#x, pa %pa, size %#zx:\n",
i, p_mems[i].dev_addr, &p_mems[i].bus_addr,
p_mems[i].size);
}
ddata->rmems = p_mems;
ddata->nb_rmems = cnt;
free_mem:
kfree(mem_range);
return ret;
}
static int stm32_rproc_mbox_idx(struct rproc *rproc, const unsigned char *name)
{
struct stm32_rproc *ddata = rproc->priv;
int i;
for (i = 0; i < ARRAY_SIZE(ddata->mb); i++) {
if (!strncmp(ddata->mb[i].name, name, strlen(name)))
return i;
}
dev_err(&rproc->dev, "mailbox %s not found\n", name);
return -EINVAL;
}
static int stm32_rproc_prepare(struct rproc *rproc)
{
struct device *dev = rproc->dev.parent;
struct device_node *np = dev->of_node;
struct of_phandle_iterator it;
struct rproc_mem_entry *mem;
struct reserved_mem *rmem;
u64 da;
int index = 0;
/* Register associated reserved memory regions */
of_phandle_iterator_init(&it, np, "memory-region", NULL, 0);
while (of_phandle_iterator_next(&it) == 0) {
rmem = of_reserved_mem_lookup(it.node);
if (!rmem) {
of_node_put(it.node);
dev_err(dev, "unable to acquire memory-region\n");
return -EINVAL;
}
if (stm32_rproc_pa_to_da(rproc, rmem->base, &da) < 0) {
of_node_put(it.node);
dev_err(dev, "memory region not valid %pa\n",
&rmem->base);
return -EINVAL;
}
/* No need to map vdev buffer */
if (strcmp(it.node->name, "vdev0buffer")) {
/* Register memory region */
mem = rproc_mem_entry_init(dev, NULL,
(dma_addr_t)rmem->base,
rmem->size, da,
stm32_rproc_mem_alloc,
stm32_rproc_mem_release,
it.node->name);
if (mem)
rproc_coredump_add_segment(rproc, da,
rmem->size);
} else {
/* Register reserved memory for vdev buffer alloc */
mem = rproc_of_resm_mem_entry_init(dev, index,
rmem->size,
rmem->base,
it.node->name);
}
if (!mem) {
of_node_put(it.node);
return -ENOMEM;
}
rproc_add_carveout(rproc, mem);
index++;
}
return 0;
}
static int stm32_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
{
if (rproc_elf_load_rsc_table(rproc, fw))
dev_warn(&rproc->dev, "no resource table found for this firmware\n");
return 0;
}
static irqreturn_t stm32_rproc_wdg(int irq, void *data)
{
struct platform_device *pdev = data;
struct rproc *rproc = platform_get_drvdata(pdev);
rproc_report_crash(rproc, RPROC_WATCHDOG);
return IRQ_HANDLED;
}
static void stm32_rproc_mb_vq_work(struct work_struct *work)
{
struct stm32_mbox *mb = container_of(work, struct stm32_mbox, vq_work);
struct rproc *rproc = dev_get_drvdata(mb->client.dev);
mutex_lock(&rproc->lock);
if (rproc->state != RPROC_RUNNING)
goto unlock_mutex;
if (rproc_vq_interrupt(rproc, mb->vq_id) == IRQ_NONE)
dev_dbg(&rproc->dev, "no message found in vq%d\n", mb->vq_id);
unlock_mutex:
mutex_unlock(&rproc->lock);
}
static void stm32_rproc_mb_callback(struct mbox_client *cl, void *data)
{
struct rproc *rproc = dev_get_drvdata(cl->dev);
struct stm32_mbox *mb = container_of(cl, struct stm32_mbox, client);
struct stm32_rproc *ddata = rproc->priv;
queue_work(ddata->workqueue, &mb->vq_work);
}
static void stm32_rproc_free_mbox(struct rproc *rproc)
{
struct stm32_rproc *ddata = rproc->priv;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(ddata->mb); i++) {
if (ddata->mb[i].chan)
mbox_free_channel(ddata->mb[i].chan);
ddata->mb[i].chan = NULL;
}
}
static const struct stm32_mbox stm32_rproc_mbox[MBOX_NB_MBX] = {
{
.name = STM32_MBX_VQ0,
.vq_id = STM32_MBX_VQ0_ID,
.client = {
.rx_callback = stm32_rproc_mb_callback,
.tx_block = false,
},
},
{
.name = STM32_MBX_VQ1,
.vq_id = STM32_MBX_VQ1_ID,
.client = {
.rx_callback = stm32_rproc_mb_callback,
.tx_block = false,
},
},
{
.name = STM32_MBX_SHUTDOWN,
.vq_id = -1,
.client = {
.tx_block = true,
.tx_done = NULL,
.tx_tout = 500, /* 500 ms time out */
},
},
{
.name = STM32_MBX_DETACH,
.vq_id = -1,
.client = {
.tx_block = true,
.tx_done = NULL,
.tx_tout = 200, /* 200 ms time out to detach should be fair enough */
},
}
};
static int stm32_rproc_request_mbox(struct rproc *rproc)
{
struct stm32_rproc *ddata = rproc->priv;
struct device *dev = &rproc->dev;
unsigned int i;
int j;
const unsigned char *name;
struct mbox_client *cl;
/* Initialise mailbox structure table */
memcpy(ddata->mb, stm32_rproc_mbox, sizeof(stm32_rproc_mbox));
for (i = 0; i < MBOX_NB_MBX; i++) {
name = ddata->mb[i].name;
cl = &ddata->mb[i].client;
cl->dev = dev->parent;
ddata->mb[i].chan = mbox_request_channel_byname(cl, name);
if (IS_ERR(ddata->mb[i].chan)) {
if (PTR_ERR(ddata->mb[i].chan) == -EPROBE_DEFER) {
dev_err_probe(dev->parent,
PTR_ERR(ddata->mb[i].chan),
"failed to request mailbox %s\n",
name);
goto err_probe;
}
dev_warn(dev, "cannot get %s mbox\n", name);
ddata->mb[i].chan = NULL;
}
if (ddata->mb[i].vq_id >= 0) {
INIT_WORK(&ddata->mb[i].vq_work,
stm32_rproc_mb_vq_work);
}
}
return 0;
err_probe:
for (j = i - 1; j >= 0; j--)
if (ddata->mb[j].chan)
mbox_free_channel(ddata->mb[j].chan);
return -EPROBE_DEFER;
}
static int stm32_rproc_set_hold_boot(struct rproc *rproc, bool hold)
{
struct stm32_rproc *ddata = rproc->priv;
struct stm32_syscon hold_boot = ddata->hold_boot;
struct arm_smccc_res smc_res;
int val, err;
/*
* Three ways to manage the hold boot
* - using SCMI: the hold boot is managed as a reset,
* - using Linux(no SCMI): the hold boot is managed as a syscon register
* - using SMC call (deprecated): use SMC reset interface
*/
val = hold ? HOLD_BOOT : RELEASE_BOOT;
if (ddata->hold_boot_rst) {
/* Use the SCMI reset controller */
if (!hold)
err = reset_control_deassert(ddata->hold_boot_rst);
else
err = reset_control_assert(ddata->hold_boot_rst);
} else if (IS_ENABLED(CONFIG_HAVE_ARM_SMCCC) && ddata->hold_boot_smc) {
/* Use the SMC call */
arm_smccc_smc(STM32_SMC_RCC, STM32_SMC_REG_WRITE,
hold_boot.reg, val, 0, 0, 0, 0, &smc_res);
err = smc_res.a0;
} else {
/* Use syscon */
err = regmap_update_bits(hold_boot.map, hold_boot.reg,
hold_boot.mask, val);
}
if (err)
dev_err(&rproc->dev, "failed to set hold boot\n");
return err;
}
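/* Add each trace buffer as a coredump segment unless a segment with that da already exists. */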
static void stm32_rproc_add_coredump_trace(struct rproc *rproc)
{
struct rproc_debug_trace *trace;
struct rproc_dump_segment *segment;
bool already_added;
list_for_each_entry(trace, &rproc->traces, node) {
already_added = false;
list_for_each_entry(segment, &rproc->dump_segments, node) {
if (segment->da == trace->trace_mem.da) {
already_added = true;
break;
}
}
if (!already_added)
rproc_coredump_add_segment(rproc, trace->trace_mem.da,
trace->trace_mem.len);
}
}
static int stm32_rproc_start(struct rproc *rproc)
{
struct stm32_rproc *ddata = rproc->priv;
int err;
stm32_rproc_add_coredump_trace(rproc);
/* clear remote proc Deep Sleep */
if (ddata->pdds.map) {
err = regmap_update_bits(ddata->pdds.map, ddata->pdds.reg,
ddata->pdds.mask, 0);
if (err) {
dev_err(&rproc->dev, "failed to clear pdds\n");
return err;
}
}
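	/*
	 * Releasing the hold boot lets the Cortex-M4 start executing; it is
	 * re-asserted right away so the core stays held on a subsequent reset.
	 */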
err = stm32_rproc_set_hold_boot(rproc, false);
if (err)
return err;
return stm32_rproc_set_hold_boot(rproc, true);
}
static int stm32_rproc_attach(struct rproc *rproc)
{
stm32_rproc_add_coredump_trace(rproc);
return stm32_rproc_set_hold_boot(rproc, true);
}
static int stm32_rproc_detach(struct rproc *rproc)
{
struct stm32_rproc *ddata = rproc->priv;
int err, idx;
/* Inform the remote processor of the detach */
idx = stm32_rproc_mbox_idx(rproc, STM32_MBX_DETACH);
if (idx >= 0 && ddata->mb[idx].chan) {
err = mbox_send_message(ddata->mb[idx].chan, "stop");
if (err < 0)
dev_warn(&rproc->dev, "warning: remote FW detach without ack\n");
}
/* Allow remote processor to auto-reboot */
return stm32_rproc_set_hold_boot(rproc, false);
}
static int stm32_rproc_stop(struct rproc *rproc)
{
struct stm32_rproc *ddata = rproc->priv;
int err, idx;
/* request shutdown of the remote processor */
if (rproc->state != RPROC_OFFLINE && rproc->state != RPROC_CRASHED) {
idx = stm32_rproc_mbox_idx(rproc, STM32_MBX_SHUTDOWN);
if (idx >= 0 && ddata->mb[idx].chan) {
err = mbox_send_message(ddata->mb[idx].chan, "detach");
if (err < 0)
dev_warn(&rproc->dev, "warning: remote FW shutdown without ack\n");
}
}
err = stm32_rproc_set_hold_boot(rproc, true);
if (err)
return err;
err = reset_control_assert(ddata->rst);
if (err) {
dev_err(&rproc->dev, "failed to assert the reset\n");
return err;
}
/* to allow platform Standby power mode, set remote proc Deep Sleep */
if (ddata->pdds.map) {
err = regmap_update_bits(ddata->pdds.map, ddata->pdds.reg,
ddata->pdds.mask, 1);
if (err) {
dev_err(&rproc->dev, "failed to set pdds\n");
return err;
}
}
/* update coprocessor state to OFF if available */
if (ddata->m4_state.map) {
err = regmap_update_bits(ddata->m4_state.map,
ddata->m4_state.reg,
ddata->m4_state.mask,
M4_STATE_OFF);
if (err) {
dev_err(&rproc->dev, "failed to set copro state\n");
return err;
}
}
return 0;
}
static void stm32_rproc_kick(struct rproc *rproc, int vqid)
{
struct stm32_rproc *ddata = rproc->priv;
unsigned int i;
int err;
if (WARN_ON(vqid >= MBOX_NB_VQ))
return;
for (i = 0; i < MBOX_NB_MBX; i++) {
if (vqid != ddata->mb[i].vq_id)
continue;
if (!ddata->mb[i].chan)
return;
err = mbox_send_message(ddata->mb[i].chan, "kick");
if (err < 0)
dev_err(&rproc->dev, "%s: failed (%s, err:%d)\n",
__func__, ddata->mb[i].name, err);
return;
}
}
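/* Reverse of stm32_rproc_pa_to_da(): translate a device address back to a bus address. */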
static int stm32_rproc_da_to_pa(struct rproc *rproc,
u64 da, phys_addr_t *pa)
{
struct stm32_rproc *ddata = rproc->priv;
struct device *dev = rproc->dev.parent;
struct stm32_rproc_mem *p_mem;
unsigned int i;
for (i = 0; i < ddata->nb_rmems; i++) {
p_mem = &ddata->rmems[i];
if (da < p_mem->dev_addr ||
da >= p_mem->dev_addr + p_mem->size)
continue;
*pa = da - p_mem->dev_addr + p_mem->bus_addr;
dev_dbg(dev, "da %llx to pa %pap\n", da, pa);
return 0;
}
dev_err(dev, "can't translate da %llx\n", da);
return -EINVAL;
}
static struct resource_table *
stm32_rproc_get_loaded_rsc_table(struct rproc *rproc, size_t *table_sz)
{
struct stm32_rproc *ddata = rproc->priv;
struct device *dev = rproc->dev.parent;
phys_addr_t rsc_pa;
u32 rsc_da;
int err;
/* The resource table has already been mapped, nothing to do */
if (ddata->rsc_va)
goto done;
err = regmap_read(ddata->rsctbl.map, ddata->rsctbl.reg, &rsc_da);
if (err) {
dev_err(dev, "failed to read rsc tbl addr\n");
return ERR_PTR(-EINVAL);
}
if (!rsc_da)
/* no rsc table */
return ERR_PTR(-ENOENT);
err = stm32_rproc_da_to_pa(rproc, rsc_da, &rsc_pa);
if (err)
return ERR_PTR(err);
ddata->rsc_va = devm_ioremap_wc(dev, rsc_pa, RSC_TBL_SIZE);
if (IS_ERR_OR_NULL(ddata->rsc_va)) {
dev_err(dev, "Unable to map memory region: %pa+%x\n",
&rsc_pa, RSC_TBL_SIZE);
ddata->rsc_va = NULL;
return ERR_PTR(-ENOMEM);
}
done:
/*
	 * Assuming the resource table fits in 1 kB is reasonable.
	 * Note that, for detach support, this 1 kB memory area has to be reserved in the coprocessor
* firmware for the resource table. On detach, the remoteproc core re-initializes this
* entire area by overwriting it with the initial values stored in rproc->clean_table.
*/
*table_sz = RSC_TBL_SIZE;
return (struct resource_table *)ddata->rsc_va;
}
static const struct rproc_ops st_rproc_ops = {
.prepare = stm32_rproc_prepare,
.start = stm32_rproc_start,
.stop = stm32_rproc_stop,
.attach = stm32_rproc_attach,
.detach = stm32_rproc_detach,
.kick = stm32_rproc_kick,
.load = rproc_elf_load_segments,
.parse_fw = stm32_rproc_parse_fw,
.find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
.get_loaded_rsc_table = stm32_rproc_get_loaded_rsc_table,
.sanity_check = rproc_elf_sanity_check,
.get_boot_addr = rproc_elf_get_boot_addr,
};
static const struct of_device_id stm32_rproc_match[] = {
{ .compatible = "st,stm32mp1-m4" },
{},
};
MODULE_DEVICE_TABLE(of, stm32_rproc_match);
static int stm32_rproc_get_syscon(struct device_node *np, const char *prop,
struct stm32_syscon *syscon)
{
int err = 0;
syscon->map = syscon_regmap_lookup_by_phandle(np, prop);
if (IS_ERR(syscon->map)) {
err = PTR_ERR(syscon->map);
syscon->map = NULL;
goto out;
}
err = of_property_read_u32_index(np, prop, 1, &syscon->reg);
if (err)
goto out;
err = of_property_read_u32_index(np, prop, 2, &syscon->mask);
out:
return err;
}
static int stm32_rproc_parse_dt(struct platform_device *pdev,
struct stm32_rproc *ddata, bool *auto_boot)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct stm32_syscon tz;
unsigned int tzen;
int err, irq;
irq = platform_get_irq(pdev, 0);
if (irq == -EPROBE_DEFER)
return dev_err_probe(dev, irq, "failed to get interrupt\n");
if (irq > 0) {
err = devm_request_irq(dev, irq, stm32_rproc_wdg, 0,
dev_name(dev), pdev);
if (err)
return dev_err_probe(dev, err,
"failed to request wdg irq\n");
ddata->wdg_irq = irq;
if (of_property_read_bool(np, "wakeup-source")) {
device_init_wakeup(dev, true);
dev_pm_set_wake_irq(dev, irq);
}
dev_info(dev, "wdg irq registered\n");
}
ddata->rst = devm_reset_control_get_optional(dev, "mcu_rst");
if (!ddata->rst) {
/* Try legacy fallback method: get it by index */
ddata->rst = devm_reset_control_get_by_index(dev, 0);
}
if (IS_ERR(ddata->rst))
return dev_err_probe(dev, PTR_ERR(ddata->rst),
"failed to get mcu_reset\n");
/*
* Three ways to manage the hold boot
* - using SCMI: the hold boot is managed as a reset
* The DT "reset-mames" property should be defined with 2 items:
* reset-names = "mcu_rst", "hold_boot";
* - using SMC call (deprecated): use SMC reset interface
* The DT "reset-mames" property is optional, "st,syscfg-tz" is required
* - default(no SCMI, no SMC): the hold boot is managed as a syscon register
* The DT "reset-mames" property is optional, "st,syscfg-holdboot" is required
*/
ddata->hold_boot_rst = devm_reset_control_get_optional(dev, "hold_boot");
if (IS_ERR(ddata->hold_boot_rst))
return dev_err_probe(dev, PTR_ERR(ddata->hold_boot_rst),
"failed to get hold_boot reset\n");
if (!ddata->hold_boot_rst && IS_ENABLED(CONFIG_HAVE_ARM_SMCCC)) {
/* Manage the MCU_BOOT using SMC call */
err = stm32_rproc_get_syscon(np, "st,syscfg-tz", &tz);
if (!err) {
err = regmap_read(tz.map, tz.reg, &tzen);
if (err) {
dev_err(dev, "failed to read tzen\n");
return err;
}
ddata->hold_boot_smc = tzen & tz.mask;
}
}
if (!ddata->hold_boot_rst && !ddata->hold_boot_smc) {
		/* Default: manage the hold boot through the syscon controller */
err = stm32_rproc_get_syscon(np, "st,syscfg-holdboot",
&ddata->hold_boot);
if (err) {
dev_err(dev, "failed to get hold boot\n");
return err;
}
}
err = stm32_rproc_get_syscon(np, "st,syscfg-pdds", &ddata->pdds);
if (err)
dev_info(dev, "failed to get pdds\n");
*auto_boot = of_property_read_bool(np, "st,auto-boot");
/*
* See if we can check the M4 status, i.e if it was started
* from the boot loader or not.
*/
err = stm32_rproc_get_syscon(np, "st,syscfg-m4-state",
&ddata->m4_state);
if (err) {
		/* remember that the m4 state syscon is unavailable */
ddata->m4_state.map = NULL;
/* no coprocessor state syscon (optional) */
dev_warn(dev, "m4 state not supported\n");
/* no need to go further */
return 0;
}
/* See if we can get the resource table */
err = stm32_rproc_get_syscon(np, "st,syscfg-rsc-tbl",
&ddata->rsctbl);
if (err) {
/* no rsc table syscon (optional) */
dev_warn(dev, "rsc tbl syscon not supported\n");
}
return 0;
}
static int stm32_rproc_get_m4_status(struct stm32_rproc *ddata,
unsigned int *state)
{
/* See stm32_rproc_parse_dt() */
if (!ddata->m4_state.map) {
/*
* We couldn't get the coprocessor's state, assume
* it is not running.
*/
*state = M4_STATE_OFF;
return 0;
}
return regmap_read(ddata->m4_state.map, ddata->m4_state.reg, state);
}
static int stm32_rproc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct stm32_rproc *ddata;
struct device_node *np = dev->of_node;
struct rproc *rproc;
unsigned int state;
int ret;
ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret)
return ret;
rproc = rproc_alloc(dev, np->name, &st_rproc_ops, NULL, sizeof(*ddata));
if (!rproc)
return -ENOMEM;
ddata = rproc->priv;
rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
ret = stm32_rproc_parse_dt(pdev, ddata, &rproc->auto_boot);
if (ret)
goto free_rproc;
ret = stm32_rproc_of_memory_translations(pdev, ddata);
if (ret)
goto free_rproc;
ret = stm32_rproc_get_m4_status(ddata, &state);
if (ret)
goto free_rproc;
if (state == M4_STATE_CRUN)
rproc->state = RPROC_DETACHED;
rproc->has_iommu = false;
ddata->workqueue = create_workqueue(dev_name(dev));
if (!ddata->workqueue) {
dev_err(dev, "cannot create workqueue\n");
ret = -ENOMEM;
goto free_resources;
}
platform_set_drvdata(pdev, rproc);
ret = stm32_rproc_request_mbox(rproc);
if (ret)
goto free_wkq;
ret = rproc_add(rproc);
if (ret)
goto free_mb;
return 0;
free_mb:
stm32_rproc_free_mbox(rproc);
free_wkq:
destroy_workqueue(ddata->workqueue);
free_resources:
rproc_resource_cleanup(rproc);
free_rproc:
if (device_may_wakeup(dev)) {
dev_pm_clear_wake_irq(dev);
device_init_wakeup(dev, false);
}
rproc_free(rproc);
return ret;
}
static void stm32_rproc_remove(struct platform_device *pdev)
{
struct rproc *rproc = platform_get_drvdata(pdev);
struct stm32_rproc *ddata = rproc->priv;
struct device *dev = &pdev->dev;
if (atomic_read(&rproc->power) > 0)
rproc_shutdown(rproc);
rproc_del(rproc);
stm32_rproc_free_mbox(rproc);
destroy_workqueue(ddata->workqueue);
if (device_may_wakeup(dev)) {
dev_pm_clear_wake_irq(dev);
device_init_wakeup(dev, false);
}
rproc_free(rproc);
}
static int stm32_rproc_suspend(struct device *dev)
{
struct rproc *rproc = dev_get_drvdata(dev);
struct stm32_rproc *ddata = rproc->priv;
if (device_may_wakeup(dev))
return enable_irq_wake(ddata->wdg_irq);
return 0;
}
static int stm32_rproc_resume(struct device *dev)
{
struct rproc *rproc = dev_get_drvdata(dev);
struct stm32_rproc *ddata = rproc->priv;
if (device_may_wakeup(dev))
return disable_irq_wake(ddata->wdg_irq);
return 0;
}
static DEFINE_SIMPLE_DEV_PM_OPS(stm32_rproc_pm_ops,
stm32_rproc_suspend, stm32_rproc_resume);
static struct platform_driver stm32_rproc_driver = {
.probe = stm32_rproc_probe,
.remove_new = stm32_rproc_remove,
.driver = {
.name = "stm32-rproc",
.pm = pm_ptr(&stm32_rproc_pm_ops),
.of_match_table = stm32_rproc_match,
},
};
module_platform_driver(stm32_rproc_driver);
MODULE_DESCRIPTION("STM32 Remote Processor Control Driver");
MODULE_AUTHOR("Ludovic Barre <[email protected]>");
MODULE_AUTHOR("Fabien Dessenne <[email protected]>");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/remoteproc/stm32_rproc.c
|
// SPDX-License-Identifier: GPL-2.0+
/*
* Ingenic JZ47xx remoteproc driver
* Copyright 2019, Paul Cercueil <[email protected]>
*/
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/remoteproc.h>
#include "remoteproc_internal.h"
#define REG_AUX_CTRL 0x0
#define REG_AUX_MSG_ACK 0x10
#define REG_AUX_MSG 0x14
#define REG_CORE_MSG_ACK 0x18
#define REG_CORE_MSG 0x1C
#define AUX_CTRL_SLEEP BIT(31)
#define AUX_CTRL_MSG_IRQ_EN BIT(3)
#define AUX_CTRL_NMI_RESETS BIT(2)
#define AUX_CTRL_NMI BIT(1)
#define AUX_CTRL_SW_RESET BIT(0)
static bool auto_boot;
module_param(auto_boot, bool, 0400);
MODULE_PARM_DESC(auto_boot,
"Auto-boot the remote processor [default=false]");
struct vpu_mem_map {
const char *name;
unsigned int da;
};
struct vpu_mem_info {
const struct vpu_mem_map *map;
unsigned long len;
void __iomem *base;
};
static const struct vpu_mem_map vpu_mem_map[] = {
{ "tcsm0", 0x132b0000 },
{ "tcsm1", 0xf4000000 },
{ "sram", 0x132f0000 },
};
/**
* struct vpu - Ingenic VPU remoteproc private structure
* @irq: interrupt number
* @clks: pointers to the VPU and AUX clocks
* @aux_base: raw pointer to the AUX interface registers
* @mem_info: array of struct vpu_mem_info, which contain the mapping info of
* each of the external memories
* @dev: private pointer to the device
*/
struct vpu {
int irq;
struct clk_bulk_data clks[2];
void __iomem *aux_base;
struct vpu_mem_info mem_info[ARRAY_SIZE(vpu_mem_map)];
struct device *dev;
};
static int ingenic_rproc_prepare(struct rproc *rproc)
{
struct vpu *vpu = rproc->priv;
int ret;
/* The clocks must be enabled for the firmware to be loaded in TCSM */
ret = clk_bulk_prepare_enable(ARRAY_SIZE(vpu->clks), vpu->clks);
if (ret)
dev_err(vpu->dev, "Unable to start clocks: %d\n", ret);
return ret;
}
static int ingenic_rproc_unprepare(struct rproc *rproc)
{
struct vpu *vpu = rproc->priv;
clk_bulk_disable_unprepare(ARRAY_SIZE(vpu->clks), vpu->clks);
return 0;
}
static int ingenic_rproc_start(struct rproc *rproc)
{
struct vpu *vpu = rproc->priv;
u32 ctrl;
enable_irq(vpu->irq);
/* Reset the AUX and enable message IRQ */
ctrl = AUX_CTRL_NMI_RESETS | AUX_CTRL_NMI | AUX_CTRL_MSG_IRQ_EN;
writel(ctrl, vpu->aux_base + REG_AUX_CTRL);
return 0;
}
static int ingenic_rproc_stop(struct rproc *rproc)
{
struct vpu *vpu = rproc->priv;
disable_irq(vpu->irq);
/* Keep AUX in reset mode */
writel(AUX_CTRL_SW_RESET, vpu->aux_base + REG_AUX_CTRL);
return 0;
}
static void ingenic_rproc_kick(struct rproc *rproc, int vqid)
{
struct vpu *vpu = rproc->priv;
writel(vqid, vpu->aux_base + REG_CORE_MSG);
}
static void *ingenic_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct vpu *vpu = rproc->priv;
void __iomem *va = NULL;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(vpu_mem_map); i++) {
const struct vpu_mem_info *info = &vpu->mem_info[i];
const struct vpu_mem_map *map = info->map;
if (da >= map->da && (da + len) < (map->da + info->len)) {
va = info->base + (da - map->da);
break;
}
}
return (__force void *)va;
}
static const struct rproc_ops ingenic_rproc_ops = {
.prepare = ingenic_rproc_prepare,
.unprepare = ingenic_rproc_unprepare,
.start = ingenic_rproc_start,
.stop = ingenic_rproc_stop,
.kick = ingenic_rproc_kick,
.da_to_va = ingenic_rproc_da_to_va,
};
static irqreturn_t vpu_interrupt(int irq, void *data)
{
struct rproc *rproc = data;
struct vpu *vpu = rproc->priv;
u32 vring;
vring = readl(vpu->aux_base + REG_AUX_MSG);
/* Ack the interrupt */
writel(0, vpu->aux_base + REG_AUX_MSG_ACK);
return rproc_vq_interrupt(rproc, vring);
}
static int ingenic_rproc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *mem;
struct rproc *rproc;
struct vpu *vpu;
unsigned int i;
int ret;
rproc = devm_rproc_alloc(dev, "ingenic-vpu",
&ingenic_rproc_ops, NULL, sizeof(*vpu));
if (!rproc)
return -ENOMEM;
rproc->auto_boot = auto_boot;
vpu = rproc->priv;
vpu->dev = &pdev->dev;
platform_set_drvdata(pdev, vpu);
mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "aux");
vpu->aux_base = devm_ioremap_resource(dev, mem);
if (IS_ERR(vpu->aux_base)) {
dev_err(dev, "Failed to ioremap\n");
return PTR_ERR(vpu->aux_base);
}
for (i = 0; i < ARRAY_SIZE(vpu_mem_map); i++) {
mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
vpu_mem_map[i].name);
vpu->mem_info[i].base = devm_ioremap_resource(dev, mem);
if (IS_ERR(vpu->mem_info[i].base)) {
ret = PTR_ERR(vpu->mem_info[i].base);
dev_err(dev, "Failed to ioremap\n");
return ret;
}
vpu->mem_info[i].len = resource_size(mem);
vpu->mem_info[i].map = &vpu_mem_map[i];
}
vpu->clks[0].id = "vpu";
vpu->clks[1].id = "aux";
ret = devm_clk_bulk_get(dev, ARRAY_SIZE(vpu->clks), vpu->clks);
if (ret) {
dev_err(dev, "Failed to get clocks\n");
return ret;
}
vpu->irq = platform_get_irq(pdev, 0);
if (vpu->irq < 0)
return vpu->irq;
ret = devm_request_irq(dev, vpu->irq, vpu_interrupt, IRQF_NO_AUTOEN,
"VPU", rproc);
if (ret < 0) {
dev_err(dev, "Failed to request IRQ\n");
return ret;
}
ret = devm_rproc_add(dev, rproc);
if (ret) {
dev_err(dev, "Failed to register remote processor\n");
return ret;
}
return 0;
}
static const struct of_device_id ingenic_rproc_of_matches[] = {
{ .compatible = "ingenic,jz4770-vpu-rproc", },
{}
};
MODULE_DEVICE_TABLE(of, ingenic_rproc_of_matches);
static struct platform_driver ingenic_rproc_driver = {
.probe = ingenic_rproc_probe,
.driver = {
.name = "ingenic-vpu",
.of_match_table = ingenic_rproc_of_matches,
},
};
module_platform_driver(ingenic_rproc_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul Cercueil <[email protected]>");
MODULE_DESCRIPTION("Ingenic JZ47xx Remote Processor control driver");
|
linux-master
|
drivers/remoteproc/ingenic_rproc.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Remote processor machine-specific module for DA8XX
*
* Copyright (C) 2013 Texas Instruments, Inc.
*/
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/reset.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/remoteproc.h>
#include "remoteproc_internal.h"
static char *da8xx_fw_name;
module_param(da8xx_fw_name, charp, 0444);
MODULE_PARM_DESC(da8xx_fw_name,
"Name of DSP firmware file in /lib/firmware (if not specified defaults to 'rproc-dsp-fw')");
/*
* OMAP-L138 Technical References:
* http://www.ti.com/product/omap-l138
*/
#define SYSCFG_CHIPSIG0 BIT(0)
#define SYSCFG_CHIPSIG1 BIT(1)
#define SYSCFG_CHIPSIG2 BIT(2)
#define SYSCFG_CHIPSIG3 BIT(3)
#define SYSCFG_CHIPSIG4 BIT(4)
#define DA8XX_RPROC_LOCAL_ADDRESS_MASK (SZ_16M - 1)
/**
* struct da8xx_rproc_mem - internal memory structure
* @cpu_addr: MPU virtual address of the memory region
* @bus_addr: Bus address used to access the memory region
* @dev_addr: Device address of the memory region from DSP view
* @size: Size of the memory region
*/
struct da8xx_rproc_mem {
void __iomem *cpu_addr;
phys_addr_t bus_addr;
u32 dev_addr;
size_t size;
};
/**
* struct da8xx_rproc - da8xx remote processor instance state
* @rproc: rproc handle
* @mem: internal memory regions data
* @num_mems: number of internal memory regions
* @dsp_clk: placeholder for platform's DSP clk
* @dsp_reset: reset control for the DSP
* @ack_fxn: chip-specific ack function for ack'ing irq
* @irq_data: ack_fxn function parameter
* @chipsig: virt ptr to DSP interrupt registers (CHIPSIG & CHIPSIG_CLR)
* @bootreg: virt ptr to DSP boot address register (HOST1CFG)
* @irq: irq # used by this instance
*/
struct da8xx_rproc {
struct rproc *rproc;
struct da8xx_rproc_mem *mem;
int num_mems;
struct clk *dsp_clk;
struct reset_control *dsp_reset;
void (*ack_fxn)(struct irq_data *data);
struct irq_data *irq_data;
void __iomem *chipsig;
void __iomem *bootreg;
int irq;
};
/**
* handle_event() - inbound virtqueue message handler thread function
*
* This function is the threaded half of the IRQ handler: the kernel schedules
* it whenever da8xx_rproc_callback() returns IRQ_WAKE_THREAD.
*/
static irqreturn_t handle_event(int irq, void *p)
{
struct rproc *rproc = p;
/* Process incoming buffers on all our vrings */
rproc_vq_interrupt(rproc, 0);
rproc_vq_interrupt(rproc, 1);
return IRQ_HANDLED;
}
/**
* da8xx_rproc_callback() - inbound virtqueue message handler
*
* This handler is invoked directly by the kernel whenever the remote
* core (DSP) has modified the state of a virtqueue. There is no
* "payload" message indicating the virtqueue index as is the case with
* mailbox-based implementations on OMAP4. As such, this handler "polls"
* each known virtqueue index for every invocation.
*/
static irqreturn_t da8xx_rproc_callback(int irq, void *p)
{
struct rproc *rproc = p;
struct da8xx_rproc *drproc = rproc->priv;
u32 chipsig;
chipsig = readl(drproc->chipsig);
if (chipsig & SYSCFG_CHIPSIG0) {
/* Clear interrupt level source */
writel(SYSCFG_CHIPSIG0, drproc->chipsig + 4);
/*
* ACK intr to AINTC.
*
* It has already been ack'ed by the kernel before calling
* this function, but since the ARM<->DSP interrupts in the
* CHIPSIG register are "level" instead of "pulse" variety,
* we need to ack it after taking down the level else we'll
* be called again immediately after returning.
*/
drproc->ack_fxn(drproc->irq_data);
return IRQ_WAKE_THREAD;
}
return IRQ_HANDLED;
}
static int da8xx_rproc_start(struct rproc *rproc)
{
struct device *dev = rproc->dev.parent;
struct da8xx_rproc *drproc = rproc->priv;
struct clk *dsp_clk = drproc->dsp_clk;
struct reset_control *dsp_reset = drproc->dsp_reset;
int ret;
/* hw requires the start (boot) address be on 1KB boundary */
if (rproc->bootaddr & 0x3ff) {
dev_err(dev, "invalid boot address: must be aligned to 1KB\n");
return -EINVAL;
}
writel(rproc->bootaddr, drproc->bootreg);
ret = clk_prepare_enable(dsp_clk);
if (ret) {
dev_err(dev, "clk_prepare_enable() failed: %d\n", ret);
return ret;
}
ret = reset_control_deassert(dsp_reset);
if (ret) {
dev_err(dev, "reset_control_deassert() failed: %d\n", ret);
clk_disable_unprepare(dsp_clk);
return ret;
}
return 0;
}
static int da8xx_rproc_stop(struct rproc *rproc)
{
struct da8xx_rproc *drproc = rproc->priv;
struct device *dev = rproc->dev.parent;
int ret;
ret = reset_control_assert(drproc->dsp_reset);
if (ret) {
dev_err(dev, "reset_control_assert() failed: %d\n", ret);
return ret;
}
clk_disable_unprepare(drproc->dsp_clk);
return 0;
}
/* kick a virtqueue */
static void da8xx_rproc_kick(struct rproc *rproc, int vqid)
{
struct da8xx_rproc *drproc = rproc->priv;
/* Interrupt remote proc */
writel(SYSCFG_CHIPSIG2, drproc->chipsig);
}
static const struct rproc_ops da8xx_rproc_ops = {
.start = da8xx_rproc_start,
.stop = da8xx_rproc_stop,
.kick = da8xx_rproc_kick,
};
static int da8xx_rproc_get_internal_memories(struct platform_device *pdev,
struct da8xx_rproc *drproc)
{
static const char * const mem_names[] = {"l2sram", "l1pram", "l1dram"};
int num_mems = ARRAY_SIZE(mem_names);
struct device *dev = &pdev->dev;
struct resource *res;
int i;
drproc->mem = devm_kcalloc(dev, num_mems, sizeof(*drproc->mem),
GFP_KERNEL);
if (!drproc->mem)
return -ENOMEM;
for (i = 0; i < num_mems; i++) {
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
mem_names[i]);
drproc->mem[i].cpu_addr = devm_ioremap_resource(dev, res);
if (IS_ERR(drproc->mem[i].cpu_addr)) {
dev_err(dev, "failed to parse and map %s memory\n",
mem_names[i]);
return PTR_ERR(drproc->mem[i].cpu_addr);
}
drproc->mem[i].bus_addr = res->start;
drproc->mem[i].dev_addr =
res->start & DA8XX_RPROC_LOCAL_ADDRESS_MASK;
drproc->mem[i].size = resource_size(res);
dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %p da 0x%x\n",
mem_names[i], &drproc->mem[i].bus_addr,
drproc->mem[i].size, drproc->mem[i].cpu_addr,
drproc->mem[i].dev_addr);
}
drproc->num_mems = num_mems;
return 0;
}
static int da8xx_rproc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct da8xx_rproc *drproc;
struct rproc *rproc;
struct irq_data *irq_data;
struct resource *bootreg_res;
struct resource *chipsig_res;
struct clk *dsp_clk;
struct reset_control *dsp_reset;
void __iomem *chipsig;
void __iomem *bootreg;
int irq;
int ret;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
irq_data = irq_get_irq_data(irq);
if (!irq_data) {
dev_err(dev, "irq_get_irq_data(%d): NULL\n", irq);
return -EINVAL;
}
bootreg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"host1cfg");
bootreg = devm_ioremap_resource(dev, bootreg_res);
if (IS_ERR(bootreg))
return PTR_ERR(bootreg);
chipsig_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"chipsig");
chipsig = devm_ioremap_resource(dev, chipsig_res);
if (IS_ERR(chipsig))
return PTR_ERR(chipsig);
dsp_clk = devm_clk_get(dev, NULL);
if (IS_ERR(dsp_clk)) {
dev_err(dev, "clk_get error: %ld\n", PTR_ERR(dsp_clk));
return PTR_ERR(dsp_clk);
}
dsp_reset = devm_reset_control_get_exclusive(dev, NULL);
if (IS_ERR(dsp_reset)) {
if (PTR_ERR(dsp_reset) != -EPROBE_DEFER)
dev_err(dev, "unable to get reset control: %ld\n",
PTR_ERR(dsp_reset));
return PTR_ERR(dsp_reset);
}
if (dev->of_node) {
ret = of_reserved_mem_device_init(dev);
if (ret) {
dev_err(dev, "device does not have specific CMA pool: %d\n",
ret);
return ret;
}
}
rproc = rproc_alloc(dev, "dsp", &da8xx_rproc_ops, da8xx_fw_name,
sizeof(*drproc));
if (!rproc) {
ret = -ENOMEM;
goto free_mem;
}
/* error recovery is not supported at present */
rproc->recovery_disabled = true;
drproc = rproc->priv;
drproc->rproc = rproc;
drproc->dsp_clk = dsp_clk;
drproc->dsp_reset = dsp_reset;
rproc->has_iommu = false;
ret = da8xx_rproc_get_internal_memories(pdev, drproc);
if (ret)
goto free_rproc;
platform_set_drvdata(pdev, rproc);
/* everything the ISR needs is now set up, so hook it up */
ret = devm_request_threaded_irq(dev, irq, da8xx_rproc_callback,
handle_event, 0, "da8xx-remoteproc",
rproc);
if (ret) {
dev_err(dev, "devm_request_threaded_irq error: %d\n", ret);
goto free_rproc;
}
/*
* rproc_add() can end up enabling the DSP's clk with the DSP
* *not* in reset, but da8xx_rproc_start() needs the DSP to be
* held in reset at the time it is called.
*/
ret = reset_control_assert(dsp_reset);
if (ret)
goto free_rproc;
drproc->chipsig = chipsig;
drproc->bootreg = bootreg;
drproc->ack_fxn = irq_data->chip->irq_ack;
drproc->irq_data = irq_data;
drproc->irq = irq;
ret = rproc_add(rproc);
if (ret) {
dev_err(dev, "rproc_add failed: %d\n", ret);
goto free_rproc;
}
return 0;
free_rproc:
rproc_free(rproc);
free_mem:
if (dev->of_node)
of_reserved_mem_device_release(dev);
return ret;
}
static void da8xx_rproc_remove(struct platform_device *pdev)
{
struct rproc *rproc = platform_get_drvdata(pdev);
struct da8xx_rproc *drproc = rproc->priv;
struct device *dev = &pdev->dev;
/*
* The devm subsystem might end up releasing things before
* freeing the irq, thus allowing an interrupt to sneak in while
* the device is being removed. This should prevent that.
*/
disable_irq(drproc->irq);
rproc_del(rproc);
rproc_free(rproc);
if (dev->of_node)
of_reserved_mem_device_release(dev);
}
static const struct of_device_id davinci_rproc_of_match[] __maybe_unused = {
{ .compatible = "ti,da850-dsp", },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, davinci_rproc_of_match);
static struct platform_driver da8xx_rproc_driver = {
.probe = da8xx_rproc_probe,
.remove_new = da8xx_rproc_remove,
.driver = {
.name = "davinci-rproc",
.of_match_table = of_match_ptr(davinci_rproc_of_match),
},
};
module_platform_driver(da8xx_rproc_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DA8XX Remote Processor control driver");
|
linux-master
|
drivers/remoteproc/da8xx_remoteproc.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* SLIM core rproc driver
*
* Copyright (C) 2016 STMicroelectronics
*
* Author: Peter Griffin <[email protected]>
*/
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/remoteproc.h>
#include <linux/remoteproc/st_slim_rproc.h>
#include "remoteproc_internal.h"
/* SLIM core registers */
#define SLIM_ID_OFST 0x0
#define SLIM_VER_OFST 0x4
#define SLIM_EN_OFST 0x8
#define SLIM_EN_RUN BIT(0)
#define SLIM_CLK_GATE_OFST 0xC
#define SLIM_CLK_GATE_DIS BIT(0)
#define SLIM_CLK_GATE_RESET BIT(2)
#define SLIM_SLIM_PC_OFST 0x20
/* DMEM registers */
#define SLIM_REV_ID_OFST 0x0
#define SLIM_REV_ID_MIN_MASK GENMASK(15, 8)
#define SLIM_REV_ID_MIN(id) ((id & SLIM_REV_ID_MIN_MASK) >> 8)
#define SLIM_REV_ID_MAJ_MASK GENMASK(23, 16)
#define SLIM_REV_ID_MAJ(id) ((id & SLIM_REV_ID_MAJ_MASK) >> 16)
/* peripherals registers */
#define SLIM_STBUS_SYNC_OFST 0xF88
#define SLIM_STBUS_SYNC_DIS BIT(0)
#define SLIM_INT_SET_OFST 0xFD4
#define SLIM_INT_CLR_OFST 0xFD8
#define SLIM_INT_MASK_OFST 0xFDC
#define SLIM_CMD_CLR_OFST 0xFC8
#define SLIM_CMD_MASK_OFST 0xFCC
static const char *mem_names[ST_SLIM_MEM_MAX] = {
[ST_SLIM_DMEM] = "dmem",
[ST_SLIM_IMEM] = "imem",
};
static int slim_clk_get(struct st_slim_rproc *slim_rproc, struct device *dev)
{
int clk, err;
for (clk = 0; clk < ST_SLIM_MAX_CLK; clk++) {
slim_rproc->clks[clk] = of_clk_get(dev->of_node, clk);
if (IS_ERR(slim_rproc->clks[clk])) {
err = PTR_ERR(slim_rproc->clks[clk]);
if (err == -EPROBE_DEFER)
goto err_put_clks;
slim_rproc->clks[clk] = NULL;
break;
}
}
return 0;
err_put_clks:
while (--clk >= 0)
clk_put(slim_rproc->clks[clk]);
return err;
}
static void slim_clk_disable(struct st_slim_rproc *slim_rproc)
{
int clk;
for (clk = 0; clk < ST_SLIM_MAX_CLK && slim_rproc->clks[clk]; clk++)
clk_disable_unprepare(slim_rproc->clks[clk]);
}
static int slim_clk_enable(struct st_slim_rproc *slim_rproc)
{
int clk, ret;
for (clk = 0; clk < ST_SLIM_MAX_CLK && slim_rproc->clks[clk]; clk++) {
ret = clk_prepare_enable(slim_rproc->clks[clk]);
if (ret)
goto err_disable_clks;
}
return 0;
err_disable_clks:
while (--clk >= 0)
clk_disable_unprepare(slim_rproc->clks[clk]);
return ret;
}
/*
* Remoteproc slim specific device handlers
*/
static int slim_rproc_start(struct rproc *rproc)
{
struct device *dev = &rproc->dev;
struct st_slim_rproc *slim_rproc = rproc->priv;
unsigned long hw_id, hw_ver, fw_rev;
u32 val;
/* disable CPU pipeline clock & reset CPU pipeline */
val = SLIM_CLK_GATE_DIS | SLIM_CLK_GATE_RESET;
writel(val, slim_rproc->slimcore + SLIM_CLK_GATE_OFST);
/* disable SLIM core STBus sync */
writel(SLIM_STBUS_SYNC_DIS, slim_rproc->peri + SLIM_STBUS_SYNC_OFST);
/* enable cpu pipeline clock */
writel(!SLIM_CLK_GATE_DIS,
slim_rproc->slimcore + SLIM_CLK_GATE_OFST);
/* clear int & cmd mailbox */
writel(~0U, slim_rproc->peri + SLIM_INT_CLR_OFST);
writel(~0U, slim_rproc->peri + SLIM_CMD_CLR_OFST);
/* enable all channels cmd & int */
writel(~0U, slim_rproc->peri + SLIM_INT_MASK_OFST);
writel(~0U, slim_rproc->peri + SLIM_CMD_MASK_OFST);
/* enable cpu */
writel(SLIM_EN_RUN, slim_rproc->slimcore + SLIM_EN_OFST);
hw_id = readl_relaxed(slim_rproc->slimcore + SLIM_ID_OFST);
hw_ver = readl_relaxed(slim_rproc->slimcore + SLIM_VER_OFST);
fw_rev = readl(slim_rproc->mem[ST_SLIM_DMEM].cpu_addr +
SLIM_REV_ID_OFST);
dev_info(dev, "fw rev:%ld.%ld on SLIM %ld.%ld\n",
SLIM_REV_ID_MAJ(fw_rev), SLIM_REV_ID_MIN(fw_rev),
hw_id, hw_ver);
return 0;
}
static int slim_rproc_stop(struct rproc *rproc)
{
struct st_slim_rproc *slim_rproc = rproc->priv;
u32 val;
/* mask all (cmd & int) channels */
writel(0UL, slim_rproc->peri + SLIM_INT_MASK_OFST);
writel(0UL, slim_rproc->peri + SLIM_CMD_MASK_OFST);
/* disable cpu pipeline clock */
writel(SLIM_CLK_GATE_DIS, slim_rproc->slimcore + SLIM_CLK_GATE_OFST);
writel(!SLIM_EN_RUN, slim_rproc->slimcore + SLIM_EN_OFST);
val = readl(slim_rproc->slimcore + SLIM_EN_OFST);
if (val & SLIM_EN_RUN)
dev_warn(&rproc->dev, "Failed to disable SLIM");
dev_dbg(&rproc->dev, "slim stopped\n");
return 0;
}
static void *slim_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct st_slim_rproc *slim_rproc = rproc->priv;
void *va = NULL;
int i;
for (i = 0; i < ST_SLIM_MEM_MAX; i++) {
if (da != slim_rproc->mem[i].bus_addr)
continue;
if (len <= slim_rproc->mem[i].size) {
/* __force to make sparse happy with type conversion */
va = (__force void *)slim_rproc->mem[i].cpu_addr;
break;
}
}
dev_dbg(&rproc->dev, "da = 0x%llx len = 0x%zx va = 0x%pK\n",
da, len, va);
return va;
}
static const struct rproc_ops slim_rproc_ops = {
.start = slim_rproc_start,
.stop = slim_rproc_stop,
.da_to_va = slim_rproc_da_to_va,
.get_boot_addr = rproc_elf_get_boot_addr,
.load = rproc_elf_load_segments,
.sanity_check = rproc_elf_sanity_check,
};
/**
* st_slim_rproc_alloc() - allocate and initialise slim rproc
* @pdev: Pointer to the platform_device struct
* @fw_name: Name of firmware for rproc to use
*
* Function for allocating and initialising a slim rproc for use by
* device drivers whose IP is based around the SLIM core. It
* obtains and enables any clocks required by the SLIM core and also
* ioremaps the various IO.
*
* Return: st_slim_rproc pointer or PTR_ERR() on error.
*/
struct st_slim_rproc *st_slim_rproc_alloc(struct platform_device *pdev,
char *fw_name)
{
struct device *dev = &pdev->dev;
struct st_slim_rproc *slim_rproc;
struct device_node *np = dev->of_node;
struct rproc *rproc;
struct resource *res;
int err, i;
if (!fw_name)
return ERR_PTR(-EINVAL);
if (!of_device_is_compatible(np, "st,slim-rproc"))
return ERR_PTR(-EINVAL);
rproc = rproc_alloc(dev, np->name, &slim_rproc_ops,
fw_name, sizeof(*slim_rproc));
if (!rproc)
return ERR_PTR(-ENOMEM);
rproc->has_iommu = false;
slim_rproc = rproc->priv;
slim_rproc->rproc = rproc;
/* get imem and dmem */
for (i = 0; i < ARRAY_SIZE(mem_names); i++) {
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
mem_names[i]);
slim_rproc->mem[i].cpu_addr = devm_ioremap_resource(dev, res);
if (IS_ERR(slim_rproc->mem[i].cpu_addr)) {
dev_err(&pdev->dev, "devm_ioremap_resource failed\n");
err = PTR_ERR(slim_rproc->mem[i].cpu_addr);
goto err;
}
slim_rproc->mem[i].bus_addr = res->start;
slim_rproc->mem[i].size = resource_size(res);
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "slimcore");
slim_rproc->slimcore = devm_ioremap_resource(dev, res);
if (IS_ERR(slim_rproc->slimcore)) {
dev_err(&pdev->dev, "failed to ioremap slimcore IO\n");
err = PTR_ERR(slim_rproc->slimcore);
goto err;
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "peripherals");
slim_rproc->peri = devm_ioremap_resource(dev, res);
if (IS_ERR(slim_rproc->peri)) {
dev_err(&pdev->dev, "failed to ioremap peripherals IO\n");
err = PTR_ERR(slim_rproc->peri);
goto err;
}
err = slim_clk_get(slim_rproc, dev);
if (err)
goto err;
err = slim_clk_enable(slim_rproc);
if (err) {
dev_err(dev, "Failed to enable clocks\n");
goto err_clk_put;
}
/* Register as a remoteproc device */
err = rproc_add(rproc);
if (err) {
dev_err(dev, "registration of slim remoteproc failed\n");
goto err_clk_dis;
}
return slim_rproc;
err_clk_dis:
slim_clk_disable(slim_rproc);
err_clk_put:
for (i = 0; i < ST_SLIM_MAX_CLK && slim_rproc->clks[i]; i++)
clk_put(slim_rproc->clks[i]);
err:
rproc_free(rproc);
return ERR_PTR(err);
}
EXPORT_SYMBOL(st_slim_rproc_alloc);
/**
* st_slim_rproc_put() - put slim rproc resources
* @slim_rproc: Pointer to the st_slim_rproc struct
*
* Function for calling respective _put() functions on slim_rproc resources.
*
*/
void st_slim_rproc_put(struct st_slim_rproc *slim_rproc)
{
int clk;
if (!slim_rproc)
return;
slim_clk_disable(slim_rproc);
for (clk = 0; clk < ST_SLIM_MAX_CLK && slim_rproc->clks[clk]; clk++)
clk_put(slim_rproc->clks[clk]);
rproc_del(slim_rproc->rproc);
rproc_free(slim_rproc->rproc);
}
EXPORT_SYMBOL(st_slim_rproc_put);
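/*
 * Illustrative sketch (not part of the original file): a hypothetical client
 * driver whose IP embeds a SLIM core would typically hand the core to these
 * helpers from its own probe/remove callbacks. The "foo" names and the
 * firmware file name are assumptions made purely for this example.
 */
static int foo_slim_probe(struct platform_device *pdev)
{
        struct st_slim_rproc *slim;

        /* allocate, clock and register the embedded SLIM core */
        slim = st_slim_rproc_alloc(pdev, "foo_slim_fw.elf");
        if (IS_ERR(slim))
                return PTR_ERR(slim);

        platform_set_drvdata(pdev, slim);
        return 0;
}

static void foo_slim_remove(struct platform_device *pdev)
{
        /* unregister the rproc and release its clocks */
        st_slim_rproc_put(platform_get_drvdata(pdev));
}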
MODULE_AUTHOR("Peter Griffin <[email protected]>");
MODULE_DESCRIPTION("STMicroelectronics SLIM core rproc driver");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/remoteproc/st_slim_rproc.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Remote processor messaging transport (OMAP platform-specific bits)
*
* Copyright (C) 2011 Texas Instruments, Inc.
* Copyright (C) 2011 Google, Inc.
*
* Ohad Ben-Cohen <[email protected]>
* Brian Swetland <[email protected]>
*/
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/remoteproc.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_ring.h>
#include <linux/err.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include "remoteproc_internal.h"
static int copy_dma_range_map(struct device *to, struct device *from)
{
const struct bus_dma_region *map = from->dma_range_map, *new_map, *r;
int num_ranges = 0;
if (!map)
return 0;
for (r = map; r->size; r++)
num_ranges++;
new_map = kmemdup(map, array_size(num_ranges + 1, sizeof(*map)),
GFP_KERNEL);
if (!new_map)
return -ENOMEM;
to->dma_range_map = new_map;
return 0;
}
static struct rproc_vdev *vdev_to_rvdev(struct virtio_device *vdev)
{
struct platform_device *pdev;
pdev = container_of(vdev->dev.parent, struct platform_device, dev);
return platform_get_drvdata(pdev);
}
static struct rproc *vdev_to_rproc(struct virtio_device *vdev)
{
struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
return rvdev->rproc;
}
/* kick the remote processor, and let it know which virtqueue to poke at */
static bool rproc_virtio_notify(struct virtqueue *vq)
{
struct rproc_vring *rvring = vq->priv;
struct rproc *rproc = rvring->rvdev->rproc;
int notifyid = rvring->notifyid;
dev_dbg(&rproc->dev, "kicking vq index: %d\n", notifyid);
rproc->ops->kick(rproc, notifyid);
return true;
}
/**
* rproc_vq_interrupt() - tell remoteproc that a virtqueue is interrupted
* @rproc: handle to the remote processor
* @notifyid: index of the signalled virtqueue (unique per this @rproc)
*
* This function should be called by the platform-specific rproc driver,
* when the remote processor signals that a specific virtqueue has pending
* messages available.
*
* Return: IRQ_NONE if no message was found in the @notifyid virtqueue,
* and otherwise returns IRQ_HANDLED.
*/
irqreturn_t rproc_vq_interrupt(struct rproc *rproc, int notifyid)
{
struct rproc_vring *rvring;
dev_dbg(&rproc->dev, "vq index %d is interrupted\n", notifyid);
rvring = idr_find(&rproc->notifyids, notifyid);
if (!rvring || !rvring->vq)
return IRQ_NONE;
return vring_interrupt(0, rvring->vq);
}
EXPORT_SYMBOL(rproc_vq_interrupt);
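/*
 * Illustrative sketch (not part of this file): a platform-specific rproc
 * driver typically forwards its mailbox or doorbell interrupt here. When
 * the hardware does not report which virtqueue was signalled, the handler
 * can simply poll every known notifyid. The "foo" name is an assumption
 * made purely for this example.
 */
static irqreturn_t foo_mbox_interrupt(int irq, void *data)
{
        struct rproc *rproc = data;

        /* no notifyid payload available: poll both virtqueues */
        rproc_vq_interrupt(rproc, 0);
        rproc_vq_interrupt(rproc, 1);

        return IRQ_HANDLED;
}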
static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
unsigned int id,
void (*callback)(struct virtqueue *vq),
const char *name, bool ctx)
{
struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
struct rproc *rproc = vdev_to_rproc(vdev);
struct device *dev = &rproc->dev;
struct rproc_mem_entry *mem;
struct rproc_vring *rvring;
struct fw_rsc_vdev *rsc;
struct virtqueue *vq;
void *addr;
int num, size;
/* we're temporarily limited to two virtqueues per rvdev */
if (id >= ARRAY_SIZE(rvdev->vring))
return ERR_PTR(-EINVAL);
if (!name)
return NULL;
/* Search allocated memory region by name */
mem = rproc_find_carveout_by_name(rproc, "vdev%dvring%d", rvdev->index,
id);
if (!mem || !mem->va)
return ERR_PTR(-ENOMEM);
rvring = &rvdev->vring[id];
addr = mem->va;
num = rvring->num;
/* zero vring */
size = vring_size(num, rvring->align);
memset(addr, 0, size);
dev_dbg(dev, "vring%d: va %pK qsz %d notifyid %d\n",
id, addr, num, rvring->notifyid);
/*
* Create the new vq, and tell virtio we're not interested in
* the 'weak' smp barriers, since we're talking with a real device.
*/
vq = vring_new_virtqueue(id, num, rvring->align, vdev, false, ctx,
addr, rproc_virtio_notify, callback, name);
if (!vq) {
dev_err(dev, "vring_new_virtqueue %s failed\n", name);
rproc_free_vring(rvring);
return ERR_PTR(-ENOMEM);
}
vq->num_max = num;
rvring->vq = vq;
vq->priv = rvring;
/* Update vring in resource table */
rsc = (void *)rproc->table_ptr + rvdev->rsc_offset;
rsc->vring[id].da = mem->da;
return vq;
}
static void __rproc_virtio_del_vqs(struct virtio_device *vdev)
{
struct virtqueue *vq, *n;
struct rproc_vring *rvring;
list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
rvring = vq->priv;
rvring->vq = NULL;
vring_del_virtqueue(vq);
}
}
static void rproc_virtio_del_vqs(struct virtio_device *vdev)
{
__rproc_virtio_del_vqs(vdev);
}
static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
const char * const names[],
const bool * ctx,
struct irq_affinity *desc)
{
int i, ret, queue_idx = 0;
for (i = 0; i < nvqs; ++i) {
if (!names[i]) {
vqs[i] = NULL;
continue;
}
vqs[i] = rp_find_vq(vdev, queue_idx++, callbacks[i], names[i],
ctx ? ctx[i] : false);
if (IS_ERR(vqs[i])) {
ret = PTR_ERR(vqs[i]);
goto error;
}
}
return 0;
error:
__rproc_virtio_del_vqs(vdev);
return ret;
}
static u8 rproc_virtio_get_status(struct virtio_device *vdev)
{
struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
struct fw_rsc_vdev *rsc;
rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
return rsc->status;
}
static void rproc_virtio_set_status(struct virtio_device *vdev, u8 status)
{
struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
struct fw_rsc_vdev *rsc;
rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
rsc->status = status;
dev_dbg(&vdev->dev, "status: %d\n", status);
}
static void rproc_virtio_reset(struct virtio_device *vdev)
{
struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
struct fw_rsc_vdev *rsc;
rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
rsc->status = 0;
dev_dbg(&vdev->dev, "reset !\n");
}
/* provide the vdev features as retrieved from the firmware */
static u64 rproc_virtio_get_features(struct virtio_device *vdev)
{
struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
struct fw_rsc_vdev *rsc;
rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
return rsc->dfeatures;
}
static void rproc_transport_features(struct virtio_device *vdev)
{
/*
* Packed ring isn't enabled on remoteproc for now,
* because remoteproc uses vring_new_virtqueue() which
* creates virtio rings on preallocated memory.
*/
__virtio_clear_bit(vdev, VIRTIO_F_RING_PACKED);
}
static int rproc_virtio_finalize_features(struct virtio_device *vdev)
{
struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
struct fw_rsc_vdev *rsc;
rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
/* Give virtio_ring a chance to accept features */
vring_transport_features(vdev);
/* Give virtio_rproc a chance to accept features. */
rproc_transport_features(vdev);
/* Make sure we don't have any features > 32 bits! */
BUG_ON((u32)vdev->features != vdev->features);
/*
* Remember the finalized features of our vdev, and provide it
* to the remote processor once it is powered on.
*/
rsc->gfeatures = vdev->features;
return 0;
}
static void rproc_virtio_get(struct virtio_device *vdev, unsigned int offset,
void *buf, unsigned int len)
{
struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
struct fw_rsc_vdev *rsc;
void *cfg;
rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
cfg = &rsc->vring[rsc->num_of_vrings];
if (offset + len > rsc->config_len || offset + len < len) {
dev_err(&vdev->dev, "rproc_virtio_get: access out of bounds\n");
return;
}
memcpy(buf, cfg + offset, len);
}
static void rproc_virtio_set(struct virtio_device *vdev, unsigned int offset,
const void *buf, unsigned int len)
{
struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
struct fw_rsc_vdev *rsc;
void *cfg;
rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
cfg = &rsc->vring[rsc->num_of_vrings];
if (offset + len > rsc->config_len || offset + len < len) {
dev_err(&vdev->dev, "rproc_virtio_set: access out of bounds\n");
return;
}
memcpy(cfg + offset, buf, len);
}
static const struct virtio_config_ops rproc_virtio_config_ops = {
.get_features = rproc_virtio_get_features,
.finalize_features = rproc_virtio_finalize_features,
.find_vqs = rproc_virtio_find_vqs,
.del_vqs = rproc_virtio_del_vqs,
.reset = rproc_virtio_reset,
.set_status = rproc_virtio_set_status,
.get_status = rproc_virtio_get_status,
.get = rproc_virtio_get,
.set = rproc_virtio_set,
};
/*
* This function is called whenever the vdev is released, and is responsible
* for decrementing the remote processor's refcount, which was taken when the
* vdev was added.
*
* Never call this function directly; it will be called by the driver
* core when needed.
*/
static void rproc_virtio_dev_release(struct device *dev)
{
struct virtio_device *vdev = dev_to_virtio(dev);
struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
kfree(vdev);
put_device(&rvdev->pdev->dev);
}
/**
* rproc_add_virtio_dev() - register an rproc-induced virtio device
* @rvdev: the remote vdev
* @id: the device type identification (used to match it with a driver).
*
* This function registers a virtio device. This vdev's parent is
* the rproc device.
*
* Return: 0 on success or an appropriate error value otherwise
*/
static int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id)
{
struct rproc *rproc = rvdev->rproc;
struct device *dev = &rvdev->pdev->dev;
struct virtio_device *vdev;
struct rproc_mem_entry *mem;
int ret;
if (rproc->ops->kick == NULL) {
ret = -EINVAL;
dev_err(dev, ".kick method not defined for %s\n", rproc->name);
goto out;
}
/* Try to find dedicated vdev buffer carveout */
mem = rproc_find_carveout_by_name(rproc, "vdev%dbuffer", rvdev->index);
if (mem) {
phys_addr_t pa;
if (mem->of_resm_idx != -1) {
struct device_node *np = rproc->dev.parent->of_node;
/* Associate reserved memory to vdev device */
ret = of_reserved_mem_device_init_by_idx(dev, np,
mem->of_resm_idx);
if (ret) {
dev_err(dev, "Can't associate reserved memory\n");
goto out;
}
} else {
if (mem->va) {
dev_warn(dev, "vdev %d buffer already mapped\n",
rvdev->index);
pa = rproc_va_to_pa(mem->va);
} else {
/* Use the dma address, as the carveout is not memory-mapped yet */
pa = (phys_addr_t)mem->dma;
}
/* Associate vdev buffer memory pool to vdev subdev */
ret = dma_declare_coherent_memory(dev, pa,
mem->da,
mem->len);
if (ret < 0) {
dev_err(dev, "Failed to associate buffer\n");
goto out;
}
}
} else {
struct device_node *np = rproc->dev.parent->of_node;
/*
* If we don't have a dedicated buffer, just attempt to re-assign
* the reserved memory from our parent. A default memory-region
* at index 0 from the parent's memory-regions is assigned for
* the rvdev dev to allocate from. Failure is non-critical and
* the allocations will fall back to global pools, so don't
* check return value either.
*/
of_reserved_mem_device_init_by_idx(dev, np, 0);
}
/* Allocate virtio device */
vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
if (!vdev) {
ret = -ENOMEM;
goto out;
}
vdev->id.device = id;
vdev->config = &rproc_virtio_config_ops;
vdev->dev.parent = dev;
vdev->dev.release = rproc_virtio_dev_release;
/* Reference the vdev and vring allocations */
get_device(dev);
ret = register_virtio_device(vdev);
if (ret) {
put_device(&vdev->dev);
dev_err(dev, "failed to register vdev: %d\n", ret);
goto out;
}
dev_info(dev, "registered %s (type %d)\n", dev_name(&vdev->dev), id);
out:
return ret;
}
/**
* rproc_remove_virtio_dev() - remove an rproc-induced virtio device
* @dev: the virtio device
* @data: must be null
*
* This function unregisters an existing virtio device.
*
* Return: 0
*/
static int rproc_remove_virtio_dev(struct device *dev, void *data)
{
struct virtio_device *vdev = dev_to_virtio(dev);
unregister_virtio_device(vdev);
return 0;
}
static int rproc_vdev_do_start(struct rproc_subdev *subdev)
{
struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev);
return rproc_add_virtio_dev(rvdev, rvdev->id);
}
static void rproc_vdev_do_stop(struct rproc_subdev *subdev, bool crashed)
{
struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev);
struct device *dev = &rvdev->pdev->dev;
int ret;
ret = device_for_each_child(dev, NULL, rproc_remove_virtio_dev);
if (ret)
dev_warn(dev, "can't remove vdev child device: %d\n", ret);
}
static int rproc_virtio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rproc_vdev_data *rvdev_data = dev->platform_data;
struct rproc_vdev *rvdev;
struct rproc *rproc = container_of(dev->parent, struct rproc, dev);
struct fw_rsc_vdev *rsc;
int i, ret;
if (!rvdev_data)
return -EINVAL;
rvdev = devm_kzalloc(dev, sizeof(*rvdev), GFP_KERNEL);
if (!rvdev)
return -ENOMEM;
rvdev->id = rvdev_data->id;
rvdev->rproc = rproc;
rvdev->index = rvdev_data->index;
ret = copy_dma_range_map(dev, rproc->dev.parent);
if (ret)
return ret;
/* Make device dma capable by inheriting from parent's capabilities */
set_dma_ops(dev, get_dma_ops(rproc->dev.parent));
ret = dma_coerce_mask_and_coherent(dev, dma_get_mask(rproc->dev.parent));
if (ret) {
dev_warn(dev, "Failed to set DMA mask %llx. Trying to continue... (%pe)\n",
dma_get_mask(rproc->dev.parent), ERR_PTR(ret));
}
platform_set_drvdata(pdev, rvdev);
rvdev->pdev = pdev;
rsc = rvdev_data->rsc;
/* parse the vrings */
for (i = 0; i < rsc->num_of_vrings; i++) {
ret = rproc_parse_vring(rvdev, rsc, i);
if (ret)
return ret;
}
/* remember the resource offset */
rvdev->rsc_offset = rvdev_data->rsc_offset;
/* allocate the vring resources */
for (i = 0; i < rsc->num_of_vrings; i++) {
ret = rproc_alloc_vring(rvdev, i);
if (ret)
goto unwind_vring_allocations;
}
rproc_add_rvdev(rproc, rvdev);
rvdev->subdev.start = rproc_vdev_do_start;
rvdev->subdev.stop = rproc_vdev_do_stop;
rproc_add_subdev(rproc, &rvdev->subdev);
/*
* We're indirectly making a non-temporary copy of the rproc pointer
* here, because the platform device or the vdev device will indirectly
* access the wrapping rproc.
*
* Therefore we must increment the rproc refcount here, and decrement
* it _only_ on platform remove.
*/
get_device(&rproc->dev);
return 0;
unwind_vring_allocations:
for (i--; i >= 0; i--)
rproc_free_vring(&rvdev->vring[i]);
return ret;
}
static void rproc_virtio_remove(struct platform_device *pdev)
{
struct rproc_vdev *rvdev = dev_get_drvdata(&pdev->dev);
struct rproc *rproc = rvdev->rproc;
struct rproc_vring *rvring;
int id;
for (id = 0; id < ARRAY_SIZE(rvdev->vring); id++) {
rvring = &rvdev->vring[id];
rproc_free_vring(rvring);
}
rproc_remove_subdev(rproc, &rvdev->subdev);
rproc_remove_rvdev(rvdev);
of_reserved_mem_device_release(&pdev->dev);
dma_release_coherent_memory(&pdev->dev);
put_device(&rproc->dev);
}
/* Platform driver */
static struct platform_driver rproc_virtio_driver = {
.probe = rproc_virtio_probe,
.remove_new = rproc_virtio_remove,
.driver = {
.name = "rproc-virtio",
},
};
builtin_platform_driver(rproc_virtio_driver);
|
linux-master
|
drivers/remoteproc/remoteproc_virtio.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Coredump functionality for Remoteproc framework.
*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*/
#include <linux/completion.h>
#include <linux/devcoredump.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/remoteproc.h>
#include "remoteproc_internal.h"
#include "remoteproc_elf_helpers.h"
struct rproc_coredump_state {
struct rproc *rproc;
void *header;
struct completion dump_done;
};
/**
* rproc_coredump_cleanup() - clean up dump_segments list
* @rproc: the remote processor handle
*/
void rproc_coredump_cleanup(struct rproc *rproc)
{
struct rproc_dump_segment *entry, *tmp;
list_for_each_entry_safe(entry, tmp, &rproc->dump_segments, node) {
list_del(&entry->node);
kfree(entry);
}
}
EXPORT_SYMBOL_GPL(rproc_coredump_cleanup);
/**
* rproc_coredump_add_segment() - add segment of device memory to coredump
* @rproc: handle of a remote processor
* @da: device address
* @size: size of segment
*
* Add device memory to the list of segments to be included in a coredump for
* the remoteproc.
*
* Return: 0 on success, negative errno on error.
*/
int rproc_coredump_add_segment(struct rproc *rproc, dma_addr_t da, size_t size)
{
struct rproc_dump_segment *segment;
segment = kzalloc(sizeof(*segment), GFP_KERNEL);
if (!segment)
return -ENOMEM;
segment->da = da;
segment->size = size;
list_add_tail(&segment->node, &rproc->dump_segments);
return 0;
}
EXPORT_SYMBOL(rproc_coredump_add_segment);
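/*
 * Illustrative sketch (not part of this file): a platform driver usually
 * registers its dump segments while preparing the rproc or parsing the
 * firmware, e.g. from an rproc_ops hook. The "foo" name, addresses and
 * sizes are placeholders invented for this example.
 */
static int foo_register_dump_segments(struct rproc *rproc)
{
        int ret;

        /* include the remote core's TCM region in any future coredump */
        ret = rproc_coredump_add_segment(rproc, 0x10000000, 0x40000);
        if (ret)
                return ret;

        /* and the firmware's DDR carveout */
        return rproc_coredump_add_segment(rproc, 0x80000000, 0x100000);
}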
/**
* rproc_coredump_add_custom_segment() - add custom coredump segment
* @rproc: handle of a remote processor
* @da: device address
* @size: size of segment
* @dumpfn: custom dump function called for each segment during coredump
* @priv: private data
*
* Add device memory to the list of segments to be included in the coredump
* and associate the segment with the given custom dump function and private
* data.
*
* Return: 0 on success, negative errno on error.
*/
int rproc_coredump_add_custom_segment(struct rproc *rproc,
dma_addr_t da, size_t size,
void (*dumpfn)(struct rproc *rproc,
struct rproc_dump_segment *segment,
void *dest, size_t offset,
size_t size),
void *priv)
{
struct rproc_dump_segment *segment;
segment = kzalloc(sizeof(*segment), GFP_KERNEL);
if (!segment)
return -ENOMEM;
segment->da = da;
segment->size = size;
segment->priv = priv;
segment->dump = dumpfn;
list_add_tail(&segment->node, &rproc->dump_segments);
return 0;
}
EXPORT_SYMBOL(rproc_coredump_add_custom_segment);
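/*
 * Illustrative sketch (not part of this file): a driver that has to fetch a
 * region through a side channel (for instance memory that is only readable
 * once the core is halted) can supply its own dump callback. The "foo" name
 * and the registration values are assumptions made purely for this example.
 */
static void foo_dump_region(struct rproc *rproc,
                            struct rproc_dump_segment *segment,
                            void *dest, size_t offset, size_t size)
{
        /* fill 'size' bytes of this segment, starting at 'offset', into dest */
        memset(dest, 0, size);  /* placeholder for the driver-specific copy */
}

/*
 * Registration, typically done alongside rproc_coredump_add_segment():
 * rproc_coredump_add_custom_segment(rproc, 0x20000000, 0x10000,
 *                                   foo_dump_region, NULL);
 */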
/**
* rproc_coredump_set_elf_info() - set coredump elf information
* @rproc: handle of a remote processor
* @class: elf class for coredump elf file
* @machine: elf machine for coredump elf file
*
* Set elf information which will be used for coredump elf file.
*
* Return: 0 on success, negative errno on error.
*/
int rproc_coredump_set_elf_info(struct rproc *rproc, u8 class, u16 machine)
{
if (class != ELFCLASS64 && class != ELFCLASS32)
return -EINVAL;
rproc->elf_class = class;
rproc->elf_machine = machine;
return 0;
}
EXPORT_SYMBOL(rproc_coredump_set_elf_info);
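/*
 * Illustrative sketch (not part of this file): drivers normally set the
 * coredump ELF class and machine once, at probe time, so that any dump
 * generated later carries the right header. The 32-bit ARM values below
 * are an assumption chosen purely for this example.
 */
static int foo_init_coredump_info(struct rproc *rproc)
{
        return rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_ARM);
}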
static void rproc_coredump_free(void *data)
{
struct rproc_coredump_state *dump_state = data;
vfree(dump_state->header);
complete(&dump_state->dump_done);
}
static void *rproc_coredump_find_segment(loff_t user_offset,
struct list_head *segments,
size_t *data_left)
{
struct rproc_dump_segment *segment;
list_for_each_entry(segment, segments, node) {
if (user_offset < segment->size) {
*data_left = segment->size - user_offset;
return segment;
}
user_offset -= segment->size;
}
*data_left = 0;
return NULL;
}
static void rproc_copy_segment(struct rproc *rproc, void *dest,
struct rproc_dump_segment *segment,
size_t offset, size_t size)
{
bool is_iomem = false;
void *ptr;
if (segment->dump) {
segment->dump(rproc, segment, dest, offset, size);
} else {
ptr = rproc_da_to_va(rproc, segment->da + offset, size, &is_iomem);
if (!ptr) {
dev_err(&rproc->dev,
"invalid copy request for segment %pad with offset %zu and size %zu)\n",
&segment->da, offset, size);
memset(dest, 0xff, size);
} else {
if (is_iomem)
memcpy_fromio(dest, (void const __iomem *)ptr, size);
else
memcpy(dest, ptr, size);
}
}
}
static ssize_t rproc_coredump_read(char *buffer, loff_t offset, size_t count,
void *data, size_t header_sz)
{
size_t seg_data, bytes_left = count;
ssize_t copy_sz;
struct rproc_dump_segment *seg;
struct rproc_coredump_state *dump_state = data;
struct rproc *rproc = dump_state->rproc;
void *elfcore = dump_state->header;
/* Copy the vmalloc'ed header first. */
if (offset < header_sz) {
copy_sz = memory_read_from_buffer(buffer, count, &offset,
elfcore, header_sz);
return copy_sz;
}
/*
* Find out the segment memory chunk to be copied based on offset.
* Keep copying data until count bytes are read.
*/
while (bytes_left) {
seg = rproc_coredump_find_segment(offset - header_sz,
&rproc->dump_segments,
&seg_data);
/* EOF check */
if (!seg) {
dev_info(&rproc->dev, "Ramdump done, %lld bytes read",
offset);
break;
}
copy_sz = min_t(size_t, bytes_left, seg_data);
rproc_copy_segment(rproc, buffer, seg, seg->size - seg_data,
copy_sz);
offset += copy_sz;
buffer += copy_sz;
bytes_left -= copy_sz;
}
return count - bytes_left;
}
/**
* rproc_coredump() - perform coredump
* @rproc: rproc handle
*
* This function will generate an ELF header for the registered segments
* and create a devcoredump device associated with rproc. Based on the
* coredump configuration this function will directly copy the segments
* from device memory to userspace or copy segments from device memory to
* a separate buffer, which can then be read by userspace.
* The first approach avoids using extra vmalloc memory. But it will stall
* recovery flow until dump is read by userspace.
*/
void rproc_coredump(struct rproc *rproc)
{
struct rproc_dump_segment *segment;
void *phdr;
void *ehdr;
size_t data_size;
size_t offset;
void *data;
u8 class = rproc->elf_class;
int phnum = 0;
struct rproc_coredump_state dump_state;
enum rproc_dump_mechanism dump_conf = rproc->dump_conf;
if (list_empty(&rproc->dump_segments) ||
dump_conf == RPROC_COREDUMP_DISABLED)
return;
if (class == ELFCLASSNONE) {
dev_err(&rproc->dev, "ELF class is not set\n");
return;
}
data_size = elf_size_of_hdr(class);
list_for_each_entry(segment, &rproc->dump_segments, node) {
/*
* For the default configuration the buffer includes the headers and the
* segment data. For an inline dump the buffer only includes the headers,
* as the segments are read directly from device memory.
*/
data_size += elf_size_of_phdr(class);
if (dump_conf == RPROC_COREDUMP_ENABLED)
data_size += segment->size;
phnum++;
}
data = vmalloc(data_size);
if (!data)
return;
ehdr = data;
memset(ehdr, 0, elf_size_of_hdr(class));
/* e_ident field is common for both elf32 and elf64 */
elf_hdr_init_ident(ehdr, class);
elf_hdr_set_e_type(class, ehdr, ET_CORE);
elf_hdr_set_e_machine(class, ehdr, rproc->elf_machine);
elf_hdr_set_e_version(class, ehdr, EV_CURRENT);
elf_hdr_set_e_entry(class, ehdr, rproc->bootaddr);
elf_hdr_set_e_phoff(class, ehdr, elf_size_of_hdr(class));
elf_hdr_set_e_ehsize(class, ehdr, elf_size_of_hdr(class));
elf_hdr_set_e_phentsize(class, ehdr, elf_size_of_phdr(class));
elf_hdr_set_e_phnum(class, ehdr, phnum);
phdr = data + elf_hdr_get_e_phoff(class, ehdr);
offset = elf_hdr_get_e_phoff(class, ehdr);
offset += elf_size_of_phdr(class) * elf_hdr_get_e_phnum(class, ehdr);
list_for_each_entry(segment, &rproc->dump_segments, node) {
memset(phdr, 0, elf_size_of_phdr(class));
elf_phdr_set_p_type(class, phdr, PT_LOAD);
elf_phdr_set_p_offset(class, phdr, offset);
elf_phdr_set_p_vaddr(class, phdr, segment->da);
elf_phdr_set_p_paddr(class, phdr, segment->da);
elf_phdr_set_p_filesz(class, phdr, segment->size);
elf_phdr_set_p_memsz(class, phdr, segment->size);
elf_phdr_set_p_flags(class, phdr, PF_R | PF_W | PF_X);
elf_phdr_set_p_align(class, phdr, 0);
if (dump_conf == RPROC_COREDUMP_ENABLED)
rproc_copy_segment(rproc, data + offset, segment, 0,
segment->size);
offset += elf_phdr_get_p_filesz(class, phdr);
phdr += elf_size_of_phdr(class);
}
if (dump_conf == RPROC_COREDUMP_ENABLED) {
dev_coredumpv(&rproc->dev, data, data_size, GFP_KERNEL);
return;
}
/* Initialize the dump state struct to be used by rproc_coredump_read */
dump_state.rproc = rproc;
dump_state.header = data;
init_completion(&dump_state.dump_done);
dev_coredumpm(&rproc->dev, NULL, &dump_state, data_size, GFP_KERNEL,
rproc_coredump_read, rproc_coredump_free);
/*
* Wait until the dump is read and free is called. Data is freed
* by devcoredump framework automatically after 5 minutes.
*/
wait_for_completion(&dump_state.dump_done);
}
EXPORT_SYMBOL_GPL(rproc_coredump);
/**
* rproc_coredump_using_sections() - perform coredump using section headers
* @rproc: rproc handle
*
* This function will generate an ELF header for the registered sections of
* segments and create a devcoredump device associated with rproc. Based on
* the coredump configuration this function will directly copy the segments
* from device memory to userspace or copy segments from device memory to
* a separate buffer, which can then be read by userspace.
* The first approach avoids using extra vmalloc memory. But it will stall
* recovery flow until dump is read by userspace.
*/
void rproc_coredump_using_sections(struct rproc *rproc)
{
struct rproc_dump_segment *segment;
void *shdr;
void *ehdr;
size_t data_size;
size_t strtbl_size = 0;
size_t strtbl_index = 1;
size_t offset;
void *data;
u8 class = rproc->elf_class;
int shnum;
struct rproc_coredump_state dump_state;
unsigned int dump_conf = rproc->dump_conf;
char *str_tbl = "STR_TBL";
if (list_empty(&rproc->dump_segments) ||
dump_conf == RPROC_COREDUMP_DISABLED)
return;
if (class == ELFCLASSNONE) {
dev_err(&rproc->dev, "ELF class is not set\n");
return;
}
/*
* We allocate two extra section headers. The first one is null and the
* second one describes the string table. Space for the string table
* itself is also allocated.
*/
data_size = elf_size_of_hdr(class) + 2 * elf_size_of_shdr(class);
shnum = 2;
/* the extra byte is for the null character at index 0 */
strtbl_size += strlen(str_tbl) + 2;
list_for_each_entry(segment, &rproc->dump_segments, node) {
data_size += elf_size_of_shdr(class);
strtbl_size += strlen(segment->priv) + 1;
if (dump_conf == RPROC_COREDUMP_ENABLED)
data_size += segment->size;
shnum++;
}
data_size += strtbl_size;
data = vmalloc(data_size);
if (!data)
return;
ehdr = data;
memset(ehdr, 0, elf_size_of_hdr(class));
/* e_ident field is common for both elf32 and elf64 */
elf_hdr_init_ident(ehdr, class);
elf_hdr_set_e_type(class, ehdr, ET_CORE);
elf_hdr_set_e_machine(class, ehdr, rproc->elf_machine);
elf_hdr_set_e_version(class, ehdr, EV_CURRENT);
elf_hdr_set_e_entry(class, ehdr, rproc->bootaddr);
elf_hdr_set_e_shoff(class, ehdr, elf_size_of_hdr(class));
elf_hdr_set_e_ehsize(class, ehdr, elf_size_of_hdr(class));
elf_hdr_set_e_shentsize(class, ehdr, elf_size_of_shdr(class));
elf_hdr_set_e_shnum(class, ehdr, shnum);
elf_hdr_set_e_shstrndx(class, ehdr, 1);
/*
* The zeroth index of the section header is reserved and is rarely used.
* Set the section header as null (SHN_UNDEF) and move to the next one.
*/
shdr = data + elf_hdr_get_e_shoff(class, ehdr);
memset(shdr, 0, elf_size_of_shdr(class));
shdr += elf_size_of_shdr(class);
/* Initialize the string table. */
offset = elf_hdr_get_e_shoff(class, ehdr) +
elf_size_of_shdr(class) * elf_hdr_get_e_shnum(class, ehdr);
memset(data + offset, 0, strtbl_size);
/* Fill in the string table section header. */
memset(shdr, 0, elf_size_of_shdr(class));
elf_shdr_set_sh_type(class, shdr, SHT_STRTAB);
elf_shdr_set_sh_offset(class, shdr, offset);
elf_shdr_set_sh_size(class, shdr, strtbl_size);
elf_shdr_set_sh_entsize(class, shdr, 0);
elf_shdr_set_sh_flags(class, shdr, 0);
elf_shdr_set_sh_name(class, shdr, elf_strtbl_add(str_tbl, ehdr, class, &strtbl_index));
offset += elf_shdr_get_sh_size(class, shdr);
shdr += elf_size_of_shdr(class);
list_for_each_entry(segment, &rproc->dump_segments, node) {
memset(shdr, 0, elf_size_of_shdr(class));
elf_shdr_set_sh_type(class, shdr, SHT_PROGBITS);
elf_shdr_set_sh_offset(class, shdr, offset);
elf_shdr_set_sh_addr(class, shdr, segment->da);
elf_shdr_set_sh_size(class, shdr, segment->size);
elf_shdr_set_sh_entsize(class, shdr, 0);
elf_shdr_set_sh_flags(class, shdr, SHF_WRITE);
elf_shdr_set_sh_name(class, shdr,
elf_strtbl_add(segment->priv, ehdr, class, &strtbl_index));
/* No need to copy segments for inline dumps */
if (dump_conf == RPROC_COREDUMP_ENABLED)
rproc_copy_segment(rproc, data + offset, segment, 0,
segment->size);
offset += elf_shdr_get_sh_size(class, shdr);
shdr += elf_size_of_shdr(class);
}
if (dump_conf == RPROC_COREDUMP_ENABLED) {
dev_coredumpv(&rproc->dev, data, data_size, GFP_KERNEL);
return;
}
/* Initialize the dump state struct to be used by rproc_coredump_read */
dump_state.rproc = rproc;
dump_state.header = data;
init_completion(&dump_state.dump_done);
dev_coredumpm(&rproc->dev, NULL, &dump_state, data_size, GFP_KERNEL,
rproc_coredump_read, rproc_coredump_free);
/*
* Wait until the dump is read and free is called. Data is freed
* by devcoredump framework automatically after 5 minutes.
*/
wait_for_completion(&dump_state.dump_done);
}
EXPORT_SYMBOL(rproc_coredump_using_sections);
|
linux-master
|
drivers/remoteproc/remoteproc_coredump.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2016-2018 Linaro Ltd.
* Copyright (C) 2014 Sony Mobile Communications AB
* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/soc/qcom/mdt_loader.h>
#include "qcom_common.h"
#include "qcom_pil_info.h"
#include "qcom_q6v5.h"
#define WCSS_CRASH_REASON 421
/* Q6SS Register Offsets */
#define Q6SS_RESET_REG 0x014
#define Q6SS_GFMUX_CTL_REG 0x020
#define Q6SS_PWR_CTL_REG 0x030
#define Q6SS_MEM_PWR_CTL 0x0B0
#define Q6SS_STRAP_ACC 0x110
#define Q6SS_CGC_OVERRIDE 0x034
#define Q6SS_BCR_REG 0x6000
/* AXI Halt Register Offsets */
#define AXI_HALTREQ_REG 0x0
#define AXI_HALTACK_REG 0x4
#define AXI_IDLE_REG 0x8
#define HALT_ACK_TIMEOUT_MS 100
/* Q6SS_RESET */
#define Q6SS_STOP_CORE BIT(0)
#define Q6SS_CORE_ARES BIT(1)
#define Q6SS_BUS_ARES_ENABLE BIT(2)
/* Q6SS_BRC_RESET */
#define Q6SS_BRC_BLK_ARES BIT(0)
/* Q6SS_GFMUX_CTL */
#define Q6SS_CLK_ENABLE BIT(1)
#define Q6SS_SWITCH_CLK_SRC BIT(8)
/* Q6SS_PWR_CTL */
#define Q6SS_L2DATA_STBY_N BIT(18)
#define Q6SS_SLP_RET_N BIT(19)
#define Q6SS_CLAMP_IO BIT(20)
#define QDSS_BHS_ON BIT(21)
#define QDSS_Q6_MEMORIES GENMASK(15, 0)
/* Q6SS parameters */
#define Q6SS_LDO_BYP BIT(25)
#define Q6SS_BHS_ON BIT(24)
#define Q6SS_CLAMP_WL BIT(21)
#define Q6SS_CLAMP_QMC_MEM BIT(22)
#define HALT_CHECK_MAX_LOOPS 200
#define Q6SS_XO_CBCR GENMASK(5, 3)
#define Q6SS_SLEEP_CBCR GENMASK(5, 2)
/* Q6SS config/status registers */
#define TCSR_GLOBAL_CFG0 0x0
#define TCSR_GLOBAL_CFG1 0x4
#define SSCAON_CONFIG 0x8
#define SSCAON_STATUS 0xc
#define Q6SS_BHS_STATUS 0x78
#define Q6SS_RST_EVB 0x10
#define BHS_EN_REST_ACK BIT(0)
#define SSCAON_ENABLE BIT(13)
#define SSCAON_BUS_EN BIT(15)
#define SSCAON_BUS_MUX_MASK GENMASK(18, 16)
#define MEM_BANKS 19
#define TCSR_WCSS_CLK_MASK 0x1F
#define TCSR_WCSS_CLK_ENABLE 0x14
#define MAX_HALT_REG 3
enum {
WCSS_IPQ8074,
WCSS_QCS404,
};
struct wcss_data {
const char *firmware_name;
unsigned int crash_reason_smem;
u32 version;
bool aon_reset_required;
bool wcss_q6_reset_required;
const char *ssr_name;
const char *sysmon_name;
int ssctl_id;
const struct rproc_ops *ops;
bool requires_force_stop;
};
struct q6v5_wcss {
struct device *dev;
void __iomem *reg_base;
void __iomem *rmb_base;
struct regmap *halt_map;
u32 halt_q6;
u32 halt_wcss;
u32 halt_nc;
struct clk *xo;
struct clk *ahbfabric_cbcr_clk;
struct clk *gcc_abhs_cbcr;
struct clk *gcc_axim_cbcr;
struct clk *lcc_csr_cbcr;
struct clk *ahbs_cbcr;
struct clk *tcm_slave_cbcr;
struct clk *qdsp6ss_abhm_cbcr;
struct clk *qdsp6ss_sleep_cbcr;
struct clk *qdsp6ss_axim_cbcr;
struct clk *qdsp6ss_xo_cbcr;
struct clk *qdsp6ss_core_gfmux;
struct clk *lcc_bcr_sleep;
struct regulator *cx_supply;
struct qcom_sysmon *sysmon;
struct reset_control *wcss_aon_reset;
struct reset_control *wcss_reset;
struct reset_control *wcss_q6_reset;
struct reset_control *wcss_q6_bcr_reset;
struct qcom_q6v5 q6v5;
phys_addr_t mem_phys;
phys_addr_t mem_reloc;
void *mem_region;
size_t mem_size;
unsigned int crash_reason_smem;
u32 version;
bool requires_force_stop;
struct qcom_rproc_glink glink_subdev;
struct qcom_rproc_ssr ssr_subdev;
};
static int q6v5_wcss_reset(struct q6v5_wcss *wcss)
{
int ret;
u32 val;
int i;
/* Assert resets, stop core */
val = readl(wcss->reg_base + Q6SS_RESET_REG);
val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
writel(val, wcss->reg_base + Q6SS_RESET_REG);
/* BHS requires the xo cbcr to be enabled */
val = readl(wcss->reg_base + Q6SS_XO_CBCR);
val |= 0x1;
writel(val, wcss->reg_base + Q6SS_XO_CBCR);
/* Read CLKOFF bit to go low indicating CLK is enabled */
ret = readl_poll_timeout(wcss->reg_base + Q6SS_XO_CBCR,
val, !(val & BIT(31)), 1,
HALT_CHECK_MAX_LOOPS);
if (ret) {
dev_err(wcss->dev,
"xo cbcr enabling timed out (rc:%d)\n", ret);
return ret;
}
/* Enable power block headswitch and wait for it to stabilize */
val = readl(wcss->reg_base + Q6SS_PWR_CTL_REG);
val |= Q6SS_BHS_ON;
writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
udelay(1);
/* Put LDO in bypass mode */
val |= Q6SS_LDO_BYP;
writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
/* Deassert Q6 compiler memory clamp */
val = readl(wcss->reg_base + Q6SS_PWR_CTL_REG);
val &= ~Q6SS_CLAMP_QMC_MEM;
writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
/* Deassert memory peripheral sleep and L2 memory standby */
val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N;
writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
/* Turn on L1, L2, ETB and JU memories 1 at a time */
val = readl(wcss->reg_base + Q6SS_MEM_PWR_CTL);
for (i = MEM_BANKS; i >= 0; i--) {
val |= BIT(i);
writel(val, wcss->reg_base + Q6SS_MEM_PWR_CTL);
/*
* Read back value to ensure the write is done then
* wait for 1us for both memory peripheral and data
* array to turn on.
*/
val |= readl(wcss->reg_base + Q6SS_MEM_PWR_CTL);
udelay(1);
}
/* Remove word line clamp */
val = readl(wcss->reg_base + Q6SS_PWR_CTL_REG);
val &= ~Q6SS_CLAMP_WL;
writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
/* Remove IO clamp */
val &= ~Q6SS_CLAMP_IO;
writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
/* Bring core out of reset */
val = readl(wcss->reg_base + Q6SS_RESET_REG);
val &= ~Q6SS_CORE_ARES;
writel(val, wcss->reg_base + Q6SS_RESET_REG);
/* Turn on core clock */
val = readl(wcss->reg_base + Q6SS_GFMUX_CTL_REG);
val |= Q6SS_CLK_ENABLE;
writel(val, wcss->reg_base + Q6SS_GFMUX_CTL_REG);
/* Start core execution */
val = readl(wcss->reg_base + Q6SS_RESET_REG);
val &= ~Q6SS_STOP_CORE;
writel(val, wcss->reg_base + Q6SS_RESET_REG);
return 0;
}
static int q6v5_wcss_start(struct rproc *rproc)
{
struct q6v5_wcss *wcss = rproc->priv;
int ret;
qcom_q6v5_prepare(&wcss->q6v5);
/* Release Q6 and WCSS reset */
ret = reset_control_deassert(wcss->wcss_reset);
if (ret) {
dev_err(wcss->dev, "wcss_reset failed\n");
return ret;
}
ret = reset_control_deassert(wcss->wcss_q6_reset);
if (ret) {
dev_err(wcss->dev, "wcss_q6_reset failed\n");
goto wcss_reset;
}
/* Lithium configuration - clock gating and bus arbitration */
ret = regmap_update_bits(wcss->halt_map,
wcss->halt_nc + TCSR_GLOBAL_CFG0,
TCSR_WCSS_CLK_MASK,
TCSR_WCSS_CLK_ENABLE);
if (ret)
goto wcss_q6_reset;
ret = regmap_update_bits(wcss->halt_map,
wcss->halt_nc + TCSR_GLOBAL_CFG1,
1, 0);
if (ret)
goto wcss_q6_reset;
/* Write bootaddr to EVB so that Q6WCSS will jump there after reset */
writel(rproc->bootaddr >> 4, wcss->reg_base + Q6SS_RST_EVB);
ret = q6v5_wcss_reset(wcss);
if (ret)
goto wcss_q6_reset;
ret = qcom_q6v5_wait_for_start(&wcss->q6v5, 5 * HZ);
if (ret == -ETIMEDOUT)
dev_err(wcss->dev, "start timed out\n");
return ret;
wcss_q6_reset:
reset_control_assert(wcss->wcss_q6_reset);
wcss_reset:
reset_control_assert(wcss->wcss_reset);
return ret;
}
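/*
 * QCS404 power-on sequence: toggle the WCSS reset, enable the Q6SSTOP bus
 * and memory clocks, program the QDSP6SS power control register, power up
 * the memory banks one at a time and finally enable the core clock at the
 * GFMUX.
 */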
static int q6v5_wcss_qcs404_power_on(struct q6v5_wcss *wcss)
{
unsigned long val;
int ret, idx;
/* Toggle the restart */
reset_control_assert(wcss->wcss_reset);
usleep_range(200, 300);
reset_control_deassert(wcss->wcss_reset);
usleep_range(200, 300);
/* Enable GCC_WDSP_Q6SS_AHBS_CBCR clock */
ret = clk_prepare_enable(wcss->gcc_abhs_cbcr);
if (ret)
return ret;
/* Remove reset to the WCNSS QDSP6SS */
reset_control_deassert(wcss->wcss_q6_bcr_reset);
/* Enable Q6SSTOP_AHBFABRIC_CBCR clock */
ret = clk_prepare_enable(wcss->ahbfabric_cbcr_clk);
if (ret)
goto disable_gcc_abhs_cbcr_clk;
/* Enable the LCCCSR CBC clock, Q6SSTOP_Q6SSTOP_LCC_CSR_CBCR clock */
ret = clk_prepare_enable(wcss->lcc_csr_cbcr);
if (ret)
goto disable_ahbfabric_cbcr_clk;
/* Enable the Q6AHBS CBC, Q6SSTOP_Q6SS_AHBS_CBCR clock */
ret = clk_prepare_enable(wcss->ahbs_cbcr);
if (ret)
goto disable_csr_cbcr_clk;
/* Enable the TCM slave CBC, Q6SSTOP_Q6SS_TCM_SLAVE_CBCR clock */
ret = clk_prepare_enable(wcss->tcm_slave_cbcr);
if (ret)
goto disable_ahbs_cbcr_clk;
/* Enable the Q6SS AHB master CBC, Q6SSTOP_Q6SS_AHBM_CBCR clock */
ret = clk_prepare_enable(wcss->qdsp6ss_abhm_cbcr);
if (ret)
goto disable_tcm_slave_cbcr_clk;
/* Enable the Q6SS AXI master CBC, Q6SSTOP_Q6SS_AXIM_CBCR clock */
ret = clk_prepare_enable(wcss->qdsp6ss_axim_cbcr);
if (ret)
goto disable_abhm_cbcr_clk;
/* Enable the Q6SS XO CBC */
val = readl(wcss->reg_base + Q6SS_XO_CBCR);
val |= BIT(0);
writel(val, wcss->reg_base + Q6SS_XO_CBCR);
	/* Poll the CLKOFF bit until it goes low, indicating the clock is enabled */
ret = readl_poll_timeout(wcss->reg_base + Q6SS_XO_CBCR,
val, !(val & BIT(31)), 1,
HALT_CHECK_MAX_LOOPS);
if (ret) {
dev_err(wcss->dev,
"xo cbcr enabling timed out (rc:%d)\n", ret);
goto disable_xo_cbcr_clk;
}
writel(0, wcss->reg_base + Q6SS_CGC_OVERRIDE);
	/* Enable the QDSP6 sleep clock */
val = readl(wcss->reg_base + Q6SS_SLEEP_CBCR);
val |= BIT(0);
writel(val, wcss->reg_base + Q6SS_SLEEP_CBCR);
	/* Enable the Q6 AXI clock, GCC_WDSP_Q6SS_AXIM_CBCR */
ret = clk_prepare_enable(wcss->gcc_axim_cbcr);
if (ret)
goto disable_sleep_cbcr_clk;
/* Assert resets, stop core */
val = readl(wcss->reg_base + Q6SS_RESET_REG);
val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
writel(val, wcss->reg_base + Q6SS_RESET_REG);
/* Program the QDSP6SS PWR_CTL register */
writel(0x01700000, wcss->reg_base + Q6SS_PWR_CTL_REG);
writel(0x03700000, wcss->reg_base + Q6SS_PWR_CTL_REG);
writel(0x03300000, wcss->reg_base + Q6SS_PWR_CTL_REG);
writel(0x033C0000, wcss->reg_base + Q6SS_PWR_CTL_REG);
/*
* Enable memories by turning on the QDSP6 memory foot/head switch, one
* bank at a time to avoid in-rush current
*/
for (idx = 28; idx >= 0; idx--) {
writel((readl(wcss->reg_base + Q6SS_MEM_PWR_CTL) |
(1 << idx)), wcss->reg_base + Q6SS_MEM_PWR_CTL);
}
writel(0x031C0000, wcss->reg_base + Q6SS_PWR_CTL_REG);
writel(0x030C0000, wcss->reg_base + Q6SS_PWR_CTL_REG);
val = readl(wcss->reg_base + Q6SS_RESET_REG);
val &= ~Q6SS_CORE_ARES;
writel(val, wcss->reg_base + Q6SS_RESET_REG);
/* Enable the Q6 core clock at the GFM, Q6SSTOP_QDSP6SS_GFMUX_CTL */
val = readl(wcss->reg_base + Q6SS_GFMUX_CTL_REG);
val |= Q6SS_CLK_ENABLE | Q6SS_SWITCH_CLK_SRC;
writel(val, wcss->reg_base + Q6SS_GFMUX_CTL_REG);
/* Enable sleep clock branch needed for BCR circuit */
ret = clk_prepare_enable(wcss->lcc_bcr_sleep);
if (ret)
goto disable_core_gfmux_clk;
return 0;
disable_core_gfmux_clk:
val = readl(wcss->reg_base + Q6SS_GFMUX_CTL_REG);
val &= ~(Q6SS_CLK_ENABLE | Q6SS_SWITCH_CLK_SRC);
writel(val, wcss->reg_base + Q6SS_GFMUX_CTL_REG);
clk_disable_unprepare(wcss->gcc_axim_cbcr);
disable_sleep_cbcr_clk:
val = readl(wcss->reg_base + Q6SS_SLEEP_CBCR);
val &= ~Q6SS_CLK_ENABLE;
writel(val, wcss->reg_base + Q6SS_SLEEP_CBCR);
disable_xo_cbcr_clk:
val = readl(wcss->reg_base + Q6SS_XO_CBCR);
val &= ~Q6SS_CLK_ENABLE;
writel(val, wcss->reg_base + Q6SS_XO_CBCR);
clk_disable_unprepare(wcss->qdsp6ss_axim_cbcr);
disable_abhm_cbcr_clk:
clk_disable_unprepare(wcss->qdsp6ss_abhm_cbcr);
disable_tcm_slave_cbcr_clk:
clk_disable_unprepare(wcss->tcm_slave_cbcr);
disable_ahbs_cbcr_clk:
clk_disable_unprepare(wcss->ahbs_cbcr);
disable_csr_cbcr_clk:
clk_disable_unprepare(wcss->lcc_csr_cbcr);
disable_ahbfabric_cbcr_clk:
clk_disable_unprepare(wcss->ahbfabric_cbcr_clk);
disable_gcc_abhs_cbcr_clk:
clk_disable_unprepare(wcss->gcc_abhs_cbcr);
return ret;
}
static inline int q6v5_wcss_qcs404_reset(struct q6v5_wcss *wcss)
{
unsigned long val;
writel(0x80800000, wcss->reg_base + Q6SS_STRAP_ACC);
/* Start core execution */
val = readl(wcss->reg_base + Q6SS_RESET_REG);
val &= ~Q6SS_STOP_CORE;
writel(val, wcss->reg_base + Q6SS_RESET_REG);
return 0;
}
static int q6v5_qcs404_wcss_start(struct rproc *rproc)
{
struct q6v5_wcss *wcss = rproc->priv;
int ret;
ret = clk_prepare_enable(wcss->xo);
if (ret)
return ret;
ret = regulator_enable(wcss->cx_supply);
if (ret)
goto disable_xo_clk;
qcom_q6v5_prepare(&wcss->q6v5);
ret = q6v5_wcss_qcs404_power_on(wcss);
if (ret) {
dev_err(wcss->dev, "wcss clk_enable failed\n");
goto disable_cx_supply;
}
writel(rproc->bootaddr >> 4, wcss->reg_base + Q6SS_RST_EVB);
q6v5_wcss_qcs404_reset(wcss);
ret = qcom_q6v5_wait_for_start(&wcss->q6v5, 5 * HZ);
if (ret == -ETIMEDOUT) {
dev_err(wcss->dev, "start timed out\n");
goto disable_cx_supply;
}
return 0;
disable_cx_supply:
regulator_disable(wcss->cx_supply);
disable_xo_clk:
clk_disable_unprepare(wcss->xo);
return ret;
}
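/*
 * Request a halt of the given AXI port and wait up to HALT_ACK_TIMEOUT_MS
 * for the acknowledgment; the port remains halted until the next reset.
 */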
static void q6v5_wcss_halt_axi_port(struct q6v5_wcss *wcss,
struct regmap *halt_map,
u32 offset)
{
unsigned long timeout;
unsigned int val;
int ret;
/* Check if we're already idle */
ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
if (!ret && val)
return;
/* Assert halt request */
regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);
/* Wait for halt */
timeout = jiffies + msecs_to_jiffies(HALT_ACK_TIMEOUT_MS);
for (;;) {
ret = regmap_read(halt_map, offset + AXI_HALTACK_REG, &val);
if (ret || val || time_after(jiffies, timeout))
break;
msleep(1);
}
ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
if (ret || !val)
dev_err(wcss->dev, "port failed halt\n");
/* Clear halt request (port will remain halted until reset) */
regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
}
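/*
 * QCS404 shutdown: halt the WCSS AXI port, re-assert the clamps, power off
 * the memories and the BHS, disable the Q6SSTOP clocks and pulse the WCSS
 * reset.
 */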
static int q6v5_qcs404_wcss_shutdown(struct q6v5_wcss *wcss)
{
unsigned long val;
int ret;
q6v5_wcss_halt_axi_port(wcss, wcss->halt_map, wcss->halt_wcss);
/* assert clamps to avoid MX current inrush */
val = readl(wcss->reg_base + Q6SS_PWR_CTL_REG);
val |= (Q6SS_CLAMP_IO | Q6SS_CLAMP_WL | Q6SS_CLAMP_QMC_MEM);
writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
/* Disable memories by turning off memory foot/headswitch */
writel((readl(wcss->reg_base + Q6SS_MEM_PWR_CTL) &
~QDSS_Q6_MEMORIES),
wcss->reg_base + Q6SS_MEM_PWR_CTL);
/* Clear the BHS_ON bit */
val = readl(wcss->reg_base + Q6SS_PWR_CTL_REG);
val &= ~Q6SS_BHS_ON;
writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
clk_disable_unprepare(wcss->ahbfabric_cbcr_clk);
clk_disable_unprepare(wcss->lcc_csr_cbcr);
clk_disable_unprepare(wcss->tcm_slave_cbcr);
clk_disable_unprepare(wcss->qdsp6ss_abhm_cbcr);
clk_disable_unprepare(wcss->qdsp6ss_axim_cbcr);
val = readl(wcss->reg_base + Q6SS_SLEEP_CBCR);
val &= ~BIT(0);
writel(val, wcss->reg_base + Q6SS_SLEEP_CBCR);
val = readl(wcss->reg_base + Q6SS_XO_CBCR);
val &= ~BIT(0);
writel(val, wcss->reg_base + Q6SS_XO_CBCR);
clk_disable_unprepare(wcss->ahbs_cbcr);
clk_disable_unprepare(wcss->lcc_bcr_sleep);
val = readl(wcss->reg_base + Q6SS_GFMUX_CTL_REG);
val &= ~(Q6SS_CLK_ENABLE | Q6SS_SWITCH_CLK_SRC);
writel(val, wcss->reg_base + Q6SS_GFMUX_CTL_REG);
clk_disable_unprepare(wcss->gcc_abhs_cbcr);
ret = reset_control_assert(wcss->wcss_reset);
if (ret) {
dev_err(wcss->dev, "wcss_reset failed\n");
return ret;
}
usleep_range(200, 300);
ret = reset_control_deassert(wcss->wcss_reset);
if (ret) {
dev_err(wcss->dev, "wcss_reset failed\n");
return ret;
}
usleep_range(200, 300);
clk_disable_unprepare(wcss->gcc_axim_cbcr);
return 0;
}
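/*
 * IPQ8074 WCSS power-down: halt the WCSS AXI port, configure SSCAON for
 * power-down, wait for SSCAON_STATUS to report the expected state and
 * assert the WCSS resets.
 */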
static int q6v5_wcss_powerdown(struct q6v5_wcss *wcss)
{
int ret;
u32 val;
/* 1 - Assert WCSS/Q6 HALTREQ */
q6v5_wcss_halt_axi_port(wcss, wcss->halt_map, wcss->halt_wcss);
/* 2 - Enable WCSSAON_CONFIG */
val = readl(wcss->rmb_base + SSCAON_CONFIG);
val |= SSCAON_ENABLE;
writel(val, wcss->rmb_base + SSCAON_CONFIG);
/* 3 - Set SSCAON_CONFIG */
val |= SSCAON_BUS_EN;
val &= ~SSCAON_BUS_MUX_MASK;
writel(val, wcss->rmb_base + SSCAON_CONFIG);
	/* 4 - Set SSCAON_CONFIG bit 1 */
val |= BIT(1);
writel(val, wcss->rmb_base + SSCAON_CONFIG);
/* 5 - wait for SSCAON_STATUS */
ret = readl_poll_timeout(wcss->rmb_base + SSCAON_STATUS,
val, (val & 0xffff) == 0x400, 1000,
HALT_CHECK_MAX_LOOPS);
if (ret) {
dev_err(wcss->dev,
"can't get SSCAON_STATUS rc:%d)\n", ret);
return ret;
}
/* 6 - De-assert WCSS_AON reset */
reset_control_assert(wcss->wcss_aon_reset);
	/* 7 - Disable WCSSAON_CONFIG by clearing SSCAON_ENABLE */
val = readl(wcss->rmb_base + SSCAON_CONFIG);
val &= ~SSCAON_ENABLE;
writel(val, wcss->rmb_base + SSCAON_CONFIG);
/* 8 - De-assert WCSS/Q6 HALTREQ */
reset_control_assert(wcss->wcss_reset);
return 0;
}
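/*
 * IPQ8074 Q6 power-down: halt the Q6 AXI port, gate the core clock, assert
 * the clamps, switch off the memory banks and the BHS, then assert the
 * WCSS and Q6 resets.
 */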
static int q6v5_q6_powerdown(struct q6v5_wcss *wcss)
{
int ret;
u32 val;
int i;
/* 1 - Halt Q6 bus interface */
q6v5_wcss_halt_axi_port(wcss, wcss->halt_map, wcss->halt_q6);
/* 2 - Disable Q6 Core clock */
val = readl(wcss->reg_base + Q6SS_GFMUX_CTL_REG);
val &= ~Q6SS_CLK_ENABLE;
writel(val, wcss->reg_base + Q6SS_GFMUX_CTL_REG);
/* 3 - Clamp I/O */
val = readl(wcss->reg_base + Q6SS_PWR_CTL_REG);
val |= Q6SS_CLAMP_IO;
writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
/* 4 - Clamp WL */
val |= QDSS_BHS_ON;
writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
	/* 5 - Clear L2 data standby */
val &= ~Q6SS_L2DATA_STBY_N;
writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
/* 6 - Clear Sleep RTN */
val &= ~Q6SS_SLP_RET_N;
writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
/* 7 - turn off Q6 memory foot/head switch one bank at a time */
for (i = 0; i < 20; i++) {
val = readl(wcss->reg_base + Q6SS_MEM_PWR_CTL);
val &= ~BIT(i);
writel(val, wcss->reg_base + Q6SS_MEM_PWR_CTL);
mdelay(1);
}
/* 8 - Assert QMC memory RTN */
val = readl(wcss->reg_base + Q6SS_PWR_CTL_REG);
val |= Q6SS_CLAMP_QMC_MEM;
writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
/* 9 - Turn off BHS */
val &= ~Q6SS_BHS_ON;
writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
udelay(1);
/* 10 - Wait till BHS Reset is done */
ret = readl_poll_timeout(wcss->reg_base + Q6SS_BHS_STATUS,
val, !(val & BHS_EN_REST_ACK), 1000,
HALT_CHECK_MAX_LOOPS);
if (ret) {
dev_err(wcss->dev, "BHS_STATUS not OFF (rc:%d)\n", ret);
return ret;
}
/* 11 - Assert WCSS reset */
reset_control_assert(wcss->wcss_reset);
/* 12 - Assert Q6 reset */
reset_control_assert(wcss->wcss_q6_reset);
return 0;
}
static int q6v5_wcss_stop(struct rproc *rproc)
{
struct q6v5_wcss *wcss = rproc->priv;
int ret;
/* WCSS powerdown */
if (wcss->requires_force_stop) {
ret = qcom_q6v5_request_stop(&wcss->q6v5, NULL);
if (ret == -ETIMEDOUT) {
dev_err(wcss->dev, "timed out on wait\n");
return ret;
}
}
if (wcss->version == WCSS_QCS404) {
ret = q6v5_qcs404_wcss_shutdown(wcss);
if (ret)
return ret;
} else {
ret = q6v5_wcss_powerdown(wcss);
if (ret)
return ret;
/* Q6 Power down */
ret = q6v5_q6_powerdown(wcss);
if (ret)
return ret;
}
qcom_q6v5_unprepare(&wcss->q6v5);
return 0;
}
static void *q6v5_wcss_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct q6v5_wcss *wcss = rproc->priv;
int offset;
offset = da - wcss->mem_reloc;
if (offset < 0 || offset + len > wcss->mem_size)
return NULL;
return wcss->mem_region + offset;
}
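/* Load the split (MDT) firmware image into the reserved memory region. */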
static int q6v5_wcss_load(struct rproc *rproc, const struct firmware *fw)
{
struct q6v5_wcss *wcss = rproc->priv;
int ret;
ret = qcom_mdt_load_no_init(wcss->dev, fw, rproc->firmware,
0, wcss->mem_region, wcss->mem_phys,
wcss->mem_size, &wcss->mem_reloc);
if (ret)
return ret;
qcom_pil_info_store("wcnss", wcss->mem_phys, wcss->mem_size);
return ret;
}
static const struct rproc_ops q6v5_wcss_ipq8074_ops = {
.start = q6v5_wcss_start,
.stop = q6v5_wcss_stop,
.da_to_va = q6v5_wcss_da_to_va,
.load = q6v5_wcss_load,
.get_boot_addr = rproc_elf_get_boot_addr,
};
static const struct rproc_ops q6v5_wcss_qcs404_ops = {
.start = q6v5_qcs404_wcss_start,
.stop = q6v5_wcss_stop,
.da_to_va = q6v5_wcss_da_to_va,
.load = q6v5_wcss_load,
.get_boot_addr = rproc_elf_get_boot_addr,
.parse_fw = qcom_register_dump_segments,
};
static int q6v5_wcss_init_reset(struct q6v5_wcss *wcss,
const struct wcss_data *desc)
{
struct device *dev = wcss->dev;
if (desc->aon_reset_required) {
wcss->wcss_aon_reset = devm_reset_control_get_exclusive(dev, "wcss_aon_reset");
if (IS_ERR(wcss->wcss_aon_reset)) {
dev_err(wcss->dev, "fail to acquire wcss_aon_reset\n");
return PTR_ERR(wcss->wcss_aon_reset);
}
}
wcss->wcss_reset = devm_reset_control_get_exclusive(dev, "wcss_reset");
if (IS_ERR(wcss->wcss_reset)) {
dev_err(wcss->dev, "unable to acquire wcss_reset\n");
return PTR_ERR(wcss->wcss_reset);
}
if (desc->wcss_q6_reset_required) {
wcss->wcss_q6_reset = devm_reset_control_get_exclusive(dev, "wcss_q6_reset");
if (IS_ERR(wcss->wcss_q6_reset)) {
dev_err(wcss->dev, "unable to acquire wcss_q6_reset\n");
return PTR_ERR(wcss->wcss_q6_reset);
}
}
wcss->wcss_q6_bcr_reset = devm_reset_control_get_exclusive(dev, "wcss_q6_bcr_reset");
if (IS_ERR(wcss->wcss_q6_bcr_reset)) {
dev_err(wcss->dev, "unable to acquire wcss_q6_bcr_reset\n");
return PTR_ERR(wcss->wcss_q6_bcr_reset);
}
return 0;
}
static int q6v5_wcss_init_mmio(struct q6v5_wcss *wcss,
struct platform_device *pdev)
{
unsigned int halt_reg[MAX_HALT_REG] = {0};
struct device_node *syscon;
struct resource *res;
int ret;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6");
if (!res)
return -EINVAL;
wcss->reg_base = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!wcss->reg_base)
return -ENOMEM;
if (wcss->version == WCSS_IPQ8074) {
wcss->rmb_base = devm_platform_ioremap_resource_byname(pdev, "rmb");
if (IS_ERR(wcss->rmb_base))
return PTR_ERR(wcss->rmb_base);
}
syscon = of_parse_phandle(pdev->dev.of_node,
"qcom,halt-regs", 0);
if (!syscon) {
dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
return -EINVAL;
}
wcss->halt_map = syscon_node_to_regmap(syscon);
of_node_put(syscon);
if (IS_ERR(wcss->halt_map))
return PTR_ERR(wcss->halt_map);
ret = of_property_read_variable_u32_array(pdev->dev.of_node,
"qcom,halt-regs",
halt_reg, 0,
MAX_HALT_REG);
if (ret < 0) {
dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
return -EINVAL;
}
wcss->halt_q6 = halt_reg[0];
wcss->halt_wcss = halt_reg[1];
wcss->halt_nc = halt_reg[2];
return 0;
}
static int q6v5_alloc_memory_region(struct q6v5_wcss *wcss)
{
struct reserved_mem *rmem = NULL;
struct device_node *node;
struct device *dev = wcss->dev;
node = of_parse_phandle(dev->of_node, "memory-region", 0);
if (node)
rmem = of_reserved_mem_lookup(node);
of_node_put(node);
if (!rmem) {
dev_err(dev, "unable to acquire memory-region\n");
return -EINVAL;
}
wcss->mem_phys = rmem->base;
wcss->mem_reloc = rmem->base;
wcss->mem_size = rmem->size;
wcss->mem_region = devm_ioremap_wc(dev, wcss->mem_phys, wcss->mem_size);
if (!wcss->mem_region) {
dev_err(dev, "unable to map memory region: %pa+%pa\n",
&rmem->base, &rmem->size);
return -EBUSY;
}
return 0;
}
static int q6v5_wcss_init_clock(struct q6v5_wcss *wcss)
{
int ret;
wcss->xo = devm_clk_get(wcss->dev, "xo");
if (IS_ERR(wcss->xo)) {
ret = PTR_ERR(wcss->xo);
if (ret != -EPROBE_DEFER)
dev_err(wcss->dev, "failed to get xo clock");
return ret;
}
wcss->gcc_abhs_cbcr = devm_clk_get(wcss->dev, "gcc_abhs_cbcr");
if (IS_ERR(wcss->gcc_abhs_cbcr)) {
ret = PTR_ERR(wcss->gcc_abhs_cbcr);
if (ret != -EPROBE_DEFER)
dev_err(wcss->dev, "failed to get gcc abhs clock");
return ret;
}
wcss->gcc_axim_cbcr = devm_clk_get(wcss->dev, "gcc_axim_cbcr");
if (IS_ERR(wcss->gcc_axim_cbcr)) {
ret = PTR_ERR(wcss->gcc_axim_cbcr);
if (ret != -EPROBE_DEFER)
dev_err(wcss->dev, "failed to get gcc axim clock\n");
return ret;
}
wcss->ahbfabric_cbcr_clk = devm_clk_get(wcss->dev,
"lcc_ahbfabric_cbc");
if (IS_ERR(wcss->ahbfabric_cbcr_clk)) {
ret = PTR_ERR(wcss->ahbfabric_cbcr_clk);
if (ret != -EPROBE_DEFER)
dev_err(wcss->dev, "failed to get ahbfabric clock\n");
return ret;
}
wcss->lcc_csr_cbcr = devm_clk_get(wcss->dev, "tcsr_lcc_cbc");
if (IS_ERR(wcss->lcc_csr_cbcr)) {
ret = PTR_ERR(wcss->lcc_csr_cbcr);
if (ret != -EPROBE_DEFER)
dev_err(wcss->dev, "failed to get csr cbcr clk\n");
return ret;
}
wcss->ahbs_cbcr = devm_clk_get(wcss->dev,
"lcc_abhs_cbc");
if (IS_ERR(wcss->ahbs_cbcr)) {
ret = PTR_ERR(wcss->ahbs_cbcr);
if (ret != -EPROBE_DEFER)
dev_err(wcss->dev, "failed to get ahbs_cbcr clk\n");
return ret;
}
wcss->tcm_slave_cbcr = devm_clk_get(wcss->dev,
"lcc_tcm_slave_cbc");
if (IS_ERR(wcss->tcm_slave_cbcr)) {
ret = PTR_ERR(wcss->tcm_slave_cbcr);
if (ret != -EPROBE_DEFER)
dev_err(wcss->dev, "failed to get tcm cbcr clk\n");
return ret;
}
wcss->qdsp6ss_abhm_cbcr = devm_clk_get(wcss->dev, "lcc_abhm_cbc");
if (IS_ERR(wcss->qdsp6ss_abhm_cbcr)) {
ret = PTR_ERR(wcss->qdsp6ss_abhm_cbcr);
if (ret != -EPROBE_DEFER)
dev_err(wcss->dev, "failed to get abhm cbcr clk\n");
return ret;
}
wcss->qdsp6ss_axim_cbcr = devm_clk_get(wcss->dev, "lcc_axim_cbc");
if (IS_ERR(wcss->qdsp6ss_axim_cbcr)) {
ret = PTR_ERR(wcss->qdsp6ss_axim_cbcr);
if (ret != -EPROBE_DEFER)
dev_err(wcss->dev, "failed to get axim cbcr clk\n");
return ret;
}
wcss->lcc_bcr_sleep = devm_clk_get(wcss->dev, "lcc_bcr_sleep");
if (IS_ERR(wcss->lcc_bcr_sleep)) {
ret = PTR_ERR(wcss->lcc_bcr_sleep);
if (ret != -EPROBE_DEFER)
dev_err(wcss->dev, "failed to get bcr cbcr clk\n");
return ret;
}
return 0;
}
static int q6v5_wcss_init_regulator(struct q6v5_wcss *wcss)
{
wcss->cx_supply = devm_regulator_get(wcss->dev, "cx");
if (IS_ERR(wcss->cx_supply))
return PTR_ERR(wcss->cx_supply);
regulator_set_load(wcss->cx_supply, 100000);
return 0;
}
static int q6v5_wcss_probe(struct platform_device *pdev)
{
const struct wcss_data *desc;
struct q6v5_wcss *wcss;
struct rproc *rproc;
int ret;
desc = device_get_match_data(&pdev->dev);
if (!desc)
return -EINVAL;
rproc = rproc_alloc(&pdev->dev, pdev->name, desc->ops,
desc->firmware_name, sizeof(*wcss));
if (!rproc) {
dev_err(&pdev->dev, "failed to allocate rproc\n");
return -ENOMEM;
}
wcss = rproc->priv;
wcss->dev = &pdev->dev;
	wcss->version = desc->version;
wcss->requires_force_stop = desc->requires_force_stop;
ret = q6v5_wcss_init_mmio(wcss, pdev);
if (ret)
goto free_rproc;
ret = q6v5_alloc_memory_region(wcss);
if (ret)
goto free_rproc;
if (wcss->version == WCSS_QCS404) {
ret = q6v5_wcss_init_clock(wcss);
if (ret)
goto free_rproc;
ret = q6v5_wcss_init_regulator(wcss);
if (ret)
goto free_rproc;
}
ret = q6v5_wcss_init_reset(wcss, desc);
if (ret)
goto free_rproc;
ret = qcom_q6v5_init(&wcss->q6v5, pdev, rproc, desc->crash_reason_smem, NULL, NULL);
if (ret)
goto free_rproc;
qcom_add_glink_subdev(rproc, &wcss->glink_subdev, "q6wcss");
qcom_add_ssr_subdev(rproc, &wcss->ssr_subdev, "q6wcss");
if (desc->ssctl_id)
wcss->sysmon = qcom_add_sysmon_subdev(rproc,
desc->sysmon_name,
desc->ssctl_id);
ret = rproc_add(rproc);
if (ret)
goto free_rproc;
platform_set_drvdata(pdev, rproc);
return 0;
free_rproc:
rproc_free(rproc);
return ret;
}
static void q6v5_wcss_remove(struct platform_device *pdev)
{
struct rproc *rproc = platform_get_drvdata(pdev);
struct q6v5_wcss *wcss = rproc->priv;
qcom_q6v5_deinit(&wcss->q6v5);
rproc_del(rproc);
rproc_free(rproc);
}
static const struct wcss_data wcss_ipq8074_res_init = {
.firmware_name = "IPQ8074/q6_fw.mdt",
.crash_reason_smem = WCSS_CRASH_REASON,
.aon_reset_required = true,
.wcss_q6_reset_required = true,
.ops = &q6v5_wcss_ipq8074_ops,
.requires_force_stop = true,
};
static const struct wcss_data wcss_qcs404_res_init = {
.crash_reason_smem = WCSS_CRASH_REASON,
.firmware_name = "wcnss.mdt",
.version = WCSS_QCS404,
.aon_reset_required = false,
.wcss_q6_reset_required = false,
.ssr_name = "mpss",
.sysmon_name = "wcnss",
.ssctl_id = 0x12,
.ops = &q6v5_wcss_qcs404_ops,
.requires_force_stop = false,
};
static const struct of_device_id q6v5_wcss_of_match[] = {
{ .compatible = "qcom,ipq8074-wcss-pil", .data = &wcss_ipq8074_res_init },
{ .compatible = "qcom,qcs404-wcss-pil", .data = &wcss_qcs404_res_init },
{ },
};
MODULE_DEVICE_TABLE(of, q6v5_wcss_of_match);
static struct platform_driver q6v5_wcss_driver = {
.probe = q6v5_wcss_probe,
.remove_new = q6v5_wcss_remove,
.driver = {
.name = "qcom-q6v5-wcss-pil",
.of_match_table = q6v5_wcss_of_match,
},
};
module_platform_driver(q6v5_wcss_driver);
MODULE_DESCRIPTION("Hexagon WCSS Peripheral Image Loader");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/remoteproc/qcom_q6v5_wcss.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Qualcomm Wireless Connectivity Subsystem Peripheral Image Loader
*
* Copyright (C) 2016 Linaro Ltd
* Copyright (C) 2014 Sony Mobile Communications AB
* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/regulator/consumer.h>
#include <linux/remoteproc.h>
#include <linux/soc/qcom/mdt_loader.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>
#include "qcom_common.h"
#include "remoteproc_internal.h"
#include "qcom_pil_info.h"
#include "qcom_wcnss.h"
#define WCNSS_CRASH_REASON_SMEM 422
#define WCNSS_FIRMWARE_NAME "wcnss.mdt"
#define WCNSS_PAS_ID 6
#define WCNSS_SSCTL_ID 0x13
#define WCNSS_SPARE_NVBIN_DLND BIT(25)
#define WCNSS_PMU_IRIS_XO_CFG BIT(3)
#define WCNSS_PMU_IRIS_XO_EN BIT(4)
#define WCNSS_PMU_GC_BUS_MUX_SEL_TOP BIT(5)
#define WCNSS_PMU_IRIS_XO_CFG_STS BIT(6) /* 1: in progress, 0: done */
#define WCNSS_PMU_IRIS_RESET BIT(7)
#define WCNSS_PMU_IRIS_RESET_STS BIT(8) /* 1: in progress, 0: done */
#define WCNSS_PMU_IRIS_XO_READ BIT(9)
#define WCNSS_PMU_IRIS_XO_READ_STS BIT(10)
#define WCNSS_PMU_XO_MODE_MASK GENMASK(2, 1)
#define WCNSS_PMU_XO_MODE_19p2 0
#define WCNSS_PMU_XO_MODE_48 3
#define WCNSS_MAX_PDS 2
struct wcnss_data {
size_t pmu_offset;
size_t spare_offset;
const char *pd_names[WCNSS_MAX_PDS];
const struct wcnss_vreg_info *vregs;
size_t num_vregs, num_pd_vregs;
};
struct qcom_wcnss {
struct device *dev;
struct rproc *rproc;
void __iomem *pmu_cfg;
void __iomem *spare_out;
bool use_48mhz_xo;
int wdog_irq;
int fatal_irq;
int ready_irq;
int handover_irq;
int stop_ack_irq;
struct qcom_smem_state *state;
unsigned stop_bit;
struct mutex iris_lock;
struct qcom_iris *iris;
struct device *pds[WCNSS_MAX_PDS];
size_t num_pds;
struct regulator_bulk_data *vregs;
size_t num_vregs;
struct completion start_done;
struct completion stop_done;
phys_addr_t mem_phys;
phys_addr_t mem_reloc;
void *mem_region;
size_t mem_size;
struct qcom_rproc_subdev smd_subdev;
struct qcom_sysmon *sysmon;
};
static const struct wcnss_data riva_data = {
.pmu_offset = 0x28,
.spare_offset = 0xb4,
.vregs = (struct wcnss_vreg_info[]) {
{ "vddmx", 1050000, 1150000, 0 },
{ "vddcx", 1050000, 1150000, 0 },
{ "vddpx", 1800000, 1800000, 0 },
},
.num_vregs = 3,
};
static const struct wcnss_data pronto_v1_data = {
.pmu_offset = 0x1004,
.spare_offset = 0x1088,
.pd_names = { "mx", "cx" },
.vregs = (struct wcnss_vreg_info[]) {
{ "vddmx", 950000, 1150000, 0 },
{ "vddcx", .super_turbo = true},
{ "vddpx", 1800000, 1800000, 0 },
},
.num_pd_vregs = 2,
.num_vregs = 1,
};
static const struct wcnss_data pronto_v2_data = {
.pmu_offset = 0x1004,
.spare_offset = 0x1088,
.pd_names = { "mx", "cx" },
.vregs = (struct wcnss_vreg_info[]) {
{ "vddmx", 1287500, 1287500, 0 },
{ "vddcx", .super_turbo = true },
{ "vddpx", 1800000, 1800000, 0 },
},
.num_pd_vregs = 2,
.num_vregs = 1,
};
static const struct wcnss_data pronto_v3_data = {
.pmu_offset = 0x1004,
.spare_offset = 0x1088,
.pd_names = { "mx", "cx" },
.vregs = (struct wcnss_vreg_info[]) {
{ "vddpx", 1800000, 1800000, 0 },
},
.num_vregs = 1,
};
static int wcnss_load(struct rproc *rproc, const struct firmware *fw)
{
struct qcom_wcnss *wcnss = rproc->priv;
int ret;
ret = qcom_mdt_load(wcnss->dev, fw, rproc->firmware, WCNSS_PAS_ID,
wcnss->mem_region, wcnss->mem_phys,
wcnss->mem_size, &wcnss->mem_reloc);
if (ret)
return ret;
qcom_pil_info_store("wcnss", wcnss->mem_phys, wcnss->mem_size);
return 0;
}
static void wcnss_indicate_nv_download(struct qcom_wcnss *wcnss)
{
u32 val;
/* Indicate NV download capability */
val = readl(wcnss->spare_out);
val |= WCNSS_SPARE_NVBIN_DLND;
writel(val, wcnss->spare_out);
}
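/*
 * Configure the IRIS RF module through the PMU config register: select the
 * XO mode (19.2 MHz or 48 MHz), pulse the IRIS reset and run the XO
 * configuration sequence before the firmware is released.
 */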
static void wcnss_configure_iris(struct qcom_wcnss *wcnss)
{
u32 val;
/* Clear PMU cfg register */
writel(0, wcnss->pmu_cfg);
val = WCNSS_PMU_GC_BUS_MUX_SEL_TOP | WCNSS_PMU_IRIS_XO_EN;
writel(val, wcnss->pmu_cfg);
/* Clear XO_MODE */
val &= ~WCNSS_PMU_XO_MODE_MASK;
if (wcnss->use_48mhz_xo)
val |= WCNSS_PMU_XO_MODE_48 << 1;
else
val |= WCNSS_PMU_XO_MODE_19p2 << 1;
writel(val, wcnss->pmu_cfg);
/* Reset IRIS */
val |= WCNSS_PMU_IRIS_RESET;
writel(val, wcnss->pmu_cfg);
/* Wait for PMU.iris_reg_reset_sts */
while (readl(wcnss->pmu_cfg) & WCNSS_PMU_IRIS_RESET_STS)
cpu_relax();
/* Clear IRIS reset */
val &= ~WCNSS_PMU_IRIS_RESET;
writel(val, wcnss->pmu_cfg);
/* Start IRIS XO configuration */
val |= WCNSS_PMU_IRIS_XO_CFG;
writel(val, wcnss->pmu_cfg);
/* Wait for XO configuration to finish */
while (readl(wcnss->pmu_cfg) & WCNSS_PMU_IRIS_XO_CFG_STS)
cpu_relax();
/* Stop IRIS XO configuration */
val &= ~WCNSS_PMU_GC_BUS_MUX_SEL_TOP;
val &= ~WCNSS_PMU_IRIS_XO_CFG;
writel(val, wcnss->pmu_cfg);
/* Add some delay for XO to settle */
msleep(20);
}
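/*
 * Power up the power domains, regulators and IRIS, configure the PMU, then
 * hand the firmware image to TrustZone (PAS) for authentication and
 * release. The resources are dropped again at the end of this function
 * because the handover interrupt fires too early to be useful (see
 * wcnss_handover_interrupt()).
 */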
static int wcnss_start(struct rproc *rproc)
{
struct qcom_wcnss *wcnss = rproc->priv;
int ret, i;
mutex_lock(&wcnss->iris_lock);
if (!wcnss->iris) {
dev_err(wcnss->dev, "no iris registered\n");
ret = -EINVAL;
goto release_iris_lock;
}
for (i = 0; i < wcnss->num_pds; i++) {
dev_pm_genpd_set_performance_state(wcnss->pds[i], INT_MAX);
ret = pm_runtime_get_sync(wcnss->pds[i]);
if (ret < 0) {
pm_runtime_put_noidle(wcnss->pds[i]);
goto disable_pds;
}
}
ret = regulator_bulk_enable(wcnss->num_vregs, wcnss->vregs);
if (ret)
goto disable_pds;
ret = qcom_iris_enable(wcnss->iris);
if (ret)
goto disable_regulators;
wcnss_indicate_nv_download(wcnss);
wcnss_configure_iris(wcnss);
ret = qcom_scm_pas_auth_and_reset(WCNSS_PAS_ID);
if (ret) {
dev_err(wcnss->dev,
"failed to authenticate image and release reset\n");
goto disable_iris;
}
ret = wait_for_completion_timeout(&wcnss->start_done,
msecs_to_jiffies(5000));
if (wcnss->ready_irq > 0 && ret == 0) {
/* We have a ready_irq, but it didn't fire in time. */
dev_err(wcnss->dev, "start timed out\n");
qcom_scm_pas_shutdown(WCNSS_PAS_ID);
ret = -ETIMEDOUT;
goto disable_iris;
}
ret = 0;
disable_iris:
qcom_iris_disable(wcnss->iris);
disable_regulators:
regulator_bulk_disable(wcnss->num_vregs, wcnss->vregs);
disable_pds:
for (i--; i >= 0; i--) {
pm_runtime_put(wcnss->pds[i]);
dev_pm_genpd_set_performance_state(wcnss->pds[i], 0);
}
release_iris_lock:
mutex_unlock(&wcnss->iris_lock);
return ret;
}
static int wcnss_stop(struct rproc *rproc)
{
struct qcom_wcnss *wcnss = rproc->priv;
int ret;
if (wcnss->state) {
qcom_smem_state_update_bits(wcnss->state,
BIT(wcnss->stop_bit),
BIT(wcnss->stop_bit));
ret = wait_for_completion_timeout(&wcnss->stop_done,
msecs_to_jiffies(5000));
if (ret == 0)
dev_err(wcnss->dev, "timed out on wait\n");
qcom_smem_state_update_bits(wcnss->state,
BIT(wcnss->stop_bit),
0);
}
ret = qcom_scm_pas_shutdown(WCNSS_PAS_ID);
if (ret)
dev_err(wcnss->dev, "failed to shutdown: %d\n", ret);
return ret;
}
static void *wcnss_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct qcom_wcnss *wcnss = rproc->priv;
int offset;
offset = da - wcnss->mem_reloc;
if (offset < 0 || offset + len > wcnss->mem_size)
return NULL;
return wcnss->mem_region + offset;
}
static const struct rproc_ops wcnss_ops = {
.start = wcnss_start,
.stop = wcnss_stop,
.da_to_va = wcnss_da_to_va,
.parse_fw = qcom_register_dump_segments,
.load = wcnss_load,
};
static irqreturn_t wcnss_wdog_interrupt(int irq, void *dev)
{
struct qcom_wcnss *wcnss = dev;
rproc_report_crash(wcnss->rproc, RPROC_WATCHDOG);
return IRQ_HANDLED;
}
static irqreturn_t wcnss_fatal_interrupt(int irq, void *dev)
{
struct qcom_wcnss *wcnss = dev;
size_t len;
char *msg;
msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, WCNSS_CRASH_REASON_SMEM, &len);
if (!IS_ERR(msg) && len > 0 && msg[0])
dev_err(wcnss->dev, "fatal error received: %s\n", msg);
rproc_report_crash(wcnss->rproc, RPROC_FATAL_ERROR);
return IRQ_HANDLED;
}
static irqreturn_t wcnss_ready_interrupt(int irq, void *dev)
{
struct qcom_wcnss *wcnss = dev;
complete(&wcnss->start_done);
return IRQ_HANDLED;
}
static irqreturn_t wcnss_handover_interrupt(int irq, void *dev)
{
/*
* XXX: At this point we're supposed to release the resources that we
* have been holding on behalf of the WCNSS. Unfortunately this
* interrupt comes way before the other side seems to be done.
*
	 * So we're currently relying on the ready interrupt firing later than
* this and we just disable the resources at the end of wcnss_start().
*/
return IRQ_HANDLED;
}
static irqreturn_t wcnss_stop_ack_interrupt(int irq, void *dev)
{
struct qcom_wcnss *wcnss = dev;
complete(&wcnss->stop_done);
return IRQ_HANDLED;
}
static int wcnss_init_pds(struct qcom_wcnss *wcnss,
const char * const pd_names[WCNSS_MAX_PDS])
{
int i, ret;
for (i = 0; i < WCNSS_MAX_PDS; i++) {
if (!pd_names[i])
break;
wcnss->pds[i] = dev_pm_domain_attach_by_name(wcnss->dev, pd_names[i]);
if (IS_ERR_OR_NULL(wcnss->pds[i])) {
ret = PTR_ERR(wcnss->pds[i]) ? : -ENODATA;
for (i--; i >= 0; i--)
dev_pm_domain_detach(wcnss->pds[i], false);
return ret;
}
}
wcnss->num_pds = i;
return 0;
}
static void wcnss_release_pds(struct qcom_wcnss *wcnss)
{
int i;
for (i = 0; i < wcnss->num_pds; i++)
dev_pm_domain_detach(wcnss->pds[i], false);
}
static int wcnss_init_regulators(struct qcom_wcnss *wcnss,
const struct wcnss_vreg_info *info,
int num_vregs, int num_pd_vregs)
{
struct regulator_bulk_data *bulk;
int ret;
int i;
/*
	 * If attaching the power domains succeeded, we can skip requesting
* the regulators for the power domains. For old device trees we need to
* reserve extra space to manage them through the regulator interface.
*/
if (wcnss->num_pds)
info += num_pd_vregs;
else
num_vregs += num_pd_vregs;
bulk = devm_kcalloc(wcnss->dev,
num_vregs, sizeof(struct regulator_bulk_data),
GFP_KERNEL);
if (!bulk)
return -ENOMEM;
for (i = 0; i < num_vregs; i++)
bulk[i].supply = info[i].name;
ret = devm_regulator_bulk_get(wcnss->dev, num_vregs, bulk);
if (ret)
return ret;
for (i = 0; i < num_vregs; i++) {
if (info[i].max_voltage)
regulator_set_voltage(bulk[i].consumer,
info[i].min_voltage,
info[i].max_voltage);
if (info[i].load_uA)
regulator_set_load(bulk[i].consumer, info[i].load_uA);
}
wcnss->vregs = bulk;
wcnss->num_vregs = num_vregs;
return 0;
}
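/*
 * Look up an interrupt by name and install a threaded handler for it.
 * Missing optional interrupts are silently ignored (0 is returned); on
 * success the IRQ number is returned.
 */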
static int wcnss_request_irq(struct qcom_wcnss *wcnss,
struct platform_device *pdev,
const char *name,
bool optional,
irq_handler_t thread_fn)
{
int ret;
int irq_number;
ret = platform_get_irq_byname(pdev, name);
if (ret < 0 && optional) {
dev_dbg(&pdev->dev, "no %s IRQ defined, ignoring\n", name);
return 0;
} else if (ret < 0) {
dev_err(&pdev->dev, "no %s IRQ defined\n", name);
return ret;
}
irq_number = ret;
ret = devm_request_threaded_irq(&pdev->dev, ret,
NULL, thread_fn,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
"wcnss", wcnss);
if (ret) {
dev_err(&pdev->dev, "request %s IRQ failed\n", name);
return ret;
}
/* Return the IRQ number if the IRQ was successfully acquired */
return irq_number;
}
static int wcnss_alloc_memory_region(struct qcom_wcnss *wcnss)
{
struct reserved_mem *rmem = NULL;
struct device_node *node;
node = of_parse_phandle(wcnss->dev->of_node, "memory-region", 0);
if (node)
rmem = of_reserved_mem_lookup(node);
of_node_put(node);
if (!rmem) {
dev_err(wcnss->dev, "unable to resolve memory-region\n");
return -EINVAL;
}
wcnss->mem_phys = wcnss->mem_reloc = rmem->base;
wcnss->mem_size = rmem->size;
wcnss->mem_region = devm_ioremap_wc(wcnss->dev, wcnss->mem_phys, wcnss->mem_size);
if (!wcnss->mem_region) {
dev_err(wcnss->dev, "unable to map memory region: %pa+%zx\n",
&rmem->base, wcnss->mem_size);
return -EBUSY;
}
return 0;
}
static int wcnss_probe(struct platform_device *pdev)
{
const char *fw_name = WCNSS_FIRMWARE_NAME;
const struct wcnss_data *data;
struct qcom_wcnss *wcnss;
struct rproc *rproc;
void __iomem *mmio;
int ret;
data = of_device_get_match_data(&pdev->dev);
if (!qcom_scm_is_available())
return -EPROBE_DEFER;
if (!qcom_scm_pas_supported(WCNSS_PAS_ID)) {
dev_err(&pdev->dev, "PAS is not available for WCNSS\n");
return -ENXIO;
}
ret = of_property_read_string(pdev->dev.of_node, "firmware-name",
&fw_name);
if (ret < 0 && ret != -EINVAL)
return ret;
rproc = rproc_alloc(&pdev->dev, pdev->name, &wcnss_ops,
fw_name, sizeof(*wcnss));
if (!rproc) {
dev_err(&pdev->dev, "unable to allocate remoteproc\n");
return -ENOMEM;
}
rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
wcnss = rproc->priv;
wcnss->dev = &pdev->dev;
wcnss->rproc = rproc;
platform_set_drvdata(pdev, wcnss);
init_completion(&wcnss->start_done);
init_completion(&wcnss->stop_done);
mutex_init(&wcnss->iris_lock);
mmio = devm_platform_ioremap_resource_byname(pdev, "pmu");
if (IS_ERR(mmio)) {
ret = PTR_ERR(mmio);
goto free_rproc;
}
ret = wcnss_alloc_memory_region(wcnss);
if (ret)
goto free_rproc;
wcnss->pmu_cfg = mmio + data->pmu_offset;
wcnss->spare_out = mmio + data->spare_offset;
/*
	 * We might need to fall back to regulators instead of power domains
* for old device trees. Don't report an error in that case.
*/
ret = wcnss_init_pds(wcnss, data->pd_names);
if (ret && (ret != -ENODATA || !data->num_pd_vregs))
goto free_rproc;
ret = wcnss_init_regulators(wcnss, data->vregs, data->num_vregs,
data->num_pd_vregs);
if (ret)
goto detach_pds;
ret = wcnss_request_irq(wcnss, pdev, "wdog", false, wcnss_wdog_interrupt);
if (ret < 0)
goto detach_pds;
wcnss->wdog_irq = ret;
ret = wcnss_request_irq(wcnss, pdev, "fatal", false, wcnss_fatal_interrupt);
if (ret < 0)
goto detach_pds;
wcnss->fatal_irq = ret;
ret = wcnss_request_irq(wcnss, pdev, "ready", true, wcnss_ready_interrupt);
if (ret < 0)
goto detach_pds;
wcnss->ready_irq = ret;
ret = wcnss_request_irq(wcnss, pdev, "handover", true, wcnss_handover_interrupt);
if (ret < 0)
goto detach_pds;
wcnss->handover_irq = ret;
ret = wcnss_request_irq(wcnss, pdev, "stop-ack", true, wcnss_stop_ack_interrupt);
if (ret < 0)
goto detach_pds;
wcnss->stop_ack_irq = ret;
if (wcnss->stop_ack_irq) {
wcnss->state = devm_qcom_smem_state_get(&pdev->dev, "stop",
&wcnss->stop_bit);
if (IS_ERR(wcnss->state)) {
ret = PTR_ERR(wcnss->state);
goto detach_pds;
}
}
qcom_add_smd_subdev(rproc, &wcnss->smd_subdev);
wcnss->sysmon = qcom_add_sysmon_subdev(rproc, "wcnss", WCNSS_SSCTL_ID);
if (IS_ERR(wcnss->sysmon)) {
ret = PTR_ERR(wcnss->sysmon);
goto detach_pds;
}
wcnss->iris = qcom_iris_probe(&pdev->dev, &wcnss->use_48mhz_xo);
if (IS_ERR(wcnss->iris)) {
ret = PTR_ERR(wcnss->iris);
goto detach_pds;
}
ret = rproc_add(rproc);
if (ret)
goto remove_iris;
return 0;
remove_iris:
qcom_iris_remove(wcnss->iris);
detach_pds:
wcnss_release_pds(wcnss);
free_rproc:
rproc_free(rproc);
return ret;
}
static void wcnss_remove(struct platform_device *pdev)
{
struct qcom_wcnss *wcnss = platform_get_drvdata(pdev);
qcom_iris_remove(wcnss->iris);
rproc_del(wcnss->rproc);
qcom_remove_sysmon_subdev(wcnss->sysmon);
qcom_remove_smd_subdev(wcnss->rproc, &wcnss->smd_subdev);
wcnss_release_pds(wcnss);
rproc_free(wcnss->rproc);
}
static const struct of_device_id wcnss_of_match[] = {
{ .compatible = "qcom,riva-pil", &riva_data },
{ .compatible = "qcom,pronto-v1-pil", &pronto_v1_data },
{ .compatible = "qcom,pronto-v2-pil", &pronto_v2_data },
{ .compatible = "qcom,pronto-v3-pil", &pronto_v3_data },
{ },
};
MODULE_DEVICE_TABLE(of, wcnss_of_match);
static struct platform_driver wcnss_driver = {
.probe = wcnss_probe,
.remove_new = wcnss_remove,
.driver = {
.name = "qcom-wcnss-pil",
.of_match_table = wcnss_of_match,
},
};
module_platform_driver(wcnss_driver);
MODULE_DESCRIPTION("Qualcomm Peripheral Image Loader for Wireless Subsystem");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/remoteproc/qcom_wcnss.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017 Pengutronix, Oleksij Rempel <[email protected]>
*/
#include <dt-bindings/firmware/imx/rsrc.h>
#include <linux/arm-smccc.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/firmware/imx/sci.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/regmap.h>
#include <linux/remoteproc.h>
#include <linux/workqueue.h>
#include "imx_rproc.h"
#include "remoteproc_internal.h"
#define IMX7D_SRC_SCR 0x0C
#define IMX7D_ENABLE_M4 BIT(3)
#define IMX7D_SW_M4P_RST BIT(2)
#define IMX7D_SW_M4C_RST BIT(1)
#define IMX7D_SW_M4C_NON_SCLR_RST BIT(0)
#define IMX7D_M4_RST_MASK (IMX7D_ENABLE_M4 | IMX7D_SW_M4P_RST \
| IMX7D_SW_M4C_RST \
| IMX7D_SW_M4C_NON_SCLR_RST)
#define IMX7D_M4_START (IMX7D_ENABLE_M4 | IMX7D_SW_M4P_RST \
| IMX7D_SW_M4C_RST)
#define IMX7D_M4_STOP (IMX7D_ENABLE_M4 | IMX7D_SW_M4C_RST | \
IMX7D_SW_M4C_NON_SCLR_RST)
#define IMX8M_M7_STOP (IMX7D_ENABLE_M4 | IMX7D_SW_M4C_RST)
#define IMX8M_M7_POLL IMX7D_ENABLE_M4
#define IMX8M_GPR22 0x58
#define IMX8M_GPR22_CM7_CPUWAIT BIT(0)
/* Address: 0x020D8000 */
#define IMX6SX_SRC_SCR 0x00
#define IMX6SX_ENABLE_M4 BIT(22)
#define IMX6SX_SW_M4P_RST BIT(12)
#define IMX6SX_SW_M4C_NON_SCLR_RST BIT(4)
#define IMX6SX_SW_M4C_RST BIT(3)
#define IMX6SX_M4_START (IMX6SX_ENABLE_M4 | IMX6SX_SW_M4P_RST \
| IMX6SX_SW_M4C_RST)
#define IMX6SX_M4_STOP (IMX6SX_ENABLE_M4 | IMX6SX_SW_M4C_RST | \
IMX6SX_SW_M4C_NON_SCLR_RST)
#define IMX6SX_M4_RST_MASK (IMX6SX_ENABLE_M4 | IMX6SX_SW_M4P_RST \
| IMX6SX_SW_M4C_NON_SCLR_RST \
| IMX6SX_SW_M4C_RST)
#define IMX_RPROC_MEM_MAX 32
#define IMX_SIP_RPROC 0xC2000005
#define IMX_SIP_RPROC_START 0x00
#define IMX_SIP_RPROC_STARTED 0x01
#define IMX_SIP_RPROC_STOP 0x02
#define IMX_SC_IRQ_GROUP_REBOOTED 5
/**
* struct imx_rproc_mem - slim internal memory structure
* @cpu_addr: MPU virtual address of the memory region
* @sys_addr: Bus address used to access the memory region
* @size: Size of the memory region
*/
struct imx_rproc_mem {
void __iomem *cpu_addr;
phys_addr_t sys_addr;
size_t size;
};
/* att flags: lower 16 bits specifying core, higher 16 bits for flags */
/* M4 own area. Can be mapped at probe */
#define ATT_OWN BIT(31)
#define ATT_IOMEM BIT(30)
#define ATT_CORE_MASK 0xffff
#define ATT_CORE(I) BIT((I))
static int imx_rproc_xtr_mbox_init(struct rproc *rproc);
static void imx_rproc_free_mbox(struct rproc *rproc);
static int imx_rproc_detach_pd(struct rproc *rproc);
struct imx_rproc {
struct device *dev;
struct regmap *regmap;
struct regmap *gpr;
struct rproc *rproc;
const struct imx_rproc_dcfg *dcfg;
struct imx_rproc_mem mem[IMX_RPROC_MEM_MAX];
struct clk *clk;
struct mbox_client cl;
struct mbox_chan *tx_ch;
struct mbox_chan *rx_ch;
struct work_struct rproc_work;
struct workqueue_struct *workqueue;
void __iomem *rsc_table;
struct imx_sc_ipc *ipc_handle;
struct notifier_block rproc_nb;
u32 rproc_pt; /* partition id */
u32 rsrc_id; /* resource id */
u32 entry; /* cpu start address */
int num_pd;
u32 core_index;
struct device **pd_dev;
struct device_link **pd_dev_link;
};
static const struct imx_rproc_att imx_rproc_att_imx93[] = {
/* dev addr , sys addr , size , flags */
/* TCM CODE NON-SECURE */
{ 0x0FFC0000, 0x201C0000, 0x00020000, ATT_OWN | ATT_IOMEM },
{ 0x0FFE0000, 0x201E0000, 0x00020000, ATT_OWN | ATT_IOMEM },
/* TCM CODE SECURE */
{ 0x1FFC0000, 0x201C0000, 0x00020000, ATT_OWN | ATT_IOMEM },
{ 0x1FFE0000, 0x201E0000, 0x00020000, ATT_OWN | ATT_IOMEM },
/* TCM SYS NON-SECURE*/
{ 0x20000000, 0x20200000, 0x00020000, ATT_OWN | ATT_IOMEM },
{ 0x20020000, 0x20220000, 0x00020000, ATT_OWN | ATT_IOMEM },
/* TCM SYS SECURE*/
{ 0x30000000, 0x20200000, 0x00020000, ATT_OWN | ATT_IOMEM },
{ 0x30020000, 0x20220000, 0x00020000, ATT_OWN | ATT_IOMEM },
/* DDR */
{ 0x80000000, 0x80000000, 0x10000000, 0 },
{ 0x90000000, 0x80000000, 0x10000000, 0 },
{ 0xC0000000, 0xC0000000, 0x10000000, 0 },
{ 0xD0000000, 0xC0000000, 0x10000000, 0 },
};
static const struct imx_rproc_att imx_rproc_att_imx8qm[] = {
/* dev addr , sys addr , size , flags */
{ 0x08000000, 0x08000000, 0x10000000, 0},
/* TCML */
{ 0x1FFE0000, 0x34FE0000, 0x00020000, ATT_OWN | ATT_IOMEM | ATT_CORE(0)},
{ 0x1FFE0000, 0x38FE0000, 0x00020000, ATT_OWN | ATT_IOMEM | ATT_CORE(1)},
/* TCMU */
{ 0x20000000, 0x35000000, 0x00020000, ATT_OWN | ATT_IOMEM | ATT_CORE(0)},
{ 0x20000000, 0x39000000, 0x00020000, ATT_OWN | ATT_IOMEM | ATT_CORE(1)},
/* DDR (Data) */
{ 0x80000000, 0x80000000, 0x60000000, 0 },
};
static const struct imx_rproc_att imx_rproc_att_imx8qxp[] = {
{ 0x08000000, 0x08000000, 0x10000000, 0 },
/* TCML/U */
{ 0x1FFE0000, 0x34FE0000, 0x00040000, ATT_OWN | ATT_IOMEM },
/* OCRAM(Low 96KB) */
{ 0x21000000, 0x00100000, 0x00018000, 0 },
/* OCRAM */
{ 0x21100000, 0x00100000, 0x00040000, 0 },
/* DDR (Data) */
{ 0x80000000, 0x80000000, 0x60000000, 0 },
};
static const struct imx_rproc_att imx_rproc_att_imx8mn[] = {
/* dev addr , sys addr , size , flags */
/* ITCM */
{ 0x00000000, 0x007E0000, 0x00020000, ATT_OWN | ATT_IOMEM },
/* OCRAM_S */
{ 0x00180000, 0x00180000, 0x00009000, 0 },
/* OCRAM */
{ 0x00900000, 0x00900000, 0x00020000, 0 },
/* OCRAM */
{ 0x00920000, 0x00920000, 0x00020000, 0 },
/* OCRAM */
{ 0x00940000, 0x00940000, 0x00050000, 0 },
/* QSPI Code - alias */
{ 0x08000000, 0x08000000, 0x08000000, 0 },
/* DDR (Code) - alias */
{ 0x10000000, 0x40000000, 0x0FFE0000, 0 },
/* DTCM */
{ 0x20000000, 0x00800000, 0x00020000, ATT_OWN | ATT_IOMEM },
/* OCRAM_S - alias */
{ 0x20180000, 0x00180000, 0x00008000, ATT_OWN },
/* OCRAM */
{ 0x20200000, 0x00900000, 0x00020000, ATT_OWN },
/* OCRAM */
{ 0x20220000, 0x00920000, 0x00020000, ATT_OWN },
/* OCRAM */
{ 0x20240000, 0x00940000, 0x00040000, ATT_OWN },
/* DDR (Data) */
{ 0x40000000, 0x40000000, 0x80000000, 0 },
};
static const struct imx_rproc_att imx_rproc_att_imx8mq[] = {
/* dev addr , sys addr , size , flags */
/* TCML - alias */
{ 0x00000000, 0x007e0000, 0x00020000, ATT_IOMEM},
/* OCRAM_S */
{ 0x00180000, 0x00180000, 0x00008000, 0 },
/* OCRAM */
{ 0x00900000, 0x00900000, 0x00020000, 0 },
/* OCRAM */
{ 0x00920000, 0x00920000, 0x00020000, 0 },
/* QSPI Code - alias */
{ 0x08000000, 0x08000000, 0x08000000, 0 },
/* DDR (Code) - alias */
{ 0x10000000, 0x80000000, 0x0FFE0000, 0 },
/* TCML */
{ 0x1FFE0000, 0x007E0000, 0x00020000, ATT_OWN | ATT_IOMEM},
/* TCMU */
{ 0x20000000, 0x00800000, 0x00020000, ATT_OWN | ATT_IOMEM},
/* OCRAM_S */
{ 0x20180000, 0x00180000, 0x00008000, ATT_OWN },
/* OCRAM */
{ 0x20200000, 0x00900000, 0x00020000, ATT_OWN },
/* OCRAM */
{ 0x20220000, 0x00920000, 0x00020000, ATT_OWN },
/* DDR (Data) */
{ 0x40000000, 0x40000000, 0x80000000, 0 },
};
static const struct imx_rproc_att imx_rproc_att_imx8ulp[] = {
{0x1FFC0000, 0x1FFC0000, 0xC0000, ATT_OWN},
{0x21000000, 0x21000000, 0x10000, ATT_OWN},
{0x80000000, 0x80000000, 0x60000000, 0}
};
static const struct imx_rproc_att imx_rproc_att_imx7ulp[] = {
{0x1FFD0000, 0x1FFD0000, 0x30000, ATT_OWN},
{0x20000000, 0x20000000, 0x10000, ATT_OWN},
{0x2F000000, 0x2F000000, 0x20000, ATT_OWN},
{0x2F020000, 0x2F020000, 0x20000, ATT_OWN},
{0x60000000, 0x60000000, 0x40000000, 0}
};
static const struct imx_rproc_att imx_rproc_att_imx7d[] = {
/* dev addr , sys addr , size , flags */
/* OCRAM_S (M4 Boot code) - alias */
{ 0x00000000, 0x00180000, 0x00008000, 0 },
/* OCRAM_S (Code) */
{ 0x00180000, 0x00180000, 0x00008000, ATT_OWN },
/* OCRAM (Code) - alias */
{ 0x00900000, 0x00900000, 0x00020000, 0 },
/* OCRAM_EPDC (Code) - alias */
{ 0x00920000, 0x00920000, 0x00020000, 0 },
/* OCRAM_PXP (Code) - alias */
{ 0x00940000, 0x00940000, 0x00008000, 0 },
/* TCML (Code) */
{ 0x1FFF8000, 0x007F8000, 0x00008000, ATT_OWN | ATT_IOMEM },
/* DDR (Code) - alias, first part of DDR (Data) */
{ 0x10000000, 0x80000000, 0x0FFF0000, 0 },
/* TCMU (Data) */
{ 0x20000000, 0x00800000, 0x00008000, ATT_OWN | ATT_IOMEM },
/* OCRAM (Data) */
{ 0x20200000, 0x00900000, 0x00020000, 0 },
/* OCRAM_EPDC (Data) */
{ 0x20220000, 0x00920000, 0x00020000, 0 },
/* OCRAM_PXP (Data) */
{ 0x20240000, 0x00940000, 0x00008000, 0 },
/* DDR (Data) */
{ 0x80000000, 0x80000000, 0x60000000, 0 },
};
static const struct imx_rproc_att imx_rproc_att_imx6sx[] = {
/* dev addr , sys addr , size , flags */
/* TCML (M4 Boot Code) - alias */
{ 0x00000000, 0x007F8000, 0x00008000, ATT_IOMEM },
/* OCRAM_S (Code) */
{ 0x00180000, 0x008F8000, 0x00004000, 0 },
/* OCRAM_S (Code) - alias */
{ 0x00180000, 0x008FC000, 0x00004000, 0 },
/* TCML (Code) */
{ 0x1FFF8000, 0x007F8000, 0x00008000, ATT_OWN | ATT_IOMEM },
/* DDR (Code) - alias, first part of DDR (Data) */
{ 0x10000000, 0x80000000, 0x0FFF8000, 0 },
/* TCMU (Data) */
{ 0x20000000, 0x00800000, 0x00008000, ATT_OWN | ATT_IOMEM },
/* OCRAM_S (Data) - alias? */
{ 0x208F8000, 0x008F8000, 0x00004000, 0 },
/* DDR (Data) */
{ 0x80000000, 0x80000000, 0x60000000, 0 },
};
static const struct imx_rproc_dcfg imx_rproc_cfg_imx8mn_mmio = {
.src_reg = IMX7D_SRC_SCR,
.src_mask = IMX7D_M4_RST_MASK,
.src_start = IMX7D_M4_START,
.src_stop = IMX8M_M7_STOP,
.gpr_reg = IMX8M_GPR22,
.gpr_wait = IMX8M_GPR22_CM7_CPUWAIT,
.att = imx_rproc_att_imx8mn,
.att_size = ARRAY_SIZE(imx_rproc_att_imx8mn),
.method = IMX_RPROC_MMIO,
};
static const struct imx_rproc_dcfg imx_rproc_cfg_imx8mn = {
.att = imx_rproc_att_imx8mn,
.att_size = ARRAY_SIZE(imx_rproc_att_imx8mn),
.method = IMX_RPROC_SMC,
};
static const struct imx_rproc_dcfg imx_rproc_cfg_imx8mq = {
.src_reg = IMX7D_SRC_SCR,
.src_mask = IMX7D_M4_RST_MASK,
.src_start = IMX7D_M4_START,
.src_stop = IMX7D_M4_STOP,
.att = imx_rproc_att_imx8mq,
.att_size = ARRAY_SIZE(imx_rproc_att_imx8mq),
.method = IMX_RPROC_MMIO,
};
static const struct imx_rproc_dcfg imx_rproc_cfg_imx8qm = {
.att = imx_rproc_att_imx8qm,
.att_size = ARRAY_SIZE(imx_rproc_att_imx8qm),
.method = IMX_RPROC_SCU_API,
};
static const struct imx_rproc_dcfg imx_rproc_cfg_imx8qxp = {
.att = imx_rproc_att_imx8qxp,
.att_size = ARRAY_SIZE(imx_rproc_att_imx8qxp),
.method = IMX_RPROC_SCU_API,
};
static const struct imx_rproc_dcfg imx_rproc_cfg_imx8ulp = {
.att = imx_rproc_att_imx8ulp,
.att_size = ARRAY_SIZE(imx_rproc_att_imx8ulp),
.method = IMX_RPROC_NONE,
};
static const struct imx_rproc_dcfg imx_rproc_cfg_imx7ulp = {
.att = imx_rproc_att_imx7ulp,
.att_size = ARRAY_SIZE(imx_rproc_att_imx7ulp),
.method = IMX_RPROC_NONE,
};
static const struct imx_rproc_dcfg imx_rproc_cfg_imx7d = {
.src_reg = IMX7D_SRC_SCR,
.src_mask = IMX7D_M4_RST_MASK,
.src_start = IMX7D_M4_START,
.src_stop = IMX7D_M4_STOP,
.att = imx_rproc_att_imx7d,
.att_size = ARRAY_SIZE(imx_rproc_att_imx7d),
.method = IMX_RPROC_MMIO,
};
static const struct imx_rproc_dcfg imx_rproc_cfg_imx6sx = {
.src_reg = IMX6SX_SRC_SCR,
.src_mask = IMX6SX_M4_RST_MASK,
.src_start = IMX6SX_M4_START,
.src_stop = IMX6SX_M4_STOP,
.att = imx_rproc_att_imx6sx,
.att_size = ARRAY_SIZE(imx_rproc_att_imx6sx),
.method = IMX_RPROC_MMIO,
};
static const struct imx_rproc_dcfg imx_rproc_cfg_imx93 = {
.att = imx_rproc_att_imx93,
.att_size = ARRAY_SIZE(imx_rproc_att_imx93),
.method = IMX_RPROC_SMC,
};
static int imx_rproc_start(struct rproc *rproc)
{
struct imx_rproc *priv = rproc->priv;
const struct imx_rproc_dcfg *dcfg = priv->dcfg;
struct device *dev = priv->dev;
struct arm_smccc_res res;
int ret;
ret = imx_rproc_xtr_mbox_init(rproc);
if (ret)
return ret;
switch (dcfg->method) {
case IMX_RPROC_MMIO:
if (priv->gpr) {
ret = regmap_clear_bits(priv->gpr, dcfg->gpr_reg,
dcfg->gpr_wait);
} else {
ret = regmap_update_bits(priv->regmap, dcfg->src_reg,
dcfg->src_mask,
dcfg->src_start);
}
break;
case IMX_RPROC_SMC:
arm_smccc_smc(IMX_SIP_RPROC, IMX_SIP_RPROC_START, 0, 0, 0, 0, 0, 0, &res);
ret = res.a0;
break;
case IMX_RPROC_SCU_API:
ret = imx_sc_pm_cpu_start(priv->ipc_handle, priv->rsrc_id, true, priv->entry);
break;
default:
return -EOPNOTSUPP;
}
if (ret)
dev_err(dev, "Failed to enable remote core!\n");
return ret;
}
static int imx_rproc_stop(struct rproc *rproc)
{
struct imx_rproc *priv = rproc->priv;
const struct imx_rproc_dcfg *dcfg = priv->dcfg;
struct device *dev = priv->dev;
struct arm_smccc_res res;
int ret;
switch (dcfg->method) {
case IMX_RPROC_MMIO:
if (priv->gpr) {
ret = regmap_set_bits(priv->gpr, dcfg->gpr_reg,
dcfg->gpr_wait);
if (ret) {
dev_err(priv->dev,
"Failed to quiescence M4 platform!\n");
return ret;
}
}
ret = regmap_update_bits(priv->regmap, dcfg->src_reg, dcfg->src_mask,
dcfg->src_stop);
break;
case IMX_RPROC_SMC:
arm_smccc_smc(IMX_SIP_RPROC, IMX_SIP_RPROC_STOP, 0, 0, 0, 0, 0, 0, &res);
ret = res.a0;
if (res.a1)
dev_info(dev, "Not in wfi, force stopped\n");
break;
case IMX_RPROC_SCU_API:
ret = imx_sc_pm_cpu_start(priv->ipc_handle, priv->rsrc_id, false, priv->entry);
break;
default:
return -EOPNOTSUPP;
}
if (ret)
dev_err(dev, "Failed to stop remote core\n");
else
imx_rproc_free_mbox(rproc);
return ret;
}
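/*
 * Translate a device (M4/M7) address into a system bus address using the
 * SoC-specific address translation table, skipping entries that belong to
 * another core.
 */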
static int imx_rproc_da_to_sys(struct imx_rproc *priv, u64 da,
size_t len, u64 *sys, bool *is_iomem)
{
const struct imx_rproc_dcfg *dcfg = priv->dcfg;
int i;
/* parse address translation table */
for (i = 0; i < dcfg->att_size; i++) {
const struct imx_rproc_att *att = &dcfg->att[i];
/*
		 * Ignore entries that do not belong to the current core: i.MX8QM
		 * has two general-purpose M4_[0,1] cores. M4_0's own entries have
		 * "ATT_CORE(0) & BIT(0)" true, M4_1's own entries have
		 * "ATT_CORE(1) & BIT(1)" true.
*/
if (att->flags & ATT_CORE_MASK) {
if (!((BIT(priv->core_index)) & (att->flags & ATT_CORE_MASK)))
continue;
}
if (da >= att->da && da + len < att->da + att->size) {
unsigned int offset = da - att->da;
*sys = att->sa + offset;
if (is_iomem)
*is_iomem = att->flags & ATT_IOMEM;
return 0;
}
}
dev_warn(priv->dev, "Translation failed: da = 0x%llx len = 0x%zx\n",
da, len);
return -ENOENT;
}
static void *imx_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct imx_rproc *priv = rproc->priv;
void *va = NULL;
u64 sys;
int i;
if (len == 0)
return NULL;
/*
* On device side we have many aliases, so we need to convert device
* address (M4) to system bus address first.
*/
if (imx_rproc_da_to_sys(priv, da, len, &sys, is_iomem))
return NULL;
for (i = 0; i < IMX_RPROC_MEM_MAX; i++) {
if (sys >= priv->mem[i].sys_addr && sys + len <
priv->mem[i].sys_addr + priv->mem[i].size) {
unsigned int offset = sys - priv->mem[i].sys_addr;
/* __force to make sparse happy with type conversion */
va = (__force void *)(priv->mem[i].cpu_addr + offset);
break;
}
}
dev_dbg(&rproc->dev, "da = 0x%llx len = 0x%zx va = 0x%p\n",
da, len, va);
return va;
}
static int imx_rproc_mem_alloc(struct rproc *rproc,
struct rproc_mem_entry *mem)
{
struct device *dev = rproc->dev.parent;
void *va;
dev_dbg(dev, "map memory: %p+%zx\n", &mem->dma, mem->len);
va = ioremap_wc(mem->dma, mem->len);
if (IS_ERR_OR_NULL(va)) {
dev_err(dev, "Unable to map memory region: %p+%zx\n",
&mem->dma, mem->len);
return -ENOMEM;
}
/* Update memory entry va */
mem->va = va;
return 0;
}
static int imx_rproc_mem_release(struct rproc *rproc,
struct rproc_mem_entry *mem)
{
dev_dbg(rproc->dev.parent, "unmap memory: %pa\n", &mem->dma);
iounmap(mem->va);
return 0;
}
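/*
 * Register the reserved memory regions, except the vdev buffer and the
 * resource table, as carveouts and coredump segments.
 */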
static int imx_rproc_prepare(struct rproc *rproc)
{
struct imx_rproc *priv = rproc->priv;
struct device_node *np = priv->dev->of_node;
struct of_phandle_iterator it;
struct rproc_mem_entry *mem;
struct reserved_mem *rmem;
u32 da;
/* Register associated reserved memory regions */
of_phandle_iterator_init(&it, np, "memory-region", NULL, 0);
while (of_phandle_iterator_next(&it) == 0) {
		/*
		 * Ignore the first memory region, which will be used as the vdev
		 * buffer. No extra handling is needed here; rproc_add_virtio_dev()
		 * will take care of it.
		 */
if (!strcmp(it.node->name, "vdev0buffer"))
continue;
if (!strcmp(it.node->name, "rsc-table"))
continue;
rmem = of_reserved_mem_lookup(it.node);
if (!rmem) {
of_node_put(it.node);
dev_err(priv->dev, "unable to acquire memory-region\n");
return -EINVAL;
}
		/* No need to translate pa to da, i.MX uses the same map */
da = rmem->base;
/* Register memory region */
mem = rproc_mem_entry_init(priv->dev, NULL, (dma_addr_t)rmem->base, rmem->size, da,
imx_rproc_mem_alloc, imx_rproc_mem_release,
it.node->name);
if (mem) {
rproc_coredump_add_segment(rproc, da, rmem->size);
} else {
of_node_put(it.node);
return -ENOMEM;
}
rproc_add_carveout(rproc, mem);
}
return 0;
}
static int imx_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
{
int ret;
ret = rproc_elf_load_rsc_table(rproc, fw);
if (ret)
dev_info(&rproc->dev, "No resource table in elf\n");
return 0;
}
static void imx_rproc_kick(struct rproc *rproc, int vqid)
{
struct imx_rproc *priv = rproc->priv;
int err;
__u32 mmsg;
if (!priv->tx_ch) {
dev_err(priv->dev, "No initialized mbox tx channel\n");
return;
}
	/*
	 * Send the index of the triggered virtqueue as the MU payload so that
	 * the remote processor knows which virtqueue is being used.
	 */
mmsg = vqid << 16;
err = mbox_send_message(priv->tx_ch, (void *)&mmsg);
if (err < 0)
dev_err(priv->dev, "%s: failed (%d, err:%d)\n",
__func__, vqid, err);
}
static int imx_rproc_attach(struct rproc *rproc)
{
return imx_rproc_xtr_mbox_init(rproc);
}
static int imx_rproc_detach(struct rproc *rproc)
{
struct imx_rproc *priv = rproc->priv;
const struct imx_rproc_dcfg *dcfg = priv->dcfg;
if (dcfg->method != IMX_RPROC_SCU_API)
return -EOPNOTSUPP;
if (imx_sc_rm_is_resource_owned(priv->ipc_handle, priv->rsrc_id))
return -EOPNOTSUPP;
imx_rproc_free_mbox(rproc);
return 0;
}
static struct resource_table *imx_rproc_get_loaded_rsc_table(struct rproc *rproc, size_t *table_sz)
{
struct imx_rproc *priv = rproc->priv;
/* The resource table has already been mapped in imx_rproc_addr_init */
if (!priv->rsc_table)
return NULL;
*table_sz = SZ_1K;
return (struct resource_table *)priv->rsc_table;
}
static const struct rproc_ops imx_rproc_ops = {
.prepare = imx_rproc_prepare,
.attach = imx_rproc_attach,
.detach = imx_rproc_detach,
.start = imx_rproc_start,
.stop = imx_rproc_stop,
.kick = imx_rproc_kick,
.da_to_va = imx_rproc_da_to_va,
.load = rproc_elf_load_segments,
.parse_fw = imx_rproc_parse_fw,
.find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
.get_loaded_rsc_table = imx_rproc_get_loaded_rsc_table,
.sanity_check = rproc_elf_sanity_check,
.get_boot_addr = rproc_elf_get_boot_addr,
};
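/*
 * Map the regions the remote core owns according to the address
 * translation table, plus any additional "memory-region" entries, and
 * remember where the loaded resource table lives.
 */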
static int imx_rproc_addr_init(struct imx_rproc *priv,
struct platform_device *pdev)
{
const struct imx_rproc_dcfg *dcfg = priv->dcfg;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
int a, b = 0, err, nph;
/* remap required addresses */
for (a = 0; a < dcfg->att_size; a++) {
const struct imx_rproc_att *att = &dcfg->att[a];
if (!(att->flags & ATT_OWN))
continue;
if (b >= IMX_RPROC_MEM_MAX)
break;
if (att->flags & ATT_IOMEM)
priv->mem[b].cpu_addr = devm_ioremap(&pdev->dev,
att->sa, att->size);
else
priv->mem[b].cpu_addr = devm_ioremap_wc(&pdev->dev,
att->sa, att->size);
if (!priv->mem[b].cpu_addr) {
dev_err(dev, "failed to remap %#x bytes from %#x\n", att->size, att->sa);
return -ENOMEM;
}
priv->mem[b].sys_addr = att->sa;
priv->mem[b].size = att->size;
b++;
}
/* memory-region is optional property */
nph = of_count_phandle_with_args(np, "memory-region", NULL);
if (nph <= 0)
return 0;
/* remap optional addresses */
for (a = 0; a < nph; a++) {
struct device_node *node;
struct resource res;
node = of_parse_phandle(np, "memory-region", a);
		/* Do not map the vdevbuffer and vdevring regions */
if (!strncmp(node->name, "vdev", strlen("vdev"))) {
of_node_put(node);
continue;
}
err = of_address_to_resource(node, 0, &res);
of_node_put(node);
if (err) {
dev_err(dev, "unable to resolve memory region\n");
return err;
}
if (b >= IMX_RPROC_MEM_MAX)
break;
		/* Do not use the resource-based helper, because the region might be shared */
priv->mem[b].cpu_addr = devm_ioremap_wc(&pdev->dev, res.start, resource_size(&res));
if (!priv->mem[b].cpu_addr) {
dev_err(dev, "failed to remap %pr\n", &res);
return -ENOMEM;
}
priv->mem[b].sys_addr = res.start;
priv->mem[b].size = resource_size(&res);
if (!strcmp(node->name, "rsc-table"))
priv->rsc_table = priv->mem[b].cpu_addr;
b++;
}
return 0;
}
static int imx_rproc_notified_idr_cb(int id, void *ptr, void *data)
{
struct rproc *rproc = data;
rproc_vq_interrupt(rproc, id);
return 0;
}
static void imx_rproc_vq_work(struct work_struct *work)
{
struct imx_rproc *priv = container_of(work, struct imx_rproc,
rproc_work);
struct rproc *rproc = priv->rproc;
idr_for_each(&rproc->notifyids, imx_rproc_notified_idr_cb, rproc);
}
static void imx_rproc_rx_callback(struct mbox_client *cl, void *msg)
{
struct rproc *rproc = dev_get_drvdata(cl->dev);
struct imx_rproc *priv = rproc->priv;
queue_work(priv->workqueue, &priv->rproc_work);
}
static int imx_rproc_xtr_mbox_init(struct rproc *rproc)
{
struct imx_rproc *priv = rproc->priv;
struct device *dev = priv->dev;
struct mbox_client *cl;
/*
 * stop() and detach() will free the mbox channels, so the channels
 * need to be requested again in start() and attach().
 *
 * Because start() and attach() are not able to handle mbox probe
 * deferral, imx_rproc_xtr_mbox_init() is also called in probe().
 * This check avoids requesting the mbox channels again when start()
 * or attach() runs after probe() has already succeeded.
 */
if (priv->tx_ch && priv->rx_ch)
return 0;
if (!of_get_property(dev->of_node, "mbox-names", NULL))
return 0;
cl = &priv->cl;
cl->dev = dev;
cl->tx_block = true;
cl->tx_tout = 100;
cl->knows_txdone = false;
cl->rx_callback = imx_rproc_rx_callback;
priv->tx_ch = mbox_request_channel_byname(cl, "tx");
if (IS_ERR(priv->tx_ch))
return dev_err_probe(cl->dev, PTR_ERR(priv->tx_ch),
"failed to request tx mailbox channel\n");
priv->rx_ch = mbox_request_channel_byname(cl, "rx");
if (IS_ERR(priv->rx_ch)) {
mbox_free_channel(priv->tx_ch);
return dev_err_probe(cl->dev, PTR_ERR(priv->rx_ch),
"failed to request rx mailbox channel\n");
}
return 0;
}
static void imx_rproc_free_mbox(struct rproc *rproc)
{
struct imx_rproc *priv = rproc->priv;
if (priv->tx_ch) {
mbox_free_channel(priv->tx_ch);
priv->tx_ch = NULL;
}
if (priv->rx_ch) {
mbox_free_channel(priv->rx_ch);
priv->rx_ch = NULL;
}
}
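/*
 * Undo the SCU-specific setup done in imx_rproc_detect_mode(): detach the
 * power domains for Linux-owned cores, or disable the partition-reboot
 * interrupt and unregister the notifier otherwise.
 */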
static void imx_rproc_put_scu(struct rproc *rproc)
{
struct imx_rproc *priv = rproc->priv;
const struct imx_rproc_dcfg *dcfg = priv->dcfg;
if (dcfg->method != IMX_RPROC_SCU_API)
return;
if (imx_sc_rm_is_resource_owned(priv->ipc_handle, priv->rsrc_id)) {
imx_rproc_detach_pd(rproc);
return;
}
imx_scu_irq_group_enable(IMX_SC_IRQ_GROUP_REBOOTED, BIT(priv->rproc_pt), false);
imx_scu_irq_unregister_notifier(&priv->rproc_nb);
}
static int imx_rproc_partition_notify(struct notifier_block *nb,
unsigned long event, void *group)
{
struct imx_rproc *priv = container_of(nb, struct imx_rproc, rproc_nb);
/* Ignore other irqs */
if (!((event & BIT(priv->rproc_pt)) && (*(u8 *)group == IMX_SC_IRQ_GROUP_REBOOTED)))
return 0;
rproc_report_crash(priv->rproc, RPROC_WATCHDOG);
pr_info("Partition%d reset!\n", priv->rproc_pt);
return 0;
}
static int imx_rproc_attach_pd(struct imx_rproc *priv)
{
struct device *dev = priv->dev;
int ret, i;
/*
 * If there is only one power-domain entry, the platform driver framework
 * will handle it; there is no need to handle it in this driver.
*/
priv->num_pd = of_count_phandle_with_args(dev->of_node, "power-domains",
"#power-domain-cells");
if (priv->num_pd <= 1)
return 0;
priv->pd_dev = devm_kmalloc_array(dev, priv->num_pd, sizeof(*priv->pd_dev), GFP_KERNEL);
if (!priv->pd_dev)
return -ENOMEM;
priv->pd_dev_link = devm_kmalloc_array(dev, priv->num_pd, sizeof(*priv->pd_dev_link),
GFP_KERNEL);
if (!priv->pd_dev_link)
return -ENOMEM;
for (i = 0; i < priv->num_pd; i++) {
priv->pd_dev[i] = dev_pm_domain_attach_by_id(dev, i);
if (IS_ERR(priv->pd_dev[i])) {
ret = PTR_ERR(priv->pd_dev[i]);
goto detach_pd;
}
priv->pd_dev_link[i] = device_link_add(dev, priv->pd_dev[i], DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
if (!priv->pd_dev_link[i]) {
dev_pm_domain_detach(priv->pd_dev[i], false);
ret = -EINVAL;
goto detach_pd;
}
}
return 0;
detach_pd:
while (--i >= 0) {
device_link_del(priv->pd_dev_link[i]);
dev_pm_domain_detach(priv->pd_dev[i], false);
}
return ret;
}
static int imx_rproc_detach_pd(struct rproc *rproc)
{
struct imx_rproc *priv = rproc->priv;
int i;
/*
 * If there is only one power-domain entry, the platform driver framework
 * will handle it; there is no need to handle it in this driver.
*/
if (priv->num_pd <= 1)
return 0;
for (i = 0; i < priv->num_pd; i++) {
device_link_del(priv->pd_dev_link[i]);
dev_pm_domain_detach(priv->pd_dev[i], false);
}
return 0;
}
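/*
 * Determine whether the remote core is already running (attach/detach flow)
 * or still held in reset (normal boot flow), using the method appropriate
 * for the SoC: SMC call, SCU firmware query or syscon registers.
 */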
static int imx_rproc_detect_mode(struct imx_rproc *priv)
{
struct regmap_config config = { .name = "imx-rproc" };
const struct imx_rproc_dcfg *dcfg = priv->dcfg;
struct device *dev = priv->dev;
struct regmap *regmap;
struct arm_smccc_res res;
int ret;
u32 val;
u8 pt;
switch (dcfg->method) {
case IMX_RPROC_NONE:
priv->rproc->state = RPROC_DETACHED;
return 0;
case IMX_RPROC_SMC:
arm_smccc_smc(IMX_SIP_RPROC, IMX_SIP_RPROC_STARTED, 0, 0, 0, 0, 0, 0, &res);
if (res.a0)
priv->rproc->state = RPROC_DETACHED;
return 0;
case IMX_RPROC_SCU_API:
ret = imx_scu_get_handle(&priv->ipc_handle);
if (ret)
return ret;
ret = of_property_read_u32(dev->of_node, "fsl,resource-id", &priv->rsrc_id);
if (ret) {
dev_err(dev, "No fsl,resource-id property\n");
return ret;
}
if (priv->rsrc_id == IMX_SC_R_M4_1_PID0)
priv->core_index = 1;
else
priv->core_index = 0;
/*
 * If the Mcore resource is not owned by the Acore partition, it has been
 * kicked by the ROM and Linux can only do IPC with the Mcore, nothing else.
*/
if (imx_sc_rm_is_resource_owned(priv->ipc_handle, priv->rsrc_id)) {
if (of_property_read_u32(dev->of_node, "fsl,entry-address", &priv->entry))
return -EINVAL;
return imx_rproc_attach_pd(priv);
}
priv->rproc->state = RPROC_DETACHED;
priv->rproc->recovery_disabled = false;
rproc_set_feature(priv->rproc, RPROC_FEAT_ATTACH_ON_RECOVERY);
/* Get partition id and enable irq in SCFW */
ret = imx_sc_rm_get_resource_owner(priv->ipc_handle, priv->rsrc_id, &pt);
if (ret) {
dev_err(dev, "not able to get resource owner\n");
return ret;
}
priv->rproc_pt = pt;
priv->rproc_nb.notifier_call = imx_rproc_partition_notify;
ret = imx_scu_irq_register_notifier(&priv->rproc_nb);
if (ret) {
dev_err(dev, "register scu notifier failed, %d\n", ret);
return ret;
}
ret = imx_scu_irq_group_enable(IMX_SC_IRQ_GROUP_REBOOTED, BIT(priv->rproc_pt),
true);
if (ret) {
imx_scu_irq_unregister_notifier(&priv->rproc_nb);
dev_err(dev, "Enable irq failed, %d\n", ret);
return ret;
}
return 0;
default:
break;
}
priv->gpr = syscon_regmap_lookup_by_phandle(dev->of_node, "fsl,iomuxc-gpr");
if (IS_ERR(priv->gpr))
priv->gpr = NULL;
regmap = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
if (IS_ERR(regmap)) {
dev_err(dev, "failed to find syscon\n");
return PTR_ERR(regmap);
}
priv->regmap = regmap;
regmap_attach_dev(dev, regmap, &config);
if (priv->gpr) {
ret = regmap_read(priv->gpr, dcfg->gpr_reg, &val);
if (val & dcfg->gpr_wait) {
/*
 * After cold boot, the CM indicates it is in the wait
 * state, but is not fully powered off. Power it off
* fully so firmware can be loaded into it.
*/
imx_rproc_stop(priv->rproc);
return 0;
}
}
ret = regmap_read(regmap, dcfg->src_reg, &val);
if (ret) {
dev_err(dev, "Failed to read src\n");
return ret;
}
if ((val & dcfg->src_mask) != dcfg->src_stop)
priv->rproc->state = RPROC_DETACHED;
return 0;
}
static int imx_rproc_clk_enable(struct imx_rproc *priv)
{
const struct imx_rproc_dcfg *dcfg = priv->dcfg;
struct device *dev = priv->dev;
int ret;
/* Remote core is not under control of Linux */
if (dcfg->method == IMX_RPROC_NONE)
return 0;
priv->clk = devm_clk_get(dev, NULL);
if (IS_ERR(priv->clk)) {
dev_err(dev, "Failed to get clock\n");
return PTR_ERR(priv->clk);
}
/*
 * The clock feeds the M4 block including its memory and must be
 * enabled before .start() for the firmware transfer.
*/
ret = clk_prepare_enable(priv->clk);
if (ret) {
dev_err(dev, "Failed to enable clock\n");
return ret;
}
return 0;
}
static int imx_rproc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct imx_rproc *priv;
struct rproc *rproc;
const struct imx_rproc_dcfg *dcfg;
int ret;
/* set some name other than imx */
rproc = rproc_alloc(dev, "imx-rproc", &imx_rproc_ops,
NULL, sizeof(*priv));
if (!rproc)
return -ENOMEM;
dcfg = of_device_get_match_data(dev);
if (!dcfg) {
ret = -EINVAL;
goto err_put_rproc;
}
priv = rproc->priv;
priv->rproc = rproc;
priv->dcfg = dcfg;
priv->dev = dev;
dev_set_drvdata(dev, rproc);
priv->workqueue = create_workqueue(dev_name(dev));
if (!priv->workqueue) {
dev_err(dev, "cannot create workqueue\n");
ret = -ENOMEM;
goto err_put_rproc;
}
ret = imx_rproc_xtr_mbox_init(rproc);
if (ret)
goto err_put_wkq;
ret = imx_rproc_addr_init(priv, pdev);
if (ret) {
dev_err(dev, "failed on imx_rproc_addr_init\n");
goto err_put_mbox;
}
ret = imx_rproc_detect_mode(priv);
if (ret)
goto err_put_mbox;
ret = imx_rproc_clk_enable(priv);
if (ret)
goto err_put_scu;
INIT_WORK(&priv->rproc_work, imx_rproc_vq_work);
if (rproc->state != RPROC_DETACHED)
rproc->auto_boot = of_property_read_bool(np, "fsl,auto-boot");
ret = rproc_add(rproc);
if (ret) {
dev_err(dev, "rproc_add failed\n");
goto err_put_clk;
}
return 0;
err_put_clk:
clk_disable_unprepare(priv->clk);
err_put_scu:
imx_rproc_put_scu(rproc);
err_put_mbox:
imx_rproc_free_mbox(rproc);
err_put_wkq:
destroy_workqueue(priv->workqueue);
err_put_rproc:
rproc_free(rproc);
return ret;
}
static void imx_rproc_remove(struct platform_device *pdev)
{
struct rproc *rproc = platform_get_drvdata(pdev);
struct imx_rproc *priv = rproc->priv;
clk_disable_unprepare(priv->clk);
rproc_del(rproc);
imx_rproc_put_scu(rproc);
imx_rproc_free_mbox(rproc);
destroy_workqueue(priv->workqueue);
rproc_free(rproc);
}
static const struct of_device_id imx_rproc_of_match[] = {
{ .compatible = "fsl,imx7ulp-cm4", .data = &imx_rproc_cfg_imx7ulp },
{ .compatible = "fsl,imx7d-cm4", .data = &imx_rproc_cfg_imx7d },
{ .compatible = "fsl,imx6sx-cm4", .data = &imx_rproc_cfg_imx6sx },
{ .compatible = "fsl,imx8mq-cm4", .data = &imx_rproc_cfg_imx8mq },
{ .compatible = "fsl,imx8mm-cm4", .data = &imx_rproc_cfg_imx8mq },
{ .compatible = "fsl,imx8mn-cm7", .data = &imx_rproc_cfg_imx8mn },
{ .compatible = "fsl,imx8mp-cm7", .data = &imx_rproc_cfg_imx8mn },
{ .compatible = "fsl,imx8mn-cm7-mmio", .data = &imx_rproc_cfg_imx8mn_mmio },
{ .compatible = "fsl,imx8mp-cm7-mmio", .data = &imx_rproc_cfg_imx8mn_mmio },
{ .compatible = "fsl,imx8qxp-cm4", .data = &imx_rproc_cfg_imx8qxp },
{ .compatible = "fsl,imx8qm-cm4", .data = &imx_rproc_cfg_imx8qm },
{ .compatible = "fsl,imx8ulp-cm33", .data = &imx_rproc_cfg_imx8ulp },
{ .compatible = "fsl,imx93-cm33", .data = &imx_rproc_cfg_imx93 },
{},
};
MODULE_DEVICE_TABLE(of, imx_rproc_of_match);
static struct platform_driver imx_rproc_driver = {
.probe = imx_rproc_probe,
.remove_new = imx_rproc_remove,
.driver = {
.name = "imx-rproc",
.of_match_table = imx_rproc_of_match,
},
};
module_platform_driver(imx_rproc_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("i.MX remote processor control driver");
MODULE_AUTHOR("Oleksij Rempel <[email protected]>");
|
linux-master
|
drivers/remoteproc/imx_rproc.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* TI K3 R5F (MCU) Remote Processor driver
*
* Copyright (C) 2017-2022 Texas Instruments Incorporated - https://www.ti.com/
* Suman Anna <[email protected]>
*/
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_reserved_mem.h>
#include <linux/of_platform.h>
#include <linux/omap-mailbox.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/remoteproc.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include "omap_remoteproc.h"
#include "remoteproc_internal.h"
#include "ti_sci_proc.h"
/* This address can either be for ATCM or BTCM with the other at address 0x0 */
#define K3_R5_TCM_DEV_ADDR 0x41010000
/* R5 TI-SCI Processor Configuration Flags */
#define PROC_BOOT_CFG_FLAG_R5_DBG_EN 0x00000001
#define PROC_BOOT_CFG_FLAG_R5_DBG_NIDEN 0x00000002
#define PROC_BOOT_CFG_FLAG_R5_LOCKSTEP 0x00000100
#define PROC_BOOT_CFG_FLAG_R5_TEINIT 0x00000200
#define PROC_BOOT_CFG_FLAG_R5_NMFI_EN 0x00000400
#define PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE 0x00000800
#define PROC_BOOT_CFG_FLAG_R5_BTCM_EN 0x00001000
#define PROC_BOOT_CFG_FLAG_R5_ATCM_EN 0x00002000
/* Available from J7200 SoCs onwards */
#define PROC_BOOT_CFG_FLAG_R5_MEM_INIT_DIS 0x00004000
/* Applicable to only AM64x SoCs */
#define PROC_BOOT_CFG_FLAG_R5_SINGLE_CORE 0x00008000
/* R5 TI-SCI Processor Control Flags */
#define PROC_BOOT_CTRL_FLAG_R5_CORE_HALT 0x00000001
/* R5 TI-SCI Processor Status Flags */
#define PROC_BOOT_STATUS_FLAG_R5_WFE 0x00000001
#define PROC_BOOT_STATUS_FLAG_R5_WFI 0x00000002
#define PROC_BOOT_STATUS_FLAG_R5_CLK_GATED 0x00000004
#define PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED 0x00000100
/* Applicable to only AM64x SoCs */
#define PROC_BOOT_STATUS_FLAG_R5_SINGLECORE_ONLY 0x00000200
/**
* struct k3_r5_mem - internal memory structure
* @cpu_addr: MPU virtual address of the memory region
* @bus_addr: Bus address used to access the memory region
* @dev_addr: Device address from remoteproc view
* @size: Size of the memory region
*/
struct k3_r5_mem {
void __iomem *cpu_addr;
phys_addr_t bus_addr;
u32 dev_addr;
size_t size;
};
/*
 * Not all cluster mode values are applicable on all SoCs. The following
* are the modes supported on various SoCs:
* Split mode : AM65x, J721E, J7200 and AM64x SoCs
* LockStep mode : AM65x, J721E and J7200 SoCs
* Single-CPU mode : AM64x SoCs only
* Single-Core mode : AM62x, AM62A SoCs
*/
enum cluster_mode {
CLUSTER_MODE_SPLIT = 0,
CLUSTER_MODE_LOCKSTEP,
CLUSTER_MODE_SINGLECPU,
CLUSTER_MODE_SINGLECORE
};
/**
* struct k3_r5_soc_data - match data to handle SoC variations
* @tcm_is_double: flag to denote the larger unified TCMs in certain modes
* @tcm_ecc_autoinit: flag to denote the auto-initialization of TCMs for ECC
* @single_cpu_mode: flag to denote if SoC/IP supports Single-CPU mode
* @is_single_core: flag to denote if SoC/IP has only single core R5
*/
struct k3_r5_soc_data {
bool tcm_is_double;
bool tcm_ecc_autoinit;
bool single_cpu_mode;
bool is_single_core;
};
/**
* struct k3_r5_cluster - K3 R5F Cluster structure
* @dev: cached device pointer
* @mode: Mode to configure the Cluster - Split or LockStep
* @cores: list of R5 cores within the cluster
* @soc_data: SoC-specific feature data for a R5FSS
*/
struct k3_r5_cluster {
struct device *dev;
enum cluster_mode mode;
struct list_head cores;
const struct k3_r5_soc_data *soc_data;
};
/**
* struct k3_r5_core - K3 R5 core structure
* @elem: linked list item
* @dev: cached device pointer
* @rproc: rproc handle representing this core
* @mem: internal memory regions data
* @sram: on-chip SRAM memory regions data
* @num_mems: number of internal memory regions
* @num_sram: number of on-chip SRAM memory regions
* @reset: reset control handle
* @tsp: TI-SCI processor control handle
* @ti_sci: TI-SCI handle
* @ti_sci_id: TI-SCI device identifier
* @atcm_enable: flag to control ATCM enablement
* @btcm_enable: flag to control BTCM enablement
* @loczrama: flag to dictate which TCM is at device address 0x0
*/
struct k3_r5_core {
struct list_head elem;
struct device *dev;
struct rproc *rproc;
struct k3_r5_mem *mem;
struct k3_r5_mem *sram;
int num_mems;
int num_sram;
struct reset_control *reset;
struct ti_sci_proc *tsp;
const struct ti_sci_handle *ti_sci;
u32 ti_sci_id;
u32 atcm_enable;
u32 btcm_enable;
u32 loczrama;
};
/**
* struct k3_r5_rproc - K3 remote processor state
* @dev: cached device pointer
* @cluster: cached pointer to parent cluster structure
* @mbox: mailbox channel handle
* @client: mailbox client to request the mailbox channel
* @rproc: rproc handle
* @core: cached pointer to r5 core structure being used
* @rmem: reserved memory regions data
* @num_rmems: number of reserved memory regions
*/
struct k3_r5_rproc {
struct device *dev;
struct k3_r5_cluster *cluster;
struct mbox_chan *mbox;
struct mbox_client client;
struct rproc *rproc;
struct k3_r5_core *core;
struct k3_r5_mem *rmem;
int num_rmems;
};
/**
* k3_r5_rproc_mbox_callback() - inbound mailbox message handler
* @client: mailbox client pointer used for requesting the mailbox channel
* @data: mailbox payload
*
* This handler is invoked by the OMAP mailbox driver whenever a mailbox
* message is received. Usually, the mailbox payload simply contains
* the index of the virtqueue that is kicked by the remote processor,
* and we let remoteproc core handle it.
*
* In addition to virtqueue indices, we also have some out-of-band values
* that indicate different events. Those values are deliberately very
* large so they don't coincide with virtqueue indices.
*/
static void k3_r5_rproc_mbox_callback(struct mbox_client *client, void *data)
{
struct k3_r5_rproc *kproc = container_of(client, struct k3_r5_rproc,
client);
struct device *dev = kproc->rproc->dev.parent;
const char *name = kproc->rproc->name;
u32 msg = omap_mbox_message(data);
dev_dbg(dev, "mbox msg: 0x%x\n", msg);
switch (msg) {
case RP_MBOX_CRASH:
/*
* remoteproc detected an exception, but error recovery is not
* supported. So, just log this for now
*/
dev_err(dev, "K3 R5F rproc %s crashed\n", name);
break;
case RP_MBOX_ECHO_REPLY:
dev_info(dev, "received echo reply from %s\n", name);
break;
default:
/* silently handle all other valid messages */
if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG)
return;
if (msg > kproc->rproc->max_notifyid) {
dev_dbg(dev, "dropping unknown message 0x%x", msg);
return;
}
/* msg contains the index of the triggered vring */
if (rproc_vq_interrupt(kproc->rproc, msg) == IRQ_NONE)
dev_dbg(dev, "no message was found in vqid %d\n", msg);
}
}
/* kick a virtqueue */
static void k3_r5_rproc_kick(struct rproc *rproc, int vqid)
{
struct k3_r5_rproc *kproc = rproc->priv;
struct device *dev = rproc->dev.parent;
mbox_msg_t msg = (mbox_msg_t)vqid;
int ret;
/* send the index of the triggered virtqueue in the mailbox payload */
ret = mbox_send_message(kproc->mbox, (void *)msg);
if (ret < 0)
dev_err(dev, "failed to send mailbox message, status = %d\n",
ret);
}
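/*
 * Put a single core back into reset in Split mode: assert the core's
 * local reset first, then turn off its PSC module via TI-SCI, undoing
 * the local reset if the module-reset step fails.
 */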
static int k3_r5_split_reset(struct k3_r5_core *core)
{
int ret;
ret = reset_control_assert(core->reset);
if (ret) {
dev_err(core->dev, "local-reset assert failed, ret = %d\n",
ret);
return ret;
}
ret = core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
core->ti_sci_id);
if (ret) {
dev_err(core->dev, "module-reset assert failed, ret = %d\n",
ret);
if (reset_control_deassert(core->reset))
dev_warn(core->dev, "local-reset deassert back failed\n");
}
return ret;
}
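/*
 * Release a single core from reset in Split mode: power up the PSC
 * module via TI-SCI first, then deassert the core's local reset,
 * rolling back the module power-up if the local deassert fails.
 */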
static int k3_r5_split_release(struct k3_r5_core *core)
{
int ret;
ret = core->ti_sci->ops.dev_ops.get_device(core->ti_sci,
core->ti_sci_id);
if (ret) {
dev_err(core->dev, "module-reset deassert failed, ret = %d\n",
ret);
return ret;
}
ret = reset_control_deassert(core->reset);
if (ret) {
dev_err(core->dev, "local-reset deassert failed, ret = %d\n",
ret);
if (core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
core->ti_sci_id))
dev_warn(core->dev, "module-reset assert back failed\n");
}
return ret;
}
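/*
 * Reset the whole cluster in LockStep mode: assert the local resets on
 * all cores, then disable their PSC modules, unrolling both steps on
 * failure.
 */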
static int k3_r5_lockstep_reset(struct k3_r5_cluster *cluster)
{
struct k3_r5_core *core;
int ret;
/* assert local reset on all applicable cores */
list_for_each_entry(core, &cluster->cores, elem) {
ret = reset_control_assert(core->reset);
if (ret) {
dev_err(core->dev, "local-reset assert failed, ret = %d\n",
ret);
core = list_prev_entry(core, elem);
goto unroll_local_reset;
}
}
/* disable PSC modules on all applicable cores */
list_for_each_entry(core, &cluster->cores, elem) {
ret = core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
core->ti_sci_id);
if (ret) {
dev_err(core->dev, "module-reset assert failed, ret = %d\n",
ret);
goto unroll_module_reset;
}
}
return 0;
unroll_module_reset:
list_for_each_entry_continue_reverse(core, &cluster->cores, elem) {
if (core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
core->ti_sci_id))
dev_warn(core->dev, "module-reset assert back failed\n");
}
core = list_last_entry(&cluster->cores, struct k3_r5_core, elem);
unroll_local_reset:
list_for_each_entry_from_reverse(core, &cluster->cores, elem) {
if (reset_control_deassert(core->reset))
dev_warn(core->dev, "local-reset deassert back failed\n");
}
return ret;
}
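/*
 * Release the whole cluster in LockStep mode: enable the PSC modules on
 * all cores (Core1 first), then deassert their local resets, unrolling
 * both steps on failure.
 */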
static int k3_r5_lockstep_release(struct k3_r5_cluster *cluster)
{
struct k3_r5_core *core;
int ret;
/* enable PSC modules on all applicable cores */
list_for_each_entry_reverse(core, &cluster->cores, elem) {
ret = core->ti_sci->ops.dev_ops.get_device(core->ti_sci,
core->ti_sci_id);
if (ret) {
dev_err(core->dev, "module-reset deassert failed, ret = %d\n",
ret);
core = list_next_entry(core, elem);
goto unroll_module_reset;
}
}
/* deassert local reset on all applicable cores */
list_for_each_entry_reverse(core, &cluster->cores, elem) {
ret = reset_control_deassert(core->reset);
if (ret) {
dev_err(core->dev, "module-reset deassert failed, ret = %d\n",
ret);
goto unroll_local_reset;
}
}
return 0;
unroll_local_reset:
list_for_each_entry_continue(core, &cluster->cores, elem) {
if (reset_control_assert(core->reset))
dev_warn(core->dev, "local-reset assert back failed\n");
}
core = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
unroll_module_reset:
list_for_each_entry_from(core, &cluster->cores, elem) {
if (core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
core->ti_sci_id))
dev_warn(core->dev, "module-reset assert back failed\n");
}
return ret;
}
static inline int k3_r5_core_halt(struct k3_r5_core *core)
{
return ti_sci_proc_set_control(core->tsp,
PROC_BOOT_CTRL_FLAG_R5_CORE_HALT, 0);
}
static inline int k3_r5_core_run(struct k3_r5_core *core)
{
return ti_sci_proc_set_control(core->tsp,
0, PROC_BOOT_CTRL_FLAG_R5_CORE_HALT);
}
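/*
 * Request the OMAP mailbox channel used for virtqueue kicks and send an
 * initial echo request; the reply only arrives once the remote firmware
 * is up.
 */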
static int k3_r5_rproc_request_mbox(struct rproc *rproc)
{
struct k3_r5_rproc *kproc = rproc->priv;
struct mbox_client *client = &kproc->client;
struct device *dev = kproc->dev;
int ret;
client->dev = dev;
client->tx_done = NULL;
client->rx_callback = k3_r5_rproc_mbox_callback;
client->tx_block = false;
client->knows_txdone = false;
kproc->mbox = mbox_request_channel(client, 0);
if (IS_ERR(kproc->mbox)) {
ret = -EBUSY;
dev_err(dev, "mbox_request_channel failed: %ld\n",
PTR_ERR(kproc->mbox));
return ret;
}
/*
* Ping the remote processor, this is only for sanity-sake for now;
* there is no functional effect whatsoever.
*
* Note that the reply will _not_ arrive immediately: this message
* will wait in the mailbox fifo until the remote processor is booted.
*/
ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
if (ret < 0) {
dev_err(dev, "mbox_send_message failed: %d\n", ret);
mbox_free_channel(kproc->mbox);
return ret;
}
return 0;
}
/*
* The R5F cores have controls for both a reset and a halt/run. The code
* execution from DDR requires the initial boot-strapping code to be run
* from the internal TCMs. This function is used to release the resets on
* applicable cores to allow loading into the TCMs. The .prepare() ops is
* invoked by remoteproc core before any firmware loading, and is followed
* by the .start() ops after loading to actually let the R5 cores run.
*
* The Single-CPU mode on applicable SoCs (eg: AM64x) only uses Core0 to
* execute code, but combines the TCMs from both cores. The resets for both
* cores need to be released to make this possible, as the TCMs are in general
* private to each core. Only Core0 needs to be unhalted for running the
* cluster in this mode. The function uses the same reset logic as LockStep
* mode for this (though the behavior is agnostic of the reset release order).
* This callback is invoked only in remoteproc mode.
*/
static int k3_r5_rproc_prepare(struct rproc *rproc)
{
struct k3_r5_rproc *kproc = rproc->priv;
struct k3_r5_cluster *cluster = kproc->cluster;
struct k3_r5_core *core = kproc->core;
struct device *dev = kproc->dev;
u32 ctrl = 0, cfg = 0, stat = 0;
u64 boot_vec = 0;
bool mem_init_dis;
int ret;
ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl, &stat);
if (ret < 0)
return ret;
mem_init_dis = !!(cfg & PROC_BOOT_CFG_FLAG_R5_MEM_INIT_DIS);
/* Re-use LockStep-mode reset logic for Single-CPU mode */
ret = (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
cluster->mode == CLUSTER_MODE_SINGLECPU) ?
k3_r5_lockstep_release(cluster) : k3_r5_split_release(core);
if (ret) {
dev_err(dev, "unable to enable cores for TCM loading, ret = %d\n",
ret);
return ret;
}
/*
* Newer IP revisions like on J7200 SoCs support h/w auto-initialization
* of TCMs, so there is no need to perform the s/w memzero. This bit is
 * configurable through System Firmware; the default value does perform
 * auto-init, but account for it in case it is disabled.
*/
if (cluster->soc_data->tcm_ecc_autoinit && !mem_init_dis) {
dev_dbg(dev, "leveraging h/w init for TCM memories\n");
return 0;
}
/*
* Zero out both TCMs unconditionally (access from v8 Arm core is not
* affected by ATCM & BTCM enable configuration values) so that ECC
* can be effective on all TCM addresses.
*/
dev_dbg(dev, "zeroing out ATCM memory\n");
memset(core->mem[0].cpu_addr, 0x00, core->mem[0].size);
dev_dbg(dev, "zeroing out BTCM memory\n");
memset(core->mem[1].cpu_addr, 0x00, core->mem[1].size);
return 0;
}
/*
 * This function implements the .unprepare() ops and performs the complementary
* operations to that of the .prepare() ops. The function is used to assert the
* resets on all applicable cores for the rproc device (depending on LockStep
* or Split mode). This completes the second portion of powering down the R5F
* cores. The cores themselves are only halted in the .stop() ops, and the
* .unprepare() ops is invoked by the remoteproc core after the remoteproc is
* stopped.
*
* The Single-CPU mode on applicable SoCs (eg: AM64x) combines the TCMs from
* both cores. The access is made possible only with releasing the resets for
* both cores, but with only Core0 unhalted. This function re-uses the same
* reset assert logic as LockStep mode for this mode (though the behavior is
* agnostic of the reset assert order). This callback is invoked only in
* remoteproc mode.
*/
static int k3_r5_rproc_unprepare(struct rproc *rproc)
{
struct k3_r5_rproc *kproc = rproc->priv;
struct k3_r5_cluster *cluster = kproc->cluster;
struct k3_r5_core *core = kproc->core;
struct device *dev = kproc->dev;
int ret;
/* Re-use LockStep-mode reset logic for Single-CPU mode */
ret = (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
cluster->mode == CLUSTER_MODE_SINGLECPU) ?
k3_r5_lockstep_reset(cluster) : k3_r5_split_reset(core);
if (ret)
dev_err(dev, "unable to disable cores, ret = %d\n", ret);
return ret;
}
/*
* The R5F start sequence includes two different operations
* 1. Configure the boot vector for R5F core(s)
* 2. Unhalt/Run the R5F core(s)
*
* The sequence is different between LockStep and Split modes. The LockStep
* mode requires the boot vector to be configured only for Core0, and then
* unhalt both the cores to start the execution - Core1 needs to be unhalted
 * first followed by Core0. Split mode requires Core0 to always be maintained
 * in a higher power state than Core1 (implying Core1 must only be started
 * after Core0 is started).
*
* The Single-CPU mode on applicable SoCs (eg: AM64x) only uses Core0 to execute
* code, so only Core0 needs to be unhalted. The function uses the same logic
* flow as Split-mode for this. This callback is invoked only in remoteproc
* mode.
*/
static int k3_r5_rproc_start(struct rproc *rproc)
{
struct k3_r5_rproc *kproc = rproc->priv;
struct k3_r5_cluster *cluster = kproc->cluster;
struct device *dev = kproc->dev;
struct k3_r5_core *core;
u32 boot_addr;
int ret;
ret = k3_r5_rproc_request_mbox(rproc);
if (ret)
return ret;
boot_addr = rproc->bootaddr;
/* TODO: add boot_addr sanity checking */
dev_dbg(dev, "booting R5F core using boot addr = 0x%x\n", boot_addr);
/* boot vector need not be programmed for Core1 in LockStep mode */
core = kproc->core;
ret = ti_sci_proc_set_config(core->tsp, boot_addr, 0, 0);
if (ret)
goto put_mbox;
/* unhalt/run all applicable cores */
if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
list_for_each_entry_reverse(core, &cluster->cores, elem) {
ret = k3_r5_core_run(core);
if (ret)
goto unroll_core_run;
}
} else {
ret = k3_r5_core_run(core);
if (ret)
goto put_mbox;
}
return 0;
unroll_core_run:
list_for_each_entry_continue(core, &cluster->cores, elem) {
if (k3_r5_core_halt(core))
dev_warn(core->dev, "core halt back failed\n");
}
put_mbox:
mbox_free_channel(kproc->mbox);
return ret;
}
/*
* The R5F stop function includes the following operations
* 1. Halt R5F core(s)
*
 * The sequence is different between LockStep and Split modes, and the order
 * in which the operations are performed on the cores is also in general the
 * reverse of the start function. The LockStep mode requires each operation to
 * be performed first on Core0 followed by Core1. Split mode requires Core0 to
 * always be maintained in a higher power state than Core1 (implying Core1
 * needs to be stopped before Core0).
*
* The Single-CPU mode on applicable SoCs (eg: AM64x) only uses Core0 to execute
* code, so only Core0 needs to be halted. The function uses the same logic
* flow as Split-mode for this.
*
* Note that the R5F halt operation in general is not effective when the R5F
* core is running, but is needed to make sure the core won't run after
* deasserting the reset the subsequent time. The asserting of reset can
* be done here, but is preferred to be done in the .unprepare() ops - this
* maintains the symmetric behavior between the .start(), .stop(), .prepare()
* and .unprepare() ops, and also balances them well between sysfs 'state'
* flow and device bind/unbind or module removal. This callback is invoked
* only in remoteproc mode.
*/
static int k3_r5_rproc_stop(struct rproc *rproc)
{
struct k3_r5_rproc *kproc = rproc->priv;
struct k3_r5_cluster *cluster = kproc->cluster;
struct k3_r5_core *core = kproc->core;
int ret;
/* halt all applicable cores */
if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
list_for_each_entry(core, &cluster->cores, elem) {
ret = k3_r5_core_halt(core);
if (ret) {
core = list_prev_entry(core, elem);
goto unroll_core_halt;
}
}
} else {
ret = k3_r5_core_halt(core);
if (ret)
goto out;
}
mbox_free_channel(kproc->mbox);
return 0;
unroll_core_halt:
list_for_each_entry_from_reverse(core, &cluster->cores, elem) {
if (k3_r5_core_run(core))
dev_warn(core->dev, "core run back failed\n");
}
out:
return ret;
}
/*
* Attach to a running R5F remote processor (IPC-only mode)
*
* The R5F attach callback only needs to request the mailbox, the remote
* processor is already booted, so there is no need to issue any TI-SCI
* commands to boot the R5F cores in IPC-only mode. This callback is invoked
* only in IPC-only mode.
*/
static int k3_r5_rproc_attach(struct rproc *rproc)
{
struct k3_r5_rproc *kproc = rproc->priv;
struct device *dev = kproc->dev;
int ret;
ret = k3_r5_rproc_request_mbox(rproc);
if (ret)
return ret;
dev_info(dev, "R5F core initialized in IPC-only mode\n");
return 0;
}
/*
* Detach from a running R5F remote processor (IPC-only mode)
*
* The R5F detach callback performs the opposite operation to attach callback
* and only needs to release the mailbox, the R5F cores are not stopped and
* will be left in booted state in IPC-only mode. This callback is invoked
* only in IPC-only mode.
*/
static int k3_r5_rproc_detach(struct rproc *rproc)
{
struct k3_r5_rproc *kproc = rproc->priv;
struct device *dev = kproc->dev;
mbox_free_channel(kproc->mbox);
dev_info(dev, "R5F core deinitialized in IPC-only mode\n");
return 0;
}
/*
* This function implements the .get_loaded_rsc_table() callback and is used
* to provide the resource table for the booted R5F in IPC-only mode. The K3 R5F
* firmwares follow a design-by-contract approach and are expected to have the
* resource table at the base of the DDR region reserved for firmware usage.
* This provides flexibility for the remote processor to be booted by different
* bootloaders that may or may not have the ability to publish the resource table
* address and size through a DT property. This callback is invoked only in
* IPC-only mode.
*/
static struct resource_table *k3_r5_get_loaded_rsc_table(struct rproc *rproc,
size_t *rsc_table_sz)
{
struct k3_r5_rproc *kproc = rproc->priv;
struct device *dev = kproc->dev;
if (!kproc->rmem[0].cpu_addr) {
dev_err(dev, "memory-region #1 does not exist, loaded rsc table can't be found");
return ERR_PTR(-ENOMEM);
}
/*
* NOTE: The resource table size is currently hard-coded to a maximum
* of 256 bytes. The most common resource table usage for K3 firmwares
* is to only have the vdev resource entry and an optional trace entry.
* The exact size could be computed based on resource table address, but
* the hard-coded value suffices to support the IPC-only mode.
*/
*rsc_table_sz = 256;
return (struct resource_table *)kproc->rmem[0].cpu_addr;
}
/*
* Internal Memory translation helper
*
* Custom function implementing the rproc .da_to_va ops to provide address
* translation (device address to kernel virtual address) for internal RAMs
* present in a DSP or IPU device). The translated addresses can be used
* either by the remoteproc core for loading, or by any rpmsg bus drivers.
*/
static void *k3_r5_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct k3_r5_rproc *kproc = rproc->priv;
struct k3_r5_core *core = kproc->core;
void __iomem *va = NULL;
phys_addr_t bus_addr;
u32 dev_addr, offset;
size_t size;
int i;
if (len == 0)
return NULL;
/* handle both R5 and SoC views of ATCM and BTCM */
for (i = 0; i < core->num_mems; i++) {
bus_addr = core->mem[i].bus_addr;
dev_addr = core->mem[i].dev_addr;
size = core->mem[i].size;
/* handle R5-view addresses of TCMs */
if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
offset = da - dev_addr;
va = core->mem[i].cpu_addr + offset;
return (__force void *)va;
}
/* handle SoC-view addresses of TCMs */
if (da >= bus_addr && ((da + len) <= (bus_addr + size))) {
offset = da - bus_addr;
va = core->mem[i].cpu_addr + offset;
return (__force void *)va;
}
}
/* handle any SRAM regions using SoC-view addresses */
for (i = 0; i < core->num_sram; i++) {
dev_addr = core->sram[i].dev_addr;
size = core->sram[i].size;
if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
offset = da - dev_addr;
va = core->sram[i].cpu_addr + offset;
return (__force void *)va;
}
}
/* handle static DDR reserved memory regions */
for (i = 0; i < kproc->num_rmems; i++) {
dev_addr = kproc->rmem[i].dev_addr;
size = kproc->rmem[i].size;
if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
offset = da - dev_addr;
va = kproc->rmem[i].cpu_addr + offset;
return (__force void *)va;
}
}
return NULL;
}
static const struct rproc_ops k3_r5_rproc_ops = {
.prepare = k3_r5_rproc_prepare,
.unprepare = k3_r5_rproc_unprepare,
.start = k3_r5_rproc_start,
.stop = k3_r5_rproc_stop,
.kick = k3_r5_rproc_kick,
.da_to_va = k3_r5_rproc_da_to_va,
};
/*
* Internal R5F Core configuration
*
* Each R5FSS has a cluster-level setting for configuring the processor
* subsystem either in a safety/fault-tolerant LockStep mode or a performance
 * oriented Split mode on most SoCs. A few SoCs support a non-safety mode,
 * called Single-CPU mode, as an alternative to LockStep mode that exercises
 * only a single R5F core. Each R5F core has a number of settings to
 * enable/disable each of the TCMs and to control which TCM appears at the
 * R5F core's address 0x0. These settings need to be configured before the resets for the
* corresponding core are released. These settings are all protected and managed
* by the System Processor.
*
* This function is used to pre-configure these settings for each R5F core, and
* the configuration is all done through various ti_sci_proc functions that
* communicate with the System Processor. The function also ensures that both
* the cores are halted before the .prepare() step.
*
* The function is called from k3_r5_cluster_rproc_init() and is invoked either
* once (in LockStep mode or Single-CPU modes) or twice (in Split mode). Support
* for LockStep-mode is dictated by an eFUSE register bit, and the config
* settings retrieved from DT are adjusted accordingly as per the permitted
* cluster mode. Another eFUSE register bit dictates if the R5F cluster only
* supports a Single-CPU mode. All cluster level settings like Cluster mode and
* TEINIT (exception handling state dictating ARM or Thumb mode) can only be set
* and retrieved using Core0.
*
* The function behavior is different based on the cluster mode. The R5F cores
* are configured independently as per their individual settings in Split mode.
* They are identically configured in LockStep mode using the primary Core0
* settings. However, some individual settings cannot be set in LockStep mode.
* This is overcome by switching to Split-mode initially and then programming
 * both the cores with the same settings, before reconfiguring again for
* LockStep mode.
*/
static int k3_r5_rproc_configure(struct k3_r5_rproc *kproc)
{
struct k3_r5_cluster *cluster = kproc->cluster;
struct device *dev = kproc->dev;
struct k3_r5_core *core0, *core, *temp;
u32 ctrl = 0, cfg = 0, stat = 0;
u32 set_cfg = 0, clr_cfg = 0;
u64 boot_vec = 0;
bool lockstep_en;
bool single_cpu;
int ret;
core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
if (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
cluster->mode == CLUSTER_MODE_SINGLECPU ||
cluster->mode == CLUSTER_MODE_SINGLECORE) {
core = core0;
} else {
core = kproc->core;
}
ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl,
&stat);
if (ret < 0)
return ret;
dev_dbg(dev, "boot_vector = 0x%llx, cfg = 0x%x ctrl = 0x%x stat = 0x%x\n",
boot_vec, cfg, ctrl, stat);
single_cpu = !!(stat & PROC_BOOT_STATUS_FLAG_R5_SINGLECORE_ONLY);
lockstep_en = !!(stat & PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED);
/* Override to single CPU mode if set in status flag */
if (single_cpu && cluster->mode == CLUSTER_MODE_SPLIT) {
dev_err(cluster->dev, "split-mode not permitted, force configuring for single-cpu mode\n");
cluster->mode = CLUSTER_MODE_SINGLECPU;
}
/* Override to split mode if lockstep enable bit is not set in status flag */
if (!lockstep_en && cluster->mode == CLUSTER_MODE_LOCKSTEP) {
dev_err(cluster->dev, "lockstep mode not permitted, force configuring for split-mode\n");
cluster->mode = CLUSTER_MODE_SPLIT;
}
/* always enable ARM mode and set boot vector to 0 */
boot_vec = 0x0;
if (core == core0) {
clr_cfg = PROC_BOOT_CFG_FLAG_R5_TEINIT;
/*
* Single-CPU configuration bit can only be configured
* on Core0 and system firmware will NACK any requests
* with the bit configured, so program it only on
* permitted cores
*/
if (cluster->mode == CLUSTER_MODE_SINGLECPU ||
cluster->mode == CLUSTER_MODE_SINGLECORE) {
set_cfg = PROC_BOOT_CFG_FLAG_R5_SINGLE_CORE;
} else {
/*
* LockStep configuration bit is Read-only on Split-mode
* _only_ devices and system firmware will NACK any
* requests with the bit configured, so program it only
* on permitted devices
*/
if (lockstep_en)
clr_cfg |= PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
}
}
if (core->atcm_enable)
set_cfg |= PROC_BOOT_CFG_FLAG_R5_ATCM_EN;
else
clr_cfg |= PROC_BOOT_CFG_FLAG_R5_ATCM_EN;
if (core->btcm_enable)
set_cfg |= PROC_BOOT_CFG_FLAG_R5_BTCM_EN;
else
clr_cfg |= PROC_BOOT_CFG_FLAG_R5_BTCM_EN;
if (core->loczrama)
set_cfg |= PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE;
else
clr_cfg |= PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE;
if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
/*
* work around system firmware limitations to make sure both
* cores are programmed symmetrically in LockStep. LockStep
* and TEINIT config is only allowed with Core0.
*/
list_for_each_entry(temp, &cluster->cores, elem) {
ret = k3_r5_core_halt(temp);
if (ret)
goto out;
if (temp != core) {
clr_cfg &= ~PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
clr_cfg &= ~PROC_BOOT_CFG_FLAG_R5_TEINIT;
}
ret = ti_sci_proc_set_config(temp->tsp, boot_vec,
set_cfg, clr_cfg);
if (ret)
goto out;
}
set_cfg = PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
clr_cfg = 0;
ret = ti_sci_proc_set_config(core->tsp, boot_vec,
set_cfg, clr_cfg);
} else {
ret = k3_r5_core_halt(core);
if (ret)
goto out;
ret = ti_sci_proc_set_config(core->tsp, boot_vec,
set_cfg, clr_cfg);
}
out:
return ret;
}
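/*
 * Parse the "memory-region" reserved-memory phandles: region 0 becomes
 * the DMA pool for vrings and vring buffers, while the remaining regions
 * are ioremapped as static carveouts usable for address translation.
 */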
static int k3_r5_reserved_mem_init(struct k3_r5_rproc *kproc)
{
struct device *dev = kproc->dev;
struct device_node *np = dev_of_node(dev);
struct device_node *rmem_np;
struct reserved_mem *rmem;
int num_rmems;
int ret, i;
num_rmems = of_property_count_elems_of_size(np, "memory-region",
sizeof(phandle));
if (num_rmems <= 0) {
dev_err(dev, "device does not have reserved memory regions, ret = %d\n",
num_rmems);
return -EINVAL;
}
if (num_rmems < 2) {
dev_err(dev, "device needs at least two memory regions to be defined, num = %d\n",
num_rmems);
return -EINVAL;
}
/* use reserved memory region 0 for vring DMA allocations */
ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
if (ret) {
dev_err(dev, "device cannot initialize DMA pool, ret = %d\n",
ret);
return ret;
}
num_rmems--;
kproc->rmem = kcalloc(num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
if (!kproc->rmem) {
ret = -ENOMEM;
goto release_rmem;
}
/* use remaining reserved memory regions for static carveouts */
for (i = 0; i < num_rmems; i++) {
rmem_np = of_parse_phandle(np, "memory-region", i + 1);
if (!rmem_np) {
ret = -EINVAL;
goto unmap_rmem;
}
rmem = of_reserved_mem_lookup(rmem_np);
if (!rmem) {
of_node_put(rmem_np);
ret = -EINVAL;
goto unmap_rmem;
}
of_node_put(rmem_np);
kproc->rmem[i].bus_addr = rmem->base;
/*
* R5Fs do not have an MMU, but have a Region Address Translator
* (RAT) module that provides a fixed entry translation between
 * the 32-bit processor addresses and 64-bit bus addresses. The
 * RAT is programmable only by the R5F cores, and is currently not
 * supported by this driver, so 64-bit address regions are not
 * supported. The absence of MMUs implies that the R5F device
 * addresses/supported memory regions are restricted to 32-bit
 * bus addresses, and are identical.
*/
kproc->rmem[i].dev_addr = (u32)rmem->base;
kproc->rmem[i].size = rmem->size;
kproc->rmem[i].cpu_addr = ioremap_wc(rmem->base, rmem->size);
if (!kproc->rmem[i].cpu_addr) {
dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
i + 1, &rmem->base, &rmem->size);
ret = -ENOMEM;
goto unmap_rmem;
}
dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
i + 1, &kproc->rmem[i].bus_addr,
kproc->rmem[i].size, kproc->rmem[i].cpu_addr,
kproc->rmem[i].dev_addr);
}
kproc->num_rmems = num_rmems;
return 0;
unmap_rmem:
for (i--; i >= 0; i--)
iounmap(kproc->rmem[i].cpu_addr);
kfree(kproc->rmem);
release_rmem:
of_reserved_mem_device_release(dev);
return ret;
}
static void k3_r5_reserved_mem_exit(struct k3_r5_rproc *kproc)
{
int i;
for (i = 0; i < kproc->num_rmems; i++)
iounmap(kproc->rmem[i].cpu_addr);
kfree(kproc->rmem);
of_reserved_mem_device_release(kproc->dev);
}
/*
* Each R5F core within a typical R5FSS instance has a total of 64 KB of TCMs,
* split equally into two 32 KB banks between ATCM and BTCM. The TCMs from both
* cores are usable in Split-mode, but only the Core0 TCMs can be used in
 * LockStep-mode. The newer revisions of the R5FSS IP maximize these TCMs by
* leveraging the Core1 TCMs as well in certain modes where they would have
* otherwise been unusable (Eg: LockStep-mode on J7200 SoCs, Single-CPU mode on
* AM64x SoCs). This is done by making a Core1 TCM visible immediately after the
* corresponding Core0 TCM. The SoC memory map uses the larger 64 KB sizes for
* the Core0 TCMs, and the dts representation reflects this increased size on
* supported SoCs. The Core0 TCM sizes therefore have to be adjusted to only
* half the original size in Split mode.
*/
static void k3_r5_adjust_tcm_sizes(struct k3_r5_rproc *kproc)
{
struct k3_r5_cluster *cluster = kproc->cluster;
struct k3_r5_core *core = kproc->core;
struct device *cdev = core->dev;
struct k3_r5_core *core0;
if (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
cluster->mode == CLUSTER_MODE_SINGLECPU ||
cluster->mode == CLUSTER_MODE_SINGLECORE ||
!cluster->soc_data->tcm_is_double)
return;
core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
if (core == core0) {
WARN_ON(core->mem[0].size != SZ_64K);
WARN_ON(core->mem[1].size != SZ_64K);
core->mem[0].size /= 2;
core->mem[1].size /= 2;
dev_dbg(cdev, "adjusted TCM sizes, ATCM = 0x%zx BTCM = 0x%zx\n",
core->mem[0].size, core->mem[1].size);
}
}
/*
* This function checks and configures a R5F core for IPC-only or remoteproc
 * mode. The driver is configured to be in IPC-only mode for an R5F core when
 * the core has been loaded and started by a bootloader. The IPC-only mode is
 * detected by querying the System Firmware for reset, power on and halt status
 * and ensuring that the core is running. Any incomplete bootloader steps are
 * detected and treated as errors.
*
* In IPC-only mode, the driver state flags for ATCM, BTCM and LOCZRAMA settings
* and cluster mode parsed originally from kernel DT are updated to reflect the
* actual values configured by bootloader. The driver internal device memory
* addresses for TCMs are also updated.
*/
static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc)
{
struct k3_r5_cluster *cluster = kproc->cluster;
struct k3_r5_core *core = kproc->core;
struct device *cdev = core->dev;
bool r_state = false, c_state = false, lockstep_en = false, single_cpu = false;
u32 ctrl = 0, cfg = 0, stat = 0, halted = 0;
u64 boot_vec = 0;
u32 atcm_enable, btcm_enable, loczrama;
struct k3_r5_core *core0;
enum cluster_mode mode = cluster->mode;
int ret;
core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
ret = core->ti_sci->ops.dev_ops.is_on(core->ti_sci, core->ti_sci_id,
&r_state, &c_state);
if (ret) {
dev_err(cdev, "failed to get initial state, mode cannot be determined, ret = %d\n",
ret);
return ret;
}
if (r_state != c_state) {
dev_warn(cdev, "R5F core may have been powered on by a different host, programmed state (%d) != actual state (%d)\n",
r_state, c_state);
}
ret = reset_control_status(core->reset);
if (ret < 0) {
dev_err(cdev, "failed to get initial local reset status, ret = %d\n",
ret);
return ret;
}
ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl,
&stat);
if (ret < 0) {
dev_err(cdev, "failed to get initial processor status, ret = %d\n",
ret);
return ret;
}
atcm_enable = cfg & PROC_BOOT_CFG_FLAG_R5_ATCM_EN ? 1 : 0;
btcm_enable = cfg & PROC_BOOT_CFG_FLAG_R5_BTCM_EN ? 1 : 0;
loczrama = cfg & PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE ? 1 : 0;
single_cpu = cfg & PROC_BOOT_CFG_FLAG_R5_SINGLE_CORE ? 1 : 0;
lockstep_en = cfg & PROC_BOOT_CFG_FLAG_R5_LOCKSTEP ? 1 : 0;
if (single_cpu && mode != CLUSTER_MODE_SINGLECORE)
mode = CLUSTER_MODE_SINGLECPU;
if (lockstep_en)
mode = CLUSTER_MODE_LOCKSTEP;
halted = ctrl & PROC_BOOT_CTRL_FLAG_R5_CORE_HALT;
/*
* IPC-only mode detection requires both local and module resets to
* be deasserted and R5F core to be unhalted. Local reset status is
* irrelevant if module reset is asserted (POR value has local reset
* deasserted), and is deemed as remoteproc mode
*/
if (c_state && !ret && !halted) {
dev_info(cdev, "configured R5F for IPC-only mode\n");
kproc->rproc->state = RPROC_DETACHED;
ret = 1;
/* override rproc ops with only required IPC-only mode ops */
kproc->rproc->ops->prepare = NULL;
kproc->rproc->ops->unprepare = NULL;
kproc->rproc->ops->start = NULL;
kproc->rproc->ops->stop = NULL;
kproc->rproc->ops->attach = k3_r5_rproc_attach;
kproc->rproc->ops->detach = k3_r5_rproc_detach;
kproc->rproc->ops->get_loaded_rsc_table =
k3_r5_get_loaded_rsc_table;
} else if (!c_state) {
dev_info(cdev, "configured R5F for remoteproc mode\n");
ret = 0;
} else {
dev_err(cdev, "mismatched mode: local_reset = %s, module_reset = %s, core_state = %s\n",
!ret ? "deasserted" : "asserted",
c_state ? "deasserted" : "asserted",
halted ? "halted" : "unhalted");
ret = -EINVAL;
}
/* fixup TCMs, cluster & core flags to actual values in IPC-only mode */
if (ret > 0) {
if (core == core0)
cluster->mode = mode;
core->atcm_enable = atcm_enable;
core->btcm_enable = btcm_enable;
core->loczrama = loczrama;
core->mem[0].dev_addr = loczrama ? 0 : K3_R5_TCM_DEV_ADDR;
core->mem[1].dev_addr = loczrama ? K3_R5_TCM_DEV_ADDR : 0;
}
return ret;
}
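/*
 * Allocate and register an rproc per R5F core (or a single rproc for the
 * whole cluster in LockStep, Single-CPU or Single-core modes), detecting
 * IPC-only vs. remoteproc mode and setting up memories along the way.
 */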
static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
{
struct k3_r5_cluster *cluster = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
struct k3_r5_rproc *kproc;
struct k3_r5_core *core, *core1;
struct device *cdev;
const char *fw_name;
struct rproc *rproc;
int ret, ret1;
core1 = list_last_entry(&cluster->cores, struct k3_r5_core, elem);
list_for_each_entry(core, &cluster->cores, elem) {
cdev = core->dev;
ret = rproc_of_parse_firmware(cdev, 0, &fw_name);
if (ret) {
dev_err(dev, "failed to parse firmware-name property, ret = %d\n",
ret);
goto out;
}
rproc = rproc_alloc(cdev, dev_name(cdev), &k3_r5_rproc_ops,
fw_name, sizeof(*kproc));
if (!rproc) {
ret = -ENOMEM;
goto out;
}
/* K3 R5s have a Region Address Translator (RAT) but no MMU */
rproc->has_iommu = false;
/* error recovery is not supported at present */
rproc->recovery_disabled = true;
kproc = rproc->priv;
kproc->cluster = cluster;
kproc->core = core;
kproc->dev = cdev;
kproc->rproc = rproc;
core->rproc = rproc;
ret = k3_r5_rproc_configure_mode(kproc);
if (ret < 0)
goto err_config;
if (ret)
goto init_rmem;
ret = k3_r5_rproc_configure(kproc);
if (ret) {
dev_err(dev, "initial configure failed, ret = %d\n",
ret);
goto err_config;
}
init_rmem:
k3_r5_adjust_tcm_sizes(kproc);
ret = k3_r5_reserved_mem_init(kproc);
if (ret) {
dev_err(dev, "reserved memory init failed, ret = %d\n",
ret);
goto err_config;
}
ret = rproc_add(rproc);
if (ret) {
dev_err(dev, "rproc_add failed, ret = %d\n", ret);
goto err_add;
}
/*
 * create only one rproc in lockstep, single-cpu or
 * single-core mode
 */
if (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
cluster->mode == CLUSTER_MODE_SINGLECPU ||
cluster->mode == CLUSTER_MODE_SINGLECORE)
break;
}
return 0;
err_split:
if (rproc->state == RPROC_ATTACHED) {
ret1 = rproc_detach(rproc);
if (ret1) {
dev_err(kproc->dev, "failed to detach rproc, ret = %d\n",
ret1);
return ret1;
}
}
rproc_del(rproc);
err_add:
k3_r5_reserved_mem_exit(kproc);
err_config:
rproc_free(rproc);
core->rproc = NULL;
out:
/* undo core0 upon any failures on core1 in split-mode */
if (cluster->mode == CLUSTER_MODE_SPLIT && core == core1) {
core = list_prev_entry(core, elem);
rproc = core->rproc;
kproc = rproc->priv;
goto err_split;
}
return ret;
}
static void k3_r5_cluster_rproc_exit(void *data)
{
struct k3_r5_cluster *cluster = platform_get_drvdata(data);
struct k3_r5_rproc *kproc;
struct k3_r5_core *core;
struct rproc *rproc;
int ret;
/*
* lockstep mode and single-cpu modes have only one rproc associated
* with first core, whereas split-mode has two rprocs associated with
* each core, and requires that core1 be powered down first
*/
core = (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
cluster->mode == CLUSTER_MODE_SINGLECPU) ?
list_first_entry(&cluster->cores, struct k3_r5_core, elem) :
list_last_entry(&cluster->cores, struct k3_r5_core, elem);
list_for_each_entry_from_reverse(core, &cluster->cores, elem) {
rproc = core->rproc;
kproc = rproc->priv;
if (rproc->state == RPROC_ATTACHED) {
ret = rproc_detach(rproc);
if (ret) {
dev_err(kproc->dev, "failed to detach rproc, ret = %d\n", ret);
return;
}
}
rproc_del(rproc);
k3_r5_reserved_mem_exit(kproc);
rproc_free(rproc);
core->rproc = NULL;
}
}
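/*
 * Map the per-core ATCM and BTCM banks and compute their device addresses
 * based on the loczrama setting (which TCM appears at address 0x0).
 */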
static int k3_r5_core_of_get_internal_memories(struct platform_device *pdev,
struct k3_r5_core *core)
{
static const char * const mem_names[] = {"atcm", "btcm"};
struct device *dev = &pdev->dev;
struct resource *res;
int num_mems;
int i;
num_mems = ARRAY_SIZE(mem_names);
core->mem = devm_kcalloc(dev, num_mems, sizeof(*core->mem), GFP_KERNEL);
if (!core->mem)
return -ENOMEM;
for (i = 0; i < num_mems; i++) {
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
mem_names[i]);
if (!res) {
dev_err(dev, "found no memory resource for %s\n",
mem_names[i]);
return -EINVAL;
}
if (!devm_request_mem_region(dev, res->start,
resource_size(res),
dev_name(dev))) {
dev_err(dev, "could not request %s region for resource\n",
mem_names[i]);
return -EBUSY;
}
/*
* TCMs are designed in general to support RAM-like backing
* memories. So, map these as Normal Non-Cached memories. This
* also avoids/fixes any potential alignment faults due to
* unaligned data accesses when using memcpy() or memset()
* functions (normally seen with device type memory).
*/
core->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
resource_size(res));
if (!core->mem[i].cpu_addr) {
dev_err(dev, "failed to map %s memory\n", mem_names[i]);
return -ENOMEM;
}
core->mem[i].bus_addr = res->start;
/*
* TODO:
* The R5F cores can place ATCM & BTCM anywhere in its address
* based on the corresponding Region Registers in the System
* Control coprocessor. For now, place ATCM and BTCM at
* addresses 0 and 0x41010000 (same as the bus address on AM65x
* SoCs) based on loczrama setting
*/
if (!strcmp(mem_names[i], "atcm")) {
core->mem[i].dev_addr = core->loczrama ?
0 : K3_R5_TCM_DEV_ADDR;
} else {
core->mem[i].dev_addr = core->loczrama ?
K3_R5_TCM_DEV_ADDR : 0;
}
core->mem[i].size = resource_size(res);
dev_dbg(dev, "memory %5s: bus addr %pa size 0x%zx va %pK da 0x%x\n",
mem_names[i], &core->mem[i].bus_addr,
core->mem[i].size, core->mem[i].cpu_addr,
core->mem[i].dev_addr);
}
core->num_mems = num_mems;
return 0;
}
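/*
 * Map any on-chip SRAM regions referenced through the optional "sram"
 * phandles; these are addressed identically from the SoC and R5F views.
 */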
static int k3_r5_core_of_get_sram_memories(struct platform_device *pdev,
struct k3_r5_core *core)
{
struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
struct device_node *sram_np;
struct resource res;
int num_sram;
int i, ret;
num_sram = of_property_count_elems_of_size(np, "sram", sizeof(phandle));
if (num_sram <= 0) {
dev_dbg(dev, "device does not use reserved on-chip memories, num_sram = %d\n",
num_sram);
return 0;
}
core->sram = devm_kcalloc(dev, num_sram, sizeof(*core->sram), GFP_KERNEL);
if (!core->sram)
return -ENOMEM;
for (i = 0; i < num_sram; i++) {
sram_np = of_parse_phandle(np, "sram", i);
if (!sram_np)
return -EINVAL;
if (!of_device_is_available(sram_np)) {
of_node_put(sram_np);
return -EINVAL;
}
ret = of_address_to_resource(sram_np, 0, &res);
of_node_put(sram_np);
if (ret)
return -EINVAL;
core->sram[i].bus_addr = res.start;
core->sram[i].dev_addr = res.start;
core->sram[i].size = resource_size(&res);
core->sram[i].cpu_addr = devm_ioremap_wc(dev, res.start,
resource_size(&res));
if (!core->sram[i].cpu_addr) {
dev_err(dev, "failed to parse and map sram%d memory at %pad\n",
i, &res.start);
return -ENOMEM;
}
dev_dbg(dev, "memory sram%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
i, &core->sram[i].bus_addr,
core->sram[i].size, core->sram[i].cpu_addr,
core->sram[i].dev_addr);
}
core->num_sram = num_sram;
return 0;
}
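/*
 * Build the TI-SCI processor control handle from the "ti,sci-proc-ids"
 * property (processor ID and host ID).
 */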
static
struct ti_sci_proc *k3_r5_core_of_get_tsp(struct device *dev,
const struct ti_sci_handle *sci)
{
struct ti_sci_proc *tsp;
u32 temp[2];
int ret;
ret = of_property_read_u32_array(dev_of_node(dev), "ti,sci-proc-ids",
temp, 2);
if (ret < 0)
return ERR_PTR(ret);
tsp = devm_kzalloc(dev, sizeof(*tsp), GFP_KERNEL);
if (!tsp)
return ERR_PTR(-ENOMEM);
tsp->dev = dev;
tsp->sci = sci;
tsp->ops = &sci->ops.proc_ops;
tsp->proc_id = temp[0];
tsp->host_id = temp[1];
return tsp;
}
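/*
 * Initialize one R5F core child device: parse the TCM configuration,
 * TI-SCI and reset handles, map its memories and claim the processor
 * through TI-SCI.
 */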
static int k3_r5_core_of_init(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev_of_node(dev);
struct k3_r5_core *core;
int ret;
if (!devres_open_group(dev, k3_r5_core_of_init, GFP_KERNEL))
return -ENOMEM;
core = devm_kzalloc(dev, sizeof(*core), GFP_KERNEL);
if (!core) {
ret = -ENOMEM;
goto err;
}
core->dev = dev;
/*
* Use SoC Power-on-Reset values as default if no DT properties are
* used to dictate the TCM configurations
*/
core->atcm_enable = 0;
core->btcm_enable = 1;
core->loczrama = 1;
ret = of_property_read_u32(np, "ti,atcm-enable", &core->atcm_enable);
if (ret < 0 && ret != -EINVAL) {
dev_err(dev, "invalid format for ti,atcm-enable, ret = %d\n",
ret);
goto err;
}
ret = of_property_read_u32(np, "ti,btcm-enable", &core->btcm_enable);
if (ret < 0 && ret != -EINVAL) {
dev_err(dev, "invalid format for ti,btcm-enable, ret = %d\n",
ret);
goto err;
}
ret = of_property_read_u32(np, "ti,loczrama", &core->loczrama);
if (ret < 0 && ret != -EINVAL) {
dev_err(dev, "invalid format for ti,loczrama, ret = %d\n", ret);
goto err;
}
core->ti_sci = devm_ti_sci_get_by_phandle(dev, "ti,sci");
if (IS_ERR(core->ti_sci)) {
ret = PTR_ERR(core->ti_sci);
if (ret != -EPROBE_DEFER) {
dev_err(dev, "failed to get ti-sci handle, ret = %d\n",
ret);
}
core->ti_sci = NULL;
goto err;
}
ret = of_property_read_u32(np, "ti,sci-dev-id", &core->ti_sci_id);
if (ret) {
dev_err(dev, "missing 'ti,sci-dev-id' property\n");
goto err;
}
core->reset = devm_reset_control_get_exclusive(dev, NULL);
if (IS_ERR_OR_NULL(core->reset)) {
ret = PTR_ERR_OR_ZERO(core->reset);
if (!ret)
ret = -ENODEV;
if (ret != -EPROBE_DEFER) {
dev_err(dev, "failed to get reset handle, ret = %d\n",
ret);
}
goto err;
}
core->tsp = k3_r5_core_of_get_tsp(dev, core->ti_sci);
if (IS_ERR(core->tsp)) {
ret = PTR_ERR(core->tsp);
dev_err(dev, "failed to construct ti-sci proc control, ret = %d\n",
ret);
goto err;
}
ret = k3_r5_core_of_get_internal_memories(pdev, core);
if (ret) {
dev_err(dev, "failed to get internal memories, ret = %d\n",
ret);
goto err;
}
ret = k3_r5_core_of_get_sram_memories(pdev, core);
if (ret) {
dev_err(dev, "failed to get sram memories, ret = %d\n", ret);
goto err;
}
ret = ti_sci_proc_request(core->tsp);
if (ret < 0) {
dev_err(dev, "ti_sci_proc_request failed, ret = %d\n", ret);
goto err;
}
platform_set_drvdata(pdev, core);
devres_close_group(dev, k3_r5_core_of_init);
return 0;
err:
devres_release_group(dev, k3_r5_core_of_init);
return ret;
}
/*
* Free the resources explicitly since the driver model is not being used
* for the child R5F devices
*/
static void k3_r5_core_of_exit(struct platform_device *pdev)
{
struct k3_r5_core *core = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
int ret;
ret = ti_sci_proc_release(core->tsp);
if (ret)
dev_err(dev, "failed to release proc, ret = %d\n", ret);
platform_set_drvdata(pdev, NULL);
devres_release_group(dev, k3_r5_core_of_init);
}
static void k3_r5_cluster_of_exit(void *data)
{
struct k3_r5_cluster *cluster = platform_get_drvdata(data);
struct platform_device *cpdev;
struct k3_r5_core *core, *temp;
list_for_each_entry_safe_reverse(core, temp, &cluster->cores, elem) {
list_del(&core->elem);
cpdev = to_platform_device(core->dev);
k3_r5_core_of_exit(cpdev);
}
}
static int k3_r5_cluster_of_init(struct platform_device *pdev)
{
struct k3_r5_cluster *cluster = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
struct device_node *np = dev_of_node(dev);
struct platform_device *cpdev;
struct device_node *child;
struct k3_r5_core *core;
int ret;
for_each_available_child_of_node(np, child) {
cpdev = of_find_device_by_node(child);
if (!cpdev) {
ret = -ENODEV;
dev_err(dev, "could not get R5 core platform device\n");
of_node_put(child);
goto fail;
}
ret = k3_r5_core_of_init(cpdev);
if (ret) {
dev_err(dev, "k3_r5_core_of_init failed, ret = %d\n",
ret);
put_device(&cpdev->dev);
of_node_put(child);
goto fail;
}
core = platform_get_drvdata(cpdev);
put_device(&cpdev->dev);
list_add_tail(&core->elem, &cluster->cores);
}
return 0;
fail:
k3_r5_cluster_of_exit(pdev);
return ret;
}
static int k3_r5_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev_of_node(dev);
struct k3_r5_cluster *cluster;
const struct k3_r5_soc_data *data;
int ret;
int num_cores;
data = of_device_get_match_data(&pdev->dev);
if (!data) {
dev_err(dev, "SoC-specific data is not defined\n");
return -ENODEV;
}
cluster = devm_kzalloc(dev, sizeof(*cluster), GFP_KERNEL);
if (!cluster)
return -ENOMEM;
cluster->dev = dev;
cluster->soc_data = data;
INIT_LIST_HEAD(&cluster->cores);
ret = of_property_read_u32(np, "ti,cluster-mode", &cluster->mode);
if (ret < 0 && ret != -EINVAL) {
dev_err(dev, "invalid format for ti,cluster-mode, ret = %d\n",
ret);
return ret;
}
if (ret == -EINVAL) {
/*
* default to most common efuse configurations -
* Split-mode on AM64x
* Single core on AM62x
* LockStep-mode on all others
*/
if (!data->is_single_core)
cluster->mode = data->single_cpu_mode ?
CLUSTER_MODE_SPLIT : CLUSTER_MODE_LOCKSTEP;
else
cluster->mode = CLUSTER_MODE_SINGLECORE;
}
if ((cluster->mode == CLUSTER_MODE_SINGLECPU && !data->single_cpu_mode) ||
(cluster->mode == CLUSTER_MODE_SINGLECORE && !data->is_single_core)) {
dev_err(dev, "Cluster mode = %d is not supported on this SoC\n", cluster->mode);
return -EINVAL;
}
num_cores = of_get_available_child_count(np);
if (num_cores != 2 && !data->is_single_core) {
dev_err(dev, "MCU cluster requires both R5F cores to be enabled but num_cores is set to %d\n",
num_cores);
return -ENODEV;
}
if (num_cores != 1 && data->is_single_core) {
dev_err(dev, "SoC supports only single core R5 but num_cores is set to %d\n",
num_cores);
return -ENODEV;
}
platform_set_drvdata(pdev, cluster);
ret = devm_of_platform_populate(dev);
if (ret) {
dev_err(dev, "devm_of_platform_populate failed, ret = %d\n",
ret);
return ret;
}
ret = k3_r5_cluster_of_init(pdev);
if (ret) {
dev_err(dev, "k3_r5_cluster_of_init failed, ret = %d\n", ret);
return ret;
}
ret = devm_add_action_or_reset(dev, k3_r5_cluster_of_exit, pdev);
if (ret)
return ret;
ret = k3_r5_cluster_rproc_init(pdev);
if (ret) {
dev_err(dev, "k3_r5_cluster_rproc_init failed, ret = %d\n",
ret);
return ret;
}
ret = devm_add_action_or_reset(dev, k3_r5_cluster_rproc_exit, pdev);
if (ret)
return ret;
return 0;
}
static const struct k3_r5_soc_data am65_j721e_soc_data = {
.tcm_is_double = false,
.tcm_ecc_autoinit = false,
.single_cpu_mode = false,
.is_single_core = false,
};
static const struct k3_r5_soc_data j7200_j721s2_soc_data = {
.tcm_is_double = true,
.tcm_ecc_autoinit = true,
.single_cpu_mode = false,
.is_single_core = false,
};
static const struct k3_r5_soc_data am64_soc_data = {
.tcm_is_double = true,
.tcm_ecc_autoinit = true,
.single_cpu_mode = true,
.is_single_core = false,
};
static const struct k3_r5_soc_data am62_soc_data = {
.tcm_is_double = false,
.tcm_ecc_autoinit = true,
.single_cpu_mode = false,
.is_single_core = true,
};
static const struct of_device_id k3_r5_of_match[] = {
{ .compatible = "ti,am654-r5fss", .data = &am65_j721e_soc_data, },
{ .compatible = "ti,j721e-r5fss", .data = &am65_j721e_soc_data, },
{ .compatible = "ti,j7200-r5fss", .data = &j7200_j721s2_soc_data, },
{ .compatible = "ti,am64-r5fss", .data = &am64_soc_data, },
{ .compatible = "ti,am62-r5fss", .data = &am62_soc_data, },
{ .compatible = "ti,j721s2-r5fss", .data = &j7200_j721s2_soc_data, },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, k3_r5_of_match);
static struct platform_driver k3_r5_rproc_driver = {
.probe = k3_r5_probe,
.driver = {
.name = "k3_r5_rproc",
.of_match_table = k3_r5_of_match,
},
};
module_platform_driver(k3_r5_rproc_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI K3 R5F remote processor driver");
MODULE_AUTHOR("Suman Anna <[email protected]>");
|
linux-master
|
drivers/remoteproc/ti_k3_r5_remoteproc.c
|
// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2019 MediaTek Inc.
#include <asm/barrier.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/time64.h>
#include <linux/remoteproc/mtk_scp.h>
#include "mtk_common.h"
#define SCP_TIMEOUT_US (2000 * USEC_PER_MSEC)
/**
* scp_ipi_register() - register an ipi function
*
* @scp: mtk_scp structure
* @id: IPI ID
* @handler: IPI handler
* @priv: private data for IPI handler
*
* Register an IPI handler to receive IPI interrupts from SCP.
*
* Return: 0 if the IPI handler is registered successfully, -error on error.
*/
int scp_ipi_register(struct mtk_scp *scp,
u32 id,
scp_ipi_handler_t handler,
void *priv)
{
if (!scp)
return -EPROBE_DEFER;
if (WARN_ON(id >= SCP_IPI_MAX) || WARN_ON(handler == NULL))
return -EINVAL;
scp_ipi_lock(scp, id);
scp->ipi_desc[id].handler = handler;
scp->ipi_desc[id].priv = priv;
scp_ipi_unlock(scp, id);
return 0;
}
EXPORT_SYMBOL_GPL(scp_ipi_register);
/**
* scp_ipi_unregister() - unregister an ipi function
*
* @scp: mtk_scp structure
* @id: IPI ID
*
* Unregister an IPI handler so it no longer receives IPI interrupts from SCP.
*/
void scp_ipi_unregister(struct mtk_scp *scp, u32 id)
{
if (!scp)
return;
if (WARN_ON(id >= SCP_IPI_MAX))
return;
scp_ipi_lock(scp, id);
scp->ipi_desc[id].handler = NULL;
scp->ipi_desc[id].priv = NULL;
scp_ipi_unlock(scp, id);
}
EXPORT_SYMBOL_GPL(scp_ipi_unregister);
/*
* scp_memcpy_aligned() - Copy src to dst, where dst is in SCP SRAM region.
*
* @dst: Pointer to the destination buffer, should be in SCP SRAM region.
* @src: Pointer to the source buffer.
* @len: Length of the source buffer to be copied.
*
* Since AP access of SCP SRAM doesn't support byte writes, this always writes a
* full word at a time, which may cause some extra bytes to be written at the
* beginning & end of dst.
*/
void scp_memcpy_aligned(void __iomem *dst, const void *src, unsigned int len)
{
void __iomem *ptr;
u32 val;
unsigned int i = 0, remain;
if (!IS_ALIGNED((unsigned long)dst, 4)) {
ptr = (void __iomem *)ALIGN_DOWN((unsigned long)dst, 4);
i = 4 - (dst - ptr);
val = readl_relaxed(ptr);
memcpy((u8 *)&val + (4 - i), src, i);
writel_relaxed(val, ptr);
}
__iowrite32_copy(dst + i, src + i, (len - i) / 4);
remain = (len - i) % 4;
if (remain > 0) {
val = readl_relaxed(dst + len - remain);
memcpy(&val, src + len - remain, remain);
writel_relaxed(val, dst + len - remain);
}
}
EXPORT_SYMBOL_GPL(scp_memcpy_aligned);
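/*
 * Worked example (illustrative, not from the original source): with dst
 * misaligned by 2 bytes and len = 10, the leading read-modify-write above
 * merges src[0..1] into the last two bytes of the aligned word containing
 * dst (i = 2), __iowrite32_copy() then transfers (10 - 2) / 4 = 2 full
 * words, and remain = (10 - 2) % 4 = 0, so no trailing fix-up is needed.
 */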
/**
* scp_ipi_lock() - Lock before operations of an IPI ID
*
* @scp: mtk_scp structure
* @id: IPI ID
*
* Note: This should not be used by drivers other than mtk_scp.
*/
void scp_ipi_lock(struct mtk_scp *scp, u32 id)
{
if (WARN_ON(id >= SCP_IPI_MAX))
return;
mutex_lock(&scp->ipi_desc[id].lock);
}
EXPORT_SYMBOL_GPL(scp_ipi_lock);
/**
* scp_ipi_unlock() - Unlock after operations of an IPI ID
*
* @scp: mtk_scp structure
* @id: IPI ID
*
* Note: This should not be used by drivers other than mtk_scp.
*/
void scp_ipi_unlock(struct mtk_scp *scp, u32 id)
{
if (WARN_ON(id >= SCP_IPI_MAX))
return;
mutex_unlock(&scp->ipi_desc[id].lock);
}
EXPORT_SYMBOL_GPL(scp_ipi_unlock);
/**
* scp_ipi_send() - send data from AP to scp.
*
* @scp: mtk_scp structure
* @id: IPI ID
* @buf: the data buffer
* @len: the data buffer length
* @wait: number of msecs to wait for ack. 0 to skip waiting.
*
* This function is thread-safe. When this function returns,
* SCP has received the data and has started processing it.
* When the processing completes, the IPI handler registered
* by scp_ipi_register() is called in interrupt context.
*
* Return: 0 if the data is sent successfully, -error on error.
*/
int scp_ipi_send(struct mtk_scp *scp, u32 id, void *buf, unsigned int len,
unsigned int wait)
{
struct mtk_share_obj __iomem *send_obj = scp->send_buf;
u32 val;
int ret;
if (WARN_ON(id <= SCP_IPI_INIT) || WARN_ON(id >= SCP_IPI_MAX) ||
WARN_ON(id == SCP_IPI_NS_SERVICE) ||
WARN_ON(len > sizeof(send_obj->share_buf)) || WARN_ON(!buf))
return -EINVAL;
ret = clk_prepare_enable(scp->clk);
if (ret) {
dev_err(scp->dev, "failed to enable clock\n");
return ret;
}
mutex_lock(&scp->send_lock);
/* Wait until SCP receives the last command */
ret = readl_poll_timeout_atomic(scp->reg_base + scp->data->host_to_scp_reg,
val, !val, 0, SCP_TIMEOUT_US);
if (ret) {
dev_err(scp->dev, "%s: IPI timeout!\n", __func__);
goto unlock_mutex;
}
scp_memcpy_aligned(send_obj->share_buf, buf, len);
writel(len, &send_obj->len);
writel(id, &send_obj->id);
scp->ipi_id_ack[id] = false;
/* send the command to SCP */
writel(scp->data->host_to_scp_int_bit,
scp->reg_base + scp->data->host_to_scp_reg);
if (wait) {
/* wait for SCP's ACK */
ret = wait_event_timeout(scp->ack_wq,
scp->ipi_id_ack[id],
msecs_to_jiffies(wait));
scp->ipi_id_ack[id] = false;
if (WARN(!ret, "scp ipi %d ack time out !", id))
ret = -EIO;
else
ret = 0;
}
unlock_mutex:
mutex_unlock(&scp->send_lock);
clk_disable_unprepare(scp->clk);
return ret;
}
EXPORT_SYMBOL_GPL(scp_ipi_send);
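/*
 * Illustrative sketch, not part of this file: how a hypothetical SCP client
 * might register an IPI handler and send a request with a 100 ms ack timeout
 * using the two exported helpers above. The chosen IPI id (SCP_IPI_VDEC_H264),
 * the message layout and every "my_*" name are assumptions made for the
 * example only; the scp handle is expected to come from scp_get().
 */
#if 0
struct my_request {
	u32 command;
	u32 arg;
};

/* Called in interrupt context whenever SCP sends data on this IPI id */
static void my_ipi_handler(void *data, unsigned int len, void *priv)
{
	struct my_request *reply = data;

	pr_debug("SCP replied: command=%u arg=%u (len=%u)\n",
		 reply->command, reply->arg, len);
}

static int my_send_request(struct mtk_scp *scp)
{
	struct my_request req = { .command = 1, .arg = 42 };
	int ret;

	ret = scp_ipi_register(scp, SCP_IPI_VDEC_H264, my_ipi_handler, NULL);
	if (ret)
		return ret;

	/* Blocks for up to 100 ms waiting for the SCP ack */
	return scp_ipi_send(scp, SCP_IPI_VDEC_H264, &req, sizeof(req), 100);
}
#endif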
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MediaTek scp IPI interface");
|
linux-master
|
drivers/remoteproc/mtk_scp_ipi.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Qualcomm ADSP/SLPI Peripheral Image Loader for MSM8974 and MSM8996
*
* Copyright (C) 2016 Linaro Ltd
* Copyright (C) 2014 Sony Mobile Communications AB
* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/regulator/consumer.h>
#include <linux/remoteproc.h>
#include <linux/soc/qcom/mdt_loader.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>
#include "qcom_common.h"
#include "qcom_pil_info.h"
#include "qcom_q6v5.h"
#include "remoteproc_internal.h"
#define ADSP_DECRYPT_SHUTDOWN_DELAY_MS 100
struct adsp_data {
int crash_reason_smem;
const char *firmware_name;
const char *dtb_firmware_name;
int pas_id;
int dtb_pas_id;
unsigned int minidump_id;
bool auto_boot;
bool decrypt_shutdown;
char **proxy_pd_names;
const char *load_state;
const char *ssr_name;
const char *sysmon_name;
int ssctl_id;
int region_assign_idx;
};
struct qcom_adsp {
struct device *dev;
struct rproc *rproc;
struct qcom_q6v5 q6v5;
struct clk *xo;
struct clk *aggre2_clk;
struct regulator *cx_supply;
struct regulator *px_supply;
struct device *proxy_pds[3];
int proxy_pd_count;
const char *dtb_firmware_name;
int pas_id;
int dtb_pas_id;
unsigned int minidump_id;
int crash_reason_smem;
bool decrypt_shutdown;
const char *info_name;
const struct firmware *firmware;
const struct firmware *dtb_firmware;
struct completion start_done;
struct completion stop_done;
phys_addr_t mem_phys;
phys_addr_t dtb_mem_phys;
phys_addr_t mem_reloc;
phys_addr_t dtb_mem_reloc;
phys_addr_t region_assign_phys;
void *mem_region;
void *dtb_mem_region;
size_t mem_size;
size_t dtb_mem_size;
size_t region_assign_size;
int region_assign_idx;
u64 region_assign_perms;
struct qcom_rproc_glink glink_subdev;
struct qcom_rproc_subdev smd_subdev;
struct qcom_rproc_ssr ssr_subdev;
struct qcom_sysmon *sysmon;
struct qcom_scm_pas_metadata pas_metadata;
struct qcom_scm_pas_metadata dtb_pas_metadata;
};
static void adsp_segment_dump(struct rproc *rproc, struct rproc_dump_segment *segment,
void *dest, size_t offset, size_t size)
{
struct qcom_adsp *adsp = rproc->priv;
int total_offset;
total_offset = segment->da + segment->offset + offset - adsp->mem_phys;
if (total_offset < 0 || total_offset + size > adsp->mem_size) {
dev_err(adsp->dev,
"invalid copy request for segment %pad with offset %zu and size %zu\n",
&segment->da, offset, size);
memset(dest, 0xff, size);
return;
}
memcpy_fromio(dest, adsp->mem_region + total_offset, size);
}
static void adsp_minidump(struct rproc *rproc)
{
struct qcom_adsp *adsp = rproc->priv;
if (rproc->dump_conf == RPROC_COREDUMP_DISABLED)
return;
qcom_minidump(rproc, adsp->minidump_id, adsp_segment_dump);
}
static int adsp_pds_enable(struct qcom_adsp *adsp, struct device **pds,
size_t pd_count)
{
int ret;
int i;
for (i = 0; i < pd_count; i++) {
dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
ret = pm_runtime_get_sync(pds[i]);
if (ret < 0) {
pm_runtime_put_noidle(pds[i]);
dev_pm_genpd_set_performance_state(pds[i], 0);
goto unroll_pd_votes;
}
}
return 0;
unroll_pd_votes:
for (i--; i >= 0; i--) {
dev_pm_genpd_set_performance_state(pds[i], 0);
pm_runtime_put(pds[i]);
}
return ret;
}
static void adsp_pds_disable(struct qcom_adsp *adsp, struct device **pds,
size_t pd_count)
{
int i;
for (i = 0; i < pd_count; i++) {
dev_pm_genpd_set_performance_state(pds[i], 0);
pm_runtime_put(pds[i]);
}
}
static int adsp_shutdown_poll_decrypt(struct qcom_adsp *adsp)
{
unsigned int retry_num = 50;
int ret;
do {
msleep(ADSP_DECRYPT_SHUTDOWN_DELAY_MS);
ret = qcom_scm_pas_shutdown(adsp->pas_id);
} while (ret == -EINVAL && --retry_num);
return ret;
}
static int adsp_unprepare(struct rproc *rproc)
{
struct qcom_adsp *adsp = rproc->priv;
/*
* adsp_load() did pass pas_metadata to the SCM driver for storing
* metadata context. It might have been released already if
* auth_and_reset() was successful, but in other cases clean it up
* here.
*/
qcom_scm_pas_metadata_release(&adsp->pas_metadata);
if (adsp->dtb_pas_id)
qcom_scm_pas_metadata_release(&adsp->dtb_pas_metadata);
return 0;
}
static int adsp_load(struct rproc *rproc, const struct firmware *fw)
{
struct qcom_adsp *adsp = rproc->priv;
int ret;
/* Store firmware handle to be used in adsp_start() */
adsp->firmware = fw;
if (adsp->dtb_pas_id) {
ret = request_firmware(&adsp->dtb_firmware, adsp->dtb_firmware_name, adsp->dev);
if (ret) {
dev_err(adsp->dev, "request_firmware failed for %s: %d\n",
adsp->dtb_firmware_name, ret);
return ret;
}
ret = qcom_mdt_pas_init(adsp->dev, adsp->dtb_firmware, adsp->dtb_firmware_name,
adsp->dtb_pas_id, adsp->dtb_mem_phys,
&adsp->dtb_pas_metadata);
if (ret)
goto release_dtb_firmware;
ret = qcom_mdt_load_no_init(adsp->dev, adsp->dtb_firmware, adsp->dtb_firmware_name,
adsp->dtb_pas_id, adsp->dtb_mem_region,
adsp->dtb_mem_phys, adsp->dtb_mem_size,
&adsp->dtb_mem_reloc);
if (ret)
goto release_dtb_metadata;
}
return 0;
release_dtb_metadata:
qcom_scm_pas_metadata_release(&adsp->dtb_pas_metadata);
release_dtb_firmware:
release_firmware(adsp->dtb_firmware);
return ret;
}
static int adsp_start(struct rproc *rproc)
{
struct qcom_adsp *adsp = rproc->priv;
int ret;
ret = qcom_q6v5_prepare(&adsp->q6v5);
if (ret)
return ret;
ret = adsp_pds_enable(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
if (ret < 0)
goto disable_irqs;
ret = clk_prepare_enable(adsp->xo);
if (ret)
goto disable_proxy_pds;
ret = clk_prepare_enable(adsp->aggre2_clk);
if (ret)
goto disable_xo_clk;
if (adsp->cx_supply) {
ret = regulator_enable(adsp->cx_supply);
if (ret)
goto disable_aggre2_clk;
}
if (adsp->px_supply) {
ret = regulator_enable(adsp->px_supply);
if (ret)
goto disable_cx_supply;
}
if (adsp->dtb_pas_id) {
ret = qcom_scm_pas_auth_and_reset(adsp->dtb_pas_id);
if (ret) {
dev_err(adsp->dev,
"failed to authenticate dtb image and release reset\n");
goto disable_px_supply;
}
}
ret = qcom_mdt_pas_init(adsp->dev, adsp->firmware, rproc->firmware, adsp->pas_id,
adsp->mem_phys, &adsp->pas_metadata);
if (ret)
goto disable_px_supply;
ret = qcom_mdt_load_no_init(adsp->dev, adsp->firmware, rproc->firmware, adsp->pas_id,
adsp->mem_region, adsp->mem_phys, adsp->mem_size,
&adsp->mem_reloc);
if (ret)
goto release_pas_metadata;
qcom_pil_info_store(adsp->info_name, adsp->mem_phys, adsp->mem_size);
ret = qcom_scm_pas_auth_and_reset(adsp->pas_id);
if (ret) {
dev_err(adsp->dev,
"failed to authenticate image and release reset\n");
goto release_pas_metadata;
}
ret = qcom_q6v5_wait_for_start(&adsp->q6v5, msecs_to_jiffies(5000));
if (ret == -ETIMEDOUT) {
dev_err(adsp->dev, "start timed out\n");
qcom_scm_pas_shutdown(adsp->pas_id);
goto release_pas_metadata;
}
qcom_scm_pas_metadata_release(&adsp->pas_metadata);
if (adsp->dtb_pas_id)
qcom_scm_pas_metadata_release(&adsp->dtb_pas_metadata);
/* Remove pointer to the loaded firmware, only valid in adsp_load() & adsp_start() */
adsp->firmware = NULL;
return 0;
release_pas_metadata:
qcom_scm_pas_metadata_release(&adsp->pas_metadata);
if (adsp->dtb_pas_id)
qcom_scm_pas_metadata_release(&adsp->dtb_pas_metadata);
disable_px_supply:
if (adsp->px_supply)
regulator_disable(adsp->px_supply);
disable_cx_supply:
if (adsp->cx_supply)
regulator_disable(adsp->cx_supply);
disable_aggre2_clk:
clk_disable_unprepare(adsp->aggre2_clk);
disable_xo_clk:
clk_disable_unprepare(adsp->xo);
disable_proxy_pds:
adsp_pds_disable(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
disable_irqs:
qcom_q6v5_unprepare(&adsp->q6v5);
/* Remove pointer to the loaded firmware, only valid in adsp_load() & adsp_start() */
adsp->firmware = NULL;
return ret;
}
static void qcom_pas_handover(struct qcom_q6v5 *q6v5)
{
struct qcom_adsp *adsp = container_of(q6v5, struct qcom_adsp, q6v5);
if (adsp->px_supply)
regulator_disable(adsp->px_supply);
if (adsp->cx_supply)
regulator_disable(adsp->cx_supply);
clk_disable_unprepare(adsp->aggre2_clk);
clk_disable_unprepare(adsp->xo);
adsp_pds_disable(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
}
static int adsp_stop(struct rproc *rproc)
{
struct qcom_adsp *adsp = rproc->priv;
int handover;
int ret;
ret = qcom_q6v5_request_stop(&adsp->q6v5, adsp->sysmon);
if (ret == -ETIMEDOUT)
dev_err(adsp->dev, "timed out on wait\n");
ret = qcom_scm_pas_shutdown(adsp->pas_id);
if (ret && adsp->decrypt_shutdown)
ret = adsp_shutdown_poll_decrypt(adsp);
if (ret)
dev_err(adsp->dev, "failed to shutdown: %d\n", ret);
if (adsp->dtb_pas_id) {
ret = qcom_scm_pas_shutdown(adsp->dtb_pas_id);
if (ret)
dev_err(adsp->dev, "failed to shutdown dtb: %d\n", ret);
}
handover = qcom_q6v5_unprepare(&adsp->q6v5);
if (handover)
qcom_pas_handover(&adsp->q6v5);
return ret;
}
static void *adsp_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct qcom_adsp *adsp = rproc->priv;
int offset;
offset = da - adsp->mem_reloc;
if (offset < 0 || offset + len > adsp->mem_size)
return NULL;
if (is_iomem)
*is_iomem = true;
return adsp->mem_region + offset;
}
static unsigned long adsp_panic(struct rproc *rproc)
{
struct qcom_adsp *adsp = rproc->priv;
return qcom_q6v5_panic(&adsp->q6v5);
}
static const struct rproc_ops adsp_ops = {
.unprepare = adsp_unprepare,
.start = adsp_start,
.stop = adsp_stop,
.da_to_va = adsp_da_to_va,
.parse_fw = qcom_register_dump_segments,
.load = adsp_load,
.panic = adsp_panic,
};
static const struct rproc_ops adsp_minidump_ops = {
.unprepare = adsp_unprepare,
.start = adsp_start,
.stop = adsp_stop,
.da_to_va = adsp_da_to_va,
.parse_fw = qcom_register_dump_segments,
.load = adsp_load,
.panic = adsp_panic,
.coredump = adsp_minidump,
};
static int adsp_init_clock(struct qcom_adsp *adsp)
{
int ret;
adsp->xo = devm_clk_get(adsp->dev, "xo");
if (IS_ERR(adsp->xo)) {
ret = PTR_ERR(adsp->xo);
if (ret != -EPROBE_DEFER)
dev_err(adsp->dev, "failed to get xo clock");
return ret;
}
adsp->aggre2_clk = devm_clk_get_optional(adsp->dev, "aggre2");
if (IS_ERR(adsp->aggre2_clk)) {
ret = PTR_ERR(adsp->aggre2_clk);
if (ret != -EPROBE_DEFER)
dev_err(adsp->dev,
"failed to get aggre2 clock");
return ret;
}
return 0;
}
static int adsp_init_regulator(struct qcom_adsp *adsp)
{
adsp->cx_supply = devm_regulator_get_optional(adsp->dev, "cx");
if (IS_ERR(adsp->cx_supply)) {
if (PTR_ERR(adsp->cx_supply) == -ENODEV)
adsp->cx_supply = NULL;
else
return PTR_ERR(adsp->cx_supply);
}
if (adsp->cx_supply)
regulator_set_load(adsp->cx_supply, 100000);
adsp->px_supply = devm_regulator_get_optional(adsp->dev, "px");
if (IS_ERR(adsp->px_supply)) {
if (PTR_ERR(adsp->px_supply) == -ENODEV)
adsp->px_supply = NULL;
else
return PTR_ERR(adsp->px_supply);
}
return 0;
}
static int adsp_pds_attach(struct device *dev, struct device **devs,
char **pd_names)
{
size_t num_pds = 0;
int ret;
int i;
if (!pd_names)
return 0;
/* Handle single power domain */
if (dev->pm_domain) {
devs[0] = dev;
pm_runtime_enable(dev);
return 1;
}
while (pd_names[num_pds])
num_pds++;
for (i = 0; i < num_pds; i++) {
devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
if (IS_ERR_OR_NULL(devs[i])) {
ret = PTR_ERR(devs[i]) ? : -ENODATA;
goto unroll_attach;
}
}
return num_pds;
unroll_attach:
for (i--; i >= 0; i--)
dev_pm_domain_detach(devs[i], false);
return ret;
}
static void adsp_pds_detach(struct qcom_adsp *adsp, struct device **pds,
size_t pd_count)
{
struct device *dev = adsp->dev;
int i;
/* Handle single power domain */
if (dev->pm_domain && pd_count) {
pm_runtime_disable(dev);
return;
}
for (i = 0; i < pd_count; i++)
dev_pm_domain_detach(pds[i], false);
}
static int adsp_alloc_memory_region(struct qcom_adsp *adsp)
{
struct reserved_mem *rmem;
struct device_node *node;
node = of_parse_phandle(adsp->dev->of_node, "memory-region", 0);
if (!node) {
dev_err(adsp->dev, "no memory-region specified\n");
return -EINVAL;
}
rmem = of_reserved_mem_lookup(node);
of_node_put(node);
if (!rmem) {
dev_err(adsp->dev, "unable to resolve memory-region\n");
return -EINVAL;
}
adsp->mem_phys = adsp->mem_reloc = rmem->base;
adsp->mem_size = rmem->size;
adsp->mem_region = devm_ioremap_wc(adsp->dev, adsp->mem_phys, adsp->mem_size);
if (!adsp->mem_region) {
dev_err(adsp->dev, "unable to map memory region: %pa+%zx\n",
&rmem->base, adsp->mem_size);
return -EBUSY;
}
if (!adsp->dtb_pas_id)
return 0;
node = of_parse_phandle(adsp->dev->of_node, "memory-region", 1);
if (!node) {
dev_err(adsp->dev, "no dtb memory-region specified\n");
return -EINVAL;
}
rmem = of_reserved_mem_lookup(node);
of_node_put(node);
if (!rmem) {
dev_err(adsp->dev, "unable to resolve dtb memory-region\n");
return -EINVAL;
}
adsp->dtb_mem_phys = adsp->dtb_mem_reloc = rmem->base;
adsp->dtb_mem_size = rmem->size;
adsp->dtb_mem_region = devm_ioremap_wc(adsp->dev, adsp->dtb_mem_phys, adsp->dtb_mem_size);
if (!adsp->dtb_mem_region) {
dev_err(adsp->dev, "unable to map dtb memory region: %pa+%zx\n",
&rmem->base, adsp->dtb_mem_size);
return -EBUSY;
}
return 0;
}
static int adsp_assign_memory_region(struct qcom_adsp *adsp)
{
struct reserved_mem *rmem = NULL;
struct qcom_scm_vmperm perm;
struct device_node *node;
int ret;
if (!adsp->region_assign_idx)
return 0;
node = of_parse_phandle(adsp->dev->of_node, "memory-region", adsp->region_assign_idx);
if (node)
rmem = of_reserved_mem_lookup(node);
of_node_put(node);
if (!rmem) {
dev_err(adsp->dev, "unable to resolve shareable memory-region\n");
return -EINVAL;
}
perm.vmid = QCOM_SCM_VMID_MSS_MSA;
perm.perm = QCOM_SCM_PERM_RW;
adsp->region_assign_phys = rmem->base;
adsp->region_assign_size = rmem->size;
adsp->region_assign_perms = BIT(QCOM_SCM_VMID_HLOS);
ret = qcom_scm_assign_mem(adsp->region_assign_phys,
adsp->region_assign_size,
&adsp->region_assign_perms,
&perm, 1);
if (ret < 0) {
dev_err(adsp->dev, "assign memory failed\n");
return ret;
}
return 0;
}
static void adsp_unassign_memory_region(struct qcom_adsp *adsp)
{
struct qcom_scm_vmperm perm;
int ret;
if (!adsp->region_assign_idx)
return;
perm.vmid = QCOM_SCM_VMID_HLOS;
perm.perm = QCOM_SCM_PERM_RW;
ret = qcom_scm_assign_mem(adsp->region_assign_phys,
adsp->region_assign_size,
&adsp->region_assign_perms,
&perm, 1);
if (ret < 0)
dev_err(adsp->dev, "unassign memory failed\n");
}
static int adsp_probe(struct platform_device *pdev)
{
const struct adsp_data *desc;
struct qcom_adsp *adsp;
struct rproc *rproc;
const char *fw_name, *dtb_fw_name = NULL;
const struct rproc_ops *ops = &adsp_ops;
int ret;
desc = of_device_get_match_data(&pdev->dev);
if (!desc)
return -EINVAL;
if (!qcom_scm_is_available())
return -EPROBE_DEFER;
fw_name = desc->firmware_name;
ret = of_property_read_string(pdev->dev.of_node, "firmware-name",
&fw_name);
if (ret < 0 && ret != -EINVAL)
return ret;
if (desc->dtb_firmware_name) {
dtb_fw_name = desc->dtb_firmware_name;
ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name", 1,
&dtb_fw_name);
if (ret < 0 && ret != -EINVAL)
return ret;
}
if (desc->minidump_id)
ops = &adsp_minidump_ops;
rproc = rproc_alloc(&pdev->dev, pdev->name, ops, fw_name, sizeof(*adsp));
if (!rproc) {
dev_err(&pdev->dev, "unable to allocate remoteproc\n");
return -ENOMEM;
}
rproc->auto_boot = desc->auto_boot;
rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
adsp = rproc->priv;
adsp->dev = &pdev->dev;
adsp->rproc = rproc;
adsp->minidump_id = desc->minidump_id;
adsp->pas_id = desc->pas_id;
adsp->info_name = desc->sysmon_name;
adsp->decrypt_shutdown = desc->decrypt_shutdown;
adsp->region_assign_idx = desc->region_assign_idx;
if (dtb_fw_name) {
adsp->dtb_firmware_name = dtb_fw_name;
adsp->dtb_pas_id = desc->dtb_pas_id;
}
platform_set_drvdata(pdev, adsp);
ret = device_init_wakeup(adsp->dev, true);
if (ret)
goto free_rproc;
ret = adsp_alloc_memory_region(adsp);
if (ret)
goto free_rproc;
ret = adsp_assign_memory_region(adsp);
if (ret)
goto free_rproc;
ret = adsp_init_clock(adsp);
if (ret)
goto free_rproc;
ret = adsp_init_regulator(adsp);
if (ret)
goto free_rproc;
ret = adsp_pds_attach(&pdev->dev, adsp->proxy_pds,
desc->proxy_pd_names);
if (ret < 0)
goto free_rproc;
adsp->proxy_pd_count = ret;
ret = qcom_q6v5_init(&adsp->q6v5, pdev, rproc, desc->crash_reason_smem, desc->load_state,
qcom_pas_handover);
if (ret)
goto detach_proxy_pds;
qcom_add_glink_subdev(rproc, &adsp->glink_subdev, desc->ssr_name);
qcom_add_smd_subdev(rproc, &adsp->smd_subdev);
adsp->sysmon = qcom_add_sysmon_subdev(rproc,
desc->sysmon_name,
desc->ssctl_id);
if (IS_ERR(adsp->sysmon)) {
ret = PTR_ERR(adsp->sysmon);
goto detach_proxy_pds;
}
qcom_add_ssr_subdev(rproc, &adsp->ssr_subdev, desc->ssr_name);
ret = rproc_add(rproc);
if (ret)
goto detach_proxy_pds;
return 0;
detach_proxy_pds:
adsp_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
free_rproc:
device_init_wakeup(adsp->dev, false);
rproc_free(rproc);
return ret;
}
static void adsp_remove(struct platform_device *pdev)
{
struct qcom_adsp *adsp = platform_get_drvdata(pdev);
rproc_del(adsp->rproc);
qcom_q6v5_deinit(&adsp->q6v5);
adsp_unassign_memory_region(adsp);
qcom_remove_glink_subdev(adsp->rproc, &adsp->glink_subdev);
qcom_remove_sysmon_subdev(adsp->sysmon);
qcom_remove_smd_subdev(adsp->rproc, &adsp->smd_subdev);
qcom_remove_ssr_subdev(adsp->rproc, &adsp->ssr_subdev);
adsp_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
device_init_wakeup(adsp->dev, false);
rproc_free(adsp->rproc);
}
static const struct adsp_data adsp_resource_init = {
.crash_reason_smem = 423,
.firmware_name = "adsp.mdt",
.pas_id = 1,
.auto_boot = true,
.ssr_name = "lpass",
.sysmon_name = "adsp",
.ssctl_id = 0x14,
};
static const struct adsp_data sdm845_adsp_resource_init = {
.crash_reason_smem = 423,
.firmware_name = "adsp.mdt",
.pas_id = 1,
.auto_boot = true,
.load_state = "adsp",
.ssr_name = "lpass",
.sysmon_name = "adsp",
.ssctl_id = 0x14,
};
static const struct adsp_data sm6350_adsp_resource = {
.crash_reason_smem = 423,
.firmware_name = "adsp.mdt",
.pas_id = 1,
.auto_boot = true,
.proxy_pd_names = (char*[]){
"lcx",
"lmx",
NULL
},
.load_state = "adsp",
.ssr_name = "lpass",
.sysmon_name = "adsp",
.ssctl_id = 0x14,
};
static const struct adsp_data sm8150_adsp_resource = {
.crash_reason_smem = 423,
.firmware_name = "adsp.mdt",
.pas_id = 1,
.auto_boot = true,
.proxy_pd_names = (char*[]){
"cx",
NULL
},
.load_state = "adsp",
.ssr_name = "lpass",
.sysmon_name = "adsp",
.ssctl_id = 0x14,
};
static const struct adsp_data sm8250_adsp_resource = {
.crash_reason_smem = 423,
.firmware_name = "adsp.mdt",
.pas_id = 1,
.auto_boot = true,
.proxy_pd_names = (char*[]){
"lcx",
"lmx",
NULL
},
.load_state = "adsp",
.ssr_name = "lpass",
.sysmon_name = "adsp",
.ssctl_id = 0x14,
};
static const struct adsp_data sm8350_adsp_resource = {
.crash_reason_smem = 423,
.firmware_name = "adsp.mdt",
.pas_id = 1,
.auto_boot = true,
.proxy_pd_names = (char*[]){
"lcx",
"lmx",
NULL
},
.load_state = "adsp",
.ssr_name = "lpass",
.sysmon_name = "adsp",
.ssctl_id = 0x14,
};
static const struct adsp_data msm8996_adsp_resource = {
.crash_reason_smem = 423,
.firmware_name = "adsp.mdt",
.pas_id = 1,
.auto_boot = true,
.proxy_pd_names = (char*[]){
"cx",
NULL
},
.ssr_name = "lpass",
.sysmon_name = "adsp",
.ssctl_id = 0x14,
};
static const struct adsp_data cdsp_resource_init = {
.crash_reason_smem = 601,
.firmware_name = "cdsp.mdt",
.pas_id = 18,
.auto_boot = true,
.ssr_name = "cdsp",
.sysmon_name = "cdsp",
.ssctl_id = 0x17,
};
static const struct adsp_data sdm845_cdsp_resource_init = {
.crash_reason_smem = 601,
.firmware_name = "cdsp.mdt",
.pas_id = 18,
.auto_boot = true,
.load_state = "cdsp",
.ssr_name = "cdsp",
.sysmon_name = "cdsp",
.ssctl_id = 0x17,
};
static const struct adsp_data sm6350_cdsp_resource = {
.crash_reason_smem = 601,
.firmware_name = "cdsp.mdt",
.pas_id = 18,
.auto_boot = true,
.proxy_pd_names = (char*[]){
"cx",
"mx",
NULL
},
.load_state = "cdsp",
.ssr_name = "cdsp",
.sysmon_name = "cdsp",
.ssctl_id = 0x17,
};
static const struct adsp_data sm8150_cdsp_resource = {
.crash_reason_smem = 601,
.firmware_name = "cdsp.mdt",
.pas_id = 18,
.auto_boot = true,
.proxy_pd_names = (char*[]){
"cx",
NULL
},
.load_state = "cdsp",
.ssr_name = "cdsp",
.sysmon_name = "cdsp",
.ssctl_id = 0x17,
};
static const struct adsp_data sm8250_cdsp_resource = {
.crash_reason_smem = 601,
.firmware_name = "cdsp.mdt",
.pas_id = 18,
.auto_boot = true,
.proxy_pd_names = (char*[]){
"cx",
NULL
},
.load_state = "cdsp",
.ssr_name = "cdsp",
.sysmon_name = "cdsp",
.ssctl_id = 0x17,
};
static const struct adsp_data sc8280xp_nsp0_resource = {
.crash_reason_smem = 601,
.firmware_name = "cdsp.mdt",
.pas_id = 18,
.auto_boot = true,
.proxy_pd_names = (char*[]){
"nsp",
NULL
},
.ssr_name = "cdsp0",
.sysmon_name = "cdsp",
.ssctl_id = 0x17,
};
static const struct adsp_data sc8280xp_nsp1_resource = {
.crash_reason_smem = 633,
.firmware_name = "cdsp.mdt",
.pas_id = 30,
.auto_boot = true,
.proxy_pd_names = (char*[]){
"nsp",
NULL
},
.ssr_name = "cdsp1",
.sysmon_name = "cdsp1",
.ssctl_id = 0x20,
};
static const struct adsp_data sm8350_cdsp_resource = {
.crash_reason_smem = 601,
.firmware_name = "cdsp.mdt",
.pas_id = 18,
.auto_boot = true,
.proxy_pd_names = (char*[]){
"cx",
"mxc",
NULL
},
.load_state = "cdsp",
.ssr_name = "cdsp",
.sysmon_name = "cdsp",
.ssctl_id = 0x17,
};
static const struct adsp_data mpss_resource_init = {
.crash_reason_smem = 421,
.firmware_name = "modem.mdt",
.pas_id = 4,
.minidump_id = 3,
.auto_boot = false,
.proxy_pd_names = (char*[]){
"cx",
"mss",
NULL
},
.load_state = "modem",
.ssr_name = "mpss",
.sysmon_name = "modem",
.ssctl_id = 0x12,
};
static const struct adsp_data sc8180x_mpss_resource = {
.crash_reason_smem = 421,
.firmware_name = "modem.mdt",
.pas_id = 4,
.auto_boot = false,
.proxy_pd_names = (char*[]){
"cx",
NULL
},
.load_state = "modem",
.ssr_name = "mpss",
.sysmon_name = "modem",
.ssctl_id = 0x12,
};
static const struct adsp_data msm8996_slpi_resource_init = {
.crash_reason_smem = 424,
.firmware_name = "slpi.mdt",
.pas_id = 12,
.auto_boot = true,
.proxy_pd_names = (char*[]){
"ssc_cx",
NULL
},
.ssr_name = "dsps",
.sysmon_name = "slpi",
.ssctl_id = 0x16,
};
static const struct adsp_data sdm845_slpi_resource_init = {
.crash_reason_smem = 424,
.firmware_name = "slpi.mdt",
.pas_id = 12,
.auto_boot = true,
.proxy_pd_names = (char*[]){
"lcx",
"lmx",
NULL
},
.load_state = "slpi",
.ssr_name = "dsps",
.sysmon_name = "slpi",
.ssctl_id = 0x16,
};
static const struct adsp_data wcss_resource_init = {
.crash_reason_smem = 421,
.firmware_name = "wcnss.mdt",
.pas_id = 6,
.auto_boot = true,
.ssr_name = "mpss",
.sysmon_name = "wcnss",
.ssctl_id = 0x12,
};
static const struct adsp_data sdx55_mpss_resource = {
.crash_reason_smem = 421,
.firmware_name = "modem.mdt",
.pas_id = 4,
.auto_boot = true,
.proxy_pd_names = (char*[]){
"cx",
"mss",
NULL
},
.ssr_name = "mpss",
.sysmon_name = "modem",
.ssctl_id = 0x22,
};
static const struct adsp_data sm8450_mpss_resource = {
.crash_reason_smem = 421,
.firmware_name = "modem.mdt",
.pas_id = 4,
.minidump_id = 3,
.auto_boot = false,
.decrypt_shutdown = true,
.proxy_pd_names = (char*[]){
"cx",
"mss",
NULL
},
.load_state = "modem",
.ssr_name = "mpss",
.sysmon_name = "modem",
.ssctl_id = 0x12,
};
static const struct adsp_data sm8550_adsp_resource = {
.crash_reason_smem = 423,
.firmware_name = "adsp.mdt",
.dtb_firmware_name = "adsp_dtb.mdt",
.pas_id = 1,
.dtb_pas_id = 0x24,
.minidump_id = 5,
.auto_boot = true,
.proxy_pd_names = (char*[]){
"lcx",
"lmx",
NULL
},
.load_state = "adsp",
.ssr_name = "lpass",
.sysmon_name = "adsp",
.ssctl_id = 0x14,
};
static const struct adsp_data sm8550_cdsp_resource = {
.crash_reason_smem = 601,
.firmware_name = "cdsp.mdt",
.dtb_firmware_name = "cdsp_dtb.mdt",
.pas_id = 18,
.dtb_pas_id = 0x25,
.minidump_id = 7,
.auto_boot = true,
.proxy_pd_names = (char*[]){
"cx",
"mxc",
"nsp",
NULL
},
.load_state = "cdsp",
.ssr_name = "cdsp",
.sysmon_name = "cdsp",
.ssctl_id = 0x17,
};
static const struct adsp_data sm8550_mpss_resource = {
.crash_reason_smem = 421,
.firmware_name = "modem.mdt",
.dtb_firmware_name = "modem_dtb.mdt",
.pas_id = 4,
.dtb_pas_id = 0x26,
.minidump_id = 3,
.auto_boot = false,
.decrypt_shutdown = true,
.proxy_pd_names = (char*[]){
"cx",
"mss",
NULL
},
.load_state = "modem",
.ssr_name = "mpss",
.sysmon_name = "modem",
.ssctl_id = 0x12,
.region_assign_idx = 2,
};
static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,msm8226-adsp-pil", .data = &adsp_resource_init},
{ .compatible = "qcom,msm8953-adsp-pil", .data = &msm8996_adsp_resource},
{ .compatible = "qcom,msm8974-adsp-pil", .data = &adsp_resource_init},
{ .compatible = "qcom,msm8996-adsp-pil", .data = &msm8996_adsp_resource},
{ .compatible = "qcom,msm8996-slpi-pil", .data = &msm8996_slpi_resource_init},
{ .compatible = "qcom,msm8998-adsp-pas", .data = &msm8996_adsp_resource},
{ .compatible = "qcom,msm8998-slpi-pas", .data = &msm8996_slpi_resource_init},
{ .compatible = "qcom,qcs404-adsp-pas", .data = &adsp_resource_init },
{ .compatible = "qcom,qcs404-cdsp-pas", .data = &cdsp_resource_init },
{ .compatible = "qcom,qcs404-wcss-pas", .data = &wcss_resource_init },
{ .compatible = "qcom,sc7180-mpss-pas", .data = &mpss_resource_init},
{ .compatible = "qcom,sc7280-mpss-pas", .data = &mpss_resource_init},
{ .compatible = "qcom,sc8180x-adsp-pas", .data = &sm8150_adsp_resource},
{ .compatible = "qcom,sc8180x-cdsp-pas", .data = &sm8150_cdsp_resource},
{ .compatible = "qcom,sc8180x-mpss-pas", .data = &sc8180x_mpss_resource},
{ .compatible = "qcom,sc8280xp-adsp-pas", .data = &sm8250_adsp_resource},
{ .compatible = "qcom,sc8280xp-nsp0-pas", .data = &sc8280xp_nsp0_resource},
{ .compatible = "qcom,sc8280xp-nsp1-pas", .data = &sc8280xp_nsp1_resource},
{ .compatible = "qcom,sdm660-adsp-pas", .data = &adsp_resource_init},
{ .compatible = "qcom,sdm845-adsp-pas", .data = &sdm845_adsp_resource_init},
{ .compatible = "qcom,sdm845-cdsp-pas", .data = &sdm845_cdsp_resource_init},
{ .compatible = "qcom,sdm845-slpi-pas", .data = &sdm845_slpi_resource_init},
{ .compatible = "qcom,sdx55-mpss-pas", .data = &sdx55_mpss_resource},
{ .compatible = "qcom,sm6115-adsp-pas", .data = &adsp_resource_init},
{ .compatible = "qcom,sm6115-cdsp-pas", .data = &cdsp_resource_init},
{ .compatible = "qcom,sm6115-mpss-pas", .data = &sc8180x_mpss_resource},
{ .compatible = "qcom,sm6350-adsp-pas", .data = &sm6350_adsp_resource},
{ .compatible = "qcom,sm6350-cdsp-pas", .data = &sm6350_cdsp_resource},
{ .compatible = "qcom,sm6350-mpss-pas", .data = &mpss_resource_init},
{ .compatible = "qcom,sm8150-adsp-pas", .data = &sm8150_adsp_resource},
{ .compatible = "qcom,sm8150-cdsp-pas", .data = &sm8150_cdsp_resource},
{ .compatible = "qcom,sm8150-mpss-pas", .data = &mpss_resource_init},
{ .compatible = "qcom,sm8150-slpi-pas", .data = &sdm845_slpi_resource_init},
{ .compatible = "qcom,sm8250-adsp-pas", .data = &sm8250_adsp_resource},
{ .compatible = "qcom,sm8250-cdsp-pas", .data = &sm8250_cdsp_resource},
{ .compatible = "qcom,sm8250-slpi-pas", .data = &sdm845_slpi_resource_init},
{ .compatible = "qcom,sm8350-adsp-pas", .data = &sm8350_adsp_resource},
{ .compatible = "qcom,sm8350-cdsp-pas", .data = &sm8350_cdsp_resource},
{ .compatible = "qcom,sm8350-slpi-pas", .data = &sdm845_slpi_resource_init},
{ .compatible = "qcom,sm8350-mpss-pas", .data = &mpss_resource_init},
{ .compatible = "qcom,sm8450-adsp-pas", .data = &sm8350_adsp_resource},
{ .compatible = "qcom,sm8450-cdsp-pas", .data = &sm8350_cdsp_resource},
{ .compatible = "qcom,sm8450-slpi-pas", .data = &sdm845_slpi_resource_init},
{ .compatible = "qcom,sm8450-mpss-pas", .data = &sm8450_mpss_resource},
{ .compatible = "qcom,sm8550-adsp-pas", .data = &sm8550_adsp_resource},
{ .compatible = "qcom,sm8550-cdsp-pas", .data = &sm8550_cdsp_resource},
{ .compatible = "qcom,sm8550-mpss-pas", .data = &sm8550_mpss_resource},
{ },
};
MODULE_DEVICE_TABLE(of, adsp_of_match);
static struct platform_driver adsp_driver = {
.probe = adsp_probe,
.remove_new = adsp_remove,
.driver = {
.name = "qcom_q6v5_pas",
.of_match_table = adsp_of_match,
},
};
module_platform_driver(adsp_driver);
MODULE_DESCRIPTION("Qualcomm Hexagon v5 Peripheral Authentication Service driver");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/remoteproc/qcom_q6v5_pas.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* TI Keystone DSP remoteproc driver
*
* Copyright (C) 2015-2017 Texas Instruments Incorporated - http://www.ti.com/
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/workqueue.h>
#include <linux/of_address.h>
#include <linux/of_reserved_mem.h>
#include <linux/gpio/consumer.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <linux/remoteproc.h>
#include <linux/reset.h>
#include "remoteproc_internal.h"
#define KEYSTONE_RPROC_LOCAL_ADDRESS_MASK (SZ_16M - 1)
/**
* struct keystone_rproc_mem - internal memory structure
* @cpu_addr: MPU virtual address of the memory region
* @bus_addr: Bus address used to access the memory region
* @dev_addr: Device address of the memory region from DSP view
* @size: Size of the memory region
*/
struct keystone_rproc_mem {
void __iomem *cpu_addr;
phys_addr_t bus_addr;
u32 dev_addr;
size_t size;
};
/**
* struct keystone_rproc - keystone remote processor driver structure
* @dev: cached device pointer
* @rproc: remoteproc device handle
* @mem: internal memory regions data
* @num_mems: number of internal memory regions
* @dev_ctrl: device control regmap handle
* @reset: reset control handle
* @boot_offset: boot register offset in @dev_ctrl regmap
* @irq_ring: irq entry for vring
* @irq_fault: irq entry for exception
* @kick_gpio: gpio used for virtio kicks
* @workqueue: workqueue for processing virtio interrupts
*/
struct keystone_rproc {
struct device *dev;
struct rproc *rproc;
struct keystone_rproc_mem *mem;
int num_mems;
struct regmap *dev_ctrl;
struct reset_control *reset;
struct gpio_desc *kick_gpio;
u32 boot_offset;
int irq_ring;
int irq_fault;
struct work_struct workqueue;
};
/* Put the DSP processor into reset */
static void keystone_rproc_dsp_reset(struct keystone_rproc *ksproc)
{
reset_control_assert(ksproc->reset);
}
/* Configure the boot address and boot the DSP processor */
static int keystone_rproc_dsp_boot(struct keystone_rproc *ksproc, u32 boot_addr)
{
int ret;
if (boot_addr & (SZ_1K - 1)) {
dev_err(ksproc->dev, "invalid boot address 0x%x, must be aligned on a 1KB boundary\n",
boot_addr);
return -EINVAL;
}
ret = regmap_write(ksproc->dev_ctrl, ksproc->boot_offset, boot_addr);
if (ret) {
dev_err(ksproc->dev, "regmap_write of boot address failed, status = %d\n",
ret);
return ret;
}
reset_control_deassert(ksproc->reset);
return 0;
}
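/*
 * Illustrative note (made-up addresses): the alignment check above accepts a
 * boot address such as 0x0c200000 (0x0c200000 & 0x3ff == 0) and rejects one
 * such as 0x0c200080 with -EINVAL, since the low 10 bits must be zero.
 */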
/*
* Process the remoteproc exceptions
*
* The exception reporting on Keystone DSP remote processors is very simple
* compared to the equivalent processors on the OMAP family; it is notified
* through a dedicated, software-designated interrupt source in the IPC interrupt
* generation register.
*
* This function simply invokes rproc_report_crash() to report the exception
* to the remoteproc driver core and trigger a recovery.
*/
static irqreturn_t keystone_rproc_exception_interrupt(int irq, void *dev_id)
{
struct keystone_rproc *ksproc = dev_id;
rproc_report_crash(ksproc->rproc, RPROC_FATAL_ERROR);
return IRQ_HANDLED;
}
/*
* Main virtqueue message workqueue function
*
* This function is executed upon scheduling of the keystone remoteproc
* driver's workqueue. The workqueue is scheduled by the vring ISR handler.
*
* There is no payload message indicating the virtqueue index as is the
* case with mailbox-based implementations on the OMAP family. As such, this
* handler processes both the Tx and Rx virtqueue indices on every invocation.
* The rproc_vq_interrupt function can detect if there are new unprocessed
* messages or not (returns IRQ_NONE vs IRQ_HANDLED), but there is no need
* to check for these return values. The index 0 triggering will process all
* pending Rx buffers, and the index 1 triggering will process all newly
* available Tx buffers and will wake up any potentially blocked senders.
*
* NOTE:
* 1. A payload could be added by using some of the source bits in the
* IPC interrupt generation registers, but this would need additional
* changes to the overall IPC stack, and currently there is no benefit
* to adopting that approach.
* 2. The current logic is based on an inherent design assumption of supporting
* only 2 vrings, but this can be changed if needed.
*/
static void handle_event(struct work_struct *work)
{
struct keystone_rproc *ksproc =
container_of(work, struct keystone_rproc, workqueue);
rproc_vq_interrupt(ksproc->rproc, 0);
rproc_vq_interrupt(ksproc->rproc, 1);
}
/*
* Interrupt handler for processing vring kicks from remote processor
*/
static irqreturn_t keystone_rproc_vring_interrupt(int irq, void *dev_id)
{
struct keystone_rproc *ksproc = dev_id;
schedule_work(&ksproc->workqueue);
return IRQ_HANDLED;
}
/*
* Power up the DSP remote processor.
*
* This function will be invoked only after the firmware for this rproc
* was loaded, parsed successfully, and all of its resource requirements
* were met.
*/
static int keystone_rproc_start(struct rproc *rproc)
{
struct keystone_rproc *ksproc = rproc->priv;
int ret;
INIT_WORK(&ksproc->workqueue, handle_event);
ret = request_irq(ksproc->irq_ring, keystone_rproc_vring_interrupt, 0,
dev_name(ksproc->dev), ksproc);
if (ret) {
dev_err(ksproc->dev, "failed to enable vring interrupt, ret = %d\n",
ret);
goto out;
}
ret = request_irq(ksproc->irq_fault, keystone_rproc_exception_interrupt,
0, dev_name(ksproc->dev), ksproc);
if (ret) {
dev_err(ksproc->dev, "failed to enable exception interrupt, ret = %d\n",
ret);
goto free_vring_irq;
}
ret = keystone_rproc_dsp_boot(ksproc, rproc->bootaddr);
if (ret)
goto free_exc_irq;
return 0;
free_exc_irq:
free_irq(ksproc->irq_fault, ksproc);
free_vring_irq:
free_irq(ksproc->irq_ring, ksproc);
flush_work(&ksproc->workqueue);
out:
return ret;
}
/*
* Stop the DSP remote processor.
*
* This function puts the DSP processor into reset, and finishes processing
* of any pending messages.
*/
static int keystone_rproc_stop(struct rproc *rproc)
{
struct keystone_rproc *ksproc = rproc->priv;
keystone_rproc_dsp_reset(ksproc);
free_irq(ksproc->irq_fault, ksproc);
free_irq(ksproc->irq_ring, ksproc);
flush_work(&ksproc->workqueue);
return 0;
}
/*
* Kick the remote processor to notify about pending unprocessed messages.
* The vqid argument is not used and is inconsequential, as the kick is performed
* through a simulated GPIO (a bit in an IPC interrupt-triggering register);
* the remote processor is expected to process both its Tx and Rx virtqueues.
*/
static void keystone_rproc_kick(struct rproc *rproc, int vqid)
{
struct keystone_rproc *ksproc = rproc->priv;
if (!ksproc->kick_gpio)
return;
gpiod_set_value(ksproc->kick_gpio, 1);
}
/*
* Custom function to translate a DSP device address (internal RAMs only) to a
* kernel virtual address. The DSPs can access their RAMs at either an internal
* address visible only from the DSP, or at the SoC-level bus address. Both
* address views need to be checked during translation. The translated addresses
* can be used either by the remoteproc core for loading (when using the kernel
* remoteproc loader), or by any rpmsg bus drivers.
*/
static void *keystone_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct keystone_rproc *ksproc = rproc->priv;
void __iomem *va = NULL;
phys_addr_t bus_addr;
u32 dev_addr, offset;
size_t size;
int i;
if (len == 0)
return NULL;
for (i = 0; i < ksproc->num_mems; i++) {
bus_addr = ksproc->mem[i].bus_addr;
dev_addr = ksproc->mem[i].dev_addr;
size = ksproc->mem[i].size;
if (da < KEYSTONE_RPROC_LOCAL_ADDRESS_MASK) {
/* handle DSP-view addresses */
if ((da >= dev_addr) &&
((da + len) <= (dev_addr + size))) {
offset = da - dev_addr;
va = ksproc->mem[i].cpu_addr + offset;
break;
}
} else {
/* handle SoC-view addresses */
if ((da >= bus_addr) &&
(da + len) <= (bus_addr + size)) {
offset = da - bus_addr;
va = ksproc->mem[i].cpu_addr + offset;
break;
}
}
}
return (__force void *)va;
}
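/*
 * Worked example (illustrative addresses): for a memory entry with
 * bus_addr = 0x10800000, dev_addr = bus_addr & KEYSTONE_RPROC_LOCAL_ADDRESS_MASK
 * = 0x00800000. A DSP-view da of 0x00800100 takes the first branch and a
 * SoC-view da of 0x10800100 takes the second; both resolve to the same
 * kernel address, cpu_addr + 0x100.
 */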
static const struct rproc_ops keystone_rproc_ops = {
.start = keystone_rproc_start,
.stop = keystone_rproc_stop,
.kick = keystone_rproc_kick,
.da_to_va = keystone_rproc_da_to_va,
};
static int keystone_rproc_of_get_memories(struct platform_device *pdev,
struct keystone_rproc *ksproc)
{
static const char * const mem_names[] = {"l2sram", "l1pram", "l1dram"};
struct device *dev = &pdev->dev;
struct resource *res;
int num_mems = 0;
int i;
num_mems = ARRAY_SIZE(mem_names);
ksproc->mem = devm_kcalloc(ksproc->dev, num_mems,
sizeof(*ksproc->mem), GFP_KERNEL);
if (!ksproc->mem)
return -ENOMEM;
for (i = 0; i < num_mems; i++) {
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
mem_names[i]);
ksproc->mem[i].cpu_addr = devm_ioremap_resource(dev, res);
if (IS_ERR(ksproc->mem[i].cpu_addr)) {
dev_err(dev, "failed to parse and map %s memory\n",
mem_names[i]);
return PTR_ERR(ksproc->mem[i].cpu_addr);
}
ksproc->mem[i].bus_addr = res->start;
ksproc->mem[i].dev_addr =
res->start & KEYSTONE_RPROC_LOCAL_ADDRESS_MASK;
ksproc->mem[i].size = resource_size(res);
/* zero out memories to start in a pristine state */
memset((__force void *)ksproc->mem[i].cpu_addr, 0,
ksproc->mem[i].size);
}
ksproc->num_mems = num_mems;
return 0;
}
static int keystone_rproc_of_get_dev_syscon(struct platform_device *pdev,
struct keystone_rproc *ksproc)
{
struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
int ret;
if (!of_property_read_bool(np, "ti,syscon-dev")) {
dev_err(dev, "ti,syscon-dev property is absent\n");
return -EINVAL;
}
ksproc->dev_ctrl =
syscon_regmap_lookup_by_phandle(np, "ti,syscon-dev");
if (IS_ERR(ksproc->dev_ctrl)) {
ret = PTR_ERR(ksproc->dev_ctrl);
return ret;
}
if (of_property_read_u32_index(np, "ti,syscon-dev", 1,
&ksproc->boot_offset)) {
dev_err(dev, "couldn't read the boot register offset\n");
return -EINVAL;
}
return 0;
}
static int keystone_rproc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct keystone_rproc *ksproc;
struct rproc *rproc;
int dsp_id;
char *fw_name = NULL;
char *template = "keystone-dsp%d-fw";
int name_len = 0;
int ret = 0;
if (!np) {
dev_err(dev, "only DT-based devices are supported\n");
return -ENODEV;
}
dsp_id = of_alias_get_id(np, "rproc");
if (dsp_id < 0) {
dev_warn(dev, "device does not have an alias id\n");
return dsp_id;
}
/* construct a custom default fw name - subject to change in future */
name_len = strlen(template); /* assuming a single digit alias */
fw_name = devm_kzalloc(dev, name_len, GFP_KERNEL);
if (!fw_name)
return -ENOMEM;
snprintf(fw_name, name_len, template, dsp_id);
rproc = rproc_alloc(dev, dev_name(dev), &keystone_rproc_ops, fw_name,
sizeof(*ksproc));
if (!rproc)
return -ENOMEM;
rproc->has_iommu = false;
ksproc = rproc->priv;
ksproc->rproc = rproc;
ksproc->dev = dev;
ret = keystone_rproc_of_get_dev_syscon(pdev, ksproc);
if (ret)
goto free_rproc;
ksproc->reset = devm_reset_control_get_exclusive(dev, NULL);
if (IS_ERR(ksproc->reset)) {
ret = PTR_ERR(ksproc->reset);
goto free_rproc;
}
/* enable clock for accessing DSP internal memories */
pm_runtime_enable(dev);
ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
dev_err(dev, "failed to enable clock, status = %d\n", ret);
goto disable_rpm;
}
ret = keystone_rproc_of_get_memories(pdev, ksproc);
if (ret)
goto disable_clk;
ksproc->irq_ring = platform_get_irq_byname(pdev, "vring");
if (ksproc->irq_ring < 0) {
ret = ksproc->irq_ring;
goto disable_clk;
}
ksproc->irq_fault = platform_get_irq_byname(pdev, "exception");
if (ksproc->irq_fault < 0) {
ret = ksproc->irq_fault;
goto disable_clk;
}
ksproc->kick_gpio = gpiod_get(dev, "kick", GPIOD_ASIS);
ret = PTR_ERR_OR_ZERO(ksproc->kick_gpio);
if (ret) {
dev_err(dev, "failed to get gpio for virtio kicks, status = %d\n",
ret);
goto disable_clk;
}
if (of_reserved_mem_device_init(dev))
dev_warn(dev, "device does not have specific CMA pool\n");
/* ensure the DSP is in reset before loading firmware */
ret = reset_control_status(ksproc->reset);
if (ret < 0) {
dev_err(dev, "failed to get reset status, status = %d\n", ret);
goto release_mem;
} else if (ret == 0) {
WARN(1, "device is not in reset\n");
keystone_rproc_dsp_reset(ksproc);
}
ret = rproc_add(rproc);
if (ret) {
dev_err(dev, "failed to register device with remoteproc core, status = %d\n",
ret);
goto release_mem;
}
platform_set_drvdata(pdev, ksproc);
return 0;
release_mem:
of_reserved_mem_device_release(dev);
gpiod_put(ksproc->kick_gpio);
disable_clk:
pm_runtime_put_sync(dev);
disable_rpm:
pm_runtime_disable(dev);
free_rproc:
rproc_free(rproc);
return ret;
}
static void keystone_rproc_remove(struct platform_device *pdev)
{
struct keystone_rproc *ksproc = platform_get_drvdata(pdev);
rproc_del(ksproc->rproc);
gpiod_put(ksproc->kick_gpio);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
rproc_free(ksproc->rproc);
of_reserved_mem_device_release(&pdev->dev);
}
static const struct of_device_id keystone_rproc_of_match[] = {
{ .compatible = "ti,k2hk-dsp", },
{ .compatible = "ti,k2l-dsp", },
{ .compatible = "ti,k2e-dsp", },
{ .compatible = "ti,k2g-dsp", },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, keystone_rproc_of_match);
static struct platform_driver keystone_rproc_driver = {
.probe = keystone_rproc_probe,
.remove_new = keystone_rproc_remove,
.driver = {
.name = "keystone-rproc",
.of_match_table = keystone_rproc_of_match,
},
};
module_platform_driver(keystone_rproc_driver);
MODULE_AUTHOR("Suman Anna <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI Keystone DSP Remoteproc driver");
|
linux-master
|
drivers/remoteproc/keystone_remoteproc.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) IoT.bzh 2021
*/
#include <linux/limits.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/remoteproc.h>
#include <linux/reset.h>
#include <linux/soc/renesas/rcar-rst.h>
#include "remoteproc_internal.h"
struct rcar_rproc {
struct reset_control *rst;
};
static int rcar_rproc_mem_alloc(struct rproc *rproc,
struct rproc_mem_entry *mem)
{
struct device *dev = &rproc->dev;
void *va;
dev_dbg(dev, "map memory: %pa+%zx\n", &mem->dma, mem->len);
va = ioremap_wc(mem->dma, mem->len);
if (!va) {
dev_err(dev, "Unable to map memory region: %pa+%zx\n",
&mem->dma, mem->len);
return -ENOMEM;
}
/* Update memory entry va */
mem->va = va;
return 0;
}
static int rcar_rproc_mem_release(struct rproc *rproc,
struct rproc_mem_entry *mem)
{
dev_dbg(&rproc->dev, "unmap memory: %pa\n", &mem->dma);
iounmap(mem->va);
return 0;
}
static int rcar_rproc_prepare(struct rproc *rproc)
{
struct device *dev = rproc->dev.parent;
struct device_node *np = dev->of_node;
struct of_phandle_iterator it;
struct rproc_mem_entry *mem;
struct reserved_mem *rmem;
u32 da;
/* Register associated reserved memory regions */
of_phandle_iterator_init(&it, np, "memory-region", NULL, 0);
while (of_phandle_iterator_next(&it) == 0) {
rmem = of_reserved_mem_lookup(it.node);
if (!rmem) {
of_node_put(it.node);
dev_err(&rproc->dev,
"unable to acquire memory-region\n");
return -EINVAL;
}
if (rmem->base > U32_MAX) {
of_node_put(it.node);
return -EINVAL;
}
/* No need to translate pa to da, R-Car uses the same mapping */
da = rmem->base;
mem = rproc_mem_entry_init(dev, NULL,
rmem->base,
rmem->size, da,
rcar_rproc_mem_alloc,
rcar_rproc_mem_release,
it.node->name);
if (!mem) {
of_node_put(it.node);
return -ENOMEM;
}
rproc_add_carveout(rproc, mem);
}
return 0;
}
static int rcar_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
{
int ret;
ret = rproc_elf_load_rsc_table(rproc, fw);
if (ret)
dev_info(&rproc->dev, "No resource table in elf\n");
return 0;
}
static int rcar_rproc_start(struct rproc *rproc)
{
struct rcar_rproc *priv = rproc->priv;
int err;
if (!rproc->bootaddr)
return -EINVAL;
err = rcar_rst_set_rproc_boot_addr(rproc->bootaddr);
if (err) {
dev_err(&rproc->dev, "failed to set rproc boot addr\n");
return err;
}
err = reset_control_deassert(priv->rst);
if (err)
dev_err(&rproc->dev, "failed to deassert reset\n");
return err;
}
static int rcar_rproc_stop(struct rproc *rproc)
{
struct rcar_rproc *priv = rproc->priv;
int err;
err = reset_control_assert(priv->rst);
if (err)
dev_err(&rproc->dev, "failed to assert reset\n");
return err;
}
static struct rproc_ops rcar_rproc_ops = {
.prepare = rcar_rproc_prepare,
.start = rcar_rproc_start,
.stop = rcar_rproc_stop,
.load = rproc_elf_load_segments,
.parse_fw = rcar_rproc_parse_fw,
.find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
.sanity_check = rproc_elf_sanity_check,
.get_boot_addr = rproc_elf_get_boot_addr,
};
static int rcar_rproc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct rcar_rproc *priv;
struct rproc *rproc;
int ret;
rproc = devm_rproc_alloc(dev, np->name, &rcar_rproc_ops,
NULL, sizeof(*priv));
if (!rproc)
return -ENOMEM;
priv = rproc->priv;
priv->rst = devm_reset_control_get_exclusive(dev, NULL);
if (IS_ERR(priv->rst)) {
ret = PTR_ERR(priv->rst);
dev_err_probe(dev, ret, "failed to acquire rproc reset\n");
return ret;
}
pm_runtime_enable(dev);
ret = pm_runtime_resume_and_get(dev);
if (ret) {
dev_err(dev, "failed to power up\n");
return ret;
}
dev_set_drvdata(dev, rproc);
/* Manually start the rproc */
rproc->auto_boot = false;
ret = devm_rproc_add(dev, rproc);
if (ret) {
dev_err(dev, "rproc_add failed\n");
goto pm_disable;
}
return 0;
pm_disable:
pm_runtime_disable(dev);
return ret;
}
static void rcar_rproc_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
pm_runtime_disable(dev);
}
static const struct of_device_id rcar_rproc_of_match[] = {
{ .compatible = "renesas,rcar-cr7" },
{},
};
MODULE_DEVICE_TABLE(of, rcar_rproc_of_match);
static struct platform_driver rcar_rproc_driver = {
.probe = rcar_rproc_probe,
.remove_new = rcar_rproc_remove,
.driver = {
.name = "rcar-rproc",
.of_match_table = rcar_rproc_of_match,
},
};
module_platform_driver(rcar_rproc_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Renesas R-Car Gen3 remote processor control driver");
MODULE_AUTHOR("Julien Massot <[email protected]>");
|
linux-master
|
drivers/remoteproc/rcar_rproc.c
|
// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2019 MediaTek Inc.
#include <asm/barrier.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/remoteproc.h>
#include <linux/remoteproc/mtk_scp.h>
#include <linux/rpmsg/mtk_rpmsg.h>
#include "mtk_common.h"
#include "remoteproc_internal.h"
#define MAX_CODE_SIZE 0x500000
#define SECTION_NAME_IPI_BUFFER ".ipi_buffer"
/**
* scp_get() - get a reference to SCP.
*
 * @pdev: the platform device of the module that requests the SCP
 *        platform device in order to use the SCP API.
 *
 * Return: NULL on failure, otherwise a reference to the SCP.
**/
struct mtk_scp *scp_get(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *scp_node;
struct platform_device *scp_pdev;
scp_node = of_parse_phandle(dev->of_node, "mediatek,scp", 0);
if (!scp_node) {
dev_err(dev, "can't get SCP node\n");
return NULL;
}
scp_pdev = of_find_device_by_node(scp_node);
of_node_put(scp_node);
if (WARN_ON(!scp_pdev)) {
dev_err(dev, "SCP pdev failed\n");
return NULL;
}
return platform_get_drvdata(scp_pdev);
}
EXPORT_SYMBOL_GPL(scp_get);
/**
* scp_put() - "free" the SCP
*
* @scp: mtk_scp structure from scp_get().
**/
void scp_put(struct mtk_scp *scp)
{
put_device(scp->dev);
}
EXPORT_SYMBOL_GPL(scp_put);
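
/*
 * Editor's sketch, not part of the driver: a minimal example of how a client
 * with a "mediatek,scp" phandle in its device node might take and release a
 * reference to the SCP. The client platform device, the deferral policy and
 * the function name are hypothetical.
 */
static inline int example_scp_client_bind(struct platform_device *client_pdev)
{
	struct mtk_scp *scp;

	scp = scp_get(client_pdev);
	if (!scp)
		return -EPROBE_DEFER;	/* hypothetical: retry once the SCP has probed */

	/* ... use the SCP IPI / remoteproc APIs via the returned handle ... */

	scp_put(scp);	/* drop the device reference taken by scp_get() */
	return 0;
}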
static void scp_wdt_handler(struct mtk_scp *scp, u32 scp_to_host)
{
dev_err(scp->dev, "SCP watchdog timeout! 0x%x", scp_to_host);
rproc_report_crash(scp->rproc, RPROC_WATCHDOG);
}
static void scp_init_ipi_handler(void *data, unsigned int len, void *priv)
{
struct mtk_scp *scp = priv;
struct scp_run *run = data;
scp->run.signaled = run->signaled;
strscpy(scp->run.fw_ver, run->fw_ver, SCP_FW_VER_LEN);
scp->run.dec_capability = run->dec_capability;
scp->run.enc_capability = run->enc_capability;
wake_up_interruptible(&scp->run.wq);
}
static void scp_ipi_handler(struct mtk_scp *scp)
{
struct mtk_share_obj __iomem *rcv_obj = scp->recv_buf;
struct scp_ipi_desc *ipi_desc = scp->ipi_desc;
u8 tmp_data[SCP_SHARE_BUFFER_SIZE];
scp_ipi_handler_t handler;
u32 id = readl(&rcv_obj->id);
u32 len = readl(&rcv_obj->len);
if (len > SCP_SHARE_BUFFER_SIZE) {
dev_err(scp->dev, "ipi message too long (len %d, max %d)", len,
SCP_SHARE_BUFFER_SIZE);
return;
}
if (id >= SCP_IPI_MAX) {
dev_err(scp->dev, "No such ipi id = %d\n", id);
return;
}
scp_ipi_lock(scp, id);
handler = ipi_desc[id].handler;
if (!handler) {
		dev_err(scp->dev, "No handler registered for ipi id = %d\n", id);
scp_ipi_unlock(scp, id);
return;
}
memcpy_fromio(tmp_data, &rcv_obj->share_buf, len);
handler(tmp_data, len, ipi_desc[id].priv);
scp_ipi_unlock(scp, id);
scp->ipi_id_ack[id] = true;
wake_up(&scp->ack_wq);
}
static int scp_elf_read_ipi_buf_addr(struct mtk_scp *scp,
const struct firmware *fw,
size_t *offset);
static int scp_ipi_init(struct mtk_scp *scp, const struct firmware *fw)
{
int ret;
size_t offset;
/* read the ipi buf addr from FW itself first */
ret = scp_elf_read_ipi_buf_addr(scp, fw, &offset);
if (ret) {
/* use default ipi buf addr if the FW doesn't have it */
offset = scp->data->ipi_buf_offset;
if (!offset)
return ret;
}
dev_info(scp->dev, "IPI buf addr %#010zx\n", offset);
scp->recv_buf = (struct mtk_share_obj __iomem *)
(scp->sram_base + offset);
scp->send_buf = (struct mtk_share_obj __iomem *)
(scp->sram_base + offset + sizeof(*scp->recv_buf));
memset_io(scp->recv_buf, 0, sizeof(*scp->recv_buf));
memset_io(scp->send_buf, 0, sizeof(*scp->send_buf));
return 0;
}
static void mt8183_scp_reset_assert(struct mtk_scp *scp)
{
u32 val;
val = readl(scp->reg_base + MT8183_SW_RSTN);
val &= ~MT8183_SW_RSTN_BIT;
writel(val, scp->reg_base + MT8183_SW_RSTN);
}
static void mt8183_scp_reset_deassert(struct mtk_scp *scp)
{
u32 val;
val = readl(scp->reg_base + MT8183_SW_RSTN);
val |= MT8183_SW_RSTN_BIT;
writel(val, scp->reg_base + MT8183_SW_RSTN);
}
static void mt8192_scp_reset_assert(struct mtk_scp *scp)
{
writel(1, scp->reg_base + MT8192_CORE0_SW_RSTN_SET);
}
static void mt8192_scp_reset_deassert(struct mtk_scp *scp)
{
writel(1, scp->reg_base + MT8192_CORE0_SW_RSTN_CLR);
}
static void mt8183_scp_irq_handler(struct mtk_scp *scp)
{
u32 scp_to_host;
scp_to_host = readl(scp->reg_base + MT8183_SCP_TO_HOST);
if (scp_to_host & MT8183_SCP_IPC_INT_BIT)
scp_ipi_handler(scp);
else
scp_wdt_handler(scp, scp_to_host);
/* SCP won't send another interrupt until we set SCP_TO_HOST to 0. */
writel(MT8183_SCP_IPC_INT_BIT | MT8183_SCP_WDT_INT_BIT,
scp->reg_base + MT8183_SCP_TO_HOST);
}
static void mt8192_scp_irq_handler(struct mtk_scp *scp)
{
u32 scp_to_host;
scp_to_host = readl(scp->reg_base + MT8192_SCP2APMCU_IPC_SET);
if (scp_to_host & MT8192_SCP_IPC_INT_BIT) {
scp_ipi_handler(scp);
/*
* SCP won't send another interrupt until we clear
* MT8192_SCP2APMCU_IPC.
*/
writel(MT8192_SCP_IPC_INT_BIT,
scp->reg_base + MT8192_SCP2APMCU_IPC_CLR);
} else {
scp_wdt_handler(scp, scp_to_host);
writel(1, scp->reg_base + MT8192_CORE0_WDT_IRQ);
}
}
static irqreturn_t scp_irq_handler(int irq, void *priv)
{
struct mtk_scp *scp = priv;
int ret;
ret = clk_prepare_enable(scp->clk);
if (ret) {
dev_err(scp->dev, "failed to enable clocks\n");
return IRQ_NONE;
}
scp->data->scp_irq_handler(scp);
clk_disable_unprepare(scp->clk);
return IRQ_HANDLED;
}
static int scp_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
{
struct device *dev = &rproc->dev;
struct elf32_hdr *ehdr;
struct elf32_phdr *phdr;
int i, ret = 0;
const u8 *elf_data = fw->data;
ehdr = (struct elf32_hdr *)elf_data;
phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff);
/* go through the available ELF segments */
for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
u32 da = phdr->p_paddr;
u32 memsz = phdr->p_memsz;
u32 filesz = phdr->p_filesz;
u32 offset = phdr->p_offset;
void __iomem *ptr;
dev_dbg(dev, "phdr: type %d da 0x%x memsz 0x%x filesz 0x%x\n",
phdr->p_type, da, memsz, filesz);
if (phdr->p_type != PT_LOAD)
continue;
if (!filesz)
continue;
if (filesz > memsz) {
dev_err(dev, "bad phdr filesz 0x%x memsz 0x%x\n",
filesz, memsz);
ret = -EINVAL;
break;
}
if (offset + filesz > fw->size) {
dev_err(dev, "truncated fw: need 0x%x avail 0x%zx\n",
offset + filesz, fw->size);
ret = -EINVAL;
break;
}
/* grab the kernel address for this device address */
ptr = (void __iomem *)rproc_da_to_va(rproc, da, memsz, NULL);
if (!ptr) {
dev_err(dev, "bad phdr da 0x%x mem 0x%x\n", da, memsz);
ret = -EINVAL;
break;
}
/* put the segment where the remote processor expects it */
scp_memcpy_aligned(ptr, elf_data + phdr->p_offset, filesz);
}
return ret;
}
static int scp_elf_read_ipi_buf_addr(struct mtk_scp *scp,
const struct firmware *fw,
size_t *offset)
{
struct elf32_hdr *ehdr;
struct elf32_shdr *shdr, *shdr_strtab;
int i;
const u8 *elf_data = fw->data;
const char *strtab;
ehdr = (struct elf32_hdr *)elf_data;
shdr = (struct elf32_shdr *)(elf_data + ehdr->e_shoff);
shdr_strtab = shdr + ehdr->e_shstrndx;
strtab = (const char *)(elf_data + shdr_strtab->sh_offset);
for (i = 0; i < ehdr->e_shnum; i++, shdr++) {
if (strcmp(strtab + shdr->sh_name,
SECTION_NAME_IPI_BUFFER) == 0) {
*offset = shdr->sh_addr;
return 0;
}
}
return -ENOENT;
}
static int mt8183_scp_clk_get(struct mtk_scp *scp)
{
struct device *dev = scp->dev;
int ret = 0;
scp->clk = devm_clk_get(dev, "main");
if (IS_ERR(scp->clk)) {
dev_err(dev, "Failed to get clock\n");
ret = PTR_ERR(scp->clk);
}
return ret;
}
static int mt8192_scp_clk_get(struct mtk_scp *scp)
{
return mt8183_scp_clk_get(scp);
}
static int mt8195_scp_clk_get(struct mtk_scp *scp)
{
scp->clk = NULL;
return 0;
}
static int mt8183_scp_before_load(struct mtk_scp *scp)
{
/* Clear SCP to host interrupt */
writel(MT8183_SCP_IPC_INT_BIT, scp->reg_base + MT8183_SCP_TO_HOST);
/* Reset clocks before loading FW */
writel(0x0, scp->reg_base + MT8183_SCP_CLK_SW_SEL);
writel(0x0, scp->reg_base + MT8183_SCP_CLK_DIV_SEL);
/* Initialize TCM before loading FW. */
writel(0x0, scp->reg_base + MT8183_SCP_L1_SRAM_PD);
writel(0x0, scp->reg_base + MT8183_SCP_TCM_TAIL_SRAM_PD);
/* Turn on the power of SCP's SRAM before using it. */
writel(0x0, scp->reg_base + MT8183_SCP_SRAM_PDN);
/*
* Set I-cache and D-cache size before loading SCP FW.
* SCP SRAM logical address may change when cache size setting differs.
*/
writel(MT8183_SCP_CACHE_CON_WAYEN | MT8183_SCP_CACHESIZE_8KB,
scp->reg_base + MT8183_SCP_CACHE_CON);
writel(MT8183_SCP_CACHESIZE_8KB, scp->reg_base + MT8183_SCP_DCACHE_CON);
return 0;
}
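
/*
 * Editor's note on the two helpers below: they walk the SRAM power-down
 * register one bank per write. scp_sram_power_on() clears the power-down
 * bits from the top bank downwards (GENMASK(i, 0) keeps the lower banks
 * powered down at each step) and finishes by writing 0 so every bank is up;
 * scp_sram_power_off() writes 0 first and then sets the bits from the bottom
 * bank upwards. Banks selected by @reserved_mask always have their bits
 * written as 0, i.e. they are never powered down here.
 */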
static void scp_sram_power_on(void __iomem *addr, u32 reserved_mask)
{
int i;
for (i = 31; i >= 0; i--)
writel(GENMASK(i, 0) & ~reserved_mask, addr);
writel(0, addr);
}
static void scp_sram_power_off(void __iomem *addr, u32 reserved_mask)
{
int i;
writel(0, addr);
for (i = 0; i < 32; i++)
writel(GENMASK(i, 0) & ~reserved_mask, addr);
}
static int mt8186_scp_before_load(struct mtk_scp *scp)
{
/* Clear SCP to host interrupt */
writel(MT8183_SCP_IPC_INT_BIT, scp->reg_base + MT8183_SCP_TO_HOST);
/* Reset clocks before loading FW */
writel(0x0, scp->reg_base + MT8183_SCP_CLK_SW_SEL);
writel(0x0, scp->reg_base + MT8183_SCP_CLK_DIV_SEL);
	/*
	 * Turn on the power of SCP's SRAM before using it.
	 * Enable one block at a time.
	 */
scp_sram_power_on(scp->reg_base + MT8183_SCP_SRAM_PDN, 0);
/* Initialize TCM before loading FW. */
writel(0x0, scp->reg_base + MT8183_SCP_L1_SRAM_PD);
writel(0x0, scp->reg_base + MT8183_SCP_TCM_TAIL_SRAM_PD);
writel(0x0, scp->reg_base + MT8186_SCP_L1_SRAM_PD_P1);
writel(0x0, scp->reg_base + MT8186_SCP_L1_SRAM_PD_p2);
/*
* Set I-cache and D-cache size before loading SCP FW.
* SCP SRAM logical address may change when cache size setting differs.
*/
writel(MT8183_SCP_CACHE_CON_WAYEN | MT8183_SCP_CACHESIZE_8KB,
scp->reg_base + MT8183_SCP_CACHE_CON);
writel(MT8183_SCP_CACHESIZE_8KB, scp->reg_base + MT8183_SCP_DCACHE_CON);
return 0;
}
static int mt8192_scp_before_load(struct mtk_scp *scp)
{
/* clear SPM interrupt, SCP2SPM_IPC_CLR */
writel(0xff, scp->reg_base + MT8192_SCP2SPM_IPC_CLR);
writel(1, scp->reg_base + MT8192_CORE0_SW_RSTN_SET);
/* enable SRAM clock */
scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
scp_sram_power_on(scp->reg_base + MT8192_L1TCM_SRAM_PDN, 0);
scp_sram_power_on(scp->reg_base + MT8192_CPU0_SRAM_PD, 0);
/* enable MPU for all memory regions */
writel(0xff, scp->reg_base + MT8192_CORE0_MEM_ATT_PREDEF);
return 0;
}
static int mt8195_scp_before_load(struct mtk_scp *scp)
{
/* clear SPM interrupt, SCP2SPM_IPC_CLR */
writel(0xff, scp->reg_base + MT8192_SCP2SPM_IPC_CLR);
writel(1, scp->reg_base + MT8192_CORE0_SW_RSTN_SET);
/* enable SRAM clock */
scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
scp_sram_power_on(scp->reg_base + MT8192_L1TCM_SRAM_PDN,
MT8195_L1TCM_SRAM_PDN_RESERVED_RSI_BITS);
scp_sram_power_on(scp->reg_base + MT8192_CPU0_SRAM_PD, 0);
/* enable MPU for all memory regions */
writel(0xff, scp->reg_base + MT8192_CORE0_MEM_ATT_PREDEF);
return 0;
}
static int scp_load(struct rproc *rproc, const struct firmware *fw)
{
struct mtk_scp *scp = rproc->priv;
struct device *dev = scp->dev;
int ret;
ret = clk_prepare_enable(scp->clk);
if (ret) {
dev_err(dev, "failed to enable clocks\n");
return ret;
}
/* Hold SCP in reset while loading FW. */
scp->data->scp_reset_assert(scp);
ret = scp->data->scp_before_load(scp);
if (ret < 0)
goto leave;
ret = scp_elf_load_segments(rproc, fw);
leave:
clk_disable_unprepare(scp->clk);
return ret;
}
static int scp_parse_fw(struct rproc *rproc, const struct firmware *fw)
{
struct mtk_scp *scp = rproc->priv;
struct device *dev = scp->dev;
int ret;
ret = clk_prepare_enable(scp->clk);
if (ret) {
dev_err(dev, "failed to enable clocks\n");
return ret;
}
ret = scp_ipi_init(scp, fw);
clk_disable_unprepare(scp->clk);
return ret;
}
static int scp_start(struct rproc *rproc)
{
struct mtk_scp *scp = rproc->priv;
struct device *dev = scp->dev;
struct scp_run *run = &scp->run;
int ret;
ret = clk_prepare_enable(scp->clk);
if (ret) {
dev_err(dev, "failed to enable clocks\n");
return ret;
}
run->signaled = false;
scp->data->scp_reset_deassert(scp);
ret = wait_event_interruptible_timeout(
run->wq,
run->signaled,
msecs_to_jiffies(2000));
if (ret == 0) {
		dev_err(dev, "timed out waiting for SCP initialization!\n");
ret = -ETIME;
goto stop;
}
if (ret == -ERESTARTSYS) {
		dev_err(dev, "wait for SCP interrupted by a signal!\n");
goto stop;
}
clk_disable_unprepare(scp->clk);
dev_info(dev, "SCP is ready. FW version %s\n", run->fw_ver);
return 0;
stop:
scp->data->scp_reset_assert(scp);
clk_disable_unprepare(scp->clk);
return ret;
}
static void *mt8183_scp_da_to_va(struct mtk_scp *scp, u64 da, size_t len)
{
int offset;
if (da < scp->sram_size) {
offset = da;
if (offset >= 0 && (offset + len) <= scp->sram_size)
return (void __force *)scp->sram_base + offset;
} else if (scp->dram_size) {
offset = da - scp->dma_addr;
if (offset >= 0 && (offset + len) <= scp->dram_size)
return scp->cpu_addr + offset;
}
return NULL;
}
static void *mt8192_scp_da_to_va(struct mtk_scp *scp, u64 da, size_t len)
{
int offset;
if (da >= scp->sram_phys &&
(da + len) <= scp->sram_phys + scp->sram_size) {
offset = da - scp->sram_phys;
return (void __force *)scp->sram_base + offset;
}
/* optional memory region */
if (scp->l1tcm_size &&
da >= scp->l1tcm_phys &&
(da + len) <= scp->l1tcm_phys + scp->l1tcm_size) {
offset = da - scp->l1tcm_phys;
return (void __force *)scp->l1tcm_base + offset;
}
/* optional memory region */
if (scp->dram_size &&
da >= scp->dma_addr &&
(da + len) <= scp->dma_addr + scp->dram_size) {
offset = da - scp->dma_addr;
return scp->cpu_addr + offset;
}
return NULL;
}
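
/*
 * Editor's note, a worked example for the window checks above (the numbers
 * are hypothetical): with sram_phys = 0x10500000 and sram_size = 0x40000, a
 * device address of 0x10500080 with a small length falls inside the SRAM
 * window and resolves to sram_base + 0x80; an address outside the SRAM,
 * L1TCM and DRAM windows resolves to NULL and the caller treats it as a bad
 * address.
 */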
static void *scp_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct mtk_scp *scp = rproc->priv;
return scp->data->scp_da_to_va(scp, da, len);
}
static void mt8183_scp_stop(struct mtk_scp *scp)
{
/* Disable SCP watchdog */
writel(0, scp->reg_base + MT8183_WDT_CFG);
}
static void mt8192_scp_stop(struct mtk_scp *scp)
{
/* Disable SRAM clock */
scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
scp_sram_power_off(scp->reg_base + MT8192_L1TCM_SRAM_PDN, 0);
scp_sram_power_off(scp->reg_base + MT8192_CPU0_SRAM_PD, 0);
/* Disable SCP watchdog */
writel(0, scp->reg_base + MT8192_CORE0_WDT_CFG);
}
static void mt8195_scp_stop(struct mtk_scp *scp)
{
/* Disable SRAM clock */
scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
scp_sram_power_off(scp->reg_base + MT8192_L1TCM_SRAM_PDN,
MT8195_L1TCM_SRAM_PDN_RESERVED_RSI_BITS);
scp_sram_power_off(scp->reg_base + MT8192_CPU0_SRAM_PD, 0);
/* Disable SCP watchdog */
writel(0, scp->reg_base + MT8192_CORE0_WDT_CFG);
}
static int scp_stop(struct rproc *rproc)
{
struct mtk_scp *scp = rproc->priv;
int ret;
ret = clk_prepare_enable(scp->clk);
if (ret) {
dev_err(scp->dev, "failed to enable clocks\n");
return ret;
}
scp->data->scp_reset_assert(scp);
scp->data->scp_stop(scp);
clk_disable_unprepare(scp->clk);
return 0;
}
static const struct rproc_ops scp_ops = {
.start = scp_start,
.stop = scp_stop,
.load = scp_load,
.da_to_va = scp_da_to_va,
.parse_fw = scp_parse_fw,
.sanity_check = rproc_elf_sanity_check,
};
/**
* scp_get_device() - get device struct of SCP
*
* @scp: mtk_scp structure
**/
struct device *scp_get_device(struct mtk_scp *scp)
{
return scp->dev;
}
EXPORT_SYMBOL_GPL(scp_get_device);
/**
* scp_get_rproc() - get rproc struct of SCP
*
* @scp: mtk_scp structure
**/
struct rproc *scp_get_rproc(struct mtk_scp *scp)
{
return scp->rproc;
}
EXPORT_SYMBOL_GPL(scp_get_rproc);
/**
* scp_get_vdec_hw_capa() - get video decoder hardware capability
*
* @scp: mtk_scp structure
*
* Return: video decoder hardware capability
**/
unsigned int scp_get_vdec_hw_capa(struct mtk_scp *scp)
{
return scp->run.dec_capability;
}
EXPORT_SYMBOL_GPL(scp_get_vdec_hw_capa);
/**
* scp_get_venc_hw_capa() - get video encoder hardware capability
*
* @scp: mtk_scp structure
*
* Return: video encoder hardware capability
**/
unsigned int scp_get_venc_hw_capa(struct mtk_scp *scp)
{
return scp->run.enc_capability;
}
EXPORT_SYMBOL_GPL(scp_get_venc_hw_capa);
/**
* scp_mapping_dm_addr() - Mapping SRAM/DRAM to kernel virtual address
*
* @scp: mtk_scp structure
 * @mem_addr: memory address as seen by the SCP
 *
 * Map the SCP's SRAM address, DMEM (Data Extended Memory) address,
 * or working buffer address to a kernel virtual address.
 *
 * Return: ERR_PTR(-EINVAL) if the mapping failed, otherwise the
 * mapped kernel virtual address
**/
void *scp_mapping_dm_addr(struct mtk_scp *scp, u32 mem_addr)
{
void *ptr;
ptr = scp_da_to_va(scp->rproc, mem_addr, 0, NULL);
if (!ptr)
return ERR_PTR(-EINVAL);
return ptr;
}
EXPORT_SYMBOL_GPL(scp_mapping_dm_addr);
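
/*
 * Editor's sketch, not part of the driver: scp_mapping_dm_addr() returns an
 * ERR_PTR() on failure, so callers must check the result with IS_ERR()
 * before dereferencing it. The helper name and the 0x500000 SCP-side address
 * below are hypothetical.
 */
static inline void *example_scp_buf_to_va(struct mtk_scp *scp)
{
	void *va = scp_mapping_dm_addr(scp, 0x500000);

	if (IS_ERR(va))
		return NULL;	/* address is not backed by SCP SRAM or DRAM */

	return va;
}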
static int scp_map_memory_region(struct mtk_scp *scp)
{
int ret;
ret = of_reserved_mem_device_init(scp->dev);
/* reserved memory is optional. */
if (ret == -ENODEV) {
dev_info(scp->dev, "skipping reserved memory initialization.");
return 0;
}
if (ret) {
dev_err(scp->dev, "failed to assign memory-region: %d\n", ret);
return -ENOMEM;
}
/* Reserved SCP code size */
scp->dram_size = MAX_CODE_SIZE;
scp->cpu_addr = dma_alloc_coherent(scp->dev, scp->dram_size,
&scp->dma_addr, GFP_KERNEL);
if (!scp->cpu_addr)
return -ENOMEM;
return 0;
}
static void scp_unmap_memory_region(struct mtk_scp *scp)
{
if (scp->dram_size == 0)
return;
dma_free_coherent(scp->dev, scp->dram_size, scp->cpu_addr,
scp->dma_addr);
of_reserved_mem_device_release(scp->dev);
}
static int scp_register_ipi(struct platform_device *pdev, u32 id,
ipi_handler_t handler, void *priv)
{
struct mtk_scp *scp = platform_get_drvdata(pdev);
return scp_ipi_register(scp, id, handler, priv);
}
static void scp_unregister_ipi(struct platform_device *pdev, u32 id)
{
struct mtk_scp *scp = platform_get_drvdata(pdev);
scp_ipi_unregister(scp, id);
}
static int scp_send_ipi(struct platform_device *pdev, u32 id, void *buf,
unsigned int len, unsigned int wait)
{
struct mtk_scp *scp = platform_get_drvdata(pdev);
return scp_ipi_send(scp, id, buf, len, wait);
}
static struct mtk_rpmsg_info mtk_scp_rpmsg_info = {
.send_ipi = scp_send_ipi,
.register_ipi = scp_register_ipi,
.unregister_ipi = scp_unregister_ipi,
.ns_ipi_id = SCP_IPI_NS_SERVICE,
};
static void scp_add_rpmsg_subdev(struct mtk_scp *scp)
{
scp->rpmsg_subdev =
mtk_rpmsg_create_rproc_subdev(to_platform_device(scp->dev),
&mtk_scp_rpmsg_info);
if (scp->rpmsg_subdev)
rproc_add_subdev(scp->rproc, scp->rpmsg_subdev);
}
static void scp_remove_rpmsg_subdev(struct mtk_scp *scp)
{
if (scp->rpmsg_subdev) {
rproc_remove_subdev(scp->rproc, scp->rpmsg_subdev);
mtk_rpmsg_destroy_rproc_subdev(scp->rpmsg_subdev);
scp->rpmsg_subdev = NULL;
}
}
static int scp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct mtk_scp *scp;
struct rproc *rproc;
struct resource *res;
const char *fw_name = "scp.img";
int ret, i;
ret = rproc_of_parse_firmware(dev, 0, &fw_name);
if (ret < 0 && ret != -EINVAL)
return ret;
rproc = devm_rproc_alloc(dev, np->name, &scp_ops, fw_name, sizeof(*scp));
if (!rproc)
return dev_err_probe(dev, -ENOMEM, "unable to allocate remoteproc\n");
scp = rproc->priv;
scp->rproc = rproc;
scp->dev = dev;
scp->data = of_device_get_match_data(dev);
platform_set_drvdata(pdev, scp);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
scp->sram_base = devm_ioremap_resource(dev, res);
if (IS_ERR(scp->sram_base))
return dev_err_probe(dev, PTR_ERR(scp->sram_base),
"Failed to parse and map sram memory\n");
scp->sram_size = resource_size(res);
scp->sram_phys = res->start;
/* l1tcm is an optional memory region */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "l1tcm");
scp->l1tcm_base = devm_ioremap_resource(dev, res);
if (IS_ERR(scp->l1tcm_base)) {
ret = PTR_ERR(scp->l1tcm_base);
if (ret != -EINVAL) {
return dev_err_probe(dev, ret, "Failed to map l1tcm memory\n");
}
} else {
scp->l1tcm_size = resource_size(res);
scp->l1tcm_phys = res->start;
}
scp->reg_base = devm_platform_ioremap_resource_byname(pdev, "cfg");
if (IS_ERR(scp->reg_base))
return dev_err_probe(dev, PTR_ERR(scp->reg_base),
"Failed to parse and map cfg memory\n");
ret = scp->data->scp_clk_get(scp);
if (ret)
return ret;
ret = scp_map_memory_region(scp);
if (ret)
return ret;
mutex_init(&scp->send_lock);
for (i = 0; i < SCP_IPI_MAX; i++)
mutex_init(&scp->ipi_desc[i].lock);
/* register SCP initialization IPI */
ret = scp_ipi_register(scp, SCP_IPI_INIT, scp_init_ipi_handler, scp);
if (ret) {
dev_err(dev, "Failed to register IPI_SCP_INIT\n");
goto release_dev_mem;
}
init_waitqueue_head(&scp->run.wq);
init_waitqueue_head(&scp->ack_wq);
scp_add_rpmsg_subdev(scp);
ret = devm_request_threaded_irq(dev, platform_get_irq(pdev, 0), NULL,
scp_irq_handler, IRQF_ONESHOT,
pdev->name, scp);
if (ret) {
dev_err(dev, "failed to request irq\n");
goto remove_subdev;
}
ret = rproc_add(rproc);
if (ret)
goto remove_subdev;
return 0;
remove_subdev:
scp_remove_rpmsg_subdev(scp);
scp_ipi_unregister(scp, SCP_IPI_INIT);
release_dev_mem:
scp_unmap_memory_region(scp);
for (i = 0; i < SCP_IPI_MAX; i++)
mutex_destroy(&scp->ipi_desc[i].lock);
mutex_destroy(&scp->send_lock);
return ret;
}
static void scp_remove(struct platform_device *pdev)
{
struct mtk_scp *scp = platform_get_drvdata(pdev);
int i;
rproc_del(scp->rproc);
scp_remove_rpmsg_subdev(scp);
scp_ipi_unregister(scp, SCP_IPI_INIT);
scp_unmap_memory_region(scp);
for (i = 0; i < SCP_IPI_MAX; i++)
mutex_destroy(&scp->ipi_desc[i].lock);
mutex_destroy(&scp->send_lock);
}
static const struct mtk_scp_of_data mt8183_of_data = {
.scp_clk_get = mt8183_scp_clk_get,
.scp_before_load = mt8183_scp_before_load,
.scp_irq_handler = mt8183_scp_irq_handler,
.scp_reset_assert = mt8183_scp_reset_assert,
.scp_reset_deassert = mt8183_scp_reset_deassert,
.scp_stop = mt8183_scp_stop,
.scp_da_to_va = mt8183_scp_da_to_va,
.host_to_scp_reg = MT8183_HOST_TO_SCP,
.host_to_scp_int_bit = MT8183_HOST_IPC_INT_BIT,
.ipi_buf_offset = 0x7bdb0,
};
static const struct mtk_scp_of_data mt8186_of_data = {
.scp_clk_get = mt8195_scp_clk_get,
.scp_before_load = mt8186_scp_before_load,
.scp_irq_handler = mt8183_scp_irq_handler,
.scp_reset_assert = mt8183_scp_reset_assert,
.scp_reset_deassert = mt8183_scp_reset_deassert,
.scp_stop = mt8183_scp_stop,
.scp_da_to_va = mt8183_scp_da_to_va,
.host_to_scp_reg = MT8183_HOST_TO_SCP,
.host_to_scp_int_bit = MT8183_HOST_IPC_INT_BIT,
.ipi_buf_offset = 0x3bdb0,
};
static const struct mtk_scp_of_data mt8188_of_data = {
.scp_clk_get = mt8195_scp_clk_get,
.scp_before_load = mt8192_scp_before_load,
.scp_irq_handler = mt8192_scp_irq_handler,
.scp_reset_assert = mt8192_scp_reset_assert,
.scp_reset_deassert = mt8192_scp_reset_deassert,
.scp_stop = mt8192_scp_stop,
.scp_da_to_va = mt8192_scp_da_to_va,
.host_to_scp_reg = MT8192_GIPC_IN_SET,
.host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
};
static const struct mtk_scp_of_data mt8192_of_data = {
.scp_clk_get = mt8192_scp_clk_get,
.scp_before_load = mt8192_scp_before_load,
.scp_irq_handler = mt8192_scp_irq_handler,
.scp_reset_assert = mt8192_scp_reset_assert,
.scp_reset_deassert = mt8192_scp_reset_deassert,
.scp_stop = mt8192_scp_stop,
.scp_da_to_va = mt8192_scp_da_to_va,
.host_to_scp_reg = MT8192_GIPC_IN_SET,
.host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
};
static const struct mtk_scp_of_data mt8195_of_data = {
.scp_clk_get = mt8195_scp_clk_get,
.scp_before_load = mt8195_scp_before_load,
.scp_irq_handler = mt8192_scp_irq_handler,
.scp_reset_assert = mt8192_scp_reset_assert,
.scp_reset_deassert = mt8192_scp_reset_deassert,
.scp_stop = mt8195_scp_stop,
.scp_da_to_va = mt8192_scp_da_to_va,
.host_to_scp_reg = MT8192_GIPC_IN_SET,
.host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
};
static const struct of_device_id mtk_scp_of_match[] = {
{ .compatible = "mediatek,mt8183-scp", .data = &mt8183_of_data },
{ .compatible = "mediatek,mt8186-scp", .data = &mt8186_of_data },
{ .compatible = "mediatek,mt8188-scp", .data = &mt8188_of_data },
{ .compatible = "mediatek,mt8192-scp", .data = &mt8192_of_data },
{ .compatible = "mediatek,mt8195-scp", .data = &mt8195_of_data },
{},
};
MODULE_DEVICE_TABLE(of, mtk_scp_of_match);
static struct platform_driver mtk_scp_driver = {
.probe = scp_probe,
.remove_new = scp_remove,
.driver = {
.name = "mtk-scp",
.of_match_table = mtk_scp_of_match,
},
};
module_platform_driver(mtk_scp_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MediaTek SCP control driver");
|
linux-master
|
drivers/remoteproc/mtk_scp.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* OMAP Remote Processor driver
*
* Copyright (C) 2011-2020 Texas Instruments Incorporated - http://www.ti.com/
* Copyright (C) 2011 Google, Inc.
*
* Ohad Ben-Cohen <[email protected]>
* Brian Swetland <[email protected]>
* Fernando Guzman Lugo <[email protected]>
* Mark Grosen <[email protected]>
* Suman Anna <[email protected]>
* Hari Kanigeri <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/clk/ti.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/remoteproc.h>
#include <linux/mailbox_client.h>
#include <linux/omap-iommu.h>
#include <linux/omap-mailbox.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <linux/reset.h>
#include <clocksource/timer-ti-dm.h>
#include <linux/platform_data/dmtimer-omap.h>
#include "omap_remoteproc.h"
#include "remoteproc_internal.h"
/* default auto-suspend delay (ms) */
#define DEFAULT_AUTOSUSPEND_DELAY 10000
/**
* struct omap_rproc_boot_data - boot data structure for the DSP omap rprocs
* @syscon: regmap handle for the system control configuration module
* @boot_reg: boot register offset within the @syscon regmap
* @boot_reg_shift: bit-field shift required for the boot address value in
* @boot_reg
*/
struct omap_rproc_boot_data {
struct regmap *syscon;
unsigned int boot_reg;
unsigned int boot_reg_shift;
};
/**
* struct omap_rproc_mem - internal memory structure
* @cpu_addr: MPU virtual address of the memory region
* @bus_addr: bus address used to access the memory region
* @dev_addr: device address of the memory region from DSP view
* @size: size of the memory region
*/
struct omap_rproc_mem {
void __iomem *cpu_addr;
phys_addr_t bus_addr;
u32 dev_addr;
size_t size;
};
/**
* struct omap_rproc_timer - data structure for a timer used by a omap rproc
* @odt: timer pointer
* @timer_ops: OMAP dmtimer ops for @odt timer
* @irq: timer irq
*/
struct omap_rproc_timer {
struct omap_dm_timer *odt;
const struct omap_dm_timer_ops *timer_ops;
int irq;
};
/**
* struct omap_rproc - omap remote processor state
* @mbox: mailbox channel handle
* @client: mailbox client to request the mailbox channel
* @boot_data: boot data structure for setting processor boot address
* @mem: internal memory regions data
* @num_mems: number of internal memory regions
* @num_timers: number of rproc timer(s)
* @num_wd_timers: number of rproc watchdog timers
* @timers: timer(s) info used by rproc
* @autosuspend_delay: auto-suspend delay value to be used for runtime pm
* @need_resume: if true a resume is needed in the system resume callback
* @rproc: rproc handle
* @reset: reset handle
* @pm_comp: completion primitive to sync for suspend response
* @fck: functional clock for the remoteproc
* @suspend_acked: state machine flag to store the suspend request ack
*/
struct omap_rproc {
struct mbox_chan *mbox;
struct mbox_client client;
struct omap_rproc_boot_data *boot_data;
struct omap_rproc_mem *mem;
int num_mems;
int num_timers;
int num_wd_timers;
struct omap_rproc_timer *timers;
int autosuspend_delay;
bool need_resume;
struct rproc *rproc;
struct reset_control *reset;
struct completion pm_comp;
struct clk *fck;
bool suspend_acked;
};
/**
* struct omap_rproc_mem_data - memory definitions for an omap remote processor
* @name: name for this memory entry
* @dev_addr: device address for the memory entry
*/
struct omap_rproc_mem_data {
const char *name;
const u32 dev_addr;
};
/**
* struct omap_rproc_dev_data - device data for the omap remote processor
* @device_name: device name of the remote processor
* @mems: memory definitions for this remote processor
*/
struct omap_rproc_dev_data {
const char *device_name;
const struct omap_rproc_mem_data *mems;
};
/**
* omap_rproc_request_timer() - request a timer for a remoteproc
* @dev: device requesting the timer
* @np: device node pointer to the desired timer
* @timer: handle to a struct omap_rproc_timer to return the timer handle
*
* This helper function is used primarily to request a timer associated with
* a remoteproc. The returned handle is stored in the .odt field of the
* @timer structure passed in, and is used to invoke other timer specific
* ops (like starting a timer either during device initialization or during
* a resume operation, or for stopping/freeing a timer).
*
* Return: 0 on success, otherwise an appropriate failure
*/
static int omap_rproc_request_timer(struct device *dev, struct device_node *np,
struct omap_rproc_timer *timer)
{
int ret;
timer->odt = timer->timer_ops->request_by_node(np);
if (!timer->odt) {
dev_err(dev, "request for timer node %p failed\n", np);
return -EBUSY;
}
ret = timer->timer_ops->set_source(timer->odt, OMAP_TIMER_SRC_SYS_CLK);
if (ret) {
dev_err(dev, "error setting OMAP_TIMER_SRC_SYS_CLK as source for timer node %p\n",
np);
timer->timer_ops->free(timer->odt);
return ret;
}
/* clean counter, remoteproc code will set the value */
timer->timer_ops->set_load(timer->odt, 0);
return 0;
}
/**
* omap_rproc_start_timer() - start a timer for a remoteproc
* @timer: handle to a OMAP rproc timer
*
* This helper function is used to start a timer associated with a remoteproc,
* obtained using the request_timer ops. The helper function needs to be
* invoked by the driver to start the timer (during device initialization)
* or to just resume the timer.
*
* Return: 0 on success, otherwise a failure as returned by DMTimer ops
*/
static inline int omap_rproc_start_timer(struct omap_rproc_timer *timer)
{
return timer->timer_ops->start(timer->odt);
}
/**
* omap_rproc_stop_timer() - stop a timer for a remoteproc
* @timer: handle to a OMAP rproc timer
*
* This helper function is used to disable a timer associated with a
* remoteproc, and needs to be called either during a device shutdown
* or suspend operation. The separate helper function allows the driver
* to just stop a timer without having to release the timer during a
* suspend operation.
*
* Return: 0 on success, otherwise a failure as returned by DMTimer ops
*/
static inline int omap_rproc_stop_timer(struct omap_rproc_timer *timer)
{
return timer->timer_ops->stop(timer->odt);
}
/**
* omap_rproc_release_timer() - release a timer for a remoteproc
* @timer: handle to a OMAP rproc timer
*
* This helper function is used primarily to release a timer associated
* with a remoteproc. The dmtimer will be available for other clients to
* use once released.
*
* Return: 0 on success, otherwise a failure as returned by DMTimer ops
*/
static inline int omap_rproc_release_timer(struct omap_rproc_timer *timer)
{
return timer->timer_ops->free(timer->odt);
}
/**
* omap_rproc_get_timer_irq() - get the irq for a timer
* @timer: handle to a OMAP rproc timer
*
* This function is used to get the irq associated with a watchdog timer. The
 * function is called by the OMAP remoteproc driver to register an interrupt
* handler to handle watchdog events on the remote processor.
*
* Return: irq id on success, otherwise a failure as returned by DMTimer ops
*/
static inline int omap_rproc_get_timer_irq(struct omap_rproc_timer *timer)
{
return timer->timer_ops->get_irq(timer->odt);
}
/**
* omap_rproc_ack_timer_irq() - acknowledge a timer irq
* @timer: handle to a OMAP rproc timer
*
* This function is used to clear the irq associated with a watchdog timer.
* The function is called by the OMAP remoteproc upon a watchdog event on the
* remote processor to clear the interrupt status of the watchdog timer.
*/
static inline void omap_rproc_ack_timer_irq(struct omap_rproc_timer *timer)
{
timer->timer_ops->write_status(timer->odt, OMAP_TIMER_INT_OVERFLOW);
}
/**
* omap_rproc_watchdog_isr() - Watchdog ISR handler for remoteproc device
* @irq: IRQ number associated with a watchdog timer
* @data: IRQ handler data
*
 * This ISR routine executes the low-level code required to
* acknowledge a watchdog timer interrupt. There can be multiple watchdog
* timers associated with a rproc (like IPUs which have 2 watchdog timers,
* one per Cortex M3/M4 core), so a lookup has to be performed to identify
* the timer to acknowledge its interrupt.
*
* The function also invokes rproc_report_crash to report the watchdog event
* to the remoteproc driver core, to trigger a recovery.
*
* Return: IRQ_HANDLED on success, otherwise IRQ_NONE
*/
static irqreturn_t omap_rproc_watchdog_isr(int irq, void *data)
{
struct rproc *rproc = data;
struct omap_rproc *oproc = rproc->priv;
struct device *dev = rproc->dev.parent;
struct omap_rproc_timer *timers = oproc->timers;
struct omap_rproc_timer *wd_timer = NULL;
int num_timers = oproc->num_timers + oproc->num_wd_timers;
int i;
for (i = oproc->num_timers; i < num_timers; i++) {
if (timers[i].irq > 0 && irq == timers[i].irq) {
wd_timer = &timers[i];
break;
}
}
if (!wd_timer) {
dev_err(dev, "invalid timer\n");
return IRQ_NONE;
}
omap_rproc_ack_timer_irq(wd_timer);
rproc_report_crash(rproc, RPROC_WATCHDOG);
return IRQ_HANDLED;
}
/**
* omap_rproc_enable_timers() - enable the timers for a remoteproc
* @rproc: handle of a remote processor
* @configure: boolean flag used to acquire and configure the timer handle
*
* This function is used primarily to enable the timers associated with
* a remoteproc. The configure flag is provided to allow the driver
* to either acquire and start a timer (during device initialization) or
* to just start a timer (during a resume operation).
*
* Return: 0 on success, otherwise an appropriate failure
*/
static int omap_rproc_enable_timers(struct rproc *rproc, bool configure)
{
int i;
int ret = 0;
struct platform_device *tpdev;
struct dmtimer_platform_data *tpdata;
const struct omap_dm_timer_ops *timer_ops;
struct omap_rproc *oproc = rproc->priv;
struct omap_rproc_timer *timers = oproc->timers;
struct device *dev = rproc->dev.parent;
struct device_node *np = NULL;
int num_timers = oproc->num_timers + oproc->num_wd_timers;
if (!num_timers)
return 0;
if (!configure)
goto start_timers;
for (i = 0; i < num_timers; i++) {
if (i < oproc->num_timers)
np = of_parse_phandle(dev->of_node, "ti,timers", i);
else
np = of_parse_phandle(dev->of_node,
"ti,watchdog-timers",
(i - oproc->num_timers));
if (!np) {
ret = -ENXIO;
dev_err(dev, "device node lookup for timer at index %d failed: %d\n",
i < oproc->num_timers ? i :
i - oproc->num_timers, ret);
goto free_timers;
}
tpdev = of_find_device_by_node(np);
if (!tpdev) {
ret = -ENODEV;
dev_err(dev, "could not get timer platform device\n");
goto put_node;
}
tpdata = dev_get_platdata(&tpdev->dev);
put_device(&tpdev->dev);
if (!tpdata) {
ret = -EINVAL;
dev_err(dev, "dmtimer pdata structure NULL\n");
goto put_node;
}
timer_ops = tpdata->timer_ops;
if (!timer_ops || !timer_ops->request_by_node ||
!timer_ops->set_source || !timer_ops->set_load ||
!timer_ops->free || !timer_ops->start ||
!timer_ops->stop || !timer_ops->get_irq ||
!timer_ops->write_status) {
ret = -EINVAL;
dev_err(dev, "device does not have required timer ops\n");
goto put_node;
}
timers[i].irq = -1;
timers[i].timer_ops = timer_ops;
ret = omap_rproc_request_timer(dev, np, &timers[i]);
if (ret) {
dev_err(dev, "request for timer %p failed: %d\n", np,
ret);
goto put_node;
}
of_node_put(np);
if (i >= oproc->num_timers) {
timers[i].irq = omap_rproc_get_timer_irq(&timers[i]);
if (timers[i].irq < 0) {
dev_err(dev, "get_irq for timer %p failed: %d\n",
np, timers[i].irq);
ret = -EBUSY;
goto free_timers;
}
ret = request_irq(timers[i].irq,
omap_rproc_watchdog_isr, IRQF_SHARED,
"rproc-wdt", rproc);
if (ret) {
dev_err(dev, "error requesting irq for timer %p\n",
np);
omap_rproc_release_timer(&timers[i]);
timers[i].odt = NULL;
timers[i].timer_ops = NULL;
timers[i].irq = -1;
goto free_timers;
}
}
}
start_timers:
for (i = 0; i < num_timers; i++) {
ret = omap_rproc_start_timer(&timers[i]);
if (ret) {
			dev_err(dev, "start timer %p failed: %d\n", np,
ret);
break;
}
}
if (ret) {
while (i >= 0) {
omap_rproc_stop_timer(&timers[i]);
i--;
}
goto put_node;
}
return 0;
put_node:
if (configure)
of_node_put(np);
free_timers:
while (i--) {
if (i >= oproc->num_timers)
free_irq(timers[i].irq, rproc);
omap_rproc_release_timer(&timers[i]);
timers[i].odt = NULL;
timers[i].timer_ops = NULL;
timers[i].irq = -1;
}
return ret;
}
/**
* omap_rproc_disable_timers() - disable the timers for a remoteproc
* @rproc: handle of a remote processor
* @configure: boolean flag used to release the timer handle
*
* This function is used primarily to disable the timers associated with
* a remoteproc. The configure flag is provided to allow the driver
* to either stop and release a timer (during device shutdown) or to just
* stop a timer (during a suspend operation).
*
* Return: 0 on success or no timers
*/
static int omap_rproc_disable_timers(struct rproc *rproc, bool configure)
{
int i;
struct omap_rproc *oproc = rproc->priv;
struct omap_rproc_timer *timers = oproc->timers;
int num_timers = oproc->num_timers + oproc->num_wd_timers;
if (!num_timers)
return 0;
for (i = 0; i < num_timers; i++) {
omap_rproc_stop_timer(&timers[i]);
if (configure) {
if (i >= oproc->num_timers)
free_irq(timers[i].irq, rproc);
omap_rproc_release_timer(&timers[i]);
timers[i].odt = NULL;
timers[i].timer_ops = NULL;
timers[i].irq = -1;
}
}
return 0;
}
/**
* omap_rproc_mbox_callback() - inbound mailbox message handler
* @client: mailbox client pointer used for requesting the mailbox channel
* @data: mailbox payload
*
* This handler is invoked by omap's mailbox driver whenever a mailbox
* message is received. Usually, the mailbox payload simply contains
* the index of the virtqueue that is kicked by the remote processor,
* and we let remoteproc core handle it.
*
* In addition to virtqueue indices, we also have some out-of-band values
 * that indicate different events. Those values are deliberately very
* big so they don't coincide with virtqueue indices.
*/
static void omap_rproc_mbox_callback(struct mbox_client *client, void *data)
{
struct omap_rproc *oproc = container_of(client, struct omap_rproc,
client);
struct device *dev = oproc->rproc->dev.parent;
const char *name = oproc->rproc->name;
u32 msg = (u32)data;
dev_dbg(dev, "mbox msg: 0x%x\n", msg);
switch (msg) {
case RP_MBOX_CRASH:
/*
* remoteproc detected an exception, notify the rproc core.
* The remoteproc core will handle the recovery.
*/
dev_err(dev, "omap rproc %s crashed\n", name);
rproc_report_crash(oproc->rproc, RPROC_FATAL_ERROR);
break;
case RP_MBOX_ECHO_REPLY:
dev_info(dev, "received echo reply from %s\n", name);
break;
case RP_MBOX_SUSPEND_ACK:
case RP_MBOX_SUSPEND_CANCEL:
oproc->suspend_acked = msg == RP_MBOX_SUSPEND_ACK;
complete(&oproc->pm_comp);
break;
default:
if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG)
return;
if (msg > oproc->rproc->max_notifyid) {
dev_dbg(dev, "dropping unknown message 0x%x", msg);
return;
}
/* msg contains the index of the triggered vring */
if (rproc_vq_interrupt(oproc->rproc, msg) == IRQ_NONE)
dev_dbg(dev, "no message was found in vqid %d\n", msg);
}
}
/* kick a virtqueue */
static void omap_rproc_kick(struct rproc *rproc, int vqid)
{
struct omap_rproc *oproc = rproc->priv;
struct device *dev = rproc->dev.parent;
int ret;
/* wake up the rproc before kicking it */
ret = pm_runtime_get_sync(dev);
if (WARN_ON(ret < 0)) {
dev_err(dev, "pm_runtime_get_sync() failed during kick, ret = %d\n",
ret);
pm_runtime_put_noidle(dev);
return;
}
/* send the index of the triggered virtqueue in the mailbox payload */
ret = mbox_send_message(oproc->mbox, (void *)vqid);
if (ret < 0)
dev_err(dev, "failed to send mailbox message, status = %d\n",
ret);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
}
/**
* omap_rproc_write_dsp_boot_addr() - set boot address for DSP remote processor
* @rproc: handle of a remote processor
*
* Set boot address for a supported DSP remote processor.
*
* Return: 0 on success, or -EINVAL if boot address is not aligned properly
*/
static int omap_rproc_write_dsp_boot_addr(struct rproc *rproc)
{
struct device *dev = rproc->dev.parent;
struct omap_rproc *oproc = rproc->priv;
struct omap_rproc_boot_data *bdata = oproc->boot_data;
u32 offset = bdata->boot_reg;
u32 value;
u32 mask;
if (rproc->bootaddr & (SZ_1K - 1)) {
dev_err(dev, "invalid boot address 0x%llx, must be aligned on a 1KB boundary\n",
rproc->bootaddr);
return -EINVAL;
}
value = rproc->bootaddr >> bdata->boot_reg_shift;
mask = ~(SZ_1K - 1) >> bdata->boot_reg_shift;
return regmap_update_bits(bdata->syscon, offset, mask, value);
}
/*
* Power up the remote processor.
*
* This function will be invoked only after the firmware for this rproc
* was loaded, parsed successfully, and all of its resource requirements
* were met.
*/
static int omap_rproc_start(struct rproc *rproc)
{
struct omap_rproc *oproc = rproc->priv;
struct device *dev = rproc->dev.parent;
int ret;
struct mbox_client *client = &oproc->client;
if (oproc->boot_data) {
ret = omap_rproc_write_dsp_boot_addr(rproc);
if (ret)
return ret;
}
client->dev = dev;
client->tx_done = NULL;
client->rx_callback = omap_rproc_mbox_callback;
client->tx_block = false;
client->knows_txdone = false;
oproc->mbox = mbox_request_channel(client, 0);
if (IS_ERR(oproc->mbox)) {
ret = -EBUSY;
dev_err(dev, "mbox_request_channel failed: %ld\n",
PTR_ERR(oproc->mbox));
return ret;
}
/*
	 * Ping the remote processor. This is only for sanity's sake;
* there is no functional effect whatsoever.
*
* Note that the reply will _not_ arrive immediately: this message
* will wait in the mailbox fifo until the remote processor is booted.
*/
ret = mbox_send_message(oproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
if (ret < 0) {
dev_err(dev, "mbox_send_message failed: %d\n", ret);
goto put_mbox;
}
ret = omap_rproc_enable_timers(rproc, true);
if (ret) {
dev_err(dev, "omap_rproc_enable_timers failed: %d\n", ret);
goto put_mbox;
}
ret = reset_control_deassert(oproc->reset);
if (ret) {
dev_err(dev, "reset control deassert failed: %d\n", ret);
goto disable_timers;
}
/*
* remote processor is up, so update the runtime pm status and
* enable the auto-suspend. The device usage count is incremented
* manually for balancing it for auto-suspend
*/
pm_runtime_set_active(dev);
pm_runtime_use_autosuspend(dev);
pm_runtime_get_noresume(dev);
pm_runtime_enable(dev);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
disable_timers:
omap_rproc_disable_timers(rproc, true);
put_mbox:
mbox_free_channel(oproc->mbox);
return ret;
}
/* power off the remote processor */
static int omap_rproc_stop(struct rproc *rproc)
{
struct device *dev = rproc->dev.parent;
struct omap_rproc *oproc = rproc->priv;
int ret;
/*
* cancel any possible scheduled runtime suspend by incrementing
* the device usage count, and resuming the device. The remoteproc
	 * also needs to be woken up if suspended, so that the remoteproc
	 * OS does not continue to rely on any context it has saved, and
	 * to avoid potential issues in misidentifying a subsequent device
	 * reboot as a power-restore boot
*/
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
pm_runtime_put_noidle(dev);
return ret;
}
ret = reset_control_assert(oproc->reset);
if (ret)
goto out;
ret = omap_rproc_disable_timers(rproc, true);
if (ret)
goto enable_device;
mbox_free_channel(oproc->mbox);
/*
* update the runtime pm states and status now that the remoteproc
* has stopped
*/
pm_runtime_disable(dev);
pm_runtime_dont_use_autosuspend(dev);
pm_runtime_put_noidle(dev);
pm_runtime_set_suspended(dev);
return 0;
enable_device:
reset_control_deassert(oproc->reset);
out:
/* schedule the next auto-suspend */
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
}
/**
* omap_rproc_da_to_va() - internal memory translation helper
* @rproc: remote processor to apply the address translation for
* @da: device address to translate
* @len: length of the memory buffer
*
* Custom function implementing the rproc .da_to_va ops to provide address
* translation (device address to kernel virtual address) for internal RAMs
* present in a DSP or IPU device). The translated addresses can be used
* either by the remoteproc core for loading, or by any rpmsg bus drivers.
*
* Return: translated virtual address in kernel memory space on success,
* or NULL on failure.
*/
static void *omap_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct omap_rproc *oproc = rproc->priv;
int i;
u32 offset;
if (len <= 0)
return NULL;
if (!oproc->num_mems)
return NULL;
for (i = 0; i < oproc->num_mems; i++) {
if (da >= oproc->mem[i].dev_addr && da + len <=
oproc->mem[i].dev_addr + oproc->mem[i].size) {
offset = da - oproc->mem[i].dev_addr;
/* __force to make sparse happy with type conversion */
return (__force void *)(oproc->mem[i].cpu_addr +
offset);
}
}
return NULL;
}
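
/*
 * Editor's note, a worked example for the translation above: the DRA7 DSP
 * publishes its "l2ram" at device address 0x800000 (see dra7_dsp_mems
 * below), so a lookup for da = 0x800100 with a length that fits in that
 * region returns mem[i].cpu_addr + 0x100. Any da outside the listed internal
 * memories returns NULL and the remoteproc core falls back to its other
 * translation mechanisms (such as registered carveouts).
 */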
static const struct rproc_ops omap_rproc_ops = {
.start = omap_rproc_start,
.stop = omap_rproc_stop,
.kick = omap_rproc_kick,
.da_to_va = omap_rproc_da_to_va,
};
#ifdef CONFIG_PM
static bool _is_rproc_in_standby(struct omap_rproc *oproc)
{
return ti_clk_is_in_standby(oproc->fck);
}
/* 1 sec is long enough for the remoteproc side to suspend the device */
#define DEF_SUSPEND_TIMEOUT 1000
static int _omap_rproc_suspend(struct rproc *rproc, bool auto_suspend)
{
struct device *dev = rproc->dev.parent;
struct omap_rproc *oproc = rproc->priv;
unsigned long to = msecs_to_jiffies(DEF_SUSPEND_TIMEOUT);
unsigned long ta = jiffies + to;
u32 suspend_msg = auto_suspend ?
RP_MBOX_SUSPEND_AUTO : RP_MBOX_SUSPEND_SYSTEM;
int ret;
reinit_completion(&oproc->pm_comp);
oproc->suspend_acked = false;
ret = mbox_send_message(oproc->mbox, (void *)suspend_msg);
if (ret < 0) {
dev_err(dev, "PM mbox_send_message failed: %d\n", ret);
return ret;
}
ret = wait_for_completion_timeout(&oproc->pm_comp, to);
if (!oproc->suspend_acked)
return -EBUSY;
/*
* The remoteproc side is returning the ACK message before saving the
* context, because the context saving is performed within a SYS/BIOS
	 * function, and it cannot have any inter-dependencies with the IPC
	 * layer. Also, as the SYS/BIOS needs to properly preserve the processor
* register set, sending this ACK or signalling the completion of the
* context save through a shared memory variable can never be the
* absolute last thing to be executed on the remoteproc side, and the
* MPU cannot use the ACK message as a sync point to put the remoteproc
* into reset. The only way to ensure that the remote processor has
* completed saving the context is to check that the module has reached
* STANDBY state (after saving the context, the SYS/BIOS executes the
* appropriate target-specific WFI instruction causing the module to
* enter STANDBY).
*/
while (!_is_rproc_in_standby(oproc)) {
if (time_after(jiffies, ta))
return -ETIME;
schedule();
}
ret = reset_control_assert(oproc->reset);
if (ret) {
dev_err(dev, "reset assert during suspend failed %d\n", ret);
return ret;
}
ret = omap_rproc_disable_timers(rproc, false);
if (ret) {
dev_err(dev, "disabling timers during suspend failed %d\n",
ret);
goto enable_device;
}
/*
* IOMMUs would have to be disabled specifically for runtime suspend.
* They are handled automatically through System PM callbacks for
* regular system suspend
*/
if (auto_suspend) {
ret = omap_iommu_domain_deactivate(rproc->domain);
if (ret) {
dev_err(dev, "iommu domain deactivate failed %d\n",
ret);
goto enable_timers;
}
}
return 0;
enable_timers:
	/* ignore errors while re-enabling the timers; we are already in an error path */
omap_rproc_enable_timers(rproc, false);
enable_device:
reset_control_deassert(oproc->reset);
return ret;
}
static int _omap_rproc_resume(struct rproc *rproc, bool auto_suspend)
{
struct device *dev = rproc->dev.parent;
struct omap_rproc *oproc = rproc->priv;
int ret;
/*
* IOMMUs would have to be enabled specifically for runtime resume.
* They would have been already enabled automatically through System
* PM callbacks for regular system resume
*/
if (auto_suspend) {
ret = omap_iommu_domain_activate(rproc->domain);
if (ret) {
dev_err(dev, "omap_iommu activate failed %d\n", ret);
goto out;
}
}
/* boot address could be lost after suspend, so restore it */
if (oproc->boot_data) {
ret = omap_rproc_write_dsp_boot_addr(rproc);
if (ret) {
dev_err(dev, "boot address restore failed %d\n", ret);
goto suspend_iommu;
}
}
ret = omap_rproc_enable_timers(rproc, false);
if (ret) {
dev_err(dev, "enabling timers during resume failed %d\n", ret);
goto suspend_iommu;
}
ret = reset_control_deassert(oproc->reset);
if (ret) {
dev_err(dev, "reset deassert during resume failed %d\n", ret);
goto disable_timers;
}
return 0;
disable_timers:
omap_rproc_disable_timers(rproc, false);
suspend_iommu:
if (auto_suspend)
omap_iommu_domain_deactivate(rproc->domain);
out:
return ret;
}
static int __maybe_unused omap_rproc_suspend(struct device *dev)
{
struct rproc *rproc = dev_get_drvdata(dev);
struct omap_rproc *oproc = rproc->priv;
int ret = 0;
mutex_lock(&rproc->lock);
if (rproc->state == RPROC_OFFLINE)
goto out;
if (rproc->state == RPROC_SUSPENDED)
goto out;
if (rproc->state != RPROC_RUNNING) {
ret = -EBUSY;
goto out;
}
ret = _omap_rproc_suspend(rproc, false);
if (ret) {
dev_err(dev, "suspend failed %d\n", ret);
goto out;
}
/*
* remoteproc is running at the time of system suspend, so remember
* it so as to wake it up during system resume
*/
oproc->need_resume = true;
rproc->state = RPROC_SUSPENDED;
out:
mutex_unlock(&rproc->lock);
return ret;
}
static int __maybe_unused omap_rproc_resume(struct device *dev)
{
struct rproc *rproc = dev_get_drvdata(dev);
struct omap_rproc *oproc = rproc->priv;
int ret = 0;
mutex_lock(&rproc->lock);
if (rproc->state == RPROC_OFFLINE)
goto out;
if (rproc->state != RPROC_SUSPENDED) {
ret = -EBUSY;
goto out;
}
/*
* remoteproc was auto-suspended at the time of system suspend,
	 * so no need to wake up the processor (leave it in suspended
* state, will be woken up during a subsequent runtime_resume)
*/
if (!oproc->need_resume)
goto out;
ret = _omap_rproc_resume(rproc, false);
if (ret) {
dev_err(dev, "resume failed %d\n", ret);
goto out;
}
oproc->need_resume = false;
rproc->state = RPROC_RUNNING;
pm_runtime_mark_last_busy(dev);
out:
mutex_unlock(&rproc->lock);
return ret;
}
static int omap_rproc_runtime_suspend(struct device *dev)
{
struct rproc *rproc = dev_get_drvdata(dev);
struct omap_rproc *oproc = rproc->priv;
int ret;
mutex_lock(&rproc->lock);
if (rproc->state == RPROC_CRASHED) {
dev_dbg(dev, "rproc cannot be runtime suspended when crashed!\n");
ret = -EBUSY;
goto out;
}
if (WARN_ON(rproc->state != RPROC_RUNNING)) {
dev_err(dev, "rproc cannot be runtime suspended when not running!\n");
ret = -EBUSY;
goto out;
}
/*
* do not even attempt suspend if the remote processor is not
* idled for runtime auto-suspend
*/
if (!_is_rproc_in_standby(oproc)) {
ret = -EBUSY;
goto abort;
}
ret = _omap_rproc_suspend(rproc, true);
if (ret)
goto abort;
rproc->state = RPROC_SUSPENDED;
mutex_unlock(&rproc->lock);
return 0;
abort:
pm_runtime_mark_last_busy(dev);
out:
mutex_unlock(&rproc->lock);
return ret;
}
static int omap_rproc_runtime_resume(struct device *dev)
{
struct rproc *rproc = dev_get_drvdata(dev);
int ret;
mutex_lock(&rproc->lock);
if (WARN_ON(rproc->state != RPROC_SUSPENDED)) {
dev_err(dev, "rproc cannot be runtime resumed if not suspended! state=%d\n",
rproc->state);
ret = -EBUSY;
goto out;
}
ret = _omap_rproc_resume(rproc, true);
if (ret) {
dev_err(dev, "runtime resume failed %d\n", ret);
goto out;
}
rproc->state = RPROC_RUNNING;
out:
mutex_unlock(&rproc->lock);
return ret;
}
#endif /* CONFIG_PM */
static const struct omap_rproc_mem_data ipu_mems[] = {
{ .name = "l2ram", .dev_addr = 0x20000000 },
{ },
};
static const struct omap_rproc_mem_data dra7_dsp_mems[] = {
{ .name = "l2ram", .dev_addr = 0x800000 },
{ .name = "l1pram", .dev_addr = 0xe00000 },
{ .name = "l1dram", .dev_addr = 0xf00000 },
{ },
};
static const struct omap_rproc_dev_data omap4_dsp_dev_data = {
.device_name = "dsp",
};
static const struct omap_rproc_dev_data omap4_ipu_dev_data = {
.device_name = "ipu",
.mems = ipu_mems,
};
static const struct omap_rproc_dev_data omap5_dsp_dev_data = {
.device_name = "dsp",
};
static const struct omap_rproc_dev_data omap5_ipu_dev_data = {
.device_name = "ipu",
.mems = ipu_mems,
};
static const struct omap_rproc_dev_data dra7_dsp_dev_data = {
.device_name = "dsp",
.mems = dra7_dsp_mems,
};
static const struct omap_rproc_dev_data dra7_ipu_dev_data = {
.device_name = "ipu",
.mems = ipu_mems,
};
static const struct of_device_id omap_rproc_of_match[] = {
{
.compatible = "ti,omap4-dsp",
.data = &omap4_dsp_dev_data,
},
{
.compatible = "ti,omap4-ipu",
.data = &omap4_ipu_dev_data,
},
{
.compatible = "ti,omap5-dsp",
.data = &omap5_dsp_dev_data,
},
{
.compatible = "ti,omap5-ipu",
.data = &omap5_ipu_dev_data,
},
{
.compatible = "ti,dra7-dsp",
.data = &dra7_dsp_dev_data,
},
{
.compatible = "ti,dra7-ipu",
.data = &dra7_ipu_dev_data,
},
{
/* end */
},
};
MODULE_DEVICE_TABLE(of, omap_rproc_of_match);
static const char *omap_rproc_get_firmware(struct platform_device *pdev)
{
const char *fw_name;
int ret;
ret = of_property_read_string(pdev->dev.of_node, "firmware-name",
&fw_name);
if (ret)
return ERR_PTR(ret);
return fw_name;
}
static int omap_rproc_get_boot_data(struct platform_device *pdev,
struct rproc *rproc)
{
struct device_node *np = pdev->dev.of_node;
struct omap_rproc *oproc = rproc->priv;
const struct omap_rproc_dev_data *data;
int ret;
data = of_device_get_match_data(&pdev->dev);
if (!data)
return -ENODEV;
if (!of_property_read_bool(np, "ti,bootreg"))
return 0;
oproc->boot_data = devm_kzalloc(&pdev->dev, sizeof(*oproc->boot_data),
GFP_KERNEL);
if (!oproc->boot_data)
return -ENOMEM;
oproc->boot_data->syscon =
syscon_regmap_lookup_by_phandle(np, "ti,bootreg");
if (IS_ERR(oproc->boot_data->syscon)) {
ret = PTR_ERR(oproc->boot_data->syscon);
return ret;
}
if (of_property_read_u32_index(np, "ti,bootreg", 1,
&oproc->boot_data->boot_reg)) {
dev_err(&pdev->dev, "couldn't get the boot register\n");
return -EINVAL;
}
of_property_read_u32_index(np, "ti,bootreg", 2,
&oproc->boot_data->boot_reg_shift);
return 0;
}
static int omap_rproc_of_get_internal_memories(struct platform_device *pdev,
struct rproc *rproc)
{
struct omap_rproc *oproc = rproc->priv;
struct device *dev = &pdev->dev;
const struct omap_rproc_dev_data *data;
struct resource *res;
int num_mems;
int i;
data = of_device_get_match_data(dev);
if (!data)
return -ENODEV;
if (!data->mems)
return 0;
num_mems = of_property_count_elems_of_size(dev->of_node, "reg",
sizeof(u32)) / 2;
oproc->mem = devm_kcalloc(dev, num_mems, sizeof(*oproc->mem),
GFP_KERNEL);
if (!oproc->mem)
return -ENOMEM;
for (i = 0; data->mems[i].name; i++) {
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
data->mems[i].name);
if (!res) {
dev_err(dev, "no memory defined for %s\n",
data->mems[i].name);
return -ENOMEM;
}
oproc->mem[i].cpu_addr = devm_ioremap_resource(dev, res);
if (IS_ERR(oproc->mem[i].cpu_addr)) {
dev_err(dev, "failed to parse and map %s memory\n",
data->mems[i].name);
return PTR_ERR(oproc->mem[i].cpu_addr);
}
oproc->mem[i].bus_addr = res->start;
oproc->mem[i].dev_addr = data->mems[i].dev_addr;
oproc->mem[i].size = resource_size(res);
dev_dbg(dev, "memory %8s: bus addr %pa size 0x%x va %pK da 0x%x\n",
data->mems[i].name, &oproc->mem[i].bus_addr,
oproc->mem[i].size, oproc->mem[i].cpu_addr,
oproc->mem[i].dev_addr);
}
oproc->num_mems = num_mems;
return 0;
}
#ifdef CONFIG_OMAP_REMOTEPROC_WATCHDOG
static int omap_rproc_count_wdog_timers(struct device *dev)
{
struct device_node *np = dev->of_node;
int ret;
ret = of_count_phandle_with_args(np, "ti,watchdog-timers", NULL);
if (ret <= 0) {
dev_dbg(dev, "device does not have watchdog timers, status = %d\n",
ret);
ret = 0;
}
return ret;
}
#else
static int omap_rproc_count_wdog_timers(struct device *dev)
{
return 0;
}
#endif
static int omap_rproc_of_get_timers(struct platform_device *pdev,
struct rproc *rproc)
{
struct device_node *np = pdev->dev.of_node;
struct omap_rproc *oproc = rproc->priv;
struct device *dev = &pdev->dev;
int num_timers;
	/*
	 * Timer nodes are referenced directly from client nodes as phandles,
	 * so retrieve the count using the appropriate size.
	 */
oproc->num_timers = of_count_phandle_with_args(np, "ti,timers", NULL);
if (oproc->num_timers <= 0) {
dev_dbg(dev, "device does not have timers, status = %d\n",
oproc->num_timers);
oproc->num_timers = 0;
}
oproc->num_wd_timers = omap_rproc_count_wdog_timers(dev);
num_timers = oproc->num_timers + oproc->num_wd_timers;
if (num_timers) {
oproc->timers = devm_kcalloc(dev, num_timers,
sizeof(*oproc->timers),
GFP_KERNEL);
if (!oproc->timers)
return -ENOMEM;
dev_dbg(dev, "device has %d tick timers and %d watchdog timers\n",
oproc->num_timers, oproc->num_wd_timers);
}
return 0;
}
static int omap_rproc_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct omap_rproc *oproc;
struct rproc *rproc;
const char *firmware;
int ret;
struct reset_control *reset;
if (!np) {
dev_err(&pdev->dev, "only DT-based devices are supported\n");
return -ENODEV;
}
reset = devm_reset_control_array_get_exclusive(&pdev->dev);
if (IS_ERR(reset))
return PTR_ERR(reset);
firmware = omap_rproc_get_firmware(pdev);
if (IS_ERR(firmware))
return PTR_ERR(firmware);
ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(&pdev->dev, "dma_set_coherent_mask: %d\n", ret);
return ret;
}
rproc = rproc_alloc(&pdev->dev, dev_name(&pdev->dev), &omap_rproc_ops,
firmware, sizeof(*oproc));
if (!rproc)
return -ENOMEM;
oproc = rproc->priv;
oproc->rproc = rproc;
oproc->reset = reset;
/* All existing OMAP IPU and DSP processors have an MMU */
rproc->has_iommu = true;
ret = omap_rproc_of_get_internal_memories(pdev, rproc);
if (ret)
goto free_rproc;
ret = omap_rproc_get_boot_data(pdev, rproc);
if (ret)
goto free_rproc;
ret = omap_rproc_of_get_timers(pdev, rproc);
if (ret)
goto free_rproc;
init_completion(&oproc->pm_comp);
oproc->autosuspend_delay = DEFAULT_AUTOSUSPEND_DELAY;
of_property_read_u32(pdev->dev.of_node, "ti,autosuspend-delay-ms",
&oproc->autosuspend_delay);
pm_runtime_set_autosuspend_delay(&pdev->dev, oproc->autosuspend_delay);
oproc->fck = devm_clk_get(&pdev->dev, 0);
if (IS_ERR(oproc->fck)) {
ret = PTR_ERR(oproc->fck);
goto free_rproc;
}
ret = of_reserved_mem_device_init(&pdev->dev);
if (ret) {
dev_warn(&pdev->dev, "device does not have specific CMA pool.\n");
dev_warn(&pdev->dev, "Typically this should be provided,\n");
dev_warn(&pdev->dev, "only omit if you know what you are doing.\n");
}
platform_set_drvdata(pdev, rproc);
ret = rproc_add(rproc);
if (ret)
goto release_mem;
return 0;
release_mem:
of_reserved_mem_device_release(&pdev->dev);
free_rproc:
rproc_free(rproc);
return ret;
}
static void omap_rproc_remove(struct platform_device *pdev)
{
struct rproc *rproc = platform_get_drvdata(pdev);
rproc_del(rproc);
rproc_free(rproc);
of_reserved_mem_device_release(&pdev->dev);
}
static const struct dev_pm_ops omap_rproc_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(omap_rproc_suspend, omap_rproc_resume)
SET_RUNTIME_PM_OPS(omap_rproc_runtime_suspend,
omap_rproc_runtime_resume, NULL)
};
static struct platform_driver omap_rproc_driver = {
.probe = omap_rproc_probe,
.remove_new = omap_rproc_remove,
.driver = {
.name = "omap-rproc",
.pm = &omap_rproc_pm_ops,
.of_match_table = omap_rproc_of_match,
},
};
module_platform_driver(omap_rproc_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("OMAP Remote Processor control driver");
|
linux-master
|
drivers/remoteproc/omap_remoteproc.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* vDPA bus.
*
* Copyright (c) 2020, Red Hat. All rights reserved.
* Author: Jason Wang <[email protected]>
*
*/
#include <linux/module.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/vdpa.h>
#include <uapi/linux/vdpa.h>
#include <net/genetlink.h>
#include <linux/mod_devicetable.h>
#include <linux/virtio_ids.h>
static LIST_HEAD(mdev_head);
/* A global rwsem that protects vdpa management device and device level operations. */
static DECLARE_RWSEM(vdpa_dev_lock);
static DEFINE_IDA(vdpa_index_ida);
void vdpa_set_status(struct vdpa_device *vdev, u8 status)
{
down_write(&vdev->cf_lock);
vdev->config->set_status(vdev, status);
up_write(&vdev->cf_lock);
}
EXPORT_SYMBOL(vdpa_set_status);
static struct genl_family vdpa_nl_family;
static int vdpa_dev_probe(struct device *d)
{
struct vdpa_device *vdev = dev_to_vdpa(d);
struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver);
const struct vdpa_config_ops *ops = vdev->config;
u32 max_num, min_num = 1;
int ret = 0;
d->dma_mask = &d->coherent_dma_mask;
ret = dma_set_mask_and_coherent(d, DMA_BIT_MASK(64));
if (ret)
return ret;
max_num = ops->get_vq_num_max(vdev);
if (ops->get_vq_num_min)
min_num = ops->get_vq_num_min(vdev);
if (max_num < min_num)
return -EINVAL;
if (drv && drv->probe)
ret = drv->probe(vdev);
return ret;
}
static void vdpa_dev_remove(struct device *d)
{
struct vdpa_device *vdev = dev_to_vdpa(d);
struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver);
if (drv && drv->remove)
drv->remove(vdev);
}
static int vdpa_dev_match(struct device *dev, struct device_driver *drv)
{
struct vdpa_device *vdev = dev_to_vdpa(dev);
/* Check override first, and if set, only use the named driver */
if (vdev->driver_override)
return strcmp(vdev->driver_override, drv->name) == 0;
/* Currently devices must be supported by all vDPA bus drivers */
return 1;
}
static ssize_t driver_override_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct vdpa_device *vdev = dev_to_vdpa(dev);
int ret;
ret = driver_set_override(dev, &vdev->driver_override, buf, count);
if (ret)
return ret;
return count;
}
static ssize_t driver_override_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct vdpa_device *vdev = dev_to_vdpa(dev);
ssize_t len;
device_lock(dev);
len = snprintf(buf, PAGE_SIZE, "%s\n", vdev->driver_override);
device_unlock(dev);
return len;
}
static DEVICE_ATTR_RW(driver_override);
static struct attribute *vdpa_dev_attrs[] = {
&dev_attr_driver_override.attr,
NULL,
};
static const struct attribute_group vdpa_dev_group = {
.attrs = vdpa_dev_attrs,
};
__ATTRIBUTE_GROUPS(vdpa_dev);
static struct bus_type vdpa_bus = {
.name = "vdpa",
.dev_groups = vdpa_dev_groups,
.match = vdpa_dev_match,
.probe = vdpa_dev_probe,
.remove = vdpa_dev_remove,
};
static void vdpa_release_dev(struct device *d)
{
struct vdpa_device *vdev = dev_to_vdpa(d);
const struct vdpa_config_ops *ops = vdev->config;
if (ops->free)
ops->free(vdev);
ida_simple_remove(&vdpa_index_ida, vdev->index);
kfree(vdev->driver_override);
kfree(vdev);
}
/**
 * __vdpa_alloc_device - allocate and initialize a vDPA device
 * This allows the driver to do some preparation after the device is
 * initialized but before it is registered.
 * @parent: the parent device
 * @config: the bus operations that are supported by this device
 * @ngroups: number of groups supported by this device
 * @nas: number of address spaces supported by this device
 * @size: size of the parent structure that contains private data
 * @name: name of the vdpa device; optional.
 * @use_va: indicate whether virtual addresses must be used by this device
 *
 * Drivers should use the vdpa_alloc_device() wrapper macro instead of
 * calling this directly.
 *
 * Return: Returns an error when parent/config/dma_dev is not set or an
 * ida entry cannot be allocated.
 */
struct vdpa_device *__vdpa_alloc_device(struct device *parent,
const struct vdpa_config_ops *config,
unsigned int ngroups, unsigned int nas,
size_t size, const char *name,
bool use_va)
{
struct vdpa_device *vdev;
int err = -EINVAL;
if (!config)
goto err;
if (!!config->dma_map != !!config->dma_unmap)
goto err;
	/* It should only work for a device that uses an on-chip IOMMU */
if (use_va && !(config->dma_map || config->set_map))
goto err;
err = -ENOMEM;
vdev = kzalloc(size, GFP_KERNEL);
if (!vdev)
goto err;
err = ida_alloc(&vdpa_index_ida, GFP_KERNEL);
if (err < 0)
goto err_ida;
vdev->dev.bus = &vdpa_bus;
vdev->dev.parent = parent;
vdev->dev.release = vdpa_release_dev;
vdev->index = err;
vdev->config = config;
vdev->features_valid = false;
vdev->use_va = use_va;
vdev->ngroups = ngroups;
vdev->nas = nas;
if (name)
err = dev_set_name(&vdev->dev, "%s", name);
else
err = dev_set_name(&vdev->dev, "vdpa%u", vdev->index);
if (err)
goto err_name;
init_rwsem(&vdev->cf_lock);
device_initialize(&vdev->dev);
return vdev;
err_name:
ida_simple_remove(&vdpa_index_ida, vdev->index);
err_ida:
kfree(vdev);
err:
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(__vdpa_alloc_device);
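/*
 * Illustrative sketch, not part of the original file: how a parent driver
 * typically uses the vdpa_alloc_device() wrapper documented above and then
 * registers the result.  The "foo_*" names are hypothetical and only show
 * the flow (the ifcvf driver further below is a real example).  Note that
 * the struct vdpa_device member must be the first field of the wrapping
 * structure, since the wrapper macro relies on it being at offset zero.
 * Error handling (put_device() on registration failure) is omitted.
 */
#if 0
struct foo_vdpa {
	struct vdpa_device vdpa;	/* must be the first member */
	void *priv;
};
static int foo_create(struct device *parent,
		      const struct vdpa_config_ops *foo_vdpa_ops)
{
	struct foo_vdpa *foo;
	/* one virtqueue group, one address space, auto-generated name */
	foo = vdpa_alloc_device(struct foo_vdpa, vdpa, parent,
				foo_vdpa_ops, 1, 1, NULL, false);
	if (IS_ERR(foo))
		return PTR_ERR(foo);
	/* two virtqueues supported by this hypothetical device */
	return vdpa_register_device(&foo->vdpa, 2);
}
#endif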
static int vdpa_name_match(struct device *dev, const void *data)
{
struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
return (strcmp(dev_name(&vdev->dev), data) == 0);
}
static int __vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
{
struct device *dev;
vdev->nvqs = nvqs;
lockdep_assert_held(&vdpa_dev_lock);
dev = bus_find_device(&vdpa_bus, NULL, dev_name(&vdev->dev), vdpa_name_match);
if (dev) {
put_device(dev);
return -EEXIST;
}
return device_add(&vdev->dev);
}
/**
 * _vdpa_register_device - register a vDPA device with the vdpa lock held
 * The caller must have made a successful call to vdpa_alloc_device() before.
 * The caller must invoke this routine in the management device dev_add()
 * callback after setting up a valid mgmtdev for this vdpa device.
 * @vdev: the vdpa device to be registered to the vDPA bus
 * @nvqs: number of virtqueues supported by this device
 *
 * Return: Returns an error when the device cannot be added to the vDPA bus
 */
int _vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
{
if (!vdev->mdev)
return -EINVAL;
return __vdpa_register_device(vdev, nvqs);
}
EXPORT_SYMBOL_GPL(_vdpa_register_device);
/**
 * vdpa_register_device - register a vDPA device
 * Callers must have made a successful call to vdpa_alloc_device() before.
 * @vdev: the vdpa device to be registered to the vDPA bus
 * @nvqs: number of virtqueues supported by this device
 *
 * Return: Returns an error when the device cannot be added to the vDPA bus
 */
int vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
{
int err;
down_write(&vdpa_dev_lock);
err = __vdpa_register_device(vdev, nvqs);
up_write(&vdpa_dev_lock);
return err;
}
EXPORT_SYMBOL_GPL(vdpa_register_device);
/**
 * _vdpa_unregister_device - unregister a vDPA device
 * The caller must invoke this routine as part of the management device
 * dev_del() callback.
 * @vdev: the vdpa device to be unregistered from the vDPA bus
 */
void _vdpa_unregister_device(struct vdpa_device *vdev)
{
lockdep_assert_held(&vdpa_dev_lock);
WARN_ON(!vdev->mdev);
device_unregister(&vdev->dev);
}
EXPORT_SYMBOL_GPL(_vdpa_unregister_device);
/**
 * vdpa_unregister_device - unregister a vDPA device
 * @vdev: the vdpa device to be unregistered from the vDPA bus
 */
void vdpa_unregister_device(struct vdpa_device *vdev)
{
down_write(&vdpa_dev_lock);
device_unregister(&vdev->dev);
up_write(&vdpa_dev_lock);
}
EXPORT_SYMBOL_GPL(vdpa_unregister_device);
/**
 * __vdpa_register_driver - register a vDPA device driver
 * @drv: the vdpa device driver to be registered
 * @owner: module owner of the driver
 *
 * Return: Returns an error when the registration fails
 */
int __vdpa_register_driver(struct vdpa_driver *drv, struct module *owner)
{
drv->driver.bus = &vdpa_bus;
drv->driver.owner = owner;
return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(__vdpa_register_driver);
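/*
 * Illustrative sketch, not part of the original file: a minimal vDPA bus
 * driver registration.  The "foo_*" names are hypothetical; this assumes
 * the vdpa_register_driver()/module_vdpa_driver() helpers from
 * <linux/vdpa.h>, which expand to __vdpa_register_driver(drv, THIS_MODULE)
 * above.
 */
#if 0
static int foo_vdpa_probe(struct vdpa_device *vdev)
{
	/* bind to the device, set up virtqueues, etc. */
	return 0;
}
static void foo_vdpa_remove(struct vdpa_device *vdev)
{
	/* undo whatever probe set up */
}
static struct vdpa_driver foo_vdpa_driver = {
	.driver.name	= "foo_vdpa",
	.probe		= foo_vdpa_probe,
	.remove		= foo_vdpa_remove,
};
module_vdpa_driver(foo_vdpa_driver);
#endif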
/**
* vdpa_unregister_driver - unregister a vDPA device driver
* @drv: the vdpa device driver to be unregistered
*/
void vdpa_unregister_driver(struct vdpa_driver *drv)
{
driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(vdpa_unregister_driver);
/**
 * vdpa_mgmtdev_register - register a vdpa management device
 *
 * @mdev: Pointer to vdpa management device
 * vdpa_mgmtdev_register() registers a vdpa management device which supports
 * vdpa device management.
 * Return: Returns 0 on success, or an error when the required callback ops
 * are not initialized.
 */
int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev)
{
if (!mdev->device || !mdev->ops || !mdev->ops->dev_add || !mdev->ops->dev_del)
return -EINVAL;
INIT_LIST_HEAD(&mdev->list);
down_write(&vdpa_dev_lock);
list_add_tail(&mdev->list, &mdev_head);
up_write(&vdpa_dev_lock);
return 0;
}
EXPORT_SYMBOL_GPL(vdpa_mgmtdev_register);
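/*
 * Illustrative sketch, not part of the original file: the minimum a parent
 * driver fills in before calling vdpa_mgmtdev_register().  The "foo_*"
 * names are hypothetical; the ifcvf driver further below shows a complete,
 * real setup.  dev_add() and dev_del() are mandatory and are expected to
 * call _vdpa_register_device()/_vdpa_unregister_device() respectively.
 */
#if 0
static int foo_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
		       const struct vdpa_dev_set_config *config)
{
	/* allocate a vdpa device here and _vdpa_register_device() it */
	return 0;
}
static void foo_dev_del(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev)
{
	_vdpa_unregister_device(dev);
}
static const struct vdpa_mgmtdev_ops foo_mgmt_ops = {
	.dev_add = foo_dev_add,
	.dev_del = foo_dev_del,
};
static struct virtio_device_id foo_id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};
static struct vdpa_mgmt_dev foo_mgmt_dev = {
	.ops = &foo_mgmt_ops,
	.id_table = foo_id_table,
};
static int foo_register_mgmtdev(struct device *dev)
{
	foo_mgmt_dev.device = dev;	/* backing device, e.g. &pdev->dev */
	return vdpa_mgmtdev_register(&foo_mgmt_dev);
}
#endif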
static int vdpa_match_remove(struct device *dev, void *data)
{
struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
struct vdpa_mgmt_dev *mdev = vdev->mdev;
if (mdev == data)
mdev->ops->dev_del(mdev, vdev);
return 0;
}
void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev)
{
down_write(&vdpa_dev_lock);
list_del(&mdev->list);
	/* Filter out all the entries belonging to this management device and delete them. */
bus_for_each_dev(&vdpa_bus, NULL, mdev, vdpa_match_remove);
up_write(&vdpa_dev_lock);
}
EXPORT_SYMBOL_GPL(vdpa_mgmtdev_unregister);
static void vdpa_get_config_unlocked(struct vdpa_device *vdev,
unsigned int offset,
void *buf, unsigned int len)
{
const struct vdpa_config_ops *ops = vdev->config;
	/*
	 * Config accesses aren't supposed to trigger before features are set.
	 * If they do happen, we assume a legacy guest.
	 */
if (!vdev->features_valid)
vdpa_set_features_unlocked(vdev, 0);
ops->get_config(vdev, offset, buf, len);
}
/**
* vdpa_get_config - Get one or more device configuration fields.
* @vdev: vdpa device to operate on
* @offset: starting byte offset of the field
* @buf: buffer pointer to read to
* @len: length of the configuration fields in bytes
*/
void vdpa_get_config(struct vdpa_device *vdev, unsigned int offset,
void *buf, unsigned int len)
{
down_read(&vdev->cf_lock);
vdpa_get_config_unlocked(vdev, offset, buf, len);
up_read(&vdev->cf_lock);
}
EXPORT_SYMBOL_GPL(vdpa_get_config);
/**
* vdpa_set_config - Set one or more device configuration fields.
* @vdev: vdpa device to operate on
* @offset: starting byte offset of the field
* @buf: buffer pointer to read from
* @length: length of the configuration fields in bytes
*/
void vdpa_set_config(struct vdpa_device *vdev, unsigned int offset,
const void *buf, unsigned int length)
{
down_write(&vdev->cf_lock);
vdev->config->set_config(vdev, offset, buf, length);
up_write(&vdev->cf_lock);
}
EXPORT_SYMBOL_GPL(vdpa_set_config);
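/*
 * Illustrative sketch, not part of the original file: reading a single
 * field of the device config space through vdpa_get_config().  The function
 * name is hypothetical and the example assumes the virtio-net config layout
 * (struct virtio_net_config) and ETH_ALEN from <linux/if_ether.h>.
 */
#if 0
static void foo_read_mac(struct vdpa_device *vdev, u8 mac[ETH_ALEN])
{
	/* @offset/@len select the mac[] field inside the config space */
	vdpa_get_config(vdev, offsetof(struct virtio_net_config, mac),
			mac, ETH_ALEN);
}
#endif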
static bool mgmtdev_handle_match(const struct vdpa_mgmt_dev *mdev,
const char *busname, const char *devname)
{
	/* Bus name is optional for a simulated management device; the request
	 * and the management device must agree on whether a bus is present,
	 * otherwise it is not a match.
	 */
if ((busname && !mdev->device->bus) || (!busname && mdev->device->bus))
return false;
if (!busname && strcmp(dev_name(mdev->device), devname) == 0)
return true;
if (busname && (strcmp(mdev->device->bus->name, busname) == 0) &&
(strcmp(dev_name(mdev->device), devname) == 0))
return true;
return false;
}
static struct vdpa_mgmt_dev *vdpa_mgmtdev_get_from_attr(struct nlattr **attrs)
{
struct vdpa_mgmt_dev *mdev;
const char *busname = NULL;
const char *devname;
if (!attrs[VDPA_ATTR_MGMTDEV_DEV_NAME])
return ERR_PTR(-EINVAL);
devname = nla_data(attrs[VDPA_ATTR_MGMTDEV_DEV_NAME]);
if (attrs[VDPA_ATTR_MGMTDEV_BUS_NAME])
busname = nla_data(attrs[VDPA_ATTR_MGMTDEV_BUS_NAME]);
list_for_each_entry(mdev, &mdev_head, list) {
if (mgmtdev_handle_match(mdev, busname, devname))
return mdev;
}
return ERR_PTR(-ENODEV);
}
static int vdpa_nl_mgmtdev_handle_fill(struct sk_buff *msg, const struct vdpa_mgmt_dev *mdev)
{
if (mdev->device->bus &&
nla_put_string(msg, VDPA_ATTR_MGMTDEV_BUS_NAME, mdev->device->bus->name))
return -EMSGSIZE;
if (nla_put_string(msg, VDPA_ATTR_MGMTDEV_DEV_NAME, dev_name(mdev->device)))
return -EMSGSIZE;
return 0;
}
static u64 vdpa_mgmtdev_get_classes(const struct vdpa_mgmt_dev *mdev,
unsigned int *nclasses)
{
u64 supported_classes = 0;
unsigned int n = 0;
for (int i = 0; mdev->id_table[i].device; i++) {
if (mdev->id_table[i].device > 63)
continue;
supported_classes |= BIT_ULL(mdev->id_table[i].device);
n++;
}
if (nclasses)
*nclasses = n;
return supported_classes;
}
static int vdpa_mgmtdev_fill(const struct vdpa_mgmt_dev *mdev, struct sk_buff *msg,
u32 portid, u32 seq, int flags)
{
void *hdr;
int err;
hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_MGMTDEV_NEW);
if (!hdr)
return -EMSGSIZE;
err = vdpa_nl_mgmtdev_handle_fill(msg, mdev);
if (err)
goto msg_err;
if (nla_put_u64_64bit(msg, VDPA_ATTR_MGMTDEV_SUPPORTED_CLASSES,
vdpa_mgmtdev_get_classes(mdev, NULL),
VDPA_ATTR_UNSPEC)) {
err = -EMSGSIZE;
goto msg_err;
}
if (nla_put_u32(msg, VDPA_ATTR_DEV_MGMTDEV_MAX_VQS,
mdev->max_supported_vqs)) {
err = -EMSGSIZE;
goto msg_err;
}
if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_SUPPORTED_FEATURES,
mdev->supported_features, VDPA_ATTR_PAD)) {
err = -EMSGSIZE;
goto msg_err;
}
genlmsg_end(msg, hdr);
return 0;
msg_err:
genlmsg_cancel(msg, hdr);
return err;
}
static int vdpa_nl_cmd_mgmtdev_get_doit(struct sk_buff *skb, struct genl_info *info)
{
struct vdpa_mgmt_dev *mdev;
struct sk_buff *msg;
int err;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
down_read(&vdpa_dev_lock);
mdev = vdpa_mgmtdev_get_from_attr(info->attrs);
if (IS_ERR(mdev)) {
up_read(&vdpa_dev_lock);
NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified mgmt device");
err = PTR_ERR(mdev);
goto out;
}
err = vdpa_mgmtdev_fill(mdev, msg, info->snd_portid, info->snd_seq, 0);
up_read(&vdpa_dev_lock);
if (err)
goto out;
err = genlmsg_reply(msg, info);
return err;
out:
nlmsg_free(msg);
return err;
}
static int
vdpa_nl_cmd_mgmtdev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
{
struct vdpa_mgmt_dev *mdev;
int start = cb->args[0];
int idx = 0;
int err;
down_read(&vdpa_dev_lock);
list_for_each_entry(mdev, &mdev_head, list) {
if (idx < start) {
idx++;
continue;
}
err = vdpa_mgmtdev_fill(mdev, msg, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI);
if (err)
goto out;
idx++;
}
out:
up_read(&vdpa_dev_lock);
cb->args[0] = idx;
return msg->len;
}
#define VDPA_DEV_NET_ATTRS_MASK (BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR) | \
BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU) | \
BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP))
/*
 * Bitmask for all per-device features: feature bits VIRTIO_TRANSPORT_F_START
 * through VIRTIO_TRANSPORT_F_END are unset, i.e. 0xfffffc000fffffff for
 * all 64-bit features. If the features are extended beyond 64 bits, or new
 * "holes" are reserved for other types of features than per-device ones,
 * this macro would have to be updated.
 */
#define VIRTIO_DEVICE_F_MASK (~0ULL << (VIRTIO_TRANSPORT_F_END + 1) | \
((1ULL << VIRTIO_TRANSPORT_F_START) - 1))
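/*
 * Illustrative check, not part of the original file: assuming the current
 * values VIRTIO_TRANSPORT_F_START == 28 and VIRTIO_TRANSPORT_F_END == 41,
 * the mask is (~0ULL << 42) | ((1ULL << 28) - 1), which is exactly the
 * 0xfffffc000fffffff value quoted in the comment above.
 */
#if 0
static_assert(VIRTIO_DEVICE_F_MASK == 0xfffffc000fffffffULL);
#endif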
static int vdpa_nl_cmd_dev_add_set_doit(struct sk_buff *skb, struct genl_info *info)
{
struct vdpa_dev_set_config config = {};
struct nlattr **nl_attrs = info->attrs;
struct vdpa_mgmt_dev *mdev;
unsigned int ncls = 0;
const u8 *macaddr;
const char *name;
u64 classes;
int err = 0;
if (!info->attrs[VDPA_ATTR_DEV_NAME])
return -EINVAL;
name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]) {
macaddr = nla_data(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]);
memcpy(config.net.mac, macaddr, sizeof(config.net.mac));
config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR);
}
if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU]) {
config.net.mtu =
nla_get_u16(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU]);
config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU);
}
if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MAX_VQP]) {
config.net.max_vq_pairs =
nla_get_u16(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MAX_VQP]);
if (!config.net.max_vq_pairs) {
NL_SET_ERR_MSG_MOD(info->extack,
"At least one pair of VQs is required");
return -EINVAL;
}
config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP);
}
if (nl_attrs[VDPA_ATTR_DEV_FEATURES]) {
u64 missing = 0x0ULL;
config.device_features =
nla_get_u64(nl_attrs[VDPA_ATTR_DEV_FEATURES]);
if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR] &&
!(config.device_features & BIT_ULL(VIRTIO_NET_F_MAC)))
missing |= BIT_ULL(VIRTIO_NET_F_MAC);
if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU] &&
!(config.device_features & BIT_ULL(VIRTIO_NET_F_MTU)))
missing |= BIT_ULL(VIRTIO_NET_F_MTU);
if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MAX_VQP] &&
config.net.max_vq_pairs > 1 &&
!(config.device_features & BIT_ULL(VIRTIO_NET_F_MQ)))
missing |= BIT_ULL(VIRTIO_NET_F_MQ);
if (missing) {
NL_SET_ERR_MSG_FMT_MOD(info->extack,
"Missing features 0x%llx for provided attributes",
missing);
return -EINVAL;
}
config.mask |= BIT_ULL(VDPA_ATTR_DEV_FEATURES);
}
	/* Skip the capability check if the user did not ask to configure any
	 * device networking attributes. The user may have used a device-specific
	 * method to configure such attributes, or may simply rely on the device
	 * default attributes.
	 */
if ((config.mask & VDPA_DEV_NET_ATTRS_MASK) &&
!netlink_capable(skb, CAP_NET_ADMIN))
return -EPERM;
down_write(&vdpa_dev_lock);
mdev = vdpa_mgmtdev_get_from_attr(info->attrs);
if (IS_ERR(mdev)) {
NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified management device");
err = PTR_ERR(mdev);
goto err;
}
if ((config.mask & mdev->config_attr_mask) != config.mask) {
NL_SET_ERR_MSG_FMT_MOD(info->extack,
"Some provided attributes are not supported: 0x%llx",
config.mask & ~mdev->config_attr_mask);
err = -EOPNOTSUPP;
goto err;
}
classes = vdpa_mgmtdev_get_classes(mdev, &ncls);
if (config.mask & VDPA_DEV_NET_ATTRS_MASK &&
!(classes & BIT_ULL(VIRTIO_ID_NET))) {
NL_SET_ERR_MSG_MOD(info->extack,
"Network class attributes provided on unsupported management device");
err = -EINVAL;
goto err;
}
if (!(config.mask & VDPA_DEV_NET_ATTRS_MASK) &&
config.mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES) &&
classes & BIT_ULL(VIRTIO_ID_NET) && ncls > 1 &&
config.device_features & VIRTIO_DEVICE_F_MASK) {
NL_SET_ERR_MSG_MOD(info->extack,
"Management device supports multi-class while device features specified are ambiguous");
err = -EINVAL;
goto err;
}
err = mdev->ops->dev_add(mdev, name, &config);
err:
up_write(&vdpa_dev_lock);
return err;
}
static int vdpa_nl_cmd_dev_del_set_doit(struct sk_buff *skb, struct genl_info *info)
{
struct vdpa_mgmt_dev *mdev;
struct vdpa_device *vdev;
struct device *dev;
const char *name;
int err = 0;
if (!info->attrs[VDPA_ATTR_DEV_NAME])
return -EINVAL;
name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
down_write(&vdpa_dev_lock);
dev = bus_find_device(&vdpa_bus, NULL, name, vdpa_name_match);
if (!dev) {
NL_SET_ERR_MSG_MOD(info->extack, "device not found");
err = -ENODEV;
goto dev_err;
}
vdev = container_of(dev, struct vdpa_device, dev);
if (!vdev->mdev) {
NL_SET_ERR_MSG_MOD(info->extack, "Only user created device can be deleted by user");
err = -EINVAL;
goto mdev_err;
}
mdev = vdev->mdev;
mdev->ops->dev_del(mdev, vdev);
mdev_err:
put_device(dev);
dev_err:
up_write(&vdpa_dev_lock);
return err;
}
static int
vdpa_dev_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq,
int flags, struct netlink_ext_ack *extack)
{
u16 max_vq_size;
u16 min_vq_size = 1;
u32 device_id;
u32 vendor_id;
void *hdr;
int err;
hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_DEV_NEW);
if (!hdr)
return -EMSGSIZE;
err = vdpa_nl_mgmtdev_handle_fill(msg, vdev->mdev);
if (err)
goto msg_err;
device_id = vdev->config->get_device_id(vdev);
vendor_id = vdev->config->get_vendor_id(vdev);
max_vq_size = vdev->config->get_vq_num_max(vdev);
if (vdev->config->get_vq_num_min)
min_vq_size = vdev->config->get_vq_num_min(vdev);
err = -EMSGSIZE;
if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev)))
goto msg_err;
if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id))
goto msg_err;
if (nla_put_u32(msg, VDPA_ATTR_DEV_VENDOR_ID, vendor_id))
goto msg_err;
if (nla_put_u32(msg, VDPA_ATTR_DEV_MAX_VQS, vdev->nvqs))
goto msg_err;
if (nla_put_u16(msg, VDPA_ATTR_DEV_MAX_VQ_SIZE, max_vq_size))
goto msg_err;
if (nla_put_u16(msg, VDPA_ATTR_DEV_MIN_VQ_SIZE, min_vq_size))
goto msg_err;
genlmsg_end(msg, hdr);
return 0;
msg_err:
genlmsg_cancel(msg, hdr);
return err;
}
static int vdpa_nl_cmd_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
{
struct vdpa_device *vdev;
struct sk_buff *msg;
const char *devname;
struct device *dev;
int err;
if (!info->attrs[VDPA_ATTR_DEV_NAME])
return -EINVAL;
devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
down_read(&vdpa_dev_lock);
dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
if (!dev) {
NL_SET_ERR_MSG_MOD(info->extack, "device not found");
err = -ENODEV;
goto err;
}
vdev = container_of(dev, struct vdpa_device, dev);
if (!vdev->mdev) {
err = -EINVAL;
goto mdev_err;
}
err = vdpa_dev_fill(vdev, msg, info->snd_portid, info->snd_seq, 0, info->extack);
if (err)
goto mdev_err;
err = genlmsg_reply(msg, info);
put_device(dev);
up_read(&vdpa_dev_lock);
return err;
mdev_err:
put_device(dev);
err:
up_read(&vdpa_dev_lock);
nlmsg_free(msg);
return err;
}
struct vdpa_dev_dump_info {
struct sk_buff *msg;
struct netlink_callback *cb;
int start_idx;
int idx;
};
static int vdpa_dev_dump(struct device *dev, void *data)
{
struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
struct vdpa_dev_dump_info *info = data;
int err;
if (!vdev->mdev)
return 0;
if (info->idx < info->start_idx) {
info->idx++;
return 0;
}
err = vdpa_dev_fill(vdev, info->msg, NETLINK_CB(info->cb->skb).portid,
info->cb->nlh->nlmsg_seq, NLM_F_MULTI, info->cb->extack);
if (err)
return err;
info->idx++;
return 0;
}
static int vdpa_nl_cmd_dev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
{
struct vdpa_dev_dump_info info;
info.msg = msg;
info.cb = cb;
info.start_idx = cb->args[0];
info.idx = 0;
down_read(&vdpa_dev_lock);
bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_dump);
up_read(&vdpa_dev_lock);
cb->args[0] = info.idx;
return msg->len;
}
static int vdpa_dev_net_mq_config_fill(struct sk_buff *msg, u64 features,
const struct virtio_net_config *config)
{
u16 val_u16;
if ((features & BIT_ULL(VIRTIO_NET_F_MQ)) == 0 &&
(features & BIT_ULL(VIRTIO_NET_F_RSS)) == 0)
return 0;
val_u16 = __virtio16_to_cpu(true, config->max_virtqueue_pairs);
return nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MAX_VQP, val_u16);
}
static int vdpa_dev_net_mtu_config_fill(struct sk_buff *msg, u64 features,
const struct virtio_net_config *config)
{
u16 val_u16;
if ((features & BIT_ULL(VIRTIO_NET_F_MTU)) == 0)
return 0;
val_u16 = __virtio16_to_cpu(true, config->mtu);
return nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MTU, val_u16);
}
static int vdpa_dev_net_mac_config_fill(struct sk_buff *msg, u64 features,
const struct virtio_net_config *config)
{
if ((features & BIT_ULL(VIRTIO_NET_F_MAC)) == 0)
return 0;
return nla_put(msg, VDPA_ATTR_DEV_NET_CFG_MACADDR,
sizeof(config->mac), config->mac);
}
static int vdpa_dev_net_status_config_fill(struct sk_buff *msg, u64 features,
const struct virtio_net_config *config)
{
u16 val_u16;
if ((features & BIT_ULL(VIRTIO_NET_F_STATUS)) == 0)
return 0;
val_u16 = __virtio16_to_cpu(true, config->status);
return nla_put_u16(msg, VDPA_ATTR_DEV_NET_STATUS, val_u16);
}
static int vdpa_dev_net_config_fill(struct vdpa_device *vdev, struct sk_buff *msg)
{
struct virtio_net_config config = {};
u64 features_device;
vdev->config->get_config(vdev, 0, &config, sizeof(config));
features_device = vdev->config->get_device_features(vdev);
if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_FEATURES, features_device,
VDPA_ATTR_PAD))
return -EMSGSIZE;
if (vdpa_dev_net_mtu_config_fill(msg, features_device, &config))
return -EMSGSIZE;
if (vdpa_dev_net_mac_config_fill(msg, features_device, &config))
return -EMSGSIZE;
if (vdpa_dev_net_status_config_fill(msg, features_device, &config))
return -EMSGSIZE;
return vdpa_dev_net_mq_config_fill(msg, features_device, &config);
}
static int
vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq,
int flags, struct netlink_ext_ack *extack)
{
u64 features_driver;
u8 status = 0;
u32 device_id;
void *hdr;
int err;
down_read(&vdev->cf_lock);
hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags,
VDPA_CMD_DEV_CONFIG_GET);
if (!hdr) {
err = -EMSGSIZE;
goto out;
}
if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) {
err = -EMSGSIZE;
goto msg_err;
}
device_id = vdev->config->get_device_id(vdev);
if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id)) {
err = -EMSGSIZE;
goto msg_err;
}
/* only read driver features after the feature negotiation is done */
status = vdev->config->get_status(vdev);
if (status & VIRTIO_CONFIG_S_FEATURES_OK) {
features_driver = vdev->config->get_driver_features(vdev);
if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES, features_driver,
VDPA_ATTR_PAD)) {
err = -EMSGSIZE;
goto msg_err;
}
}
switch (device_id) {
case VIRTIO_ID_NET:
err = vdpa_dev_net_config_fill(vdev, msg);
break;
default:
err = -EOPNOTSUPP;
break;
}
if (err)
goto msg_err;
up_read(&vdev->cf_lock);
genlmsg_end(msg, hdr);
return 0;
msg_err:
genlmsg_cancel(msg, hdr);
out:
up_read(&vdev->cf_lock);
return err;
}
static int vdpa_fill_stats_rec(struct vdpa_device *vdev, struct sk_buff *msg,
struct genl_info *info, u32 index)
{
struct virtio_net_config config = {};
u64 features;
u8 status;
int err;
status = vdev->config->get_status(vdev);
if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) {
NL_SET_ERR_MSG_MOD(info->extack, "feature negotiation not complete");
return -EAGAIN;
}
vdpa_get_config_unlocked(vdev, 0, &config, sizeof(config));
features = vdev->config->get_driver_features(vdev);
if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES,
features, VDPA_ATTR_PAD))
return -EMSGSIZE;
err = vdpa_dev_net_mq_config_fill(msg, features, &config);
if (err)
return err;
if (nla_put_u32(msg, VDPA_ATTR_DEV_QUEUE_INDEX, index))
return -EMSGSIZE;
err = vdev->config->get_vendor_vq_stats(vdev, index, msg, info->extack);
if (err)
return err;
return 0;
}
static int vendor_stats_fill(struct vdpa_device *vdev, struct sk_buff *msg,
struct genl_info *info, u32 index)
{
int err;
down_read(&vdev->cf_lock);
if (!vdev->config->get_vendor_vq_stats) {
err = -EOPNOTSUPP;
goto out;
}
err = vdpa_fill_stats_rec(vdev, msg, info, index);
out:
up_read(&vdev->cf_lock);
return err;
}
static int vdpa_dev_vendor_stats_fill(struct vdpa_device *vdev,
struct sk_buff *msg,
struct genl_info *info, u32 index)
{
u32 device_id;
void *hdr;
int err;
u32 portid = info->snd_portid;
u32 seq = info->snd_seq;
u32 flags = 0;
hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags,
VDPA_CMD_DEV_VSTATS_GET);
if (!hdr)
return -EMSGSIZE;
if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) {
err = -EMSGSIZE;
goto undo_msg;
}
device_id = vdev->config->get_device_id(vdev);
if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id)) {
err = -EMSGSIZE;
goto undo_msg;
}
switch (device_id) {
case VIRTIO_ID_NET:
if (index > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX) {
NL_SET_ERR_MSG_MOD(info->extack, "queue index exceeds max value");
err = -ERANGE;
break;
}
err = vendor_stats_fill(vdev, msg, info, index);
break;
default:
err = -EOPNOTSUPP;
break;
}
genlmsg_end(msg, hdr);
return err;
undo_msg:
genlmsg_cancel(msg, hdr);
return err;
}
static int vdpa_nl_cmd_dev_config_get_doit(struct sk_buff *skb, struct genl_info *info)
{
struct vdpa_device *vdev;
struct sk_buff *msg;
const char *devname;
struct device *dev;
int err;
if (!info->attrs[VDPA_ATTR_DEV_NAME])
return -EINVAL;
devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
down_read(&vdpa_dev_lock);
dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
if (!dev) {
NL_SET_ERR_MSG_MOD(info->extack, "device not found");
err = -ENODEV;
goto dev_err;
}
vdev = container_of(dev, struct vdpa_device, dev);
if (!vdev->mdev) {
NL_SET_ERR_MSG_MOD(info->extack, "unmanaged vdpa device");
err = -EINVAL;
goto mdev_err;
}
err = vdpa_dev_config_fill(vdev, msg, info->snd_portid, info->snd_seq,
0, info->extack);
if (!err)
err = genlmsg_reply(msg, info);
mdev_err:
put_device(dev);
dev_err:
up_read(&vdpa_dev_lock);
if (err)
nlmsg_free(msg);
return err;
}
static int vdpa_dev_config_dump(struct device *dev, void *data)
{
struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
struct vdpa_dev_dump_info *info = data;
int err;
if (!vdev->mdev)
return 0;
if (info->idx < info->start_idx) {
info->idx++;
return 0;
}
err = vdpa_dev_config_fill(vdev, info->msg, NETLINK_CB(info->cb->skb).portid,
info->cb->nlh->nlmsg_seq, NLM_F_MULTI,
info->cb->extack);
if (err)
return err;
info->idx++;
return 0;
}
static int
vdpa_nl_cmd_dev_config_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
{
struct vdpa_dev_dump_info info;
info.msg = msg;
info.cb = cb;
info.start_idx = cb->args[0];
info.idx = 0;
down_read(&vdpa_dev_lock);
bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_config_dump);
up_read(&vdpa_dev_lock);
cb->args[0] = info.idx;
return msg->len;
}
static int vdpa_nl_cmd_dev_stats_get_doit(struct sk_buff *skb,
struct genl_info *info)
{
struct vdpa_device *vdev;
struct sk_buff *msg;
const char *devname;
struct device *dev;
u32 index;
int err;
if (!info->attrs[VDPA_ATTR_DEV_NAME])
return -EINVAL;
if (!info->attrs[VDPA_ATTR_DEV_QUEUE_INDEX])
return -EINVAL;
devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
index = nla_get_u32(info->attrs[VDPA_ATTR_DEV_QUEUE_INDEX]);
down_read(&vdpa_dev_lock);
dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
if (!dev) {
NL_SET_ERR_MSG_MOD(info->extack, "device not found");
err = -ENODEV;
goto dev_err;
}
vdev = container_of(dev, struct vdpa_device, dev);
if (!vdev->mdev) {
NL_SET_ERR_MSG_MOD(info->extack, "unmanaged vdpa device");
err = -EINVAL;
goto mdev_err;
}
err = vdpa_dev_vendor_stats_fill(vdev, msg, info, index);
if (err)
goto mdev_err;
err = genlmsg_reply(msg, info);
put_device(dev);
up_read(&vdpa_dev_lock);
return err;
mdev_err:
put_device(dev);
dev_err:
nlmsg_free(msg);
up_read(&vdpa_dev_lock);
return err;
}
static const struct nla_policy vdpa_nl_policy[VDPA_ATTR_MAX + 1] = {
[VDPA_ATTR_MGMTDEV_BUS_NAME] = { .type = NLA_NUL_STRING },
[VDPA_ATTR_MGMTDEV_DEV_NAME] = { .type = NLA_STRING },
[VDPA_ATTR_DEV_NAME] = { .type = NLA_STRING },
[VDPA_ATTR_DEV_NET_CFG_MACADDR] = NLA_POLICY_ETH_ADDR,
[VDPA_ATTR_DEV_NET_CFG_MAX_VQP] = { .type = NLA_U16 },
/* virtio spec 1.1 section 5.1.4.1 for valid MTU range */
[VDPA_ATTR_DEV_NET_CFG_MTU] = NLA_POLICY_MIN(NLA_U16, 68),
[VDPA_ATTR_DEV_QUEUE_INDEX] = { .type = NLA_U32 },
[VDPA_ATTR_DEV_FEATURES] = { .type = NLA_U64 },
};
static const struct genl_ops vdpa_nl_ops[] = {
{
.cmd = VDPA_CMD_MGMTDEV_GET,
.doit = vdpa_nl_cmd_mgmtdev_get_doit,
.dumpit = vdpa_nl_cmd_mgmtdev_get_dumpit,
},
{
.cmd = VDPA_CMD_DEV_NEW,
.doit = vdpa_nl_cmd_dev_add_set_doit,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = VDPA_CMD_DEV_DEL,
.doit = vdpa_nl_cmd_dev_del_set_doit,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = VDPA_CMD_DEV_GET,
.doit = vdpa_nl_cmd_dev_get_doit,
.dumpit = vdpa_nl_cmd_dev_get_dumpit,
},
{
.cmd = VDPA_CMD_DEV_CONFIG_GET,
.doit = vdpa_nl_cmd_dev_config_get_doit,
.dumpit = vdpa_nl_cmd_dev_config_get_dumpit,
},
{
.cmd = VDPA_CMD_DEV_VSTATS_GET,
.doit = vdpa_nl_cmd_dev_stats_get_doit,
.flags = GENL_ADMIN_PERM,
},
};
static struct genl_family vdpa_nl_family __ro_after_init = {
.name = VDPA_GENL_NAME,
.version = VDPA_GENL_VERSION,
.maxattr = VDPA_ATTR_MAX,
.policy = vdpa_nl_policy,
.netnsok = false,
.module = THIS_MODULE,
.ops = vdpa_nl_ops,
.n_ops = ARRAY_SIZE(vdpa_nl_ops),
.resv_start_op = VDPA_CMD_DEV_VSTATS_GET + 1,
};
static int vdpa_init(void)
{
int err;
err = bus_register(&vdpa_bus);
if (err)
return err;
err = genl_register_family(&vdpa_nl_family);
if (err)
goto err;
return 0;
err:
bus_unregister(&vdpa_bus);
return err;
}
static void __exit vdpa_exit(void)
{
genl_unregister_family(&vdpa_nl_family);
bus_unregister(&vdpa_bus);
ida_destroy(&vdpa_index_ida);
}
core_initcall(vdpa_init);
module_exit(vdpa_exit);
MODULE_AUTHOR("Jason Wang <[email protected]>");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/vdpa/vdpa.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Intel IFC VF NIC driver for virtio dataplane offloading
*
* Copyright (C) 2020 Intel Corporation.
*
* Author: Zhu Lingshan <[email protected]>
*
*/
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sysfs.h>
#include "ifcvf_base.h"
#define DRIVER_AUTHOR "Intel Corporation"
#define IFCVF_DRIVER_NAME "ifcvf"
static irqreturn_t ifcvf_config_changed(int irq, void *arg)
{
struct ifcvf_hw *vf = arg;
if (vf->config_cb.callback)
return vf->config_cb.callback(vf->config_cb.private);
return IRQ_HANDLED;
}
static irqreturn_t ifcvf_vq_intr_handler(int irq, void *arg)
{
struct vring_info *vring = arg;
if (vring->cb.callback)
return vring->cb.callback(vring->cb.private);
return IRQ_HANDLED;
}
static irqreturn_t ifcvf_vqs_reused_intr_handler(int irq, void *arg)
{
struct ifcvf_hw *vf = arg;
struct vring_info *vring;
int i;
for (i = 0; i < vf->nr_vring; i++) {
vring = &vf->vring[i];
if (vring->cb.callback)
vring->cb.callback(vring->cb.private);
}
return IRQ_HANDLED;
}
static irqreturn_t ifcvf_dev_intr_handler(int irq, void *arg)
{
struct ifcvf_hw *vf = arg;
u8 isr;
isr = vp_ioread8(vf->isr);
if (isr & VIRTIO_PCI_ISR_CONFIG)
ifcvf_config_changed(irq, arg);
return ifcvf_vqs_reused_intr_handler(irq, arg);
}
static void ifcvf_free_irq_vectors(void *data)
{
pci_free_irq_vectors(data);
}
static void ifcvf_free_per_vq_irq(struct ifcvf_hw *vf)
{
struct pci_dev *pdev = vf->pdev;
int i;
for (i = 0; i < vf->nr_vring; i++) {
if (vf->vring[i].irq != -EINVAL) {
devm_free_irq(&pdev->dev, vf->vring[i].irq, &vf->vring[i]);
vf->vring[i].irq = -EINVAL;
}
}
}
static void ifcvf_free_vqs_reused_irq(struct ifcvf_hw *vf)
{
struct pci_dev *pdev = vf->pdev;
if (vf->vqs_reused_irq != -EINVAL) {
devm_free_irq(&pdev->dev, vf->vqs_reused_irq, vf);
vf->vqs_reused_irq = -EINVAL;
}
}
static void ifcvf_free_vq_irq(struct ifcvf_hw *vf)
{
if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
ifcvf_free_per_vq_irq(vf);
else
ifcvf_free_vqs_reused_irq(vf);
}
static void ifcvf_free_config_irq(struct ifcvf_hw *vf)
{
struct pci_dev *pdev = vf->pdev;
if (vf->config_irq == -EINVAL)
return;
	/* If the irq is shared by all vqs and the config interrupt,
	 * it is already freed in ifcvf_free_vq_irq, so we only need to
	 * free the config irq here when msix_vector_status != MSIX_VECTOR_DEV_SHARED
	 */
if (vf->msix_vector_status != MSIX_VECTOR_DEV_SHARED) {
devm_free_irq(&pdev->dev, vf->config_irq, vf);
vf->config_irq = -EINVAL;
}
}
static void ifcvf_free_irq(struct ifcvf_hw *vf)
{
struct pci_dev *pdev = vf->pdev;
ifcvf_free_vq_irq(vf);
ifcvf_free_config_irq(vf);
ifcvf_free_irq_vectors(pdev);
vf->num_msix_vectors = 0;
}
/* ifcvf MSIX vector allocator: this helper tries to allocate
 * vectors for all virtqueues and the config interrupt.
 * It returns the number of allocated vectors, or a negative
 * value on failure.
 */
static int ifcvf_alloc_vectors(struct ifcvf_hw *vf)
{
struct pci_dev *pdev = vf->pdev;
int max_intr, ret;
/* all queues and config interrupt */
max_intr = vf->nr_vring + 1;
ret = pci_alloc_irq_vectors(pdev, 1, max_intr, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
if (ret < 0) {
IFCVF_ERR(pdev, "Failed to alloc IRQ vectors\n");
return ret;
}
if (ret < max_intr)
IFCVF_INFO(pdev,
"Requested %u vectors, however only %u allocated, lower performance\n",
max_intr, ret);
return ret;
}
static int ifcvf_request_per_vq_irq(struct ifcvf_hw *vf)
{
struct pci_dev *pdev = vf->pdev;
int i, vector, ret, irq;
vf->vqs_reused_irq = -EINVAL;
for (i = 0; i < vf->nr_vring; i++) {
snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d\n", pci_name(pdev), i);
vector = i;
irq = pci_irq_vector(pdev, vector);
ret = devm_request_irq(&pdev->dev, irq,
ifcvf_vq_intr_handler, 0,
vf->vring[i].msix_name,
&vf->vring[i]);
if (ret) {
IFCVF_ERR(pdev, "Failed to request irq for vq %d\n", i);
goto err;
}
vf->vring[i].irq = irq;
ret = ifcvf_set_vq_vector(vf, i, vector);
if (ret == VIRTIO_MSI_NO_VECTOR) {
IFCVF_ERR(pdev, "No msix vector for vq %u\n", i);
goto err;
}
}
return 0;
err:
ifcvf_free_irq(vf);
return -EFAULT;
}
static int ifcvf_request_vqs_reused_irq(struct ifcvf_hw *vf)
{
struct pci_dev *pdev = vf->pdev;
int i, vector, ret, irq;
vector = 0;
snprintf(vf->vring[0].msix_name, 256, "ifcvf[%s]-vqs-reused-irq\n", pci_name(pdev));
irq = pci_irq_vector(pdev, vector);
ret = devm_request_irq(&pdev->dev, irq,
ifcvf_vqs_reused_intr_handler, 0,
vf->vring[0].msix_name, vf);
if (ret) {
IFCVF_ERR(pdev, "Failed to request reused irq for the device\n");
goto err;
}
vf->vqs_reused_irq = irq;
for (i = 0; i < vf->nr_vring; i++) {
vf->vring[i].irq = -EINVAL;
ret = ifcvf_set_vq_vector(vf, i, vector);
if (ret == VIRTIO_MSI_NO_VECTOR) {
IFCVF_ERR(pdev, "No msix vector for vq %u\n", i);
goto err;
}
}
return 0;
err:
ifcvf_free_irq(vf);
return -EFAULT;
}
static int ifcvf_request_dev_irq(struct ifcvf_hw *vf)
{
struct pci_dev *pdev = vf->pdev;
int i, vector, ret, irq;
vector = 0;
snprintf(vf->vring[0].msix_name, 256, "ifcvf[%s]-dev-irq\n", pci_name(pdev));
irq = pci_irq_vector(pdev, vector);
ret = devm_request_irq(&pdev->dev, irq,
ifcvf_dev_intr_handler, 0,
vf->vring[0].msix_name, vf);
if (ret) {
IFCVF_ERR(pdev, "Failed to request irq for the device\n");
goto err;
}
vf->vqs_reused_irq = irq;
for (i = 0; i < vf->nr_vring; i++) {
vf->vring[i].irq = -EINVAL;
ret = ifcvf_set_vq_vector(vf, i, vector);
if (ret == VIRTIO_MSI_NO_VECTOR) {
IFCVF_ERR(pdev, "No msix vector for vq %u\n", i);
goto err;
}
}
vf->config_irq = irq;
ret = ifcvf_set_config_vector(vf, vector);
if (ret == VIRTIO_MSI_NO_VECTOR) {
IFCVF_ERR(pdev, "No msix vector for device config\n");
goto err;
}
return 0;
err:
ifcvf_free_irq(vf);
return -EFAULT;
}
static int ifcvf_request_vq_irq(struct ifcvf_hw *vf)
{
int ret;
if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
ret = ifcvf_request_per_vq_irq(vf);
else
ret = ifcvf_request_vqs_reused_irq(vf);
return ret;
}
static int ifcvf_request_config_irq(struct ifcvf_hw *vf)
{
struct pci_dev *pdev = vf->pdev;
int config_vector, ret;
if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
config_vector = vf->nr_vring;
else if (vf->msix_vector_status == MSIX_VECTOR_SHARED_VQ_AND_CONFIG)
/* vector 0 for vqs and 1 for config interrupt */
config_vector = 1;
else if (vf->msix_vector_status == MSIX_VECTOR_DEV_SHARED)
/* re-use the vqs vector */
return 0;
else
return -EINVAL;
snprintf(vf->config_msix_name, 256, "ifcvf[%s]-config\n",
pci_name(pdev));
vf->config_irq = pci_irq_vector(pdev, config_vector);
ret = devm_request_irq(&pdev->dev, vf->config_irq,
ifcvf_config_changed, 0,
vf->config_msix_name, vf);
if (ret) {
IFCVF_ERR(pdev, "Failed to request config irq\n");
goto err;
}
ret = ifcvf_set_config_vector(vf, config_vector);
if (ret == VIRTIO_MSI_NO_VECTOR) {
IFCVF_ERR(pdev, "No msix vector for device config\n");
goto err;
}
return 0;
err:
ifcvf_free_irq(vf);
return -EFAULT;
}
static int ifcvf_request_irq(struct ifcvf_hw *vf)
{
int nvectors, ret, max_intr;
nvectors = ifcvf_alloc_vectors(vf);
if (nvectors <= 0)
return -EFAULT;
vf->msix_vector_status = MSIX_VECTOR_PER_VQ_AND_CONFIG;
max_intr = vf->nr_vring + 1;
if (nvectors < max_intr)
vf->msix_vector_status = MSIX_VECTOR_SHARED_VQ_AND_CONFIG;
if (nvectors == 1) {
vf->msix_vector_status = MSIX_VECTOR_DEV_SHARED;
ret = ifcvf_request_dev_irq(vf);
return ret;
}
ret = ifcvf_request_vq_irq(vf);
if (ret)
return ret;
ret = ifcvf_request_config_irq(vf);
if (ret)
return ret;
vf->num_msix_vectors = nvectors;
return 0;
}
static struct ifcvf_adapter *vdpa_to_adapter(struct vdpa_device *vdpa_dev)
{
return container_of(vdpa_dev, struct ifcvf_adapter, vdpa);
}
static struct ifcvf_hw *vdpa_to_vf(struct vdpa_device *vdpa_dev)
{
struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
return adapter->vf;
}
static u64 ifcvf_vdpa_get_device_features(struct vdpa_device *vdpa_dev)
{
struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
struct pci_dev *pdev = adapter->pdev;
u32 type = vf->dev_type;
u64 features;
if (type == VIRTIO_ID_NET || type == VIRTIO_ID_BLOCK)
features = ifcvf_get_dev_features(vf);
else {
features = 0;
IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", vf->dev_type);
}
return features;
}
static int ifcvf_vdpa_set_driver_features(struct vdpa_device *vdpa_dev, u64 features)
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
int ret;
ret = ifcvf_verify_min_features(vf, features);
if (ret)
return ret;
ifcvf_set_driver_features(vf, features);
return 0;
}
static u64 ifcvf_vdpa_get_driver_features(struct vdpa_device *vdpa_dev)
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
u64 features;
features = ifcvf_get_driver_features(vf);
return features;
}
static u8 ifcvf_vdpa_get_status(struct vdpa_device *vdpa_dev)
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
return ifcvf_get_status(vf);
}
static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
{
struct ifcvf_hw *vf;
u8 status_old;
int ret;
vf = vdpa_to_vf(vdpa_dev);
status_old = ifcvf_get_status(vf);
if (status_old == status)
return;
if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
!(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) {
ret = ifcvf_request_irq(vf);
if (ret) {
IFCVF_ERR(vf->pdev, "failed to request irq with error %d\n", ret);
return;
}
}
ifcvf_set_status(vf, status);
}
static int ifcvf_vdpa_reset(struct vdpa_device *vdpa_dev)
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
u8 status = ifcvf_get_status(vf);
ifcvf_stop(vf);
if (status & VIRTIO_CONFIG_S_DRIVER_OK)
ifcvf_free_irq(vf);
ifcvf_reset(vf);
return 0;
}
static u16 ifcvf_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
return ifcvf_get_max_vq_size(vf);
}
static int ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
struct vdpa_vq_state *state)
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
state->split.avail_index = ifcvf_get_vq_state(vf, qid);
return 0;
}
static int ifcvf_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
const struct vdpa_vq_state *state)
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
return ifcvf_set_vq_state(vf, qid, state->split.avail_index);
}
static void ifcvf_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,
struct vdpa_callback *cb)
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
vf->vring[qid].cb = *cb;
}
static void ifcvf_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev,
u16 qid, bool ready)
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
ifcvf_set_vq_ready(vf, qid, ready);
}
static bool ifcvf_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
return ifcvf_get_vq_ready(vf, qid);
}
static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
u32 num)
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
ifcvf_set_vq_num(vf, qid, num);
}
static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
u64 desc_area, u64 driver_area,
u64 device_area)
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
return ifcvf_set_vq_address(vf, qid, desc_area, driver_area, device_area);
}
static void ifcvf_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
ifcvf_notify_queue(vf, qid);
}
static u32 ifcvf_vdpa_get_generation(struct vdpa_device *vdpa_dev)
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
return vp_ioread8(&vf->common_cfg->config_generation);
}
static u32 ifcvf_vdpa_get_device_id(struct vdpa_device *vdpa_dev)
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
return vf->dev_type;
}
static u32 ifcvf_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev)
{
struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
struct pci_dev *pdev = adapter->pdev;
return pdev->subsystem_vendor;
}
static u32 ifcvf_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
{
return IFCVF_QUEUE_ALIGNMENT;
}
static size_t ifcvf_vdpa_get_config_size(struct vdpa_device *vdpa_dev)
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
return vf->config_size;
}
static u32 ifcvf_vdpa_get_vq_group(struct vdpa_device *vdpa, u16 idx)
{
return 0;
}
static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev,
unsigned int offset,
void *buf, unsigned int len)
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
ifcvf_read_dev_config(vf, offset, buf, len);
}
static void ifcvf_vdpa_set_config(struct vdpa_device *vdpa_dev,
unsigned int offset, const void *buf,
unsigned int len)
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
ifcvf_write_dev_config(vf, offset, buf, len);
}
static void ifcvf_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
struct vdpa_callback *cb)
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
vf->config_cb.callback = cb->callback;
vf->config_cb.private = cb->private;
}
static int ifcvf_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev,
u16 qid)
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
if (vf->vqs_reused_irq < 0)
return vf->vring[qid].irq;
else
return -EINVAL;
}
static struct vdpa_notification_area ifcvf_get_vq_notification(struct vdpa_device *vdpa_dev,
u16 idx)
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
struct vdpa_notification_area area;
area.addr = vf->vring[idx].notify_pa;
if (!vf->notify_off_multiplier)
area.size = PAGE_SIZE;
else
area.size = vf->notify_off_multiplier;
return area;
}
/*
 * IFCVF currently doesn't have an on-chip IOMMU, so
 * set_map()/dma_map()/dma_unmap() are not implemented.
 */
static const struct vdpa_config_ops ifc_vdpa_ops = {
.get_device_features = ifcvf_vdpa_get_device_features,
.set_driver_features = ifcvf_vdpa_set_driver_features,
.get_driver_features = ifcvf_vdpa_get_driver_features,
.get_status = ifcvf_vdpa_get_status,
.set_status = ifcvf_vdpa_set_status,
.reset = ifcvf_vdpa_reset,
.get_vq_num_max = ifcvf_vdpa_get_vq_num_max,
.get_vq_state = ifcvf_vdpa_get_vq_state,
.set_vq_state = ifcvf_vdpa_set_vq_state,
.set_vq_cb = ifcvf_vdpa_set_vq_cb,
.set_vq_ready = ifcvf_vdpa_set_vq_ready,
.get_vq_ready = ifcvf_vdpa_get_vq_ready,
.set_vq_num = ifcvf_vdpa_set_vq_num,
.set_vq_address = ifcvf_vdpa_set_vq_address,
.get_vq_irq = ifcvf_vdpa_get_vq_irq,
.kick_vq = ifcvf_vdpa_kick_vq,
.get_generation = ifcvf_vdpa_get_generation,
.get_device_id = ifcvf_vdpa_get_device_id,
.get_vendor_id = ifcvf_vdpa_get_vendor_id,
.get_vq_align = ifcvf_vdpa_get_vq_align,
.get_vq_group = ifcvf_vdpa_get_vq_group,
.get_config_size = ifcvf_vdpa_get_config_size,
.get_config = ifcvf_vdpa_get_config,
.set_config = ifcvf_vdpa_set_config,
.set_config_cb = ifcvf_vdpa_set_config_cb,
.get_vq_notification = ifcvf_get_vq_notification,
};
static struct virtio_device_id id_table_net[] = {
{VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID},
{0},
};
static struct virtio_device_id id_table_blk[] = {
{VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID},
{0},
};
static u32 get_dev_type(struct pci_dev *pdev)
{
u32 dev_type;
	/* This driver drives both modern virtio devices and transitional
	 * devices in modern mode.
	 * vDPA requires feature bit VIRTIO_F_ACCESS_PLATFORM,
	 * so legacy devices and transitional devices in legacy
	 * mode will not work for vDPA; this driver will not
	 * drive devices with a legacy interface.
	 */
if (pdev->device < 0x1040)
dev_type = pdev->subsystem_device;
else
dev_type = pdev->device - 0x1040;
return dev_type;
}
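/*
 * Worked example (illustrative, not part of the original file): a modern
 * virtio-net function exposes PCI device ID 0x1041, so get_dev_type()
 * returns 0x1041 - 0x1040 = 1 == VIRTIO_ID_NET, while a transitional
 * device (ID below 0x1040) reports its class via the PCI subsystem
 * device ID instead.
 */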
static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
const struct vdpa_dev_set_config *config)
{
struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
struct ifcvf_adapter *adapter;
struct vdpa_device *vdpa_dev;
struct pci_dev *pdev;
struct ifcvf_hw *vf;
u64 device_features;
int ret;
ifcvf_mgmt_dev = container_of(mdev, struct ifcvf_vdpa_mgmt_dev, mdev);
vf = &ifcvf_mgmt_dev->vf;
pdev = vf->pdev;
adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
&pdev->dev, &ifc_vdpa_ops, 1, 1, NULL, false);
if (IS_ERR(adapter)) {
IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
return PTR_ERR(adapter);
}
ifcvf_mgmt_dev->adapter = adapter;
adapter->pdev = pdev;
adapter->vdpa.dma_dev = &pdev->dev;
adapter->vdpa.mdev = mdev;
adapter->vf = vf;
vdpa_dev = &adapter->vdpa;
device_features = vf->hw_features;
if (config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) {
if (config->device_features & ~device_features) {
IFCVF_ERR(pdev, "The provisioned features 0x%llx are not supported by this device with features 0x%llx\n",
config->device_features, device_features);
return -EINVAL;
}
device_features &= config->device_features;
}
vf->dev_features = device_features;
if (name)
ret = dev_set_name(&vdpa_dev->dev, "%s", name);
else
ret = dev_set_name(&vdpa_dev->dev, "vdpa%u", vdpa_dev->index);
ret = _vdpa_register_device(&adapter->vdpa, vf->nr_vring);
if (ret) {
put_device(&adapter->vdpa.dev);
IFCVF_ERR(pdev, "Failed to register to vDPA bus");
return ret;
}
return 0;
}
static void ifcvf_vdpa_dev_del(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev)
{
struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
ifcvf_mgmt_dev = container_of(mdev, struct ifcvf_vdpa_mgmt_dev, mdev);
_vdpa_unregister_device(dev);
ifcvf_mgmt_dev->adapter = NULL;
}
static const struct vdpa_mgmtdev_ops ifcvf_vdpa_mgmt_dev_ops = {
.dev_add = ifcvf_vdpa_dev_add,
.dev_del = ifcvf_vdpa_dev_del
};
static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
struct device *dev = &pdev->dev;
struct ifcvf_hw *vf;
u32 dev_type;
int ret, i;
ret = pcim_enable_device(pdev);
if (ret) {
IFCVF_ERR(pdev, "Failed to enable device\n");
return ret;
}
ret = pcim_iomap_regions(pdev, BIT(0) | BIT(2) | BIT(4),
IFCVF_DRIVER_NAME);
if (ret) {
IFCVF_ERR(pdev, "Failed to request MMIO region\n");
return ret;
}
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (ret) {
IFCVF_ERR(pdev, "No usable DMA configuration\n");
return ret;
}
ret = devm_add_action_or_reset(dev, ifcvf_free_irq_vectors, pdev);
if (ret) {
IFCVF_ERR(pdev,
"Failed for adding devres for freeing irq vectors\n");
return ret;
}
pci_set_master(pdev);
ifcvf_mgmt_dev = kzalloc(sizeof(struct ifcvf_vdpa_mgmt_dev), GFP_KERNEL);
if (!ifcvf_mgmt_dev) {
IFCVF_ERR(pdev, "Failed to alloc memory for the vDPA management device\n");
return -ENOMEM;
}
vf = &ifcvf_mgmt_dev->vf;
vf->dev_type = get_dev_type(pdev);
vf->base = pcim_iomap_table(pdev);
vf->pdev = pdev;
ret = ifcvf_init_hw(vf, pdev);
if (ret) {
IFCVF_ERR(pdev, "Failed to init IFCVF hw\n");
goto err;
}
for (i = 0; i < vf->nr_vring; i++)
vf->vring[i].irq = -EINVAL;
vf->hw_features = ifcvf_get_hw_features(vf);
vf->config_size = ifcvf_get_config_size(vf);
dev_type = get_dev_type(pdev);
switch (dev_type) {
case VIRTIO_ID_NET:
ifcvf_mgmt_dev->mdev.id_table = id_table_net;
break;
case VIRTIO_ID_BLOCK:
ifcvf_mgmt_dev->mdev.id_table = id_table_blk;
break;
default:
IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", dev_type);
ret = -EOPNOTSUPP;
goto err;
}
ifcvf_mgmt_dev->mdev.ops = &ifcvf_vdpa_mgmt_dev_ops;
ifcvf_mgmt_dev->mdev.device = dev;
ifcvf_mgmt_dev->mdev.max_supported_vqs = vf->nr_vring;
ifcvf_mgmt_dev->mdev.supported_features = vf->hw_features;
ifcvf_mgmt_dev->mdev.config_attr_mask = (1 << VDPA_ATTR_DEV_FEATURES);
ret = vdpa_mgmtdev_register(&ifcvf_mgmt_dev->mdev);
if (ret) {
IFCVF_ERR(pdev,
"Failed to initialize the management interfaces\n");
goto err;
}
pci_set_drvdata(pdev, ifcvf_mgmt_dev);
return 0;
err:
kfree(ifcvf_mgmt_dev->vf.vring);
kfree(ifcvf_mgmt_dev);
return ret;
}
static void ifcvf_remove(struct pci_dev *pdev)
{
struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
ifcvf_mgmt_dev = pci_get_drvdata(pdev);
vdpa_mgmtdev_unregister(&ifcvf_mgmt_dev->mdev);
kfree(ifcvf_mgmt_dev->vf.vring);
kfree(ifcvf_mgmt_dev);
}
static struct pci_device_id ifcvf_pci_ids[] = {
/* N3000 network device */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
N3000_DEVICE_ID,
PCI_VENDOR_ID_INTEL,
N3000_SUBSYS_DEVICE_ID) },
/* C5000X-PL network device
* F2000X-PL network device
*/
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
VIRTIO_TRANS_ID_NET,
PCI_VENDOR_ID_INTEL,
VIRTIO_ID_NET) },
/* C5000X-PL block device */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
VIRTIO_TRANS_ID_BLOCK,
PCI_VENDOR_ID_INTEL,
VIRTIO_ID_BLOCK) },
{ 0 },
};
MODULE_DEVICE_TABLE(pci, ifcvf_pci_ids);
static struct pci_driver ifcvf_driver = {
.name = IFCVF_DRIVER_NAME,
.id_table = ifcvf_pci_ids,
.probe = ifcvf_probe,
.remove = ifcvf_remove,
};
module_pci_driver(ifcvf_driver);
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/vdpa/ifcvf/ifcvf_main.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Intel IFC VF NIC driver for virtio dataplane offloading
*
* Copyright (C) 2020 Intel Corporation.
*
* Author: Zhu Lingshan <[email protected]>
*
*/
#include "ifcvf_base.h"
u16 ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector)
{
struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
vp_iowrite16(qid, &cfg->queue_select);
vp_iowrite16(vector, &cfg->queue_msix_vector);
return vp_ioread16(&cfg->queue_msix_vector);
}
u16 ifcvf_set_config_vector(struct ifcvf_hw *hw, int vector)
{
struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
vp_iowrite16(vector, &cfg->msix_config);
return vp_ioread16(&cfg->msix_config);
}
static void __iomem *get_cap_addr(struct ifcvf_hw *hw,
struct virtio_pci_cap *cap)
{
u32 length, offset;
u8 bar;
length = le32_to_cpu(cap->length);
offset = le32_to_cpu(cap->offset);
bar = cap->bar;
if (bar >= IFCVF_PCI_MAX_RESOURCE) {
IFCVF_DBG(hw->pdev,
"Invalid bar number %u to get capabilities\n", bar);
return NULL;
}
if (offset + length > pci_resource_len(hw->pdev, bar)) {
IFCVF_DBG(hw->pdev,
"offset(%u) + len(%u) overflows bar%u's capability\n",
offset, length, bar);
return NULL;
}
return hw->base[bar] + offset;
}
static int ifcvf_read_config_range(struct pci_dev *dev,
uint32_t *val, int size, int where)
{
int ret, i;
for (i = 0; i < size; i += 4) {
ret = pci_read_config_dword(dev, where + i, val + i / 4);
if (ret < 0)
return ret;
}
return 0;
}
static u16 ifcvf_get_vq_size(struct ifcvf_hw *hw, u16 qid)
{
u16 queue_size;
vp_iowrite16(qid, &hw->common_cfg->queue_select);
queue_size = vp_ioread16(&hw->common_cfg->queue_size);
return queue_size;
}
/* This function returns the max allowed safe size for
* all virtqueues. It is the minimal size that can be
* supported by all virtqueues.
*/
u16 ifcvf_get_max_vq_size(struct ifcvf_hw *hw)
{
u16 queue_size, max_size, qid;
max_size = ifcvf_get_vq_size(hw, 0);
for (qid = 1; qid < hw->nr_vring; qid++) {
queue_size = ifcvf_get_vq_size(hw, qid);
/* 0 means the queue is unavailable */
if (!queue_size)
continue;
max_size = min(queue_size, max_size);
}
return max_size;
}
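/*
* Walk the PCI capability list and pick up the vendor-specific virtio
* capabilities that describe where the common, notify, ISR and device
* config regions live, then precompute the per-queue notification
* addresses from the notify offset multiplier.
*/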
int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *pdev)
{
struct virtio_pci_cap cap;
u16 notify_off;
int ret;
u8 pos;
u32 i;
ret = pci_read_config_byte(pdev, PCI_CAPABILITY_LIST, &pos);
if (ret < 0) {
IFCVF_ERR(pdev, "Failed to read PCI capability list\n");
return -EIO;
}
hw->pdev = pdev;
while (pos) {
ret = ifcvf_read_config_range(pdev, (u32 *)&cap,
sizeof(cap), pos);
if (ret < 0) {
IFCVF_ERR(pdev,
"Failed to get PCI capability at %x\n", pos);
break;
}
if (cap.cap_vndr != PCI_CAP_ID_VNDR)
goto next;
switch (cap.cfg_type) {
case VIRTIO_PCI_CAP_COMMON_CFG:
hw->common_cfg = get_cap_addr(hw, &cap);
IFCVF_DBG(pdev, "hw->common_cfg = %p\n",
hw->common_cfg);
break;
case VIRTIO_PCI_CAP_NOTIFY_CFG:
pci_read_config_dword(pdev, pos + sizeof(cap),
&hw->notify_off_multiplier);
hw->notify_bar = cap.bar;
hw->notify_base = get_cap_addr(hw, &cap);
hw->notify_base_pa = pci_resource_start(pdev, cap.bar) +
le32_to_cpu(cap.offset);
IFCVF_DBG(pdev, "hw->notify_base = %p\n",
hw->notify_base);
break;
case VIRTIO_PCI_CAP_ISR_CFG:
hw->isr = get_cap_addr(hw, &cap);
IFCVF_DBG(pdev, "hw->isr = %p\n", hw->isr);
break;
case VIRTIO_PCI_CAP_DEVICE_CFG:
hw->dev_cfg = get_cap_addr(hw, &cap);
hw->cap_dev_config_size = le32_to_cpu(cap.length);
IFCVF_DBG(pdev, "hw->dev_cfg = %p\n", hw->dev_cfg);
break;
}
next:
pos = cap.cap_next;
}
if (hw->common_cfg == NULL || hw->notify_base == NULL ||
hw->isr == NULL || hw->dev_cfg == NULL) {
IFCVF_ERR(pdev, "Incomplete PCI capabilities\n");
return -EIO;
}
hw->nr_vring = vp_ioread16(&hw->common_cfg->num_queues);
hw->vring = kcalloc(hw->nr_vring, sizeof(struct vring_info), GFP_KERNEL);
if (!hw->vring)
return -ENOMEM;
for (i = 0; i < hw->nr_vring; i++) {
vp_iowrite16(i, &hw->common_cfg->queue_select);
notify_off = vp_ioread16(&hw->common_cfg->queue_notify_off);
hw->vring[i].notify_addr = hw->notify_base +
notify_off * hw->notify_off_multiplier;
hw->vring[i].notify_pa = hw->notify_base_pa +
notify_off * hw->notify_off_multiplier;
hw->vring[i].irq = -EINVAL;
}
hw->lm_cfg = hw->base[IFCVF_LM_BAR];
IFCVF_DBG(pdev,
"PCI capability mapping: common cfg: %p, notify base: %p, isr cfg: %p, device cfg: %p, multiplier: %u\n",
hw->common_cfg, hw->notify_base, hw->isr,
hw->dev_cfg, hw->notify_off_multiplier);
hw->vqs_reused_irq = -EINVAL;
hw->config_irq = -EINVAL;
return 0;
}
u8 ifcvf_get_status(struct ifcvf_hw *hw)
{
return vp_ioread8(&hw->common_cfg->device_status);
}
void ifcvf_set_status(struct ifcvf_hw *hw, u8 status)
{
vp_iowrite8(status, &hw->common_cfg->device_status);
}
void ifcvf_reset(struct ifcvf_hw *hw)
{
ifcvf_set_status(hw, 0);
while (ifcvf_get_status(hw))
msleep(1);
}
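/*
* Device features are exposed as two 32-bit halves selected via
* device_feature_select; combine them into a single 64-bit value.
*/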
u64 ifcvf_get_hw_features(struct ifcvf_hw *hw)
{
struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
u32 features_lo, features_hi;
u64 features;
vp_iowrite32(0, &cfg->device_feature_select);
features_lo = vp_ioread32(&cfg->device_feature);
vp_iowrite32(1, &cfg->device_feature_select);
features_hi = vp_ioread32(&cfg->device_feature);
features = ((u64)features_hi << 32) | features_lo;
return features;
}
/* return provisioned vDPA dev features */
u64 ifcvf_get_dev_features(struct ifcvf_hw *hw)
{
return hw->dev_features;
}
u64 ifcvf_get_driver_features(struct ifcvf_hw *hw)
{
struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
u32 features_lo, features_hi;
u64 features;
vp_iowrite32(0, &cfg->device_feature_select);
features_lo = vp_ioread32(&cfg->guest_feature);
vp_iowrite32(1, &cfg->device_feature_select);
features_hi = vp_ioread32(&cfg->guest_feature);
features = ((u64)features_hi << 32) | features_lo;
return features;
}
int ifcvf_verify_min_features(struct ifcvf_hw *hw, u64 features)
{
if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM)) && features) {
IFCVF_ERR(hw->pdev, "VIRTIO_F_ACCESS_PLATFORM is not negotiated\n");
return -EINVAL;
}
return 0;
}
u32 ifcvf_get_config_size(struct ifcvf_hw *hw)
{
u32 net_config_size = sizeof(struct virtio_net_config);
u32 blk_config_size = sizeof(struct virtio_blk_config);
u32 cap_size = hw->cap_dev_config_size;
u32 config_size;
/* If the onboard device config space size is greater than
* the size of struct virtio_net/blk_config, return only the
* size defined by the spec. This is very unlikely to happen;
* it is purely defensive programming.
*/
switch (hw->dev_type) {
case VIRTIO_ID_NET:
config_size = min(cap_size, net_config_size);
break;
case VIRTIO_ID_BLOCK:
config_size = min(cap_size, blk_config_size);
break;
default:
config_size = 0;
IFCVF_ERR(hw->pdev, "VIRTIO ID %u not supported\n", hw->dev_type);
}
return config_size;
}
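/*
* Read the device config byte by byte, retrying until the generation
* counter is stable so the caller sees a consistent snapshot even if
* the device updates the config concurrently.
*/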
void ifcvf_read_dev_config(struct ifcvf_hw *hw, u64 offset,
void *dst, int length)
{
u8 old_gen, new_gen, *p;
int i;
WARN_ON(offset + length > hw->config_size);
do {
old_gen = vp_ioread8(&hw->common_cfg->config_generation);
p = dst;
for (i = 0; i < length; i++)
*p++ = vp_ioread8(hw->dev_cfg + offset + i);
new_gen = vp_ioread8(&hw->common_cfg->config_generation);
} while (old_gen != new_gen);
}
void ifcvf_write_dev_config(struct ifcvf_hw *hw, u64 offset,
const void *src, int length)
{
const u8 *p;
int i;
p = src;
WARN_ON(offset + length > hw->config_size);
for (i = 0; i < length; i++)
vp_iowrite8(*p++, hw->dev_cfg + offset + i);
}
void ifcvf_set_driver_features(struct ifcvf_hw *hw, u64 features)
{
struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
vp_iowrite32(0, &cfg->guest_feature_select);
vp_iowrite32((u32)features, &cfg->guest_feature);
vp_iowrite32(1, &cfg->guest_feature_select);
vp_iowrite32(features >> 32, &cfg->guest_feature);
}
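/*
* The per-queue state (last_avail_idx) is kept in the device's LM
* (live migration) region rather than in the standard common config,
* which is what allows the ring position to be saved and restored.
*/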
u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid)
{
struct ifcvf_lm_cfg __iomem *lm_cfg = hw->lm_cfg;
u16 last_avail_idx;
last_avail_idx = vp_ioread16(&lm_cfg->vq_state_region + qid * 2);
return last_avail_idx;
}
int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)
{
struct ifcvf_lm_cfg __iomem *lm_cfg = hw->lm_cfg;
vp_iowrite16(num, &lm_cfg->vq_state_region + qid * 2);
return 0;
}
void ifcvf_set_vq_num(struct ifcvf_hw *hw, u16 qid, u32 num)
{
struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
vp_iowrite16(qid, &cfg->queue_select);
vp_iowrite16(num, &cfg->queue_size);
}
int ifcvf_set_vq_address(struct ifcvf_hw *hw, u16 qid, u64 desc_area,
u64 driver_area, u64 device_area)
{
struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
vp_iowrite16(qid, &cfg->queue_select);
vp_iowrite64_twopart(desc_area, &cfg->queue_desc_lo,
&cfg->queue_desc_hi);
vp_iowrite64_twopart(driver_area, &cfg->queue_avail_lo,
&cfg->queue_avail_hi);
vp_iowrite64_twopart(device_area, &cfg->queue_used_lo,
&cfg->queue_used_hi);
return 0;
}
bool ifcvf_get_vq_ready(struct ifcvf_hw *hw, u16 qid)
{
struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
u16 queue_enable;
vp_iowrite16(qid, &cfg->queue_select);
queue_enable = vp_ioread16(&cfg->queue_enable);
return (bool)queue_enable;
}
void ifcvf_set_vq_ready(struct ifcvf_hw *hw, u16 qid, bool ready)
{
struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
vp_iowrite16(qid, &cfg->queue_select);
vp_iowrite16(ready, &cfg->queue_enable);
}
static void ifcvf_reset_vring(struct ifcvf_hw *hw)
{
u16 qid;
for (qid = 0; qid < hw->nr_vring; qid++) {
hw->vring[qid].cb.callback = NULL;
hw->vring[qid].cb.private = NULL;
ifcvf_set_vq_vector(hw, qid, VIRTIO_MSI_NO_VECTOR);
}
}
static void ifcvf_reset_config_handler(struct ifcvf_hw *hw)
{
hw->config_cb.callback = NULL;
hw->config_cb.private = NULL;
ifcvf_set_config_vector(hw, VIRTIO_MSI_NO_VECTOR);
}
static void ifcvf_synchronize_irq(struct ifcvf_hw *hw)
{
u32 nvectors = hw->num_msix_vectors;
struct pci_dev *pdev = hw->pdev;
int i, irq;
for (i = 0; i < nvectors; i++) {
irq = pci_irq_vector(pdev, i);
if (irq >= 0)
synchronize_irq(irq);
}
}
void ifcvf_stop(struct ifcvf_hw *hw)
{
ifcvf_synchronize_irq(hw);
ifcvf_reset_vring(hw);
ifcvf_reset_config_handler(hw);
}
void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid)
{
vp_iowrite16(qid, hw->vring[qid].notify_addr);
}
|
linux-master
|
drivers/vdpa/ifcvf/ifcvf_base.c
|
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#include <linux/debugfs.h>
#include <linux/mlx5/fs.h>
#include "mlx5_vnet.h"
static int tirn_show(struct seq_file *file, void *priv)
{
struct mlx5_vdpa_net *ndev = file->private;
seq_printf(file, "0x%x\n", ndev->res.tirn);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(tirn);
void mlx5_vdpa_remove_tirn(struct mlx5_vdpa_net *ndev)
{
if (ndev->debugfs)
debugfs_remove(ndev->res.tirn_dent);
}
void mlx5_vdpa_add_tirn(struct mlx5_vdpa_net *ndev)
{
ndev->res.tirn_dent = debugfs_create_file("tirn", 0444, ndev->rx_dent,
ndev, &tirn_fops);
}
static int rx_flow_table_show(struct seq_file *file, void *priv)
{
struct mlx5_vdpa_net *ndev = file->private;
seq_printf(file, "0x%x\n", mlx5_flow_table_id(ndev->rxft));
return 0;
}
DEFINE_SHOW_ATTRIBUTE(rx_flow_table);
void mlx5_vdpa_remove_rx_flow_table(struct mlx5_vdpa_net *ndev)
{
if (ndev->debugfs)
debugfs_remove(ndev->rx_table_dent);
}
void mlx5_vdpa_add_rx_flow_table(struct mlx5_vdpa_net *ndev)
{
ndev->rx_table_dent = debugfs_create_file("table_id", 0444, ndev->rx_dent,
ndev, &rx_flow_table_fops);
}
#if defined(CONFIG_MLX5_VDPA_STEERING_DEBUG)
static int packets_show(struct seq_file *file, void *priv)
{
struct mlx5_vdpa_counter *counter = file->private;
u64 packets;
u64 bytes;
int err;
err = mlx5_fc_query(counter->mdev, counter->counter, &packets, &bytes);
if (err)
return err;
seq_printf(file, "0x%llx\n", packets);
return 0;
}
static int bytes_show(struct seq_file *file, void *priv)
{
struct mlx5_vdpa_counter *counter = file->private;
u64 packets;
u64 bytes;
int err;
err = mlx5_fc_query(counter->mdev, counter->counter, &packets, &bytes);
if (err)
return err;
seq_printf(file, "0x%llx\n", bytes);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(packets);
DEFINE_SHOW_ATTRIBUTE(bytes);
static void add_counter_node(struct mlx5_vdpa_counter *counter,
struct dentry *parent)
{
debugfs_create_file("packets", 0444, parent, counter,
&packets_fops);
debugfs_create_file("bytes", 0444, parent, counter,
&bytes_fops);
}
void mlx5_vdpa_add_rx_counters(struct mlx5_vdpa_net *ndev,
struct macvlan_node *node)
{
static const char *ut = "untagged";
char vidstr[9];
u16 vid;
node->ucast_counter.mdev = ndev->mvdev.mdev;
node->mcast_counter.mdev = ndev->mvdev.mdev;
if (node->tagged) {
vid = key2vid(node->macvlan);
snprintf(vidstr, sizeof(vidstr), "0x%x", vid);
} else {
strcpy(vidstr, ut);
}
node->dent = debugfs_create_dir(vidstr, ndev->rx_dent);
if (IS_ERR(node->dent)) {
node->dent = NULL;
return;
}
node->ucast_counter.dent = debugfs_create_dir("ucast", node->dent);
if (IS_ERR(node->ucast_counter.dent))
return;
add_counter_node(&node->ucast_counter, node->ucast_counter.dent);
node->mcast_counter.dent = debugfs_create_dir("mcast", node->dent);
if (IS_ERR(node->mcast_counter.dent))
return;
add_counter_node(&node->mcast_counter, node->mcast_counter.dent);
}
void mlx5_vdpa_remove_rx_counters(struct mlx5_vdpa_net *ndev,
struct macvlan_node *node)
{
if (node->dent && ndev->debugfs)
debugfs_remove_recursive(node->dent);
}
#endif
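/*
* Create the per-device debugfs hierarchy under the mlx5 core device
* root; the "rx" directory holds the TIR, flow table and (when steering
* debug is enabled) per-macvlan counter entries.
*/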
void mlx5_vdpa_add_debugfs(struct mlx5_vdpa_net *ndev)
{
struct mlx5_core_dev *mdev;
mdev = ndev->mvdev.mdev;
ndev->debugfs = debugfs_create_dir(dev_name(&ndev->mvdev.vdev.dev),
mlx5_debugfs_get_dev_root(mdev));
if (!IS_ERR(ndev->debugfs))
ndev->rx_dent = debugfs_create_dir("rx", ndev->debugfs);
}
void mlx5_vdpa_remove_debugfs(struct dentry *dbg)
{
debugfs_remove_recursive(dbg);
}
|
linux-master
|
drivers/vdpa/mlx5/net/debug.c
|
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies Ltd. */
#include <linux/module.h>
#include <linux/vdpa.h>
#include <linux/vringh.h>
#include <uapi/linux/virtio_net.h>
#include <uapi/linux/virtio_ids.h>
#include <uapi/linux/vdpa.h>
#include <linux/virtio_config.h>
#include <linux/auxiliary_bus.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/mlx5_ifc_vdpa.h>
#include <linux/mlx5/mpfs.h>
#include "mlx5_vdpa.h"
#include "mlx5_vnet.h"
MODULE_AUTHOR("Eli Cohen <[email protected]>");
MODULE_DESCRIPTION("Mellanox VDPA driver");
MODULE_LICENSE("Dual BSD/GPL");
#define VALID_FEATURES_MASK \
(BIT_ULL(VIRTIO_NET_F_CSUM) | BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) | \
BIT_ULL(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) | BIT_ULL(VIRTIO_NET_F_MTU) | BIT_ULL(VIRTIO_NET_F_MAC) | \
BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) | BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) | \
BIT_ULL(VIRTIO_NET_F_GUEST_ECN) | BIT_ULL(VIRTIO_NET_F_GUEST_UFO) | BIT_ULL(VIRTIO_NET_F_HOST_TSO4) | \
BIT_ULL(VIRTIO_NET_F_HOST_TSO6) | BIT_ULL(VIRTIO_NET_F_HOST_ECN) | BIT_ULL(VIRTIO_NET_F_HOST_UFO) | \
BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) | BIT_ULL(VIRTIO_NET_F_STATUS) | BIT_ULL(VIRTIO_NET_F_CTRL_VQ) | \
BIT_ULL(VIRTIO_NET_F_CTRL_RX) | BIT_ULL(VIRTIO_NET_F_CTRL_VLAN) | \
BIT_ULL(VIRTIO_NET_F_CTRL_RX_EXTRA) | BIT_ULL(VIRTIO_NET_F_GUEST_ANNOUNCE) | \
BIT_ULL(VIRTIO_NET_F_MQ) | BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) | BIT_ULL(VIRTIO_NET_F_HASH_REPORT) | \
BIT_ULL(VIRTIO_NET_F_RSS) | BIT_ULL(VIRTIO_NET_F_RSC_EXT) | BIT_ULL(VIRTIO_NET_F_STANDBY) | \
BIT_ULL(VIRTIO_NET_F_SPEED_DUPLEX) | BIT_ULL(VIRTIO_F_NOTIFY_ON_EMPTY) | \
BIT_ULL(VIRTIO_F_ANY_LAYOUT) | BIT_ULL(VIRTIO_F_VERSION_1) | BIT_ULL(VIRTIO_F_ACCESS_PLATFORM) | \
BIT_ULL(VIRTIO_F_RING_PACKED) | BIT_ULL(VIRTIO_F_ORDER_PLATFORM) | BIT_ULL(VIRTIO_F_SR_IOV))
#define VALID_STATUS_MASK \
(VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER | VIRTIO_CONFIG_S_DRIVER_OK | \
VIRTIO_CONFIG_S_FEATURES_OK | VIRTIO_CONFIG_S_NEEDS_RESET | VIRTIO_CONFIG_S_FAILED)
#define MLX5_FEATURE(_mvdev, _feature) (!!((_mvdev)->actual_features & BIT_ULL(_feature)))
#define MLX5V_UNTAGGED 0x1000
struct mlx5_vdpa_cq_buf {
struct mlx5_frag_buf_ctrl fbc;
struct mlx5_frag_buf frag_buf;
int cqe_size;
int nent;
};
struct mlx5_vdpa_cq {
struct mlx5_core_cq mcq;
struct mlx5_vdpa_cq_buf buf;
struct mlx5_db db;
int cqe;
};
struct mlx5_vdpa_umem {
struct mlx5_frag_buf_ctrl fbc;
struct mlx5_frag_buf frag_buf;
int size;
u32 id;
};
struct mlx5_vdpa_qp {
struct mlx5_core_qp mqp;
struct mlx5_frag_buf frag_buf;
struct mlx5_db db;
u16 head;
bool fw;
};
struct mlx5_vq_restore_info {
u32 num_ent;
u64 desc_addr;
u64 device_addr;
u64 driver_addr;
u16 avail_index;
u16 used_index;
struct msi_map map;
bool ready;
bool restore;
};
struct mlx5_vdpa_virtqueue {
bool ready;
u64 desc_addr;
u64 device_addr;
u64 driver_addr;
u32 num_ent;
/* Resources for implementing the notification channel from the device
* to the driver. fwqp is the firmware end of an RC connection; the
* other end is vqqp used by the driver. cq is where completions are
* reported.
*/
struct mlx5_vdpa_cq cq;
struct mlx5_vdpa_qp fwqp;
struct mlx5_vdpa_qp vqqp;
/* umem resources are required for the virtqueue operation. Their use
* is internal and they must be provided by the driver.
*/
struct mlx5_vdpa_umem umem1;
struct mlx5_vdpa_umem umem2;
struct mlx5_vdpa_umem umem3;
u32 counter_set_id;
bool initialized;
int index;
u32 virtq_id;
struct mlx5_vdpa_net *ndev;
u16 avail_idx;
u16 used_idx;
int fw_state;
struct msi_map map;
/* keep last in the struct */
struct mlx5_vq_restore_info ri;
};
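/*
* Virtqueue index layout: without VIRTIO_NET_F_MQ only indices 0 and 1
* (one RX/TX pair) are data queues, with index 2 used for the control
* virtqueue when VIRTIO_NET_F_CTRL_VQ is negotiated; with MQ the
* control virtqueue sits at max_vqs.
*/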
static bool is_index_valid(struct mlx5_vdpa_dev *mvdev, u16 idx)
{
if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_MQ))) {
if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)))
return idx < 2;
else
return idx < 3;
}
return idx <= mvdev->max_idx;
}
static void free_resources(struct mlx5_vdpa_net *ndev);
static void init_mvqs(struct mlx5_vdpa_net *ndev);
static int setup_driver(struct mlx5_vdpa_dev *mvdev);
static void teardown_driver(struct mlx5_vdpa_net *ndev);
static bool mlx5_vdpa_debug;
#define MLX5_CVQ_MAX_ENT 16
#define MLX5_LOG_VIO_FLAG(_feature) \
do { \
if (features & BIT_ULL(_feature)) \
mlx5_vdpa_info(mvdev, "%s\n", #_feature); \
} while (0)
#define MLX5_LOG_VIO_STAT(_status) \
do { \
if (status & (_status)) \
mlx5_vdpa_info(mvdev, "%s\n", #_status); \
} while (0)
/* TODO: cross-endian support */
static inline bool mlx5_vdpa_is_little_endian(struct mlx5_vdpa_dev *mvdev)
{
return virtio_legacy_is_little_endian() ||
(mvdev->actual_features & BIT_ULL(VIRTIO_F_VERSION_1));
}
static u16 mlx5vdpa16_to_cpu(struct mlx5_vdpa_dev *mvdev, __virtio16 val)
{
return __virtio16_to_cpu(mlx5_vdpa_is_little_endian(mvdev), val);
}
static __virtio16 cpu_to_mlx5vdpa16(struct mlx5_vdpa_dev *mvdev, u16 val)
{
return __cpu_to_virtio16(mlx5_vdpa_is_little_endian(mvdev), val);
}
static u16 ctrl_vq_idx(struct mlx5_vdpa_dev *mvdev)
{
if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_MQ)))
return 2;
return mvdev->max_vqs;
}
static bool is_ctrl_vq_idx(struct mlx5_vdpa_dev *mvdev, u16 idx)
{
return idx == ctrl_vq_idx(mvdev);
}
static void print_status(struct mlx5_vdpa_dev *mvdev, u8 status, bool set)
{
if (status & ~VALID_STATUS_MASK)
mlx5_vdpa_warn(mvdev, "Warning: there are invalid status bits 0x%x\n",
status & ~VALID_STATUS_MASK);
if (!mlx5_vdpa_debug)
return;
mlx5_vdpa_info(mvdev, "driver status %s", set ? "set" : "get");
if (set && !status) {
mlx5_vdpa_info(mvdev, "driver resets the device\n");
return;
}
MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_ACKNOWLEDGE);
MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_DRIVER);
MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_DRIVER_OK);
MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_FEATURES_OK);
MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_NEEDS_RESET);
MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_FAILED);
}
static void print_features(struct mlx5_vdpa_dev *mvdev, u64 features, bool set)
{
if (features & ~VALID_FEATURES_MASK)
mlx5_vdpa_warn(mvdev, "There are invalid feature bits 0x%llx\n",
features & ~VALID_FEATURES_MASK);
if (!mlx5_vdpa_debug)
return;
mlx5_vdpa_info(mvdev, "driver %s feature bits:\n", set ? "sets" : "reads");
if (!features)
mlx5_vdpa_info(mvdev, "all feature bits are cleared\n");
MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CSUM);
MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_CSUM);
MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS);
MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_MTU);
MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_MAC);
MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_TSO4);
MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_TSO6);
MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_ECN);
MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_UFO);
MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_HOST_TSO4);
MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_HOST_TSO6);
MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_HOST_ECN);
MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_HOST_UFO);
MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_MRG_RXBUF);
MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_STATUS);
MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_VQ);
MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_RX);
MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_VLAN);
MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_RX_EXTRA);
MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_ANNOUNCE);
MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_MQ);
MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_MAC_ADDR);
MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_HASH_REPORT);
MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_RSS);
MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_RSC_EXT);
MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_STANDBY);
MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_SPEED_DUPLEX);
MLX5_LOG_VIO_FLAG(VIRTIO_F_NOTIFY_ON_EMPTY);
MLX5_LOG_VIO_FLAG(VIRTIO_F_ANY_LAYOUT);
MLX5_LOG_VIO_FLAG(VIRTIO_F_VERSION_1);
MLX5_LOG_VIO_FLAG(VIRTIO_F_ACCESS_PLATFORM);
MLX5_LOG_VIO_FLAG(VIRTIO_F_RING_PACKED);
MLX5_LOG_VIO_FLAG(VIRTIO_F_ORDER_PLATFORM);
MLX5_LOG_VIO_FLAG(VIRTIO_F_SR_IOV);
}
static int create_tis(struct mlx5_vdpa_net *ndev)
{
struct mlx5_vdpa_dev *mvdev = &ndev->mvdev;
u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
void *tisc;
int err;
tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
MLX5_SET(tisc, tisc, transport_domain, ndev->res.tdn);
err = mlx5_vdpa_create_tis(mvdev, in, &ndev->res.tisn);
if (err)
mlx5_vdpa_warn(mvdev, "create TIS (%d)\n", err);
return err;
}
static void destroy_tis(struct mlx5_vdpa_net *ndev)
{
mlx5_vdpa_destroy_tis(&ndev->mvdev, ndev->res.tisn);
}
#define MLX5_VDPA_CQE_SIZE 64
#define MLX5_VDPA_LOG_CQE_SIZE ilog2(MLX5_VDPA_CQE_SIZE)
static int cq_frag_buf_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_cq_buf *buf, int nent)
{
struct mlx5_frag_buf *frag_buf = &buf->frag_buf;
u8 log_wq_stride = MLX5_VDPA_LOG_CQE_SIZE;
u8 log_wq_sz = MLX5_VDPA_LOG_CQE_SIZE;
int err;
err = mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, nent * MLX5_VDPA_CQE_SIZE, frag_buf,
ndev->mvdev.mdev->priv.numa_node);
if (err)
return err;
mlx5_init_fbc(frag_buf->frags, log_wq_stride, log_wq_sz, &buf->fbc);
buf->cqe_size = MLX5_VDPA_CQE_SIZE;
buf->nent = nent;
return 0;
}
static int umem_frag_buf_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_umem *umem, int size)
{
struct mlx5_frag_buf *frag_buf = &umem->frag_buf;
return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, size, frag_buf,
ndev->mvdev.mdev->priv.numa_node);
}
static void cq_frag_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_cq_buf *buf)
{
mlx5_frag_buf_free(ndev->mvdev.mdev, &buf->frag_buf);
}
static void *get_cqe(struct mlx5_vdpa_cq *vcq, int n)
{
return mlx5_frag_buf_get_wqe(&vcq->buf.fbc, n);
}
static void cq_frag_buf_init(struct mlx5_vdpa_cq *vcq, struct mlx5_vdpa_cq_buf *buf)
{
struct mlx5_cqe64 *cqe64;
void *cqe;
int i;
for (i = 0; i < buf->nent; i++) {
cqe = get_cqe(vcq, i);
cqe64 = cqe;
cqe64->op_own = MLX5_CQE_INVALID << 4;
}
}
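/*
* A CQE is owned by software when its opcode is valid and its ownership
* bit matches the parity of the consumer index; otherwise it still
* belongs to the hardware and must not be processed.
*/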
static void *get_sw_cqe(struct mlx5_vdpa_cq *cq, int n)
{
struct mlx5_cqe64 *cqe64 = get_cqe(cq, n & (cq->cqe - 1));
if (likely(get_cqe_opcode(cqe64) != MLX5_CQE_INVALID) &&
!((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & cq->cqe)))
return cqe64;
return NULL;
}
static void rx_post(struct mlx5_vdpa_qp *vqp, int n)
{
vqp->head += n;
vqp->db.db[0] = cpu_to_be32(vqp->head);
}
static void qp_prepare(struct mlx5_vdpa_net *ndev, bool fw, void *in,
struct mlx5_vdpa_virtqueue *mvq, u32 num_ent)
{
struct mlx5_vdpa_qp *vqp;
__be64 *pas;
void *qpc;
vqp = fw ? &mvq->fwqp : &mvq->vqqp;
MLX5_SET(create_qp_in, in, uid, ndev->mvdev.res.uid);
qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
if (vqp->fw) {
/* The firmware QP is allocated by the driver on the firmware's
* behalf, so we can skip some of the parameters as they will be
* chosen by the firmware.
*/
qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
MLX5_SET(qpc, qpc, rq_type, MLX5_ZERO_LEN_RQ);
MLX5_SET(qpc, qpc, no_sq, 1);
return;
}
MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn);
MLX5_SET(qpc, qpc, mtu, MLX5_QPC_MTU_256_BYTES);
MLX5_SET(qpc, qpc, uar_page, ndev->mvdev.res.uar->index);
MLX5_SET(qpc, qpc, log_page_size, vqp->frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET(qpc, qpc, no_sq, 1);
MLX5_SET(qpc, qpc, cqn_rcv, mvq->cq.mcq.cqn);
MLX5_SET(qpc, qpc, log_rq_size, ilog2(num_ent));
MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ);
pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, in, pas);
mlx5_fill_page_frag_array(&vqp->frag_buf, pas);
}
static int rq_buf_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_qp *vqp, u32 num_ent)
{
return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev,
num_ent * sizeof(struct mlx5_wqe_data_seg), &vqp->frag_buf,
ndev->mvdev.mdev->priv.numa_node);
}
static void rq_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_qp *vqp)
{
mlx5_frag_buf_free(ndev->mvdev.mdev, &vqp->frag_buf);
}
static int qp_create(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq,
struct mlx5_vdpa_qp *vqp)
{
struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
void *qpc;
void *in;
int err;
if (!vqp->fw) {
vqp = &mvq->vqqp;
err = rq_buf_alloc(ndev, vqp, mvq->num_ent);
if (err)
return err;
err = mlx5_db_alloc(ndev->mvdev.mdev, &vqp->db);
if (err)
goto err_db;
inlen += vqp->frag_buf.npages * sizeof(__be64);
}
in = kzalloc(inlen, GFP_KERNEL);
if (!in) {
err = -ENOMEM;
goto err_kzalloc;
}
qp_prepare(ndev, vqp->fw, in, mvq, mvq->num_ent);
qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn);
MLX5_SET(qpc, qpc, mtu, MLX5_QPC_MTU_256_BYTES);
if (!vqp->fw)
MLX5_SET64(qpc, qpc, dbr_addr, vqp->db.dma);
MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
kfree(in);
if (err)
goto err_kzalloc;
vqp->mqp.uid = ndev->mvdev.res.uid;
vqp->mqp.qpn = MLX5_GET(create_qp_out, out, qpn);
if (!vqp->fw)
rx_post(vqp, mvq->num_ent);
return 0;
err_kzalloc:
if (!vqp->fw)
mlx5_db_free(ndev->mvdev.mdev, &vqp->db);
err_db:
if (!vqp->fw)
rq_buf_free(ndev, vqp);
return err;
}
static void qp_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_qp *vqp)
{
u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};
MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
MLX5_SET(destroy_qp_in, in, qpn, vqp->mqp.qpn);
MLX5_SET(destroy_qp_in, in, uid, ndev->mvdev.res.uid);
if (mlx5_cmd_exec_in(ndev->mvdev.mdev, destroy_qp, in))
mlx5_vdpa_warn(&ndev->mvdev, "destroy qp 0x%x\n", vqp->mqp.qpn);
if (!vqp->fw) {
mlx5_db_free(ndev->mvdev.mdev, &vqp->db);
rq_buf_free(ndev, vqp);
}
}
static void *next_cqe_sw(struct mlx5_vdpa_cq *cq)
{
return get_sw_cqe(cq, cq->mcq.cons_index);
}
static int mlx5_vdpa_poll_one(struct mlx5_vdpa_cq *vcq)
{
struct mlx5_cqe64 *cqe64;
cqe64 = next_cqe_sw(vcq);
if (!cqe64)
return -EAGAIN;
vcq->mcq.cons_index++;
return 0;
}
static void mlx5_vdpa_handle_completions(struct mlx5_vdpa_virtqueue *mvq, int num)
{
struct mlx5_vdpa_net *ndev = mvq->ndev;
struct vdpa_callback *event_cb;
event_cb = &ndev->event_cbs[mvq->index];
mlx5_cq_set_ci(&mvq->cq.mcq);
/* Make sure the CQ consumer update is visible to the hardware before
* updating the RX doorbell record.
*/
dma_wmb();
rx_post(&mvq->vqqp, num);
if (event_cb->callback)
event_cb->callback(event_cb->private);
}
static void mlx5_vdpa_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
{
struct mlx5_vdpa_virtqueue *mvq = container_of(mcq, struct mlx5_vdpa_virtqueue, cq.mcq);
struct mlx5_vdpa_net *ndev = mvq->ndev;
void __iomem *uar_page = ndev->mvdev.res.uar->map;
int num = 0;
while (!mlx5_vdpa_poll_one(&mvq->cq)) {
num++;
if (num > mvq->num_ent / 2) {
/* If completions keep coming while we poll, we want to
* let the hardware know that we consumed them by
* updating the doorbell record. We also let the vdpa core
* know about this so it can pass it on to the virtio driver
* in the guest.
*/
mlx5_vdpa_handle_completions(mvq, num);
num = 0;
}
}
if (num)
mlx5_vdpa_handle_completions(mvq, num);
mlx5_cq_arm(&mvq->cq.mcq, MLX5_CQ_DB_REQ_NOT, uar_page, mvq->cq.mcq.cons_index);
}
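/*
* Create a completion queue for the virtqueue's notification channel:
* allocate the doorbell record and CQE buffer, attach the CQ to EQ
* vector 0 and arm it so the first completion raises an event.
*/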
static int cq_create(struct mlx5_vdpa_net *ndev, u16 idx, u32 num_ent)
{
struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
void __iomem *uar_page = ndev->mvdev.res.uar->map;
u32 out[MLX5_ST_SZ_DW(create_cq_out)];
struct mlx5_vdpa_cq *vcq = &mvq->cq;
__be64 *pas;
int inlen;
void *cqc;
void *in;
int err;
int eqn;
err = mlx5_db_alloc(mdev, &vcq->db);
if (err)
return err;
vcq->mcq.set_ci_db = vcq->db.db;
vcq->mcq.arm_db = vcq->db.db + 1;
vcq->mcq.cqe_sz = 64;
err = cq_frag_buf_alloc(ndev, &vcq->buf, num_ent);
if (err)
goto err_db;
cq_frag_buf_init(vcq, &vcq->buf);
inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * vcq->buf.frag_buf.npages;
in = kzalloc(inlen, GFP_KERNEL);
if (!in) {
err = -ENOMEM;
goto err_vzalloc;
}
MLX5_SET(create_cq_in, in, uid, ndev->mvdev.res.uid);
pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas);
mlx5_fill_page_frag_array(&vcq->buf.frag_buf, pas);
cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
MLX5_SET(cqc, cqc, log_page_size, vcq->buf.frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
/* Use vector 0 by default. Consider adding code to choose least used
* vector.
*/
err = mlx5_comp_eqn_get(mdev, 0, &eqn);
if (err)
goto err_vec;
cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
MLX5_SET(cqc, cqc, log_cq_size, ilog2(num_ent));
MLX5_SET(cqc, cqc, uar_page, ndev->mvdev.res.uar->index);
MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
MLX5_SET64(cqc, cqc, dbr_addr, vcq->db.dma);
err = mlx5_core_create_cq(mdev, &vcq->mcq, in, inlen, out, sizeof(out));
if (err)
goto err_vec;
vcq->mcq.comp = mlx5_vdpa_cq_comp;
vcq->cqe = num_ent;
vcq->mcq.set_ci_db = vcq->db.db;
vcq->mcq.arm_db = vcq->db.db + 1;
mlx5_cq_arm(&mvq->cq.mcq, MLX5_CQ_DB_REQ_NOT, uar_page, mvq->cq.mcq.cons_index);
kfree(in);
return 0;
err_vec:
kfree(in);
err_vzalloc:
cq_frag_buf_free(ndev, &vcq->buf);
err_db:
mlx5_db_free(ndev->mvdev.mdev, &vcq->db);
return err;
}
static void cq_destroy(struct mlx5_vdpa_net *ndev, u16 idx)
{
struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
struct mlx5_vdpa_cq *vcq = &mvq->cq;
if (mlx5_core_destroy_cq(mdev, &vcq->mcq)) {
mlx5_vdpa_warn(&ndev->mvdev, "destroy CQ 0x%x\n", vcq->mcq.cqn);
return;
}
cq_frag_buf_free(ndev, &vcq->buf);
mlx5_db_free(ndev->mvdev.mdev, &vcq->db);
}
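/*
* The required size of each of the three umem buffers is a linear
* function of the queue size, with per-umem coefficients taken from the
* device's vdpa emulation capabilities:
* size = param_a * num_ent + param_b.
*/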
static void set_umem_size(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num,
struct mlx5_vdpa_umem **umemp)
{
struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
int p_a;
int p_b;
switch (num) {
case 1:
p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_1_buffer_param_a);
p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_1_buffer_param_b);
*umemp = &mvq->umem1;
break;
case 2:
p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_2_buffer_param_a);
p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_2_buffer_param_b);
*umemp = &mvq->umem2;
break;
case 3:
p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_3_buffer_param_a);
p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_3_buffer_param_b);
*umemp = &mvq->umem3;
break;
}
(*umemp)->size = p_a * mvq->num_ent + p_b;
}
static void umem_frag_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_umem *umem)
{
mlx5_frag_buf_free(ndev->mvdev.mdev, &umem->frag_buf);
}
static int create_umem(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num)
{
int inlen;
u32 out[MLX5_ST_SZ_DW(create_umem_out)] = {};
void *um;
void *in;
int err;
__be64 *pas;
struct mlx5_vdpa_umem *umem;
set_umem_size(ndev, mvq, num, &umem);
err = umem_frag_buf_alloc(ndev, umem, umem->size);
if (err)
return err;
inlen = MLX5_ST_SZ_BYTES(create_umem_in) + MLX5_ST_SZ_BYTES(mtt) * umem->frag_buf.npages;
in = kzalloc(inlen, GFP_KERNEL);
if (!in) {
err = -ENOMEM;
goto err_in;
}
MLX5_SET(create_umem_in, in, opcode, MLX5_CMD_OP_CREATE_UMEM);
MLX5_SET(create_umem_in, in, uid, ndev->mvdev.res.uid);
um = MLX5_ADDR_OF(create_umem_in, in, umem);
MLX5_SET(umem, um, log_page_size, umem->frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(umem, um, num_of_mtt, umem->frag_buf.npages);
pas = (__be64 *)MLX5_ADDR_OF(umem, um, mtt[0]);
mlx5_fill_page_frag_array_perm(&umem->frag_buf, pas, MLX5_MTT_PERM_RW);
err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
if (err) {
mlx5_vdpa_warn(&ndev->mvdev, "create umem(%d)\n", err);
goto err_cmd;
}
kfree(in);
umem->id = MLX5_GET(create_umem_out, out, umem_id);
return 0;
err_cmd:
kfree(in);
err_in:
umem_frag_buf_free(ndev, umem);
return err;
}
static void umem_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num)
{
u32 in[MLX5_ST_SZ_DW(destroy_umem_in)] = {};
u32 out[MLX5_ST_SZ_DW(destroy_umem_out)] = {};
struct mlx5_vdpa_umem *umem;
switch (num) {
case 1:
umem = &mvq->umem1;
break;
case 2:
umem = &mvq->umem2;
break;
case 3:
umem = &mvq->umem3;
break;
}
MLX5_SET(destroy_umem_in, in, opcode, MLX5_CMD_OP_DESTROY_UMEM);
MLX5_SET(destroy_umem_in, in, umem_id, umem->id);
if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out)))
return;
umem_frag_buf_free(ndev, umem);
}
static int umems_create(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
int num;
int err;
for (num = 1; num <= 3; num++) {
err = create_umem(ndev, mvq, num);
if (err)
goto err_umem;
}
return 0;
err_umem:
for (num--; num > 0; num--)
umem_destroy(ndev, mvq, num);
return err;
}
static void umems_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
int num;
for (num = 3; num > 0; num--)
umem_destroy(ndev, mvq, num);
}
static int get_queue_type(struct mlx5_vdpa_net *ndev)
{
u32 type_mask;
type_mask = MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, virtio_queue_type);
/* prefer split queue */
if (type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT)
return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT;
WARN_ON(!(type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED));
return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED;
}
static bool vq_is_tx(u16 idx)
{
return idx % 2;
}
enum {
MLX5_VIRTIO_NET_F_MRG_RXBUF = 2,
MLX5_VIRTIO_NET_F_HOST_ECN = 4,
MLX5_VIRTIO_NET_F_GUEST_ECN = 6,
MLX5_VIRTIO_NET_F_GUEST_TSO6 = 7,
MLX5_VIRTIO_NET_F_GUEST_TSO4 = 8,
MLX5_VIRTIO_NET_F_GUEST_CSUM = 9,
MLX5_VIRTIO_NET_F_CSUM = 10,
MLX5_VIRTIO_NET_F_HOST_TSO6 = 11,
MLX5_VIRTIO_NET_F_HOST_TSO4 = 12,
};
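/*
* Translate negotiated VIRTIO_NET feature bits into the bit positions
* expected by the device's virtio_net_q object
* (queue_feature_bit_mask fields).
*/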
static u16 get_features(u64 features)
{
return (!!(features & BIT_ULL(VIRTIO_NET_F_MRG_RXBUF)) << MLX5_VIRTIO_NET_F_MRG_RXBUF) |
(!!(features & BIT_ULL(VIRTIO_NET_F_HOST_ECN)) << MLX5_VIRTIO_NET_F_HOST_ECN) |
(!!(features & BIT_ULL(VIRTIO_NET_F_GUEST_ECN)) << MLX5_VIRTIO_NET_F_GUEST_ECN) |
(!!(features & BIT_ULL(VIRTIO_NET_F_GUEST_TSO6)) << MLX5_VIRTIO_NET_F_GUEST_TSO6) |
(!!(features & BIT_ULL(VIRTIO_NET_F_GUEST_TSO4)) << MLX5_VIRTIO_NET_F_GUEST_TSO4) |
(!!(features & BIT_ULL(VIRTIO_NET_F_CSUM)) << MLX5_VIRTIO_NET_F_CSUM) |
(!!(features & BIT_ULL(VIRTIO_NET_F_HOST_TSO6)) << MLX5_VIRTIO_NET_F_HOST_TSO6) |
(!!(features & BIT_ULL(VIRTIO_NET_F_HOST_TSO4)) << MLX5_VIRTIO_NET_F_HOST_TSO4);
}
static bool counters_supported(const struct mlx5_vdpa_dev *mvdev)
{
return MLX5_CAP_GEN_64(mvdev->mdev, general_obj_types) &
BIT_ULL(MLX5_OBJ_TYPE_VIRTIO_Q_COUNTERS);
}
static bool msix_mode_supported(struct mlx5_vdpa_dev *mvdev)
{
return MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, event_mode) &
(1 << MLX5_VIRTIO_Q_EVENT_MODE_MSIX_MODE) &&
pci_msix_can_alloc_dyn(mvdev->mdev->pdev);
}
static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
int inlen = MLX5_ST_SZ_BYTES(create_virtio_net_q_in);
u32 out[MLX5_ST_SZ_DW(create_virtio_net_q_out)] = {};
void *obj_context;
u16 mlx_features;
void *cmd_hdr;
void *vq_ctx;
void *in;
int err;
err = umems_create(ndev, mvq);
if (err)
return err;
in = kzalloc(inlen, GFP_KERNEL);
if (!in) {
err = -ENOMEM;
goto err_alloc;
}
mlx_features = get_features(ndev->mvdev.actual_features);
cmd_hdr = MLX5_ADDR_OF(create_virtio_net_q_in, in, general_obj_in_cmd_hdr);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_NET_Q);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
obj_context = MLX5_ADDR_OF(create_virtio_net_q_in, in, obj_context);
MLX5_SET(virtio_net_q_object, obj_context, hw_available_index, mvq->avail_idx);
MLX5_SET(virtio_net_q_object, obj_context, hw_used_index, mvq->used_idx);
MLX5_SET(virtio_net_q_object, obj_context, queue_feature_bit_mask_12_3,
mlx_features >> 3);
MLX5_SET(virtio_net_q_object, obj_context, queue_feature_bit_mask_2_0,
mlx_features & 7);
vq_ctx = MLX5_ADDR_OF(virtio_net_q_object, obj_context, virtio_q_context);
MLX5_SET(virtio_q, vq_ctx, virtio_q_type, get_queue_type(ndev));
if (vq_is_tx(mvq->index))
MLX5_SET(virtio_net_q_object, obj_context, tisn_or_qpn, ndev->res.tisn);
if (mvq->map.virq) {
MLX5_SET(virtio_q, vq_ctx, event_mode, MLX5_VIRTIO_Q_EVENT_MODE_MSIX_MODE);
MLX5_SET(virtio_q, vq_ctx, event_qpn_or_msix, mvq->map.index);
} else {
MLX5_SET(virtio_q, vq_ctx, event_mode, MLX5_VIRTIO_Q_EVENT_MODE_QP_MODE);
MLX5_SET(virtio_q, vq_ctx, event_qpn_or_msix, mvq->fwqp.mqp.qpn);
}
MLX5_SET(virtio_q, vq_ctx, queue_index, mvq->index);
MLX5_SET(virtio_q, vq_ctx, queue_size, mvq->num_ent);
MLX5_SET(virtio_q, vq_ctx, virtio_version_1_0,
!!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_F_VERSION_1)));
MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr);
MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr);
MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr);
MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, ndev->mvdev.mr.mkey);
MLX5_SET(virtio_q, vq_ctx, umem_1_id, mvq->umem1.id);
MLX5_SET(virtio_q, vq_ctx, umem_1_size, mvq->umem1.size);
MLX5_SET(virtio_q, vq_ctx, umem_2_id, mvq->umem2.id);
MLX5_SET(virtio_q, vq_ctx, umem_2_size, mvq->umem2.size);
MLX5_SET(virtio_q, vq_ctx, umem_3_id, mvq->umem3.id);
MLX5_SET(virtio_q, vq_ctx, umem_3_size, mvq->umem3.size);
MLX5_SET(virtio_q, vq_ctx, pd, ndev->mvdev.res.pdn);
if (counters_supported(&ndev->mvdev))
MLX5_SET(virtio_q, vq_ctx, counter_set_id, mvq->counter_set_id);
err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
if (err)
goto err_cmd;
mvq->fw_state = MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT;
kfree(in);
mvq->virtq_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
return 0;
err_cmd:
kfree(in);
err_alloc:
umems_destroy(ndev, mvq);
return err;
}
static void destroy_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
u32 in[MLX5_ST_SZ_DW(destroy_virtio_net_q_in)] = {};
u32 out[MLX5_ST_SZ_DW(destroy_virtio_net_q_out)] = {};
MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.opcode,
MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.obj_id, mvq->virtq_id);
MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.uid, ndev->mvdev.res.uid);
MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.obj_type,
MLX5_OBJ_TYPE_VIRTIO_NET_Q);
if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) {
mlx5_vdpa_warn(&ndev->mvdev, "destroy virtqueue 0x%x\n", mvq->virtq_id);
return;
}
mvq->fw_state = MLX5_VIRTIO_NET_Q_OBJECT_NONE;
umems_destroy(ndev, mvq);
}
static u32 get_rqpn(struct mlx5_vdpa_virtqueue *mvq, bool fw)
{
return fw ? mvq->vqqp.mqp.qpn : mvq->fwqp.mqp.qpn;
}
static u32 get_qpn(struct mlx5_vdpa_virtqueue *mvq, bool fw)
{
return fw ? mvq->fwqp.mqp.qpn : mvq->vqqp.mqp.qpn;
}
static void alloc_inout(struct mlx5_vdpa_net *ndev, int cmd, void **in, int *inlen, void **out,
int *outlen, u32 qpn, u32 rqpn)
{
void *qpc;
void *pp;
switch (cmd) {
case MLX5_CMD_OP_2RST_QP:
*inlen = MLX5_ST_SZ_BYTES(qp_2rst_in);
*outlen = MLX5_ST_SZ_BYTES(qp_2rst_out);
*in = kzalloc(*inlen, GFP_KERNEL);
*out = kzalloc(*outlen, GFP_KERNEL);
if (!*in || !*out)
goto outerr;
MLX5_SET(qp_2rst_in, *in, opcode, cmd);
MLX5_SET(qp_2rst_in, *in, uid, ndev->mvdev.res.uid);
MLX5_SET(qp_2rst_in, *in, qpn, qpn);
break;
case MLX5_CMD_OP_RST2INIT_QP:
*inlen = MLX5_ST_SZ_BYTES(rst2init_qp_in);
*outlen = MLX5_ST_SZ_BYTES(rst2init_qp_out);
*in = kzalloc(*inlen, GFP_KERNEL);
*out = kzalloc(MLX5_ST_SZ_BYTES(rst2init_qp_out), GFP_KERNEL);
if (!*in || !*out)
goto outerr;
MLX5_SET(rst2init_qp_in, *in, opcode, cmd);
MLX5_SET(rst2init_qp_in, *in, uid, ndev->mvdev.res.uid);
MLX5_SET(rst2init_qp_in, *in, qpn, qpn);
qpc = MLX5_ADDR_OF(rst2init_qp_in, *in, qpc);
MLX5_SET(qpc, qpc, remote_qpn, rqpn);
MLX5_SET(qpc, qpc, rwe, 1);
pp = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
MLX5_SET(ads, pp, vhca_port_num, 1);
break;
case MLX5_CMD_OP_INIT2RTR_QP:
*inlen = MLX5_ST_SZ_BYTES(init2rtr_qp_in);
*outlen = MLX5_ST_SZ_BYTES(init2rtr_qp_out);
*in = kzalloc(*inlen, GFP_KERNEL);
*out = kzalloc(MLX5_ST_SZ_BYTES(init2rtr_qp_out), GFP_KERNEL);
if (!*in || !*out)
goto outerr;
MLX5_SET(init2rtr_qp_in, *in, opcode, cmd);
MLX5_SET(init2rtr_qp_in, *in, uid, ndev->mvdev.res.uid);
MLX5_SET(init2rtr_qp_in, *in, qpn, qpn);
qpc = MLX5_ADDR_OF(rst2init_qp_in, *in, qpc);
MLX5_SET(qpc, qpc, mtu, MLX5_QPC_MTU_256_BYTES);
MLX5_SET(qpc, qpc, log_msg_max, 30);
MLX5_SET(qpc, qpc, remote_qpn, rqpn);
pp = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
MLX5_SET(ads, pp, fl, 1);
break;
case MLX5_CMD_OP_RTR2RTS_QP:
*inlen = MLX5_ST_SZ_BYTES(rtr2rts_qp_in);
*outlen = MLX5_ST_SZ_BYTES(rtr2rts_qp_out);
*in = kzalloc(*inlen, GFP_KERNEL);
*out = kzalloc(MLX5_ST_SZ_BYTES(rtr2rts_qp_out), GFP_KERNEL);
if (!*in || !*out)
goto outerr;
MLX5_SET(rtr2rts_qp_in, *in, opcode, cmd);
MLX5_SET(rtr2rts_qp_in, *in, uid, ndev->mvdev.res.uid);
MLX5_SET(rtr2rts_qp_in, *in, qpn, qpn);
qpc = MLX5_ADDR_OF(rst2init_qp_in, *in, qpc);
pp = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
MLX5_SET(ads, pp, ack_timeout, 14);
MLX5_SET(qpc, qpc, retry_count, 7);
MLX5_SET(qpc, qpc, rnr_retry, 7);
break;
default:
goto outerr_nullify;
}
return;
outerr:
kfree(*in);
kfree(*out);
outerr_nullify:
*in = NULL;
*out = NULL;
}
static void free_inout(void *in, void *out)
{
kfree(in);
kfree(out);
}
/* Two QPs are used by each virtqueue: one by the driver and one by the
* firmware. The fw argument indicates whether the QP being modified is
* the one used by the firmware.
*/
static int modify_qp(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, bool fw, int cmd)
{
int outlen;
int inlen;
void *out;
void *in;
int err;
alloc_inout(ndev, cmd, &in, &inlen, &out, &outlen, get_qpn(mvq, fw), get_rqpn(mvq, fw));
if (!in || !out)
return -ENOMEM;
err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, outlen);
free_inout(in, out);
return err;
}
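/*
* Drive both QPs through the RC state machine (RESET -> INIT -> RTR)
* and finally move the firmware QP to RTS so it can send toward the
* driver QP.
*/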
static int connect_qps(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
int err;
err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_2RST_QP);
if (err)
return err;
err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_2RST_QP);
if (err)
return err;
err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_RST2INIT_QP);
if (err)
return err;
err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_RST2INIT_QP);
if (err)
return err;
err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_INIT2RTR_QP);
if (err)
return err;
err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_INIT2RTR_QP);
if (err)
return err;
return modify_qp(ndev, mvq, true, MLX5_CMD_OP_RTR2RTS_QP);
}
struct mlx5_virtq_attr {
u8 state;
u16 available_index;
u16 used_index;
};
static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq,
struct mlx5_virtq_attr *attr)
{
int outlen = MLX5_ST_SZ_BYTES(query_virtio_net_q_out);
u32 in[MLX5_ST_SZ_DW(query_virtio_net_q_in)] = {};
void *out;
void *obj_context;
void *cmd_hdr;
int err;
out = kzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
cmd_hdr = MLX5_ADDR_OF(query_virtio_net_q_in, in, general_obj_in_cmd_hdr);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_NET_Q);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->virtq_id);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, outlen);
if (err)
goto err_cmd;
obj_context = MLX5_ADDR_OF(query_virtio_net_q_out, out, obj_context);
memset(attr, 0, sizeof(*attr));
attr->state = MLX5_GET(virtio_net_q_object, obj_context, state);
attr->available_index = MLX5_GET(virtio_net_q_object, obj_context, hw_available_index);
attr->used_index = MLX5_GET(virtio_net_q_object, obj_context, hw_used_index);
kfree(out);
return 0;
err_cmd:
kfree(out);
return err;
}
static bool is_valid_state_change(int oldstate, int newstate)
{
switch (oldstate) {
case MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT:
return newstate == MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY;
case MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY:
return newstate == MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND;
case MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND:
case MLX5_VIRTIO_NET_Q_OBJECT_STATE_ERR:
default:
return false;
}
}
static int modify_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int state)
{
int inlen = MLX5_ST_SZ_BYTES(modify_virtio_net_q_in);
u32 out[MLX5_ST_SZ_DW(modify_virtio_net_q_out)] = {};
void *obj_context;
void *cmd_hdr;
void *in;
int err;
if (mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_NONE)
return 0;
if (!is_valid_state_change(mvq->fw_state, state))
return -EINVAL;
in = kzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
cmd_hdr = MLX5_ADDR_OF(modify_virtio_net_q_in, in, general_obj_in_cmd_hdr);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_NET_Q);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->virtq_id);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
obj_context = MLX5_ADDR_OF(modify_virtio_net_q_in, in, obj_context);
MLX5_SET64(virtio_net_q_object, obj_context, modify_field_select,
MLX5_VIRTQ_MODIFY_MASK_STATE);
MLX5_SET(virtio_net_q_object, obj_context, state, state);
err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
kfree(in);
if (!err)
mvq->fw_state = state;
return err;
}
static int counter_set_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
u32 in[MLX5_ST_SZ_DW(create_virtio_q_counters_in)] = {};
u32 out[MLX5_ST_SZ_DW(create_virtio_q_counters_out)] = {};
void *cmd_hdr;
int err;
if (!counters_supported(&ndev->mvdev))
return 0;
cmd_hdr = MLX5_ADDR_OF(create_virtio_q_counters_in, in, hdr);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_Q_COUNTERS);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out));
if (err)
return err;
mvq->counter_set_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
return 0;
}
static void counter_set_dealloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
u32 in[MLX5_ST_SZ_DW(destroy_virtio_q_counters_in)] = {};
u32 out[MLX5_ST_SZ_DW(destroy_virtio_q_counters_out)] = {};
if (!counters_supported(&ndev->mvdev))
return;
MLX5_SET(destroy_virtio_q_counters_in, in, hdr.opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
MLX5_SET(destroy_virtio_q_counters_in, in, hdr.obj_id, mvq->counter_set_id);
MLX5_SET(destroy_virtio_q_counters_in, in, hdr.uid, ndev->mvdev.res.uid);
MLX5_SET(destroy_virtio_q_counters_in, in, hdr.obj_type, MLX5_OBJ_TYPE_VIRTIO_Q_COUNTERS);
if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out)))
mlx5_vdpa_warn(&ndev->mvdev, "dealloc counter set 0x%x\n", mvq->counter_set_id);
}
static irqreturn_t mlx5_vdpa_int_handler(int irq, void *priv)
{
struct vdpa_callback *cb = priv;
if (cb->callback)
return cb->callback(cb->private);
return IRQ_HANDLED;
}
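/*
* Grab a free entry from the pre-allocated MSI-X pool and bind its
* interrupt to the virtqueue's event callback. If no entry is available
* or request_irq() fails, the queue falls back to QP-based notification
* (see create_virtqueue()).
*/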
static void alloc_vector(struct mlx5_vdpa_net *ndev,
struct mlx5_vdpa_virtqueue *mvq)
{
struct mlx5_vdpa_irq_pool *irqp = &ndev->irqp;
struct mlx5_vdpa_irq_pool_entry *ent;
int err;
int i;
for (i = 0; i < irqp->num_ent; i++) {
ent = &irqp->entries[i];
if (!ent->used) {
snprintf(ent->name, MLX5_VDPA_IRQ_NAME_LEN, "%s-vq-%d",
dev_name(&ndev->mvdev.vdev.dev), mvq->index);
ent->dev_id = &ndev->event_cbs[mvq->index];
err = request_irq(ent->map.virq, mlx5_vdpa_int_handler, 0,
ent->name, ent->dev_id);
if (err)
return;
ent->used = true;
mvq->map = ent->map;
return;
}
}
}
static void dealloc_vector(struct mlx5_vdpa_net *ndev,
struct mlx5_vdpa_virtqueue *mvq)
{
struct mlx5_vdpa_irq_pool *irqp = &ndev->irqp;
int i;
for (i = 0; i < irqp->num_ent; i++)
if (mvq->map.virq == irqp->entries[i].map.virq) {
free_irq(mvq->map.virq, irqp->entries[i].dev_id);
irqp->entries[i].used = false;
return;
}
}
static int setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
u16 idx = mvq->index;
int err;
if (!mvq->num_ent)
return 0;
if (mvq->initialized)
return 0;
err = cq_create(ndev, idx, mvq->num_ent);
if (err)
return err;
err = qp_create(ndev, mvq, &mvq->fwqp);
if (err)
goto err_fwqp;
err = qp_create(ndev, mvq, &mvq->vqqp);
if (err)
goto err_vqqp;
err = connect_qps(ndev, mvq);
if (err)
goto err_connect;
err = counter_set_alloc(ndev, mvq);
if (err)
goto err_connect;
alloc_vector(ndev, mvq);
err = create_virtqueue(ndev, mvq);
if (err)
goto err_vq;
if (mvq->ready) {
err = modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY);
if (err) {
mlx5_vdpa_warn(&ndev->mvdev, "failed to modify to ready vq idx %d(%d)\n",
idx, err);
goto err_modify;
}
}
mvq->initialized = true;
return 0;
err_modify:
destroy_virtqueue(ndev, mvq);
err_vq:
dealloc_vector(ndev, mvq);
counter_set_dealloc(ndev, mvq);
err_connect:
qp_destroy(ndev, &mvq->vqqp);
err_vqqp:
qp_destroy(ndev, &mvq->fwqp);
err_fwqp:
cq_destroy(ndev, idx);
return err;
}
static void suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
struct mlx5_virtq_attr attr;
if (!mvq->initialized)
return;
if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY)
return;
if (modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND))
mlx5_vdpa_warn(&ndev->mvdev, "modify to suspend failed\n");
if (query_virtqueue(ndev, mvq, &attr)) {
mlx5_vdpa_warn(&ndev->mvdev, "failed to query virtqueue\n");
return;
}
mvq->avail_idx = attr.available_index;
mvq->used_idx = attr.used_index;
}
static void suspend_vqs(struct mlx5_vdpa_net *ndev)
{
int i;
for (i = 0; i < ndev->mvdev.max_vqs; i++)
suspend_vq(ndev, &ndev->vqs[i]);
}
static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
if (!mvq->initialized)
return;
suspend_vq(ndev, mvq);
destroy_virtqueue(ndev, mvq);
dealloc_vector(ndev, mvq);
counter_set_dealloc(ndev, mvq);
qp_destroy(ndev, &mvq->vqqp);
qp_destroy(ndev, &mvq->fwqp);
cq_destroy(ndev, mvq->index);
mvq->initialized = false;
}
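/*
* Build the RQ table used for RSS. Only receive virtqueues (even
* indices) are listed, so the actual size is half the number of active
* virtqueues, rounded up to a power of two.
*/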
static int create_rqt(struct mlx5_vdpa_net *ndev)
{
int rqt_table_size = roundup_pow_of_two(ndev->rqt_size);
int act_sz = roundup_pow_of_two(ndev->cur_num_vqs / 2);
__be32 *list;
void *rqtc;
int inlen;
void *in;
int i, j;
int err;
inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + rqt_table_size * MLX5_ST_SZ_BYTES(rq_num);
in = kzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
MLX5_SET(create_rqt_in, in, uid, ndev->mvdev.res.uid);
rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q);
MLX5_SET(rqtc, rqtc, rqt_max_size, rqt_table_size);
list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]);
for (i = 0, j = 0; i < act_sz; i++, j += 2)
list[i] = cpu_to_be32(ndev->vqs[j % ndev->cur_num_vqs].virtq_id);
MLX5_SET(rqtc, rqtc, rqt_actual_size, act_sz);
err = mlx5_vdpa_create_rqt(&ndev->mvdev, in, inlen, &ndev->res.rqtn);
kfree(in);
if (err)
return err;
return 0;
}
#define MLX5_MODIFY_RQT_NUM_RQS ((u64)1)
static int modify_rqt(struct mlx5_vdpa_net *ndev, int num)
{
int act_sz = roundup_pow_of_two(num / 2);
__be32 *list;
void *rqtc;
int inlen;
void *in;
int i, j;
int err;
inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + act_sz * MLX5_ST_SZ_BYTES(rq_num);
in = kzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
MLX5_SET(modify_rqt_in, in, uid, ndev->mvdev.res.uid);
MLX5_SET64(modify_rqt_in, in, bitmask, MLX5_MODIFY_RQT_NUM_RQS);
rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);
MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q);
list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]);
for (i = 0, j = 0; i < act_sz; i++, j = j + 2)
list[i] = cpu_to_be32(ndev->vqs[j % num].virtq_id);
MLX5_SET(rqtc, rqtc, rqt_actual_size, act_sz);
err = mlx5_vdpa_modify_rqt(&ndev->mvdev, in, inlen, ndev->res.rqtn);
kfree(in);
if (err)
return err;
return 0;
}
static void destroy_rqt(struct mlx5_vdpa_net *ndev)
{
mlx5_vdpa_destroy_rqt(&ndev->mvdev, ndev->res.rqtn);
}
static int create_tir(struct mlx5_vdpa_net *ndev)
{
#define HASH_IP_L4PORTS \
(MLX5_HASH_FIELD_SEL_SRC_IP | MLX5_HASH_FIELD_SEL_DST_IP | MLX5_HASH_FIELD_SEL_L4_SPORT | \
MLX5_HASH_FIELD_SEL_L4_DPORT)
static const u8 rx_hash_toeplitz_key[] = { 0x2c, 0xc6, 0x81, 0xd1, 0x5b, 0xdb, 0xf4, 0xf7,
0xfc, 0xa2, 0x83, 0x19, 0xdb, 0x1a, 0x3e, 0x94,
0x6b, 0x9e, 0x38, 0xd9, 0x2c, 0x9c, 0x03, 0xd1,
0xad, 0x99, 0x44, 0xa7, 0xd9, 0x56, 0x3d, 0x59,
0x06, 0x3c, 0x25, 0xf3, 0xfc, 0x1f, 0xdc, 0x2a };
void *rss_key;
void *outer;
void *tirc;
void *in;
int err;
in = kzalloc(MLX5_ST_SZ_BYTES(create_tir_in), GFP_KERNEL);
if (!in)
return -ENOMEM;
MLX5_SET(create_tir_in, in, uid, ndev->mvdev.res.uid);
tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ);
rss_key = MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
memcpy(rss_key, rx_hash_toeplitz_key, sizeof(rx_hash_toeplitz_key));
outer = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
MLX5_SET(rx_hash_field_select, outer, l3_prot_type, MLX5_L3_PROT_TYPE_IPV4);
MLX5_SET(rx_hash_field_select, outer, l4_prot_type, MLX5_L4_PROT_TYPE_TCP);
MLX5_SET(rx_hash_field_select, outer, selected_fields, HASH_IP_L4PORTS);
MLX5_SET(tirc, tirc, indirect_table, ndev->res.rqtn);
MLX5_SET(tirc, tirc, transport_domain, ndev->res.tdn);
err = mlx5_vdpa_create_tir(&ndev->mvdev, in, &ndev->res.tirn);
kfree(in);
if (err)
return err;
mlx5_vdpa_add_tirn(ndev);
return err;
}
static void destroy_tir(struct mlx5_vdpa_net *ndev)
{
mlx5_vdpa_remove_tirn(ndev);
mlx5_vdpa_destroy_tir(&ndev->mvdev, ndev->res.tirn);
}
#define MAX_STEERING_ENT 0x8000
#define MAX_STEERING_GROUPS 2
#if defined(CONFIG_MLX5_VDPA_STEERING_DEBUG)
#define NUM_DESTS 2
#else
#define NUM_DESTS 1
#endif
static int add_steering_counters(struct mlx5_vdpa_net *ndev,
struct macvlan_node *node,
struct mlx5_flow_act *flow_act,
struct mlx5_flow_destination *dests)
{
#if defined(CONFIG_MLX5_VDPA_STEERING_DEBUG)
int err;
node->ucast_counter.counter = mlx5_fc_create(ndev->mvdev.mdev, false);
if (IS_ERR(node->ucast_counter.counter))
return PTR_ERR(node->ucast_counter.counter);
node->mcast_counter.counter = mlx5_fc_create(ndev->mvdev.mdev, false);
if (IS_ERR(node->mcast_counter.counter)) {
err = PTR_ERR(node->mcast_counter.counter);
goto err_mcast_counter;
}
dests[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
return 0;
err_mcast_counter:
mlx5_fc_destroy(ndev->mvdev.mdev, node->ucast_counter.counter);
return err;
#else
return 0;
#endif
}
static void remove_steering_counters(struct mlx5_vdpa_net *ndev,
struct macvlan_node *node)
{
#if defined(CONFIG_MLX5_VDPA_STEERING_DEBUG)
mlx5_fc_destroy(ndev->mvdev.mdev, node->mcast_counter.counter);
mlx5_fc_destroy(ndev->mvdev.mdev, node->ucast_counter.counter);
#endif
}
static int mlx5_vdpa_add_mac_vlan_rules(struct mlx5_vdpa_net *ndev, u8 *mac,
struct macvlan_node *node)
{
struct mlx5_flow_destination dests[NUM_DESTS] = {};
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_spec *spec;
void *headers_c;
void *headers_v;
u8 *dmac_c;
u8 *dmac_v;
int err;
u16 vid;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
return -ENOMEM;
vid = key2vid(node->macvlan);
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c, outer_headers.dmac_47_16);
dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v, outer_headers.dmac_47_16);
eth_broadcast_addr(dmac_c);
ether_addr_copy(dmac_v, mac);
if (ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VLAN)) {
MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, first_vid);
}
if (node->tagged) {
MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, vid);
}
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
dests[0].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
dests[0].tir_num = ndev->res.tirn;
err = add_steering_counters(ndev, node, &flow_act, dests);
if (err)
goto out_free;
#if defined(CONFIG_MLX5_VDPA_STEERING_DEBUG)
dests[1].counter_id = mlx5_fc_id(node->ucast_counter.counter);
#endif
node->ucast_rule = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, dests, NUM_DESTS);
if (IS_ERR(node->ucast_rule)) {
err = PTR_ERR(node->ucast_rule);
goto err_ucast;
}
#if defined(CONFIG_MLX5_VDPA_STEERING_DEBUG)
dests[1].counter_id = mlx5_fc_id(node->mcast_counter.counter);
#endif
memset(dmac_c, 0, ETH_ALEN);
memset(dmac_v, 0, ETH_ALEN);
dmac_c[0] = 1;
dmac_v[0] = 1;
node->mcast_rule = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, dests, NUM_DESTS);
if (IS_ERR(node->mcast_rule)) {
err = PTR_ERR(node->mcast_rule);
goto err_mcast;
}
kvfree(spec);
mlx5_vdpa_add_rx_counters(ndev, node);
return 0;
err_mcast:
mlx5_del_flow_rules(node->ucast_rule);
err_ucast:
remove_steering_counters(ndev, node);
out_free:
kvfree(spec);
return err;
}
static void mlx5_vdpa_del_mac_vlan_rules(struct mlx5_vdpa_net *ndev,
struct macvlan_node *node)
{
mlx5_vdpa_remove_rx_counters(ndev, node);
mlx5_del_flow_rules(node->ucast_rule);
mlx5_del_flow_rules(node->mcast_rule);
}
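/* Pack a MAC/VLAN pair into the 64-bit key used by the macvlan hash table:
* the VLAN ID occupies bits 63..48 and the MAC address the low 48 bits.
* Untagged entries use the reserved MLX5V_UNTAGGED value for the VLAN field.
*/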
static u64 search_val(u8 *mac, u16 vlan, bool tagged)
{
u64 val;
if (!tagged)
vlan = MLX5V_UNTAGGED;
val = (u64)vlan << 48 |
(u64)mac[0] << 40 |
(u64)mac[1] << 32 |
(u64)mac[2] << 24 |
(u64)mac[3] << 16 |
(u64)mac[4] << 8 |
(u64)mac[5];
return val;
}
static struct macvlan_node *mac_vlan_lookup(struct mlx5_vdpa_net *ndev, u64 value)
{
struct macvlan_node *pos;
u32 idx;
idx = hash_64(value, 8); /* 8 == ilog2(MLX5V_MACVLAN_SIZE) */
hlist_for_each_entry(pos, &ndev->macvlan_hash[idx], hlist) {
if (pos->macvlan == value)
return pos;
}
return NULL;
}
static int mac_vlan_add(struct mlx5_vdpa_net *ndev, u8 *mac, u16 vid, bool tagged)
{
struct macvlan_node *ptr;
u64 val;
u32 idx;
int err;
val = search_val(mac, vid, tagged);
if (mac_vlan_lookup(ndev, val))
return -EEXIST;
ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return -ENOMEM;
ptr->tagged = tagged;
ptr->macvlan = val;
ptr->ndev = ndev;
err = mlx5_vdpa_add_mac_vlan_rules(ndev, ndev->config.mac, ptr);
if (err)
goto err_add;
idx = hash_64(val, 8);
hlist_add_head(&ptr->hlist, &ndev->macvlan_hash[idx]);
return 0;
err_add:
kfree(ptr);
return err;
}
static void mac_vlan_del(struct mlx5_vdpa_net *ndev, u8 *mac, u16 vlan, bool tagged)
{
struct macvlan_node *ptr;
ptr = mac_vlan_lookup(ndev, search_val(mac, vlan, tagged));
if (!ptr)
return;
hlist_del(&ptr->hlist);
mlx5_vdpa_del_mac_vlan_rules(ndev, ptr);
remove_steering_counters(ndev, ptr);
kfree(ptr);
}
static void clear_mac_vlan_table(struct mlx5_vdpa_net *ndev)
{
struct macvlan_node *pos;
struct hlist_node *n;
int i;
for (i = 0; i < MLX5V_MACVLAN_SIZE; i++) {
hlist_for_each_entry_safe(pos, n, &ndev->macvlan_hash[i], hlist) {
hlist_del(&pos->hlist);
mlx5_vdpa_del_mac_vlan_rules(ndev, pos);
remove_steering_counters(ndev, pos);
kfree(pos);
}
}
}
static int setup_steering(struct mlx5_vdpa_net *ndev)
{
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_namespace *ns;
int err;
ft_attr.max_fte = MAX_STEERING_ENT;
ft_attr.autogroup.max_num_groups = MAX_STEERING_GROUPS;
ns = mlx5_get_flow_namespace(ndev->mvdev.mdev, MLX5_FLOW_NAMESPACE_BYPASS);
if (!ns) {
mlx5_vdpa_warn(&ndev->mvdev, "failed to get flow namespace\n");
return -EOPNOTSUPP;
}
ndev->rxft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
if (IS_ERR(ndev->rxft)) {
mlx5_vdpa_warn(&ndev->mvdev, "failed to create flow table\n");
return PTR_ERR(ndev->rxft);
}
mlx5_vdpa_add_rx_flow_table(ndev);
err = mac_vlan_add(ndev, ndev->config.mac, 0, false);
if (err)
goto err_add;
return 0;
err_add:
mlx5_vdpa_remove_rx_flow_table(ndev);
mlx5_destroy_flow_table(ndev->rxft);
return err;
}
static void teardown_steering(struct mlx5_vdpa_net *ndev)
{
clear_mac_vlan_table(ndev);
mlx5_vdpa_remove_rx_flow_table(ndev);
mlx5_destroy_flow_table(ndev->rxft);
}
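/* Handle VIRTIO_NET_CTRL_MAC commands from the control virtqueue. For
* MAC_ADDR_SET, update the MPFS table with the new MAC and recreate the RX
* steering rules; on failure, try to restore the original MAC.
*/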
static virtio_net_ctrl_ack handle_ctrl_mac(struct mlx5_vdpa_dev *mvdev, u8 cmd)
{
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
struct mlx5_control_vq *cvq = &mvdev->cvq;
virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
struct mlx5_core_dev *pfmdev;
size_t read;
u8 mac[ETH_ALEN], mac_back[ETH_ALEN];
pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev));
switch (cmd) {
case VIRTIO_NET_CTRL_MAC_ADDR_SET:
read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, (void *)mac, ETH_ALEN);
if (read != ETH_ALEN)
break;
if (!memcmp(ndev->config.mac, mac, ETH_ALEN)) {
status = VIRTIO_NET_OK;
break;
}
if (is_zero_ether_addr(mac))
break;
if (!is_zero_ether_addr(ndev->config.mac)) {
if (mlx5_mpfs_del_mac(pfmdev, ndev->config.mac)) {
mlx5_vdpa_warn(mvdev, "failed to delete old MAC %pM from MPFS table\n",
ndev->config.mac);
break;
}
}
if (mlx5_mpfs_add_mac(pfmdev, mac)) {
mlx5_vdpa_warn(mvdev, "failed to insert new MAC %pM into MPFS table\n",
mac);
break;
}
/* Back up the original MAC address so that it can be restored if adding
* the forward rules fails.
*/
memcpy(mac_back, ndev->config.mac, ETH_ALEN);
memcpy(ndev->config.mac, mac, ETH_ALEN);
/* The flow table entry needs to be recreated so that packets destined to
* the new MAC are forwarded again.
*/
mac_vlan_del(ndev, mac_back, 0, false);
if (mac_vlan_add(ndev, ndev->config.mac, 0, false)) {
mlx5_vdpa_warn(mvdev, "failed to insert forward rules, try to restore\n");
/* Although it hardly run here, we still need double check */
if (is_zero_ether_addr(mac_back)) {
mlx5_vdpa_warn(mvdev, "restore mac failed: Original MAC is zero\n");
break;
}
/* Try to restore the original MAC address to the MPFS table, and restore
* the forward rule entry.
*/
if (mlx5_mpfs_del_mac(pfmdev, ndev->config.mac)) {
mlx5_vdpa_warn(mvdev, "restore mac failed: delete MAC %pM from MPFS table failed\n",
ndev->config.mac);
}
if (mlx5_mpfs_add_mac(pfmdev, mac_back)) {
mlx5_vdpa_warn(mvdev, "restore mac failed: insert old MAC %pM into MPFS table failed\n",
mac_back);
}
memcpy(ndev->config.mac, mac_back, ETH_ALEN);
if (mac_vlan_add(ndev, ndev->config.mac, 0, false))
mlx5_vdpa_warn(mvdev, "restore forward rules failed: insert forward rules failed\n");
break;
}
status = VIRTIO_NET_OK;
break;
default:
break;
}
return status;
}
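/* Resize the number of active data virtqueue pairs. When shrinking, the RQT
* is modified first so that no traffic is steered to queues about to be torn
* down; when growing, the new queues are created before the RQT is expanded.
*/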
static int change_num_qps(struct mlx5_vdpa_dev *mvdev, int newqps)
{
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
int cur_qps = ndev->cur_num_vqs / 2;
int err;
int i;
if (cur_qps > newqps) {
err = modify_rqt(ndev, 2 * newqps);
if (err)
return err;
for (i = ndev->cur_num_vqs - 1; i >= 2 * newqps; i--)
teardown_vq(ndev, &ndev->vqs[i]);
ndev->cur_num_vqs = 2 * newqps;
} else {
ndev->cur_num_vqs = 2 * newqps;
for (i = cur_qps * 2; i < 2 * newqps; i++) {
err = setup_vq(ndev, &ndev->vqs[i]);
if (err)
goto clean_added;
}
err = modify_rqt(ndev, 2 * newqps);
if (err)
goto clean_added;
}
return 0;
clean_added:
for (--i; i >= 2 * cur_qps; --i)
teardown_vq(ndev, &ndev->vqs[i]);
ndev->cur_num_vqs = 2 * cur_qps;
return err;
}
static virtio_net_ctrl_ack handle_ctrl_mq(struct mlx5_vdpa_dev *mvdev, u8 cmd)
{
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
struct mlx5_control_vq *cvq = &mvdev->cvq;
struct virtio_net_ctrl_mq mq;
size_t read;
u16 newqps;
switch (cmd) {
case VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET:
/* This mq feature check aligns with the pre-existing userspace
* implementation.
*
* Without it, an untrusted driver could send a fake multiqueue config
* request to a non-mq device, which may cause a kernel panic due to
* uninitialized resources for the extra vqs. Even with a well-behaved
* guest driver, it is not expected to allow changing the number of vqs
* on a non-mq device.
*/
if (!MLX5_FEATURE(mvdev, VIRTIO_NET_F_MQ))
break;
read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, (void *)&mq, sizeof(mq));
if (read != sizeof(mq))
break;
newqps = mlx5vdpa16_to_cpu(mvdev, mq.virtqueue_pairs);
if (newqps < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
newqps > ndev->rqt_size)
break;
if (ndev->cur_num_vqs == 2 * newqps) {
status = VIRTIO_NET_OK;
break;
}
if (!change_num_qps(mvdev, newqps))
status = VIRTIO_NET_OK;
break;
default:
break;
}
return status;
}
static virtio_net_ctrl_ack handle_ctrl_vlan(struct mlx5_vdpa_dev *mvdev, u8 cmd)
{
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
struct mlx5_control_vq *cvq = &mvdev->cvq;
__virtio16 vlan;
size_t read;
u16 id;
if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VLAN)))
return status;
switch (cmd) {
case VIRTIO_NET_CTRL_VLAN_ADD:
read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, &vlan, sizeof(vlan));
if (read != sizeof(vlan))
break;
id = mlx5vdpa16_to_cpu(mvdev, vlan);
if (mac_vlan_add(ndev, ndev->config.mac, id, true))
break;
status = VIRTIO_NET_OK;
break;
case VIRTIO_NET_CTRL_VLAN_DEL:
read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, &vlan, sizeof(vlan));
if (read != sizeof(vlan))
break;
id = mlx5vdpa16_to_cpu(mvdev, vlan);
mac_vlan_del(ndev, ndev->config.mac, id, true);
status = VIRTIO_NET_OK;
break;
default:
break;
}
return status;
}
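/* Work handler that services the control virtqueue: pull the command header,
* dispatch it to the class-specific handler and push the status back to the
* driver. The work re-queues itself after completing a descriptor, so at most
* one command is handled per invocation.
*/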
static void mlx5_cvq_kick_handler(struct work_struct *work)
{
virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
struct virtio_net_ctrl_hdr ctrl;
struct mlx5_vdpa_wq_ent *wqent;
struct mlx5_vdpa_dev *mvdev;
struct mlx5_control_vq *cvq;
struct mlx5_vdpa_net *ndev;
size_t read, write;
int err;
wqent = container_of(work, struct mlx5_vdpa_wq_ent, work);
mvdev = wqent->mvdev;
ndev = to_mlx5_vdpa_ndev(mvdev);
cvq = &mvdev->cvq;
down_write(&ndev->reslock);
if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
goto out;
if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)))
goto out;
if (!cvq->ready)
goto out;
while (true) {
err = vringh_getdesc_iotlb(&cvq->vring, &cvq->riov, &cvq->wiov, &cvq->head,
GFP_ATOMIC);
if (err <= 0)
break;
read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, &ctrl, sizeof(ctrl));
if (read != sizeof(ctrl))
break;
cvq->received_desc++;
switch (ctrl.class) {
case VIRTIO_NET_CTRL_MAC:
status = handle_ctrl_mac(mvdev, ctrl.cmd);
break;
case VIRTIO_NET_CTRL_MQ:
status = handle_ctrl_mq(mvdev, ctrl.cmd);
break;
case VIRTIO_NET_CTRL_VLAN:
status = handle_ctrl_vlan(mvdev, ctrl.cmd);
break;
default:
break;
}
/* Make sure data is written before advancing index */
smp_wmb();
write = vringh_iov_push_iotlb(&cvq->vring, &cvq->wiov, &status, sizeof(status));
vringh_complete_iotlb(&cvq->vring, cvq->head, write);
vringh_kiov_cleanup(&cvq->riov);
vringh_kiov_cleanup(&cvq->wiov);
if (vringh_need_notify_iotlb(&cvq->vring))
vringh_notify(&cvq->vring);
cvq->completed_desc++;
queue_work(mvdev->wq, &wqent->work);
break;
}
out:
up_write(&ndev->reslock);
}
static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
struct mlx5_vdpa_virtqueue *mvq;
if (!is_index_valid(mvdev, idx))
return;
if (unlikely(is_ctrl_vq_idx(mvdev, idx))) {
if (!mvdev->wq || !mvdev->cvq.ready)
return;
queue_work(mvdev->wq, &ndev->cvq_ent.work);
return;
}
mvq = &ndev->vqs[idx];
if (unlikely(!mvq->ready))
return;
iowrite16(idx, ndev->mvdev.res.kick_addr);
}
static int mlx5_vdpa_set_vq_address(struct vdpa_device *vdev, u16 idx, u64 desc_area,
u64 driver_area, u64 device_area)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
struct mlx5_vdpa_virtqueue *mvq;
if (!is_index_valid(mvdev, idx))
return -EINVAL;
if (is_ctrl_vq_idx(mvdev, idx)) {
mvdev->cvq.desc_addr = desc_area;
mvdev->cvq.device_addr = device_area;
mvdev->cvq.driver_addr = driver_area;
return 0;
}
mvq = &ndev->vqs[idx];
mvq->desc_addr = desc_area;
mvq->device_addr = device_area;
mvq->driver_addr = driver_area;
return 0;
}
static void mlx5_vdpa_set_vq_num(struct vdpa_device *vdev, u16 idx, u32 num)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
struct mlx5_vdpa_virtqueue *mvq;
if (!is_index_valid(mvdev, idx) || is_ctrl_vq_idx(mvdev, idx))
return;
mvq = &ndev->vqs[idx];
mvq->num_ent = num;
}
static void mlx5_vdpa_set_vq_cb(struct vdpa_device *vdev, u16 idx, struct vdpa_callback *cb)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
ndev->event_cbs[idx] = *cb;
if (is_ctrl_vq_idx(mvdev, idx))
mvdev->cvq.event_cb = *cb;
}
static void mlx5_cvq_notify(struct vringh *vring)
{
struct mlx5_control_vq *cvq = container_of(vring, struct mlx5_control_vq, vring);
if (!cvq->event_cb.callback)
return;
cvq->event_cb.callback(cvq->event_cb.private);
}
static void set_cvq_ready(struct mlx5_vdpa_dev *mvdev, bool ready)
{
struct mlx5_control_vq *cvq = &mvdev->cvq;
cvq->ready = ready;
if (!ready)
return;
cvq->vring.notify = mlx5_cvq_notify;
}
static void mlx5_vdpa_set_vq_ready(struct vdpa_device *vdev, u16 idx, bool ready)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
struct mlx5_vdpa_virtqueue *mvq;
int err;
if (!mvdev->actual_features)
return;
if (!is_index_valid(mvdev, idx))
return;
if (is_ctrl_vq_idx(mvdev, idx)) {
set_cvq_ready(mvdev, ready);
return;
}
mvq = &ndev->vqs[idx];
if (!ready) {
suspend_vq(ndev, mvq);
} else {
err = modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY);
if (err) {
mlx5_vdpa_warn(mvdev, "modify VQ %d to ready failed (%d)\n", idx, err);
ready = false;
}
}
mvq->ready = ready;
}
static bool mlx5_vdpa_get_vq_ready(struct vdpa_device *vdev, u16 idx)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
if (!is_index_valid(mvdev, idx))
return false;
if (is_ctrl_vq_idx(mvdev, idx))
return mvdev->cvq.ready;
return ndev->vqs[idx].ready;
}
static int mlx5_vdpa_set_vq_state(struct vdpa_device *vdev, u16 idx,
const struct vdpa_vq_state *state)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
struct mlx5_vdpa_virtqueue *mvq;
if (!is_index_valid(mvdev, idx))
return -EINVAL;
if (is_ctrl_vq_idx(mvdev, idx)) {
mvdev->cvq.vring.last_avail_idx = state->split.avail_index;
return 0;
}
mvq = &ndev->vqs[idx];
if (mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY) {
mlx5_vdpa_warn(mvdev, "can't modify available index\n");
return -EINVAL;
}
mvq->used_idx = state->split.avail_index;
mvq->avail_idx = state->split.avail_index;
return 0;
}
static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa_vq_state *state)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
struct mlx5_vdpa_virtqueue *mvq;
struct mlx5_virtq_attr attr;
int err;
if (!is_index_valid(mvdev, idx))
return -EINVAL;
if (is_ctrl_vq_idx(mvdev, idx)) {
state->split.avail_index = mvdev->cvq.vring.last_avail_idx;
return 0;
}
mvq = &ndev->vqs[idx];
/* If the virtq object was destroyed, use the value saved at
* the last minute of suspend_vq. This caters for userspace
* that cares about emulating the index after vq is stopped.
*/
if (!mvq->initialized) {
/* Firmware returns a wrong value for the available index.
* Since both values should be identical, we take the value of
* used_idx which is reported correctly.
*/
state->split.avail_index = mvq->used_idx;
return 0;
}
err = query_virtqueue(ndev, mvq, &attr);
if (err) {
mlx5_vdpa_warn(mvdev, "failed to query virtqueue\n");
return err;
}
state->split.avail_index = attr.used_index;
return 0;
}
static u32 mlx5_vdpa_get_vq_align(struct vdpa_device *vdev)
{
return PAGE_SIZE;
}
static u32 mlx5_vdpa_get_vq_group(struct vdpa_device *vdev, u16 idx)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
if (is_ctrl_vq_idx(mvdev, idx))
return MLX5_VDPA_CVQ_GROUP;
return MLX5_VDPA_DATAVQ_GROUP;
}
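/* Translate the virtio emulation capability bits reported by firmware into
* the corresponding VIRTIO_NET feature bits.
*/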
static u64 mlx_to_virtio_features(u16 dev_features)
{
u64 result = 0;
if (dev_features & BIT_ULL(MLX5_VIRTIO_NET_F_MRG_RXBUF))
result |= BIT_ULL(VIRTIO_NET_F_MRG_RXBUF);
if (dev_features & BIT_ULL(MLX5_VIRTIO_NET_F_HOST_ECN))
result |= BIT_ULL(VIRTIO_NET_F_HOST_ECN);
if (dev_features & BIT_ULL(MLX5_VIRTIO_NET_F_GUEST_ECN))
result |= BIT_ULL(VIRTIO_NET_F_GUEST_ECN);
if (dev_features & BIT_ULL(MLX5_VIRTIO_NET_F_GUEST_TSO6))
result |= BIT_ULL(VIRTIO_NET_F_GUEST_TSO6);
if (dev_features & BIT_ULL(MLX5_VIRTIO_NET_F_GUEST_TSO4))
result |= BIT_ULL(VIRTIO_NET_F_GUEST_TSO4);
if (dev_features & BIT_ULL(MLX5_VIRTIO_NET_F_GUEST_CSUM))
result |= BIT_ULL(VIRTIO_NET_F_GUEST_CSUM);
if (dev_features & BIT_ULL(MLX5_VIRTIO_NET_F_CSUM))
result |= BIT_ULL(VIRTIO_NET_F_CSUM);
if (dev_features & BIT_ULL(MLX5_VIRTIO_NET_F_HOST_TSO6))
result |= BIT_ULL(VIRTIO_NET_F_HOST_TSO6);
if (dev_features & BIT_ULL(MLX5_VIRTIO_NET_F_HOST_TSO4))
result |= BIT_ULL(VIRTIO_NET_F_HOST_TSO4);
return result;
}
static u64 get_supported_features(struct mlx5_core_dev *mdev)
{
u64 mlx_vdpa_features = 0;
u16 dev_features;
dev_features = MLX5_CAP_DEV_VDPA_EMULATION(mdev, device_features_bits_mask);
mlx_vdpa_features |= mlx_to_virtio_features(dev_features);
if (MLX5_CAP_DEV_VDPA_EMULATION(mdev, virtio_version_1_0))
mlx_vdpa_features |= BIT_ULL(VIRTIO_F_VERSION_1);
mlx_vdpa_features |= BIT_ULL(VIRTIO_F_ACCESS_PLATFORM);
mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_CTRL_VQ);
mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR);
mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_MQ);
mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_STATUS);
mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_MTU);
mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_CTRL_VLAN);
mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_MAC);
return mlx_vdpa_features;
}
static u64 mlx5_vdpa_get_device_features(struct vdpa_device *vdev)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
print_features(mvdev, ndev->mvdev.mlx_features, false);
return ndev->mvdev.mlx_features;
}
static int verify_driver_features(struct mlx5_vdpa_dev *mvdev, u64 features)
{
/* Minimum features to expect */
if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM)))
return -EOPNOTSUPP;
/* Double-check the feature combination sent down by the driver.
* Fail feature sets that lack a required dependency.
*
* Per the VIRTIO v1.1 specification, section 5.1.3.1 "Feature bit
* requirements": "VIRTIO_NET_F_MQ Requires VIRTIO_NET_F_CTRL_VQ".
* Rejecting invalid feature sets from untrusted drivers ensures the
* assumptions made in is_index_valid() and is_ctrl_vq_idx() are not
* compromised.
*/
if ((features & (BIT_ULL(VIRTIO_NET_F_MQ) | BIT_ULL(VIRTIO_NET_F_CTRL_VQ))) ==
BIT_ULL(VIRTIO_NET_F_MQ))
return -EINVAL;
return 0;
}
static int setup_virtqueues(struct mlx5_vdpa_dev *mvdev)
{
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
int err;
int i;
for (i = 0; i < mvdev->max_vqs; i++) {
err = setup_vq(ndev, &ndev->vqs[i]);
if (err)
goto err_vq;
}
return 0;
err_vq:
for (--i; i >= 0; i--)
teardown_vq(ndev, &ndev->vqs[i]);
return err;
}
static void teardown_virtqueues(struct mlx5_vdpa_net *ndev)
{
struct mlx5_vdpa_virtqueue *mvq;
int i;
for (i = ndev->mvdev.max_vqs - 1; i >= 0; i--) {
mvq = &ndev->vqs[i];
if (!mvq->initialized)
continue;
teardown_vq(ndev, mvq);
}
}
static void update_cvq_info(struct mlx5_vdpa_dev *mvdev)
{
if (MLX5_FEATURE(mvdev, VIRTIO_NET_F_CTRL_VQ)) {
if (MLX5_FEATURE(mvdev, VIRTIO_NET_F_MQ)) {
/* MQ supported. CVQ index is right above the last data virtqueue's */
mvdev->max_idx = mvdev->max_vqs;
} else {
/* Only CVQ supported. Data virtqueues occupy indices 0 and 1;
* CVQ gets index 2.
*/
mvdev->max_idx = 2;
}
} else {
/* Two data virtqueues only: one for rx and one for tx */
mvdev->max_idx = 1;
}
}
static u8 query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
{
u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {};
u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {};
int err;
MLX5_SET(query_vport_state_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_STATE);
MLX5_SET(query_vport_state_in, in, op_mod, opmod);
MLX5_SET(query_vport_state_in, in, vport_number, vport);
if (vport)
MLX5_SET(query_vport_state_in, in, other_vport, 1);
err = mlx5_cmd_exec_inout(mdev, query_vport_state, in, out);
if (err)
return 0;
return MLX5_GET(query_vport_state_out, out, state);
}
static bool get_link_state(struct mlx5_vdpa_dev *mvdev)
{
if (query_vport_state(mvdev->mdev, MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT, 0) ==
VPORT_STATE_UP)
return true;
return false;
}
static void update_carrier(struct work_struct *work)
{
struct mlx5_vdpa_wq_ent *wqent;
struct mlx5_vdpa_dev *mvdev;
struct mlx5_vdpa_net *ndev;
wqent = container_of(work, struct mlx5_vdpa_wq_ent, work);
mvdev = wqent->mvdev;
ndev = to_mlx5_vdpa_ndev(mvdev);
if (get_link_state(mvdev))
ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP);
else
ndev->config.status &= cpu_to_mlx5vdpa16(mvdev, ~VIRTIO_NET_S_LINK_UP);
if (ndev->config_cb.callback)
ndev->config_cb.callback(ndev->config_cb.private);
kfree(wqent);
}
static int queue_link_work(struct mlx5_vdpa_net *ndev)
{
struct mlx5_vdpa_wq_ent *wqent;
wqent = kzalloc(sizeof(*wqent), GFP_ATOMIC);
if (!wqent)
return -ENOMEM;
wqent->mvdev = &ndev->mvdev;
INIT_WORK(&wqent->work, update_carrier);
queue_work(ndev->mvdev.wq, &wqent->work);
return 0;
}
static int event_handler(struct notifier_block *nb, unsigned long event, void *param)
{
struct mlx5_vdpa_net *ndev = container_of(nb, struct mlx5_vdpa_net, nb);
struct mlx5_eqe *eqe = param;
int ret = NOTIFY_DONE;
if (event == MLX5_EVENT_TYPE_PORT_CHANGE) {
switch (eqe->sub_type) {
case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
if (queue_link_work(ndev))
return NOTIFY_DONE;
ret = NOTIFY_OK;
break;
default:
return NOTIFY_DONE;
}
return ret;
}
return ret;
}
static void register_link_notifier(struct mlx5_vdpa_net *ndev)
{
if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_STATUS)))
return;
ndev->nb.notifier_call = event_handler;
mlx5_notifier_register(ndev->mvdev.mdev, &ndev->nb);
ndev->nb_registered = true;
queue_link_work(ndev);
}
static void unregister_link_notifier(struct mlx5_vdpa_net *ndev)
{
if (!ndev->nb_registered)
return;
ndev->nb_registered = false;
mlx5_notifier_unregister(ndev->mvdev.mdev, &ndev->nb);
if (ndev->mvdev.wq)
flush_workqueue(ndev->mvdev.wq);
}
static int mlx5_vdpa_set_driver_features(struct vdpa_device *vdev, u64 features)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
int err;
print_features(mvdev, features, true);
err = verify_driver_features(mvdev, features);
if (err)
return err;
ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features;
if (ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_MQ))
ndev->rqt_size = mlx5vdpa16_to_cpu(mvdev, ndev->config.max_virtqueue_pairs);
else
ndev->rqt_size = 1;
/* Device must start with 1 queue pair, as per VIRTIO v1.2 spec, section
* 5.1.6.5.5 "Device operation in multiqueue mode":
*
* Multiqueue is disabled by default.
* The driver enables multiqueue by sending a command using class
* VIRTIO_NET_CTRL_MQ. The command selects the mode of multiqueue
* operation, as follows: ...
*/
ndev->cur_num_vqs = 2;
update_cvq_info(mvdev);
return err;
}
static void mlx5_vdpa_set_config_cb(struct vdpa_device *vdev, struct vdpa_callback *cb)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
ndev->config_cb = *cb;
}
#define MLX5_VDPA_MAX_VQ_ENTRIES 256
static u16 mlx5_vdpa_get_vq_num_max(struct vdpa_device *vdev)
{
return MLX5_VDPA_MAX_VQ_ENTRIES;
}
static u32 mlx5_vdpa_get_device_id(struct vdpa_device *vdev)
{
return VIRTIO_ID_NET;
}
static u32 mlx5_vdpa_get_vendor_id(struct vdpa_device *vdev)
{
return PCI_VENDOR_ID_MELLANOX;
}
static u8 mlx5_vdpa_get_status(struct vdpa_device *vdev)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
print_status(mvdev, ndev->mvdev.status, false);
return ndev->mvdev.status;
}
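/* Snapshot a virtqueue's state (indices, addresses, size, map) so it can be
* recreated identically after the driver is torn down.
*/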
static int save_channel_info(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
struct mlx5_vq_restore_info *ri = &mvq->ri;
struct mlx5_virtq_attr attr = {};
int err;
if (mvq->initialized) {
err = query_virtqueue(ndev, mvq, &attr);
if (err)
return err;
}
ri->avail_index = attr.available_index;
ri->used_index = attr.used_index;
ri->ready = mvq->ready;
ri->num_ent = mvq->num_ent;
ri->desc_addr = mvq->desc_addr;
ri->device_addr = mvq->device_addr;
ri->driver_addr = mvq->driver_addr;
ri->map = mvq->map;
ri->restore = true;
return 0;
}
static int save_channels_info(struct mlx5_vdpa_net *ndev)
{
int i;
for (i = 0; i < ndev->mvdev.max_vqs; i++) {
memset(&ndev->vqs[i].ri, 0, sizeof(ndev->vqs[i].ri));
save_channel_info(ndev, &ndev->vqs[i]);
}
return 0;
}
static void mlx5_clear_vqs(struct mlx5_vdpa_net *ndev)
{
int i;
for (i = 0; i < ndev->mvdev.max_vqs; i++)
memset(&ndev->vqs[i], 0, offsetof(struct mlx5_vdpa_virtqueue, ri));
}
static void restore_channels_info(struct mlx5_vdpa_net *ndev)
{
struct mlx5_vdpa_virtqueue *mvq;
struct mlx5_vq_restore_info *ri;
int i;
mlx5_clear_vqs(ndev);
init_mvqs(ndev);
for (i = 0; i < ndev->mvdev.max_vqs; i++) {
mvq = &ndev->vqs[i];
ri = &mvq->ri;
if (!ri->restore)
continue;
mvq->avail_idx = ri->avail_index;
mvq->used_idx = ri->used_index;
mvq->ready = ri->ready;
mvq->num_ent = ri->num_ent;
mvq->desc_addr = ri->desc_addr;
mvq->device_addr = ri->device_addr;
mvq->driver_addr = ri->driver_addr;
mvq->map = ri->map;
}
}
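/* Switch to a new memory mapping: suspend and save the virtqueues, tear down
* the driver resources, rebuild the MR for the new IOTLB and, if the device
* is running, restore the virtqueues and set up the driver again.
*/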
static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
struct vhost_iotlb *iotlb, unsigned int asid)
{
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
int err;
suspend_vqs(ndev);
err = save_channels_info(ndev);
if (err)
goto err_mr;
teardown_driver(ndev);
mlx5_vdpa_destroy_mr_asid(mvdev, asid);
err = mlx5_vdpa_create_mr(mvdev, iotlb, asid);
if (err)
goto err_mr;
if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK) || mvdev->suspended)
goto err_mr;
restore_channels_info(ndev);
err = setup_driver(mvdev);
if (err)
goto err_setup;
return 0;
err_setup:
mlx5_vdpa_destroy_mr_asid(mvdev, asid);
err_mr:
return err;
}
/* reslock must be held for this function */
static int setup_driver(struct mlx5_vdpa_dev *mvdev)
{
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
int err;
WARN_ON(!rwsem_is_locked(&ndev->reslock));
if (ndev->setup) {
mlx5_vdpa_warn(mvdev, "setup driver called for already setup driver\n");
err = 0;
goto out;
}
mlx5_vdpa_add_debugfs(ndev);
err = setup_virtqueues(mvdev);
if (err) {
mlx5_vdpa_warn(mvdev, "setup_virtqueues\n");
goto err_setup;
}
err = create_rqt(ndev);
if (err) {
mlx5_vdpa_warn(mvdev, "create_rqt\n");
goto err_rqt;
}
err = create_tir(ndev);
if (err) {
mlx5_vdpa_warn(mvdev, "create_tir\n");
goto err_tir;
}
err = setup_steering(ndev);
if (err) {
mlx5_vdpa_warn(mvdev, "setup_steering\n");
goto err_fwd;
}
ndev->setup = true;
return 0;
err_fwd:
destroy_tir(ndev);
err_tir:
destroy_rqt(ndev);
err_rqt:
teardown_virtqueues(ndev);
err_setup:
mlx5_vdpa_remove_debugfs(ndev->debugfs);
out:
return err;
}
/* reslock must be held for this function */
static void teardown_driver(struct mlx5_vdpa_net *ndev)
{
WARN_ON(!rwsem_is_locked(&ndev->reslock));
if (!ndev->setup)
return;
mlx5_vdpa_remove_debugfs(ndev->debugfs);
ndev->debugfs = NULL;
teardown_steering(ndev);
destroy_tir(ndev);
destroy_rqt(ndev);
teardown_virtqueues(ndev);
ndev->setup = false;
}
static void clear_vqs_ready(struct mlx5_vdpa_net *ndev)
{
int i;
for (i = 0; i < ndev->mvdev.max_vqs; i++)
ndev->vqs[i].ready = false;
ndev->mvdev.cvq.ready = false;
}
static int setup_cvq_vring(struct mlx5_vdpa_dev *mvdev)
{
struct mlx5_control_vq *cvq = &mvdev->cvq;
int err = 0;
if (mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))
err = vringh_init_iotlb(&cvq->vring, mvdev->actual_features,
MLX5_CVQ_MAX_ENT, false,
(struct vring_desc *)(uintptr_t)cvq->desc_addr,
(struct vring_avail *)(uintptr_t)cvq->driver_addr,
(struct vring_used *)(uintptr_t)cvq->device_addr);
return err;
}
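/* Handle status register writes. On the transition to DRIVER_OK, set up the
* software control VQ vring, register for link state events and bring up the
* datapath.
*/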
static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
int err;
print_status(mvdev, status, true);
down_write(&ndev->reslock);
if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) {
if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
err = setup_cvq_vring(mvdev);
if (err) {
mlx5_vdpa_warn(mvdev, "failed to setup control VQ vring\n");
goto err_setup;
}
register_link_notifier(ndev);
err = setup_driver(mvdev);
if (err) {
mlx5_vdpa_warn(mvdev, "failed to setup driver\n");
goto err_driver;
}
} else {
mlx5_vdpa_warn(mvdev, "did not expect DRIVER_OK to be cleared\n");
goto err_clear;
}
}
ndev->mvdev.status = status;
up_write(&ndev->reslock);
return;
err_driver:
unregister_link_notifier(ndev);
err_setup:
mlx5_vdpa_destroy_mr(&ndev->mvdev);
ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED;
err_clear:
up_write(&ndev->reslock);
}
static void init_group_to_asid_map(struct mlx5_vdpa_dev *mvdev)
{
int i;
/* By default, all groups are mapped to ASID 0 */
for (i = 0; i < MLX5_VDPA_NUMVQ_GROUPS; i++)
mvdev->group2asid[i] = 0;
}
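/* Device reset: tear down the datapath, clear all virtqueue and feature
* state, reset the CVQ counters and, when supported, recreate the default MR.
*/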
static int mlx5_vdpa_reset(struct vdpa_device *vdev)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
print_status(mvdev, 0, true);
mlx5_vdpa_info(mvdev, "performing device reset\n");
down_write(&ndev->reslock);
unregister_link_notifier(ndev);
teardown_driver(ndev);
clear_vqs_ready(ndev);
mlx5_vdpa_destroy_mr(&ndev->mvdev);
ndev->mvdev.status = 0;
ndev->mvdev.suspended = false;
ndev->cur_num_vqs = 0;
ndev->mvdev.cvq.received_desc = 0;
ndev->mvdev.cvq.completed_desc = 0;
memset(ndev->event_cbs, 0, sizeof(*ndev->event_cbs) * (mvdev->max_vqs + 1));
ndev->mvdev.actual_features = 0;
init_group_to_asid_map(mvdev);
++mvdev->generation;
if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
if (mlx5_vdpa_create_mr(mvdev, NULL, 0))
mlx5_vdpa_warn(mvdev, "create MR failed\n");
}
up_write(&ndev->reslock);
return 0;
}
static size_t mlx5_vdpa_get_config_size(struct vdpa_device *vdev)
{
return sizeof(struct virtio_net_config);
}
static void mlx5_vdpa_get_config(struct vdpa_device *vdev, unsigned int offset, void *buf,
unsigned int len)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
if (offset + len <= sizeof(struct virtio_net_config))
memcpy(buf, (u8 *)&ndev->config + offset, len);
}
static void mlx5_vdpa_set_config(struct vdpa_device *vdev, unsigned int offset, const void *buf,
unsigned int len)
{
/* not supported */
}
static u32 mlx5_vdpa_get_generation(struct vdpa_device *vdev)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
return mvdev->generation;
}
static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
unsigned int asid)
{
bool change_map;
int err;
err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map, asid);
if (err) {
mlx5_vdpa_warn(mvdev, "set map failed(%d)\n", err);
return err;
}
if (change_map)
err = mlx5_vdpa_change_map(mvdev, iotlb, asid);
return err;
}
static int mlx5_vdpa_set_map(struct vdpa_device *vdev, unsigned int asid,
struct vhost_iotlb *iotlb)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
int err = -EINVAL;
down_write(&ndev->reslock);
err = set_map_data(mvdev, iotlb, asid);
up_write(&ndev->reslock);
return err;
}
static struct device *mlx5_get_vq_dma_dev(struct vdpa_device *vdev, u16 idx)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
if (is_ctrl_vq_idx(mvdev, idx))
return &vdev->dev;
return mvdev->vdev.dma_dev;
}
static void free_irqs(struct mlx5_vdpa_net *ndev)
{
struct mlx5_vdpa_irq_pool_entry *ent;
int i;
if (!msix_mode_supported(&ndev->mvdev))
return;
if (!ndev->irqp.entries)
return;
for (i = ndev->irqp.num_ent - 1; i >= 0; i--) {
ent = ndev->irqp.entries + i;
if (ent->map.virq)
pci_msix_free_irq(ndev->mvdev.mdev->pdev, ent->map);
}
kfree(ndev->irqp.entries);
}
static void mlx5_vdpa_free(struct vdpa_device *vdev)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_core_dev *pfmdev;
struct mlx5_vdpa_net *ndev;
ndev = to_mlx5_vdpa_ndev(mvdev);
free_resources(ndev);
mlx5_vdpa_destroy_mr(mvdev);
if (!is_zero_ether_addr(ndev->config.mac)) {
pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev));
mlx5_mpfs_del_mac(pfmdev, ndev->config.mac);
}
mlx5_vdpa_free_resources(&ndev->mvdev);
free_irqs(ndev);
kfree(ndev->event_cbs);
kfree(ndev->vqs);
}
static struct vdpa_notification_area mlx5_get_vq_notification(struct vdpa_device *vdev, u16 idx)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct vdpa_notification_area ret = {};
struct mlx5_vdpa_net *ndev;
phys_addr_t addr;
if (!is_index_valid(mvdev, idx) || is_ctrl_vq_idx(mvdev, idx))
return ret;
/* If the SF BAR size is smaller than PAGE_SIZE, do not use direct
* notification, to avoid the risk of mapping pages that contain the BARs
* of more than one SF.
*/
if (MLX5_CAP_GEN(mvdev->mdev, log_min_sf_size) + 12 < PAGE_SHIFT)
return ret;
ndev = to_mlx5_vdpa_ndev(mvdev);
addr = (phys_addr_t)ndev->mvdev.res.phys_kick_addr;
ret.addr = addr;
ret.size = PAGE_SIZE;
return ret;
}
static int mlx5_get_vq_irq(struct vdpa_device *vdev, u16 idx)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
struct mlx5_vdpa_virtqueue *mvq;
if (!is_index_valid(mvdev, idx))
return -EINVAL;
if (is_ctrl_vq_idx(mvdev, idx))
return -EOPNOTSUPP;
mvq = &ndev->vqs[idx];
if (!mvq->map.virq)
return -EOPNOTSUPP;
return mvq->map.virq;
}
static u64 mlx5_vdpa_get_driver_features(struct vdpa_device *vdev)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
return mvdev->actual_features;
}
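/* Query the virtio_q_counters object attached to a virtqueue and return the
* number of descriptors received from and completed back to the driver.
*/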
static int counter_set_query(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq,
u64 *received_desc, u64 *completed_desc)
{
u32 in[MLX5_ST_SZ_DW(query_virtio_q_counters_in)] = {};
u32 out[MLX5_ST_SZ_DW(query_virtio_q_counters_out)] = {};
void *cmd_hdr;
void *ctx;
int err;
if (!counters_supported(&ndev->mvdev))
return -EOPNOTSUPP;
if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY)
return -EAGAIN;
cmd_hdr = MLX5_ADDR_OF(query_virtio_q_counters_in, in, hdr);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_Q_COUNTERS);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->counter_set_id);
err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out));
if (err)
return err;
ctx = MLX5_ADDR_OF(query_virtio_q_counters_out, out, counters);
*received_desc = MLX5_GET64(virtio_q_counters, ctx, received_desc);
*completed_desc = MLX5_GET64(virtio_q_counters, ctx, completed_desc);
return 0;
}
static int mlx5_vdpa_get_vendor_vq_stats(struct vdpa_device *vdev, u16 idx,
struct sk_buff *msg,
struct netlink_ext_ack *extack)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
struct mlx5_vdpa_virtqueue *mvq;
struct mlx5_control_vq *cvq;
u64 received_desc;
u64 completed_desc;
int err = 0;
down_read(&ndev->reslock);
if (!is_index_valid(mvdev, idx)) {
NL_SET_ERR_MSG_MOD(extack, "virtqueue index is not valid");
err = -EINVAL;
goto out_err;
}
if (idx == ctrl_vq_idx(mvdev)) {
cvq = &mvdev->cvq;
received_desc = cvq->received_desc;
completed_desc = cvq->completed_desc;
goto out;
}
mvq = &ndev->vqs[idx];
err = counter_set_query(ndev, mvq, &received_desc, &completed_desc);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "failed to query hardware");
goto out_err;
}
out:
err = -EMSGSIZE;
if (nla_put_string(msg, VDPA_ATTR_DEV_VENDOR_ATTR_NAME, "received_desc"))
goto out_err;
if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_VENDOR_ATTR_VALUE, received_desc,
VDPA_ATTR_PAD))
goto out_err;
if (nla_put_string(msg, VDPA_ATTR_DEV_VENDOR_ATTR_NAME, "completed_desc"))
goto out_err;
if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_VENDOR_ATTR_VALUE, completed_desc,
VDPA_ATTR_PAD))
goto out_err;
err = 0;
out_err:
up_read(&ndev->reslock);
return err;
}
static void mlx5_vdpa_cvq_suspend(struct mlx5_vdpa_dev *mvdev)
{
struct mlx5_control_vq *cvq;
if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)))
return;
cvq = &mvdev->cvq;
cvq->ready = false;
}
static int mlx5_vdpa_suspend(struct vdpa_device *vdev)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
struct mlx5_vdpa_virtqueue *mvq;
int i;
mlx5_vdpa_info(mvdev, "suspending device\n");
down_write(&ndev->reslock);
unregister_link_notifier(ndev);
for (i = 0; i < ndev->cur_num_vqs; i++) {
mvq = &ndev->vqs[i];
suspend_vq(ndev, mvq);
}
mlx5_vdpa_cvq_suspend(mvdev);
mvdev->suspended = true;
up_write(&ndev->reslock);
return 0;
}
static int mlx5_set_group_asid(struct vdpa_device *vdev, u32 group,
unsigned int asid)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
if (group >= MLX5_VDPA_NUMVQ_GROUPS)
return -EINVAL;
mvdev->group2asid[group] = asid;
return 0;
}
static const struct vdpa_config_ops mlx5_vdpa_ops = {
.set_vq_address = mlx5_vdpa_set_vq_address,
.set_vq_num = mlx5_vdpa_set_vq_num,
.kick_vq = mlx5_vdpa_kick_vq,
.set_vq_cb = mlx5_vdpa_set_vq_cb,
.set_vq_ready = mlx5_vdpa_set_vq_ready,
.get_vq_ready = mlx5_vdpa_get_vq_ready,
.set_vq_state = mlx5_vdpa_set_vq_state,
.get_vq_state = mlx5_vdpa_get_vq_state,
.get_vendor_vq_stats = mlx5_vdpa_get_vendor_vq_stats,
.get_vq_notification = mlx5_get_vq_notification,
.get_vq_irq = mlx5_get_vq_irq,
.get_vq_align = mlx5_vdpa_get_vq_align,
.get_vq_group = mlx5_vdpa_get_vq_group,
.get_device_features = mlx5_vdpa_get_device_features,
.set_driver_features = mlx5_vdpa_set_driver_features,
.get_driver_features = mlx5_vdpa_get_driver_features,
.set_config_cb = mlx5_vdpa_set_config_cb,
.get_vq_num_max = mlx5_vdpa_get_vq_num_max,
.get_device_id = mlx5_vdpa_get_device_id,
.get_vendor_id = mlx5_vdpa_get_vendor_id,
.get_status = mlx5_vdpa_get_status,
.set_status = mlx5_vdpa_set_status,
.reset = mlx5_vdpa_reset,
.get_config_size = mlx5_vdpa_get_config_size,
.get_config = mlx5_vdpa_get_config,
.set_config = mlx5_vdpa_set_config,
.get_generation = mlx5_vdpa_get_generation,
.set_map = mlx5_vdpa_set_map,
.set_group_asid = mlx5_set_group_asid,
.get_vq_dma_dev = mlx5_get_vq_dma_dev,
.free = mlx5_vdpa_free,
.suspend = mlx5_vdpa_suspend,
};
static int query_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
{
u16 hw_mtu;
int err;
err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
if (err)
return err;
*mtu = hw_mtu - MLX5V_ETH_HARD_MTU;
return 0;
}
static int alloc_resources(struct mlx5_vdpa_net *ndev)
{
struct mlx5_vdpa_net_resources *res = &ndev->res;
int err;
if (res->valid) {
mlx5_vdpa_warn(&ndev->mvdev, "resources already allocated\n");
return -EEXIST;
}
err = mlx5_vdpa_alloc_transport_domain(&ndev->mvdev, &res->tdn);
if (err)
return err;
err = create_tis(ndev);
if (err)
goto err_tis;
res->valid = true;
return 0;
err_tis:
mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn);
return err;
}
static void free_resources(struct mlx5_vdpa_net *ndev)
{
struct mlx5_vdpa_net_resources *res = &ndev->res;
if (!res->valid)
return;
destroy_tis(ndev);
mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn);
res->valid = false;
}
static void init_mvqs(struct mlx5_vdpa_net *ndev)
{
struct mlx5_vdpa_virtqueue *mvq;
int i;
for (i = 0; i < ndev->mvdev.max_vqs; ++i) {
mvq = &ndev->vqs[i];
memset(mvq, 0, offsetof(struct mlx5_vdpa_virtqueue, ri));
mvq->index = i;
mvq->ndev = ndev;
mvq->fwqp.fw = true;
mvq->fw_state = MLX5_VIRTIO_NET_Q_OBJECT_NONE;
}
for (; i < ndev->mvdev.max_vqs; i++) {
mvq = &ndev->vqs[i];
memset(mvq, 0, offsetof(struct mlx5_vdpa_virtqueue, ri));
mvq->index = i;
mvq->ndev = ndev;
}
}
struct mlx5_vdpa_mgmtdev {
struct vdpa_mgmt_dev mgtdev;
struct mlx5_adev *madev;
struct mlx5_vdpa_net *ndev;
};
static int config_func_mtu(struct mlx5_core_dev *mdev, u16 mtu)
{
int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
void *in;
int err;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu,
mtu + MLX5V_ETH_HARD_MTU);
MLX5_SET(modify_nic_vport_context_in, in, opcode,
MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
kvfree(in);
return err;
}
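/* Pre-allocate one MSI-X interrupt per data virtqueue when dynamic MSI-X
* allocation is supported; allocation stops at the first failure.
*/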
static void allocate_irqs(struct mlx5_vdpa_net *ndev)
{
struct mlx5_vdpa_irq_pool_entry *ent;
int i;
if (!msix_mode_supported(&ndev->mvdev))
return;
if (!ndev->mvdev.mdev->pdev)
return;
ndev->irqp.entries = kcalloc(ndev->mvdev.max_vqs, sizeof(*ndev->irqp.entries), GFP_KERNEL);
if (!ndev->irqp.entries)
return;
for (i = 0; i < ndev->mvdev.max_vqs; i++) {
ent = ndev->irqp.entries + i;
snprintf(ent->name, MLX5_VDPA_IRQ_NAME_LEN, "%s-vq-%d",
dev_name(&ndev->mvdev.vdev.dev), i);
ent->map = pci_msix_alloc_irq_at(ndev->mvdev.mdev->pdev, MSI_ANY_INDEX, NULL);
if (!ent->map.virq)
return;
ndev->irqp.num_ent++;
}
}
static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
const struct vdpa_dev_set_config *add_config)
{
struct mlx5_vdpa_mgmtdev *mgtdev = container_of(v_mdev, struct mlx5_vdpa_mgmtdev, mgtdev);
struct virtio_net_config *config;
struct mlx5_core_dev *pfmdev;
struct mlx5_vdpa_dev *mvdev;
struct mlx5_vdpa_net *ndev;
struct mlx5_core_dev *mdev;
u64 device_features;
u32 max_vqs;
u16 mtu;
int err;
if (mgtdev->ndev)
return -ENOSPC;
mdev = mgtdev->madev->mdev;
device_features = mgtdev->mgtdev.supported_features;
if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) {
if (add_config->device_features & ~device_features) {
dev_warn(mdev->device,
"The provisioned features 0x%llx are not supported by this device with features 0x%llx\n",
add_config->device_features, device_features);
return -EINVAL;
}
device_features &= add_config->device_features;
} else {
device_features &= ~BIT_ULL(VIRTIO_NET_F_MRG_RXBUF);
}
if (!(device_features & BIT_ULL(VIRTIO_F_VERSION_1) &&
device_features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM))) {
dev_warn(mdev->device,
"Must provision minimum features 0x%llx for this device",
BIT_ULL(VIRTIO_F_VERSION_1) | BIT_ULL(VIRTIO_F_ACCESS_PLATFORM));
return -EOPNOTSUPP;
}
if (!(MLX5_CAP_DEV_VDPA_EMULATION(mdev, virtio_queue_type) &
MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT)) {
dev_warn(mdev->device, "missing support for split virtqueues\n");
return -EOPNOTSUPP;
}
max_vqs = min_t(int, MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues),
1 << MLX5_CAP_GEN(mdev, log_max_rqt_size));
if (max_vqs < 2) {
dev_warn(mdev->device,
"%d virtqueues are supported. At least 2 are required\n",
max_vqs);
return -EAGAIN;
}
if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP)) {
if (add_config->net.max_vq_pairs > max_vqs / 2)
return -EINVAL;
max_vqs = min_t(u32, max_vqs, 2 * add_config->net.max_vq_pairs);
} else {
max_vqs = 2;
}
ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops,
MLX5_VDPA_NUMVQ_GROUPS, MLX5_VDPA_NUM_AS, name, false);
if (IS_ERR(ndev))
return PTR_ERR(ndev);
ndev->mvdev.max_vqs = max_vqs;
mvdev = &ndev->mvdev;
mvdev->mdev = mdev;
ndev->vqs = kcalloc(max_vqs, sizeof(*ndev->vqs), GFP_KERNEL);
ndev->event_cbs = kcalloc(max_vqs + 1, sizeof(*ndev->event_cbs), GFP_KERNEL);
if (!ndev->vqs || !ndev->event_cbs) {
err = -ENOMEM;
goto err_alloc;
}
init_mvqs(ndev);
allocate_irqs(ndev);
init_rwsem(&ndev->reslock);
config = &ndev->config;
if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU)) {
err = config_func_mtu(mdev, add_config->net.mtu);
if (err)
goto err_alloc;
}
if (device_features & BIT_ULL(VIRTIO_NET_F_MTU)) {
err = query_mtu(mdev, &mtu);
if (err)
goto err_alloc;
ndev->config.mtu = cpu_to_mlx5vdpa16(mvdev, mtu);
}
if (device_features & BIT_ULL(VIRTIO_NET_F_STATUS)) {
if (get_link_state(mvdev))
ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP);
else
ndev->config.status &= cpu_to_mlx5vdpa16(mvdev, ~VIRTIO_NET_S_LINK_UP);
}
if (add_config->mask & (1 << VDPA_ATTR_DEV_NET_CFG_MACADDR)) {
memcpy(ndev->config.mac, add_config->net.mac, ETH_ALEN);
/* Don't bother setting the MAC address in the config if _F_MAC is not going to be provisioned */
} else if ((add_config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) == 0 ||
device_features & BIT_ULL(VIRTIO_NET_F_MAC)) {
err = mlx5_query_nic_vport_mac_address(mdev, 0, 0, config->mac);
if (err)
goto err_alloc;
}
if (!is_zero_ether_addr(config->mac)) {
pfmdev = pci_get_drvdata(pci_physfn(mdev->pdev));
err = mlx5_mpfs_add_mac(pfmdev, config->mac);
if (err)
goto err_alloc;
} else if ((add_config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) == 0) {
/*
* We used to clear the _F_MAC feature bit when seeing a zero MAC
* address and device features were not explicitly provisioned.
* Keep that behaviour so old scripts do not break.
*/
device_features &= ~BIT_ULL(VIRTIO_NET_F_MAC);
} else if (device_features & BIT_ULL(VIRTIO_NET_F_MAC)) {
/* Don't provision zero mac address for _F_MAC */
mlx5_vdpa_warn(&ndev->mvdev,
"No mac address provisioned?\n");
err = -EINVAL;
goto err_alloc;
}
if (device_features & BIT_ULL(VIRTIO_NET_F_MQ))
config->max_virtqueue_pairs = cpu_to_mlx5vdpa16(mvdev, max_vqs / 2);
ndev->mvdev.mlx_features = device_features;
mvdev->vdev.dma_dev = &mdev->pdev->dev;
err = mlx5_vdpa_alloc_resources(&ndev->mvdev);
if (err)
goto err_mpfs;
if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
err = mlx5_vdpa_create_mr(mvdev, NULL, 0);
if (err)
goto err_res;
}
err = alloc_resources(ndev);
if (err)
goto err_mr;
ndev->cvq_ent.mvdev = mvdev;
INIT_WORK(&ndev->cvq_ent.work, mlx5_cvq_kick_handler);
mvdev->wq = create_singlethread_workqueue("mlx5_vdpa_wq");
if (!mvdev->wq) {
err = -ENOMEM;
goto err_res2;
}
mvdev->vdev.mdev = &mgtdev->mgtdev;
err = _vdpa_register_device(&mvdev->vdev, max_vqs + 1);
if (err)
goto err_reg;
mgtdev->ndev = ndev;
return 0;
err_reg:
destroy_workqueue(mvdev->wq);
err_res2:
free_resources(ndev);
err_mr:
mlx5_vdpa_destroy_mr(mvdev);
err_res:
mlx5_vdpa_free_resources(&ndev->mvdev);
err_mpfs:
if (!is_zero_ether_addr(config->mac))
mlx5_mpfs_del_mac(pfmdev, config->mac);
err_alloc:
put_device(&mvdev->vdev.dev);
return err;
}
static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *dev)
{
struct mlx5_vdpa_mgmtdev *mgtdev = container_of(v_mdev, struct mlx5_vdpa_mgmtdev, mgtdev);
struct mlx5_vdpa_dev *mvdev = to_mvdev(dev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
struct workqueue_struct *wq;
mlx5_vdpa_remove_debugfs(ndev->debugfs);
ndev->debugfs = NULL;
unregister_link_notifier(ndev);
_vdpa_unregister_device(dev);
wq = mvdev->wq;
mvdev->wq = NULL;
destroy_workqueue(wq);
mgtdev->ndev = NULL;
}
static const struct vdpa_mgmtdev_ops mdev_ops = {
.dev_add = mlx5_vdpa_dev_add,
.dev_del = mlx5_vdpa_dev_del,
};
static struct virtio_device_id id_table[] = {
{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
{ 0 },
};
static int mlx5v_probe(struct auxiliary_device *adev,
const struct auxiliary_device_id *id)
{
struct mlx5_adev *madev = container_of(adev, struct mlx5_adev, adev);
struct mlx5_core_dev *mdev = madev->mdev;
struct mlx5_vdpa_mgmtdev *mgtdev;
int err;
mgtdev = kzalloc(sizeof(*mgtdev), GFP_KERNEL);
if (!mgtdev)
return -ENOMEM;
mgtdev->mgtdev.ops = &mdev_ops;
mgtdev->mgtdev.device = mdev->device;
mgtdev->mgtdev.id_table = id_table;
mgtdev->mgtdev.config_attr_mask = BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR) |
BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP) |
BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU) |
BIT_ULL(VDPA_ATTR_DEV_FEATURES);
mgtdev->mgtdev.max_supported_vqs =
MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues) + 1;
mgtdev->mgtdev.supported_features = get_supported_features(mdev);
mgtdev->madev = madev;
err = vdpa_mgmtdev_register(&mgtdev->mgtdev);
if (err)
goto reg_err;
auxiliary_set_drvdata(adev, mgtdev);
return 0;
reg_err:
kfree(mgtdev);
return err;
}
static void mlx5v_remove(struct auxiliary_device *adev)
{
struct mlx5_vdpa_mgmtdev *mgtdev;
mgtdev = auxiliary_get_drvdata(adev);
vdpa_mgmtdev_unregister(&mgtdev->mgtdev);
kfree(mgtdev);
}
static const struct auxiliary_device_id mlx5v_id_table[] = {
{ .name = MLX5_ADEV_NAME ".vnet", },
{},
};
MODULE_DEVICE_TABLE(auxiliary, mlx5v_id_table);
static struct auxiliary_driver mlx5v_driver = {
.name = "vnet",
.probe = mlx5v_probe,
.remove = mlx5v_remove,
.id_table = mlx5v_id_table,
};
module_auxiliary_driver(mlx5v_driver);
|
linux-master
|
drivers/vdpa/mlx5/net/mlx5_vnet.c
|
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies Ltd. */
#include <linux/iova.h>
#include <linux/mlx5/driver.h>
#include "mlx5_vdpa.h"
static int alloc_pd(struct mlx5_vdpa_dev *dev, u32 *pdn, u16 uid)
{
struct mlx5_core_dev *mdev = dev->mdev;
u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
int err;
MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
MLX5_SET(alloc_pd_in, in, uid, uid);
err = mlx5_cmd_exec_inout(mdev, alloc_pd, in, out);
if (!err)
*pdn = MLX5_GET(alloc_pd_out, out, pd);
return err;
}
static int dealloc_pd(struct mlx5_vdpa_dev *dev, u32 pdn, u16 uid)
{
u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {};
struct mlx5_core_dev *mdev = dev->mdev;
MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD);
MLX5_SET(dealloc_pd_in, in, pd, pdn);
MLX5_SET(dealloc_pd_in, in, uid, uid);
return mlx5_cmd_exec_in(mdev, dealloc_pd, in);
}
static int get_null_mkey(struct mlx5_vdpa_dev *dev, u32 *null_mkey)
{
u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
struct mlx5_core_dev *mdev = dev->mdev;
int err;
MLX5_SET(query_special_contexts_in, in, opcode, MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
err = mlx5_cmd_exec_inout(mdev, query_special_contexts, in, out);
if (!err)
*null_mkey = MLX5_GET(query_special_contexts_out, out, null_mkey);
return err;
}
static int create_uctx(struct mlx5_vdpa_dev *mvdev, u16 *uid)
{
u32 out[MLX5_ST_SZ_DW(create_uctx_out)] = {};
int inlen;
void *in;
int err;
if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0))
return 0;
/* 0 means not supported */
if (!MLX5_CAP_GEN(mvdev->mdev, log_max_uctx))
return -EOPNOTSUPP;
inlen = MLX5_ST_SZ_BYTES(create_uctx_in);
in = kzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
MLX5_SET(create_uctx_in, in, opcode, MLX5_CMD_OP_CREATE_UCTX);
MLX5_SET(create_uctx_in, in, uctx.cap, MLX5_UCTX_CAP_RAW_TX);
err = mlx5_cmd_exec(mvdev->mdev, in, inlen, out, sizeof(out));
kfree(in);
if (!err)
*uid = MLX5_GET(create_uctx_out, out, uid);
return err;
}
static void destroy_uctx(struct mlx5_vdpa_dev *mvdev, u32 uid)
{
u32 out[MLX5_ST_SZ_DW(destroy_uctx_out)] = {};
u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {};
if (!uid)
return;
MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX);
MLX5_SET(destroy_uctx_in, in, uid, uid);
mlx5_cmd_exec(mvdev->mdev, in, sizeof(in), out, sizeof(out));
}
int mlx5_vdpa_create_tis(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tisn)
{
u32 out[MLX5_ST_SZ_DW(create_tis_out)] = {};
int err;
MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
MLX5_SET(create_tis_in, in, uid, mvdev->res.uid);
err = mlx5_cmd_exec_inout(mvdev->mdev, create_tis, in, out);
if (!err)
*tisn = MLX5_GET(create_tis_out, out, tisn);
return err;
}
void mlx5_vdpa_destroy_tis(struct mlx5_vdpa_dev *mvdev, u32 tisn)
{
u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {};
MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
MLX5_SET(destroy_tis_in, in, uid, mvdev->res.uid);
MLX5_SET(destroy_tis_in, in, tisn, tisn);
mlx5_cmd_exec_in(mvdev->mdev, destroy_tis, in);
}
int mlx5_vdpa_create_rqt(struct mlx5_vdpa_dev *mvdev, void *in, int inlen, u32 *rqtn)
{
u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {};
int err;
MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
err = mlx5_cmd_exec(mvdev->mdev, in, inlen, out, sizeof(out));
if (!err)
*rqtn = MLX5_GET(create_rqt_out, out, rqtn);
return err;
}
int mlx5_vdpa_modify_rqt(struct mlx5_vdpa_dev *mvdev, void *in, int inlen, u32 rqtn)
{
u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {};
MLX5_SET(modify_rqt_in, in, uid, mvdev->res.uid);
MLX5_SET(modify_rqt_in, in, rqtn, rqtn);
MLX5_SET(modify_rqt_in, in, opcode, MLX5_CMD_OP_MODIFY_RQT);
return mlx5_cmd_exec(mvdev->mdev, in, inlen, out, sizeof(out));
}
void mlx5_vdpa_destroy_rqt(struct mlx5_vdpa_dev *mvdev, u32 rqtn)
{
u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {};
MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
MLX5_SET(destroy_rqt_in, in, uid, mvdev->res.uid);
MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
mlx5_cmd_exec_in(mvdev->mdev, destroy_rqt, in);
}
int mlx5_vdpa_create_tir(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tirn)
{
u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {};
int err;
MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
err = mlx5_cmd_exec_inout(mvdev->mdev, create_tir, in, out);
if (!err)
*tirn = MLX5_GET(create_tir_out, out, tirn);
return err;
}
void mlx5_vdpa_destroy_tir(struct mlx5_vdpa_dev *mvdev, u32 tirn)
{
u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {};
MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
MLX5_SET(destroy_tir_in, in, uid, mvdev->res.uid);
MLX5_SET(destroy_tir_in, in, tirn, tirn);
mlx5_cmd_exec_in(mvdev->mdev, destroy_tir, in);
}
int mlx5_vdpa_alloc_transport_domain(struct mlx5_vdpa_dev *mvdev, u32 *tdn)
{
u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {};
u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {};
int err;
MLX5_SET(alloc_transport_domain_in, in, opcode, MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
MLX5_SET(alloc_transport_domain_in, in, uid, mvdev->res.uid);
err = mlx5_cmd_exec_inout(mvdev->mdev, alloc_transport_domain, in, out);
if (!err)
*tdn = MLX5_GET(alloc_transport_domain_out, out, transport_domain);
return err;
}
void mlx5_vdpa_dealloc_transport_domain(struct mlx5_vdpa_dev *mvdev, u32 tdn)
{
u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)] = {};
MLX5_SET(dealloc_transport_domain_in, in, opcode, MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
MLX5_SET(dealloc_transport_domain_in, in, uid, mvdev->res.uid);
MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn);
mlx5_cmd_exec_in(mvdev->mdev, dealloc_transport_domain, in);
}
int mlx5_vdpa_create_mkey(struct mlx5_vdpa_dev *mvdev, u32 *mkey, u32 *in,
int inlen)
{
u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {};
u32 mkey_index;
int err;
MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);
err = mlx5_cmd_exec(mvdev->mdev, in, inlen, lout, sizeof(lout));
if (err)
return err;
mkey_index = MLX5_GET(create_mkey_out, lout, mkey_index);
*mkey = mlx5_idx_to_mkey(mkey_index);
return 0;
}
int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, u32 mkey)
{
u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)] = {};
MLX5_SET(destroy_mkey_in, in, uid, mvdev->res.uid);
MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY);
MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey));
return mlx5_cmd_exec_in(mvdev->mdev, destroy_mkey, in);
}
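/* Set up the IOTLB and vringh binding used by the software-emulated control
* virtqueue.
*/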
static int init_ctrl_vq(struct mlx5_vdpa_dev *mvdev)
{
mvdev->cvq.iotlb = vhost_iotlb_alloc(0, 0);
if (!mvdev->cvq.iotlb)
return -ENOMEM;
spin_lock_init(&mvdev->cvq.iommu_lock);
vringh_set_iotlb(&mvdev->cvq.vring, mvdev->cvq.iotlb, &mvdev->cvq.iommu_lock);
return 0;
}
static void cleanup_ctrl_vq(struct mlx5_vdpa_dev *mvdev)
{
vhost_iotlb_free(mvdev->cvq.iotlb);
}
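/* Allocate the core device resources: UAR page, user context, protection
* domain, null mkey, the mapped doorbell (kick) address and the control VQ
* IOTLB.
*/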
int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev)
{
u64 offset = MLX5_CAP64_DEV_VDPA_EMULATION(mvdev->mdev, doorbell_bar_offset);
struct mlx5_vdpa_resources *res = &mvdev->res;
struct mlx5_core_dev *mdev = mvdev->mdev;
u64 kick_addr;
int err;
if (res->valid) {
mlx5_vdpa_warn(mvdev, "resources already allocated\n");
return -EINVAL;
}
mutex_init(&mvdev->mr.mkey_mtx);
res->uar = mlx5_get_uars_page(mdev);
if (IS_ERR(res->uar)) {
err = PTR_ERR(res->uar);
goto err_uars;
}
err = create_uctx(mvdev, &res->uid);
if (err)
goto err_uctx;
err = alloc_pd(mvdev, &res->pdn, res->uid);
if (err)
goto err_pd;
err = get_null_mkey(mvdev, &res->null_mkey);
if (err)
goto err_key;
kick_addr = mdev->bar_addr + offset;
res->phys_kick_addr = kick_addr;
res->kick_addr = ioremap(kick_addr, PAGE_SIZE);
if (!res->kick_addr) {
err = -ENOMEM;
goto err_key;
}
err = init_ctrl_vq(mvdev);
if (err)
goto err_ctrl;
res->valid = true;
return 0;
err_ctrl:
iounmap(res->kick_addr);
err_key:
dealloc_pd(mvdev, res->pdn, res->uid);
err_pd:
destroy_uctx(mvdev, res->uid);
err_uctx:
mlx5_put_uars_page(mdev, res->uar);
err_uars:
mutex_destroy(&mvdev->mr.mkey_mtx);
return err;
}
void mlx5_vdpa_free_resources(struct mlx5_vdpa_dev *mvdev)
{
struct mlx5_vdpa_resources *res = &mvdev->res;
if (!res->valid)
return;
cleanup_ctrl_vq(mvdev);
iounmap(res->kick_addr);
res->kick_addr = NULL;
dealloc_pd(mvdev, res->pdn, res->uid);
destroy_uctx(mvdev, res->uid);
mlx5_put_uars_page(mvdev->mdev, res->uar);
mutex_destroy(&mvdev->mr.mkey_mtx);
res->valid = false;
}
|
linux-master
|
drivers/vdpa/mlx5/core/resources.c
|