// SPDX-License-Identifier: GPL-2.0-only
/*
* Samsung S5P/Exynos SoC series MIPI CSIS/DSIM DPHY driver
*
* Copyright (C) 2013,2016 Samsung Electronics Co., Ltd.
* Author: Sylwester Nawrocki <[email protected]>
*/
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/spinlock.h>
#include <linux/soc/samsung/exynos-regs-pmu.h>
#include <linux/mfd/syscon.h>
enum exynos_mipi_phy_id {
EXYNOS_MIPI_PHY_ID_NONE = -1,
EXYNOS_MIPI_PHY_ID_CSIS0,
EXYNOS_MIPI_PHY_ID_DSIM0,
EXYNOS_MIPI_PHY_ID_CSIS1,
EXYNOS_MIPI_PHY_ID_DSIM1,
EXYNOS_MIPI_PHY_ID_CSIS2,
EXYNOS_MIPI_PHYS_NUM
};
enum exynos_mipi_phy_regmap_id {
EXYNOS_MIPI_REGMAP_PMU,
EXYNOS_MIPI_REGMAP_DISP,
EXYNOS_MIPI_REGMAP_CAM0,
EXYNOS_MIPI_REGMAP_CAM1,
EXYNOS_MIPI_REGMAPS_NUM
};
struct mipi_phy_device_desc {
int num_phys;
int num_regmaps;
const char *regmap_names[EXYNOS_MIPI_REGMAPS_NUM];
struct exynos_mipi_phy_desc {
enum exynos_mipi_phy_id coupled_phy_id;
u32 enable_val;
unsigned int enable_reg;
enum exynos_mipi_phy_regmap_id enable_map;
u32 resetn_val;
unsigned int resetn_reg;
enum exynos_mipi_phy_regmap_id resetn_map;
} phys[EXYNOS_MIPI_PHYS_NUM];
};
static const struct mipi_phy_device_desc s5pv210_mipi_phy = {
.num_regmaps = 1,
.regmap_names = {"syscon"},
.num_phys = 4,
.phys = {
{
/* EXYNOS_MIPI_PHY_ID_CSIS0 */
.coupled_phy_id = EXYNOS_MIPI_PHY_ID_DSIM0,
.enable_val = EXYNOS4_PHY_ENABLE,
.enable_reg = EXYNOS4_MIPI_PHY_CONTROL(0),
.enable_map = EXYNOS_MIPI_REGMAP_PMU,
.resetn_val = EXYNOS4_MIPI_PHY_SRESETN,
.resetn_reg = EXYNOS4_MIPI_PHY_CONTROL(0),
.resetn_map = EXYNOS_MIPI_REGMAP_PMU,
}, {
/* EXYNOS_MIPI_PHY_ID_DSIM0 */
.coupled_phy_id = EXYNOS_MIPI_PHY_ID_CSIS0,
.enable_val = EXYNOS4_PHY_ENABLE,
.enable_reg = EXYNOS4_MIPI_PHY_CONTROL(0),
.enable_map = EXYNOS_MIPI_REGMAP_PMU,
.resetn_val = EXYNOS4_MIPI_PHY_MRESETN,
.resetn_reg = EXYNOS4_MIPI_PHY_CONTROL(0),
.resetn_map = EXYNOS_MIPI_REGMAP_PMU,
}, {
/* EXYNOS_MIPI_PHY_ID_CSIS1 */
.coupled_phy_id = EXYNOS_MIPI_PHY_ID_DSIM1,
.enable_val = EXYNOS4_PHY_ENABLE,
.enable_reg = EXYNOS4_MIPI_PHY_CONTROL(1),
.enable_map = EXYNOS_MIPI_REGMAP_PMU,
.resetn_val = EXYNOS4_MIPI_PHY_SRESETN,
.resetn_reg = EXYNOS4_MIPI_PHY_CONTROL(1),
.resetn_map = EXYNOS_MIPI_REGMAP_PMU,
}, {
/* EXYNOS_MIPI_PHY_ID_DSIM1 */
.coupled_phy_id = EXYNOS_MIPI_PHY_ID_CSIS1,
.enable_val = EXYNOS4_PHY_ENABLE,
.enable_reg = EXYNOS4_MIPI_PHY_CONTROL(1),
.enable_map = EXYNOS_MIPI_REGMAP_PMU,
.resetn_val = EXYNOS4_MIPI_PHY_MRESETN,
.resetn_reg = EXYNOS4_MIPI_PHY_CONTROL(1),
.resetn_map = EXYNOS_MIPI_REGMAP_PMU,
},
},
};
static const struct mipi_phy_device_desc exynos5420_mipi_phy = {
.num_regmaps = 1,
.regmap_names = {"syscon"},
.num_phys = 5,
.phys = {
{
/* EXYNOS_MIPI_PHY_ID_CSIS0 */
.coupled_phy_id = EXYNOS_MIPI_PHY_ID_DSIM0,
.enable_val = EXYNOS4_PHY_ENABLE,
.enable_reg = EXYNOS5420_MIPI_PHY_CONTROL(0),
.enable_map = EXYNOS_MIPI_REGMAP_PMU,
.resetn_val = EXYNOS4_MIPI_PHY_SRESETN,
.resetn_reg = EXYNOS5420_MIPI_PHY_CONTROL(0),
.resetn_map = EXYNOS_MIPI_REGMAP_PMU,
}, {
/* EXYNOS_MIPI_PHY_ID_DSIM0 */
.coupled_phy_id = EXYNOS_MIPI_PHY_ID_CSIS0,
.enable_val = EXYNOS4_PHY_ENABLE,
.enable_reg = EXYNOS5420_MIPI_PHY_CONTROL(0),
.enable_map = EXYNOS_MIPI_REGMAP_PMU,
.resetn_val = EXYNOS4_MIPI_PHY_MRESETN,
.resetn_reg = EXYNOS5420_MIPI_PHY_CONTROL(0),
.resetn_map = EXYNOS_MIPI_REGMAP_PMU,
}, {
/* EXYNOS_MIPI_PHY_ID_CSIS1 */
.coupled_phy_id = EXYNOS_MIPI_PHY_ID_DSIM1,
.enable_val = EXYNOS4_PHY_ENABLE,
.enable_reg = EXYNOS5420_MIPI_PHY_CONTROL(1),
.enable_map = EXYNOS_MIPI_REGMAP_PMU,
.resetn_val = EXYNOS4_MIPI_PHY_SRESETN,
.resetn_reg = EXYNOS5420_MIPI_PHY_CONTROL(1),
.resetn_map = EXYNOS_MIPI_REGMAP_PMU,
}, {
/* EXYNOS_MIPI_PHY_ID_DSIM1 */
.coupled_phy_id = EXYNOS_MIPI_PHY_ID_CSIS1,
.enable_val = EXYNOS4_PHY_ENABLE,
.enable_reg = EXYNOS5420_MIPI_PHY_CONTROL(1),
.enable_map = EXYNOS_MIPI_REGMAP_PMU,
.resetn_val = EXYNOS4_MIPI_PHY_MRESETN,
.resetn_reg = EXYNOS5420_MIPI_PHY_CONTROL(1),
.resetn_map = EXYNOS_MIPI_REGMAP_PMU,
}, {
/* EXYNOS_MIPI_PHY_ID_CSIS2 */
.coupled_phy_id = EXYNOS_MIPI_PHY_ID_NONE,
.enable_val = EXYNOS4_PHY_ENABLE,
.enable_reg = EXYNOS5420_MIPI_PHY_CONTROL(2),
.enable_map = EXYNOS_MIPI_REGMAP_PMU,
.resetn_val = EXYNOS4_MIPI_PHY_SRESETN,
.resetn_reg = EXYNOS5420_MIPI_PHY_CONTROL(2),
.resetn_map = EXYNOS_MIPI_REGMAP_PMU,
},
},
};
#define EXYNOS5433_SYSREG_DISP_MIPI_PHY 0x100C
#define EXYNOS5433_SYSREG_CAM0_MIPI_DPHY_CON 0x1014
#define EXYNOS5433_SYSREG_CAM1_MIPI_DPHY_CON 0x1020
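/*
 * On Exynos5433 the PHY enable bits stay in the PMU, but the reset
 * controls live in the DISP/CAM0/CAM1 system registers, hence the
 * additional regmaps referenced below.
 */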
static const struct mipi_phy_device_desc exynos5433_mipi_phy = {
.num_regmaps = 4,
.regmap_names = {
"samsung,pmu-syscon",
"samsung,disp-sysreg",
"samsung,cam0-sysreg",
"samsung,cam1-sysreg"
},
.num_phys = 5,
.phys = {
{
/* EXYNOS_MIPI_PHY_ID_CSIS0 */
.coupled_phy_id = EXYNOS_MIPI_PHY_ID_DSIM0,
.enable_val = EXYNOS4_PHY_ENABLE,
.enable_reg = EXYNOS4_MIPI_PHY_CONTROL(0),
.enable_map = EXYNOS_MIPI_REGMAP_PMU,
.resetn_val = BIT(0),
.resetn_reg = EXYNOS5433_SYSREG_CAM0_MIPI_DPHY_CON,
.resetn_map = EXYNOS_MIPI_REGMAP_CAM0,
}, {
/* EXYNOS_MIPI_PHY_ID_DSIM0 */
.coupled_phy_id = EXYNOS_MIPI_PHY_ID_CSIS0,
.enable_val = EXYNOS4_PHY_ENABLE,
.enable_reg = EXYNOS4_MIPI_PHY_CONTROL(0),
.enable_map = EXYNOS_MIPI_REGMAP_PMU,
.resetn_val = BIT(0),
.resetn_reg = EXYNOS5433_SYSREG_DISP_MIPI_PHY,
.resetn_map = EXYNOS_MIPI_REGMAP_DISP,
}, {
/* EXYNOS_MIPI_PHY_ID_CSIS1 */
.coupled_phy_id = EXYNOS_MIPI_PHY_ID_NONE,
.enable_val = EXYNOS4_PHY_ENABLE,
.enable_reg = EXYNOS4_MIPI_PHY_CONTROL(1),
.enable_map = EXYNOS_MIPI_REGMAP_PMU,
.resetn_val = BIT(1),
.resetn_reg = EXYNOS5433_SYSREG_CAM0_MIPI_DPHY_CON,
.resetn_map = EXYNOS_MIPI_REGMAP_CAM0,
}, {
/* EXYNOS_MIPI_PHY_ID_DSIM1 */
.coupled_phy_id = EXYNOS_MIPI_PHY_ID_NONE,
.enable_val = EXYNOS4_PHY_ENABLE,
.enable_reg = EXYNOS4_MIPI_PHY_CONTROL(1),
.enable_map = EXYNOS_MIPI_REGMAP_PMU,
.resetn_val = BIT(1),
.resetn_reg = EXYNOS5433_SYSREG_DISP_MIPI_PHY,
.resetn_map = EXYNOS_MIPI_REGMAP_DISP,
}, {
/* EXYNOS_MIPI_PHY_ID_CSIS2 */
.coupled_phy_id = EXYNOS_MIPI_PHY_ID_NONE,
.enable_val = EXYNOS4_PHY_ENABLE,
.enable_reg = EXYNOS4_MIPI_PHY_CONTROL(2),
.enable_map = EXYNOS_MIPI_REGMAP_PMU,
.resetn_val = BIT(0),
.resetn_reg = EXYNOS5433_SYSREG_CAM1_MIPI_DPHY_CON,
.resetn_map = EXYNOS_MIPI_REGMAP_CAM1,
},
},
};
struct exynos_mipi_video_phy {
struct regmap *regmaps[EXYNOS_MIPI_REGMAPS_NUM];
int num_phys;
struct video_phy_desc {
struct phy *phy;
unsigned int index;
const struct exynos_mipi_phy_desc *data;
} phys[EXYNOS_MIPI_PHYS_NUM];
spinlock_t slock;
};
static int __set_phy_state(const struct exynos_mipi_phy_desc *data,
struct exynos_mipi_video_phy *state, unsigned int on)
{
struct regmap *enable_map = state->regmaps[data->enable_map];
struct regmap *resetn_map = state->regmaps[data->resetn_map];
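/*
 * Each CSIS/DSIM pair shares a single PMU enable bit, so on power-off
 * that bit is cleared only when the coupled PHY is unpowered as well.
 */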
spin_lock(&state->slock);
/* disable in PMU sysreg */
if (!on && data->coupled_phy_id >= 0 &&
state->phys[data->coupled_phy_id].phy->power_count == 0)
regmap_update_bits(enable_map, data->enable_reg,
data->enable_val, 0);
/* PHY reset */
if (on)
regmap_update_bits(resetn_map, data->resetn_reg,
data->resetn_val, data->resetn_val);
else
regmap_update_bits(resetn_map, data->resetn_reg,
data->resetn_val, 0);
/* enable in PMU sysreg */
if (on)
regmap_update_bits(enable_map, data->enable_reg,
data->enable_val, data->enable_val);
spin_unlock(&state->slock);
return 0;
}
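/*
 * phys[] is embedded in struct exynos_mipi_video_phy, so container_of()
 * on the indexed element recovers the enclosing driver state.
 */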
#define to_mipi_video_phy(desc) \
container_of((desc), struct exynos_mipi_video_phy, phys[(desc)->index])
static int exynos_mipi_video_phy_power_on(struct phy *phy)
{
struct video_phy_desc *phy_desc = phy_get_drvdata(phy);
struct exynos_mipi_video_phy *state = to_mipi_video_phy(phy_desc);
return __set_phy_state(phy_desc->data, state, 1);
}
static int exynos_mipi_video_phy_power_off(struct phy *phy)
{
struct video_phy_desc *phy_desc = phy_get_drvdata(phy);
struct exynos_mipi_video_phy *state = to_mipi_video_phy(phy_desc);
return __set_phy_state(phy_desc->data, state, 0);
}
static struct phy *exynos_mipi_video_phy_xlate(struct device *dev,
struct of_phandle_args *args)
{
struct exynos_mipi_video_phy *state = dev_get_drvdata(dev);
if (WARN_ON(args->args[0] >= state->num_phys))
return ERR_PTR(-ENODEV);
return state->phys[args->args[0]].phy;
}
static const struct phy_ops exynos_mipi_video_phy_ops = {
.power_on = exynos_mipi_video_phy_power_on,
.power_off = exynos_mipi_video_phy_power_off,
.owner = THIS_MODULE,
};
static int exynos_mipi_video_phy_probe(struct platform_device *pdev)
{
const struct mipi_phy_device_desc *phy_dev;
struct exynos_mipi_video_phy *state;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct phy_provider *phy_provider;
unsigned int i = 0;
phy_dev = of_device_get_match_data(dev);
if (!phy_dev)
return -ENODEV;
state = devm_kzalloc(dev, sizeof(*state), GFP_KERNEL);
if (!state)
return -ENOMEM;
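/*
 * The PHY node may be a child of the PMU syscon node; try the parent's
 * regmap first and fall back to the named syscon phandles from the
 * per-SoC description.
 */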
state->regmaps[i] = syscon_node_to_regmap(dev->parent->of_node);
if (!IS_ERR(state->regmaps[i]))
i++;
for (; i < phy_dev->num_regmaps; i++) {
state->regmaps[i] = syscon_regmap_lookup_by_phandle(np,
phy_dev->regmap_names[i]);
if (IS_ERR(state->regmaps[i]))
return PTR_ERR(state->regmaps[i]);
}
state->num_phys = phy_dev->num_phys;
spin_lock_init(&state->slock);
dev_set_drvdata(dev, state);
for (i = 0; i < state->num_phys; i++) {
struct phy *phy = devm_phy_create(dev, NULL,
&exynos_mipi_video_phy_ops);
if (IS_ERR(phy)) {
dev_err(dev, "failed to create PHY %d\n", i);
return PTR_ERR(phy);
}
state->phys[i].phy = phy;
state->phys[i].index = i;
state->phys[i].data = &phy_dev->phys[i];
phy_set_drvdata(phy, &state->phys[i]);
}
phy_provider = devm_of_phy_provider_register(dev,
exynos_mipi_video_phy_xlate);
return PTR_ERR_OR_ZERO(phy_provider);
}
static const struct of_device_id exynos_mipi_video_phy_of_match[] = {
{
.compatible = "samsung,s5pv210-mipi-video-phy",
.data = &s5pv210_mipi_phy,
}, {
.compatible = "samsung,exynos5420-mipi-video-phy",
.data = &exynos5420_mipi_phy,
}, {
.compatible = "samsung,exynos5433-mipi-video-phy",
.data = &exynos5433_mipi_phy,
},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, exynos_mipi_video_phy_of_match);
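/*
 * Consumers select one of the PHYs by index: the xlate callback above uses
 * args->args[0], so the binding has #phy-cells = <1>. An illustrative,
 * non-authoritative device tree sketch (property names inferred from the
 * regmap lookups in this driver):
 *
 *	mipi_phy: mipi-video-phy {
 *		compatible = "samsung,exynos5420-mipi-video-phy";
 *		syscon = <&pmu_system_controller>;
 *		#phy-cells = <1>;
 *	};
 *
 *	consumer: phys = <&mipi_phy 0>;
 */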
static struct platform_driver exynos_mipi_video_phy_driver = {
.probe = exynos_mipi_video_phy_probe,
.driver = {
.of_match_table = exynos_mipi_video_phy_of_match,
.name = "exynos-mipi-video-phy",
.suppress_bind_attrs = true,
}
};
module_platform_driver(exynos_mipi_video_phy_driver);
MODULE_DESCRIPTION("Samsung S5P/Exynos SoC MIPI CSI-2/DSI PHY driver");
MODULE_AUTHOR("Sylwester Nawrocki <[email protected]>");
MODULE_LICENSE("GPL v2");
/* ==== end of drivers/phy/samsung/phy-exynos-mipi-video.c (linux-master) ==== */
// SPDX-License-Identifier: GPL-2.0-only
/*
* UFS PHY driver for Samsung SoC
*
* Copyright (C) 2020 Samsung Electronics Co., Ltd.
* Author: Seungwon Jeon <[email protected]>
* Author: Alim Akhtar <[email protected]>
*
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include "phy-samsung-ufs.h"
#define for_each_phy_lane(phy, i) \
for (i = 0; i < (phy)->lane_cnt; i++)
#define for_each_phy_cfg(cfg) \
for (; (cfg)->id; (cfg)++)
#define PHY_DEF_LANE_CNT 1
static void samsung_ufs_phy_config(struct samsung_ufs_phy *phy,
const struct samsung_ufs_phy_cfg *cfg,
u8 lane)
{
enum {LANE_0, LANE_1}; /* lane index */
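/* Only transceiver (TRSV) block registers are duplicated per lane. */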
switch (lane) {
case LANE_0:
writel(cfg->val, (phy)->reg_pma + cfg->off_0);
break;
case LANE_1:
if (cfg->id == PHY_TRSV_BLK)
writel(cfg->val, (phy)->reg_pma + cfg->off_1);
break;
}
}
static int samsung_ufs_phy_wait_for_lock_acq(struct phy *phy)
{
struct samsung_ufs_phy *ufs_phy = get_samsung_ufs_phy(phy);
const unsigned int timeout_us = 100000;
const unsigned int sleep_us = 10;
u32 val;
int err;
err = readl_poll_timeout(
ufs_phy->reg_pma + PHY_APB_ADDR(PHY_PLL_LOCK_STATUS),
val, (val & PHY_PLL_LOCK_BIT), sleep_us, timeout_us);
if (err) {
dev_err(ufs_phy->dev,
"failed to get phy pll lock acquisition %d\n", err);
goto out;
}
err = readl_poll_timeout(
ufs_phy->reg_pma +
PHY_APB_ADDR(ufs_phy->drvdata->cdr_lock_status_offset),
val, (val & PHY_CDR_LOCK_BIT), sleep_us, timeout_us);
if (err)
dev_err(ufs_phy->dev,
"failed to get phy cdr lock acquisition %d\n", err);
out:
return err;
}
static int samsung_ufs_phy_calibrate(struct phy *phy)
{
struct samsung_ufs_phy *ufs_phy = get_samsung_ufs_phy(phy);
const struct samsung_ufs_phy_cfg * const *cfgs = ufs_phy->cfgs;
const struct samsung_ufs_phy_cfg *cfg;
int err = 0;
int i;
if (unlikely(ufs_phy->ufs_phy_state < CFG_PRE_INIT ||
ufs_phy->ufs_phy_state >= CFG_TAG_MAX)) {
dev_err(ufs_phy->dev, "invalid phy config index %d\n", ufs_phy->ufs_phy_state);
return -EINVAL;
}
cfg = cfgs[ufs_phy->ufs_phy_state];
if (!cfg)
goto out;
for_each_phy_cfg(cfg) {
for_each_phy_lane(ufs_phy, i) {
samsung_ufs_phy_config(ufs_phy, cfg, i);
}
}
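/* Lock acquisition is only checked after the switch to HS power mode. */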
if (ufs_phy->ufs_phy_state == CFG_POST_PWR_HS)
err = samsung_ufs_phy_wait_for_lock_acq(phy);
/*
* In the Samsung UFSHCI the PHY needs to be calibrated at several
* stages: before link startup, after link startup, before a power
* mode change and after a power mode change. The state machine below
* makes sure the PHY is calibrated in each of these states. After
* configuring the PHY for the current state, advance to the next
* state so that the next set of calibration values can be programmed.
*/
out:
switch (ufs_phy->ufs_phy_state) {
case CFG_PRE_INIT:
ufs_phy->ufs_phy_state = CFG_POST_INIT;
break;
case CFG_POST_INIT:
ufs_phy->ufs_phy_state = CFG_PRE_PWR_HS;
break;
case CFG_PRE_PWR_HS:
ufs_phy->ufs_phy_state = CFG_POST_PWR_HS;
break;
case CFG_POST_PWR_HS:
/* Change back to INIT state */
ufs_phy->ufs_phy_state = CFG_PRE_INIT;
break;
default:
dev_err(ufs_phy->dev, "wrong state for phy calibration\n");
}
return err;
}
static int samsung_ufs_phy_clks_init(struct samsung_ufs_phy *phy)
{
int i;
const struct samsung_ufs_phy_drvdata *drvdata = phy->drvdata;
int num_clks = drvdata->num_clks;
phy->clks = devm_kcalloc(phy->dev, num_clks, sizeof(*phy->clks),
GFP_KERNEL);
if (!phy->clks)
return -ENOMEM;
for (i = 0; i < num_clks; i++)
phy->clks[i].id = drvdata->clk_list[i];
return devm_clk_bulk_get(phy->dev, num_clks, phy->clks);
}
static int samsung_ufs_phy_init(struct phy *phy)
{
struct samsung_ufs_phy *ss_phy = get_samsung_ufs_phy(phy);
ss_phy->lane_cnt = phy->attrs.bus_width;
ss_phy->ufs_phy_state = CFG_PRE_INIT;
return 0;
}
static int samsung_ufs_phy_power_on(struct phy *phy)
{
struct samsung_ufs_phy *ss_phy = get_samsung_ufs_phy(phy);
int ret;
samsung_ufs_phy_ctrl_isol(ss_phy, false);
ret = clk_bulk_prepare_enable(ss_phy->drvdata->num_clks, ss_phy->clks);
if (ret) {
dev_err(ss_phy->dev, "failed to enable ufs phy clocks\n");
return ret;
}
if (ss_phy->ufs_phy_state == CFG_PRE_INIT) {
ret = samsung_ufs_phy_calibrate(phy);
if (ret)
dev_err(ss_phy->dev, "ufs phy calibration failed\n");
}
return ret;
}
static int samsung_ufs_phy_power_off(struct phy *phy)
{
struct samsung_ufs_phy *ss_phy = get_samsung_ufs_phy(phy);
clk_bulk_disable_unprepare(ss_phy->drvdata->num_clks, ss_phy->clks);
samsung_ufs_phy_ctrl_isol(ss_phy, true);
return 0;
}
static int samsung_ufs_phy_set_mode(struct phy *generic_phy,
enum phy_mode mode, int submode)
{
struct samsung_ufs_phy *ss_phy = get_samsung_ufs_phy(generic_phy);
ss_phy->mode = PHY_MODE_INVALID;
if (mode > 0)
ss_phy->mode = mode;
return 0;
}
static int samsung_ufs_phy_exit(struct phy *phy)
{
struct samsung_ufs_phy *ss_phy = get_samsung_ufs_phy(phy);
ss_phy->ufs_phy_state = CFG_TAG_MAX;
return 0;
}
static const struct phy_ops samsung_ufs_phy_ops = {
.init = samsung_ufs_phy_init,
.exit = samsung_ufs_phy_exit,
.power_on = samsung_ufs_phy_power_on,
.power_off = samsung_ufs_phy_power_off,
.calibrate = samsung_ufs_phy_calibrate,
.set_mode = samsung_ufs_phy_set_mode,
.owner = THIS_MODULE,
};
static const struct of_device_id samsung_ufs_phy_match[];
static int samsung_ufs_phy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct of_device_id *match;
struct samsung_ufs_phy *phy;
struct phy *gen_phy;
struct phy_provider *phy_provider;
const struct samsung_ufs_phy_drvdata *drvdata;
u32 isol_offset;
int err = 0;
match = of_match_node(samsung_ufs_phy_match, dev->of_node);
if (!match) {
err = -EINVAL;
dev_err(dev, "failed to get match_node\n");
goto out;
}
phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
if (!phy) {
err = -ENOMEM;
goto out;
}
phy->reg_pma = devm_platform_ioremap_resource_byname(pdev, "phy-pma");
if (IS_ERR(phy->reg_pma)) {
err = PTR_ERR(phy->reg_pma);
goto out;
}
phy->reg_pmu = syscon_regmap_lookup_by_phandle(
dev->of_node, "samsung,pmu-syscon");
if (IS_ERR(phy->reg_pmu)) {
err = PTR_ERR(phy->reg_pmu);
dev_err(dev, "failed syscon remap for pmu\n");
goto out;
}
gen_phy = devm_phy_create(dev, NULL, &samsung_ufs_phy_ops);
if (IS_ERR(gen_phy)) {
err = PTR_ERR(gen_phy);
dev_err(dev, "failed to create PHY for ufs-phy\n");
goto out;
}
drvdata = match->data;
phy->dev = dev;
phy->drvdata = drvdata;
phy->cfgs = drvdata->cfgs;
memcpy(&phy->isol, &drvdata->isol, sizeof(phy->isol));
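/*
 * An optional second cell in "samsung,pmu-syscon" overrides the default
 * PMU isolation register offset taken from the driver data.
 */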
if (!of_property_read_u32_index(dev->of_node, "samsung,pmu-syscon", 1,
&isol_offset))
phy->isol.offset = isol_offset;
phy->lane_cnt = PHY_DEF_LANE_CNT;
err = samsung_ufs_phy_clks_init(phy);
if (err) {
dev_err(dev, "failed to get phy clocks\n");
goto out;
}
phy_set_drvdata(gen_phy, phy);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
if (IS_ERR(phy_provider)) {
err = PTR_ERR(phy_provider);
dev_err(dev, "failed to register phy-provider\n");
goto out;
}
out:
return err;
}
static const struct of_device_id samsung_ufs_phy_match[] = {
{
.compatible = "samsung,exynos7-ufs-phy",
.data = &exynos7_ufs_phy,
}, {
.compatible = "samsung,exynosautov9-ufs-phy",
.data = &exynosautov9_ufs_phy,
}, {
.compatible = "tesla,fsd-ufs-phy",
.data = &fsd_ufs_phy,
},
{},
};
MODULE_DEVICE_TABLE(of, samsung_ufs_phy_match);
static struct platform_driver samsung_ufs_phy_driver = {
.probe = samsung_ufs_phy_probe,
.driver = {
.name = "samsung-ufs-phy",
.of_match_table = samsung_ufs_phy_match,
},
};
module_platform_driver(samsung_ufs_phy_driver);
MODULE_DESCRIPTION("Samsung SoC UFS PHY Driver");
MODULE_AUTHOR("Seungwon Jeon <[email protected]>");
MODULE_AUTHOR("Alim Akhtar <[email protected]>");
MODULE_LICENSE("GPL v2");
/* ==== end of drivers/phy/samsung/phy-samsung-ufs.c (linux-master) ==== */
// SPDX-License-Identifier: GPL-2.0-only
/*
* Samsung Exynos SoC series PCIe PHY driver
*
* Phy provider for PCIe controller on Exynos SoC series
*
* Copyright (C) 2017-2020 Samsung Electronics Co., Ltd.
* Jaehoon Chung <[email protected]>
*/
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/regmap.h>
#define PCIE_PHY_OFFSET(x) ((x) * 0x4)
/* Sysreg FSYS register offsets and bits for Exynos5433 */
#define PCIE_EXYNOS5433_PHY_MAC_RESET 0x0208
#define PCIE_MAC_RESET_MASK 0xFF
#define PCIE_MAC_RESET BIT(4)
#define PCIE_EXYNOS5433_PHY_L1SUB_CM_CON 0x1010
#define PCIE_REFCLK_GATING_EN BIT(0)
#define PCIE_EXYNOS5433_PHY_COMMON_RESET 0x1020
#define PCIE_PHY_RESET BIT(0)
#define PCIE_EXYNOS5433_PHY_GLOBAL_RESET 0x1040
#define PCIE_GLOBAL_RESET BIT(0)
#define PCIE_REFCLK BIT(1)
#define PCIE_REFCLK_MASK 0x16
#define PCIE_APP_REQ_EXIT_L1_MODE BIT(5)
/* PMU PCIE PHY isolation control */
#define EXYNOS5433_PMU_PCIE_PHY_OFFSET 0x730
/* For Exynos pcie phy */
struct exynos_pcie_phy {
void __iomem *base;
struct regmap *pmureg;
struct regmap *fsysreg;
};
static void exynos_pcie_phy_writel(void __iomem *base, u32 val, u32 offset)
{
writel(val, base + offset);
}
/* Exynos5433 specific functions */
static int exynos5433_pcie_phy_init(struct phy *phy)
{
struct exynos_pcie_phy *ep = phy_get_drvdata(phy);
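/*
 * Power the PHY up in the PMU, keep the common/MAC blocks in reset while
 * the analog tuning registers are written, then release the resets at
 * the end of this function.
 */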
regmap_update_bits(ep->pmureg, EXYNOS5433_PMU_PCIE_PHY_OFFSET,
BIT(0), 1);
regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_GLOBAL_RESET,
PCIE_APP_REQ_EXIT_L1_MODE, 0);
regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_L1SUB_CM_CON,
PCIE_REFCLK_GATING_EN, 0);
regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_COMMON_RESET,
PCIE_PHY_RESET, 1);
regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_MAC_RESET,
PCIE_MAC_RESET, 0);
/* PHY refclk 24MHz */
regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_GLOBAL_RESET,
PCIE_REFCLK_MASK, PCIE_REFCLK);
regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_GLOBAL_RESET,
PCIE_GLOBAL_RESET, 0);
exynos_pcie_phy_writel(ep->base, 0x11, PCIE_PHY_OFFSET(0x3));
/* band gap reference on */
exynos_pcie_phy_writel(ep->base, 0, PCIE_PHY_OFFSET(0x20));
exynos_pcie_phy_writel(ep->base, 0, PCIE_PHY_OFFSET(0x4b));
/* jitter tuning */
exynos_pcie_phy_writel(ep->base, 0x34, PCIE_PHY_OFFSET(0x4));
exynos_pcie_phy_writel(ep->base, 0x02, PCIE_PHY_OFFSET(0x7));
exynos_pcie_phy_writel(ep->base, 0x41, PCIE_PHY_OFFSET(0x21));
exynos_pcie_phy_writel(ep->base, 0x7F, PCIE_PHY_OFFSET(0x14));
exynos_pcie_phy_writel(ep->base, 0xC0, PCIE_PHY_OFFSET(0x15));
exynos_pcie_phy_writel(ep->base, 0x61, PCIE_PHY_OFFSET(0x36));
/* D0 uninit.. */
exynos_pcie_phy_writel(ep->base, 0x44, PCIE_PHY_OFFSET(0x3D));
/* 24MHz */
exynos_pcie_phy_writel(ep->base, 0x94, PCIE_PHY_OFFSET(0x8));
exynos_pcie_phy_writel(ep->base, 0xA7, PCIE_PHY_OFFSET(0x9));
exynos_pcie_phy_writel(ep->base, 0x93, PCIE_PHY_OFFSET(0xA));
exynos_pcie_phy_writel(ep->base, 0x6B, PCIE_PHY_OFFSET(0xC));
exynos_pcie_phy_writel(ep->base, 0xA5, PCIE_PHY_OFFSET(0xF));
exynos_pcie_phy_writel(ep->base, 0x34, PCIE_PHY_OFFSET(0x16));
exynos_pcie_phy_writel(ep->base, 0xA3, PCIE_PHY_OFFSET(0x17));
exynos_pcie_phy_writel(ep->base, 0xA7, PCIE_PHY_OFFSET(0x1A));
exynos_pcie_phy_writel(ep->base, 0x71, PCIE_PHY_OFFSET(0x23));
exynos_pcie_phy_writel(ep->base, 0x4C, PCIE_PHY_OFFSET(0x24));
exynos_pcie_phy_writel(ep->base, 0x0E, PCIE_PHY_OFFSET(0x26));
exynos_pcie_phy_writel(ep->base, 0x14, PCIE_PHY_OFFSET(0x7));
exynos_pcie_phy_writel(ep->base, 0x48, PCIE_PHY_OFFSET(0x43));
exynos_pcie_phy_writel(ep->base, 0x44, PCIE_PHY_OFFSET(0x44));
exynos_pcie_phy_writel(ep->base, 0x03, PCIE_PHY_OFFSET(0x45));
exynos_pcie_phy_writel(ep->base, 0xA7, PCIE_PHY_OFFSET(0x48));
exynos_pcie_phy_writel(ep->base, 0x13, PCIE_PHY_OFFSET(0x54));
exynos_pcie_phy_writel(ep->base, 0x04, PCIE_PHY_OFFSET(0x31));
exynos_pcie_phy_writel(ep->base, 0, PCIE_PHY_OFFSET(0x32));
regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_COMMON_RESET,
PCIE_PHY_RESET, 0);
regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_MAC_RESET,
PCIE_MAC_RESET_MASK, PCIE_MAC_RESET);
return 0;
}
static int exynos5433_pcie_phy_exit(struct phy *phy)
{
struct exynos_pcie_phy *ep = phy_get_drvdata(phy);
regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_L1SUB_CM_CON,
PCIE_REFCLK_GATING_EN, PCIE_REFCLK_GATING_EN);
regmap_update_bits(ep->pmureg, EXYNOS5433_PMU_PCIE_PHY_OFFSET,
BIT(0), 0);
return 0;
}
static const struct phy_ops exynos5433_phy_ops = {
.init = exynos5433_pcie_phy_init,
.exit = exynos5433_pcie_phy_exit,
.owner = THIS_MODULE,
};
static const struct of_device_id exynos_pcie_phy_match[] = {
{
.compatible = "samsung,exynos5433-pcie-phy",
},
{},
};
static int exynos_pcie_phy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct exynos_pcie_phy *exynos_phy;
struct phy *generic_phy;
struct phy_provider *phy_provider;
exynos_phy = devm_kzalloc(dev, sizeof(*exynos_phy), GFP_KERNEL);
if (!exynos_phy)
return -ENOMEM;
exynos_phy->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(exynos_phy->base))
return PTR_ERR(exynos_phy->base);
exynos_phy->pmureg = syscon_regmap_lookup_by_phandle(dev->of_node,
"samsung,pmu-syscon");
if (IS_ERR(exynos_phy->pmureg)) {
dev_err(&pdev->dev, "PMU regmap lookup failed.\n");
return PTR_ERR(exynos_phy->pmureg);
}
exynos_phy->fsysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
"samsung,fsys-sysreg");
if (IS_ERR(exynos_phy->fsysreg)) {
dev_err(&pdev->dev, "FSYS sysreg regmap lookup failed.\n");
return PTR_ERR(exynos_phy->fsysreg);
}
generic_phy = devm_phy_create(dev, dev->of_node, &exynos5433_phy_ops);
if (IS_ERR(generic_phy)) {
dev_err(dev, "failed to create PHY\n");
return PTR_ERR(generic_phy);
}
phy_set_drvdata(generic_phy, exynos_phy);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
return PTR_ERR_OR_ZERO(phy_provider);
}
static struct platform_driver exynos_pcie_phy_driver = {
.probe = exynos_pcie_phy_probe,
.driver = {
.of_match_table = exynos_pcie_phy_match,
.name = "exynos_pcie_phy",
.suppress_bind_attrs = true,
}
};
builtin_platform_driver(exynos_pcie_phy_driver);
/* ==== end of drivers/phy/samsung/phy-exynos-pcie.c (linux-master) ==== */
// SPDX-License-Identifier: GPL-2.0-only
/*
* Samsung SoC USB 1.1/2.0 PHY driver - Exynos 5250 support
*
* Copyright (C) 2013 Samsung Electronics Co., Ltd.
* Author: Kamil Debski <[email protected]>
*/
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/phy/phy.h>
#include <linux/regmap.h>
#include "phy-samsung-usb2.h"
/* Exynos USB PHY registers */
#define EXYNOS_5250_REFCLKSEL_CRYSTAL 0x0
#define EXYNOS_5250_REFCLKSEL_XO 0x1
#define EXYNOS_5250_REFCLKSEL_CLKCORE 0x2
#define EXYNOS_5250_FSEL_9MHZ6 0x0
#define EXYNOS_5250_FSEL_10MHZ 0x1
#define EXYNOS_5250_FSEL_12MHZ 0x2
#define EXYNOS_5250_FSEL_19MHZ2 0x3
#define EXYNOS_5250_FSEL_20MHZ 0x4
#define EXYNOS_5250_FSEL_24MHZ 0x5
#define EXYNOS_5250_FSEL_50MHZ 0x7
/* Normal host */
#define EXYNOS_5250_HOSTPHYCTRL0 0x0
#define EXYNOS_5250_HOSTPHYCTRL0_PHYSWRSTALL BIT(31)
#define EXYNOS_5250_HOSTPHYCTRL0_REFCLKSEL_SHIFT 19
#define EXYNOS_5250_HOSTPHYCTRL0_REFCLKSEL_MASK \
(0x3 << EXYNOS_5250_HOSTPHYCTRL0_REFCLKSEL_SHIFT)
#define EXYNOS_5250_HOSTPHYCTRL0_FSEL_SHIFT 16
#define EXYNOS_5250_HOSTPHYCTRL0_FSEL_MASK \
(0x7 << EXYNOS_5250_HOSTPHYCTRL0_FSEL_SHIFT)
#define EXYNOS_5250_HOSTPHYCTRL0_TESTBURNIN BIT(11)
#define EXYNOS_5250_HOSTPHYCTRL0_RETENABLE BIT(10)
#define EXYNOS_5250_HOSTPHYCTRL0_COMMON_ON_N BIT(9)
#define EXYNOS_5250_HOSTPHYCTRL0_VATESTENB_MASK (0x3 << 7)
#define EXYNOS_5250_HOSTPHYCTRL0_VATESTENB_DUAL (0x0 << 7)
#define EXYNOS_5250_HOSTPHYCTRL0_VATESTENB_ID0 (0x1 << 7)
#define EXYNOS_5250_HOSTPHYCTRL0_VATESTENB_ANALOGTEST (0x2 << 7)
#define EXYNOS_5250_HOSTPHYCTRL0_SIDDQ BIT(6)
#define EXYNOS_5250_HOSTPHYCTRL0_FORCESLEEP BIT(5)
#define EXYNOS_5250_HOSTPHYCTRL0_FORCESUSPEND BIT(4)
#define EXYNOS_5250_HOSTPHYCTRL0_WORDINTERFACE BIT(3)
#define EXYNOS_5250_HOSTPHYCTRL0_UTMISWRST BIT(2)
#define EXYNOS_5250_HOSTPHYCTRL0_LINKSWRST BIT(1)
#define EXYNOS_5250_HOSTPHYCTRL0_PHYSWRST BIT(0)
/* HSIC0 & HSIC1 */
#define EXYNOS_5250_HSICPHYCTRL1 0x10
#define EXYNOS_5250_HSICPHYCTRL2 0x20
#define EXYNOS_5250_HSICPHYCTRLX_REFCLKSEL_MASK (0x3 << 23)
#define EXYNOS_5250_HSICPHYCTRLX_REFCLKSEL_DEFAULT (0x2 << 23)
#define EXYNOS_5250_HSICPHYCTRLX_REFCLKDIV_MASK (0x7f << 16)
#define EXYNOS_5250_HSICPHYCTRLX_REFCLKDIV_12 (0x24 << 16)
#define EXYNOS_5250_HSICPHYCTRLX_REFCLKDIV_15 (0x1c << 16)
#define EXYNOS_5250_HSICPHYCTRLX_REFCLKDIV_16 (0x1a << 16)
#define EXYNOS_5250_HSICPHYCTRLX_REFCLKDIV_19_2 (0x15 << 16)
#define EXYNOS_5250_HSICPHYCTRLX_REFCLKDIV_20 (0x14 << 16)
#define EXYNOS_5250_HSICPHYCTRLX_SIDDQ BIT(6)
#define EXYNOS_5250_HSICPHYCTRLX_FORCESLEEP BIT(5)
#define EXYNOS_5250_HSICPHYCTRLX_FORCESUSPEND BIT(4)
#define EXYNOS_5250_HSICPHYCTRLX_WORDINTERFACE BIT(3)
#define EXYNOS_5250_HSICPHYCTRLX_UTMISWRST BIT(2)
#define EXYNOS_5250_HSICPHYCTRLX_PHYSWRST BIT(0)
/* EHCI control */
#define EXYNOS_5250_HOSTEHCICTRL 0x30
#define EXYNOS_5250_HOSTEHCICTRL_ENAINCRXALIGN BIT(29)
#define EXYNOS_5250_HOSTEHCICTRL_ENAINCR4 BIT(28)
#define EXYNOS_5250_HOSTEHCICTRL_ENAINCR8 BIT(27)
#define EXYNOS_5250_HOSTEHCICTRL_ENAINCR16 BIT(26)
#define EXYNOS_5250_HOSTEHCICTRL_AUTOPPDONOVRCUREN BIT(25)
#define EXYNOS_5250_HOSTEHCICTRL_FLADJVAL0_SHIFT 19
#define EXYNOS_5250_HOSTEHCICTRL_FLADJVAL0_MASK \
(0x3f << EXYNOS_5250_HOSTEHCICTRL_FLADJVAL0_SHIFT)
#define EXYNOS_5250_HOSTEHCICTRL_FLADJVAL1_SHIFT 13
#define EXYNOS_5250_HOSTEHCICTRL_FLADJVAL1_MASK \
(0x3f << EXYNOS_5250_HOSTEHCICTRL_FLADJVAL1_SHIFT)
#define EXYNOS_5250_HOSTEHCICTRL_FLADJVAL2_SHIFT 7
#define EXYNOS_5250_HOSTEHCICTRL_FLADJVAL2_MASK \
(0x3f << EXYNOS_5250_HOSTEHCICTRL_FLADJVAL2_SHIFT)
#define EXYNOS_5250_HOSTEHCICTRL_FLADJVALHOST_SHIFT 1
#define EXYNOS_5250_HOSTEHCICTRL_FLADJVALHOST_MASK \
(0x1 << EXYNOS_5250_HOSTEHCICTRL_FLADJVALHOST_SHIFT)
#define EXYNOS_5250_HOSTEHCICTRL_SIMULATIONMODE BIT(0)
/* OHCI control */
#define EXYNOS_5250_HOSTOHCICTRL 0x34
#define EXYNOS_5250_HOSTOHCICTRL_FRAMELENVAL_SHIFT 1
#define EXYNOS_5250_HOSTOHCICTRL_FRAMELENVAL_MASK \
(0x3ff << EXYNOS_5250_HOSTOHCICTRL_FRAMELENVAL_SHIFT)
#define EXYNOS_5250_HOSTOHCICTRL_FRAMELENVALEN BIT(0)
/* USBOTG */
#define EXYNOS_5250_USBOTGSYS 0x38
#define EXYNOS_5250_USBOTGSYS_PHYLINK_SW_RESET BIT(14)
#define EXYNOS_5250_USBOTGSYS_LINK_SW_RST_UOTG BIT(13)
#define EXYNOS_5250_USBOTGSYS_PHY_SW_RST BIT(12)
#define EXYNOS_5250_USBOTGSYS_REFCLKSEL_SHIFT 9
#define EXYNOS_5250_USBOTGSYS_REFCLKSEL_MASK \
(0x3 << EXYNOS_5250_USBOTGSYS_REFCLKSEL_SHIFT)
#define EXYNOS_5250_USBOTGSYS_ID_PULLUP BIT(8)
#define EXYNOS_5250_USBOTGSYS_COMMON_ON BIT(7)
#define EXYNOS_5250_USBOTGSYS_FSEL_SHIFT 4
#define EXYNOS_5250_USBOTGSYS_FSEL_MASK \
(0x3 << EXYNOS_5250_USBOTGSYS_FSEL_SHIFT)
#define EXYNOS_5250_USBOTGSYS_FORCE_SLEEP BIT(3)
#define EXYNOS_5250_USBOTGSYS_OTGDISABLE BIT(2)
#define EXYNOS_5250_USBOTGSYS_SIDDQ_UOTG BIT(1)
#define EXYNOS_5250_USBOTGSYS_FORCE_SUSPEND BIT(0)
/* Isolation, configured in the power management unit */
#define EXYNOS_5250_USB_ISOL_OTG_OFFSET 0x704
#define EXYNOS_5250_USB_ISOL_HOST_OFFSET 0x708
#define EXYNOS_5420_USB_ISOL_HOST_OFFSET 0x70C
#define EXYNOS_5250_USB_ISOL_ENABLE BIT(0)
/* Mode switch register */
#define EXYNOS_5250_MODE_SWITCH_OFFSET 0x230
#define EXYNOS_5250_MODE_SWITCH_MASK 1
#define EXYNOS_5250_MODE_SWITCH_DEVICE 0
#define EXYNOS_5250_MODE_SWITCH_HOST 1
enum exynos4x12_phy_id {
EXYNOS5250_DEVICE,
EXYNOS5250_HOST,
EXYNOS5250_HSIC0,
EXYNOS5250_HSIC1,
};
/*
* exynos5250_rate_to_clk() converts the supplied clock rate to the value that
* can be written to the phy register.
*/
static int exynos5250_rate_to_clk(unsigned long rate, u32 *reg)
{
/* EXYNOS_5250_FSEL_MASK */
switch (rate) {
case 9600 * KHZ:
*reg = EXYNOS_5250_FSEL_9MHZ6;
break;
case 10 * MHZ:
*reg = EXYNOS_5250_FSEL_10MHZ;
break;
case 12 * MHZ:
*reg = EXYNOS_5250_FSEL_12MHZ;
break;
case 19200 * KHZ:
*reg = EXYNOS_5250_FSEL_19MHZ2;
break;
case 20 * MHZ:
*reg = EXYNOS_5250_FSEL_20MHZ;
break;
case 24 * MHZ:
*reg = EXYNOS_5250_FSEL_24MHZ;
break;
case 50 * MHZ:
*reg = EXYNOS_5250_FSEL_50MHZ;
break;
default:
return -EINVAL;
}
return 0;
}
static void exynos5250_isol(struct samsung_usb2_phy_instance *inst, bool on)
{
struct samsung_usb2_phy_driver *drv = inst->drv;
u32 offset;
u32 mask = EXYNOS_5250_USB_ISOL_ENABLE;
if (drv->cfg == &exynos5250_usb2_phy_config &&
inst->cfg->id == EXYNOS5250_DEVICE)
offset = EXYNOS_5250_USB_ISOL_OTG_OFFSET;
else if (drv->cfg == &exynos5250_usb2_phy_config &&
inst->cfg->id == EXYNOS5250_HOST)
offset = EXYNOS_5250_USB_ISOL_HOST_OFFSET;
else if (drv->cfg == &exynos5420_usb2_phy_config &&
inst->cfg->id == EXYNOS5250_HOST)
offset = EXYNOS_5420_USB_ISOL_HOST_OFFSET;
else
return;
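/* The PMU bit powers the PHY; clearing it puts the block into isolation. */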
regmap_update_bits(drv->reg_pmu, offset, mask, on ? 0 : mask);
}
static int exynos5250_power_on(struct samsung_usb2_phy_instance *inst)
{
struct samsung_usb2_phy_driver *drv = inst->drv;
u32 ctrl0;
u32 otg;
u32 ehci;
u32 ohci;
u32 hsic;
switch (inst->cfg->id) {
case EXYNOS5250_DEVICE:
regmap_update_bits(drv->reg_sys,
EXYNOS_5250_MODE_SWITCH_OFFSET,
EXYNOS_5250_MODE_SWITCH_MASK,
EXYNOS_5250_MODE_SWITCH_DEVICE);
/* OTG configuration */
otg = readl(drv->reg_phy + EXYNOS_5250_USBOTGSYS);
/* The clock */
otg &= ~EXYNOS_5250_USBOTGSYS_FSEL_MASK;
otg |= drv->ref_reg_val << EXYNOS_5250_USBOTGSYS_FSEL_SHIFT;
/* Reset */
otg &= ~(EXYNOS_5250_USBOTGSYS_FORCE_SUSPEND |
EXYNOS_5250_USBOTGSYS_FORCE_SLEEP |
EXYNOS_5250_USBOTGSYS_SIDDQ_UOTG);
otg |= EXYNOS_5250_USBOTGSYS_PHY_SW_RST |
EXYNOS_5250_USBOTGSYS_PHYLINK_SW_RESET |
EXYNOS_5250_USBOTGSYS_LINK_SW_RST_UOTG |
EXYNOS_5250_USBOTGSYS_OTGDISABLE;
/* Ref clock */
otg &= ~EXYNOS_5250_USBOTGSYS_REFCLKSEL_MASK;
otg |= EXYNOS_5250_REFCLKSEL_CLKCORE <<
EXYNOS_5250_USBOTGSYS_REFCLKSEL_SHIFT;
writel(otg, drv->reg_phy + EXYNOS_5250_USBOTGSYS);
udelay(100);
otg &= ~(EXYNOS_5250_USBOTGSYS_PHY_SW_RST |
EXYNOS_5250_USBOTGSYS_LINK_SW_RST_UOTG |
EXYNOS_5250_USBOTGSYS_PHYLINK_SW_RESET |
EXYNOS_5250_USBOTGSYS_OTGDISABLE);
writel(otg, drv->reg_phy + EXYNOS_5250_USBOTGSYS);
break;
case EXYNOS5250_HOST:
case EXYNOS5250_HSIC0:
case EXYNOS5250_HSIC1:
/* Host registers configuration */
ctrl0 = readl(drv->reg_phy + EXYNOS_5250_HOSTPHYCTRL0);
/* The clock */
ctrl0 &= ~EXYNOS_5250_HOSTPHYCTRL0_FSEL_MASK;
ctrl0 |= drv->ref_reg_val <<
EXYNOS_5250_HOSTPHYCTRL0_FSEL_SHIFT;
/* Reset */
ctrl0 &= ~(EXYNOS_5250_HOSTPHYCTRL0_PHYSWRST |
EXYNOS_5250_HOSTPHYCTRL0_PHYSWRSTALL |
EXYNOS_5250_HOSTPHYCTRL0_SIDDQ |
EXYNOS_5250_HOSTPHYCTRL0_FORCESUSPEND |
EXYNOS_5250_HOSTPHYCTRL0_FORCESLEEP);
ctrl0 |= EXYNOS_5250_HOSTPHYCTRL0_LINKSWRST |
EXYNOS_5250_HOSTPHYCTRL0_UTMISWRST |
EXYNOS_5250_HOSTPHYCTRL0_COMMON_ON_N;
writel(ctrl0, drv->reg_phy + EXYNOS_5250_HOSTPHYCTRL0);
udelay(10);
ctrl0 &= ~(EXYNOS_5250_HOSTPHYCTRL0_LINKSWRST |
EXYNOS_5250_HOSTPHYCTRL0_UTMISWRST);
writel(ctrl0, drv->reg_phy + EXYNOS_5250_HOSTPHYCTRL0);
/* OTG configuration */
otg = readl(drv->reg_phy + EXYNOS_5250_USBOTGSYS);
/* The clock */
otg &= ~EXYNOS_5250_USBOTGSYS_FSEL_MASK;
otg |= drv->ref_reg_val << EXYNOS_5250_USBOTGSYS_FSEL_SHIFT;
/* Reset */
otg &= ~(EXYNOS_5250_USBOTGSYS_FORCE_SUSPEND |
EXYNOS_5250_USBOTGSYS_FORCE_SLEEP |
EXYNOS_5250_USBOTGSYS_SIDDQ_UOTG);
otg |= EXYNOS_5250_USBOTGSYS_PHY_SW_RST |
EXYNOS_5250_USBOTGSYS_PHYLINK_SW_RESET |
EXYNOS_5250_USBOTGSYS_LINK_SW_RST_UOTG |
EXYNOS_5250_USBOTGSYS_OTGDISABLE;
/* Ref clock */
otg &= ~EXYNOS_5250_USBOTGSYS_REFCLKSEL_MASK;
otg |= EXYNOS_5250_REFCLKSEL_CLKCORE <<
EXYNOS_5250_USBOTGSYS_REFCLKSEL_SHIFT;
writel(otg, drv->reg_phy + EXYNOS_5250_USBOTGSYS);
udelay(10);
otg &= ~(EXYNOS_5250_USBOTGSYS_PHY_SW_RST |
EXYNOS_5250_USBOTGSYS_LINK_SW_RST_UOTG |
EXYNOS_5250_USBOTGSYS_PHYLINK_SW_RESET);
/* HSIC phy configuration */
hsic = (EXYNOS_5250_HSICPHYCTRLX_REFCLKDIV_12 |
EXYNOS_5250_HSICPHYCTRLX_REFCLKSEL_DEFAULT |
EXYNOS_5250_HSICPHYCTRLX_PHYSWRST);
writel(hsic, drv->reg_phy + EXYNOS_5250_HSICPHYCTRL1);
writel(hsic, drv->reg_phy + EXYNOS_5250_HSICPHYCTRL2);
udelay(10);
hsic &= ~EXYNOS_5250_HSICPHYCTRLX_PHYSWRST;
writel(hsic, drv->reg_phy + EXYNOS_5250_HSICPHYCTRL1);
writel(hsic, drv->reg_phy + EXYNOS_5250_HSICPHYCTRL2);
/*
* The following delay is necessary for the reset sequence to be
* completed.
*/
udelay(80);
/* Enable EHCI DMA burst */
ehci = readl(drv->reg_phy + EXYNOS_5250_HOSTEHCICTRL);
ehci |= EXYNOS_5250_HOSTEHCICTRL_ENAINCRXALIGN |
EXYNOS_5250_HOSTEHCICTRL_ENAINCR4 |
EXYNOS_5250_HOSTEHCICTRL_ENAINCR8 |
EXYNOS_5250_HOSTEHCICTRL_ENAINCR16;
writel(ehci, drv->reg_phy + EXYNOS_5250_HOSTEHCICTRL);
/* OHCI settings */
ohci = readl(drv->reg_phy + EXYNOS_5250_HOSTOHCICTRL);
/* Following code is based on the old driver */
ohci |= 0x1 << 3;
writel(ohci, drv->reg_phy + EXYNOS_5250_HOSTOHCICTRL);
break;
}
exynos5250_isol(inst, 0);
return 0;
}
static int exynos5250_power_off(struct samsung_usb2_phy_instance *inst)
{
struct samsung_usb2_phy_driver *drv = inst->drv;
u32 ctrl0;
u32 otg;
u32 hsic;
exynos5250_isol(inst, 1);
switch (inst->cfg->id) {
case EXYNOS5250_DEVICE:
otg = readl(drv->reg_phy + EXYNOS_5250_USBOTGSYS);
otg |= (EXYNOS_5250_USBOTGSYS_FORCE_SUSPEND |
EXYNOS_5250_USBOTGSYS_SIDDQ_UOTG |
EXYNOS_5250_USBOTGSYS_FORCE_SLEEP);
writel(otg, drv->reg_phy + EXYNOS_5250_USBOTGSYS);
break;
case EXYNOS5250_HOST:
ctrl0 = readl(drv->reg_phy + EXYNOS_5250_HOSTPHYCTRL0);
ctrl0 |= (EXYNOS_5250_HOSTPHYCTRL0_SIDDQ |
EXYNOS_5250_HOSTPHYCTRL0_FORCESUSPEND |
EXYNOS_5250_HOSTPHYCTRL0_FORCESLEEP |
EXYNOS_5250_HOSTPHYCTRL0_PHYSWRST |
EXYNOS_5250_HOSTPHYCTRL0_PHYSWRSTALL);
writel(ctrl0, drv->reg_phy + EXYNOS_5250_HOSTPHYCTRL0);
break;
case EXYNOS5250_HSIC0:
case EXYNOS5250_HSIC1:
hsic = (EXYNOS_5250_HSICPHYCTRLX_REFCLKDIV_12 |
EXYNOS_5250_HSICPHYCTRLX_REFCLKSEL_DEFAULT |
EXYNOS_5250_HSICPHYCTRLX_SIDDQ |
EXYNOS_5250_HSICPHYCTRLX_FORCESLEEP |
EXYNOS_5250_HSICPHYCTRLX_FORCESUSPEND
);
writel(hsic, drv->reg_phy + EXYNOS_5250_HSICPHYCTRL1);
writel(hsic, drv->reg_phy + EXYNOS_5250_HSICPHYCTRL2);
break;
}
return 0;
}
static const struct samsung_usb2_common_phy exynos5250_phys[] = {
{
.label = "device",
.id = EXYNOS5250_DEVICE,
.power_on = exynos5250_power_on,
.power_off = exynos5250_power_off,
},
{
.label = "host",
.id = EXYNOS5250_HOST,
.power_on = exynos5250_power_on,
.power_off = exynos5250_power_off,
},
{
.label = "hsic0",
.id = EXYNOS5250_HSIC0,
.power_on = exynos5250_power_on,
.power_off = exynos5250_power_off,
},
{
.label = "hsic1",
.id = EXYNOS5250_HSIC1,
.power_on = exynos5250_power_on,
.power_off = exynos5250_power_off,
},
};
static const struct samsung_usb2_common_phy exynos5420_phys[] = {
{
.label = "host",
.id = EXYNOS5250_HOST,
.power_on = exynos5250_power_on,
.power_off = exynos5250_power_off,
},
{
.label = "hsic",
.id = EXYNOS5250_HSIC0,
.power_on = exynos5250_power_on,
.power_off = exynos5250_power_off,
},
};
const struct samsung_usb2_phy_config exynos5250_usb2_phy_config = {
.has_mode_switch = 1,
.num_phys = ARRAY_SIZE(exynos5250_phys),
.phys = exynos5250_phys,
.rate_to_clk = exynos5250_rate_to_clk,
};
const struct samsung_usb2_phy_config exynos5420_usb2_phy_config = {
.has_mode_switch = 1,
.num_phys = ARRAY_SIZE(exynos5420_phys),
.phys = exynos5420_phys,
.rate_to_clk = exynos5250_rate_to_clk,
};
/* ==== end of drivers/phy/samsung/phy-exynos5250-usb2.c (linux-master) ==== */
// SPDX-License-Identifier: GPL-2.0-only
/*
* UFS PHY driver data for Samsung EXYNOS7 SoC
*
* Copyright (C) 2020 Samsung Electronics Co., Ltd.
*/
#include "phy-samsung-ufs.h"
#define EXYNOS7_EMBEDDED_COMBO_PHY_CTRL 0x720
#define EXYNOS7_EMBEDDED_COMBO_PHY_CTRL_MASK 0x1
#define EXYNOS7_EMBEDDED_COMBO_PHY_CTRL_EN BIT(0)
#define EXYNOS7_EMBEDDED_COMBO_PHY_CDR_LOCK_STATUS 0x5e
/* Calibration for phy initialization */
static const struct samsung_ufs_phy_cfg exynos7_pre_init_cfg[] = {
PHY_COMN_REG_CFG(0x00f, 0xfa, PWR_MODE_ANY),
PHY_COMN_REG_CFG(0x010, 0x82, PWR_MODE_ANY),
PHY_COMN_REG_CFG(0x011, 0x1e, PWR_MODE_ANY),
PHY_COMN_REG_CFG(0x017, 0x84, PWR_MODE_ANY),
PHY_TRSV_REG_CFG(0x035, 0x58, PWR_MODE_ANY),
PHY_TRSV_REG_CFG(0x036, 0x32, PWR_MODE_ANY),
PHY_TRSV_REG_CFG(0x037, 0x40, PWR_MODE_ANY),
PHY_TRSV_REG_CFG(0x03b, 0x83, PWR_MODE_ANY),
PHY_TRSV_REG_CFG(0x042, 0x88, PWR_MODE_ANY),
PHY_TRSV_REG_CFG(0x043, 0xa6, PWR_MODE_ANY),
PHY_TRSV_REG_CFG(0x048, 0x74, PWR_MODE_ANY),
PHY_TRSV_REG_CFG(0x04c, 0x5b, PWR_MODE_ANY),
PHY_TRSV_REG_CFG(0x04d, 0x83, PWR_MODE_ANY),
PHY_TRSV_REG_CFG(0x05c, 0x14, PWR_MODE_ANY),
END_UFS_PHY_CFG
};
/* Calibration for HS mode series A/B */
static const struct samsung_ufs_phy_cfg exynos7_pre_pwr_hs_cfg[] = {
PHY_COMN_REG_CFG(0x00f, 0xfa, PWR_MODE_HS_ANY),
PHY_COMN_REG_CFG(0x010, 0x82, PWR_MODE_HS_ANY),
PHY_COMN_REG_CFG(0x011, 0x1e, PWR_MODE_HS_ANY),
/* Setting order: 1st (0x16), 2nd (0x15) */
PHY_COMN_REG_CFG(0x016, 0xff, PWR_MODE_HS_ANY),
PHY_COMN_REG_CFG(0x015, 0x80, PWR_MODE_HS_ANY),
PHY_COMN_REG_CFG(0x017, 0x94, PWR_MODE_HS_ANY),
PHY_TRSV_REG_CFG(0x036, 0x32, PWR_MODE_HS_ANY),
PHY_TRSV_REG_CFG(0x037, 0x43, PWR_MODE_HS_ANY),
PHY_TRSV_REG_CFG(0x038, 0x3f, PWR_MODE_HS_ANY),
PHY_TRSV_REG_CFG(0x042, 0x88, PWR_MODE_HS_G2_SER_A),
PHY_TRSV_REG_CFG(0x042, 0xbb, PWR_MODE_HS_G2_SER_B),
PHY_TRSV_REG_CFG(0x043, 0xa6, PWR_MODE_HS_ANY),
PHY_TRSV_REG_CFG(0x048, 0x74, PWR_MODE_HS_ANY),
PHY_TRSV_REG_CFG(0x034, 0x35, PWR_MODE_HS_G2_SER_A),
PHY_TRSV_REG_CFG(0x034, 0x36, PWR_MODE_HS_G2_SER_B),
PHY_TRSV_REG_CFG(0x035, 0x5b, PWR_MODE_HS_G2_SER_A),
PHY_TRSV_REG_CFG(0x035, 0x5c, PWR_MODE_HS_G2_SER_B),
END_UFS_PHY_CFG
};
/* Calibration for HS mode series A/B after PMC */
static const struct samsung_ufs_phy_cfg exynos7_post_pwr_hs_cfg[] = {
PHY_COMN_REG_CFG(0x015, 0x00, PWR_MODE_HS_ANY),
PHY_TRSV_REG_CFG(0x04d, 0x83, PWR_MODE_HS_ANY),
END_UFS_PHY_CFG
};
static const struct samsung_ufs_phy_cfg *exynos7_ufs_phy_cfgs[CFG_TAG_MAX] = {
[CFG_PRE_INIT] = exynos7_pre_init_cfg,
[CFG_PRE_PWR_HS] = exynos7_pre_pwr_hs_cfg,
[CFG_POST_PWR_HS] = exynos7_post_pwr_hs_cfg,
};
static const char * const exynos7_ufs_phy_clks[] = {
"tx0_symbol_clk", "rx0_symbol_clk", "rx1_symbol_clk", "ref_clk",
};
const struct samsung_ufs_phy_drvdata exynos7_ufs_phy = {
.cfgs = exynos7_ufs_phy_cfgs,
.isol = {
.offset = EXYNOS7_EMBEDDED_COMBO_PHY_CTRL,
.mask = EXYNOS7_EMBEDDED_COMBO_PHY_CTRL_MASK,
.en = EXYNOS7_EMBEDDED_COMBO_PHY_CTRL_EN,
},
.clk_list = exynos7_ufs_phy_clks,
.num_clks = ARRAY_SIZE(exynos7_ufs_phy_clks),
.cdr_lock_status_offset = EXYNOS7_EMBEDDED_COMBO_PHY_CDR_LOCK_STATUS,
};
/* ==== end of drivers/phy/samsung/phy-exynos7-ufs.c (linux-master) ==== */
// SPDX-License-Identifier: GPL-2.0-only
/*
* Samsung Exynos SoC series Display Port PHY driver
*
* Copyright (C) 2013 Samsung Electronics Co., Ltd.
* Author: Jingoo Han <[email protected]>
*/
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/soc/samsung/exynos-regs-pmu.h>
struct exynos_dp_video_phy_drvdata {
u32 phy_ctrl_offset;
};
struct exynos_dp_video_phy {
struct regmap *regs;
const struct exynos_dp_video_phy_drvdata *drvdata;
};
static int exynos_dp_video_phy_power_on(struct phy *phy)
{
struct exynos_dp_video_phy *state = phy_get_drvdata(phy);
/* Disable power isolation on DP-PHY */
return regmap_update_bits(state->regs, state->drvdata->phy_ctrl_offset,
EXYNOS4_PHY_ENABLE, EXYNOS4_PHY_ENABLE);
}
static int exynos_dp_video_phy_power_off(struct phy *phy)
{
struct exynos_dp_video_phy *state = phy_get_drvdata(phy);
/* Enable power isolation on DP-PHY */
return regmap_update_bits(state->regs, state->drvdata->phy_ctrl_offset,
EXYNOS4_PHY_ENABLE, 0);
}
static const struct phy_ops exynos_dp_video_phy_ops = {
.power_on = exynos_dp_video_phy_power_on,
.power_off = exynos_dp_video_phy_power_off,
.owner = THIS_MODULE,
};
static const struct exynos_dp_video_phy_drvdata exynos5250_dp_video_phy = {
.phy_ctrl_offset = EXYNOS5_DPTX_PHY_CONTROL,
};
static const struct exynos_dp_video_phy_drvdata exynos5420_dp_video_phy = {
.phy_ctrl_offset = EXYNOS5420_DPTX_PHY_CONTROL,
};
static const struct of_device_id exynos_dp_video_phy_of_match[] = {
{
.compatible = "samsung,exynos5250-dp-video-phy",
.data = &exynos5250_dp_video_phy,
}, {
.compatible = "samsung,exynos5420-dp-video-phy",
.data = &exynos5420_dp_video_phy,
},
{ },
};
MODULE_DEVICE_TABLE(of, exynos_dp_video_phy_of_match);
static int exynos_dp_video_phy_probe(struct platform_device *pdev)
{
struct exynos_dp_video_phy *state;
struct device *dev = &pdev->dev;
struct phy_provider *phy_provider;
struct phy *phy;
state = devm_kzalloc(dev, sizeof(*state), GFP_KERNEL);
if (!state)
return -ENOMEM;
state->regs = syscon_node_to_regmap(dev->parent->of_node);
if (IS_ERR(state->regs))
/* Backwards compatible way */
state->regs = syscon_regmap_lookup_by_phandle(dev->of_node,
"samsung,pmu-syscon");
if (IS_ERR(state->regs)) {
dev_err(dev, "Failed to lookup PMU regmap\n");
return PTR_ERR(state->regs);
}
state->drvdata = of_device_get_match_data(dev);
phy = devm_phy_create(dev, NULL, &exynos_dp_video_phy_ops);
if (IS_ERR(phy)) {
dev_err(dev, "failed to create Display Port PHY\n");
return PTR_ERR(phy);
}
phy_set_drvdata(phy, state);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
return PTR_ERR_OR_ZERO(phy_provider);
}
static struct platform_driver exynos_dp_video_phy_driver = {
.probe = exynos_dp_video_phy_probe,
.driver = {
.name = "exynos-dp-video-phy",
.of_match_table = exynos_dp_video_phy_of_match,
.suppress_bind_attrs = true,
}
};
module_platform_driver(exynos_dp_video_phy_driver);
MODULE_AUTHOR("Jingoo Han <[email protected]>");
MODULE_DESCRIPTION("Samsung Exynos SoC DP PHY driver");
MODULE_LICENSE("GPL v2");
/* ==== end of drivers/phy/samsung/phy-exynos-dp-video.c (linux-master) ==== */
// SPDX-License-Identifier: GPL-2.0-only
/*
* Samsung SATA SerDes(PHY) driver
*
* Copyright (C) 2013 Samsung Electronics Co., Ltd.
* Authors: Girish K S <[email protected]>
* Yuvaraj Kumar C D <[email protected]>
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/spinlock.h>
#include <linux/mfd/syscon.h>
#define SATAPHY_CONTROL_OFFSET 0x0724
#define EXYNOS5_SATAPHY_PMU_ENABLE BIT(0)
#define EXYNOS5_SATA_RESET 0x4
#define RESET_GLOBAL_RST_N BIT(0)
#define RESET_CMN_RST_N BIT(1)
#define RESET_CMN_BLOCK_RST_N BIT(2)
#define RESET_CMN_I2C_RST_N BIT(3)
#define RESET_TX_RX_PIPE_RST_N BIT(4)
#define RESET_TX_RX_BLOCK_RST_N BIT(5)
#define RESET_TX_RX_I2C_RST_N (BIT(6) | BIT(7))
#define LINK_RESET 0xf0000
#define EXYNOS5_SATA_MODE0 0x10
#define SATA_SPD_GEN3 BIT(1)
#define EXYNOS5_SATA_CTRL0 0x14
#define CTRL0_P0_PHY_CALIBRATED_SEL BIT(9)
#define CTRL0_P0_PHY_CALIBRATED BIT(8)
#define EXYNOS5_SATA_PHSATA_CTRLM 0xe0
#define PHCTRLM_REF_RATE BIT(1)
#define PHCTRLM_HIGH_SPEED BIT(0)
#define EXYNOS5_SATA_PHSATA_STATM 0xf0
#define PHSTATM_PLL_LOCKED BIT(0)
#define PHY_PLL_TIMEOUT (usecs_to_jiffies(1000))
struct exynos_sata_phy {
struct phy *phy;
struct clk *phyclk;
void __iomem *regs;
struct regmap *pmureg;
struct i2c_client *client;
};
static int wait_for_reg_status(void __iomem *base, u32 reg, u32 checkbit,
u32 status)
{
unsigned long timeout = jiffies + PHY_PLL_TIMEOUT;
while (time_before(jiffies, timeout)) {
if ((readl(base + reg) & checkbit) == status)
return 0;
}
return -EFAULT;
}
static int exynos_sata_phy_power_on(struct phy *phy)
{
struct exynos_sata_phy *sata_phy = phy_get_drvdata(phy);
return regmap_update_bits(sata_phy->pmureg, SATAPHY_CONTROL_OFFSET,
EXYNOS5_SATAPHY_PMU_ENABLE, true);
}
static int exynos_sata_phy_power_off(struct phy *phy)
{
struct exynos_sata_phy *sata_phy = phy_get_drvdata(phy);
return regmap_update_bits(sata_phy->pmureg, SATAPHY_CONTROL_OFFSET,
EXYNOS5_SATAPHY_PMU_ENABLE, false);
}
static int exynos_sata_phy_init(struct phy *phy)
{
u32 val = 0;
int ret = 0;
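/* Opaque SerDes setup bytes sent to the PHY over I2C during init. */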
u8 buf[] = { 0x3a, 0x0b };
struct exynos_sata_phy *sata_phy = phy_get_drvdata(phy);
ret = regmap_update_bits(sata_phy->pmureg, SATAPHY_CONTROL_OFFSET,
EXYNOS5_SATAPHY_PMU_ENABLE, true);
if (ret != 0)
dev_err(&sata_phy->phy->dev, "phy init failed\n");
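/* Hold every block in reset (active-low bits cleared), then release them. */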
writel(val, sata_phy->regs + EXYNOS5_SATA_RESET);
val = readl(sata_phy->regs + EXYNOS5_SATA_RESET);
val |= RESET_GLOBAL_RST_N | RESET_CMN_RST_N | RESET_CMN_BLOCK_RST_N
| RESET_CMN_I2C_RST_N | RESET_TX_RX_PIPE_RST_N
| RESET_TX_RX_BLOCK_RST_N | RESET_TX_RX_I2C_RST_N;
writel(val, sata_phy->regs + EXYNOS5_SATA_RESET);
val = readl(sata_phy->regs + EXYNOS5_SATA_RESET);
val |= LINK_RESET;
writel(val, sata_phy->regs + EXYNOS5_SATA_RESET);
val = readl(sata_phy->regs + EXYNOS5_SATA_RESET);
val |= RESET_CMN_RST_N;
writel(val, sata_phy->regs + EXYNOS5_SATA_RESET);
val = readl(sata_phy->regs + EXYNOS5_SATA_PHSATA_CTRLM);
val &= ~PHCTRLM_REF_RATE;
writel(val, sata_phy->regs + EXYNOS5_SATA_PHSATA_CTRLM);
/* High speed enable for Gen3 */
val = readl(sata_phy->regs + EXYNOS5_SATA_PHSATA_CTRLM);
val |= PHCTRLM_HIGH_SPEED;
writel(val, sata_phy->regs + EXYNOS5_SATA_PHSATA_CTRLM);
val = readl(sata_phy->regs + EXYNOS5_SATA_CTRL0);
val |= CTRL0_P0_PHY_CALIBRATED_SEL | CTRL0_P0_PHY_CALIBRATED;
writel(val, sata_phy->regs + EXYNOS5_SATA_CTRL0);
val = readl(sata_phy->regs + EXYNOS5_SATA_MODE0);
val |= SATA_SPD_GEN3;
writel(val, sata_phy->regs + EXYNOS5_SATA_MODE0);
ret = i2c_master_send(sata_phy->client, buf, sizeof(buf));
if (ret < 0)
return ret;
/* release cmu reset */
val = readl(sata_phy->regs + EXYNOS5_SATA_RESET);
val &= ~RESET_CMN_RST_N;
writel(val, sata_phy->regs + EXYNOS5_SATA_RESET);
val = readl(sata_phy->regs + EXYNOS5_SATA_RESET);
val |= RESET_CMN_RST_N;
writel(val, sata_phy->regs + EXYNOS5_SATA_RESET);
ret = wait_for_reg_status(sata_phy->regs,
EXYNOS5_SATA_PHSATA_STATM,
PHSTATM_PLL_LOCKED, 1);
if (ret < 0)
dev_err(&sata_phy->phy->dev,
"PHY PLL locking failed\n");
return ret;
}
static const struct phy_ops exynos_sata_phy_ops = {
.init = exynos_sata_phy_init,
.power_on = exynos_sata_phy_power_on,
.power_off = exynos_sata_phy_power_off,
.owner = THIS_MODULE,
};
static int exynos_sata_phy_probe(struct platform_device *pdev)
{
struct exynos_sata_phy *sata_phy;
struct device *dev = &pdev->dev;
struct phy_provider *phy_provider;
struct device_node *node;
int ret = 0;
sata_phy = devm_kzalloc(dev, sizeof(*sata_phy), GFP_KERNEL);
if (!sata_phy)
return -ENOMEM;
sata_phy->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sata_phy->regs))
return PTR_ERR(sata_phy->regs);
sata_phy->pmureg = syscon_regmap_lookup_by_phandle(dev->of_node,
"samsung,syscon-phandle");
if (IS_ERR(sata_phy->pmureg)) {
dev_err(dev, "syscon regmap lookup failed.\n");
return PTR_ERR(sata_phy->pmureg);
}
node = of_parse_phandle(dev->of_node,
"samsung,exynos-sataphy-i2c-phandle", 0);
if (!node)
return -EINVAL;
sata_phy->client = of_find_i2c_device_by_node(node);
of_node_put(node);
if (!sata_phy->client)
return -EPROBE_DEFER;
dev_set_drvdata(dev, sata_phy);
sata_phy->phyclk = devm_clk_get(dev, "sata_phyctrl");
if (IS_ERR(sata_phy->phyclk)) {
dev_err(dev, "failed to get clk for PHY\n");
ret = PTR_ERR(sata_phy->phyclk);
goto put_dev;
}
ret = clk_prepare_enable(sata_phy->phyclk);
if (ret < 0) {
dev_err(dev, "failed to enable source clk\n");
goto put_dev;
}
sata_phy->phy = devm_phy_create(dev, NULL, &exynos_sata_phy_ops);
if (IS_ERR(sata_phy->phy)) {
dev_err(dev, "failed to create PHY\n");
ret = PTR_ERR(sata_phy->phy);
goto clk_disable;
}
phy_set_drvdata(sata_phy->phy, sata_phy);
phy_provider = devm_of_phy_provider_register(dev,
of_phy_simple_xlate);
if (IS_ERR(phy_provider)) {
ret = PTR_ERR(phy_provider);
goto clk_disable;
}
return 0;
clk_disable:
clk_disable_unprepare(sata_phy->phyclk);
put_dev:
put_device(&sata_phy->client->dev);
return ret;
}
static const struct of_device_id exynos_sata_phy_of_match[] = {
{ .compatible = "samsung,exynos5250-sata-phy" },
{ },
};
MODULE_DEVICE_TABLE(of, exynos_sata_phy_of_match);
static struct platform_driver exynos_sata_phy_driver = {
.probe = exynos_sata_phy_probe,
.driver = {
.of_match_table = exynos_sata_phy_of_match,
.name = "samsung,sata-phy",
.suppress_bind_attrs = true,
}
};
module_platform_driver(exynos_sata_phy_driver);
MODULE_DESCRIPTION("Samsung SerDes PHY driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Girish K S <[email protected]>");
MODULE_AUTHOR("Yuvaraj C D <[email protected]>");
/* ==== end of drivers/phy/samsung/phy-exynos5250-sata.c (linux-master) ==== */
// SPDX-License-Identifier: GPL-2.0-only
/*
* Samsung Exynos5 SoC series USB DRD PHY driver
*
* Phy provider for USB 3.0 DRD controller on Exynos5 SoC series
*
* Copyright (C) 2014 Samsung Electronics Co., Ltd.
* Author: Vivek Gautam <[email protected]>
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/iopoll.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/soc/samsung/exynos-regs-pmu.h>
/* Exynos USB PHY registers */
#define EXYNOS5_FSEL_9MHZ6 0x0
#define EXYNOS5_FSEL_10MHZ 0x1
#define EXYNOS5_FSEL_12MHZ 0x2
#define EXYNOS5_FSEL_19MHZ2 0x3
#define EXYNOS5_FSEL_20MHZ 0x4
#define EXYNOS5_FSEL_24MHZ 0x5
#define EXYNOS5_FSEL_26MHZ 0x82
#define EXYNOS5_FSEL_50MHZ 0x7
/* Exynos5: USB 3.0 DRD PHY registers */
#define EXYNOS5_DRD_LINKSYSTEM 0x04
#define LINKSYSTEM_FLADJ_MASK (0x3f << 1)
#define LINKSYSTEM_FLADJ(_x) ((_x) << 1)
#define LINKSYSTEM_XHCI_VERSION_CONTROL BIT(27)
#define EXYNOS5_DRD_PHYUTMI 0x08
#define PHYUTMI_OTGDISABLE BIT(6)
#define PHYUTMI_FORCESUSPEND BIT(1)
#define PHYUTMI_FORCESLEEP BIT(0)
#define EXYNOS5_DRD_PHYPIPE 0x0c
#define EXYNOS5_DRD_PHYCLKRST 0x10
#define PHYCLKRST_EN_UTMISUSPEND BIT(31)
#define PHYCLKRST_SSC_REFCLKSEL_MASK (0xff << 23)
#define PHYCLKRST_SSC_REFCLKSEL(_x) ((_x) << 23)
#define PHYCLKRST_SSC_RANGE_MASK (0x03 << 21)
#define PHYCLKRST_SSC_RANGE(_x) ((_x) << 21)
#define PHYCLKRST_SSC_EN BIT(20)
#define PHYCLKRST_REF_SSP_EN BIT(19)
#define PHYCLKRST_REF_CLKDIV2 BIT(18)
#define PHYCLKRST_MPLL_MULTIPLIER_MASK (0x7f << 11)
#define PHYCLKRST_MPLL_MULTIPLIER_100MHZ_REF (0x19 << 11)
#define PHYCLKRST_MPLL_MULTIPLIER_50M_REF (0x32 << 11)
#define PHYCLKRST_MPLL_MULTIPLIER_24MHZ_REF (0x68 << 11)
#define PHYCLKRST_MPLL_MULTIPLIER_20MHZ_REF (0x7d << 11)
#define PHYCLKRST_MPLL_MULTIPLIER_19200KHZ_REF (0x02 << 11)
#define PHYCLKRST_FSEL_UTMI_MASK (0x7 << 5)
#define PHYCLKRST_FSEL_PIPE_MASK (0x7 << 8)
#define PHYCLKRST_FSEL(_x) ((_x) << 5)
#define PHYCLKRST_FSEL_PAD_100MHZ (0x27 << 5)
#define PHYCLKRST_FSEL_PAD_24MHZ (0x2a << 5)
#define PHYCLKRST_FSEL_PAD_20MHZ (0x31 << 5)
#define PHYCLKRST_FSEL_PAD_19_2MHZ (0x38 << 5)
#define PHYCLKRST_RETENABLEN BIT(4)
#define PHYCLKRST_REFCLKSEL_MASK (0x03 << 2)
#define PHYCLKRST_REFCLKSEL_PAD_REFCLK (0x2 << 2)
#define PHYCLKRST_REFCLKSEL_EXT_REFCLK (0x3 << 2)
#define PHYCLKRST_PORTRESET BIT(1)
#define PHYCLKRST_COMMONONN BIT(0)
#define EXYNOS5_DRD_PHYREG0 0x14
#define PHYREG0_SSC_REF_CLK_SEL BIT(21)
#define PHYREG0_SSC_RANGE BIT(20)
#define PHYREG0_CR_WRITE BIT(19)
#define PHYREG0_CR_READ BIT(18)
#define PHYREG0_CR_DATA_IN(_x) ((_x) << 2)
#define PHYREG0_CR_CAP_DATA BIT(1)
#define PHYREG0_CR_CAP_ADDR BIT(0)
#define EXYNOS5_DRD_PHYREG1 0x18
#define PHYREG1_CR_DATA_OUT(_x) ((_x) << 1)
#define PHYREG1_CR_ACK BIT(0)
#define EXYNOS5_DRD_PHYPARAM0 0x1c
#define PHYPARAM0_REF_USE_PAD BIT(31)
#define PHYPARAM0_REF_LOSLEVEL_MASK (0x1f << 26)
#define PHYPARAM0_REF_LOSLEVEL (0x9 << 26)
#define EXYNOS5_DRD_PHYPARAM1 0x20
#define PHYPARAM1_PCS_TXDEEMPH_MASK (0x1f << 0)
#define PHYPARAM1_PCS_TXDEEMPH (0x1c)
#define EXYNOS5_DRD_PHYTERM 0x24
#define EXYNOS5_DRD_PHYTEST 0x28
#define PHYTEST_POWERDOWN_SSP BIT(3)
#define PHYTEST_POWERDOWN_HSP BIT(2)
#define EXYNOS5_DRD_PHYADP 0x2c
#define EXYNOS5_DRD_PHYUTMICLKSEL 0x30
#define PHYUTMICLKSEL_UTMI_CLKSEL BIT(2)
#define EXYNOS5_DRD_PHYRESUME 0x34
#define EXYNOS5_DRD_LINKPORT 0x44
/* USB 3.0 DRD PHY SS Function Control Reg; accessed by CR_PORT */
#define EXYNOS5_DRD_PHYSS_LOSLEVEL_OVRD_IN (0x15)
#define LOSLEVEL_OVRD_IN_LOS_BIAS_5420 (0x5 << 13)
#define LOSLEVEL_OVRD_IN_LOS_BIAS_DEFAULT (0x0 << 13)
#define LOSLEVEL_OVRD_IN_EN (0x1 << 10)
#define LOSLEVEL_OVRD_IN_LOS_LEVEL_DEFAULT (0x9 << 0)
#define EXYNOS5_DRD_PHYSS_TX_VBOOSTLEVEL_OVRD_IN (0x12)
#define TX_VBOOSTLEVEL_OVRD_IN_VBOOST_5420 (0x5 << 13)
#define TX_VBOOSTLEVEL_OVRD_IN_VBOOST_DEFAULT (0x4 << 13)
#define EXYNOS5_DRD_PHYSS_LANE0_TX_DEBUG (0x1010)
#define LANE0_TX_DEBUG_RXDET_MEAS_TIME_19M2_20M (0x4 << 4)
#define LANE0_TX_DEBUG_RXDET_MEAS_TIME_24M (0x8 << 4)
#define LANE0_TX_DEBUG_RXDET_MEAS_TIME_25M_26M (0x8 << 4)
#define LANE0_TX_DEBUG_RXDET_MEAS_TIME_48M_50M_52M (0x20 << 4)
#define LANE0_TX_DEBUG_RXDET_MEAS_TIME_62M5 (0x20 << 4)
#define LANE0_TX_DEBUG_RXDET_MEAS_TIME_96M_100M (0x40 << 4)
/* Exynos850: USB DRD PHY registers */
#define EXYNOS850_DRD_LINKCTRL 0x04
#define LINKCTRL_BUS_FILTER_BYPASS(_x) ((_x) << 4)
#define LINKCTRL_FORCE_QACT BIT(8)
#define EXYNOS850_DRD_CLKRST 0x20
#define CLKRST_LINK_SW_RST BIT(0)
#define CLKRST_PORT_RST BIT(1)
#define CLKRST_PHY_SW_RST BIT(3)
#define EXYNOS850_DRD_UTMI 0x50
#define UTMI_FORCE_SLEEP BIT(0)
#define UTMI_FORCE_SUSPEND BIT(1)
#define UTMI_DM_PULLDOWN BIT(2)
#define UTMI_DP_PULLDOWN BIT(3)
#define UTMI_FORCE_BVALID BIT(4)
#define UTMI_FORCE_VBUSVALID BIT(5)
#define EXYNOS850_DRD_HSP 0x54
#define HSP_COMMONONN BIT(8)
#define HSP_EN_UTMISUSPEND BIT(9)
#define HSP_VBUSVLDEXT BIT(12)
#define HSP_VBUSVLDEXTSEL BIT(13)
#define HSP_FSV_OUT_EN BIT(24)
#define EXYNOS850_DRD_HSP_TEST 0x5c
#define HSP_TEST_SIDDQ BIT(24)
#define KHZ 1000
#define MHZ (KHZ * KHZ)
enum exynos5_usbdrd_phy_id {
EXYNOS5_DRDPHY_UTMI,
EXYNOS5_DRDPHY_PIPE3,
EXYNOS5_DRDPHYS_NUM,
};
struct phy_usb_instance;
struct exynos5_usbdrd_phy;
struct exynos5_usbdrd_phy_config {
u32 id;
void (*phy_isol)(struct phy_usb_instance *inst, u32 on);
void (*phy_init)(struct exynos5_usbdrd_phy *phy_drd);
unsigned int (*set_refclk)(struct phy_usb_instance *inst);
};
struct exynos5_usbdrd_phy_drvdata {
const struct exynos5_usbdrd_phy_config *phy_cfg;
const struct phy_ops *phy_ops;
u32 pmu_offset_usbdrd0_phy;
u32 pmu_offset_usbdrd1_phy;
bool has_common_clk_gate;
};
/**
* struct exynos5_usbdrd_phy - driver data for USB 3.0 PHY
* @dev: pointer to device instance of this platform device
* @reg_phy: usb phy controller register memory base
* @clk: phy clock for register access
* @pipeclk: clock for pipe3 phy
* @utmiclk: clock for utmi+ phy
* @itpclk: clock for ITP generation
* @drv_data: pointer to SoC level driver data structure
* @phys: array for 'EXYNOS5_DRDPHYS_NUM' number of PHY
* instances each with its 'phy' and 'phy_cfg'.
* @extrefclk: frequency select settings when using 'separate
* reference clocks' for SS and HS operations
* @ref_clk: reference clock to PHY block from which PHY's
* operational clocks are derived
* @vbus: VBUS regulator for phy
 * @vbus_boost: Boost regulator for VBUS present on a few Exynos boards
*/
struct exynos5_usbdrd_phy {
struct device *dev;
void __iomem *reg_phy;
struct clk *clk;
struct clk *pipeclk;
struct clk *utmiclk;
struct clk *itpclk;
const struct exynos5_usbdrd_phy_drvdata *drv_data;
struct phy_usb_instance {
struct phy *phy;
u32 index;
struct regmap *reg_pmu;
u32 pmu_offset;
const struct exynos5_usbdrd_phy_config *phy_cfg;
} phys[EXYNOS5_DRDPHYS_NUM];
u32 extrefclk;
struct clk *ref_clk;
struct regulator *vbus;
struct regulator *vbus_boost;
};
static inline
struct exynos5_usbdrd_phy *to_usbdrd_phy(struct phy_usb_instance *inst)
{
return container_of((inst), struct exynos5_usbdrd_phy,
phys[(inst)->index]);
}
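/*
 * Example (illustrative, following directly from the container_of() above):
 * given a pointer to phy_drd->phys[1], inst->index is 1, so subtracting the
 * offset of phys[1] within struct exynos5_usbdrd_phy recovers the enclosing
 * phy_drd.
 */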
/*
* exynos5_rate_to_clk() converts the supplied clock rate to the value that
* can be written to the phy register.
*/
static unsigned int exynos5_rate_to_clk(unsigned long rate, u32 *reg)
{
/* EXYNOS5_FSEL_MASK */
switch (rate) {
case 9600 * KHZ:
*reg = EXYNOS5_FSEL_9MHZ6;
break;
case 10 * MHZ:
*reg = EXYNOS5_FSEL_10MHZ;
break;
case 12 * MHZ:
*reg = EXYNOS5_FSEL_12MHZ;
break;
case 19200 * KHZ:
*reg = EXYNOS5_FSEL_19MHZ2;
break;
case 20 * MHZ:
*reg = EXYNOS5_FSEL_20MHZ;
break;
case 24 * MHZ:
*reg = EXYNOS5_FSEL_24MHZ;
break;
case 26 * MHZ:
*reg = EXYNOS5_FSEL_26MHZ;
break;
case 50 * MHZ:
*reg = EXYNOS5_FSEL_50MHZ;
break;
default:
return -EINVAL;
}
return 0;
}
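/*
 * Worked example (illustrative only, rates assumed): with a 24 MHz "ref"
 * clock, clk_get_rate() returns 24000000, which hits the "24 * MHZ" case
 * above, so *reg becomes EXYNOS5_FSEL_24MHZ and 0 is returned; an unlisted
 * rate such as 27 MHz yields -EINVAL and the caller
 * (exynos5_usbdrd_phy_clk_handle() below) fails the probe.
 */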
static void exynos5_usbdrd_phy_isol(struct phy_usb_instance *inst,
unsigned int on)
{
unsigned int val;
if (!inst->reg_pmu)
return;
val = on ? 0 : EXYNOS4_PHY_ENABLE;
regmap_update_bits(inst->reg_pmu, inst->pmu_offset,
EXYNOS4_PHY_ENABLE, val);
}
/*
 * Sets the pipe3 PHY's clock to EXTREFCLK (XXTI), which is an internal
 * clock from the clock core. Also sets the multiplier values and
 * spread-spectrum clock settings for SuperSpeed operation.
 */
static unsigned int
exynos5_usbdrd_pipe3_set_refclk(struct phy_usb_instance *inst)
{
u32 reg;
struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst);
/* preserve any previous reference clock settings */
reg = readl(phy_drd->reg_phy + EXYNOS5_DRD_PHYCLKRST);
/* Use EXTREFCLK as ref clock */
reg &= ~PHYCLKRST_REFCLKSEL_MASK;
reg |= PHYCLKRST_REFCLKSEL_EXT_REFCLK;
/* FSEL settings corresponding to reference clock */
reg &= ~(PHYCLKRST_FSEL_PIPE_MASK |
PHYCLKRST_MPLL_MULTIPLIER_MASK |
PHYCLKRST_SSC_REFCLKSEL_MASK);
switch (phy_drd->extrefclk) {
case EXYNOS5_FSEL_50MHZ:
reg |= (PHYCLKRST_MPLL_MULTIPLIER_50M_REF |
PHYCLKRST_SSC_REFCLKSEL(0x00));
break;
case EXYNOS5_FSEL_24MHZ:
reg |= (PHYCLKRST_MPLL_MULTIPLIER_24MHZ_REF |
PHYCLKRST_SSC_REFCLKSEL(0x88));
break;
case EXYNOS5_FSEL_20MHZ:
reg |= (PHYCLKRST_MPLL_MULTIPLIER_20MHZ_REF |
PHYCLKRST_SSC_REFCLKSEL(0x00));
break;
case EXYNOS5_FSEL_19MHZ2:
reg |= (PHYCLKRST_MPLL_MULTIPLIER_19200KHZ_REF |
PHYCLKRST_SSC_REFCLKSEL(0x88));
break;
default:
dev_dbg(phy_drd->dev, "unsupported ref clk\n");
break;
}
return reg;
}
/*
 * Sets the UTMI PHY's clock to EXTREFCLK (XXTI), which is an internal
 * clock from the clock core. Also sets the FSEL values for HighSpeed
 * operation.
 */
static unsigned int
exynos5_usbdrd_utmi_set_refclk(struct phy_usb_instance *inst)
{
u32 reg;
struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst);
/* preserve any previous reference clock settings */
reg = readl(phy_drd->reg_phy + EXYNOS5_DRD_PHYCLKRST);
reg &= ~PHYCLKRST_REFCLKSEL_MASK;
reg |= PHYCLKRST_REFCLKSEL_EXT_REFCLK;
/* clear FSEL, MPLL multiplier and SSC refclk select fields */
reg &= ~(PHYCLKRST_FSEL_UTMI_MASK |
PHYCLKRST_MPLL_MULTIPLIER_MASK |
PHYCLKRST_SSC_REFCLKSEL_MASK);
reg |= PHYCLKRST_FSEL(phy_drd->extrefclk);
return reg;
}
static void exynos5_usbdrd_pipe3_init(struct exynos5_usbdrd_phy *phy_drd)
{
u32 reg;
reg = readl(phy_drd->reg_phy + EXYNOS5_DRD_PHYPARAM1);
/* Set Tx De-Emphasis level */
reg &= ~PHYPARAM1_PCS_TXDEEMPH_MASK;
reg |= PHYPARAM1_PCS_TXDEEMPH;
writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_PHYPARAM1);
reg = readl(phy_drd->reg_phy + EXYNOS5_DRD_PHYTEST);
reg &= ~PHYTEST_POWERDOWN_SSP;
writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_PHYTEST);
}
static void exynos5_usbdrd_utmi_init(struct exynos5_usbdrd_phy *phy_drd)
{
u32 reg;
reg = readl(phy_drd->reg_phy + EXYNOS5_DRD_PHYPARAM0);
/* Set Loss-of-Signal Detector sensitivity */
reg &= ~PHYPARAM0_REF_LOSLEVEL_MASK;
reg |= PHYPARAM0_REF_LOSLEVEL;
writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_PHYPARAM0);
reg = readl(phy_drd->reg_phy + EXYNOS5_DRD_PHYPARAM1);
/* Set Tx De-Emphasis level */
reg &= ~PHYPARAM1_PCS_TXDEEMPH_MASK;
reg |= PHYPARAM1_PCS_TXDEEMPH;
writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_PHYPARAM1);
/* UTMI Power Control */
writel(PHYUTMI_OTGDISABLE, phy_drd->reg_phy + EXYNOS5_DRD_PHYUTMI);
reg = readl(phy_drd->reg_phy + EXYNOS5_DRD_PHYTEST);
reg &= ~PHYTEST_POWERDOWN_HSP;
writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_PHYTEST);
}
static int exynos5_usbdrd_phy_init(struct phy *phy)
{
int ret;
u32 reg;
struct phy_usb_instance *inst = phy_get_drvdata(phy);
struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst);
ret = clk_prepare_enable(phy_drd->clk);
if (ret)
return ret;
/* Reset USB 3.0 PHY */
writel(0x0, phy_drd->reg_phy + EXYNOS5_DRD_PHYREG0);
writel(0x0, phy_drd->reg_phy + EXYNOS5_DRD_PHYRESUME);
/*
* Setting the Frame length Adj value[6:1] to default 0x20
* See xHCI 1.0 spec, 5.2.4
*/
reg = LINKSYSTEM_XHCI_VERSION_CONTROL |
LINKSYSTEM_FLADJ(0x20);
writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_LINKSYSTEM);
reg = readl(phy_drd->reg_phy + EXYNOS5_DRD_PHYPARAM0);
/* Select PHY CLK source */
reg &= ~PHYPARAM0_REF_USE_PAD;
writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_PHYPARAM0);
/* This bit must be set for both HS and SS operations */
reg = readl(phy_drd->reg_phy + EXYNOS5_DRD_PHYUTMICLKSEL);
reg |= PHYUTMICLKSEL_UTMI_CLKSEL;
writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_PHYUTMICLKSEL);
/* UTMI or PIPE3 specific init */
inst->phy_cfg->phy_init(phy_drd);
/* reference clock settings */
reg = inst->phy_cfg->set_refclk(inst);
/* Digital power supply in normal operating mode */
reg |= PHYCLKRST_RETENABLEN |
/* Enable ref clock for SS function */
PHYCLKRST_REF_SSP_EN |
/* Enable spread spectrum */
PHYCLKRST_SSC_EN |
/* Power down HS Bias and PLL blocks in suspend mode */
PHYCLKRST_COMMONONN |
/* Reset the port */
PHYCLKRST_PORTRESET;
writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_PHYCLKRST);
udelay(10);
reg &= ~PHYCLKRST_PORTRESET;
writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_PHYCLKRST);
clk_disable_unprepare(phy_drd->clk);
return 0;
}
static int exynos5_usbdrd_phy_exit(struct phy *phy)
{
int ret;
u32 reg;
struct phy_usb_instance *inst = phy_get_drvdata(phy);
struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst);
ret = clk_prepare_enable(phy_drd->clk);
if (ret)
return ret;
reg = PHYUTMI_OTGDISABLE |
PHYUTMI_FORCESUSPEND |
PHYUTMI_FORCESLEEP;
writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_PHYUTMI);
/* Resetting the PHYCLKRST enable bits to reduce leakage current */
reg = readl(phy_drd->reg_phy + EXYNOS5_DRD_PHYCLKRST);
reg &= ~(PHYCLKRST_REF_SSP_EN |
PHYCLKRST_SSC_EN |
PHYCLKRST_COMMONONN);
writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_PHYCLKRST);
/* Control PHYTEST to remove leakage current */
reg = readl(phy_drd->reg_phy + EXYNOS5_DRD_PHYTEST);
reg |= PHYTEST_POWERDOWN_SSP |
PHYTEST_POWERDOWN_HSP;
writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_PHYTEST);
clk_disable_unprepare(phy_drd->clk);
return 0;
}
static int exynos5_usbdrd_phy_power_on(struct phy *phy)
{
int ret;
struct phy_usb_instance *inst = phy_get_drvdata(phy);
struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst);
dev_dbg(phy_drd->dev, "Request to power_on usbdrd_phy phy\n");
clk_prepare_enable(phy_drd->ref_clk);
if (!phy_drd->drv_data->has_common_clk_gate) {
clk_prepare_enable(phy_drd->pipeclk);
clk_prepare_enable(phy_drd->utmiclk);
clk_prepare_enable(phy_drd->itpclk);
}
/* Enable VBUS supply */
if (phy_drd->vbus_boost) {
ret = regulator_enable(phy_drd->vbus_boost);
if (ret) {
dev_err(phy_drd->dev,
"Failed to enable VBUS boost supply\n");
goto fail_vbus;
}
}
if (phy_drd->vbus) {
ret = regulator_enable(phy_drd->vbus);
if (ret) {
dev_err(phy_drd->dev, "Failed to enable VBUS supply\n");
goto fail_vbus_boost;
}
}
/* Power-on PHY */
inst->phy_cfg->phy_isol(inst, 0);
return 0;
fail_vbus_boost:
if (phy_drd->vbus_boost)
regulator_disable(phy_drd->vbus_boost);
fail_vbus:
clk_disable_unprepare(phy_drd->ref_clk);
if (!phy_drd->drv_data->has_common_clk_gate) {
clk_disable_unprepare(phy_drd->itpclk);
clk_disable_unprepare(phy_drd->utmiclk);
clk_disable_unprepare(phy_drd->pipeclk);
}
return ret;
}
static int exynos5_usbdrd_phy_power_off(struct phy *phy)
{
struct phy_usb_instance *inst = phy_get_drvdata(phy);
struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst);
dev_dbg(phy_drd->dev, "Request to power_off usbdrd_phy phy\n");
/* Power-off the PHY */
inst->phy_cfg->phy_isol(inst, 1);
/* Disable VBUS supply */
if (phy_drd->vbus)
regulator_disable(phy_drd->vbus);
if (phy_drd->vbus_boost)
regulator_disable(phy_drd->vbus_boost);
clk_disable_unprepare(phy_drd->ref_clk);
if (!phy_drd->drv_data->has_common_clk_gate) {
clk_disable_unprepare(phy_drd->itpclk);
clk_disable_unprepare(phy_drd->pipeclk);
clk_disable_unprepare(phy_drd->utmiclk);
}
return 0;
}
static int crport_handshake(struct exynos5_usbdrd_phy *phy_drd,
u32 val, u32 cmd)
{
unsigned int result;
int err;
writel(val | cmd, phy_drd->reg_phy + EXYNOS5_DRD_PHYREG0);
err = readl_poll_timeout(phy_drd->reg_phy + EXYNOS5_DRD_PHYREG1,
result, (result & PHYREG1_CR_ACK), 1, 100);
if (err == -ETIMEDOUT) {
dev_err(phy_drd->dev, "CRPORT handshake timeout1 (0x%08x)\n", val);
return err;
}
writel(val, phy_drd->reg_phy + EXYNOS5_DRD_PHYREG0);
err = readl_poll_timeout(phy_drd->reg_phy + EXYNOS5_DRD_PHYREG1,
result, !(result & PHYREG1_CR_ACK), 1, 100);
if (err == -ETIMEDOUT) {
dev_err(phy_drd->dev, "CRPORT handshake timeout2 (0x%08x)\n", val);
return err;
}
return 0;
}
static int crport_ctrl_write(struct exynos5_usbdrd_phy *phy_drd,
u32 addr, u32 data)
{
int ret;
/* Write Address */
writel(PHYREG0_CR_DATA_IN(addr),
phy_drd->reg_phy + EXYNOS5_DRD_PHYREG0);
ret = crport_handshake(phy_drd, PHYREG0_CR_DATA_IN(addr),
PHYREG0_CR_CAP_ADDR);
if (ret)
return ret;
/* Write Data */
writel(PHYREG0_CR_DATA_IN(data),
phy_drd->reg_phy + EXYNOS5_DRD_PHYREG0);
ret = crport_handshake(phy_drd, PHYREG0_CR_DATA_IN(data),
PHYREG0_CR_CAP_DATA);
if (ret)
return ret;
ret = crport_handshake(phy_drd, PHYREG0_CR_DATA_IN(data),
PHYREG0_CR_WRITE);
return ret;
}
/*
 * Calibrate a few PHY parameters using the CR_PORT register to meet
 * SuperSpeed requirements on Exynos5420 and Exynos5800 systems,
 * which have a 28 nm USB 3.0 DRD PHY.
 */
static int exynos5420_usbdrd_phy_calibrate(struct exynos5_usbdrd_phy *phy_drd)
{
unsigned int temp;
int ret = 0;
/*
* Change los_bias to (0x5) for 28nm PHY from a
* default value (0x0); los_level is set as default
* (0x9) as also reflected in los_level[30:26] bits
* of PHYPARAM0 register.
*/
temp = LOSLEVEL_OVRD_IN_LOS_BIAS_5420 |
LOSLEVEL_OVRD_IN_EN |
LOSLEVEL_OVRD_IN_LOS_LEVEL_DEFAULT;
ret = crport_ctrl_write(phy_drd,
EXYNOS5_DRD_PHYSS_LOSLEVEL_OVRD_IN,
temp);
if (ret) {
dev_err(phy_drd->dev,
"Failed setting Loss-of-Signal level for SuperSpeed\n");
return ret;
}
/*
* Set tx_vboost_lvl to (0x5) for 28nm PHY Tuning,
* to raise Tx signal level from its default value of (0x4)
*/
temp = TX_VBOOSTLEVEL_OVRD_IN_VBOOST_5420;
ret = crport_ctrl_write(phy_drd,
EXYNOS5_DRD_PHYSS_TX_VBOOSTLEVEL_OVRD_IN,
temp);
if (ret) {
dev_err(phy_drd->dev,
"Failed setting Tx-Vboost-Level for SuperSpeed\n");
return ret;
}
/*
 * Set the proper wait time for the RxDetect measurement, for the
 * PHY's reference clock, by tuning the PHY-internal CR_PORT register
 * LANE0.TX_DEBUG. Without this change a few USB 3.0 devices, e.g. the
 * Samsung SUM-TSB16S USB 3.0 drive, are not detected and do not even
 * generate interrupts on the bus on insertion.
 */
switch (phy_drd->extrefclk) {
case EXYNOS5_FSEL_50MHZ:
temp = LANE0_TX_DEBUG_RXDET_MEAS_TIME_48M_50M_52M;
break;
case EXYNOS5_FSEL_20MHZ:
case EXYNOS5_FSEL_19MHZ2:
temp = LANE0_TX_DEBUG_RXDET_MEAS_TIME_19M2_20M;
break;
case EXYNOS5_FSEL_24MHZ:
default:
temp = LANE0_TX_DEBUG_RXDET_MEAS_TIME_24M;
break;
}
ret = crport_ctrl_write(phy_drd,
EXYNOS5_DRD_PHYSS_LANE0_TX_DEBUG,
temp);
if (ret)
dev_err(phy_drd->dev,
"Fail to set RxDet measurement time for SuperSpeed\n");
return ret;
}
static struct phy *exynos5_usbdrd_phy_xlate(struct device *dev,
struct of_phandle_args *args)
{
struct exynos5_usbdrd_phy *phy_drd = dev_get_drvdata(dev);
if (WARN_ON(args->args[0] >= EXYNOS5_DRDPHYS_NUM))
return ERR_PTR(-ENODEV);
return phy_drd->phys[args->args[0]].phy;
}
static int exynos5_usbdrd_phy_calibrate(struct phy *phy)
{
struct phy_usb_instance *inst = phy_get_drvdata(phy);
struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst);
if (inst->phy_cfg->id == EXYNOS5_DRDPHY_UTMI)
return exynos5420_usbdrd_phy_calibrate(phy_drd);
return 0;
}
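/*
 * Consumer-side sketch (illustrative, not part of this driver): a host
 * controller driver is expected to grab the UTMI PHY and call the generic
 * phy_calibrate() helper, which ends up here through the .calibrate op
 * below, e.g.:
 *
 *	phy = devm_phy_get(dev, "usb2-phy");
 *	ret = phy_calibrate(phy);
 *
 * The exact consumer and phandle name depend on platform integration.
 */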
static const struct phy_ops exynos5_usbdrd_phy_ops = {
.init = exynos5_usbdrd_phy_init,
.exit = exynos5_usbdrd_phy_exit,
.power_on = exynos5_usbdrd_phy_power_on,
.power_off = exynos5_usbdrd_phy_power_off,
.calibrate = exynos5_usbdrd_phy_calibrate,
.owner = THIS_MODULE,
};
static void exynos850_usbdrd_utmi_init(struct exynos5_usbdrd_phy *phy_drd)
{
void __iomem *regs_base = phy_drd->reg_phy;
u32 reg;
/*
* Disable HWACG (hardware auto clock gating control). This will force
* QACTIVE signal in Q-Channel interface to HIGH level, to make sure
* the PHY clock is not gated by the hardware.
*/
reg = readl(regs_base + EXYNOS850_DRD_LINKCTRL);
reg |= LINKCTRL_FORCE_QACT;
writel(reg, regs_base + EXYNOS850_DRD_LINKCTRL);
/* Start PHY Reset (POR=high) */
reg = readl(regs_base + EXYNOS850_DRD_CLKRST);
reg |= CLKRST_PHY_SW_RST;
writel(reg, regs_base + EXYNOS850_DRD_CLKRST);
/* Enable UTMI+ */
reg = readl(regs_base + EXYNOS850_DRD_UTMI);
reg &= ~(UTMI_FORCE_SUSPEND | UTMI_FORCE_SLEEP | UTMI_DP_PULLDOWN |
UTMI_DM_PULLDOWN);
writel(reg, regs_base + EXYNOS850_DRD_UTMI);
/* Set PHY clock and control HS PHY */
reg = readl(regs_base + EXYNOS850_DRD_HSP);
reg |= HSP_EN_UTMISUSPEND | HSP_COMMONONN;
writel(reg, regs_base + EXYNOS850_DRD_HSP);
/* Set VBUS Valid and D+ pull-up control by VBUS pad usage */
reg = readl(regs_base + EXYNOS850_DRD_LINKCTRL);
reg |= LINKCTRL_BUS_FILTER_BYPASS(0xf);
writel(reg, regs_base + EXYNOS850_DRD_LINKCTRL);
reg = readl(regs_base + EXYNOS850_DRD_UTMI);
reg |= UTMI_FORCE_BVALID | UTMI_FORCE_VBUSVALID;
writel(reg, regs_base + EXYNOS850_DRD_UTMI);
reg = readl(regs_base + EXYNOS850_DRD_HSP);
reg |= HSP_VBUSVLDEXT | HSP_VBUSVLDEXTSEL;
writel(reg, regs_base + EXYNOS850_DRD_HSP);
/* Power up PHY analog blocks */
reg = readl(regs_base + EXYNOS850_DRD_HSP_TEST);
reg &= ~HSP_TEST_SIDDQ;
writel(reg, regs_base + EXYNOS850_DRD_HSP_TEST);
/* Finish PHY reset (POR=low) */
udelay(10); /* required before doing POR=low */
reg = readl(regs_base + EXYNOS850_DRD_CLKRST);
reg &= ~(CLKRST_PHY_SW_RST | CLKRST_PORT_RST);
writel(reg, regs_base + EXYNOS850_DRD_CLKRST);
udelay(75); /* required after POR=low for guaranteed PHY clock */
/* Disable single-ended signal output */
reg = readl(regs_base + EXYNOS850_DRD_HSP);
reg &= ~HSP_FSV_OUT_EN;
writel(reg, regs_base + EXYNOS850_DRD_HSP);
}
static int exynos850_usbdrd_phy_init(struct phy *phy)
{
struct phy_usb_instance *inst = phy_get_drvdata(phy);
struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst);
int ret;
ret = clk_prepare_enable(phy_drd->clk);
if (ret)
return ret;
/* UTMI or PIPE3 specific init */
inst->phy_cfg->phy_init(phy_drd);
clk_disable_unprepare(phy_drd->clk);
return 0;
}
static int exynos850_usbdrd_phy_exit(struct phy *phy)
{
struct phy_usb_instance *inst = phy_get_drvdata(phy);
struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst);
void __iomem *regs_base = phy_drd->reg_phy;
u32 reg;
int ret;
ret = clk_prepare_enable(phy_drd->clk);
if (ret)
return ret;
/* Set PHY clock and control HS PHY */
reg = readl(regs_base + EXYNOS850_DRD_UTMI);
reg &= ~(UTMI_DP_PULLDOWN | UTMI_DM_PULLDOWN);
reg |= UTMI_FORCE_SUSPEND | UTMI_FORCE_SLEEP;
writel(reg, regs_base + EXYNOS850_DRD_UTMI);
/* Power down PHY analog blocks */
reg = readl(regs_base + EXYNOS850_DRD_HSP_TEST);
reg |= HSP_TEST_SIDDQ;
writel(reg, regs_base + EXYNOS850_DRD_HSP_TEST);
/* Link reset */
reg = readl(regs_base + EXYNOS850_DRD_CLKRST);
reg |= CLKRST_LINK_SW_RST;
writel(reg, regs_base + EXYNOS850_DRD_CLKRST);
udelay(10); /* required before doing POR=low */
reg &= ~CLKRST_LINK_SW_RST;
writel(reg, regs_base + EXYNOS850_DRD_CLKRST);
clk_disable_unprepare(phy_drd->clk);
return 0;
}
static const struct phy_ops exynos850_usbdrd_phy_ops = {
.init = exynos850_usbdrd_phy_init,
.exit = exynos850_usbdrd_phy_exit,
.power_on = exynos5_usbdrd_phy_power_on,
.power_off = exynos5_usbdrd_phy_power_off,
.owner = THIS_MODULE,
};
static int exynos5_usbdrd_phy_clk_handle(struct exynos5_usbdrd_phy *phy_drd)
{
unsigned long ref_rate;
int ret;
phy_drd->clk = devm_clk_get(phy_drd->dev, "phy");
if (IS_ERR(phy_drd->clk)) {
dev_err(phy_drd->dev, "Failed to get phy clock\n");
return PTR_ERR(phy_drd->clk);
}
phy_drd->ref_clk = devm_clk_get(phy_drd->dev, "ref");
if (IS_ERR(phy_drd->ref_clk)) {
dev_err(phy_drd->dev, "Failed to get phy reference clock\n");
return PTR_ERR(phy_drd->ref_clk);
}
ref_rate = clk_get_rate(phy_drd->ref_clk);
ret = exynos5_rate_to_clk(ref_rate, &phy_drd->extrefclk);
if (ret) {
dev_err(phy_drd->dev, "Clock rate (%ld) not supported\n",
ref_rate);
return ret;
}
if (!phy_drd->drv_data->has_common_clk_gate) {
phy_drd->pipeclk = devm_clk_get(phy_drd->dev, "phy_pipe");
if (IS_ERR(phy_drd->pipeclk)) {
dev_info(phy_drd->dev,
"PIPE3 phy operational clock not specified\n");
phy_drd->pipeclk = NULL;
}
phy_drd->utmiclk = devm_clk_get(phy_drd->dev, "phy_utmi");
if (IS_ERR(phy_drd->utmiclk)) {
dev_info(phy_drd->dev,
"UTMI phy operational clock not specified\n");
phy_drd->utmiclk = NULL;
}
phy_drd->itpclk = devm_clk_get(phy_drd->dev, "itp");
if (IS_ERR(phy_drd->itpclk)) {
dev_info(phy_drd->dev,
"ITP clock from main OSC not specified\n");
phy_drd->itpclk = NULL;
}
}
return 0;
}
static const struct exynos5_usbdrd_phy_config phy_cfg_exynos5[] = {
{
.id = EXYNOS5_DRDPHY_UTMI,
.phy_isol = exynos5_usbdrd_phy_isol,
.phy_init = exynos5_usbdrd_utmi_init,
.set_refclk = exynos5_usbdrd_utmi_set_refclk,
},
{
.id = EXYNOS5_DRDPHY_PIPE3,
.phy_isol = exynos5_usbdrd_phy_isol,
.phy_init = exynos5_usbdrd_pipe3_init,
.set_refclk = exynos5_usbdrd_pipe3_set_refclk,
},
};
static const struct exynos5_usbdrd_phy_config phy_cfg_exynos850[] = {
{
.id = EXYNOS5_DRDPHY_UTMI,
.phy_isol = exynos5_usbdrd_phy_isol,
.phy_init = exynos850_usbdrd_utmi_init,
},
};
static const struct exynos5_usbdrd_phy_drvdata exynos5420_usbdrd_phy = {
.phy_cfg = phy_cfg_exynos5,
.phy_ops = &exynos5_usbdrd_phy_ops,
.pmu_offset_usbdrd0_phy = EXYNOS5_USBDRD_PHY_CONTROL,
.pmu_offset_usbdrd1_phy = EXYNOS5420_USBDRD1_PHY_CONTROL,
.has_common_clk_gate = true,
};
static const struct exynos5_usbdrd_phy_drvdata exynos5250_usbdrd_phy = {
.phy_cfg = phy_cfg_exynos5,
.phy_ops = &exynos5_usbdrd_phy_ops,
.pmu_offset_usbdrd0_phy = EXYNOS5_USBDRD_PHY_CONTROL,
.has_common_clk_gate = true,
};
static const struct exynos5_usbdrd_phy_drvdata exynos5433_usbdrd_phy = {
.phy_cfg = phy_cfg_exynos5,
.phy_ops = &exynos5_usbdrd_phy_ops,
.pmu_offset_usbdrd0_phy = EXYNOS5_USBDRD_PHY_CONTROL,
.pmu_offset_usbdrd1_phy = EXYNOS5433_USBHOST30_PHY_CONTROL,
.has_common_clk_gate = false,
};
static const struct exynos5_usbdrd_phy_drvdata exynos7_usbdrd_phy = {
.phy_cfg = phy_cfg_exynos5,
.phy_ops = &exynos5_usbdrd_phy_ops,
.pmu_offset_usbdrd0_phy = EXYNOS5_USBDRD_PHY_CONTROL,
.has_common_clk_gate = false,
};
static const struct exynos5_usbdrd_phy_drvdata exynos850_usbdrd_phy = {
.phy_cfg = phy_cfg_exynos850,
.phy_ops = &exynos850_usbdrd_phy_ops,
.pmu_offset_usbdrd0_phy = EXYNOS5_USBDRD_PHY_CONTROL,
.has_common_clk_gate = true,
};
static const struct of_device_id exynos5_usbdrd_phy_of_match[] = {
{
.compatible = "samsung,exynos5250-usbdrd-phy",
.data = &exynos5250_usbdrd_phy
}, {
.compatible = "samsung,exynos5420-usbdrd-phy",
.data = &exynos5420_usbdrd_phy
}, {
.compatible = "samsung,exynos5433-usbdrd-phy",
.data = &exynos5433_usbdrd_phy
}, {
.compatible = "samsung,exynos7-usbdrd-phy",
.data = &exynos7_usbdrd_phy
}, {
.compatible = "samsung,exynos850-usbdrd-phy",
.data = &exynos850_usbdrd_phy
},
{ },
};
MODULE_DEVICE_TABLE(of, exynos5_usbdrd_phy_of_match);
static int exynos5_usbdrd_phy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
struct exynos5_usbdrd_phy *phy_drd;
struct phy_provider *phy_provider;
const struct exynos5_usbdrd_phy_drvdata *drv_data;
struct regmap *reg_pmu;
u32 pmu_offset;
int i, ret;
int channel;
phy_drd = devm_kzalloc(dev, sizeof(*phy_drd), GFP_KERNEL);
if (!phy_drd)
return -ENOMEM;
dev_set_drvdata(dev, phy_drd);
phy_drd->dev = dev;
phy_drd->reg_phy = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(phy_drd->reg_phy))
return PTR_ERR(phy_drd->reg_phy);
drv_data = of_device_get_match_data(dev);
if (!drv_data)
return -EINVAL;
phy_drd->drv_data = drv_data;
ret = exynos5_usbdrd_phy_clk_handle(phy_drd);
if (ret) {
dev_err(dev, "Failed to initialize clocks\n");
return ret;
}
reg_pmu = syscon_regmap_lookup_by_phandle(dev->of_node,
"samsung,pmu-syscon");
if (IS_ERR(reg_pmu)) {
dev_err(dev, "Failed to lookup PMU regmap\n");
return PTR_ERR(reg_pmu);
}
/*
 * The Exynos5420 SoC has multiple channels for the USB 3.0 PHY, each
 * with separate power control registers. 'channel' selects the proper
 * register set.
 */
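/*
 * Illustrative (assumed) device-tree aliases for such a setup; actual
 * SoC/board dts files may differ:
 *
 *	aliases {
 *		usbdrdphy0 = &usbdrd_phy0;
 *		usbdrdphy1 = &usbdrd_phy1;
 *	};
 *
 * of_alias_get_id(node, "usbdrdphy") below then returns 0 or 1.
 */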
channel = of_alias_get_id(node, "usbdrdphy");
if (channel < 0)
dev_dbg(dev, "Not a multi-controller usbdrd phy\n");
switch (channel) {
case 1:
pmu_offset = phy_drd->drv_data->pmu_offset_usbdrd1_phy;
break;
case 0:
default:
pmu_offset = phy_drd->drv_data->pmu_offset_usbdrd0_phy;
break;
}
/* Get Vbus regulators */
phy_drd->vbus = devm_regulator_get(dev, "vbus");
if (IS_ERR(phy_drd->vbus)) {
ret = PTR_ERR(phy_drd->vbus);
if (ret == -EPROBE_DEFER)
return ret;
dev_warn(dev, "Failed to get VBUS supply regulator\n");
phy_drd->vbus = NULL;
}
phy_drd->vbus_boost = devm_regulator_get(dev, "vbus-boost");
if (IS_ERR(phy_drd->vbus_boost)) {
ret = PTR_ERR(phy_drd->vbus_boost);
if (ret == -EPROBE_DEFER)
return ret;
dev_warn(dev, "Failed to get VBUS boost supply regulator\n");
phy_drd->vbus_boost = NULL;
}
dev_vdbg(dev, "Creating usbdrd_phy phy\n");
for (i = 0; i < EXYNOS5_DRDPHYS_NUM; i++) {
struct phy *phy = devm_phy_create(dev, NULL, drv_data->phy_ops);
if (IS_ERR(phy)) {
dev_err(dev, "Failed to create usbdrd_phy phy\n");
return PTR_ERR(phy);
}
phy_drd->phys[i].phy = phy;
phy_drd->phys[i].index = i;
phy_drd->phys[i].reg_pmu = reg_pmu;
phy_drd->phys[i].pmu_offset = pmu_offset;
phy_drd->phys[i].phy_cfg = &drv_data->phy_cfg[i];
phy_set_drvdata(phy, &phy_drd->phys[i]);
}
phy_provider = devm_of_phy_provider_register(dev,
exynos5_usbdrd_phy_xlate);
if (IS_ERR(phy_provider)) {
dev_err(phy_drd->dev, "Failed to register phy provider\n");
return PTR_ERR(phy_provider);
}
return 0;
}
static struct platform_driver exynos5_usb3drd_phy = {
.probe = exynos5_usbdrd_phy_probe,
.driver = {
.of_match_table = exynos5_usbdrd_phy_of_match,
.name = "exynos5_usb3drd_phy",
.suppress_bind_attrs = true,
}
};
module_platform_driver(exynos5_usb3drd_phy);
MODULE_DESCRIPTION("Samsung Exynos5 SoCs USB 3.0 DRD controller PHY driver");
MODULE_AUTHOR("Vivek Gautam <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:exynos5_usb3drd_phy");
| linux-master | drivers/phy/samsung/phy-exynos5-usbdrd.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2018 MediaTek Inc.
* Author: Jie Qiu <[email protected]>
*/
#include "phy-mtk-hdmi.h"
static int mtk_hdmi_phy_power_on(struct phy *phy);
static int mtk_hdmi_phy_power_off(struct phy *phy);
static int mtk_hdmi_phy_configure(struct phy *phy, union phy_configure_opts *opts);
static const struct phy_ops mtk_hdmi_phy_dev_ops = {
.power_on = mtk_hdmi_phy_power_on,
.power_off = mtk_hdmi_phy_power_off,
.configure = mtk_hdmi_phy_configure,
.owner = THIS_MODULE,
};
inline struct mtk_hdmi_phy *to_mtk_hdmi_phy(struct clk_hw *hw)
{
return container_of(hw, struct mtk_hdmi_phy, pll_hw);
}
static int mtk_hdmi_phy_power_on(struct phy *phy)
{
struct mtk_hdmi_phy *hdmi_phy = phy_get_drvdata(phy);
int ret;
ret = clk_prepare_enable(hdmi_phy->pll);
if (ret < 0)
return ret;
hdmi_phy->conf->hdmi_phy_enable_tmds(hdmi_phy);
return 0;
}
static int mtk_hdmi_phy_power_off(struct phy *phy)
{
struct mtk_hdmi_phy *hdmi_phy = phy_get_drvdata(phy);
hdmi_phy->conf->hdmi_phy_disable_tmds(hdmi_phy);
clk_disable_unprepare(hdmi_phy->pll);
return 0;
}
static int mtk_hdmi_phy_configure(struct phy *phy, union phy_configure_opts *opts)
{
struct mtk_hdmi_phy *hdmi_phy = phy_get_drvdata(phy);
if (hdmi_phy->conf->hdmi_phy_configure)
return hdmi_phy->conf->hdmi_phy_configure(phy, opts);
return 0;
}
static const struct phy_ops *
mtk_hdmi_phy_dev_get_ops(const struct mtk_hdmi_phy *hdmi_phy)
{
if (hdmi_phy && hdmi_phy->conf &&
hdmi_phy->conf->hdmi_phy_enable_tmds &&
hdmi_phy->conf->hdmi_phy_disable_tmds)
return &mtk_hdmi_phy_dev_ops;
if (hdmi_phy)
dev_err(hdmi_phy->dev, "Failed to get dev ops of phy\n");
return NULL;
}
static void mtk_hdmi_phy_clk_get_data(struct mtk_hdmi_phy *hdmi_phy,
struct clk_init_data *clk_init)
{
clk_init->flags = hdmi_phy->conf->flags;
clk_init->ops = hdmi_phy->conf->hdmi_phy_clk_ops;
}
static int mtk_hdmi_phy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_hdmi_phy *hdmi_phy;
struct clk *ref_clk;
const char *ref_clk_name;
struct clk_init_data clk_init = {
.num_parents = 1,
.parent_names = (const char * const *)&ref_clk_name,
};
struct phy *phy;
struct phy_provider *phy_provider;
int ret;
hdmi_phy = devm_kzalloc(dev, sizeof(*hdmi_phy), GFP_KERNEL);
if (!hdmi_phy)
return -ENOMEM;
hdmi_phy->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hdmi_phy->regs))
return PTR_ERR(hdmi_phy->regs);
ref_clk = devm_clk_get(dev, "pll_ref");
if (IS_ERR(ref_clk))
return dev_err_probe(dev, PTR_ERR(ref_clk),
"Failed to get PLL reference clock\n");
ref_clk_name = __clk_get_name(ref_clk);
ret = of_property_read_string(dev->of_node, "clock-output-names",
&clk_init.name);
if (ret < 0)
return dev_err_probe(dev, ret, "Failed to read clock-output-names\n");
hdmi_phy->dev = dev;
hdmi_phy->conf =
(struct mtk_hdmi_phy_conf *)of_device_get_match_data(dev);
mtk_hdmi_phy_clk_get_data(hdmi_phy, &clk_init);
hdmi_phy->pll_hw.init = &clk_init;
hdmi_phy->pll = devm_clk_register(dev, &hdmi_phy->pll_hw);
if (IS_ERR(hdmi_phy->pll))
return dev_err_probe(dev, PTR_ERR(hdmi_phy->pll),
"Failed to register PLL\n");
ret = of_property_read_u32(dev->of_node, "mediatek,ibias",
&hdmi_phy->ibias);
if (ret < 0)
return dev_err_probe(dev, ret, "Failed to get ibias\n");
ret = of_property_read_u32(dev->of_node, "mediatek,ibias_up",
&hdmi_phy->ibias_up);
if (ret < 0)
return dev_err_probe(dev, ret, "Failed to get ibias_up\n");
dev_info(dev, "Using default TX DRV impedance: 4.2k/36\n");
hdmi_phy->drv_imp_clk = 0x30;
hdmi_phy->drv_imp_d2 = 0x30;
hdmi_phy->drv_imp_d1 = 0x30;
hdmi_phy->drv_imp_d0 = 0x30;
phy = devm_phy_create(dev, NULL, mtk_hdmi_phy_dev_get_ops(hdmi_phy));
if (IS_ERR(phy))
return dev_err_probe(dev, PTR_ERR(phy), "Cannot create HDMI PHY\n");
phy_set_drvdata(phy, hdmi_phy);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
if (IS_ERR(phy_provider))
return dev_err_probe(dev, PTR_ERR(phy_provider),
"Failed to register HDMI PHY\n");
if (hdmi_phy->conf->pll_default_off)
hdmi_phy->conf->hdmi_phy_disable_tmds(hdmi_phy);
return of_clk_add_provider(dev->of_node, of_clk_src_simple_get,
hdmi_phy->pll);
}
static const struct of_device_id mtk_hdmi_phy_match[] = {
{ .compatible = "mediatek,mt2701-hdmi-phy",
.data = &mtk_hdmi_phy_2701_conf,
},
{ .compatible = "mediatek,mt8173-hdmi-phy",
.data = &mtk_hdmi_phy_8173_conf,
},
{ .compatible = "mediatek,mt8195-hdmi-phy",
.data = &mtk_hdmi_phy_8195_conf,
},
{},
};
MODULE_DEVICE_TABLE(of, mtk_hdmi_phy_match);
static struct platform_driver mtk_hdmi_phy_driver = {
.probe = mtk_hdmi_phy_probe,
.driver = {
.name = "mediatek-hdmi-phy",
.of_match_table = mtk_hdmi_phy_match,
},
};
module_platform_driver(mtk_hdmi_phy_driver);
MODULE_DESCRIPTION("MediaTek HDMI PHY Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/phy/mediatek/phy-mtk-hdmi.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2015 MediaTek Inc.
* Author: Chunfeng Yun <[email protected]>
*
*/
#include <dt-bindings/phy/phy.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include "phy-mtk-io.h"
/* version V1 sub-banks offset base address */
/* banks shared by multiple phys */
#define SSUSB_SIFSLV_V1_SPLLC 0x000 /* shared by u3 phys */
#define SSUSB_SIFSLV_V1_U2FREQ 0x100 /* shared by u2 phys */
#define SSUSB_SIFSLV_V1_CHIP 0x300 /* shared by u3 phys */
/* u2 phy bank */
#define SSUSB_SIFSLV_V1_U2PHY_COM 0x000
/* u3/pcie/sata phy banks */
#define SSUSB_SIFSLV_V1_U3PHYD 0x000
#define SSUSB_SIFSLV_V1_U3PHYA 0x200
/* version V2/V3 sub-banks offset base address */
/* V3: U2FREQ is not used anymore, but reserved */
/* u2 phy banks */
#define SSUSB_SIFSLV_V2_MISC 0x000
#define SSUSB_SIFSLV_V2_U2FREQ 0x100
#define SSUSB_SIFSLV_V2_U2PHY_COM 0x300
/* u3/pcie/sata phy banks */
#define SSUSB_SIFSLV_V2_SPLLC 0x000
#define SSUSB_SIFSLV_V2_CHIP 0x100
#define SSUSB_SIFSLV_V2_U3PHYD 0x200
#define SSUSB_SIFSLV_V2_U3PHYA 0x400
#define U3P_MISC_REG1 0x04
#define MR1_EFUSE_AUTO_LOAD_DIS BIT(6)
#define U3P_USBPHYACR0 0x000
#define PA0_RG_U2PLL_FORCE_ON BIT(15)
#define PA0_USB20_PLL_PREDIV GENMASK(7, 6)
#define PA0_RG_USB20_INTR_EN BIT(5)
#define U3P_USBPHYACR1 0x004
#define PA1_RG_INTR_CAL GENMASK(23, 19)
#define PA1_RG_VRT_SEL GENMASK(14, 12)
#define PA1_RG_TERM_SEL GENMASK(10, 8)
#define U3P_USBPHYACR2 0x008
#define PA2_RG_U2PLL_BW GENMASK(21, 19)
#define PA2_RG_SIF_U2PLL_FORCE_EN BIT(18)
#define U3P_USBPHYACR5 0x014
#define PA5_RG_U2_HSTX_SRCAL_EN BIT(15)
#define PA5_RG_U2_HSTX_SRCTRL GENMASK(14, 12)
#define PA5_RG_U2_HS_100U_U3_EN BIT(11)
#define U3P_USBPHYACR6 0x018
#define PA6_RG_U2_PRE_EMP GENMASK(31, 30)
#define PA6_RG_U2_BC11_SW_EN BIT(23)
#define PA6_RG_U2_OTG_VBUSCMP_EN BIT(20)
#define PA6_RG_U2_DISCTH GENMASK(7, 4)
#define PA6_RG_U2_SQTH GENMASK(3, 0)
#define U3P_U2PHYACR4 0x020
#define P2C_RG_USB20_GPIO_CTL BIT(9)
#define P2C_USB20_GPIO_MODE BIT(8)
#define P2C_U2_GPIO_CTR_MSK (P2C_RG_USB20_GPIO_CTL | P2C_USB20_GPIO_MODE)
#define U3P_U2PHYA_RESV 0x030
#define P2R_RG_U2PLL_FBDIV_26M 0x1bb13b
#define P2R_RG_U2PLL_FBDIV_48M 0x3c0000
#define U3P_U2PHYA_RESV1 0x044
#define P2R_RG_U2PLL_REFCLK_SEL BIT(5)
#define P2R_RG_U2PLL_FRA_EN BIT(3)
#define U3D_U2PHYDCR0 0x060
#define P2C_RG_SIF_U2PLL_FORCE_ON BIT(24)
#define U3P_U2PHYDTM0 0x068
#define P2C_FORCE_UART_EN BIT(26)
#define P2C_FORCE_DATAIN BIT(23)
#define P2C_FORCE_DM_PULLDOWN BIT(21)
#define P2C_FORCE_DP_PULLDOWN BIT(20)
#define P2C_FORCE_XCVRSEL BIT(19)
#define P2C_FORCE_SUSPENDM BIT(18)
#define P2C_FORCE_TERMSEL BIT(17)
#define P2C_RG_DATAIN GENMASK(13, 10)
#define P2C_RG_DMPULLDOWN BIT(7)
#define P2C_RG_DPPULLDOWN BIT(6)
#define P2C_RG_XCVRSEL GENMASK(5, 4)
#define P2C_RG_SUSPENDM BIT(3)
#define P2C_RG_TERMSEL BIT(2)
#define P2C_DTM0_PART_MASK \
(P2C_FORCE_DATAIN | P2C_FORCE_DM_PULLDOWN | \
P2C_FORCE_DP_PULLDOWN | P2C_FORCE_XCVRSEL | \
P2C_FORCE_TERMSEL | P2C_RG_DMPULLDOWN | \
P2C_RG_DPPULLDOWN | P2C_RG_TERMSEL)
#define U3P_U2PHYDTM1 0x06C
#define P2C_RG_UART_EN BIT(16)
#define P2C_FORCE_IDDIG BIT(9)
#define P2C_RG_VBUSVALID BIT(5)
#define P2C_RG_SESSEND BIT(4)
#define P2C_RG_AVALID BIT(2)
#define P2C_RG_IDDIG BIT(1)
#define U3P_U2PHYBC12C 0x080
#define P2C_RG_CHGDT_EN BIT(0)
#define U3P_U3_CHIP_GPIO_CTLD 0x0c
#define P3C_REG_IP_SW_RST BIT(31)
#define P3C_MCU_BUS_CK_GATE_EN BIT(30)
#define P3C_FORCE_IP_SW_RST BIT(29)
#define U3P_U3_CHIP_GPIO_CTLE 0x10
#define P3C_RG_SWRST_U3_PHYD BIT(25)
#define P3C_RG_SWRST_U3_PHYD_FORCE_EN BIT(24)
#define U3P_U3_PHYA_REG0 0x000
#define P3A_RG_IEXT_INTR GENMASK(15, 10)
#define P3A_RG_CLKDRV_OFF GENMASK(3, 2)
#define U3P_U3_PHYA_REG1 0x004
#define P3A_RG_CLKDRV_AMP GENMASK(31, 29)
#define U3P_U3_PHYA_REG6 0x018
#define P3A_RG_TX_EIDLE_CM GENMASK(31, 28)
#define U3P_U3_PHYA_REG9 0x024
#define P3A_RG_RX_DAC_MUX GENMASK(5, 1)
#define U3P_U3_PHYA_DA_REG0 0x100
#define P3A_RG_XTAL_EXT_PE2H GENMASK(17, 16)
#define P3A_RG_XTAL_EXT_PE1H GENMASK(13, 12)
#define P3A_RG_XTAL_EXT_EN_U3 GENMASK(11, 10)
#define U3P_U3_PHYA_DA_REG4 0x108
#define P3A_RG_PLL_DIVEN_PE2H GENMASK(21, 19)
#define P3A_RG_PLL_BC_PE2H GENMASK(7, 6)
#define U3P_U3_PHYA_DA_REG5 0x10c
#define P3A_RG_PLL_BR_PE2H GENMASK(29, 28)
#define P3A_RG_PLL_IC_PE2H GENMASK(15, 12)
#define U3P_U3_PHYA_DA_REG6 0x110
#define P3A_RG_PLL_IR_PE2H GENMASK(19, 16)
#define U3P_U3_PHYA_DA_REG7 0x114
#define P3A_RG_PLL_BP_PE2H GENMASK(19, 16)
#define U3P_U3_PHYA_DA_REG20 0x13c
#define P3A_RG_PLL_DELTA1_PE2H GENMASK(31, 16)
#define U3P_U3_PHYA_DA_REG25 0x148
#define P3A_RG_PLL_DELTA_PE2H GENMASK(15, 0)
#define U3P_U3_PHYD_LFPS1 0x00c
#define P3D_RG_FWAKE_TH GENMASK(21, 16)
#define U3P_U3_PHYD_IMPCAL0 0x010
#define P3D_RG_FORCE_TX_IMPEL BIT(31)
#define P3D_RG_TX_IMPEL GENMASK(28, 24)
#define U3P_U3_PHYD_IMPCAL1 0x014
#define P3D_RG_FORCE_RX_IMPEL BIT(31)
#define P3D_RG_RX_IMPEL GENMASK(28, 24)
#define U3P_U3_PHYD_RSV 0x054
#define P3D_RG_EFUSE_AUTO_LOAD_DIS BIT(12)
#define U3P_U3_PHYD_CDR1 0x05c
#define P3D_RG_CDR_BIR_LTD1 GENMASK(28, 24)
#define P3D_RG_CDR_BIR_LTD0 GENMASK(12, 8)
#define U3P_U3_PHYD_RXDET1 0x128
#define P3D_RG_RXDET_STB2_SET GENMASK(17, 9)
#define U3P_U3_PHYD_RXDET2 0x12c
#define P3D_RG_RXDET_STB2_SET_P3 GENMASK(8, 0)
#define U3P_SPLLC_XTALCTL3 0x018
#define XC3_RG_U3_XTAL_RX_PWD BIT(9)
#define XC3_RG_U3_FRC_XTAL_RX_PWD BIT(8)
#define U3P_U2FREQ_FMCR0 0x00
#define P2F_RG_MONCLK_SEL GENMASK(27, 26)
#define P2F_RG_FREQDET_EN BIT(24)
#define P2F_RG_CYCLECNT GENMASK(23, 0)
#define U3P_U2FREQ_VALUE 0x0c
#define U3P_U2FREQ_FMMONR1 0x10
#define P2F_USB_FM_VALID BIT(0)
#define P2F_RG_FRCK_EN BIT(8)
#define U3P_REF_CLK 26 /* MHZ */
#define U3P_SLEW_RATE_COEF 28
#define U3P_SR_COEF_DIVISOR 1000
#define U3P_FM_DET_CYCLE_CNT 1024
/* SATA register setting */
#define PHYD_CTRL_SIGNAL_MODE4 0x1c
/* CDR Charge Pump P-path current adjustment */
#define RG_CDR_BICLTD1_GEN1_MSK GENMASK(23, 20)
#define RG_CDR_BICLTD0_GEN1_MSK GENMASK(11, 8)
#define PHYD_DESIGN_OPTION2 0x24
/* Symbol lock count selection */
#define RG_LOCK_CNT_SEL_MSK GENMASK(5, 4)
#define PHYD_DESIGN_OPTION9 0x40
/* COMWAK GAP width window */
#define RG_TG_MAX_MSK GENMASK(20, 16)
/* COMINIT GAP width window */
#define RG_T2_MAX_MSK GENMASK(13, 8)
/* COMWAK GAP width window */
#define RG_TG_MIN_MSK GENMASK(7, 5)
/* COMINIT GAP width window */
#define RG_T2_MIN_MSK GENMASK(4, 0)
#define ANA_RG_CTRL_SIGNAL1 0x4c
/* TX driver tail current control for 0dB de-emphasis mode for Gen1 speed */
#define RG_IDRV_0DB_GEN1_MSK GENMASK(13, 8)
#define ANA_RG_CTRL_SIGNAL4 0x58
#define RG_CDR_BICLTR_GEN1_MSK GENMASK(23, 20)
/* Loop filter R1 resistance adjustment for Gen1 speed */
#define RG_CDR_BR_GEN2_MSK GENMASK(10, 8)
#define ANA_RG_CTRL_SIGNAL6 0x60
/* I-path capacitance adjustment for Gen1 */
#define RG_CDR_BC_GEN1_MSK GENMASK(28, 24)
#define RG_CDR_BIRLTR_GEN1_MSK GENMASK(4, 0)
#define ANA_EQ_EYE_CTRL_SIGNAL1 0x6c
/* RX Gen1 LEQ tuning step */
#define RG_EQ_DLEQ_LFI_GEN1_MSK GENMASK(11, 8)
#define ANA_EQ_EYE_CTRL_SIGNAL4 0xd8
#define RG_CDR_BIRLTD0_GEN1_MSK GENMASK(20, 16)
#define ANA_EQ_EYE_CTRL_SIGNAL5 0xdc
#define RG_CDR_BIRLTD0_GEN3_MSK GENMASK(4, 0)
/* PHY switch between pcie/usb3/sgmii/sata */
#define USB_PHY_SWITCH_CTRL 0x0
#define RG_PHY_SW_TYPE GENMASK(3, 0)
#define RG_PHY_SW_PCIE 0x0
#define RG_PHY_SW_USB3 0x1
#define RG_PHY_SW_SGMII 0x2
#define RG_PHY_SW_SATA 0x3
#define TPHY_CLKS_CNT 2
#define USER_BUF_LEN(count) min_t(size_t, 8, (count))
enum mtk_phy_version {
MTK_PHY_V1 = 1,
MTK_PHY_V2,
MTK_PHY_V3,
};
struct mtk_phy_pdata {
/* avoid RX sensitivity level degradation only for mt8173 */
bool avoid_rx_sen_degradation;
/*
 * Workaround only for mt8195; HW fixes it for other V3 SoCs.
 * The u2phy should use integer mode instead of the fractional mode
 * of the 48M PLL, so fix it by switching the PLL from the default
 * 48M to 26M.
 */
bool sw_pll_48m_to_26m;
/*
 * Some SoCs (e.g. mt8195) drop a bit when using efuse auto-load,
 * so support the SW way; it is also supported optionally for v2/v3.
 */
bool sw_efuse_supported;
enum mtk_phy_version version;
};
struct u2phy_banks {
void __iomem *misc;
void __iomem *fmreg;
void __iomem *com;
};
struct u3phy_banks {
void __iomem *spllc;
void __iomem *chip;
void __iomem *phyd; /* include u3phyd_bank2 */
void __iomem *phya; /* include u3phya_da */
};
struct mtk_phy_instance {
struct phy *phy;
void __iomem *port_base;
union {
struct u2phy_banks u2_banks;
struct u3phy_banks u3_banks;
};
struct clk_bulk_data clks[TPHY_CLKS_CNT];
u32 index;
u32 type;
struct regmap *type_sw;
u32 type_sw_reg;
u32 type_sw_index;
u32 efuse_sw_en;
u32 efuse_intr;
u32 efuse_tx_imp;
u32 efuse_rx_imp;
int eye_src;
int eye_vrt;
int eye_term;
int intr;
int discth;
int pre_emphasis;
bool bc12_en;
};
struct mtk_tphy {
struct device *dev;
void __iomem *sif_base; /* only shared sif */
const struct mtk_phy_pdata *pdata;
struct mtk_phy_instance **phys;
int nphys;
int src_ref_clk; /* MHZ, reference clock for slew rate calibration */
int src_coef; /* coefficient for slew rate calibration */
};
#if IS_ENABLED(CONFIG_DEBUG_FS)
enum u2_phy_params {
U2P_EYE_VRT = 0,
U2P_EYE_TERM,
U2P_EFUSE_EN,
U2P_EFUSE_INTR,
U2P_DISCTH,
U2P_PRE_EMPHASIS,
};
enum u3_phy_params {
U3P_EFUSE_EN = 0,
U3P_EFUSE_INTR,
U3P_EFUSE_TX_IMP,
U3P_EFUSE_RX_IMP,
};
static const char *const u2_phy_files[] = {
[U2P_EYE_VRT] = "vrt",
[U2P_EYE_TERM] = "term",
[U2P_EFUSE_EN] = "efuse",
[U2P_EFUSE_INTR] = "intr",
[U2P_DISCTH] = "discth",
[U2P_PRE_EMPHASIS] = "preemph",
};
static const char *const u3_phy_files[] = {
[U3P_EFUSE_EN] = "efuse",
[U3P_EFUSE_INTR] = "intr",
[U3P_EFUSE_TX_IMP] = "tx-imp",
[U3P_EFUSE_RX_IMP] = "rx-imp",
};
static int u2_phy_params_show(struct seq_file *sf, void *unused)
{
struct mtk_phy_instance *inst = sf->private;
const char *fname = file_dentry(sf->file)->d_iname;
struct u2phy_banks *u2_banks = &inst->u2_banks;
void __iomem *com = u2_banks->com;
u32 max = 0;
u32 tmp = 0;
u32 val = 0;
int ret;
ret = match_string(u2_phy_files, ARRAY_SIZE(u2_phy_files), fname);
if (ret < 0)
return ret;
switch (ret) {
case U2P_EYE_VRT:
tmp = readl(com + U3P_USBPHYACR1);
val = FIELD_GET(PA1_RG_VRT_SEL, tmp);
max = FIELD_MAX(PA1_RG_VRT_SEL);
break;
case U2P_EYE_TERM:
tmp = readl(com + U3P_USBPHYACR1);
val = FIELD_GET(PA1_RG_TERM_SEL, tmp);
max = FIELD_MAX(PA1_RG_TERM_SEL);
break;
case U2P_EFUSE_EN:
if (u2_banks->misc) {
tmp = readl(u2_banks->misc + U3P_MISC_REG1);
max = 1;
}
val = !!(tmp & MR1_EFUSE_AUTO_LOAD_DIS);
break;
case U2P_EFUSE_INTR:
tmp = readl(com + U3P_USBPHYACR1);
val = FIELD_GET(PA1_RG_INTR_CAL, tmp);
max = FIELD_MAX(PA1_RG_INTR_CAL);
break;
case U2P_DISCTH:
tmp = readl(com + U3P_USBPHYACR6);
val = FIELD_GET(PA6_RG_U2_DISCTH, tmp);
max = FIELD_MAX(PA6_RG_U2_DISCTH);
break;
case U2P_PRE_EMPHASIS:
tmp = readl(com + U3P_USBPHYACR6);
val = FIELD_GET(PA6_RG_U2_PRE_EMP, tmp);
max = FIELD_MAX(PA6_RG_U2_PRE_EMP);
break;
default:
seq_printf(sf, "invalid, %d\n", ret);
break;
}
seq_printf(sf, "%s : %d [0, %d]\n", fname, val, max);
return 0;
}
static int u2_phy_params_open(struct inode *inode, struct file *file)
{
return single_open(file, u2_phy_params_show, inode->i_private);
}
static ssize_t u2_phy_params_write(struct file *file, const char __user *ubuf,
size_t count, loff_t *ppos)
{
const char *fname = file_dentry(file)->d_iname;
struct seq_file *sf = file->private_data;
struct mtk_phy_instance *inst = sf->private;
struct u2phy_banks *u2_banks = &inst->u2_banks;
void __iomem *com = u2_banks->com;
ssize_t rc;
u32 val;
int ret;
rc = kstrtouint_from_user(ubuf, USER_BUF_LEN(count), 0, &val);
if (rc)
return rc;
ret = match_string(u2_phy_files, ARRAY_SIZE(u2_phy_files), fname);
if (ret < 0)
return (ssize_t)ret;
switch (ret) {
case U2P_EYE_VRT:
mtk_phy_update_field(com + U3P_USBPHYACR1, PA1_RG_VRT_SEL, val);
break;
case U2P_EYE_TERM:
mtk_phy_update_field(com + U3P_USBPHYACR1, PA1_RG_TERM_SEL, val);
break;
case U2P_EFUSE_EN:
if (u2_banks->misc)
mtk_phy_update_field(u2_banks->misc + U3P_MISC_REG1,
MR1_EFUSE_AUTO_LOAD_DIS, !!val);
break;
case U2P_EFUSE_INTR:
mtk_phy_update_field(com + U3P_USBPHYACR1, PA1_RG_INTR_CAL, val);
break;
case U2P_DISCTH:
mtk_phy_update_field(com + U3P_USBPHYACR6, PA6_RG_U2_DISCTH, val);
break;
case U2P_PRE_EMPHASIS:
mtk_phy_update_field(com + U3P_USBPHYACR6, PA6_RG_U2_PRE_EMP, val);
break;
default:
break;
}
return count;
}
static const struct file_operations u2_phy_fops = {
.open = u2_phy_params_open,
.write = u2_phy_params_write,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static void u2_phy_dbgfs_files_create(struct mtk_phy_instance *inst)
{
u32 count = ARRAY_SIZE(u2_phy_files);
int i;
for (i = 0; i < count; i++)
debugfs_create_file(u2_phy_files[i], 0644, inst->phy->debugfs,
inst, &u2_phy_fops);
}
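/*
 * Userspace usage sketch (paths and the value read back are illustrative;
 * the directory name comes from the phy core debugfs layout and the phy
 * device name):
 *
 *	# cat /sys/kernel/debug/phy/<phy-name>/vrt
 *	vrt : 4 [0, 7]
 *	# echo 5 > /sys/kernel/debug/phy/<phy-name>/vrt
 *
 * Writes are handled by u2_phy_params_write() and update the corresponding
 * register field directly.
 */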
static int u3_phy_params_show(struct seq_file *sf, void *unused)
{
struct mtk_phy_instance *inst = sf->private;
const char *fname = file_dentry(sf->file)->d_iname;
struct u3phy_banks *u3_banks = &inst->u3_banks;
u32 val = 0;
u32 max = 0;
u32 tmp;
int ret;
ret = match_string(u3_phy_files, ARRAY_SIZE(u3_phy_files), fname);
if (ret < 0)
return ret;
switch (ret) {
case U3P_EFUSE_EN:
tmp = readl(u3_banks->phyd + U3P_U3_PHYD_RSV);
val = !!(tmp & P3D_RG_EFUSE_AUTO_LOAD_DIS);
max = 1;
break;
case U3P_EFUSE_INTR:
tmp = readl(u3_banks->phya + U3P_U3_PHYA_REG0);
val = FIELD_GET(P3A_RG_IEXT_INTR, tmp);
max = FIELD_MAX(P3A_RG_IEXT_INTR);
break;
case U3P_EFUSE_TX_IMP:
tmp = readl(u3_banks->phyd + U3P_U3_PHYD_IMPCAL0);
val = FIELD_GET(P3D_RG_TX_IMPEL, tmp);
max = FIELD_MAX(P3D_RG_TX_IMPEL);
break;
case U3P_EFUSE_RX_IMP:
tmp = readl(u3_banks->phyd + U3P_U3_PHYD_IMPCAL1);
val = FIELD_GET(P3D_RG_RX_IMPEL, tmp);
max = FIELD_MAX(P3D_RG_RX_IMPEL);
break;
default:
seq_printf(sf, "invalid, %d\n", ret);
break;
}
seq_printf(sf, "%s : %d [0, %d]\n", fname, val, max);
return 0;
}
static int u3_phy_params_open(struct inode *inode, struct file *file)
{
return single_open(file, u3_phy_params_show, inode->i_private);
}
static ssize_t u3_phy_params_write(struct file *file, const char __user *ubuf,
size_t count, loff_t *ppos)
{
const char *fname = file_dentry(file)->d_iname;
struct seq_file *sf = file->private_data;
struct mtk_phy_instance *inst = sf->private;
struct u3phy_banks *u3_banks = &inst->u3_banks;
void __iomem *phyd = u3_banks->phyd;
ssize_t rc;
u32 val;
int ret;
rc = kstrtouint_from_user(ubuf, USER_BUF_LEN(count), 0, &val);
if (rc)
return rc;
ret = match_string(u3_phy_files, ARRAY_SIZE(u3_phy_files), fname);
if (ret < 0)
return (ssize_t)ret;
switch (ret) {
case U3P_EFUSE_EN:
mtk_phy_update_field(phyd + U3P_U3_PHYD_RSV,
P3D_RG_EFUSE_AUTO_LOAD_DIS, !!val);
break;
case U3P_EFUSE_INTR:
mtk_phy_update_field(u3_banks->phya + U3P_U3_PHYA_REG0,
P3A_RG_IEXT_INTR, val);
break;
case U3P_EFUSE_TX_IMP:
mtk_phy_update_field(phyd + U3P_U3_PHYD_IMPCAL0, P3D_RG_TX_IMPEL, val);
mtk_phy_set_bits(phyd + U3P_U3_PHYD_IMPCAL0, P3D_RG_FORCE_TX_IMPEL);
break;
case U3P_EFUSE_RX_IMP:
mtk_phy_update_field(phyd + U3P_U3_PHYD_IMPCAL1, P3D_RG_RX_IMPEL, val);
mtk_phy_set_bits(phyd + U3P_U3_PHYD_IMPCAL1, P3D_RG_FORCE_RX_IMPEL);
break;
default:
break;
}
return count;
}
static const struct file_operations u3_phy_fops = {
.open = u3_phy_params_open,
.write = u3_phy_params_write,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static void u3_phy_dbgfs_files_create(struct mtk_phy_instance *inst)
{
u32 count = ARRAY_SIZE(u3_phy_files);
int i;
for (i = 0; i < count; i++)
debugfs_create_file(u3_phy_files[i], 0644, inst->phy->debugfs,
inst, &u3_phy_fops);
}
static int phy_type_show(struct seq_file *sf, void *unused)
{
struct mtk_phy_instance *inst = sf->private;
const char *type;
switch (inst->type) {
case PHY_TYPE_USB2:
type = "USB2";
break;
case PHY_TYPE_USB3:
type = "USB3";
break;
case PHY_TYPE_PCIE:
type = "PCIe";
break;
case PHY_TYPE_SGMII:
type = "SGMII";
break;
case PHY_TYPE_SATA:
type = "SATA";
break;
default:
type = "";
}
seq_printf(sf, "%s\n", type);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(phy_type);
/* these files will be removed when phy is released by phy core */
static void phy_debugfs_init(struct mtk_phy_instance *inst)
{
debugfs_create_file("type", 0444, inst->phy->debugfs, inst, &phy_type_fops);
switch (inst->type) {
case PHY_TYPE_USB2:
u2_phy_dbgfs_files_create(inst);
break;
case PHY_TYPE_USB3:
case PHY_TYPE_PCIE:
u3_phy_dbgfs_files_create(inst);
break;
default:
break;
}
}
#else
static void phy_debugfs_init(struct mtk_phy_instance *inst)
{}
#endif
static void hs_slew_rate_calibrate(struct mtk_tphy *tphy,
struct mtk_phy_instance *instance)
{
struct u2phy_banks *u2_banks = &instance->u2_banks;
void __iomem *fmreg = u2_banks->fmreg;
void __iomem *com = u2_banks->com;
int calibration_val;
int fm_out;
u32 tmp;
/* HW V3 doesn't support slew rate cal anymore */
if (tphy->pdata->version == MTK_PHY_V3)
return;
/* use force value */
if (instance->eye_src)
return;
/* enable USB ring oscillator */
mtk_phy_set_bits(com + U3P_USBPHYACR5, PA5_RG_U2_HSTX_SRCAL_EN);
udelay(1);
/* enable free run clock */
mtk_phy_set_bits(fmreg + U3P_U2FREQ_FMMONR1, P2F_RG_FRCK_EN);
/* set cycle count as 1024, and select u2 channel */
tmp = readl(fmreg + U3P_U2FREQ_FMCR0);
tmp &= ~(P2F_RG_CYCLECNT | P2F_RG_MONCLK_SEL);
tmp |= FIELD_PREP(P2F_RG_CYCLECNT, U3P_FM_DET_CYCLE_CNT);
if (tphy->pdata->version == MTK_PHY_V1)
tmp |= FIELD_PREP(P2F_RG_MONCLK_SEL, instance->index >> 1);
writel(tmp, fmreg + U3P_U2FREQ_FMCR0);
/* enable frequency meter */
mtk_phy_set_bits(fmreg + U3P_U2FREQ_FMCR0, P2F_RG_FREQDET_EN);
/* ignore return value */
readl_poll_timeout(fmreg + U3P_U2FREQ_FMMONR1, tmp,
(tmp & P2F_USB_FM_VALID), 10, 200);
fm_out = readl(fmreg + U3P_U2FREQ_VALUE);
/* disable frequency meter */
mtk_phy_clear_bits(fmreg + U3P_U2FREQ_FMCR0, P2F_RG_FREQDET_EN);
/* disable free run clock */
mtk_phy_clear_bits(fmreg + U3P_U2FREQ_FMMONR1, P2F_RG_FRCK_EN);
if (fm_out) {
/* ( 1024 / FM_OUT ) x reference clock frequency x coef */
tmp = tphy->src_ref_clk * tphy->src_coef;
tmp = (tmp * U3P_FM_DET_CYCLE_CNT) / fm_out;
calibration_val = DIV_ROUND_CLOSEST(tmp, U3P_SR_COEF_DIVISOR);
} else {
/* if FM detection fail, set default value */
calibration_val = 4;
}
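/*
 * Worked example with assumed numbers: for src_ref_clk = 26 (MHz),
 * src_coef = 28 and a hypothetical fm_out of 186, tmp = 26 * 28 = 728,
 * then 728 * 1024 / 186 = 4007 (integer division), and
 * DIV_ROUND_CLOSEST(4007, 1000) = 4, matching the fallback value used
 * when the measurement fails.
 */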
dev_dbg(tphy->dev, "phy:%d, fm_out:%d, calib:%d (clk:%d, coef:%d)\n",
instance->index, fm_out, calibration_val,
tphy->src_ref_clk, tphy->src_coef);
/* set HS slew rate */
mtk_phy_update_field(com + U3P_USBPHYACR5, PA5_RG_U2_HSTX_SRCTRL,
calibration_val);
/* disable USB ring oscillator */
mtk_phy_clear_bits(com + U3P_USBPHYACR5, PA5_RG_U2_HSTX_SRCAL_EN);
}
static void u3_phy_instance_init(struct mtk_tphy *tphy,
struct mtk_phy_instance *instance)
{
struct u3phy_banks *u3_banks = &instance->u3_banks;
void __iomem *phya = u3_banks->phya;
void __iomem *phyd = u3_banks->phyd;
/* gating PCIe Analog XTAL clock */
mtk_phy_set_bits(u3_banks->spllc + U3P_SPLLC_XTALCTL3,
XC3_RG_U3_XTAL_RX_PWD | XC3_RG_U3_FRC_XTAL_RX_PWD);
/* gating XSQ */
mtk_phy_update_field(phya + U3P_U3_PHYA_DA_REG0, P3A_RG_XTAL_EXT_EN_U3, 2);
mtk_phy_update_field(phya + U3P_U3_PHYA_REG9, P3A_RG_RX_DAC_MUX, 4);
mtk_phy_update_field(phya + U3P_U3_PHYA_REG6, P3A_RG_TX_EIDLE_CM, 0xe);
mtk_phy_update_bits(u3_banks->phyd + U3P_U3_PHYD_CDR1,
P3D_RG_CDR_BIR_LTD0 | P3D_RG_CDR_BIR_LTD1,
FIELD_PREP(P3D_RG_CDR_BIR_LTD0, 0xc) |
FIELD_PREP(P3D_RG_CDR_BIR_LTD1, 0x3));
mtk_phy_update_field(phyd + U3P_U3_PHYD_LFPS1, P3D_RG_FWAKE_TH, 0x34);
mtk_phy_update_field(phyd + U3P_U3_PHYD_RXDET1, P3D_RG_RXDET_STB2_SET, 0x10);
mtk_phy_update_field(phyd + U3P_U3_PHYD_RXDET2, P3D_RG_RXDET_STB2_SET_P3, 0x10);
dev_dbg(tphy->dev, "%s(%d)\n", __func__, instance->index);
}
static void u2_phy_pll_26m_set(struct mtk_tphy *tphy,
struct mtk_phy_instance *instance)
{
struct u2phy_banks *u2_banks = &instance->u2_banks;
void __iomem *com = u2_banks->com;
if (!tphy->pdata->sw_pll_48m_to_26m)
return;
mtk_phy_update_field(com + U3P_USBPHYACR0, PA0_USB20_PLL_PREDIV, 0);
mtk_phy_update_field(com + U3P_USBPHYACR2, PA2_RG_U2PLL_BW, 3);
writel(P2R_RG_U2PLL_FBDIV_26M, com + U3P_U2PHYA_RESV);
mtk_phy_set_bits(com + U3P_U2PHYA_RESV1,
P2R_RG_U2PLL_FRA_EN | P2R_RG_U2PLL_REFCLK_SEL);
}
static void u2_phy_instance_init(struct mtk_tphy *tphy,
struct mtk_phy_instance *instance)
{
struct u2phy_banks *u2_banks = &instance->u2_banks;
void __iomem *com = u2_banks->com;
u32 index = instance->index;
/* switch to USB function, and enable usb pll */
mtk_phy_clear_bits(com + U3P_U2PHYDTM0, P2C_FORCE_UART_EN | P2C_FORCE_SUSPENDM);
mtk_phy_clear_bits(com + U3P_U2PHYDTM0,
P2C_RG_XCVRSEL | P2C_RG_DATAIN | P2C_DTM0_PART_MASK);
mtk_phy_clear_bits(com + U3P_U2PHYDTM1, P2C_RG_UART_EN);
mtk_phy_set_bits(com + U3P_USBPHYACR0, PA0_RG_USB20_INTR_EN);
/* disable switching the 100uA current to SSUSB */
mtk_phy_clear_bits(com + U3P_USBPHYACR5, PA5_RG_U2_HS_100U_U3_EN);
mtk_phy_clear_bits(com + U3P_U2PHYACR4, P2C_U2_GPIO_CTR_MSK);
if (tphy->pdata->avoid_rx_sen_degradation) {
if (!index) {
mtk_phy_set_bits(com + U3P_USBPHYACR2, PA2_RG_SIF_U2PLL_FORCE_EN);
mtk_phy_clear_bits(com + U3D_U2PHYDCR0, P2C_RG_SIF_U2PLL_FORCE_ON);
} else {
mtk_phy_set_bits(com + U3D_U2PHYDCR0, P2C_RG_SIF_U2PLL_FORCE_ON);
mtk_phy_set_bits(com + U3P_U2PHYDTM0,
P2C_RG_SUSPENDM | P2C_FORCE_SUSPENDM);
}
}
/* DP/DM BC1.1 path Disable */
mtk_phy_clear_bits(com + U3P_USBPHYACR6, PA6_RG_U2_BC11_SW_EN);
mtk_phy_update_field(com + U3P_USBPHYACR6, PA6_RG_U2_SQTH, 2);
/* Workaround only for mt8195; HW fixes it for others (V3) */
u2_phy_pll_26m_set(tphy, instance);
dev_dbg(tphy->dev, "%s(%d)\n", __func__, index);
}
static void u2_phy_instance_power_on(struct mtk_tphy *tphy,
struct mtk_phy_instance *instance)
{
struct u2phy_banks *u2_banks = &instance->u2_banks;
void __iomem *com = u2_banks->com;
u32 index = instance->index;
/* OTG Enable */
mtk_phy_set_bits(com + U3P_USBPHYACR6, PA6_RG_U2_OTG_VBUSCMP_EN);
mtk_phy_set_bits(com + U3P_U2PHYDTM1, P2C_RG_VBUSVALID | P2C_RG_AVALID);
mtk_phy_clear_bits(com + U3P_U2PHYDTM1, P2C_RG_SESSEND);
if (tphy->pdata->avoid_rx_sen_degradation && index) {
mtk_phy_set_bits(com + U3D_U2PHYDCR0, P2C_RG_SIF_U2PLL_FORCE_ON);
mtk_phy_set_bits(com + U3P_U2PHYDTM0, P2C_RG_SUSPENDM | P2C_FORCE_SUSPENDM);
}
dev_dbg(tphy->dev, "%s(%d)\n", __func__, index);
}
static void u2_phy_instance_power_off(struct mtk_tphy *tphy,
struct mtk_phy_instance *instance)
{
struct u2phy_banks *u2_banks = &instance->u2_banks;
void __iomem *com = u2_banks->com;
u32 index = instance->index;
/* OTG Disable */
mtk_phy_clear_bits(com + U3P_USBPHYACR6, PA6_RG_U2_OTG_VBUSCMP_EN);
mtk_phy_clear_bits(com + U3P_U2PHYDTM1, P2C_RG_VBUSVALID | P2C_RG_AVALID);
mtk_phy_set_bits(com + U3P_U2PHYDTM1, P2C_RG_SESSEND);
if (tphy->pdata->avoid_rx_sen_degradation && index) {
mtk_phy_clear_bits(com + U3P_U2PHYDTM0, P2C_RG_SUSPENDM | P2C_FORCE_SUSPENDM);
mtk_phy_clear_bits(com + U3D_U2PHYDCR0, P2C_RG_SIF_U2PLL_FORCE_ON);
}
dev_dbg(tphy->dev, "%s(%d)\n", __func__, index);
}
static void u2_phy_instance_exit(struct mtk_tphy *tphy,
struct mtk_phy_instance *instance)
{
struct u2phy_banks *u2_banks = &instance->u2_banks;
void __iomem *com = u2_banks->com;
u32 index = instance->index;
if (tphy->pdata->avoid_rx_sen_degradation && index) {
mtk_phy_clear_bits(com + U3D_U2PHYDCR0, P2C_RG_SIF_U2PLL_FORCE_ON);
mtk_phy_clear_bits(com + U3P_U2PHYDTM0, P2C_FORCE_SUSPENDM);
}
}
static void u2_phy_instance_set_mode(struct mtk_tphy *tphy,
struct mtk_phy_instance *instance,
enum phy_mode mode)
{
struct u2phy_banks *u2_banks = &instance->u2_banks;
u32 tmp;
tmp = readl(u2_banks->com + U3P_U2PHYDTM1);
switch (mode) {
case PHY_MODE_USB_DEVICE:
tmp |= P2C_FORCE_IDDIG | P2C_RG_IDDIG;
break;
case PHY_MODE_USB_HOST:
tmp |= P2C_FORCE_IDDIG;
tmp &= ~P2C_RG_IDDIG;
break;
case PHY_MODE_USB_OTG:
tmp &= ~(P2C_FORCE_IDDIG | P2C_RG_IDDIG);
break;
default:
return;
}
writel(tmp, u2_banks->com + U3P_U2PHYDTM1);
}
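/*
 * Consumer-side sketch (illustrative): a controller driver selects the
 * role through the generic helper, e.g.
 * phy_set_mode(phy, PHY_MODE_USB_HOST), which is expected to reach this
 * function via the tphy's set_mode callback registered elsewhere in this
 * driver.
 */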
static void pcie_phy_instance_init(struct mtk_tphy *tphy,
struct mtk_phy_instance *instance)
{
struct u3phy_banks *u3_banks = &instance->u3_banks;
void __iomem *phya = u3_banks->phya;
if (tphy->pdata->version != MTK_PHY_V1)
return;
mtk_phy_update_bits(phya + U3P_U3_PHYA_DA_REG0,
P3A_RG_XTAL_EXT_PE1H | P3A_RG_XTAL_EXT_PE2H,
FIELD_PREP(P3A_RG_XTAL_EXT_PE1H, 0x2) |
FIELD_PREP(P3A_RG_XTAL_EXT_PE2H, 0x2));
/* ref clk drive */
mtk_phy_update_field(phya + U3P_U3_PHYA_REG1, P3A_RG_CLKDRV_AMP, 0x4);
mtk_phy_update_field(phya + U3P_U3_PHYA_REG0, P3A_RG_CLKDRV_OFF, 0x1);
/* SSC delta -5000ppm */
mtk_phy_update_field(phya + U3P_U3_PHYA_DA_REG20, P3A_RG_PLL_DELTA1_PE2H, 0x3c);
mtk_phy_update_field(phya + U3P_U3_PHYA_DA_REG25, P3A_RG_PLL_DELTA_PE2H, 0x36);
/* change pll BW 0.6M */
mtk_phy_update_bits(phya + U3P_U3_PHYA_DA_REG5,
P3A_RG_PLL_BR_PE2H | P3A_RG_PLL_IC_PE2H,
FIELD_PREP(P3A_RG_PLL_BR_PE2H, 0x1) |
FIELD_PREP(P3A_RG_PLL_IC_PE2H, 0x1));
mtk_phy_update_bits(phya + U3P_U3_PHYA_DA_REG4,
P3A_RG_PLL_DIVEN_PE2H | P3A_RG_PLL_BC_PE2H,
FIELD_PREP(P3A_RG_PLL_BC_PE2H, 0x3));
mtk_phy_update_field(phya + U3P_U3_PHYA_DA_REG6, P3A_RG_PLL_IR_PE2H, 0x2);
mtk_phy_update_field(phya + U3P_U3_PHYA_DA_REG7, P3A_RG_PLL_BP_PE2H, 0xa);
/* Tx Detect Rx Timing: 10us -> 5us */
mtk_phy_update_field(u3_banks->phyd + U3P_U3_PHYD_RXDET1,
P3D_RG_RXDET_STB2_SET, 0x10);
mtk_phy_update_field(u3_banks->phyd + U3P_U3_PHYD_RXDET2,
P3D_RG_RXDET_STB2_SET_P3, 0x10);
/* wait for the PCIe subsystem registers to become active */
usleep_range(2500, 3000);
dev_dbg(tphy->dev, "%s(%d)\n", __func__, instance->index);
}
static void pcie_phy_instance_power_on(struct mtk_tphy *tphy,
struct mtk_phy_instance *instance)
{
struct u3phy_banks *bank = &instance->u3_banks;
mtk_phy_clear_bits(bank->chip + U3P_U3_CHIP_GPIO_CTLD,
P3C_FORCE_IP_SW_RST | P3C_REG_IP_SW_RST);
mtk_phy_clear_bits(bank->chip + U3P_U3_CHIP_GPIO_CTLE,
P3C_RG_SWRST_U3_PHYD_FORCE_EN | P3C_RG_SWRST_U3_PHYD);
}
static void pcie_phy_instance_power_off(struct mtk_tphy *tphy,
struct mtk_phy_instance *instance)
{
struct u3phy_banks *bank = &instance->u3_banks;
mtk_phy_set_bits(bank->chip + U3P_U3_CHIP_GPIO_CTLD,
P3C_FORCE_IP_SW_RST | P3C_REG_IP_SW_RST);
mtk_phy_set_bits(bank->chip + U3P_U3_CHIP_GPIO_CTLE,
P3C_RG_SWRST_U3_PHYD_FORCE_EN | P3C_RG_SWRST_U3_PHYD);
}
static void sata_phy_instance_init(struct mtk_tphy *tphy,
struct mtk_phy_instance *instance)
{
struct u3phy_banks *u3_banks = &instance->u3_banks;
void __iomem *phyd = u3_banks->phyd;
/* charge current adjustment */
mtk_phy_update_bits(phyd + ANA_RG_CTRL_SIGNAL6,
RG_CDR_BIRLTR_GEN1_MSK | RG_CDR_BC_GEN1_MSK,
FIELD_PREP(RG_CDR_BIRLTR_GEN1_MSK, 0x6) |
FIELD_PREP(RG_CDR_BC_GEN1_MSK, 0x1a));
mtk_phy_update_field(phyd + ANA_EQ_EYE_CTRL_SIGNAL4, RG_CDR_BIRLTD0_GEN1_MSK, 0x18);
mtk_phy_update_field(phyd + ANA_EQ_EYE_CTRL_SIGNAL5, RG_CDR_BIRLTD0_GEN3_MSK, 0x06);
mtk_phy_update_bits(phyd + ANA_RG_CTRL_SIGNAL4,
RG_CDR_BICLTR_GEN1_MSK | RG_CDR_BR_GEN2_MSK,
FIELD_PREP(RG_CDR_BICLTR_GEN1_MSK, 0x0c) |
FIELD_PREP(RG_CDR_BR_GEN2_MSK, 0x07));
mtk_phy_update_bits(phyd + PHYD_CTRL_SIGNAL_MODE4,
RG_CDR_BICLTD0_GEN1_MSK | RG_CDR_BICLTD1_GEN1_MSK,
FIELD_PREP(RG_CDR_BICLTD0_GEN1_MSK, 0x08) |
FIELD_PREP(RG_CDR_BICLTD1_GEN1_MSK, 0x02));
mtk_phy_update_field(phyd + PHYD_DESIGN_OPTION2, RG_LOCK_CNT_SEL_MSK, 0x02);
mtk_phy_update_bits(phyd + PHYD_DESIGN_OPTION9,
RG_T2_MIN_MSK | RG_TG_MIN_MSK,
FIELD_PREP(RG_T2_MIN_MSK, 0x12) |
FIELD_PREP(RG_TG_MIN_MSK, 0x04));
mtk_phy_update_bits(phyd + PHYD_DESIGN_OPTION9,
RG_T2_MAX_MSK | RG_TG_MAX_MSK,
FIELD_PREP(RG_T2_MAX_MSK, 0x31) |
FIELD_PREP(RG_TG_MAX_MSK, 0x0e));
mtk_phy_update_field(phyd + ANA_RG_CTRL_SIGNAL1, RG_IDRV_0DB_GEN1_MSK, 0x20);
mtk_phy_update_field(phyd + ANA_EQ_EYE_CTRL_SIGNAL1, RG_EQ_DLEQ_LFI_GEN1_MSK, 0x03);
dev_dbg(tphy->dev, "%s(%d)\n", __func__, instance->index);
}
static void phy_v1_banks_init(struct mtk_tphy *tphy,
struct mtk_phy_instance *instance)
{
struct u2phy_banks *u2_banks = &instance->u2_banks;
struct u3phy_banks *u3_banks = &instance->u3_banks;
switch (instance->type) {
case PHY_TYPE_USB2:
u2_banks->misc = NULL;
u2_banks->fmreg = tphy->sif_base + SSUSB_SIFSLV_V1_U2FREQ;
u2_banks->com = instance->port_base + SSUSB_SIFSLV_V1_U2PHY_COM;
break;
case PHY_TYPE_USB3:
case PHY_TYPE_PCIE:
u3_banks->spllc = tphy->sif_base + SSUSB_SIFSLV_V1_SPLLC;
u3_banks->chip = tphy->sif_base + SSUSB_SIFSLV_V1_CHIP;
u3_banks->phyd = instance->port_base + SSUSB_SIFSLV_V1_U3PHYD;
u3_banks->phya = instance->port_base + SSUSB_SIFSLV_V1_U3PHYA;
break;
case PHY_TYPE_SATA:
u3_banks->phyd = instance->port_base + SSUSB_SIFSLV_V1_U3PHYD;
break;
default:
dev_err(tphy->dev, "incompatible PHY type\n");
return;
}
}
static void phy_v2_banks_init(struct mtk_tphy *tphy,
struct mtk_phy_instance *instance)
{
struct u2phy_banks *u2_banks = &instance->u2_banks;
struct u3phy_banks *u3_banks = &instance->u3_banks;
switch (instance->type) {
case PHY_TYPE_USB2:
u2_banks->misc = instance->port_base + SSUSB_SIFSLV_V2_MISC;
u2_banks->fmreg = instance->port_base + SSUSB_SIFSLV_V2_U2FREQ;
u2_banks->com = instance->port_base + SSUSB_SIFSLV_V2_U2PHY_COM;
break;
case PHY_TYPE_USB3:
case PHY_TYPE_PCIE:
u3_banks->spllc = instance->port_base + SSUSB_SIFSLV_V2_SPLLC;
u3_banks->chip = instance->port_base + SSUSB_SIFSLV_V2_CHIP;
u3_banks->phyd = instance->port_base + SSUSB_SIFSLV_V2_U3PHYD;
u3_banks->phya = instance->port_base + SSUSB_SIFSLV_V2_U3PHYA;
break;
default:
dev_err(tphy->dev, "incompatible PHY type\n");
return;
}
}
static void phy_parse_property(struct mtk_tphy *tphy,
struct mtk_phy_instance *instance)
{
struct device *dev = &instance->phy->dev;
if (instance->type != PHY_TYPE_USB2)
return;
instance->bc12_en = device_property_read_bool(dev, "mediatek,bc12");
device_property_read_u32(dev, "mediatek,eye-src",
&instance->eye_src);
device_property_read_u32(dev, "mediatek,eye-vrt",
&instance->eye_vrt);
device_property_read_u32(dev, "mediatek,eye-term",
&instance->eye_term);
device_property_read_u32(dev, "mediatek,intr",
&instance->intr);
device_property_read_u32(dev, "mediatek,discth",
&instance->discth);
device_property_read_u32(dev, "mediatek,pre-emphasis",
&instance->pre_emphasis);
dev_dbg(dev, "bc12:%d, src:%d, vrt:%d, term:%d, intr:%d, disc:%d\n",
instance->bc12_en, instance->eye_src,
instance->eye_vrt, instance->eye_term,
instance->intr, instance->discth);
dev_dbg(dev, "pre-emp:%d\n", instance->pre_emphasis);
}
static void u2_phy_props_set(struct mtk_tphy *tphy,
struct mtk_phy_instance *instance)
{
struct u2phy_banks *u2_banks = &instance->u2_banks;
void __iomem *com = u2_banks->com;
if (instance->bc12_en) /* BC1.2 path Enable */
mtk_phy_set_bits(com + U3P_U2PHYBC12C, P2C_RG_CHGDT_EN);
if (tphy->pdata->version < MTK_PHY_V3 && instance->eye_src)
mtk_phy_update_field(com + U3P_USBPHYACR5, PA5_RG_U2_HSTX_SRCTRL,
instance->eye_src);
if (instance->eye_vrt)
mtk_phy_update_field(com + U3P_USBPHYACR1, PA1_RG_VRT_SEL,
instance->eye_vrt);
if (instance->eye_term)
mtk_phy_update_field(com + U3P_USBPHYACR1, PA1_RG_TERM_SEL,
instance->eye_term);
if (instance->intr) {
if (u2_banks->misc)
mtk_phy_set_bits(u2_banks->misc + U3P_MISC_REG1,
MR1_EFUSE_AUTO_LOAD_DIS);
mtk_phy_update_field(com + U3P_USBPHYACR1, PA1_RG_INTR_CAL,
instance->intr);
}
if (instance->discth)
mtk_phy_update_field(com + U3P_USBPHYACR6, PA6_RG_U2_DISCTH,
instance->discth);
if (instance->pre_emphasis)
mtk_phy_update_field(com + U3P_USBPHYACR6, PA6_RG_U2_PRE_EMP,
instance->pre_emphasis);
}
/* type switch for usb3/pcie/sgmii/sata */
static int phy_type_syscon_get(struct mtk_phy_instance *instance,
struct device_node *dn)
{
struct of_phandle_args args;
int ret;
/* type switch function is optional */
if (!of_property_read_bool(dn, "mediatek,syscon-type"))
return 0;
ret = of_parse_phandle_with_fixed_args(dn, "mediatek,syscon-type",
2, 0, &args);
if (ret)
return ret;
instance->type_sw_reg = args.args[0];
instance->type_sw_index = args.args[1] & 0x3; /* <=3 */
instance->type_sw = syscon_node_to_regmap(args.np);
of_node_put(args.np);
dev_info(&instance->phy->dev, "type_sw - reg %#x, index %d\n",
instance->type_sw_reg, instance->type_sw_index);
return PTR_ERR_OR_ZERO(instance->type_sw);
}
static int phy_type_set(struct mtk_phy_instance *instance)
{
int type;
u32 offset;
if (!instance->type_sw)
return 0;
switch (instance->type) {
case PHY_TYPE_USB3:
type = RG_PHY_SW_USB3;
break;
case PHY_TYPE_PCIE:
type = RG_PHY_SW_PCIE;
break;
case PHY_TYPE_SGMII:
type = RG_PHY_SW_SGMII;
break;
case PHY_TYPE_SATA:
type = RG_PHY_SW_SATA;
break;
case PHY_TYPE_USB2:
default:
return 0;
}
offset = instance->type_sw_index * BITS_PER_BYTE;
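/* each index occupies one byte of the syscon register; e.g. (assuming
 * RG_PHY_SW_TYPE is an 8-bit mask) index 1 selects bits 15:8
 */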
regmap_update_bits(instance->type_sw, instance->type_sw_reg,
RG_PHY_SW_TYPE << offset, type << offset);
return 0;
}
static int phy_efuse_get(struct mtk_tphy *tphy, struct mtk_phy_instance *instance)
{
struct device *dev = &instance->phy->dev;
int ret = 0;
/* tphy v1 doesn't support sw efuse, skip it */
if (!tphy->pdata->sw_efuse_supported) {
instance->efuse_sw_en = 0;
return 0;
}
/* software efuse is optional */
instance->efuse_sw_en = device_property_read_bool(dev, "nvmem-cells");
if (!instance->efuse_sw_en)
return 0;
switch (instance->type) {
case PHY_TYPE_USB2:
ret = nvmem_cell_read_variable_le_u32(dev, "intr", &instance->efuse_intr);
if (ret) {
dev_err(dev, "fail to get u2 intr efuse, %d\n", ret);
break;
}
/* no efuse, ignore it */
if (!instance->efuse_intr) {
dev_warn(dev, "no u2 intr efuse, but dts enable it\n");
instance->efuse_sw_en = 0;
break;
}
dev_dbg(dev, "u2 efuse - intr %x\n", instance->efuse_intr);
break;
case PHY_TYPE_USB3:
case PHY_TYPE_PCIE:
ret = nvmem_cell_read_variable_le_u32(dev, "intr", &instance->efuse_intr);
if (ret) {
dev_err(dev, "fail to get u3 intr efuse, %d\n", ret);
break;
}
ret = nvmem_cell_read_variable_le_u32(dev, "rx_imp", &instance->efuse_rx_imp);
if (ret) {
dev_err(dev, "fail to get u3 rx_imp efuse, %d\n", ret);
break;
}
ret = nvmem_cell_read_variable_le_u32(dev, "tx_imp", &instance->efuse_tx_imp);
if (ret) {
dev_err(dev, "fail to get u3 tx_imp efuse, %d\n", ret);
break;
}
/* no efuse, ignore it */
if (!instance->efuse_intr &&
!instance->efuse_rx_imp &&
!instance->efuse_tx_imp) {
dev_warn(dev, "no u3 intr efuse, but dts enable it\n");
instance->efuse_sw_en = 0;
break;
}
dev_dbg(dev, "u3 efuse - intr %x, rx_imp %x, tx_imp %x\n",
instance->efuse_intr, instance->efuse_rx_imp, instance->efuse_tx_imp);
break;
default:
dev_err(dev, "no sw efuse for type %d\n", instance->type);
ret = -EINVAL;
}
return ret;
}
static void phy_efuse_set(struct mtk_phy_instance *instance)
{
struct device *dev = &instance->phy->dev;
struct u2phy_banks *u2_banks = &instance->u2_banks;
struct u3phy_banks *u3_banks = &instance->u3_banks;
if (!instance->efuse_sw_en)
return;
switch (instance->type) {
case PHY_TYPE_USB2:
mtk_phy_set_bits(u2_banks->misc + U3P_MISC_REG1, MR1_EFUSE_AUTO_LOAD_DIS);
mtk_phy_update_field(u2_banks->com + U3P_USBPHYACR1, PA1_RG_INTR_CAL,
instance->efuse_intr);
break;
case PHY_TYPE_USB3:
case PHY_TYPE_PCIE:
mtk_phy_set_bits(u3_banks->phyd + U3P_U3_PHYD_RSV, P3D_RG_EFUSE_AUTO_LOAD_DIS);
mtk_phy_update_field(u3_banks->phyd + U3P_U3_PHYD_IMPCAL0, P3D_RG_TX_IMPEL,
instance->efuse_tx_imp);
mtk_phy_set_bits(u3_banks->phyd + U3P_U3_PHYD_IMPCAL0, P3D_RG_FORCE_TX_IMPEL);
mtk_phy_update_field(u3_banks->phyd + U3P_U3_PHYD_IMPCAL1, P3D_RG_RX_IMPEL,
instance->efuse_rx_imp);
mtk_phy_set_bits(u3_banks->phyd + U3P_U3_PHYD_IMPCAL1, P3D_RG_FORCE_RX_IMPEL);
mtk_phy_update_field(u3_banks->phya + U3P_U3_PHYA_REG0, P3A_RG_IEXT_INTR,
instance->efuse_intr);
break;
default:
dev_warn(dev, "no sw efuse for type %d\n", instance->type);
break;
}
}
static int mtk_phy_init(struct phy *phy)
{
struct mtk_phy_instance *instance = phy_get_drvdata(phy);
struct mtk_tphy *tphy = dev_get_drvdata(phy->dev.parent);
int ret;
ret = clk_bulk_prepare_enable(TPHY_CLKS_CNT, instance->clks);
if (ret)
return ret;
phy_efuse_set(instance);
switch (instance->type) {
case PHY_TYPE_USB2:
u2_phy_instance_init(tphy, instance);
u2_phy_props_set(tphy, instance);
break;
case PHY_TYPE_USB3:
u3_phy_instance_init(tphy, instance);
break;
case PHY_TYPE_PCIE:
pcie_phy_instance_init(tphy, instance);
break;
case PHY_TYPE_SATA:
sata_phy_instance_init(tphy, instance);
break;
case PHY_TYPE_SGMII:
/* nothing to do, only used to set type */
break;
default:
dev_err(tphy->dev, "incompatible PHY type\n");
clk_bulk_disable_unprepare(TPHY_CLKS_CNT, instance->clks);
return -EINVAL;
}
return 0;
}
static int mtk_phy_power_on(struct phy *phy)
{
struct mtk_phy_instance *instance = phy_get_drvdata(phy);
struct mtk_tphy *tphy = dev_get_drvdata(phy->dev.parent);
if (instance->type == PHY_TYPE_USB2) {
u2_phy_instance_power_on(tphy, instance);
hs_slew_rate_calibrate(tphy, instance);
} else if (instance->type == PHY_TYPE_PCIE) {
pcie_phy_instance_power_on(tphy, instance);
}
return 0;
}
static int mtk_phy_power_off(struct phy *phy)
{
struct mtk_phy_instance *instance = phy_get_drvdata(phy);
struct mtk_tphy *tphy = dev_get_drvdata(phy->dev.parent);
if (instance->type == PHY_TYPE_USB2)
u2_phy_instance_power_off(tphy, instance);
else if (instance->type == PHY_TYPE_PCIE)
pcie_phy_instance_power_off(tphy, instance);
return 0;
}
static int mtk_phy_exit(struct phy *phy)
{
struct mtk_phy_instance *instance = phy_get_drvdata(phy);
struct mtk_tphy *tphy = dev_get_drvdata(phy->dev.parent);
if (instance->type == PHY_TYPE_USB2)
u2_phy_instance_exit(tphy, instance);
clk_bulk_disable_unprepare(TPHY_CLKS_CNT, instance->clks);
return 0;
}
static int mtk_phy_set_mode(struct phy *phy, enum phy_mode mode, int submode)
{
struct mtk_phy_instance *instance = phy_get_drvdata(phy);
struct mtk_tphy *tphy = dev_get_drvdata(phy->dev.parent);
if (instance->type == PHY_TYPE_USB2)
u2_phy_instance_set_mode(tphy, instance, mode);
return 0;
}
static struct phy *mtk_phy_xlate(struct device *dev,
struct of_phandle_args *args)
{
struct mtk_tphy *tphy = dev_get_drvdata(dev);
struct mtk_phy_instance *instance = NULL;
struct device_node *phy_np = args->np;
int index;
int ret;
if (args->args_count != 1) {
dev_err(dev, "invalid number of cells in 'phy' property\n");
return ERR_PTR(-EINVAL);
}
for (index = 0; index < tphy->nphys; index++)
if (phy_np == tphy->phys[index]->phy->dev.of_node) {
instance = tphy->phys[index];
break;
}
if (!instance) {
dev_err(dev, "failed to find appropriate phy\n");
return ERR_PTR(-EINVAL);
}
instance->type = args->args[0];
if (!(instance->type == PHY_TYPE_USB2 ||
instance->type == PHY_TYPE_USB3 ||
instance->type == PHY_TYPE_PCIE ||
instance->type == PHY_TYPE_SATA ||
instance->type == PHY_TYPE_SGMII)) {
dev_err(dev, "unsupported device type: %d\n", instance->type);
return ERR_PTR(-EINVAL);
}
switch (tphy->pdata->version) {
case MTK_PHY_V1:
phy_v1_banks_init(tphy, instance);
break;
case MTK_PHY_V2:
case MTK_PHY_V3:
phy_v2_banks_init(tphy, instance);
break;
default:
dev_err(dev, "phy version is not supported\n");
return ERR_PTR(-EINVAL);
}
ret = phy_efuse_get(tphy, instance);
if (ret)
return ERR_PTR(ret);
phy_parse_property(tphy, instance);
phy_type_set(instance);
phy_debugfs_init(instance);
return instance->phy;
}
static const struct phy_ops mtk_tphy_ops = {
.init = mtk_phy_init,
.exit = mtk_phy_exit,
.power_on = mtk_phy_power_on,
.power_off = mtk_phy_power_off,
.set_mode = mtk_phy_set_mode,
.owner = THIS_MODULE,
};
static const struct mtk_phy_pdata tphy_v1_pdata = {
.avoid_rx_sen_degradation = false,
.version = MTK_PHY_V1,
};
static const struct mtk_phy_pdata tphy_v2_pdata = {
.avoid_rx_sen_degradation = false,
.sw_efuse_supported = true,
.version = MTK_PHY_V2,
};
static const struct mtk_phy_pdata tphy_v3_pdata = {
.sw_efuse_supported = true,
.version = MTK_PHY_V3,
};
static const struct mtk_phy_pdata mt8173_pdata = {
.avoid_rx_sen_degradation = true,
.version = MTK_PHY_V1,
};
static const struct mtk_phy_pdata mt8195_pdata = {
.sw_pll_48m_to_26m = true,
.sw_efuse_supported = true,
.version = MTK_PHY_V3,
};
static const struct of_device_id mtk_tphy_id_table[] = {
{ .compatible = "mediatek,mt2701-u3phy", .data = &tphy_v1_pdata },
{ .compatible = "mediatek,mt2712-u3phy", .data = &tphy_v2_pdata },
{ .compatible = "mediatek,mt8173-u3phy", .data = &mt8173_pdata },
{ .compatible = "mediatek,mt8195-tphy", .data = &mt8195_pdata },
{ .compatible = "mediatek,generic-tphy-v1", .data = &tphy_v1_pdata },
{ .compatible = "mediatek,generic-tphy-v2", .data = &tphy_v2_pdata },
{ .compatible = "mediatek,generic-tphy-v3", .data = &tphy_v3_pdata },
{ },
};
MODULE_DEVICE_TABLE(of, mtk_tphy_id_table);
static int mtk_tphy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct device_node *child_np;
struct phy_provider *provider;
struct resource *sif_res;
struct mtk_tphy *tphy;
struct resource res;
int port, retval;
tphy = devm_kzalloc(dev, sizeof(*tphy), GFP_KERNEL);
if (!tphy)
return -ENOMEM;
tphy->pdata = of_device_get_match_data(dev);
if (!tphy->pdata)
return -EINVAL;
tphy->nphys = of_get_child_count(np);
tphy->phys = devm_kcalloc(dev, tphy->nphys,
sizeof(*tphy->phys), GFP_KERNEL);
if (!tphy->phys)
return -ENOMEM;
tphy->dev = dev;
platform_set_drvdata(pdev, tphy);
sif_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
/* the V1 SATA PHY doesn't need the SIF region unless it is shared with PCIe or USB */
if (sif_res && tphy->pdata->version == MTK_PHY_V1) {
/* get banks shared by multiple phys */
tphy->sif_base = devm_ioremap_resource(dev, sif_res);
if (IS_ERR(tphy->sif_base)) {
dev_err(dev, "failed to remap sif regs\n");
return PTR_ERR(tphy->sif_base);
}
}
if (tphy->pdata->version < MTK_PHY_V3) {
tphy->src_ref_clk = U3P_REF_CLK;
tphy->src_coef = U3P_SLEW_RATE_COEF;
/* update slew rate calibration parameters if they are provided */
device_property_read_u32(dev, "mediatek,src-ref-clk-mhz",
&tphy->src_ref_clk);
device_property_read_u32(dev, "mediatek,src-coef",
&tphy->src_coef);
}
port = 0;
for_each_child_of_node(np, child_np) {
struct mtk_phy_instance *instance;
struct clk_bulk_data *clks;
struct device *subdev;
struct phy *phy;
instance = devm_kzalloc(dev, sizeof(*instance), GFP_KERNEL);
if (!instance) {
retval = -ENOMEM;
goto put_child;
}
tphy->phys[port] = instance;
phy = devm_phy_create(dev, child_np, &mtk_tphy_ops);
if (IS_ERR(phy)) {
dev_err(dev, "failed to create phy\n");
retval = PTR_ERR(phy);
goto put_child;
}
subdev = &phy->dev;
retval = of_address_to_resource(child_np, 0, &res);
if (retval) {
dev_err(subdev, "failed to get address resource(id-%d)\n",
port);
goto put_child;
}
instance->port_base = devm_ioremap_resource(subdev, &res);
if (IS_ERR(instance->port_base)) {
retval = PTR_ERR(instance->port_base);
goto put_child;
}
instance->phy = phy;
instance->index = port;
phy_set_drvdata(phy, instance);
port++;
clks = instance->clks;
clks[0].id = "ref"; /* digital (& analog) clock */
clks[1].id = "da_ref"; /* analog clock */
retval = devm_clk_bulk_get_optional(subdev, TPHY_CLKS_CNT, clks);
if (retval)
goto put_child;
retval = phy_type_syscon_get(instance, child_np);
if (retval)
goto put_child;
}
provider = devm_of_phy_provider_register(dev, mtk_phy_xlate);
return PTR_ERR_OR_ZERO(provider);
put_child:
of_node_put(child_np);
return retval;
}
static struct platform_driver mtk_tphy_driver = {
.probe = mtk_tphy_probe,
.driver = {
.name = "mtk-tphy",
.of_match_table = mtk_tphy_id_table,
},
};
module_platform_driver(mtk_tphy_driver);
MODULE_AUTHOR("Chunfeng Yun <[email protected]>");
MODULE_DESCRIPTION("MediaTek T-PHY driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/phy/mediatek/phy-mtk-tphy.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2018 MediaTek Inc.
* Author: Chunhui Dai <[email protected]>
*/
#include "phy-mtk-hdmi.h"
#include "phy-mtk-io.h"
#define HDMI_CON0 0x00
#define RG_HDMITX_DRV_IBIAS_MASK GENMASK(5, 0)
#define RG_HDMITX_EN_SER_MASK GENMASK(15, 12)
#define RG_HDMITX_EN_SLDO_MASK GENMASK(19, 16)
#define RG_HDMITX_EN_PRED_MASK GENMASK(23, 20)
#define RG_HDMITX_EN_IMP_MASK GENMASK(27, 24)
#define RG_HDMITX_EN_DRV_MASK GENMASK(31, 28)
#define HDMI_CON1 0x04
#define RG_HDMITX_PRED_IBIAS_MASK GENMASK(21, 18)
#define RG_HDMITX_PRED_IMP BIT(22)
#define RG_HDMITX_DRV_IMP_MASK GENMASK(31, 26)
#define HDMI_CON2 0x08
#define RG_HDMITX_EN_TX_CKLDO BIT(0)
#define RG_HDMITX_EN_TX_POSDIV BIT(1)
#define RG_HDMITX_TX_POSDIV_MASK GENMASK(4, 3)
#define RG_HDMITX_EN_MBIAS BIT(6)
#define RG_HDMITX_MBIAS_LPF_EN BIT(7)
#define HDMI_CON4 0x10
#define RG_HDMITX_RESERVE_MASK GENMASK(31, 0)
#define HDMI_CON6 0x18
#define RG_HTPLL_BR_MASK GENMASK(1, 0)
#define RG_HTPLL_BC_MASK GENMASK(3, 2)
#define RG_HTPLL_BP_MASK GENMASK(7, 4)
#define RG_HTPLL_IR_MASK GENMASK(11, 8)
#define RG_HTPLL_IC_MASK GENMASK(15, 12)
#define RG_HTPLL_POSDIV_MASK GENMASK(17, 16)
#define RG_HTPLL_PREDIV_MASK GENMASK(19, 18)
#define RG_HTPLL_FBKSEL_MASK GENMASK(21, 20)
#define RG_HTPLL_RLH_EN BIT(22)
#define RG_HTPLL_FBKDIV_MASK GENMASK(30, 24)
#define RG_HTPLL_EN BIT(31)
#define HDMI_CON7 0x1c
#define RG_HTPLL_AUTOK_EN BIT(23)
#define RG_HTPLL_DIVEN_MASK GENMASK(30, 28)
static int mtk_hdmi_pll_prepare(struct clk_hw *hw)
{
struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
void __iomem *base = hdmi_phy->regs;
mtk_phy_set_bits(base + HDMI_CON7, RG_HTPLL_AUTOK_EN);
mtk_phy_clear_bits(base + HDMI_CON6, RG_HTPLL_RLH_EN);
mtk_phy_set_bits(base + HDMI_CON6, RG_HTPLL_POSDIV_MASK);
mtk_phy_set_bits(base + HDMI_CON2, RG_HDMITX_EN_MBIAS);
usleep_range(80, 100);
mtk_phy_set_bits(base + HDMI_CON6, RG_HTPLL_EN);
mtk_phy_set_bits(base + HDMI_CON2, RG_HDMITX_EN_TX_CKLDO);
mtk_phy_set_bits(base + HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
usleep_range(80, 100);
mtk_phy_set_bits(base + HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
mtk_phy_set_bits(base + HDMI_CON0, RG_HDMITX_EN_SER_MASK);
mtk_phy_set_bits(base + HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
mtk_phy_set_bits(base + HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
usleep_range(80, 100);
return 0;
}
static void mtk_hdmi_pll_unprepare(struct clk_hw *hw)
{
struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
void __iomem *base = hdmi_phy->regs;
mtk_phy_clear_bits(base + HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
mtk_phy_clear_bits(base + HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
mtk_phy_clear_bits(base + HDMI_CON0, RG_HDMITX_EN_SER_MASK);
mtk_phy_clear_bits(base + HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
usleep_range(80, 100);
mtk_phy_clear_bits(base + HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
mtk_phy_clear_bits(base + HDMI_CON2, RG_HDMITX_EN_TX_CKLDO);
mtk_phy_clear_bits(base + HDMI_CON6, RG_HTPLL_EN);
usleep_range(80, 100);
mtk_phy_clear_bits(base + HDMI_CON2, RG_HDMITX_EN_MBIAS);
mtk_phy_clear_bits(base + HDMI_CON6, RG_HTPLL_POSDIV_MASK);
mtk_phy_clear_bits(base + HDMI_CON6, RG_HTPLL_RLH_EN);
mtk_phy_clear_bits(base + HDMI_CON7, RG_HTPLL_AUTOK_EN);
usleep_range(80, 100);
}
static long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
return rate;
}
static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
void __iomem *base = hdmi_phy->regs;
u32 pos_div;
if (rate <= 64000000)
pos_div = 3;
else if (rate <= 128000000)
pos_div = 2;
else
pos_div = 1;
mtk_phy_set_bits(base + HDMI_CON6, RG_HTPLL_PREDIV_MASK);
mtk_phy_set_bits(base + HDMI_CON6, RG_HTPLL_POSDIV_MASK);
mtk_phy_set_bits(base + HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
mtk_phy_update_field(base + HDMI_CON6, RG_HTPLL_IC_MASK, 0x1);
mtk_phy_update_field(base + HDMI_CON6, RG_HTPLL_IR_MASK, 0x1);
mtk_phy_update_field(base + HDMI_CON2, RG_HDMITX_TX_POSDIV_MASK, pos_div);
mtk_phy_update_field(base + HDMI_CON6, RG_HTPLL_FBKSEL_MASK, 1);
mtk_phy_update_field(base + HDMI_CON6, RG_HTPLL_FBKDIV_MASK, 19);
mtk_phy_update_field(base + HDMI_CON7, RG_HTPLL_DIVEN_MASK, 0x2);
mtk_phy_update_field(base + HDMI_CON6, RG_HTPLL_BP_MASK, 0xc);
mtk_phy_update_field(base + HDMI_CON6, RG_HTPLL_BC_MASK, 0x2);
mtk_phy_update_field(base + HDMI_CON6, RG_HTPLL_BR_MASK, 0x1);
mtk_phy_clear_bits(base + HDMI_CON1, RG_HDMITX_PRED_IMP);
mtk_phy_update_field(base + HDMI_CON1, RG_HDMITX_PRED_IBIAS_MASK, 0x3);
mtk_phy_clear_bits(base + HDMI_CON0, RG_HDMITX_EN_IMP_MASK);
mtk_phy_update_field(base + HDMI_CON1, RG_HDMITX_DRV_IMP_MASK, 0x28);
mtk_phy_update_field(base + HDMI_CON4, RG_HDMITX_RESERVE_MASK, 0x28);
mtk_phy_update_field(base + HDMI_CON0, RG_HDMITX_DRV_IBIAS_MASK, 0xa);
return 0;
}
static unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
unsigned long out_rate, val;
u32 tmp;
tmp = readl(hdmi_phy->regs + HDMI_CON6);
val = FIELD_GET(RG_HTPLL_PREDIV_MASK, tmp);
switch (val) {
case 0x00:
out_rate = parent_rate;
break;
case 0x01:
out_rate = parent_rate / 2;
break;
default:
out_rate = parent_rate / 4;
break;
}
val = FIELD_GET(RG_HTPLL_FBKDIV_MASK, tmp);
out_rate *= (val + 1) * 2;
tmp = readl(hdmi_phy->regs + HDMI_CON2);
val = FIELD_GET(RG_HDMITX_TX_POSDIV_MASK, tmp);
out_rate >>= val;
if (tmp & RG_HDMITX_EN_TX_POSDIV)
out_rate /= 5;
return out_rate;
}
static const struct clk_ops mtk_hdmi_phy_pll_ops = {
.prepare = mtk_hdmi_pll_prepare,
.unprepare = mtk_hdmi_pll_unprepare,
.set_rate = mtk_hdmi_pll_set_rate,
.round_rate = mtk_hdmi_pll_round_rate,
.recalc_rate = mtk_hdmi_pll_recalc_rate,
};
static void mtk_hdmi_phy_enable_tmds(struct mtk_hdmi_phy *hdmi_phy)
{
void __iomem *base = hdmi_phy->regs;
mtk_phy_set_bits(base + HDMI_CON7, RG_HTPLL_AUTOK_EN);
mtk_phy_clear_bits(base + HDMI_CON6, RG_HTPLL_RLH_EN);
mtk_phy_set_bits(base + HDMI_CON6, RG_HTPLL_POSDIV_MASK);
mtk_phy_set_bits(base + HDMI_CON2, RG_HDMITX_EN_MBIAS);
usleep_range(80, 100);
mtk_phy_set_bits(base + HDMI_CON6, RG_HTPLL_EN);
mtk_phy_set_bits(base + HDMI_CON2, RG_HDMITX_EN_TX_CKLDO);
mtk_phy_set_bits(base + HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
usleep_range(80, 100);
mtk_phy_set_bits(base + HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
mtk_phy_set_bits(base + HDMI_CON0, RG_HDMITX_EN_SER_MASK);
mtk_phy_set_bits(base + HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
mtk_phy_set_bits(base + HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
usleep_range(80, 100);
}
static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy)
{
void __iomem *base = hdmi_phy->regs;
mtk_phy_clear_bits(base + HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
mtk_phy_clear_bits(base + HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
mtk_phy_clear_bits(base + HDMI_CON0, RG_HDMITX_EN_SER_MASK);
mtk_phy_clear_bits(base + HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
usleep_range(80, 100);
mtk_phy_clear_bits(base + HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
mtk_phy_clear_bits(base + HDMI_CON2, RG_HDMITX_EN_TX_CKLDO);
mtk_phy_clear_bits(base + HDMI_CON6, RG_HTPLL_EN);
usleep_range(80, 100);
mtk_phy_clear_bits(base + HDMI_CON2, RG_HDMITX_EN_MBIAS);
mtk_phy_clear_bits(base + HDMI_CON6, RG_HTPLL_POSDIV_MASK);
mtk_phy_clear_bits(base + HDMI_CON6, RG_HTPLL_RLH_EN);
mtk_phy_clear_bits(base + HDMI_CON7, RG_HTPLL_AUTOK_EN);
usleep_range(80, 100);
}
struct mtk_hdmi_phy_conf mtk_hdmi_phy_2701_conf = {
.flags = CLK_SET_RATE_GATE,
.pll_default_off = true,
.hdmi_phy_clk_ops = &mtk_hdmi_phy_pll_ops,
.hdmi_phy_enable_tmds = mtk_hdmi_phy_enable_tmds,
.hdmi_phy_disable_tmds = mtk_hdmi_phy_disable_tmds,
};
MODULE_AUTHOR("Chunhui Dai <[email protected]>");
MODULE_DESCRIPTION("MediaTek HDMI PHY Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/phy/mediatek/phy-mtk-hdmi-mt2701.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014 MediaTek Inc.
* Author: Jie Qiu <[email protected]>
*/
#include "phy-mtk-hdmi.h"
#include "phy-mtk-io.h"
#define HDMI_CON0 0x00
#define RG_HDMITX_PLL_EN BIT(31)
#define RG_HDMITX_PLL_FBKDIV GENMASK(30, 24)
#define RG_HDMITX_PLL_FBKSEL GENMASK(23, 22)
#define RG_HDMITX_PLL_PREDIV GENMASK(21, 20)
#define RG_HDMITX_PLL_POSDIV GENMASK(19, 18)
#define RG_HDMITX_PLL_RST_DLY GENMASK(17, 16)
#define RG_HDMITX_PLL_IR GENMASK(15, 12)
#define RG_HDMITX_PLL_IC GENMASK(11, 8)
#define RG_HDMITX_PLL_BP GENMASK(7, 4)
#define RG_HDMITX_PLL_BR GENMASK(3, 2)
#define RG_HDMITX_PLL_BC GENMASK(1, 0)
#define HDMI_CON1 0x04
#define RG_HDMITX_PLL_DIVEN GENMASK(31, 29)
#define RG_HDMITX_PLL_AUTOK_EN BIT(28)
#define RG_HDMITX_PLL_AUTOK_KF GENMASK(27, 26)
#define RG_HDMITX_PLL_AUTOK_KS GENMASK(25, 24)
#define RG_HDMITX_PLL_AUTOK_LOAD BIT(23)
#define RG_HDMITX_PLL_BAND GENMASK(21, 16)
#define RG_HDMITX_PLL_REF_SEL BIT(15)
#define RG_HDMITX_PLL_BIAS_EN BIT(14)
#define RG_HDMITX_PLL_BIAS_LPF_EN BIT(13)
#define RG_HDMITX_PLL_TXDIV_EN BIT(12)
#define RG_HDMITX_PLL_TXDIV GENMASK(11, 10)
#define RG_HDMITX_PLL_LVROD_EN BIT(9)
#define RG_HDMITX_PLL_MONVC_EN BIT(8)
#define RG_HDMITX_PLL_MONCK_EN BIT(7)
#define RG_HDMITX_PLL_MONREF_EN BIT(6)
#define RG_HDMITX_PLL_TST_EN BIT(5)
#define RG_HDMITX_PLL_TST_CK_EN BIT(4)
#define RG_HDMITX_PLL_TST_SEL GENMASK(3, 0)
#define HDMI_CON2 0x08
#define RGS_HDMITX_PLL_AUTOK_BAND GENMASK(14, 8)
#define RGS_HDMITX_PLL_AUTOK_FAIL BIT(1)
#define RG_HDMITX_EN_TX_CKLDO BIT(0)
#define HDMI_CON3 0x0c
#define RG_HDMITX_SER_EN GENMASK(31, 28)
#define RG_HDMITX_PRD_EN GENMASK(27, 24)
#define RG_HDMITX_PRD_IMP_EN GENMASK(23, 20)
#define RG_HDMITX_DRV_EN GENMASK(19, 16)
#define RG_HDMITX_DRV_IMP_EN GENMASK(15, 12)
#define RG_HDMITX_MHLCK_FORCE BIT(10)
#define RG_HDMITX_MHLCK_PPIX_EN BIT(9)
#define RG_HDMITX_MHLCK_EN BIT(8)
#define RG_HDMITX_SER_DIN_SEL GENMASK(7, 4)
#define RG_HDMITX_SER_5T1_BIST_EN BIT(3)
#define RG_HDMITX_SER_BIST_TOG BIT(2)
#define RG_HDMITX_SER_DIN_TOG BIT(1)
#define RG_HDMITX_SER_CLKDIG_INV BIT(0)
#define HDMI_CON4 0x10
#define RG_HDMITX_PRD_IBIAS_CLK GENMASK(27, 24)
#define RG_HDMITX_PRD_IBIAS_D2 GENMASK(19, 16)
#define RG_HDMITX_PRD_IBIAS_D1 GENMASK(11, 8)
#define RG_HDMITX_PRD_IBIAS_D0 GENMASK(3, 0)
#define HDMI_CON5 0x14
#define RG_HDMITX_DRV_IBIAS_CLK GENMASK(29, 24)
#define RG_HDMITX_DRV_IBIAS_D2 GENMASK(21, 16)
#define RG_HDMITX_DRV_IBIAS_D1 GENMASK(13, 8)
#define RG_HDMITX_DRV_IBIAS_D0 GENMASK(5, 0)
#define HDMI_CON6 0x18
#define RG_HDMITX_DRV_IMP_CLK GENMASK(29, 24)
#define RG_HDMITX_DRV_IMP_D2 GENMASK(21, 16)
#define RG_HDMITX_DRV_IMP_D1 GENMASK(13, 8)
#define RG_HDMITX_DRV_IMP_D0 GENMASK(5, 0)
#define HDMI_CON7 0x1c
#define RG_HDMITX_MHLCK_DRV_IBIAS GENMASK(31, 27)
#define RG_HDMITX_SER_DIN GENMASK(25, 16)
#define RG_HDMITX_CHLDC_TST GENMASK(15, 12)
#define RG_HDMITX_CHLCK_TST GENMASK(11, 8)
#define RG_HDMITX_RESERVE GENMASK(7, 0)
#define HDMI_CON8 0x20
#define RGS_HDMITX_2T1_LEV GENMASK(19, 16)
#define RGS_HDMITX_2T1_EDG GENMASK(15, 12)
#define RGS_HDMITX_5T1_LEV GENMASK(11, 8)
#define RGS_HDMITX_5T1_EDG GENMASK(7, 4)
#define RGS_HDMITX_PLUG_TST BIT(0)
static int mtk_hdmi_pll_prepare(struct clk_hw *hw)
{
struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
void __iomem *base = hdmi_phy->regs;
mtk_phy_set_bits(base + HDMI_CON1, RG_HDMITX_PLL_AUTOK_EN);
mtk_phy_set_bits(base + HDMI_CON0, RG_HDMITX_PLL_POSDIV);
mtk_phy_clear_bits(base + HDMI_CON3, RG_HDMITX_MHLCK_EN);
mtk_phy_set_bits(base + HDMI_CON1, RG_HDMITX_PLL_BIAS_EN);
usleep_range(100, 150);
mtk_phy_set_bits(base + HDMI_CON0, RG_HDMITX_PLL_EN);
usleep_range(100, 150);
mtk_phy_set_bits(base + HDMI_CON1, RG_HDMITX_PLL_BIAS_LPF_EN);
mtk_phy_set_bits(base + HDMI_CON1, RG_HDMITX_PLL_TXDIV_EN);
return 0;
}
static void mtk_hdmi_pll_unprepare(struct clk_hw *hw)
{
struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
void __iomem *base = hdmi_phy->regs;
mtk_phy_clear_bits(base + HDMI_CON1, RG_HDMITX_PLL_TXDIV_EN);
mtk_phy_clear_bits(base + HDMI_CON1, RG_HDMITX_PLL_BIAS_LPF_EN);
usleep_range(100, 150);
mtk_phy_clear_bits(base + HDMI_CON0, RG_HDMITX_PLL_EN);
usleep_range(100, 150);
mtk_phy_clear_bits(base + HDMI_CON1, RG_HDMITX_PLL_BIAS_EN);
mtk_phy_clear_bits(base + HDMI_CON0, RG_HDMITX_PLL_POSDIV);
mtk_phy_clear_bits(base + HDMI_CON1, RG_HDMITX_PLL_AUTOK_EN);
usleep_range(100, 150);
}
static long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
hdmi_phy->pll_rate = rate;
if (rate <= 74250000)
*parent_rate = rate;
else
*parent_rate = rate / 2;
return rate;
}
static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
void __iomem *base = hdmi_phy->regs;
unsigned int pre_div;
unsigned int div;
unsigned int pre_ibias;
unsigned int hdmi_ibias;
unsigned int imp_en;
dev_dbg(hdmi_phy->dev, "%s: %lu Hz, parent: %lu Hz\n", __func__,
rate, parent_rate);
if (rate <= 27000000) {
pre_div = 0;
div = 3;
} else if (rate <= 74250000) {
pre_div = 1;
div = 2;
} else {
pre_div = 1;
div = 1;
}
mtk_phy_update_field(base + HDMI_CON0, RG_HDMITX_PLL_PREDIV, pre_div);
mtk_phy_set_bits(base + HDMI_CON0, RG_HDMITX_PLL_POSDIV);
mtk_phy_update_bits(base + HDMI_CON0,
RG_HDMITX_PLL_IC | RG_HDMITX_PLL_IR,
FIELD_PREP(RG_HDMITX_PLL_IC, 0x1) |
FIELD_PREP(RG_HDMITX_PLL_IR, 0x1));
mtk_phy_update_field(base + HDMI_CON1, RG_HDMITX_PLL_TXDIV, div);
mtk_phy_update_bits(base + HDMI_CON0,
RG_HDMITX_PLL_FBKSEL | RG_HDMITX_PLL_FBKDIV,
FIELD_PREP(RG_HDMITX_PLL_FBKSEL, 0x1) |
FIELD_PREP(RG_HDMITX_PLL_FBKDIV, 19));
mtk_phy_update_field(base + HDMI_CON1, RG_HDMITX_PLL_DIVEN, 0x2);
mtk_phy_update_bits(base + HDMI_CON0,
RG_HDMITX_PLL_BP | RG_HDMITX_PLL_BC |
RG_HDMITX_PLL_BR,
FIELD_PREP(RG_HDMITX_PLL_BP, 0xc) |
FIELD_PREP(RG_HDMITX_PLL_BC, 0x2) |
FIELD_PREP(RG_HDMITX_PLL_BR, 0x1));
if (rate < 165000000) {
mtk_phy_clear_bits(base + HDMI_CON3, RG_HDMITX_PRD_IMP_EN);
pre_ibias = 0x3;
imp_en = 0x0;
hdmi_ibias = hdmi_phy->ibias;
} else {
mtk_phy_set_bits(base + HDMI_CON3, RG_HDMITX_PRD_IMP_EN);
pre_ibias = 0x6;
imp_en = 0xf;
hdmi_ibias = hdmi_phy->ibias_up;
}
mtk_phy_update_bits(base + HDMI_CON4,
RG_HDMITX_PRD_IBIAS_CLK | RG_HDMITX_PRD_IBIAS_D2 |
RG_HDMITX_PRD_IBIAS_D1 | RG_HDMITX_PRD_IBIAS_D0,
FIELD_PREP(RG_HDMITX_PRD_IBIAS_CLK, pre_ibias) |
FIELD_PREP(RG_HDMITX_PRD_IBIAS_D2, pre_ibias) |
FIELD_PREP(RG_HDMITX_PRD_IBIAS_D1, pre_ibias) |
FIELD_PREP(RG_HDMITX_PRD_IBIAS_D0, pre_ibias));
mtk_phy_update_field(base + HDMI_CON3, RG_HDMITX_DRV_IMP_EN, imp_en);
mtk_phy_update_bits(base + HDMI_CON6,
RG_HDMITX_DRV_IMP_CLK | RG_HDMITX_DRV_IMP_D2 |
RG_HDMITX_DRV_IMP_D1 | RG_HDMITX_DRV_IMP_D0,
FIELD_PREP(RG_HDMITX_DRV_IMP_CLK, hdmi_phy->drv_imp_clk) |
FIELD_PREP(RG_HDMITX_DRV_IMP_D2, hdmi_phy->drv_imp_d2) |
FIELD_PREP(RG_HDMITX_DRV_IMP_D1, hdmi_phy->drv_imp_d1) |
FIELD_PREP(RG_HDMITX_DRV_IMP_D0, hdmi_phy->drv_imp_d0));
mtk_phy_update_bits(base + HDMI_CON5,
RG_HDMITX_DRV_IBIAS_CLK | RG_HDMITX_DRV_IBIAS_D2 |
RG_HDMITX_DRV_IBIAS_D1 | RG_HDMITX_DRV_IBIAS_D0,
FIELD_PREP(RG_HDMITX_DRV_IBIAS_CLK, hdmi_ibias) |
FIELD_PREP(RG_HDMITX_DRV_IBIAS_D2, hdmi_ibias) |
FIELD_PREP(RG_HDMITX_DRV_IBIAS_D1, hdmi_ibias) |
FIELD_PREP(RG_HDMITX_DRV_IBIAS_D0, hdmi_ibias));
return 0;
}
static unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
return hdmi_phy->pll_rate;
}
static const struct clk_ops mtk_hdmi_phy_pll_ops = {
.prepare = mtk_hdmi_pll_prepare,
.unprepare = mtk_hdmi_pll_unprepare,
.set_rate = mtk_hdmi_pll_set_rate,
.round_rate = mtk_hdmi_pll_round_rate,
.recalc_rate = mtk_hdmi_pll_recalc_rate,
};
static void mtk_hdmi_phy_enable_tmds(struct mtk_hdmi_phy *hdmi_phy)
{
mtk_phy_set_bits(hdmi_phy->regs + HDMI_CON3,
RG_HDMITX_SER_EN | RG_HDMITX_PRD_EN |
RG_HDMITX_DRV_EN);
usleep_range(100, 150);
}
static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy)
{
mtk_phy_clear_bits(hdmi_phy->regs + HDMI_CON3,
RG_HDMITX_DRV_EN | RG_HDMITX_PRD_EN |
RG_HDMITX_SER_EN);
}
struct mtk_hdmi_phy_conf mtk_hdmi_phy_8173_conf = {
.flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE,
.hdmi_phy_clk_ops = &mtk_hdmi_phy_pll_ops,
.hdmi_phy_enable_tmds = mtk_hdmi_phy_enable_tmds,
.hdmi_phy_disable_tmds = mtk_hdmi_phy_disable_tmds,
};
MODULE_AUTHOR("Jie Qiu <[email protected]>");
MODULE_DESCRIPTION("MediaTek MT8173 HDMI PHY Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/phy/mediatek/phy-mtk-hdmi-mt8173.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 MediaTek Inc.
* Author: Stanley Chu <[email protected]>
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include "phy-mtk-io.h"
/* mphy registers and offsets */
#define MP_GLB_DIG_8C 0x008C
#define FRC_PLL_ISO_EN BIT(8)
#define PLL_ISO_EN BIT(9)
#define FRC_FRC_PWR_ON BIT(10)
#define PLL_PWR_ON BIT(11)
#define MP_LN_DIG_RX_9C 0xA09C
#define FSM_DIFZ_FRC BIT(18)
#define MP_LN_DIG_RX_AC 0xA0AC
#define FRC_RX_SQ_EN BIT(0)
#define RX_SQ_EN BIT(1)
#define MP_LN_RX_44 0xB044
#define FRC_CDR_PWR_ON BIT(17)
#define CDR_PWR_ON BIT(18)
#define FRC_CDR_ISO_EN BIT(19)
#define CDR_ISO_EN BIT(20)
#define UFSPHY_CLKS_CNT 2
struct ufs_mtk_phy {
struct device *dev;
void __iomem *mmio;
struct clk_bulk_data clks[UFSPHY_CLKS_CNT];
};
static struct ufs_mtk_phy *get_ufs_mtk_phy(struct phy *generic_phy)
{
return (struct ufs_mtk_phy *)phy_get_drvdata(generic_phy);
}
static int ufs_mtk_phy_clk_init(struct ufs_mtk_phy *phy)
{
struct device *dev = phy->dev;
struct clk_bulk_data *clks = phy->clks;
clks[0].id = "unipro";
clks[1].id = "mp";
return devm_clk_bulk_get(dev, UFSPHY_CLKS_CNT, clks);
}
static void ufs_mtk_phy_set_active(struct ufs_mtk_phy *phy)
{
void __iomem *mmio = phy->mmio;
/* release DA_MP_PLL_PWR_ON */
mtk_phy_set_bits(mmio + MP_GLB_DIG_8C, PLL_PWR_ON);
mtk_phy_clear_bits(mmio + MP_GLB_DIG_8C, FRC_FRC_PWR_ON);
/* release DA_MP_PLL_ISO_EN */
mtk_phy_clear_bits(mmio + MP_GLB_DIG_8C, PLL_ISO_EN);
mtk_phy_clear_bits(mmio + MP_GLB_DIG_8C, FRC_PLL_ISO_EN);
/* release DA_MP_CDR_PWR_ON */
mtk_phy_set_bits(mmio + MP_LN_RX_44, CDR_PWR_ON);
mtk_phy_clear_bits(mmio + MP_LN_RX_44, FRC_CDR_PWR_ON);
/* release DA_MP_CDR_ISO_EN */
mtk_phy_clear_bits(mmio + MP_LN_RX_44, CDR_ISO_EN);
mtk_phy_clear_bits(mmio + MP_LN_RX_44, FRC_CDR_ISO_EN);
/* release DA_MP_RX0_SQ_EN */
mtk_phy_set_bits(mmio + MP_LN_DIG_RX_AC, RX_SQ_EN);
mtk_phy_clear_bits(mmio + MP_LN_DIG_RX_AC, FRC_RX_SQ_EN);
/* delay 1us to wait for DIFZ to become stable */
udelay(1);
/* release DIFZ */
mtk_phy_clear_bits(mmio + MP_LN_DIG_RX_9C, FSM_DIFZ_FRC);
}
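/* ufs_mtk_phy_set_deep_hibern() below is the mirror of the sequence above:
 * it forces DIFZ and walks the same controls in reverse order to put the
 * RX lane and PLL back into isolation/power-down
 */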
static void ufs_mtk_phy_set_deep_hibern(struct ufs_mtk_phy *phy)
{
void __iomem *mmio = phy->mmio;
/* force DIFZ */
mtk_phy_set_bits(mmio + MP_LN_DIG_RX_9C, FSM_DIFZ_FRC);
/* force DA_MP_RX0_SQ_EN */
mtk_phy_set_bits(mmio + MP_LN_DIG_RX_AC, FRC_RX_SQ_EN);
mtk_phy_clear_bits(mmio + MP_LN_DIG_RX_AC, RX_SQ_EN);
/* force DA_MP_CDR_ISO_EN */
mtk_phy_set_bits(mmio + MP_LN_RX_44, FRC_CDR_ISO_EN);
mtk_phy_set_bits(mmio + MP_LN_RX_44, CDR_ISO_EN);
/* force DA_MP_CDR_PWR_ON */
mtk_phy_set_bits(mmio + MP_LN_RX_44, FRC_CDR_PWR_ON);
mtk_phy_clear_bits(mmio + MP_LN_RX_44, CDR_PWR_ON);
/* force DA_MP_PLL_ISO_EN */
mtk_phy_set_bits(mmio + MP_GLB_DIG_8C, FRC_PLL_ISO_EN);
mtk_phy_set_bits(mmio + MP_GLB_DIG_8C, PLL_ISO_EN);
/* force DA_MP_PLL_PWR_ON */
mtk_phy_set_bits(mmio + MP_GLB_DIG_8C, FRC_FRC_PWR_ON);
mtk_phy_clear_bits(mmio + MP_GLB_DIG_8C, PLL_PWR_ON);
}
static int ufs_mtk_phy_power_on(struct phy *generic_phy)
{
struct ufs_mtk_phy *phy = get_ufs_mtk_phy(generic_phy);
int ret;
ret = clk_bulk_prepare_enable(UFSPHY_CLKS_CNT, phy->clks);
if (ret)
return ret;
ufs_mtk_phy_set_active(phy);
return 0;
}
static int ufs_mtk_phy_power_off(struct phy *generic_phy)
{
struct ufs_mtk_phy *phy = get_ufs_mtk_phy(generic_phy);
ufs_mtk_phy_set_deep_hibern(phy);
clk_bulk_disable_unprepare(UFSPHY_CLKS_CNT, phy->clks);
return 0;
}
static const struct phy_ops ufs_mtk_phy_ops = {
.power_on = ufs_mtk_phy_power_on,
.power_off = ufs_mtk_phy_power_off,
.owner = THIS_MODULE,
};
static int ufs_mtk_phy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct phy *generic_phy;
struct phy_provider *phy_provider;
struct ufs_mtk_phy *phy;
int ret;
phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
if (!phy)
return -ENOMEM;
phy->mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(phy->mmio))
return PTR_ERR(phy->mmio);
phy->dev = dev;
ret = ufs_mtk_phy_clk_init(phy);
if (ret)
return ret;
generic_phy = devm_phy_create(dev, NULL, &ufs_mtk_phy_ops);
if (IS_ERR(generic_phy))
return PTR_ERR(generic_phy);
phy_set_drvdata(generic_phy, phy);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
return PTR_ERR_OR_ZERO(phy_provider);
}
static const struct of_device_id ufs_mtk_phy_of_match[] = {
{.compatible = "mediatek,mt8183-ufsphy"},
{},
};
MODULE_DEVICE_TABLE(of, ufs_mtk_phy_of_match);
static struct platform_driver ufs_mtk_phy_driver = {
.probe = ufs_mtk_phy_probe,
.driver = {
.of_match_table = ufs_mtk_phy_of_match,
.name = "ufs_mtk_phy",
},
};
module_platform_driver(ufs_mtk_phy_driver);
MODULE_DESCRIPTION("Universal Flash Storage (UFS) MediaTek MPHY");
MODULE_AUTHOR("Stanley Chu <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/phy/mediatek/phy-mtk-ufs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2022 MediaTek Inc.
* Author: Jianjun Wang <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "phy-mtk-io.h"
#define PEXTP_ANA_GLB_00_REG 0x9000
/* Internal Resistor Selection of TX Bias Current */
#define EFUSE_GLB_INTR_SEL GENMASK(28, 24)
#define PEXTP_ANA_LN0_TRX_REG 0xa000
#define PEXTP_ANA_TX_REG 0x04
/* TX PMOS impedance selection */
#define EFUSE_LN_TX_PMOS_SEL GENMASK(5, 2)
/* TX NMOS impedance selection */
#define EFUSE_LN_TX_NMOS_SEL GENMASK(11, 8)
#define PEXTP_ANA_RX_REG 0x3c
/* RX impedance selection */
#define EFUSE_LN_RX_SEL GENMASK(3, 0)
#define PEXTP_ANA_LANE_OFFSET 0x100
/**
* struct mtk_pcie_lane_efuse - eFuse data for each lane
* @tx_pmos: TX PMOS impedance selection data
* @tx_nmos: TX NMOS impedance selection data
* @rx_data: RX impedance selection data
* @lane_efuse_supported: software eFuse data is supported for this lane
*/
struct mtk_pcie_lane_efuse {
u32 tx_pmos;
u32 tx_nmos;
u32 rx_data;
bool lane_efuse_supported;
};
/**
* struct mtk_pcie_phy_data - phy data for each SoC
* @num_lanes: supported lane numbers
* @sw_efuse_supported: whether loading eFuse data from software is supported
*/
struct mtk_pcie_phy_data {
int num_lanes;
bool sw_efuse_supported;
};
/**
* struct mtk_pcie_phy - PCIe phy driver main structure
* @dev: pointer to device
* @phy: pointer to generic phy
* @sif_base: IO mapped register base address of system interface
* @data: pointer to SoC dependent data
* @sw_efuse_en: software eFuse enable status
* @efuse_glb_intr: internal resistor selection of TX bias current data
* @efuse: pointer to eFuse data for each lane
*/
struct mtk_pcie_phy {
struct device *dev;
struct phy *phy;
void __iomem *sif_base;
const struct mtk_pcie_phy_data *data;
bool sw_efuse_en;
u32 efuse_glb_intr;
struct mtk_pcie_lane_efuse *efuse;
};
static void mtk_pcie_efuse_set_lane(struct mtk_pcie_phy *pcie_phy,
unsigned int lane)
{
struct mtk_pcie_lane_efuse *data = &pcie_phy->efuse[lane];
void __iomem *addr;
if (!data->lane_efuse_supported)
return;
addr = pcie_phy->sif_base + PEXTP_ANA_LN0_TRX_REG +
lane * PEXTP_ANA_LANE_OFFSET;
mtk_phy_update_field(addr + PEXTP_ANA_TX_REG, EFUSE_LN_TX_PMOS_SEL,
data->tx_pmos);
mtk_phy_update_field(addr + PEXTP_ANA_TX_REG, EFUSE_LN_TX_NMOS_SEL,
data->tx_nmos);
mtk_phy_update_field(addr + PEXTP_ANA_RX_REG, EFUSE_LN_RX_SEL,
data->rx_data);
}
/**
* mtk_pcie_phy_init() - Initialize the phy
* @phy: the phy to be initialized
*
* Initialize the phy by setting the efuse data.
* The hardware settings will be reset during suspend, so they should be
* reinitialized when the consumer calls phy_init() again on resume.
*/
static int mtk_pcie_phy_init(struct phy *phy)
{
struct mtk_pcie_phy *pcie_phy = phy_get_drvdata(phy);
int i;
if (!pcie_phy->sw_efuse_en)
return 0;
/* Set global data */
mtk_phy_update_field(pcie_phy->sif_base + PEXTP_ANA_GLB_00_REG,
EFUSE_GLB_INTR_SEL, pcie_phy->efuse_glb_intr);
for (i = 0; i < pcie_phy->data->num_lanes; i++)
mtk_pcie_efuse_set_lane(pcie_phy, i);
return 0;
}
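/* Illustrative consumer-side sketch (hypothetical code, not part of this
 * driver): since the eFuse trim values above are reprogrammed on every
 * phy_init(), a PCIe controller's resume path would typically just call
 * the generic PHY API again, e.g.:
 *
 *	ret = phy_init(pcie_phy);	// reloads eFuse settings
 *	if (!ret)
 *		ret = phy_power_on(pcie_phy);
 */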
static const struct phy_ops mtk_pcie_phy_ops = {
.init = mtk_pcie_phy_init,
.owner = THIS_MODULE,
};
static int mtk_pcie_efuse_read_for_lane(struct mtk_pcie_phy *pcie_phy,
unsigned int lane)
{
struct mtk_pcie_lane_efuse *efuse = &pcie_phy->efuse[lane];
struct device *dev = pcie_phy->dev;
char efuse_id[16];
int ret;
snprintf(efuse_id, sizeof(efuse_id), "tx_ln%d_pmos", lane);
ret = nvmem_cell_read_variable_le_u32(dev, efuse_id, &efuse->tx_pmos);
if (ret)
return dev_err_probe(dev, ret, "Failed to read %s\n", efuse_id);
snprintf(efuse_id, sizeof(efuse_id), "tx_ln%d_nmos", lane);
ret = nvmem_cell_read_variable_le_u32(dev, efuse_id, &efuse->tx_nmos);
if (ret)
return dev_err_probe(dev, ret, "Failed to read %s\n", efuse_id);
snprintf(efuse_id, sizeof(efuse_id), "rx_ln%d", lane);
ret = nvmem_cell_read_variable_le_u32(dev, efuse_id, &efuse->rx_data);
if (ret)
return dev_err_probe(dev, ret, "Failed to read %s\n", efuse_id);
if (!(efuse->tx_pmos || efuse->tx_nmos || efuse->rx_data))
return dev_err_probe(dev, -EINVAL,
"No eFuse data found for lane%d, but dts enable it\n",
lane);
efuse->lane_efuse_supported = true;
return 0;
}
static int mtk_pcie_read_efuse(struct mtk_pcie_phy *pcie_phy)
{
struct device *dev = pcie_phy->dev;
bool nvmem_enabled;
int ret, i;
/* nvmem data is optional */
nvmem_enabled = device_property_present(dev, "nvmem-cells");
if (!nvmem_enabled)
return 0;
ret = nvmem_cell_read_variable_le_u32(dev, "glb_intr",
&pcie_phy->efuse_glb_intr);
if (ret)
return dev_err_probe(dev, ret, "Failed to read glb_intr\n");
pcie_phy->sw_efuse_en = true;
pcie_phy->efuse = devm_kzalloc(dev, pcie_phy->data->num_lanes *
sizeof(*pcie_phy->efuse), GFP_KERNEL);
if (!pcie_phy->efuse)
return -ENOMEM;
for (i = 0; i < pcie_phy->data->num_lanes; i++) {
ret = mtk_pcie_efuse_read_for_lane(pcie_phy, i);
if (ret)
return ret;
}
return 0;
}
static int mtk_pcie_phy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct phy_provider *provider;
struct mtk_pcie_phy *pcie_phy;
int ret;
pcie_phy = devm_kzalloc(dev, sizeof(*pcie_phy), GFP_KERNEL);
if (!pcie_phy)
return -ENOMEM;
pcie_phy->sif_base = devm_platform_ioremap_resource_byname(pdev, "sif");
if (IS_ERR(pcie_phy->sif_base))
return dev_err_probe(dev, PTR_ERR(pcie_phy->sif_base),
"Failed to map phy-sif base\n");
pcie_phy->phy = devm_phy_create(dev, dev->of_node, &mtk_pcie_phy_ops);
if (IS_ERR(pcie_phy->phy))
return dev_err_probe(dev, PTR_ERR(pcie_phy->phy),
"Failed to create PCIe phy\n");
pcie_phy->dev = dev;
pcie_phy->data = of_device_get_match_data(dev);
if (!pcie_phy->data)
return dev_err_probe(dev, -EINVAL, "Failed to get phy data\n");
if (pcie_phy->data->sw_efuse_supported) {
/*
* Failing to read the efuse data is not a fatal problem;
* ignore the failure and keep going.
*/
ret = mtk_pcie_read_efuse(pcie_phy);
if (ret == -EPROBE_DEFER || ret == -ENOMEM)
return ret;
}
phy_set_drvdata(pcie_phy->phy, pcie_phy);
provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
if (IS_ERR(provider))
return dev_err_probe(dev, PTR_ERR(provider),
"PCIe phy probe failed\n");
return 0;
}
static const struct mtk_pcie_phy_data mt8195_data = {
.num_lanes = 2,
.sw_efuse_supported = true,
};
static const struct of_device_id mtk_pcie_phy_of_match[] = {
{ .compatible = "mediatek,mt8195-pcie-phy", .data = &mt8195_data },
{ },
};
MODULE_DEVICE_TABLE(of, mtk_pcie_phy_of_match);
static struct platform_driver mtk_pcie_phy_driver = {
.probe = mtk_pcie_phy_probe,
.driver = {
.name = "mtk-pcie-phy",
.of_match_table = mtk_pcie_phy_of_match,
},
};
module_platform_driver(mtk_pcie_phy_driver);
MODULE_DESCRIPTION("MediaTek PCIe PHY driver");
MODULE_AUTHOR("Jianjun Wang <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/phy/mediatek/phy-mtk-pcie.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2019 MediaTek Inc.
* Author: jitao.shi <[email protected]>
*/
#include "phy-mtk-io.h"
#include "phy-mtk-mipi-dsi.h"
#define MIPITX_DSI_CON 0x00
#define RG_DSI_LDOCORE_EN BIT(0)
#define RG_DSI_CKG_LDOOUT_EN BIT(1)
#define RG_DSI_BCLK_SEL GENMASK(3, 2)
#define RG_DSI_LD_IDX_SEL GENMASK(6, 4)
#define RG_DSI_PHYCLK_SEL GENMASK(9, 8)
#define RG_DSI_DSICLK_FREQ_SEL BIT(10)
#define RG_DSI_LPTX_CLMP_EN BIT(11)
#define MIPITX_DSI_CLOCK_LANE 0x04
#define MIPITX_DSI_DATA_LANE0 0x08
#define MIPITX_DSI_DATA_LANE1 0x0c
#define MIPITX_DSI_DATA_LANE2 0x10
#define MIPITX_DSI_DATA_LANE3 0x14
#define RG_DSI_LNTx_LDOOUT_EN BIT(0)
#define RG_DSI_LNTx_CKLANE_EN BIT(1)
#define RG_DSI_LNTx_LPTX_IPLUS1 BIT(2)
#define RG_DSI_LNTx_LPTX_IPLUS2 BIT(3)
#define RG_DSI_LNTx_LPTX_IMINUS BIT(4)
#define RG_DSI_LNTx_LPCD_IPLUS BIT(5)
#define RG_DSI_LNTx_LPCD_IMINUS BIT(6)
#define RG_DSI_LNTx_RT_CODE GENMASK(11, 8)
#define MIPITX_DSI_TOP_CON 0x40
#define RG_DSI_LNT_INTR_EN BIT(0)
#define RG_DSI_LNT_HS_BIAS_EN BIT(1)
#define RG_DSI_LNT_IMP_CAL_EN BIT(2)
#define RG_DSI_LNT_TESTMODE_EN BIT(3)
#define RG_DSI_LNT_IMP_CAL_CODE GENMASK(7, 4)
#define RG_DSI_LNT_AIO_SEL GENMASK(10, 8)
#define RG_DSI_PAD_TIE_LOW_EN BIT(11)
#define RG_DSI_DEBUG_INPUT_EN BIT(12)
#define RG_DSI_PRESERVE GENMASK(15, 13)
#define MIPITX_DSI_BG_CON 0x44
#define RG_DSI_BG_CORE_EN BIT(0)
#define RG_DSI_BG_CKEN BIT(1)
#define RG_DSI_BG_DIV GENMASK(3, 2)
#define RG_DSI_BG_FAST_CHARGE BIT(4)
#define RG_DSI_V12_SEL GENMASK(7, 5)
#define RG_DSI_V10_SEL GENMASK(10, 8)
#define RG_DSI_V072_SEL GENMASK(13, 11)
#define RG_DSI_V04_SEL GENMASK(16, 14)
#define RG_DSI_V032_SEL GENMASK(19, 17)
#define RG_DSI_V02_SEL GENMASK(22, 20)
#define RG_DSI_VOUT_MSK \
(RG_DSI_V12_SEL | RG_DSI_V10_SEL | RG_DSI_V072_SEL | \
RG_DSI_V04_SEL | RG_DSI_V032_SEL | RG_DSI_V02_SEL)
#define RG_DSI_BG_R1_TRIM GENMASK(27, 24)
#define RG_DSI_BG_R2_TRIM GENMASK(31, 28)
#define MIPITX_DSI_PLL_CON0 0x50
#define RG_DSI_MPPLL_PLL_EN BIT(0)
#define RG_DSI_MPPLL_PREDIV GENMASK(2, 1)
#define RG_DSI_MPPLL_TXDIV0 GENMASK(4, 3)
#define RG_DSI_MPPLL_TXDIV1 GENMASK(6, 5)
#define RG_DSI_MPPLL_POSDIV GENMASK(9, 7)
#define RG_DSI_MPPLL_DIV_MSK \
(RG_DSI_MPPLL_PREDIV | RG_DSI_MPPLL_TXDIV0 | \
RG_DSI_MPPLL_TXDIV1 | RG_DSI_MPPLL_POSDIV)
#define RG_DSI_MPPLL_MONVC_EN BIT(10)
#define RG_DSI_MPPLL_MONREF_EN BIT(11)
#define RG_DSI_MPPLL_VOD_EN BIT(12)
#define MIPITX_DSI_PLL_CON1 0x54
#define RG_DSI_MPPLL_SDM_FRA_EN BIT(0)
#define RG_DSI_MPPLL_SDM_SSC_PH_INIT BIT(1)
#define RG_DSI_MPPLL_SDM_SSC_EN BIT(2)
#define RG_DSI_MPPLL_SDM_SSC_PRD GENMASK(31, 16)
#define MIPITX_DSI_PLL_CON2 0x58
#define MIPITX_DSI_PLL_TOP 0x64
#define RG_DSI_MPPLL_PRESERVE GENMASK(15, 8)
#define MIPITX_DSI_PLL_PWR 0x68
#define RG_DSI_MPPLL_SDM_PWR_ON BIT(0)
#define RG_DSI_MPPLL_SDM_ISO_EN BIT(1)
#define RG_DSI_MPPLL_SDM_PWR_ACK BIT(8)
#define MIPITX_DSI_SW_CTRL 0x80
#define SW_CTRL_EN BIT(0)
#define MIPITX_DSI_SW_CTRL_CON0 0x84
#define SW_LNTC_LPTX_PRE_OE BIT(0)
#define SW_LNTC_LPTX_OE BIT(1)
#define SW_LNTC_LPTX_P BIT(2)
#define SW_LNTC_LPTX_N BIT(3)
#define SW_LNTC_HSTX_PRE_OE BIT(4)
#define SW_LNTC_HSTX_OE BIT(5)
#define SW_LNTC_HSTX_ZEROCLK BIT(6)
#define SW_LNT0_LPTX_PRE_OE BIT(7)
#define SW_LNT0_LPTX_OE BIT(8)
#define SW_LNT0_LPTX_P BIT(9)
#define SW_LNT0_LPTX_N BIT(10)
#define SW_LNT0_HSTX_PRE_OE BIT(11)
#define SW_LNT0_HSTX_OE BIT(12)
#define SW_LNT0_LPRX_EN BIT(13)
#define SW_LNT1_LPTX_PRE_OE BIT(14)
#define SW_LNT1_LPTX_OE BIT(15)
#define SW_LNT1_LPTX_P BIT(16)
#define SW_LNT1_LPTX_N BIT(17)
#define SW_LNT1_HSTX_PRE_OE BIT(18)
#define SW_LNT1_HSTX_OE BIT(19)
#define SW_LNT2_LPTX_PRE_OE BIT(20)
#define SW_LNT2_LPTX_OE BIT(21)
#define SW_LNT2_LPTX_P BIT(22)
#define SW_LNT2_LPTX_N BIT(23)
#define SW_LNT2_HSTX_PRE_OE BIT(24)
#define SW_LNT2_HSTX_OE BIT(25)
static int mtk_mipi_tx_pll_prepare(struct clk_hw *hw)
{
struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
void __iomem *base = mipi_tx->regs;
u8 txdiv, txdiv0, txdiv1;
u64 pcw;
dev_dbg(mipi_tx->dev, "prepare: %u Hz\n", mipi_tx->data_rate);
if (mipi_tx->data_rate >= 500000000) {
txdiv = 1;
txdiv0 = 0;
txdiv1 = 0;
} else if (mipi_tx->data_rate >= 250000000) {
txdiv = 2;
txdiv0 = 1;
txdiv1 = 0;
} else if (mipi_tx->data_rate >= 125000000) {
txdiv = 4;
txdiv0 = 2;
txdiv1 = 0;
} else if (mipi_tx->data_rate > 62000000) {
txdiv = 8;
txdiv0 = 2;
txdiv1 = 1;
} else if (mipi_tx->data_rate >= 50000000) {
txdiv = 16;
txdiv0 = 2;
txdiv1 = 2;
} else {
return -EINVAL;
}
mtk_phy_update_bits(base + MIPITX_DSI_BG_CON,
RG_DSI_VOUT_MSK | RG_DSI_BG_CKEN |
RG_DSI_BG_CORE_EN,
FIELD_PREP(RG_DSI_V02_SEL, 4) |
FIELD_PREP(RG_DSI_V032_SEL, 4) |
FIELD_PREP(RG_DSI_V04_SEL, 4) |
FIELD_PREP(RG_DSI_V072_SEL, 4) |
FIELD_PREP(RG_DSI_V10_SEL, 4) |
FIELD_PREP(RG_DSI_V12_SEL, 4) |
RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN);
usleep_range(30, 100);
mtk_phy_update_bits(base + MIPITX_DSI_TOP_CON,
RG_DSI_LNT_IMP_CAL_CODE | RG_DSI_LNT_HS_BIAS_EN,
FIELD_PREP(RG_DSI_LNT_IMP_CAL_CODE, 8) |
RG_DSI_LNT_HS_BIAS_EN);
mtk_phy_set_bits(base + MIPITX_DSI_CON,
RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN);
mtk_phy_update_bits(base + MIPITX_DSI_PLL_PWR,
RG_DSI_MPPLL_SDM_PWR_ON | RG_DSI_MPPLL_SDM_ISO_EN,
RG_DSI_MPPLL_SDM_PWR_ON);
mtk_phy_clear_bits(base + MIPITX_DSI_PLL_CON0, RG_DSI_MPPLL_PLL_EN);
mtk_phy_update_bits(base + MIPITX_DSI_PLL_CON0,
RG_DSI_MPPLL_TXDIV0 | RG_DSI_MPPLL_TXDIV1 |
RG_DSI_MPPLL_PREDIV,
FIELD_PREP(RG_DSI_MPPLL_TXDIV0, txdiv0) |
FIELD_PREP(RG_DSI_MPPLL_TXDIV1, txdiv1));
/*
* PLL PCW config
* PCW bit 24~30 = integer part of pcw
* PCW bit 0~23 = fractional part of pcw
* pcw = data_Rate*4*txdiv/(Ref_clk*2);
* Post DIV =4, so need data_Rate*4
* Ref_clk is 26MHz
*/
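/* illustrative example (not from the datasheet): for data_rate = 500 MHz
 * and txdiv = 1, pcw = 500M * 2 * 1 / 26M ~= 38.46 in 7.24 fixed point,
 * i.e. integer part 38 in bits 30:24 and the fraction in bits 23:0
 */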
pcw = div_u64(((u64)mipi_tx->data_rate * 2 * txdiv) << 24, 26000000);
writel(pcw, base + MIPITX_DSI_PLL_CON2);
mtk_phy_set_bits(base + MIPITX_DSI_PLL_CON1, RG_DSI_MPPLL_SDM_FRA_EN);
mtk_phy_set_bits(base + MIPITX_DSI_PLL_CON0, RG_DSI_MPPLL_PLL_EN);
usleep_range(20, 100);
mtk_phy_clear_bits(base + MIPITX_DSI_PLL_CON1, RG_DSI_MPPLL_SDM_SSC_EN);
mtk_phy_update_field(base + MIPITX_DSI_PLL_TOP,
RG_DSI_MPPLL_PRESERVE,
mipi_tx->driver_data->mppll_preserve);
return 0;
}
static void mtk_mipi_tx_pll_unprepare(struct clk_hw *hw)
{
struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
void __iomem *base = mipi_tx->regs;
dev_dbg(mipi_tx->dev, "unprepare\n");
mtk_phy_clear_bits(base + MIPITX_DSI_PLL_CON0, RG_DSI_MPPLL_PLL_EN);
mtk_phy_clear_bits(base + MIPITX_DSI_PLL_TOP, RG_DSI_MPPLL_PRESERVE);
mtk_phy_update_bits(base + MIPITX_DSI_PLL_PWR,
RG_DSI_MPPLL_SDM_ISO_EN | RG_DSI_MPPLL_SDM_PWR_ON,
RG_DSI_MPPLL_SDM_ISO_EN);
mtk_phy_clear_bits(base + MIPITX_DSI_TOP_CON, RG_DSI_LNT_HS_BIAS_EN);
mtk_phy_clear_bits(base + MIPITX_DSI_CON,
RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN);
mtk_phy_clear_bits(base + MIPITX_DSI_BG_CON,
RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN);
mtk_phy_clear_bits(base + MIPITX_DSI_PLL_CON0, RG_DSI_MPPLL_DIV_MSK);
}
static long mtk_mipi_tx_pll_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
return clamp_val(rate, 50000000, 1250000000);
}
static const struct clk_ops mtk_mipi_tx_pll_ops = {
.prepare = mtk_mipi_tx_pll_prepare,
.unprepare = mtk_mipi_tx_pll_unprepare,
.round_rate = mtk_mipi_tx_pll_round_rate,
.set_rate = mtk_mipi_tx_pll_set_rate,
.recalc_rate = mtk_mipi_tx_pll_recalc_rate,
};
static void mtk_mipi_tx_power_on_signal(struct phy *phy)
{
struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
u32 reg;
for (reg = MIPITX_DSI_CLOCK_LANE;
reg <= MIPITX_DSI_DATA_LANE3; reg += 4)
mtk_phy_set_bits(mipi_tx->regs + reg, RG_DSI_LNTx_LDOOUT_EN);
mtk_phy_clear_bits(mipi_tx->regs + MIPITX_DSI_TOP_CON,
RG_DSI_PAD_TIE_LOW_EN);
}
static void mtk_mipi_tx_power_off_signal(struct phy *phy)
{
struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
u32 reg;
mtk_phy_set_bits(mipi_tx->regs + MIPITX_DSI_TOP_CON,
RG_DSI_PAD_TIE_LOW_EN);
for (reg = MIPITX_DSI_CLOCK_LANE;
reg <= MIPITX_DSI_DATA_LANE3; reg += 4)
mtk_phy_clear_bits(mipi_tx->regs + reg, RG_DSI_LNTx_LDOOUT_EN);
}
const struct mtk_mipitx_data mt2701_mipitx_data = {
.mppll_preserve = 3,
.mipi_tx_clk_ops = &mtk_mipi_tx_pll_ops,
.mipi_tx_enable_signal = mtk_mipi_tx_power_on_signal,
.mipi_tx_disable_signal = mtk_mipi_tx_power_off_signal,
};
const struct mtk_mipitx_data mt8173_mipitx_data = {
.mppll_preserve = 0,
.mipi_tx_clk_ops = &mtk_mipi_tx_pll_ops,
.mipi_tx_enable_signal = mtk_mipi_tx_power_on_signal,
.mipi_tx_disable_signal = mtk_mipi_tx_power_off_signal,
};
| linux-master | drivers/phy/mediatek/phy-mtk-mipi-dsi-mt8173.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2022 MediaTek Inc.
* Copyright (c) 2022 BayLibre, SAS
*/
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/units.h>
#include <linux/nvmem-consumer.h>
#include "phy-mtk-io.h"
#include "phy-mtk-hdmi.h"
#include "phy-mtk-hdmi-mt8195.h"
static void mtk_hdmi_ana_fifo_en(struct mtk_hdmi_phy *hdmi_phy)
{
/* make data fifo writable for hdmi2.0 */
mtk_phy_set_bits(hdmi_phy->regs + HDMI_ANA_CTL, REG_ANA_HDMI20_FIFO_EN);
}
static void
mtk_phy_tmds_clk_ratio(struct mtk_hdmi_phy *hdmi_phy, bool enable)
{
void __iomem *regs = hdmi_phy->regs;
mtk_hdmi_ana_fifo_en(hdmi_phy);
/* Per the HDMI 2.0 specification, the clock:bit ratio is 1:40 when
 * 3.4Gbps <= TMDS bit rate <= 6Gbps, and 1:10 below 3.4Gbps
*/
if (enable)
mtk_phy_update_field(regs + HDMI20_CLK_CFG, REG_TXC_DIV, 3);
else
mtk_phy_clear_bits(regs + HDMI20_CLK_CFG, REG_TXC_DIV);
}
static void mtk_hdmi_pll_sel_src(struct clk_hw *hw)
{
struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
void __iomem *regs = hdmi_phy->regs;
mtk_phy_clear_bits(regs + HDMI_CTL_3, REG_HDMITX_REF_XTAL_SEL);
mtk_phy_clear_bits(regs + HDMI_CTL_3, REG_HDMITX_REF_RESPLL_SEL);
/* DA_HDMITX21_REF_CK for TXPLL input source */
mtk_phy_clear_bits(regs + HDMI_1_CFG_10, RG_HDMITXPLL_REF_CK_SEL);
}
static void mtk_hdmi_pll_perf(struct clk_hw *hw)
{
struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
void __iomem *regs = hdmi_phy->regs;
mtk_phy_set_bits(regs + HDMI_1_PLL_CFG_0, RG_HDMITXPLL_BP2);
mtk_phy_set_bits(regs + HDMI_1_PLL_CFG_2, RG_HDMITXPLL_BC);
mtk_phy_update_field(regs + HDMI_1_PLL_CFG_2, RG_HDMITXPLL_IC, 0x1);
mtk_phy_update_field(regs + HDMI_1_PLL_CFG_2, RG_HDMITXPLL_BR, 0x2);
mtk_phy_update_field(regs + HDMI_1_PLL_CFG_2, RG_HDMITXPLL_IR, 0x2);
mtk_phy_set_bits(regs + HDMI_1_PLL_CFG_2, RG_HDMITXPLL_BP);
mtk_phy_clear_bits(regs + HDMI_1_PLL_CFG_0, RG_HDMITXPLL_IBAND_FIX_EN);
mtk_phy_clear_bits(regs + HDMI_1_PLL_CFG_1, RG_HDMITXPLL_RESERVE_BIT14);
mtk_phy_clear_bits(regs + HDMI_1_PLL_CFG_2, RG_HDMITXPLL_HIKVCO);
mtk_phy_update_field(regs + HDMI_1_PLL_CFG_0, RG_HDMITXPLL_HREN, 0x1);
mtk_phy_update_field(regs + HDMI_1_PLL_CFG_0, RG_HDMITXPLL_LVR_SEL, 0x1);
mtk_phy_set_bits(regs + HDMI_1_PLL_CFG_1, RG_HDMITXPLL_RESERVE_BIT12_11);
mtk_phy_set_bits(regs + HDMI_1_PLL_CFG_0, RG_HDMITXPLL_TCL_EN);
}
static int mtk_hdmi_pll_set_hw(struct clk_hw *hw, u8 prediv,
u8 fbkdiv_high,
u32 fbkdiv_low,
u8 fbkdiv_hs3, u8 posdiv1,
u8 posdiv2, u8 txprediv,
u8 txposdiv,
u8 digital_div)
{
u8 txposdiv_value;
u8 div3_ctrl_value;
u8 posdiv_value;
u8 div_ctrl_value;
u8 reserve_3_2_value;
u8 prediv_value;
u8 reserve13_value;
struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
void __iomem *regs = hdmi_phy->regs;
mtk_hdmi_pll_sel_src(hw);
mtk_hdmi_pll_perf(hw);
mtk_phy_update_field(regs + HDMI_1_CFG_10, RG_HDMITX21_BIAS_PE_BG_VREF_SEL, 0x2);
mtk_phy_clear_bits(regs + HDMI_1_CFG_10, RG_HDMITX21_VREF_SEL);
mtk_phy_update_field(regs + HDMI_1_CFG_9, RG_HDMITX21_SLDO_VREF_SEL, 0x2);
mtk_phy_clear_bits(regs + HDMI_1_CFG_10, RG_HDMITX21_BIAS_PE_VREF_SELB);
mtk_phy_set_bits(regs + HDMI_1_CFG_3, RG_HDMITX21_SLDOLPF_EN);
mtk_phy_update_field(regs + HDMI_1_CFG_6, RG_HDMITX21_INTR_CAL, 0x11);
mtk_phy_set_bits(regs + HDMI_1_PLL_CFG_2, RG_HDMITXPLL_PWD);
/* TXPOSDIV */
txposdiv_value = ilog2(txposdiv);
mtk_phy_update_field(regs + HDMI_1_CFG_6, RG_HDMITX21_TX_POSDIV, txposdiv_value);
mtk_phy_set_bits(regs + HDMI_1_CFG_6, RG_HDMITX21_TX_POSDIV_EN);
mtk_phy_clear_bits(regs + HDMI_1_CFG_6, RG_HDMITX21_FRL_EN);
/* TXPREDIV */
switch (txprediv) {
case 2:
div3_ctrl_value = 0x0;
posdiv_value = 0x0;
break;
case 4:
div3_ctrl_value = 0x0;
posdiv_value = 0x1;
break;
case 6:
div3_ctrl_value = 0x1;
posdiv_value = 0x0;
break;
case 12:
div3_ctrl_value = 0x1;
posdiv_value = 0x1;
break;
default:
return -EINVAL;
}
mtk_phy_update_field(regs + HDMI_1_PLL_CFG_4, RG_HDMITXPLL_POSDIV_DIV3_CTRL, div3_ctrl_value);
mtk_phy_update_field(regs + HDMI_1_PLL_CFG_4, RG_HDMITXPLL_POSDIV, posdiv_value);
/* POSDIV1 */
switch (posdiv1) {
case 5:
div_ctrl_value = 0x0;
break;
case 10:
div_ctrl_value = 0x1;
break;
case 12:
div_ctrl_value = 0x2;
break;
case 15:
div_ctrl_value = 0x3;
break;
default:
return -EINVAL;
}
mtk_phy_update_field(regs + HDMI_1_PLL_CFG_4, RG_HDMITXPLL_DIV_CTRL, div_ctrl_value);
/* DE add new setting */
mtk_phy_clear_bits(regs + HDMI_1_PLL_CFG_1, RG_HDMITXPLL_RESERVE_BIT14);
/* POSDIV2 */
switch (posdiv2) {
case 1:
reserve_3_2_value = 0x0;
break;
case 2:
reserve_3_2_value = 0x1;
break;
case 4:
reserve_3_2_value = 0x2;
break;
case 6:
reserve_3_2_value = 0x3;
break;
default:
return -EINVAL;
}
mtk_phy_update_field(regs + HDMI_1_PLL_CFG_1, RG_HDMITXPLL_RESERVE_BIT3_2, reserve_3_2_value);
/* DE add new setting */
mtk_phy_update_field(regs + HDMI_1_PLL_CFG_1, RG_HDMITXPLL_RESERVE_BIT1_0, 0x2);
/* PREDIV */
prediv_value = ilog2(prediv);
mtk_phy_update_field(regs + HDMI_1_PLL_CFG_4, RG_HDMITXPLL_PREDIV, prediv_value);
/* FBKDIV_HS3 */
reserve13_value = ilog2(fbkdiv_hs3);
mtk_phy_update_field(regs + HDMI_1_PLL_CFG_1, RG_HDMITXPLL_RESERVE_BIT13, reserve13_value);
/* FBDIV */
mtk_phy_update_field(regs + HDMI_1_PLL_CFG_4, RG_HDMITXPLL_FBKDIV_HIGH, fbkdiv_high);
mtk_phy_update_field(regs + HDMI_1_PLL_CFG_3, RG_HDMITXPLL_FBKDIV_LOW, fbkdiv_low);
/* Digital DIVIDER */
mtk_phy_clear_bits(regs + HDMI_CTL_3, REG_PIXEL_CLOCK_SEL);
if (digital_div == 1) {
mtk_phy_clear_bits(regs + HDMI_CTL_3, REG_HDMITX_PIXEL_CLOCK);
} else {
mtk_phy_set_bits(regs + HDMI_CTL_3, REG_HDMITX_PIXEL_CLOCK);
mtk_phy_update_field(regs + HDMI_CTL_3, REG_HDMITXPLL_DIV, digital_div - 1);
}
return 0;
}
static int mtk_hdmi_pll_calc(struct mtk_hdmi_phy *hdmi_phy, struct clk_hw *hw,
unsigned long rate, unsigned long parent_rate)
{
u8 digital_div, txprediv, txposdiv, fbkdiv_high, posdiv1, posdiv2;
u64 tmds_clk, pixel_clk, da_hdmitx21_ref_ck, ns_hdmipll_ck, pcw;
u8 txpredivs[4] = { 2, 4, 6, 12 };
u32 fbkdiv_low;
int i;
pixel_clk = rate;
tmds_clk = pixel_clk;
if (tmds_clk < 25 * MEGA || tmds_clk > 594 * MEGA)
return -EINVAL;
if (tmds_clk >= 340 * MEGA)
hdmi_phy->tmds_over_340M = true;
else
hdmi_phy->tmds_over_340M = false;
/* in Hz */
da_hdmitx21_ref_ck = 26 * MEGA;
/* TXPOSDIV stage treatment:
* 0M < TMDS clk < 54M /8
* 54M <= TMDS clk < 148.35M /4
* 148.35M <= TMDS clk < 296.7M /2
* 296.7M <= TMDS clk <= 594M /1
*/
if (tmds_clk < 54 * MEGA)
txposdiv = 8;
else if (tmds_clk >= 54 * MEGA && (tmds_clk * 100) < 14835 * MEGA)
txposdiv = 4;
else if ((tmds_clk * 100) >= 14835 * MEGA && (tmds_clk * 10) < 2967 * MEGA)
txposdiv = 2;
else if ((tmds_clk * 10) >= 2967 * MEGA && tmds_clk <= 594 * MEGA)
txposdiv = 1;
else
return -EINVAL;
/* calculate txprediv: can be 2, 4, 6, 12
* ICO clk = 5*TMDS_CLK*TXPOSDIV*TXPREDIV
* ICO clk constraint: 5G =< ICO clk <= 12G
*/
for (i = 0; i < ARRAY_SIZE(txpredivs); i++) {
ns_hdmipll_ck = 5 * tmds_clk * txposdiv * txpredivs[i];
if (ns_hdmipll_ck >= 5 * GIGA &&
ns_hdmipll_ck <= 12 * GIGA)
break;
}
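/* Reject the rate if even the largest TXPREDIV cannot keep the ICO clock within the 5..12 GHz window. */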
if (i == (ARRAY_SIZE(txpredivs) - 1) &&
(ns_hdmipll_ck < 5 * GIGA || ns_hdmipll_ck > 12 * GIGA)) {
return -EINVAL;
}
if (i == ARRAY_SIZE(txpredivs))
return -EINVAL;
txprediv = txpredivs[i];
/* PCW calculation: FBKDIV
* formula: pcw=(frequency_out*2^pcw_bit) / frequency_in / FBKDIV_HS3;
* RG_HDMITXPLL_FBKDIV[32:0]:
* [32,24] 9bit integer, [23,0]:24bit fraction
*/
pcw = div_u64(((u64)ns_hdmipll_ck) << PCW_DECIMAL_WIDTH,
da_hdmitx21_ref_ck * PLL_FBKDIV_HS3);
if (pcw > GENMASK_ULL(32, 0))
return -EINVAL;
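/* pcw fits in 33 bits: bit 32 is written to FBKDIV_HIGH, bits 31..0 to FBKDIV_LOW. */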
fbkdiv_high = FIELD_GET(GENMASK_ULL(63, 32), pcw);
fbkdiv_low = FIELD_GET(GENMASK(31, 0), pcw);
/* posdiv1:
* posdiv1 stage treatment according to color_depth:
* 24bit -> posdiv1 /10, 30bit -> posdiv1 /12.5,
* 36bit -> posdiv1 /15, 48bit -> posdiv1 /10
*/
posdiv1 = 10;
posdiv2 = 1;
/* Digital clk divider, max /32 */
digital_div = div_u64(ns_hdmipll_ck, posdiv1 * posdiv2 * pixel_clk);
if (!(digital_div <= 32 && digital_div >= 1))
return -EINVAL;
return mtk_hdmi_pll_set_hw(hw, PLL_PREDIV, fbkdiv_high, fbkdiv_low,
PLL_FBKDIV_HS3, posdiv1, posdiv2, txprediv,
txposdiv, digital_div);
}
static int mtk_hdmi_pll_drv_setting(struct clk_hw *hw)
{
struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
void __iomem *regs = hdmi_phy->regs;
u8 data_channel_bias, clk_channel_bias;
u8 impedance, impedance_en;
u32 tmds_clk;
u32 pixel_clk = hdmi_phy->pll_rate;
tmds_clk = pixel_clk;
/* bias & impedance setting:
* 3G < data rate <= 6G: enable impedance 100ohm,
* data channel bias 24mA, clock channel bias 20mA
* pixel clk >= HD, 74.175MHZ <= pixel clk <= 300MHZ:
* enable impedance 100ohm
* data channel 20mA, clock channel 16mA
* 27M <= pixel clk < 74.175M: disable impedance
* data channel & clock channel bias 10mA
*/
/* 3G < data rate <= 6G, 300M < tmds rate <= 594M */
if (tmds_clk > 300 * MEGA && tmds_clk <= 594 * MEGA) {
data_channel_bias = 0x3c; /* 24mA */
clk_channel_bias = 0x34; /* 20mA */
impedance_en = 0xf;
impedance = 0x36; /* 100ohm */
} else if (((u64)pixel_clk * 1000) >= 74175 * MEGA && pixel_clk <= 300 * MEGA) {
data_channel_bias = 0x34; /* 20mA */
clk_channel_bias = 0x2c; /* 16mA */
impedance_en = 0xf;
impedance = 0x36; /* 100ohm */
} else if (pixel_clk >= 27 * MEGA && ((u64)pixel_clk * 1000) < 74175 * MEGA) {
data_channel_bias = 0x14; /* 10mA */
clk_channel_bias = 0x14; /* 10mA */
impedance_en = 0x0;
impedance = 0x0;
} else {
return -EINVAL;
}
/* bias */
mtk_phy_update_field(regs + HDMI_1_CFG_1, RG_HDMITX21_DRV_IBIAS_D0, data_channel_bias);
mtk_phy_update_field(regs + HDMI_1_CFG_1, RG_HDMITX21_DRV_IBIAS_D1, data_channel_bias);
mtk_phy_update_field(regs + HDMI_1_CFG_1, RG_HDMITX21_DRV_IBIAS_D2, data_channel_bias);
mtk_phy_update_field(regs + HDMI_1_CFG_0, RG_HDMITX21_DRV_IBIAS_CLK, clk_channel_bias);
/* impedance */
mtk_phy_update_field(regs + HDMI_1_CFG_0, RG_HDMITX21_DRV_IMP_EN, impedance_en);
mtk_phy_update_field(regs + HDMI_1_CFG_2, RG_HDMITX21_DRV_IMP_D0_EN1, impedance);
mtk_phy_update_field(regs + HDMI_1_CFG_2, RG_HDMITX21_DRV_IMP_D1_EN1, impedance);
mtk_phy_update_field(regs + HDMI_1_CFG_2, RG_HDMITX21_DRV_IMP_D2_EN1, impedance);
mtk_phy_update_field(regs + HDMI_1_CFG_2, RG_HDMITX21_DRV_IMP_CLK_EN1, impedance);
return 0;
}
static int mtk_hdmi_pll_prepare(struct clk_hw *hw)
{
struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
void __iomem *regs = hdmi_phy->regs;
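/* Enable the serializer and per-lane drivers, apply drive strength/impedance, then power up the band-gap, LDOs and PLL with settling delays. */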
mtk_phy_set_bits(regs + HDMI_1_CFG_6, RG_HDMITX21_TX_POSDIV_EN);
mtk_phy_set_bits(regs + HDMI_1_CFG_0, RG_HDMITX21_SER_EN);
mtk_phy_set_bits(regs + HDMI_1_CFG_6, RG_HDMITX21_D0_DRV_OP_EN);
mtk_phy_set_bits(regs + HDMI_1_CFG_6, RG_HDMITX21_D1_DRV_OP_EN);
mtk_phy_set_bits(regs + HDMI_1_CFG_6, RG_HDMITX21_D2_DRV_OP_EN);
mtk_phy_set_bits(regs + HDMI_1_CFG_6, RG_HDMITX21_CK_DRV_OP_EN);
mtk_phy_clear_bits(regs + HDMI_1_CFG_6, RG_HDMITX21_FRL_D0_EN);
mtk_phy_clear_bits(regs + HDMI_1_CFG_6, RG_HDMITX21_FRL_D1_EN);
mtk_phy_clear_bits(regs + HDMI_1_CFG_6, RG_HDMITX21_FRL_D2_EN);
mtk_phy_clear_bits(regs + HDMI_1_CFG_6, RG_HDMITX21_FRL_CK_EN);
mtk_hdmi_pll_drv_setting(hw);
mtk_phy_clear_bits(regs + HDMI_1_CFG_10, RG_HDMITX21_BG_PWD);
mtk_phy_set_bits(regs + HDMI_1_CFG_6, RG_HDMITX21_BIAS_EN);
mtk_phy_set_bits(regs + HDMI_1_CFG_3, RG_HDMITX21_CKLDO_EN);
mtk_phy_set_bits(regs + HDMI_1_CFG_3, RG_HDMITX21_SLDO_EN);
mtk_phy_set_bits(regs + HDMI_1_PLL_CFG_4, DA_HDMITXPLL_PWR_ON);
usleep_range(5, 10);
mtk_phy_clear_bits(regs + HDMI_1_PLL_CFG_4, DA_HDMITXPLL_ISO_EN);
usleep_range(5, 10);
mtk_phy_clear_bits(regs + HDMI_1_PLL_CFG_2, RG_HDMITXPLL_PWD);
usleep_range(30, 50);
return 0;
}
static void mtk_hdmi_pll_unprepare(struct clk_hw *hw)
{
struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
void __iomem *regs = hdmi_phy->regs;
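/* Reverse of the prepare sequence: power down band-gap, bias and LDOs, then stop the PLL and re-assert isolation. */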
mtk_phy_set_bits(regs + HDMI_1_CFG_10, RG_HDMITX21_BG_PWD);
mtk_phy_clear_bits(regs + HDMI_1_CFG_6, RG_HDMITX21_BIAS_EN);
mtk_phy_clear_bits(regs + HDMI_1_CFG_3, RG_HDMITX21_CKLDO_EN);
mtk_phy_clear_bits(regs + HDMI_1_CFG_3, RG_HDMITX21_SLDO_EN);
mtk_phy_set_bits(regs + HDMI_1_PLL_CFG_2, RG_HDMITXPLL_PWD);
usleep_range(10, 20);
mtk_phy_set_bits(regs + HDMI_1_PLL_CFG_4, DA_HDMITXPLL_ISO_EN);
usleep_range(10, 20);
mtk_phy_clear_bits(regs + HDMI_1_PLL_CFG_4, DA_HDMITXPLL_PWR_ON);
}
static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
dev_dbg(hdmi_phy->dev, "%s: %lu Hz, parent: %lu Hz\n", __func__, rate,
parent_rate);
return mtk_hdmi_pll_calc(hdmi_phy, hw, rate, parent_rate);
}
static long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
hdmi_phy->pll_rate = rate;
return rate;
}
static unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
return hdmi_phy->pll_rate;
}
static const struct clk_ops mtk_hdmi_pll_ops = {
.prepare = mtk_hdmi_pll_prepare,
.unprepare = mtk_hdmi_pll_unprepare,
.set_rate = mtk_hdmi_pll_set_rate,
.round_rate = mtk_hdmi_pll_round_rate,
.recalc_rate = mtk_hdmi_pll_recalc_rate,
};
static void vtx_signal_en(struct mtk_hdmi_phy *hdmi_phy, bool on)
{
void __iomem *regs = hdmi_phy->regs;
if (on)
mtk_phy_set_bits(regs + HDMI_1_CFG_0, RG_HDMITX21_DRV_EN);
else
mtk_phy_clear_bits(regs + HDMI_1_CFG_0, RG_HDMITX21_DRV_EN);
}
static void mtk_hdmi_phy_enable_tmds(struct mtk_hdmi_phy *hdmi_phy)
{
vtx_signal_en(hdmi_phy, true);
usleep_range(100, 150);
}
static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy)
{
vtx_signal_en(hdmi_phy, false);
}
static int mtk_hdmi_phy_configure(struct phy *phy, union phy_configure_opts *opts)
{
struct phy_configure_opts_dp *dp_opts = &opts->dp;
struct mtk_hdmi_phy *hdmi_phy = phy_get_drvdata(phy);
int ret;
ret = clk_set_rate(hdmi_phy->pll, dp_opts->link_rate);
if (ret)
return ret;
mtk_phy_tmds_clk_ratio(hdmi_phy, hdmi_phy->tmds_over_340M);
return ret;
}
struct mtk_hdmi_phy_conf mtk_hdmi_phy_8195_conf = {
.flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE,
.hdmi_phy_clk_ops = &mtk_hdmi_pll_ops,
.hdmi_phy_enable_tmds = mtk_hdmi_phy_enable_tmds,
.hdmi_phy_disable_tmds = mtk_hdmi_phy_disable_tmds,
.hdmi_phy_configure = mtk_hdmi_phy_configure,
};
MODULE_AUTHOR("Can Zeng <[email protected]>");
MODULE_DESCRIPTION("MediaTek MT8195 HDMI PHY Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/phy/mediatek/phy-mtk-hdmi-mt8195.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015 MediaTek Inc.
*/
#include "phy-mtk-mipi-dsi.h"
inline struct mtk_mipi_tx *mtk_mipi_tx_from_clk_hw(struct clk_hw *hw)
{
return container_of(hw, struct mtk_mipi_tx, pll_hw);
}
int mtk_mipi_tx_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
dev_dbg(mipi_tx->dev, "set rate: %lu Hz\n", rate);
mipi_tx->data_rate = rate;
return 0;
}
unsigned long mtk_mipi_tx_pll_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
return mipi_tx->data_rate;
}
static int mtk_mipi_tx_power_on(struct phy *phy)
{
struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
int ret;
/* Power up core and enable PLL */
ret = clk_prepare_enable(mipi_tx->pll_hw.clk);
if (ret < 0)
return ret;
/* Enable DSI Lane LDO outputs, disable pad tie low */
mipi_tx->driver_data->mipi_tx_enable_signal(phy);
return 0;
}
static int mtk_mipi_tx_power_off(struct phy *phy)
{
struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
/* Enable pad tie low, disable DSI Lane LDO outputs */
mipi_tx->driver_data->mipi_tx_disable_signal(phy);
/* Disable PLL and power down core */
clk_disable_unprepare(mipi_tx->pll_hw.clk);
return 0;
}
static const struct phy_ops mtk_mipi_tx_ops = {
.power_on = mtk_mipi_tx_power_on,
.power_off = mtk_mipi_tx_power_off,
.owner = THIS_MODULE,
};
static void mtk_mipi_tx_get_calibration_data(struct mtk_mipi_tx *mipi_tx)
{
struct nvmem_cell *cell;
size_t len;
u32 *buf;
cell = nvmem_cell_get(mipi_tx->dev, "calibration-data");
if (IS_ERR(cell)) {
dev_info(mipi_tx->dev, "can't get nvmem_cell_get, ignore it\n");
return;
}
buf = (u32 *)nvmem_cell_read(cell, &len);
nvmem_cell_put(cell);
if (IS_ERR(buf)) {
dev_info(mipi_tx->dev, "can't get data, ignore it\n");
return;
}
if (len < 3 * sizeof(u32)) {
dev_info(mipi_tx->dev, "invalid calibration data\n");
kfree(buf);
return;
}
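/* Repack the efuse words into five 10-bit termination trim codes, two 5-bit halves each. */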
mipi_tx->rt_code[0] = ((buf[0] >> 6 & 0x1f) << 5) |
(buf[0] >> 11 & 0x1f);
mipi_tx->rt_code[1] = ((buf[1] >> 27 & 0x1f) << 5) |
(buf[0] >> 1 & 0x1f);
mipi_tx->rt_code[2] = ((buf[1] >> 17 & 0x1f) << 5) |
(buf[1] >> 22 & 0x1f);
mipi_tx->rt_code[3] = ((buf[1] >> 7 & 0x1f) << 5) |
(buf[1] >> 12 & 0x1f);
mipi_tx->rt_code[4] = ((buf[2] >> 27 & 0x1f) << 5) |
(buf[1] >> 2 & 0x1f);
kfree(buf);
}
static int mtk_mipi_tx_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_mipi_tx *mipi_tx;
const char *ref_clk_name;
struct clk *ref_clk;
struct clk_init_data clk_init = {
.num_parents = 1,
.parent_names = (const char * const *)&ref_clk_name,
.flags = CLK_SET_RATE_GATE,
};
struct phy *phy;
struct phy_provider *phy_provider;
int ret;
mipi_tx = devm_kzalloc(dev, sizeof(*mipi_tx), GFP_KERNEL);
if (!mipi_tx)
return -ENOMEM;
mipi_tx->driver_data = of_device_get_match_data(dev);
if (!mipi_tx->driver_data)
return -ENODEV;
mipi_tx->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mipi_tx->regs))
return PTR_ERR(mipi_tx->regs);
ref_clk = devm_clk_get(dev, NULL);
if (IS_ERR(ref_clk))
return dev_err_probe(dev, PTR_ERR(ref_clk),
"Failed to get reference clock\n");
ret = of_property_read_u32(dev->of_node, "drive-strength-microamp",
&mipi_tx->mipitx_drive);
/* If "drive-strength-microamp" is absent, default mipitx_drive to 4600 */
if (ret < 0)
mipi_tx->mipitx_drive = 4600;
/* check that mipitx_drive is within the valid range */
if (mipi_tx->mipitx_drive > 6000 || mipi_tx->mipitx_drive < 3000) {
dev_warn(dev, "drive-strength-microamp is invalid %d, not in 3000 ~ 6000\n",
mipi_tx->mipitx_drive);
mipi_tx->mipitx_drive = clamp_val(mipi_tx->mipitx_drive, 3000,
6000);
}
ref_clk_name = __clk_get_name(ref_clk);
ret = of_property_read_string(dev->of_node, "clock-output-names",
&clk_init.name);
if (ret < 0)
return dev_err_probe(dev, ret, "Failed to read clock-output-names\n");
clk_init.ops = mipi_tx->driver_data->mipi_tx_clk_ops;
mipi_tx->pll_hw.init = &clk_init;
ret = devm_clk_hw_register(dev, &mipi_tx->pll_hw);
if (ret)
return dev_err_probe(dev, ret, "Failed to register PLL\n");
phy = devm_phy_create(dev, NULL, &mtk_mipi_tx_ops);
if (IS_ERR(phy))
return dev_err_probe(dev, PTR_ERR(phy), "Failed to create MIPI D-PHY\n");
phy_set_drvdata(phy, mipi_tx);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
if (IS_ERR(phy_provider))
return PTR_ERR(phy_provider);
mipi_tx->dev = dev;
mtk_mipi_tx_get_calibration_data(mipi_tx);
return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, &mipi_tx->pll_hw);
}
static const struct of_device_id mtk_mipi_tx_match[] = {
{ .compatible = "mediatek,mt2701-mipi-tx", .data = &mt2701_mipitx_data },
{ .compatible = "mediatek,mt8173-mipi-tx", .data = &mt8173_mipitx_data },
{ .compatible = "mediatek,mt8183-mipi-tx", .data = &mt8183_mipitx_data },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mtk_mipi_tx_match);
static struct platform_driver mtk_mipi_tx_driver = {
.probe = mtk_mipi_tx_probe,
.driver = {
.name = "mediatek-mipi-tx",
.of_match_table = mtk_mipi_tx_match,
},
};
module_platform_driver(mtk_mipi_tx_driver);
MODULE_DESCRIPTION("MediaTek MIPI TX Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/phy/mediatek/phy-mtk-mipi-dsi.c |
// SPDX-License-Identifier: GPL-2.0
/*
* MediaTek USB3.1 gen2 xsphy Driver
*
* Copyright (c) 2018 MediaTek Inc.
* Author: Chunfeng Yun <[email protected]>
*
*/
#include <dt-bindings/phy/phy.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include "phy-mtk-io.h"
/* u2 phy banks */
#define SSUSB_SIFSLV_MISC 0x000
#define SSUSB_SIFSLV_U2FREQ 0x100
#define SSUSB_SIFSLV_U2PHY_COM 0x300
/* u3 phy shared banks */
#define SSPXTP_SIFSLV_DIG_GLB 0x000
#define SSPXTP_SIFSLV_PHYA_GLB 0x100
/* u3 phy banks */
#define SSPXTP_SIFSLV_DIG_LN_TOP 0x000
#define SSPXTP_SIFSLV_DIG_LN_TX0 0x100
#define SSPXTP_SIFSLV_DIG_LN_RX0 0x200
#define SSPXTP_SIFSLV_DIG_LN_DAIF 0x300
#define SSPXTP_SIFSLV_PHYA_LN 0x400
#define XSP_U2FREQ_FMCR0 ((SSUSB_SIFSLV_U2FREQ) + 0x00)
#define P2F_RG_FREQDET_EN BIT(24)
#define P2F_RG_CYCLECNT GENMASK(23, 0)
#define XSP_U2FREQ_MMONR0 ((SSUSB_SIFSLV_U2FREQ) + 0x0c)
#define XSP_U2FREQ_FMMONR1 ((SSUSB_SIFSLV_U2FREQ) + 0x10)
#define P2F_RG_FRCK_EN BIT(8)
#define P2F_USB_FM_VALID BIT(0)
#define XSP_USBPHYACR0 ((SSUSB_SIFSLV_U2PHY_COM) + 0x00)
#define P2A0_RG_INTR_EN BIT(5)
#define XSP_USBPHYACR1 ((SSUSB_SIFSLV_U2PHY_COM) + 0x04)
#define P2A1_RG_INTR_CAL GENMASK(23, 19)
#define P2A1_RG_VRT_SEL GENMASK(14, 12)
#define P2A1_RG_TERM_SEL GENMASK(10, 8)
#define XSP_USBPHYACR5 ((SSUSB_SIFSLV_U2PHY_COM) + 0x014)
#define P2A5_RG_HSTX_SRCAL_EN BIT(15)
#define P2A5_RG_HSTX_SRCTRL GENMASK(14, 12)
#define XSP_USBPHYACR6 ((SSUSB_SIFSLV_U2PHY_COM) + 0x018)
#define P2A6_RG_BC11_SW_EN BIT(23)
#define P2A6_RG_OTG_VBUSCMP_EN BIT(20)
#define XSP_U2PHYDTM1 ((SSUSB_SIFSLV_U2PHY_COM) + 0x06C)
#define P2D_FORCE_IDDIG BIT(9)
#define P2D_RG_VBUSVALID BIT(5)
#define P2D_RG_SESSEND BIT(4)
#define P2D_RG_AVALID BIT(2)
#define P2D_RG_IDDIG BIT(1)
#define SSPXTP_PHYA_GLB_00 ((SSPXTP_SIFSLV_PHYA_GLB) + 0x00)
#define RG_XTP_GLB_BIAS_INTR_CTRL GENMASK(21, 16)
#define SSPXTP_PHYA_LN_04 ((SSPXTP_SIFSLV_PHYA_LN) + 0x04)
#define RG_XTP_LN0_TX_IMPSEL GENMASK(4, 0)
#define SSPXTP_PHYA_LN_14 ((SSPXTP_SIFSLV_PHYA_LN) + 0x014)
#define RG_XTP_LN0_RX_IMPSEL GENMASK(4, 0)
#define XSP_REF_CLK 26 /* MHZ */
#define XSP_SLEW_RATE_COEF 17
#define XSP_SR_COEF_DIVISOR 1000
#define XSP_FM_DET_CYCLE_CNT 1024
struct xsphy_instance {
struct phy *phy;
void __iomem *port_base;
struct clk *ref_clk; /* reference clock of analog phy */
u32 index;
u32 type;
/* only for HQA test */
int efuse_intr;
int efuse_tx_imp;
int efuse_rx_imp;
/* u2 eye diagram */
int eye_src;
int eye_vrt;
int eye_term;
};
struct mtk_xsphy {
struct device *dev;
void __iomem *glb_base; /* only shared u3 sif */
struct xsphy_instance **phys;
int nphys;
int src_ref_clk; /* MHz, reference clock for slew rate calibration */
int src_coef; /* coefficient for slew rate calibration */
};
static void u2_phy_slew_rate_calibrate(struct mtk_xsphy *xsphy,
struct xsphy_instance *inst)
{
void __iomem *pbase = inst->port_base;
int calib_val;
int fm_out;
u32 tmp;
/* use force value */
if (inst->eye_src)
return;
/* enable USB ring oscillator */
mtk_phy_set_bits(pbase + XSP_USBPHYACR5, P2A5_RG_HSTX_SRCAL_EN);
udelay(1); /* wait clock stable */
/* enable free run clock */
mtk_phy_set_bits(pbase + XSP_U2FREQ_FMMONR1, P2F_RG_FRCK_EN);
/* set cycle count as 1024 */
mtk_phy_update_field(pbase + XSP_U2FREQ_FMCR0, P2F_RG_CYCLECNT,
XSP_FM_DET_CYCLE_CNT);
/* enable frequency meter */
mtk_phy_set_bits(pbase + XSP_U2FREQ_FMCR0, P2F_RG_FREQDET_EN);
/* ignore return value */
readl_poll_timeout(pbase + XSP_U2FREQ_FMMONR1, tmp,
(tmp & P2F_USB_FM_VALID), 10, 200);
fm_out = readl(pbase + XSP_U2FREQ_MMONR0);
/* disable frequency meter */
mtk_phy_clear_bits(pbase + XSP_U2FREQ_FMCR0, P2F_RG_FREQDET_EN);
/* disable free run clock */
mtk_phy_clear_bits(pbase + XSP_U2FREQ_FMMONR1, P2F_RG_FRCK_EN);
if (fm_out) {
/* (1024 / FM_OUT) x reference clock frequency x coefficient */
tmp = xsphy->src_ref_clk * xsphy->src_coef;
tmp = (tmp * XSP_FM_DET_CYCLE_CNT) / fm_out;
calib_val = DIV_ROUND_CLOSEST(tmp, XSP_SR_COEF_DIVISOR);
} else {
/* if FM detection fail, set default value */
calib_val = 3;
}
dev_dbg(xsphy->dev, "phy.%d, fm_out:%d, calib:%d (clk:%d, coef:%d)\n",
inst->index, fm_out, calib_val,
xsphy->src_ref_clk, xsphy->src_coef);
/* set HS slew rate */
mtk_phy_update_field(pbase + XSP_USBPHYACR5, P2A5_RG_HSTX_SRCTRL, calib_val);
/* disable USB ring oscillator */
mtk_phy_clear_bits(pbase + XSP_USBPHYACR5, P2A5_RG_HSTX_SRCAL_EN);
}
static void u2_phy_instance_init(struct mtk_xsphy *xsphy,
struct xsphy_instance *inst)
{
void __iomem *pbase = inst->port_base;
/* DP/DM BC1.1 path Disable */
mtk_phy_clear_bits(pbase + XSP_USBPHYACR6, P2A6_RG_BC11_SW_EN);
mtk_phy_set_bits(pbase + XSP_USBPHYACR0, P2A0_RG_INTR_EN);
}
static void u2_phy_instance_power_on(struct mtk_xsphy *xsphy,
struct xsphy_instance *inst)
{
void __iomem *pbase = inst->port_base;
u32 index = inst->index;
mtk_phy_set_bits(pbase + XSP_USBPHYACR6, P2A6_RG_OTG_VBUSCMP_EN);
mtk_phy_update_bits(pbase + XSP_U2PHYDTM1,
P2D_RG_VBUSVALID | P2D_RG_AVALID | P2D_RG_SESSEND,
P2D_RG_VBUSVALID | P2D_RG_AVALID);
dev_dbg(xsphy->dev, "%s(%d)\n", __func__, index);
}
static void u2_phy_instance_power_off(struct mtk_xsphy *xsphy,
struct xsphy_instance *inst)
{
void __iomem *pbase = inst->port_base;
u32 index = inst->index;
mtk_phy_clear_bits(pbase + XSP_USBPHYACR6, P2A6_RG_OTG_VBUSCMP_EN);
mtk_phy_update_bits(pbase + XSP_U2PHYDTM1,
P2D_RG_VBUSVALID | P2D_RG_AVALID | P2D_RG_SESSEND,
P2D_RG_SESSEND);
dev_dbg(xsphy->dev, "%s(%d)\n", __func__, index);
}
static void u2_phy_instance_set_mode(struct mtk_xsphy *xsphy,
struct xsphy_instance *inst,
enum phy_mode mode)
{
u32 tmp;
tmp = readl(inst->port_base + XSP_U2PHYDTM1);
switch (mode) {
case PHY_MODE_USB_DEVICE:
tmp |= P2D_FORCE_IDDIG | P2D_RG_IDDIG;
break;
case PHY_MODE_USB_HOST:
tmp |= P2D_FORCE_IDDIG;
tmp &= ~P2D_RG_IDDIG;
break;
case PHY_MODE_USB_OTG:
tmp &= ~(P2D_FORCE_IDDIG | P2D_RG_IDDIG);
break;
default:
return;
}
writel(tmp, inst->port_base + XSP_U2PHYDTM1);
}
static void phy_parse_property(struct mtk_xsphy *xsphy,
struct xsphy_instance *inst)
{
struct device *dev = &inst->phy->dev;
switch (inst->type) {
case PHY_TYPE_USB2:
device_property_read_u32(dev, "mediatek,efuse-intr",
&inst->efuse_intr);
device_property_read_u32(dev, "mediatek,eye-src",
&inst->eye_src);
device_property_read_u32(dev, "mediatek,eye-vrt",
&inst->eye_vrt);
device_property_read_u32(dev, "mediatek,eye-term",
&inst->eye_term);
dev_dbg(dev, "intr:%d, src:%d, vrt:%d, term:%d\n",
inst->efuse_intr, inst->eye_src,
inst->eye_vrt, inst->eye_term);
break;
case PHY_TYPE_USB3:
device_property_read_u32(dev, "mediatek,efuse-intr",
&inst->efuse_intr);
device_property_read_u32(dev, "mediatek,efuse-tx-imp",
&inst->efuse_tx_imp);
device_property_read_u32(dev, "mediatek,efuse-rx-imp",
&inst->efuse_rx_imp);
dev_dbg(dev, "intr:%d, tx-imp:%d, rx-imp:%d\n",
inst->efuse_intr, inst->efuse_tx_imp,
inst->efuse_rx_imp);
break;
default:
dev_err(xsphy->dev, "incompatible phy type\n");
return;
}
}
static void u2_phy_props_set(struct mtk_xsphy *xsphy,
struct xsphy_instance *inst)
{
void __iomem *pbase = inst->port_base;
if (inst->efuse_intr)
mtk_phy_update_field(pbase + XSP_USBPHYACR1, P2A1_RG_INTR_CAL,
inst->efuse_intr);
if (inst->eye_src)
mtk_phy_update_field(pbase + XSP_USBPHYACR5, P2A5_RG_HSTX_SRCTRL,
inst->eye_src);
if (inst->eye_vrt)
mtk_phy_update_field(pbase + XSP_USBPHYACR1, P2A1_RG_VRT_SEL,
inst->eye_vrt);
if (inst->eye_term)
mtk_phy_update_field(pbase + XSP_USBPHYACR1, P2A1_RG_TERM_SEL,
inst->eye_term);
}
static void u3_phy_props_set(struct mtk_xsphy *xsphy,
struct xsphy_instance *inst)
{
void __iomem *pbase = inst->port_base;
if (inst->efuse_intr)
mtk_phy_update_field(xsphy->glb_base + SSPXTP_PHYA_GLB_00,
RG_XTP_GLB_BIAS_INTR_CTRL, inst->efuse_intr);
if (inst->efuse_tx_imp)
mtk_phy_update_field(pbase + SSPXTP_PHYA_LN_04,
RG_XTP_LN0_TX_IMPSEL, inst->efuse_tx_imp);
if (inst->efuse_rx_imp)
mtk_phy_update_field(pbase + SSPXTP_PHYA_LN_14,
RG_XTP_LN0_RX_IMPSEL, inst->efuse_rx_imp);
}
static int mtk_phy_init(struct phy *phy)
{
struct xsphy_instance *inst = phy_get_drvdata(phy);
struct mtk_xsphy *xsphy = dev_get_drvdata(phy->dev.parent);
int ret;
ret = clk_prepare_enable(inst->ref_clk);
if (ret) {
dev_err(xsphy->dev, "failed to enable ref_clk\n");
return ret;
}
switch (inst->type) {
case PHY_TYPE_USB2:
u2_phy_instance_init(xsphy, inst);
u2_phy_props_set(xsphy, inst);
break;
case PHY_TYPE_USB3:
u3_phy_props_set(xsphy, inst);
break;
default:
dev_err(xsphy->dev, "incompatible phy type\n");
clk_disable_unprepare(inst->ref_clk);
return -EINVAL;
}
return 0;
}
static int mtk_phy_power_on(struct phy *phy)
{
struct xsphy_instance *inst = phy_get_drvdata(phy);
struct mtk_xsphy *xsphy = dev_get_drvdata(phy->dev.parent);
if (inst->type == PHY_TYPE_USB2) {
u2_phy_instance_power_on(xsphy, inst);
u2_phy_slew_rate_calibrate(xsphy, inst);
}
return 0;
}
static int mtk_phy_power_off(struct phy *phy)
{
struct xsphy_instance *inst = phy_get_drvdata(phy);
struct mtk_xsphy *xsphy = dev_get_drvdata(phy->dev.parent);
if (inst->type == PHY_TYPE_USB2)
u2_phy_instance_power_off(xsphy, inst);
return 0;
}
static int mtk_phy_exit(struct phy *phy)
{
struct xsphy_instance *inst = phy_get_drvdata(phy);
clk_disable_unprepare(inst->ref_clk);
return 0;
}
static int mtk_phy_set_mode(struct phy *phy, enum phy_mode mode, int submode)
{
struct xsphy_instance *inst = phy_get_drvdata(phy);
struct mtk_xsphy *xsphy = dev_get_drvdata(phy->dev.parent);
if (inst->type == PHY_TYPE_USB2)
u2_phy_instance_set_mode(xsphy, inst, mode);
return 0;
}
static struct phy *mtk_phy_xlate(struct device *dev,
struct of_phandle_args *args)
{
struct mtk_xsphy *xsphy = dev_get_drvdata(dev);
struct xsphy_instance *inst = NULL;
struct device_node *phy_np = args->np;
int index;
if (args->args_count != 1) {
dev_err(dev, "invalid number of cells in 'phy' property\n");
return ERR_PTR(-EINVAL);
}
for (index = 0; index < xsphy->nphys; index++)
if (phy_np == xsphy->phys[index]->phy->dev.of_node) {
inst = xsphy->phys[index];
break;
}
if (!inst) {
dev_err(dev, "failed to find appropriate phy\n");
return ERR_PTR(-EINVAL);
}
inst->type = args->args[0];
if (!(inst->type == PHY_TYPE_USB2 ||
inst->type == PHY_TYPE_USB3)) {
dev_err(dev, "unsupported phy type: %d\n", inst->type);
return ERR_PTR(-EINVAL);
}
phy_parse_property(xsphy, inst);
return inst->phy;
}
static const struct phy_ops mtk_xsphy_ops = {
.init = mtk_phy_init,
.exit = mtk_phy_exit,
.power_on = mtk_phy_power_on,
.power_off = mtk_phy_power_off,
.set_mode = mtk_phy_set_mode,
.owner = THIS_MODULE,
};
static const struct of_device_id mtk_xsphy_id_table[] = {
{ .compatible = "mediatek,xsphy", },
{ },
};
MODULE_DEVICE_TABLE(of, mtk_xsphy_id_table);
static int mtk_xsphy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct device_node *child_np;
struct phy_provider *provider;
struct resource *glb_res;
struct mtk_xsphy *xsphy;
struct resource res;
int port, retval;
xsphy = devm_kzalloc(dev, sizeof(*xsphy), GFP_KERNEL);
if (!xsphy)
return -ENOMEM;
xsphy->nphys = of_get_child_count(np);
xsphy->phys = devm_kcalloc(dev, xsphy->nphys,
sizeof(*xsphy->phys), GFP_KERNEL);
if (!xsphy->phys)
return -ENOMEM;
xsphy->dev = dev;
platform_set_drvdata(pdev, xsphy);
glb_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
/* optional, may not exist if no u3 phys */
if (glb_res) {
/* get banks shared by multiple u3 phys */
xsphy->glb_base = devm_ioremap_resource(dev, glb_res);
if (IS_ERR(xsphy->glb_base)) {
dev_err(dev, "failed to remap glb regs\n");
return PTR_ERR(xsphy->glb_base);
}
}
xsphy->src_ref_clk = XSP_REF_CLK;
xsphy->src_coef = XSP_SLEW_RATE_COEF;
/* update slew rate calibration parameters if present */
device_property_read_u32(dev, "mediatek,src-ref-clk-mhz",
&xsphy->src_ref_clk);
device_property_read_u32(dev, "mediatek,src-coef", &xsphy->src_coef);
port = 0;
for_each_child_of_node(np, child_np) {
struct xsphy_instance *inst;
struct phy *phy;
inst = devm_kzalloc(dev, sizeof(*inst), GFP_KERNEL);
if (!inst) {
retval = -ENOMEM;
goto put_child;
}
xsphy->phys[port] = inst;
phy = devm_phy_create(dev, child_np, &mtk_xsphy_ops);
if (IS_ERR(phy)) {
dev_err(dev, "failed to create phy\n");
retval = PTR_ERR(phy);
goto put_child;
}
retval = of_address_to_resource(child_np, 0, &res);
if (retval) {
dev_err(dev, "failed to get address resource(id-%d)\n",
port);
goto put_child;
}
inst->port_base = devm_ioremap_resource(&phy->dev, &res);
if (IS_ERR(inst->port_base)) {
dev_err(dev, "failed to remap phy regs\n");
retval = PTR_ERR(inst->port_base);
goto put_child;
}
inst->phy = phy;
inst->index = port;
phy_set_drvdata(phy, inst);
port++;
inst->ref_clk = devm_clk_get(&phy->dev, "ref");
if (IS_ERR(inst->ref_clk)) {
dev_err(dev, "failed to get ref_clk(id-%d)\n", port);
retval = PTR_ERR(inst->ref_clk);
goto put_child;
}
}
provider = devm_of_phy_provider_register(dev, mtk_phy_xlate);
return PTR_ERR_OR_ZERO(provider);
put_child:
of_node_put(child_np);
return retval;
}
static struct platform_driver mtk_xsphy_driver = {
.probe = mtk_xsphy_probe,
.driver = {
.name = "mtk-xsphy",
.of_match_table = mtk_xsphy_id_table,
},
};
module_platform_driver(mtk_xsphy_driver);
MODULE_AUTHOR("Chunfeng Yun <[email protected]>");
MODULE_DESCRIPTION("MediaTek USB XS-PHY driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/phy/mediatek/phy-mtk-xsphy.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2019 MediaTek Inc.
* Author: jitao.shi <[email protected]>
*/
#include "phy-mtk-io.h"
#include "phy-mtk-mipi-dsi.h"
#define MIPITX_LANE_CON 0x000c
#define RG_DSI_CPHY_T1DRV_EN BIT(0)
#define RG_DSI_ANA_CK_SEL BIT(1)
#define RG_DSI_PHY_CK_SEL BIT(2)
#define RG_DSI_CPHY_EN BIT(3)
#define RG_DSI_PHYCK_INV_EN BIT(4)
#define RG_DSI_PWR04_EN BIT(5)
#define RG_DSI_BG_LPF_EN BIT(6)
#define RG_DSI_BG_CORE_EN BIT(7)
#define RG_DSI_PAD_TIEL_SEL BIT(8)
#define MIPITX_VOLTAGE_SEL 0x0010
#define RG_DSI_HSTX_LDO_REF_SEL GENMASK(9, 6)
#define MIPITX_PLL_PWR 0x0028
#define MIPITX_PLL_CON0 0x002c
#define MIPITX_PLL_CON1 0x0030
#define MIPITX_PLL_CON2 0x0034
#define MIPITX_PLL_CON3 0x0038
#define MIPITX_PLL_CON4 0x003c
#define RG_DSI_PLL_IBIAS GENMASK(11, 10)
#define MIPITX_D2P_RTCODE 0x0100
#define MIPITX_D2_SW_CTL_EN 0x0144
#define MIPITX_D0_SW_CTL_EN 0x0244
#define MIPITX_CK_CKMODE_EN 0x0328
#define DSI_CK_CKMODE_EN BIT(0)
#define MIPITX_CK_SW_CTL_EN 0x0344
#define MIPITX_D1_SW_CTL_EN 0x0444
#define MIPITX_D3_SW_CTL_EN 0x0544
#define DSI_SW_CTL_EN BIT(0)
#define AD_DSI_PLL_SDM_PWR_ON BIT(0)
#define AD_DSI_PLL_SDM_ISO_EN BIT(1)
#define RG_DSI_PLL_EN BIT(4)
#define RG_DSI_PLL_POSDIV GENMASK(10, 8)
static int mtk_mipi_tx_pll_enable(struct clk_hw *hw)
{
struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
void __iomem *base = mipi_tx->regs;
unsigned int txdiv, txdiv0;
u64 pcw;
dev_dbg(mipi_tx->dev, "enable: %u bps\n", mipi_tx->data_rate);
if (mipi_tx->data_rate >= 2000000000) {
txdiv = 1;
txdiv0 = 0;
} else if (mipi_tx->data_rate >= 1000000000) {
txdiv = 2;
txdiv0 = 1;
} else if (mipi_tx->data_rate >= 500000000) {
txdiv = 4;
txdiv0 = 2;
} else if (mipi_tx->data_rate > 250000000) {
txdiv = 8;
txdiv0 = 3;
} else if (mipi_tx->data_rate >= 125000000) {
txdiv = 16;
txdiv0 = 4;
} else {
return -EINVAL;
}
mtk_phy_clear_bits(base + MIPITX_PLL_CON4, RG_DSI_PLL_IBIAS);
mtk_phy_set_bits(base + MIPITX_PLL_PWR, AD_DSI_PLL_SDM_PWR_ON);
mtk_phy_clear_bits(base + MIPITX_PLL_CON1, RG_DSI_PLL_EN);
udelay(1);
mtk_phy_clear_bits(base + MIPITX_PLL_PWR, AD_DSI_PLL_SDM_ISO_EN);
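/* PCW = data_rate * txdiv * 2^24 / 26 MHz reference clock. */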
pcw = div_u64(((u64)mipi_tx->data_rate * txdiv) << 24, 26000000);
writel(pcw, base + MIPITX_PLL_CON0);
mtk_phy_update_field(base + MIPITX_PLL_CON1, RG_DSI_PLL_POSDIV, txdiv0);
mtk_phy_set_bits(base + MIPITX_PLL_CON1, RG_DSI_PLL_EN);
return 0;
}
static void mtk_mipi_tx_pll_disable(struct clk_hw *hw)
{
struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
void __iomem *base = mipi_tx->regs;
mtk_phy_clear_bits(base + MIPITX_PLL_CON1, RG_DSI_PLL_EN);
mtk_phy_set_bits(base + MIPITX_PLL_PWR, AD_DSI_PLL_SDM_ISO_EN);
mtk_phy_clear_bits(base + MIPITX_PLL_PWR, AD_DSI_PLL_SDM_PWR_ON);
}
static long mtk_mipi_tx_pll_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
return clamp_val(rate, 50000000, 1600000000);
}
static const struct clk_ops mtk_mipi_tx_pll_ops = {
.enable = mtk_mipi_tx_pll_enable,
.disable = mtk_mipi_tx_pll_disable,
.round_rate = mtk_mipi_tx_pll_round_rate,
.set_rate = mtk_mipi_tx_pll_set_rate,
.recalc_rate = mtk_mipi_tx_pll_recalc_rate,
};
static void mtk_mipi_tx_config_calibration_data(struct mtk_mipi_tx *mipi_tx)
{
int i, j;
for (i = 0; i < 5; i++) {
if ((mipi_tx->rt_code[i] & 0x1f) == 0)
mipi_tx->rt_code[i] |= 0x10;
if ((mipi_tx->rt_code[i] >> 5 & 0x1f) == 0)
mipi_tx->rt_code[i] |= 0x10 << 5;
for (j = 0; j < 10; j++)
mtk_phy_update_bits(mipi_tx->regs +
MIPITX_D2P_RTCODE * (i + 1) + j * 4,
1, mipi_tx->rt_code[i] >> j & 1);
}
}
static void mtk_mipi_tx_power_on_signal(struct phy *phy)
{
struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
void __iomem *base = mipi_tx->regs;
/* BG_LPF_EN / BG_CORE_EN */
writel(RG_DSI_PAD_TIEL_SEL | RG_DSI_BG_CORE_EN, base + MIPITX_LANE_CON);
usleep_range(30, 100);
writel(RG_DSI_BG_CORE_EN | RG_DSI_BG_LPF_EN, base + MIPITX_LANE_CON);
/* Switch OFF each Lane */
mtk_phy_clear_bits(base + MIPITX_D0_SW_CTL_EN, DSI_SW_CTL_EN);
mtk_phy_clear_bits(base + MIPITX_D1_SW_CTL_EN, DSI_SW_CTL_EN);
mtk_phy_clear_bits(base + MIPITX_D2_SW_CTL_EN, DSI_SW_CTL_EN);
mtk_phy_clear_bits(base + MIPITX_D3_SW_CTL_EN, DSI_SW_CTL_EN);
mtk_phy_clear_bits(base + MIPITX_CK_SW_CTL_EN, DSI_SW_CTL_EN);
mtk_phy_update_field(base + MIPITX_VOLTAGE_SEL, RG_DSI_HSTX_LDO_REF_SEL,
(mipi_tx->mipitx_drive - 3000) / 200);
mtk_mipi_tx_config_calibration_data(mipi_tx);
mtk_phy_set_bits(base + MIPITX_CK_CKMODE_EN, DSI_CK_CKMODE_EN);
}
static void mtk_mipi_tx_power_off_signal(struct phy *phy)
{
struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
void __iomem *base = mipi_tx->regs;
/* Switch ON each Lane */
mtk_phy_set_bits(base + MIPITX_D0_SW_CTL_EN, DSI_SW_CTL_EN);
mtk_phy_set_bits(base + MIPITX_D1_SW_CTL_EN, DSI_SW_CTL_EN);
mtk_phy_set_bits(base + MIPITX_D2_SW_CTL_EN, DSI_SW_CTL_EN);
mtk_phy_set_bits(base + MIPITX_D3_SW_CTL_EN, DSI_SW_CTL_EN);
mtk_phy_set_bits(base + MIPITX_CK_SW_CTL_EN, DSI_SW_CTL_EN);
writel(RG_DSI_PAD_TIEL_SEL | RG_DSI_BG_CORE_EN, base + MIPITX_LANE_CON);
writel(RG_DSI_PAD_TIEL_SEL, base + MIPITX_LANE_CON);
}
const struct mtk_mipitx_data mt8183_mipitx_data = {
.mipi_tx_clk_ops = &mtk_mipi_tx_pll_ops,
.mipi_tx_enable_signal = mtk_mipi_tx_power_on_signal,
.mipi_tx_disable_signal = mtk_mipi_tx_power_off_signal,
};
| linux-master | drivers/phy/mediatek/phy-mtk-mipi-dsi-mt8183.c |
// SPDX-License-Identifier: GPL-2.0
/*
* MediaTek DisplayPort PHY driver
*
* Copyright (c) 2022, BayLibre Inc.
* Copyright (c) 2022, MediaTek Inc.
*/
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#define PHY_OFFSET 0x1000
#define MTK_DP_PHY_DIG_PLL_CTL_1 (PHY_OFFSET + 0x14)
#define TPLL_SSC_EN BIT(3)
#define MTK_DP_PHY_DIG_BIT_RATE (PHY_OFFSET + 0x3C)
#define BIT_RATE_RBR 0
#define BIT_RATE_HBR 1
#define BIT_RATE_HBR2 2
#define BIT_RATE_HBR3 3
#define MTK_DP_PHY_DIG_SW_RST (PHY_OFFSET + 0x38)
#define DP_GLB_SW_RST_PHYD BIT(0)
#define MTK_DP_LANE0_DRIVING_PARAM_3 (PHY_OFFSET + 0x138)
#define MTK_DP_LANE1_DRIVING_PARAM_3 (PHY_OFFSET + 0x238)
#define MTK_DP_LANE2_DRIVING_PARAM_3 (PHY_OFFSET + 0x338)
#define MTK_DP_LANE3_DRIVING_PARAM_3 (PHY_OFFSET + 0x438)
#define XTP_LN_TX_LCTXC0_SW0_PRE0_DEFAULT BIT(4)
#define XTP_LN_TX_LCTXC0_SW0_PRE1_DEFAULT (BIT(10) | BIT(12))
#define XTP_LN_TX_LCTXC0_SW0_PRE2_DEFAULT GENMASK(20, 19)
#define XTP_LN_TX_LCTXC0_SW0_PRE3_DEFAULT GENMASK(29, 29)
#define DRIVING_PARAM_3_DEFAULT (XTP_LN_TX_LCTXC0_SW0_PRE0_DEFAULT | \
XTP_LN_TX_LCTXC0_SW0_PRE1_DEFAULT | \
XTP_LN_TX_LCTXC0_SW0_PRE2_DEFAULT | \
XTP_LN_TX_LCTXC0_SW0_PRE3_DEFAULT)
#define XTP_LN_TX_LCTXC0_SW1_PRE0_DEFAULT GENMASK(4, 3)
#define XTP_LN_TX_LCTXC0_SW1_PRE1_DEFAULT GENMASK(12, 9)
#define XTP_LN_TX_LCTXC0_SW1_PRE2_DEFAULT (BIT(18) | BIT(21))
#define XTP_LN_TX_LCTXC0_SW2_PRE0_DEFAULT GENMASK(29, 29)
#define DRIVING_PARAM_4_DEFAULT (XTP_LN_TX_LCTXC0_SW1_PRE0_DEFAULT | \
XTP_LN_TX_LCTXC0_SW1_PRE1_DEFAULT | \
XTP_LN_TX_LCTXC0_SW1_PRE2_DEFAULT | \
XTP_LN_TX_LCTXC0_SW2_PRE0_DEFAULT)
#define XTP_LN_TX_LCTXC0_SW2_PRE1_DEFAULT (BIT(3) | BIT(5))
#define XTP_LN_TX_LCTXC0_SW3_PRE0_DEFAULT GENMASK(13, 12)
#define DRIVING_PARAM_5_DEFAULT (XTP_LN_TX_LCTXC0_SW2_PRE1_DEFAULT | \
XTP_LN_TX_LCTXC0_SW3_PRE0_DEFAULT)
#define XTP_LN_TX_LCTXCP1_SW0_PRE0_DEFAULT 0
#define XTP_LN_TX_LCTXCP1_SW0_PRE1_DEFAULT GENMASK(10, 10)
#define XTP_LN_TX_LCTXCP1_SW0_PRE2_DEFAULT GENMASK(19, 19)
#define XTP_LN_TX_LCTXCP1_SW0_PRE3_DEFAULT GENMASK(28, 28)
#define DRIVING_PARAM_6_DEFAULT (XTP_LN_TX_LCTXCP1_SW0_PRE0_DEFAULT | \
XTP_LN_TX_LCTXCP1_SW0_PRE1_DEFAULT | \
XTP_LN_TX_LCTXCP1_SW0_PRE2_DEFAULT | \
XTP_LN_TX_LCTXCP1_SW0_PRE3_DEFAULT)
#define XTP_LN_TX_LCTXCP1_SW1_PRE0_DEFAULT 0
#define XTP_LN_TX_LCTXCP1_SW1_PRE1_DEFAULT GENMASK(10, 9)
#define XTP_LN_TX_LCTXCP1_SW1_PRE2_DEFAULT GENMASK(19, 18)
#define XTP_LN_TX_LCTXCP1_SW2_PRE0_DEFAULT 0
#define DRIVING_PARAM_7_DEFAULT (XTP_LN_TX_LCTXCP1_SW1_PRE0_DEFAULT | \
XTP_LN_TX_LCTXCP1_SW1_PRE1_DEFAULT | \
XTP_LN_TX_LCTXCP1_SW1_PRE2_DEFAULT | \
XTP_LN_TX_LCTXCP1_SW2_PRE0_DEFAULT)
#define XTP_LN_TX_LCTXCP1_SW2_PRE1_DEFAULT GENMASK(3, 3)
#define XTP_LN_TX_LCTXCP1_SW3_PRE0_DEFAULT 0
#define DRIVING_PARAM_8_DEFAULT (XTP_LN_TX_LCTXCP1_SW2_PRE1_DEFAULT | \
XTP_LN_TX_LCTXCP1_SW3_PRE0_DEFAULT)
struct mtk_dp_phy {
struct regmap *regs;
};
static int mtk_dp_phy_init(struct phy *phy)
{
struct mtk_dp_phy *dp_phy = phy_get_drvdata(phy);
static const u32 driving_params[] = {
DRIVING_PARAM_3_DEFAULT,
DRIVING_PARAM_4_DEFAULT,
DRIVING_PARAM_5_DEFAULT,
DRIVING_PARAM_6_DEFAULT,
DRIVING_PARAM_7_DEFAULT,
DRIVING_PARAM_8_DEFAULT
};
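/* Write the same default driving parameters (per swing/pre-emphasis level) to all four lanes. */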
regmap_bulk_write(dp_phy->regs, MTK_DP_LANE0_DRIVING_PARAM_3,
driving_params, ARRAY_SIZE(driving_params));
regmap_bulk_write(dp_phy->regs, MTK_DP_LANE1_DRIVING_PARAM_3,
driving_params, ARRAY_SIZE(driving_params));
regmap_bulk_write(dp_phy->regs, MTK_DP_LANE2_DRIVING_PARAM_3,
driving_params, ARRAY_SIZE(driving_params));
regmap_bulk_write(dp_phy->regs, MTK_DP_LANE3_DRIVING_PARAM_3,
driving_params, ARRAY_SIZE(driving_params));
return 0;
}
static int mtk_dp_phy_configure(struct phy *phy, union phy_configure_opts *opts)
{
struct mtk_dp_phy *dp_phy = phy_get_drvdata(phy);
u32 val;
if (opts->dp.set_rate) {
switch (opts->dp.link_rate) {
default:
dev_err(&phy->dev,
"Implementation error, unknown linkrate %x\n",
opts->dp.link_rate);
return -EINVAL;
case 1620:
val = BIT_RATE_RBR;
break;
case 2700:
val = BIT_RATE_HBR;
break;
case 5400:
val = BIT_RATE_HBR2;
break;
case 8100:
val = BIT_RATE_HBR3;
break;
}
regmap_write(dp_phy->regs, MTK_DP_PHY_DIG_BIT_RATE, val);
}
regmap_update_bits(dp_phy->regs, MTK_DP_PHY_DIG_PLL_CTL_1,
TPLL_SSC_EN, opts->dp.ssc ? TPLL_SSC_EN : 0);
return 0;
}
static int mtk_dp_phy_reset(struct phy *phy)
{
struct mtk_dp_phy *dp_phy = phy_get_drvdata(phy);
regmap_update_bits(dp_phy->regs, MTK_DP_PHY_DIG_SW_RST,
DP_GLB_SW_RST_PHYD, 0);
usleep_range(50, 200);
regmap_update_bits(dp_phy->regs, MTK_DP_PHY_DIG_SW_RST,
DP_GLB_SW_RST_PHYD, 1);
return 0;
}
static const struct phy_ops mtk_dp_phy_dev_ops = {
.init = mtk_dp_phy_init,
.configure = mtk_dp_phy_configure,
.reset = mtk_dp_phy_reset,
.owner = THIS_MODULE,
};
static int mtk_dp_phy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_dp_phy *dp_phy;
struct phy *phy;
struct regmap *regs;
regs = *(struct regmap **)dev->platform_data;
if (!regs)
return dev_err_probe(dev, -EINVAL,
"No data passed, requires struct regmap**\n");
dp_phy = devm_kzalloc(dev, sizeof(*dp_phy), GFP_KERNEL);
if (!dp_phy)
return -ENOMEM;
dp_phy->regs = regs;
phy = devm_phy_create(dev, NULL, &mtk_dp_phy_dev_ops);
if (IS_ERR(phy))
return dev_err_probe(dev, PTR_ERR(phy),
"Failed to create DP PHY\n");
phy_set_drvdata(phy, dp_phy);
if (!dev->of_node)
phy_create_lookup(phy, "dp", dev_name(dev));
return 0;
}
static struct platform_driver mtk_dp_phy_driver = {
.probe = mtk_dp_phy_probe,
.driver = {
.name = "mediatek-dp-phy",
},
};
module_platform_driver(mtk_dp_phy_driver);
MODULE_AUTHOR("Markus Schneider-Pargmann <[email protected]>");
MODULE_DESCRIPTION("MediaTek DP PHY Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/phy/mediatek/phy-mtk-dp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Sunplus SP7021 USB 2.0 phy driver
*
* Copyright (C) 2022 Sunplus Technology Inc., All rights reserved.
*
* Note 1 : non-posted write command for the registers accesses of
* Sunplus SP7021.
*
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#define HIGH_MASK_BITS GENMASK(31, 16)
#define LOW_MASK_BITS GENMASK(15, 0)
#define OTP_DISC_LEVEL_DEFAULT 0xd
/* GROUP UPHY */
#define CONFIG1 0x4
#define J_HS_TX_PWRSAV BIT(5)
#define CONFIG3 0xc
#define J_FORCE_DISC_ON BIT(5)
#define J_DEBUG_CTRL_ADDR_MACRO BIT(0)
#define CONFIG7 0x1c
#define J_DISC 0x1f
#define CONFIG9 0x24
#define J_ECO_PATH BIT(6)
#define CONFIG16 0x40
#define J_TBCWAIT_MASK GENMASK(6, 5)
#define J_TBCWAIT_1P1_MS FIELD_PREP(J_TBCWAIT_MASK, 0)
#define J_TVDM_SRC_DIS_MASK GENMASK(4, 3)
#define J_TVDM_SRC_DIS_8P2_MS FIELD_PREP(J_TVDM_SRC_DIS_MASK, 3)
#define J_TVDM_SRC_EN_MASK GENMASK(2, 1)
#define J_TVDM_SRC_EN_1P6_MS FIELD_PREP(J_TVDM_SRC_EN_MASK, 0)
#define J_BC_EN BIT(0)
#define CONFIG17 0x44
#define IBG_TRIM0_MASK GENMASK(7, 5)
#define IBG_TRIM0_SSLVHT FIELD_PREP(IBG_TRIM0_MASK, 4)
#define J_VDATREE_TRIM_MASK GENMASK(4, 1)
#define J_VDATREE_TRIM_DEFAULT FIELD_PREP(J_VDATREE_TRIM_MASK, 9)
#define CONFIG23 0x5c
#define PROB_MASK GENMASK(5, 3)
#define PROB FIELD_PREP(PROB_MASK, 7)
/* GROUP MOON4 */
#define UPHY_CONTROL0 0x0
#define UPHY_CONTROL1 0x4
#define UPHY_CONTROL2 0x8
#define MO1_UPHY_RX_CLK_SEL BIT(6)
#define MASK_MO1_UPHY_RX_CLK_SEL BIT(6 + 16)
#define UPHY_CONTROL3 0xc
#define MO1_UPHY_PLL_POWER_OFF_SEL BIT(7)
#define MASK_MO1_UPHY_PLL_POWER_OFF_SEL BIT(7 + 16)
#define MO1_UPHY_PLL_POWER_OFF BIT(3)
#define MASK_UPHY_PLL_POWER_OFF BIT(3 + 16)
struct sp_usbphy {
struct device *dev;
struct resource *phy_res_mem;
struct resource *moon4_res_mem;
struct reset_control *rstc;
struct clk *phy_clk;
void __iomem *phy_regs;
void __iomem *moon4_regs;
u32 disc_vol_addr_off;
};
static int update_disc_vol(struct sp_usbphy *usbphy)
{
struct nvmem_cell *cell;
char *disc_name = "disc_vol";
ssize_t otp_l = 0;
char *otp_v;
u32 val, set;
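/* Read the disconnect-voltage trim from OTP; fall back to the default level if the cell is missing or blank. */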
cell = nvmem_cell_get(usbphy->dev, disc_name);
if (IS_ERR_OR_NULL(cell)) {
if (PTR_ERR(cell) == -EPROBE_DEFER)
return -EPROBE_DEFER;
/* no usable cell: keep the default disconnect level */
otp_v = ERR_PTR(-ENOENT);
} else {
otp_v = nvmem_cell_read(cell, &otp_l);
nvmem_cell_put(cell);
}
if (!IS_ERR(otp_v)) {
set = *(otp_v + 1);
set = (set << (sizeof(char) * 8)) | *otp_v;
set = (set >> usbphy->disc_vol_addr_off) & J_DISC;
}
if (IS_ERR(otp_v) || set == 0)
set = OTP_DISC_LEVEL_DEFAULT;
val = readl(usbphy->phy_regs + CONFIG7);
val = (val & ~J_DISC) | set;
writel(val, usbphy->phy_regs + CONFIG7);
return 0;
}
static int sp_uphy_init(struct phy *phy)
{
struct sp_usbphy *usbphy = phy_get_drvdata(phy);
u32 val;
int ret;
ret = clk_prepare_enable(usbphy->phy_clk);
if (ret)
goto err_clk;
ret = reset_control_deassert(usbphy->rstc);
if (ret)
goto err_reset;
/* Default value modification */
writel(HIGH_MASK_BITS | 0x4002, usbphy->moon4_regs + UPHY_CONTROL0);
writel(HIGH_MASK_BITS | 0x8747, usbphy->moon4_regs + UPHY_CONTROL1);
/* disconnect voltage */
ret = update_disc_vol(usbphy);
if (ret < 0)
return ret;
/* board uphy 0 internal register modification for tid certification */
val = readl(usbphy->phy_regs + CONFIG9);
val &= ~(J_ECO_PATH);
writel(val, usbphy->phy_regs + CONFIG9);
val = readl(usbphy->phy_regs + CONFIG1);
val &= ~(J_HS_TX_PWRSAV);
writel(val, usbphy->phy_regs + CONFIG1);
val = readl(usbphy->phy_regs + CONFIG23);
val = (val & ~PROB) | PROB;
writel(val, usbphy->phy_regs + CONFIG23);
/* port 0 uphy clk fix */
writel(MASK_MO1_UPHY_RX_CLK_SEL | MO1_UPHY_RX_CLK_SEL,
usbphy->moon4_regs + UPHY_CONTROL2);
/* battery charger */
writel(J_TBCWAIT_1P1_MS | J_TVDM_SRC_DIS_8P2_MS | J_TVDM_SRC_EN_1P6_MS | J_BC_EN,
usbphy->phy_regs + CONFIG16);
writel(IBG_TRIM0_SSLVHT | J_VDATREE_TRIM_DEFAULT, usbphy->phy_regs + CONFIG17);
/* chirp mode */
writel(J_FORCE_DISC_ON | J_DEBUG_CTRL_ADDR_MACRO, usbphy->phy_regs + CONFIG3);
return 0;
err_reset:
reset_control_assert(usbphy->rstc);
err_clk:
clk_disable_unprepare(usbphy->phy_clk);
return ret;
}
static int sp_uphy_power_on(struct phy *phy)
{
struct sp_usbphy *usbphy = phy_get_drvdata(phy);
u32 pll_pwr_on, pll_pwr_off;
/* PLL power off/on twice */
pll_pwr_off = (readl(usbphy->moon4_regs + UPHY_CONTROL3) & ~LOW_MASK_BITS)
| MO1_UPHY_PLL_POWER_OFF_SEL | MO1_UPHY_PLL_POWER_OFF;
pll_pwr_on = (readl(usbphy->moon4_regs + UPHY_CONTROL3) & ~LOW_MASK_BITS)
| MO1_UPHY_PLL_POWER_OFF_SEL;
writel(MASK_MO1_UPHY_PLL_POWER_OFF_SEL | MASK_UPHY_PLL_POWER_OFF | pll_pwr_off,
usbphy->moon4_regs + UPHY_CONTROL3);
mdelay(1);
writel(MASK_MO1_UPHY_PLL_POWER_OFF_SEL | MASK_UPHY_PLL_POWER_OFF | pll_pwr_on,
usbphy->moon4_regs + UPHY_CONTROL3);
mdelay(1);
writel(MASK_MO1_UPHY_PLL_POWER_OFF_SEL | MASK_UPHY_PLL_POWER_OFF | pll_pwr_off,
usbphy->moon4_regs + UPHY_CONTROL3);
mdelay(1);
writel(MASK_MO1_UPHY_PLL_POWER_OFF_SEL | MASK_UPHY_PLL_POWER_OFF | pll_pwr_on,
usbphy->moon4_regs + UPHY_CONTROL3);
mdelay(1);
writel(MASK_MO1_UPHY_PLL_POWER_OFF_SEL | MASK_UPHY_PLL_POWER_OFF | 0x0,
usbphy->moon4_regs + UPHY_CONTROL3);
return 0;
}
static int sp_uphy_power_off(struct phy *phy)
{
struct sp_usbphy *usbphy = phy_get_drvdata(phy);
u32 pll_pwr_off;
pll_pwr_off = (readl(usbphy->moon4_regs + UPHY_CONTROL3) & ~LOW_MASK_BITS)
| MO1_UPHY_PLL_POWER_OFF_SEL | MO1_UPHY_PLL_POWER_OFF;
writel(MASK_MO1_UPHY_PLL_POWER_OFF_SEL | MASK_UPHY_PLL_POWER_OFF | pll_pwr_off,
usbphy->moon4_regs + UPHY_CONTROL3);
mdelay(1);
writel(MASK_MO1_UPHY_PLL_POWER_OFF_SEL | MASK_UPHY_PLL_POWER_OFF | 0x0,
usbphy->moon4_regs + UPHY_CONTROL3);
return 0;
}
static int sp_uphy_exit(struct phy *phy)
{
struct sp_usbphy *usbphy = phy_get_drvdata(phy);
reset_control_assert(usbphy->rstc);
clk_disable_unprepare(usbphy->phy_clk);
return 0;
}
static const struct phy_ops sp_uphy_ops = {
.init = sp_uphy_init,
.power_on = sp_uphy_power_on,
.power_off = sp_uphy_power_off,
.exit = sp_uphy_exit,
};
static const struct of_device_id sp_uphy_dt_ids[] = {
{.compatible = "sunplus,sp7021-usb2-phy", },
{ }
};
MODULE_DEVICE_TABLE(of, sp_uphy_dt_ids);
static int sp_usb_phy_probe(struct platform_device *pdev)
{
struct sp_usbphy *usbphy;
struct phy_provider *phy_provider;
struct phy *phy;
int ret;
usbphy = devm_kzalloc(&pdev->dev, sizeof(*usbphy), GFP_KERNEL);
if (!usbphy)
return -ENOMEM;
usbphy->dev = &pdev->dev;
usbphy->phy_res_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
usbphy->phy_regs = devm_ioremap_resource(&pdev->dev, usbphy->phy_res_mem);
if (IS_ERR(usbphy->phy_regs))
return PTR_ERR(usbphy->phy_regs);
usbphy->moon4_res_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "moon4");
if (!usbphy->moon4_res_mem)
return -EINVAL;
usbphy->moon4_regs = devm_ioremap(&pdev->dev, usbphy->moon4_res_mem->start,
resource_size(usbphy->moon4_res_mem));
if (!usbphy->moon4_regs)
return -ENOMEM;
usbphy->phy_clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(usbphy->phy_clk))
return PTR_ERR(usbphy->phy_clk);
usbphy->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (IS_ERR(usbphy->rstc))
return PTR_ERR(usbphy->rstc);
of_property_read_u32(pdev->dev.of_node, "sunplus,disc-vol-addr-off",
&usbphy->disc_vol_addr_off);
phy = devm_phy_create(&pdev->dev, NULL, &sp_uphy_ops);
if (IS_ERR(phy)) {
ret = PTR_ERR(phy);
return ret;
}
phy_set_drvdata(phy, usbphy);
phy_provider = devm_of_phy_provider_register(&pdev->dev, of_phy_simple_xlate);
return PTR_ERR_OR_ZERO(phy_provider);
}
static struct platform_driver sunplus_usb_phy_driver = {
.probe = sp_usb_phy_probe,
.driver = {
.name = "sunplus-usb2-phy",
.of_match_table = sp_uphy_dt_ids,
},
};
module_platform_driver(sunplus_usb_phy_driver);
MODULE_AUTHOR("Vincent Shih <[email protected]>");
MODULE_DESCRIPTION("Sunplus USB 2.0 phy driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/phy/sunplus/phy-sunplus-usb2.c |
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
* SerDes PHY driver for Microsemi Ocelot
*
* Copyright (c) 2018 Microsemi
*
*/
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/phy.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <soc/mscc/ocelot_hsio.h>
#include <dt-bindings/phy/phy-ocelot-serdes.h>
struct serdes_ctrl {
struct regmap *regs;
struct device *dev;
struct phy *phys[SERDES_MAX];
};
struct serdes_macro {
u8 idx;
/* Not used when in QSGMII or PCIe mode */
int port;
struct serdes_ctrl *ctrl;
};
#define MCB_S6G_CFG_TIMEOUT 50
static int __serdes_write_mcb_s6g(struct regmap *regmap, u8 macro, u32 op)
{
unsigned int regval = 0;
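/* Start a one-shot MCB transfer for the selected macro and wait for the hardware to clear the strobe bit. */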
regmap_write(regmap, HSIO_MCB_S6G_ADDR_CFG, op |
HSIO_MCB_S6G_ADDR_CFG_SERDES6G_ADDR(BIT(macro)));
return regmap_read_poll_timeout(regmap, HSIO_MCB_S6G_ADDR_CFG, regval,
(regval & op) != op, 100,
MCB_S6G_CFG_TIMEOUT * 1000);
}
static int serdes_commit_mcb_s6g(struct regmap *regmap, u8 macro)
{
return __serdes_write_mcb_s6g(regmap, macro,
HSIO_MCB_S6G_ADDR_CFG_SERDES6G_WR_ONE_SHOT);
}
static int serdes_update_mcb_s6g(struct regmap *regmap, u8 macro)
{
return __serdes_write_mcb_s6g(regmap, macro,
HSIO_MCB_S6G_ADDR_CFG_SERDES6G_RD_ONE_SHOT);
}
static int serdes_init_s6g(struct regmap *regmap, u8 serdes, int mode)
{
u32 pll_fsm_ctrl_data;
u32 ob_ena1v_mode;
u32 des_bw_ana;
u32 ob_ena_cas;
u32 if_mode;
u32 ob_lev;
u32 qrate;
int ret;
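/* Analog/PLL parameters differ between QSGMII and SGMII operation of the 6G macro. */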
if (mode == PHY_INTERFACE_MODE_QSGMII) {
pll_fsm_ctrl_data = 120;
ob_ena1v_mode = 0;
ob_ena_cas = 0;
des_bw_ana = 5;
ob_lev = 24;
if_mode = 3;
qrate = 0;
} else {
pll_fsm_ctrl_data = 60;
ob_ena1v_mode = 1;
ob_ena_cas = 2;
des_bw_ana = 3;
ob_lev = 48;
if_mode = 1;
qrate = 1;
}
ret = serdes_update_mcb_s6g(regmap, serdes);
if (ret)
return ret;
/* Test pattern */
regmap_update_bits(regmap, HSIO_S6G_COMMON_CFG,
HSIO_S6G_COMMON_CFG_SYS_RST, 0);
regmap_update_bits(regmap, HSIO_S6G_PLL_CFG,
HSIO_S6G_PLL_CFG_PLL_FSM_ENA, 0);
regmap_update_bits(regmap, HSIO_S6G_IB_CFG,
HSIO_S6G_IB_CFG_IB_SIG_DET_ENA |
HSIO_S6G_IB_CFG_IB_REG_ENA |
HSIO_S6G_IB_CFG_IB_SAM_ENA |
HSIO_S6G_IB_CFG_IB_EQZ_ENA |
HSIO_S6G_IB_CFG_IB_CONCUR |
HSIO_S6G_IB_CFG_IB_CAL_ENA,
HSIO_S6G_IB_CFG_IB_SIG_DET_ENA |
HSIO_S6G_IB_CFG_IB_REG_ENA |
HSIO_S6G_IB_CFG_IB_SAM_ENA |
HSIO_S6G_IB_CFG_IB_EQZ_ENA |
HSIO_S6G_IB_CFG_IB_CONCUR);
regmap_update_bits(regmap, HSIO_S6G_IB_CFG1,
HSIO_S6G_IB_CFG1_IB_FRC_OFFSET |
HSIO_S6G_IB_CFG1_IB_FRC_LP |
HSIO_S6G_IB_CFG1_IB_FRC_MID |
HSIO_S6G_IB_CFG1_IB_FRC_HP |
HSIO_S6G_IB_CFG1_IB_FILT_OFFSET |
HSIO_S6G_IB_CFG1_IB_FILT_LP |
HSIO_S6G_IB_CFG1_IB_FILT_MID |
HSIO_S6G_IB_CFG1_IB_FILT_HP,
HSIO_S6G_IB_CFG1_IB_FILT_OFFSET |
HSIO_S6G_IB_CFG1_IB_FILT_HP |
HSIO_S6G_IB_CFG1_IB_FILT_LP |
HSIO_S6G_IB_CFG1_IB_FILT_MID);
regmap_update_bits(regmap, HSIO_S6G_IB_CFG2,
HSIO_S6G_IB_CFG2_IB_UREG_M,
HSIO_S6G_IB_CFG2_IB_UREG(4));
regmap_update_bits(regmap, HSIO_S6G_IB_CFG3,
HSIO_S6G_IB_CFG3_IB_INI_OFFSET_M |
HSIO_S6G_IB_CFG3_IB_INI_LP_M |
HSIO_S6G_IB_CFG3_IB_INI_MID_M |
HSIO_S6G_IB_CFG3_IB_INI_HP_M,
HSIO_S6G_IB_CFG3_IB_INI_OFFSET(31) |
HSIO_S6G_IB_CFG3_IB_INI_LP(1) |
HSIO_S6G_IB_CFG3_IB_INI_MID(31) |
HSIO_S6G_IB_CFG3_IB_INI_HP(0));
regmap_update_bits(regmap, HSIO_S6G_MISC_CFG,
HSIO_S6G_MISC_CFG_LANE_RST,
HSIO_S6G_MISC_CFG_LANE_RST);
ret = serdes_commit_mcb_s6g(regmap, serdes);
if (ret)
return ret;
/* OB + DES + IB + SER CFG */
regmap_update_bits(regmap, HSIO_S6G_OB_CFG,
HSIO_S6G_OB_CFG_OB_IDLE |
HSIO_S6G_OB_CFG_OB_ENA1V_MODE |
HSIO_S6G_OB_CFG_OB_POST0_M |
HSIO_S6G_OB_CFG_OB_PREC_M,
(ob_ena1v_mode ? HSIO_S6G_OB_CFG_OB_ENA1V_MODE : 0) |
HSIO_S6G_OB_CFG_OB_POST0(0) |
HSIO_S6G_OB_CFG_OB_PREC(0));
regmap_update_bits(regmap, HSIO_S6G_OB_CFG1,
HSIO_S6G_OB_CFG1_OB_ENA_CAS_M |
HSIO_S6G_OB_CFG1_OB_LEV_M,
HSIO_S6G_OB_CFG1_OB_LEV(ob_lev) |
HSIO_S6G_OB_CFG1_OB_ENA_CAS(ob_ena_cas));
regmap_update_bits(regmap, HSIO_S6G_DES_CFG,
HSIO_S6G_DES_CFG_DES_PHS_CTRL_M |
HSIO_S6G_DES_CFG_DES_CPMD_SEL_M |
HSIO_S6G_DES_CFG_DES_BW_ANA_M,
HSIO_S6G_DES_CFG_DES_PHS_CTRL(2) |
HSIO_S6G_DES_CFG_DES_CPMD_SEL(0) |
HSIO_S6G_DES_CFG_DES_BW_ANA(des_bw_ana));
regmap_update_bits(regmap, HSIO_S6G_IB_CFG,
HSIO_S6G_IB_CFG_IB_SIG_DET_CLK_SEL_M |
HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_OFFSET_M,
HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_OFFSET(0) |
HSIO_S6G_IB_CFG_IB_SIG_DET_CLK_SEL(0));
regmap_update_bits(regmap, HSIO_S6G_IB_CFG1,
HSIO_S6G_IB_CFG1_IB_TSDET_M,
HSIO_S6G_IB_CFG1_IB_TSDET(16));
regmap_update_bits(regmap, HSIO_S6G_SER_CFG,
HSIO_S6G_SER_CFG_SER_ALISEL_M |
HSIO_S6G_SER_CFG_SER_ENALI,
HSIO_S6G_SER_CFG_SER_ALISEL(0));
regmap_update_bits(regmap, HSIO_S6G_PLL_CFG,
HSIO_S6G_PLL_CFG_PLL_DIV4 |
HSIO_S6G_PLL_CFG_PLL_ENA_ROT |
HSIO_S6G_PLL_CFG_PLL_FSM_CTRL_DATA_M |
HSIO_S6G_PLL_CFG_PLL_ROT_DIR |
HSIO_S6G_PLL_CFG_PLL_ROT_FRQ,
HSIO_S6G_PLL_CFG_PLL_FSM_CTRL_DATA
(pll_fsm_ctrl_data));
regmap_update_bits(regmap, HSIO_S6G_COMMON_CFG,
HSIO_S6G_COMMON_CFG_SYS_RST |
HSIO_S6G_COMMON_CFG_ENA_LANE |
HSIO_S6G_COMMON_CFG_PWD_RX |
HSIO_S6G_COMMON_CFG_PWD_TX |
HSIO_S6G_COMMON_CFG_HRATE |
HSIO_S6G_COMMON_CFG_QRATE |
HSIO_S6G_COMMON_CFG_ENA_ELOOP |
HSIO_S6G_COMMON_CFG_ENA_FLOOP |
HSIO_S6G_COMMON_CFG_IF_MODE_M,
HSIO_S6G_COMMON_CFG_SYS_RST |
HSIO_S6G_COMMON_CFG_ENA_LANE |
(qrate ? HSIO_S6G_COMMON_CFG_QRATE : 0) |
HSIO_S6G_COMMON_CFG_IF_MODE(if_mode));
regmap_update_bits(regmap, HSIO_S6G_MISC_CFG,
HSIO_S6G_MISC_CFG_LANE_RST |
HSIO_S6G_MISC_CFG_DES_100FX_CPMD_ENA |
HSIO_S6G_MISC_CFG_RX_LPI_MODE_ENA |
HSIO_S6G_MISC_CFG_TX_LPI_MODE_ENA,
HSIO_S6G_MISC_CFG_LANE_RST |
HSIO_S6G_MISC_CFG_RX_LPI_MODE_ENA);
ret = serdes_commit_mcb_s6g(regmap, serdes);
if (ret)
return ret;
regmap_update_bits(regmap, HSIO_S6G_PLL_CFG,
HSIO_S6G_PLL_CFG_PLL_FSM_ENA,
HSIO_S6G_PLL_CFG_PLL_FSM_ENA);
ret = serdes_commit_mcb_s6g(regmap, serdes);
if (ret)
return ret;
/* Wait for PLL bringup */
msleep(20);
regmap_update_bits(regmap, HSIO_S6G_IB_CFG,
HSIO_S6G_IB_CFG_IB_CAL_ENA,
HSIO_S6G_IB_CFG_IB_CAL_ENA);
regmap_update_bits(regmap, HSIO_S6G_MISC_CFG,
HSIO_S6G_MISC_CFG_LANE_RST, 0);
ret = serdes_commit_mcb_s6g(regmap, serdes);
if (ret)
return ret;
/* Wait for calibration */
msleep(60);
regmap_update_bits(regmap, HSIO_S6G_IB_CFG,
HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_OFFSET_M |
HSIO_S6G_IB_CFG_IB_SIG_DET_CLK_SEL_M,
HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_OFFSET(0) |
HSIO_S6G_IB_CFG_IB_SIG_DET_CLK_SEL(7));
regmap_update_bits(regmap, HSIO_S6G_IB_CFG1,
HSIO_S6G_IB_CFG1_IB_TSDET_M,
HSIO_S6G_IB_CFG1_IB_TSDET(3));
/* IB CFG */
return 0;
}
#define MCB_S1G_CFG_TIMEOUT 50
static int __serdes_write_mcb_s1g(struct regmap *regmap, u8 macro, u32 op)
{
unsigned int regval;
regmap_write(regmap, HSIO_MCB_S1G_ADDR_CFG, op |
HSIO_MCB_S1G_ADDR_CFG_SERDES1G_ADDR(BIT(macro)));
return regmap_read_poll_timeout(regmap, HSIO_MCB_S1G_ADDR_CFG, regval,
(regval & op) != op, 100,
MCB_S1G_CFG_TIMEOUT * 1000);
}
static int serdes_commit_mcb_s1g(struct regmap *regmap, u8 macro)
{
return __serdes_write_mcb_s1g(regmap, macro,
HSIO_MCB_S1G_ADDR_CFG_SERDES1G_WR_ONE_SHOT);
}
static int serdes_update_mcb_s1g(struct regmap *regmap, u8 macro)
{
return __serdes_write_mcb_s1g(regmap, macro,
HSIO_MCB_S1G_ADDR_CFG_SERDES1G_RD_ONE_SHOT);
}
static int serdes_init_s1g(struct regmap *regmap, u8 serdes)
{
int ret;
ret = serdes_update_mcb_s1g(regmap, serdes);
if (ret)
return ret;
regmap_update_bits(regmap, HSIO_S1G_COMMON_CFG,
HSIO_S1G_COMMON_CFG_SYS_RST |
HSIO_S1G_COMMON_CFG_ENA_LANE |
HSIO_S1G_COMMON_CFG_ENA_ELOOP |
HSIO_S1G_COMMON_CFG_ENA_FLOOP,
HSIO_S1G_COMMON_CFG_ENA_LANE);
regmap_update_bits(regmap, HSIO_S1G_PLL_CFG,
HSIO_S1G_PLL_CFG_PLL_FSM_ENA |
HSIO_S1G_PLL_CFG_PLL_FSM_CTRL_DATA_M,
HSIO_S1G_PLL_CFG_PLL_FSM_CTRL_DATA(200) |
HSIO_S1G_PLL_CFG_PLL_FSM_ENA);
regmap_update_bits(regmap, HSIO_S1G_MISC_CFG,
HSIO_S1G_MISC_CFG_DES_100FX_CPMD_ENA |
HSIO_S1G_MISC_CFG_LANE_RST,
HSIO_S1G_MISC_CFG_LANE_RST);
ret = serdes_commit_mcb_s1g(regmap, serdes);
if (ret)
return ret;
regmap_update_bits(regmap, HSIO_S1G_COMMON_CFG,
HSIO_S1G_COMMON_CFG_SYS_RST,
HSIO_S1G_COMMON_CFG_SYS_RST);
regmap_update_bits(regmap, HSIO_S1G_MISC_CFG,
HSIO_S1G_MISC_CFG_LANE_RST, 0);
ret = serdes_commit_mcb_s1g(regmap, serdes);
if (ret)
return ret;
return 0;
}
struct serdes_mux {
u8 idx;
u8 port;
enum phy_mode mode;
int submode;
u32 mask;
u32 mux;
};
#define SERDES_MUX(_idx, _port, _mode, _submode, _mask, _mux) { \
.idx = _idx, \
.port = _port, \
.mode = _mode, \
.submode = _submode, \
.mask = _mask, \
.mux = _mux, \
}
#define SERDES_MUX_SGMII(i, p, m, c) \
SERDES_MUX(i, p, PHY_MODE_ETHERNET, PHY_INTERFACE_MODE_SGMII, m, c)
#define SERDES_MUX_QSGMII(i, p, m, c) \
SERDES_MUX(i, p, PHY_MODE_ETHERNET, PHY_INTERFACE_MODE_QSGMII, m, c)
static const struct serdes_mux ocelot_serdes_muxes[] = {
SERDES_MUX_SGMII(SERDES1G(0), 0, 0, 0),
SERDES_MUX_SGMII(SERDES1G(1), 1, HSIO_HW_CFG_DEV1G_5_MODE, 0),
SERDES_MUX_SGMII(SERDES1G(1), 5, HSIO_HW_CFG_QSGMII_ENA |
HSIO_HW_CFG_DEV1G_5_MODE, HSIO_HW_CFG_DEV1G_5_MODE),
SERDES_MUX_SGMII(SERDES1G(2), 2, HSIO_HW_CFG_DEV1G_4_MODE, 0),
SERDES_MUX_SGMII(SERDES1G(2), 4, HSIO_HW_CFG_QSGMII_ENA |
HSIO_HW_CFG_DEV1G_4_MODE, HSIO_HW_CFG_DEV1G_4_MODE),
SERDES_MUX_SGMII(SERDES1G(3), 3, HSIO_HW_CFG_DEV1G_6_MODE, 0),
SERDES_MUX_SGMII(SERDES1G(3), 6, HSIO_HW_CFG_QSGMII_ENA |
HSIO_HW_CFG_DEV1G_6_MODE, HSIO_HW_CFG_DEV1G_6_MODE),
SERDES_MUX_SGMII(SERDES1G(4), 4, HSIO_HW_CFG_QSGMII_ENA |
HSIO_HW_CFG_DEV1G_4_MODE | HSIO_HW_CFG_DEV1G_9_MODE,
0),
SERDES_MUX_SGMII(SERDES1G(4), 9, HSIO_HW_CFG_DEV1G_4_MODE |
HSIO_HW_CFG_DEV1G_9_MODE, HSIO_HW_CFG_DEV1G_4_MODE |
HSIO_HW_CFG_DEV1G_9_MODE),
SERDES_MUX_SGMII(SERDES1G(5), 5, HSIO_HW_CFG_QSGMII_ENA |
HSIO_HW_CFG_DEV1G_5_MODE | HSIO_HW_CFG_DEV2G5_10_MODE,
0),
SERDES_MUX_SGMII(SERDES1G(5), 10, HSIO_HW_CFG_PCIE_ENA |
HSIO_HW_CFG_DEV1G_5_MODE | HSIO_HW_CFG_DEV2G5_10_MODE,
HSIO_HW_CFG_DEV1G_5_MODE | HSIO_HW_CFG_DEV2G5_10_MODE),
SERDES_MUX_QSGMII(SERDES6G(0), 4, HSIO_HW_CFG_QSGMII_ENA,
HSIO_HW_CFG_QSGMII_ENA),
SERDES_MUX_QSGMII(SERDES6G(0), 5, HSIO_HW_CFG_QSGMII_ENA,
HSIO_HW_CFG_QSGMII_ENA),
SERDES_MUX_QSGMII(SERDES6G(0), 6, HSIO_HW_CFG_QSGMII_ENA,
HSIO_HW_CFG_QSGMII_ENA),
SERDES_MUX_SGMII(SERDES6G(0), 7, HSIO_HW_CFG_QSGMII_ENA, 0),
SERDES_MUX_QSGMII(SERDES6G(0), 7, HSIO_HW_CFG_QSGMII_ENA,
HSIO_HW_CFG_QSGMII_ENA),
SERDES_MUX_SGMII(SERDES6G(1), 8, 0, 0),
SERDES_MUX_SGMII(SERDES6G(2), 10, HSIO_HW_CFG_PCIE_ENA |
HSIO_HW_CFG_DEV2G5_10_MODE, 0),
SERDES_MUX(SERDES6G(2), 10, PHY_MODE_PCIE, 0, HSIO_HW_CFG_PCIE_ENA,
HSIO_HW_CFG_PCIE_ENA),
};
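/*
 * Look up the requested macro/port/mode combination, program the HW_CFG
 * mux accordingly and initialize the selected 1G or 6G macro.
 */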
static int serdes_set_mode(struct phy *phy, enum phy_mode mode, int submode)
{
struct serdes_macro *macro = phy_get_drvdata(phy);
unsigned int i;
int ret;
/* As of now only PHY_MODE_ETHERNET is supported */
if (mode != PHY_MODE_ETHERNET)
return -EOPNOTSUPP;
for (i = 0; i < ARRAY_SIZE(ocelot_serdes_muxes); i++) {
if (macro->idx != ocelot_serdes_muxes[i].idx ||
mode != ocelot_serdes_muxes[i].mode ||
submode != ocelot_serdes_muxes[i].submode)
continue;
if (submode != PHY_INTERFACE_MODE_QSGMII &&
macro->port != ocelot_serdes_muxes[i].port)
continue;
ret = regmap_update_bits(macro->ctrl->regs, HSIO_HW_CFG,
ocelot_serdes_muxes[i].mask,
ocelot_serdes_muxes[i].mux);
if (ret)
return ret;
if (macro->idx <= SERDES1G_MAX)
return serdes_init_s1g(macro->ctrl->regs, macro->idx);
else if (macro->idx <= SERDES6G_MAX)
return serdes_init_s6g(macro->ctrl->regs,
macro->idx - (SERDES1G_MAX + 1),
ocelot_serdes_muxes[i].submode);
/* PCIe not supported yet */
return -EOPNOTSUPP;
}
return -EINVAL;
}
static const struct phy_ops serdes_ops = {
.set_mode = serdes_set_mode,
.owner = THIS_MODULE,
};
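/*
 * Translate a two-cell phy phandle (port, SerDes index) into the matching
 * PHY instance and record which port claimed the macro. Only SERDES6G(0)
 * may be shared between ports, for QSGMII.
 */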
static struct phy *serdes_simple_xlate(struct device *dev,
struct of_phandle_args *args)
{
struct serdes_ctrl *ctrl = dev_get_drvdata(dev);
unsigned int port, idx, i;
if (args->args_count != 2)
return ERR_PTR(-EINVAL);
port = args->args[0];
idx = args->args[1];
for (i = 0; i < SERDES_MAX; i++) {
struct serdes_macro *macro = phy_get_drvdata(ctrl->phys[i]);
if (idx != macro->idx)
continue;
/* SERDES6G(0) is the only SerDes capable of QSGMII */
if (idx != SERDES6G(0) && macro->port >= 0)
return ERR_PTR(-EBUSY);
macro->port = port;
return ctrl->phys[i];
}
return ERR_PTR(-ENODEV);
}
static int serdes_phy_create(struct serdes_ctrl *ctrl, u8 idx, struct phy **phy)
{
struct serdes_macro *macro;
*phy = devm_phy_create(ctrl->dev, NULL, &serdes_ops);
if (IS_ERR(*phy))
return PTR_ERR(*phy);
macro = devm_kzalloc(ctrl->dev, sizeof(*macro), GFP_KERNEL);
if (!macro)
return -ENOMEM;
macro->idx = idx;
macro->ctrl = ctrl;
macro->port = -1;
phy_set_drvdata(*phy, macro);
return 0;
}
static int serdes_probe(struct platform_device *pdev)
{
struct phy_provider *provider;
struct serdes_ctrl *ctrl;
struct resource *res;
unsigned int i;
int ret;
ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
if (!ctrl)
return -ENOMEM;
ctrl->dev = &pdev->dev;
ctrl->regs = syscon_node_to_regmap(pdev->dev.parent->of_node);
if (IS_ERR(ctrl->regs)) {
/* Fall back to using IORESOURCE_REG, if possible */
res = platform_get_resource(pdev, IORESOURCE_REG, 0);
if (res)
ctrl->regs = dev_get_regmap(ctrl->dev->parent,
res->name);
}
if (IS_ERR(ctrl->regs))
return PTR_ERR(ctrl->regs);
for (i = 0; i < SERDES_MAX; i++) {
ret = serdes_phy_create(ctrl, i, &ctrl->phys[i]);
if (ret)
return ret;
}
dev_set_drvdata(&pdev->dev, ctrl);
provider = devm_of_phy_provider_register(ctrl->dev,
serdes_simple_xlate);
return PTR_ERR_OR_ZERO(provider);
}
static const struct of_device_id serdes_ids[] = {
{ .compatible = "mscc,vsc7514-serdes", },
{},
};
MODULE_DEVICE_TABLE(of, serdes_ids);
static struct platform_driver mscc_ocelot_serdes = {
.probe = serdes_probe,
.driver = {
.name = "mscc,ocelot-serdes",
.of_match_table = of_match_ptr(serdes_ids),
},
};
module_platform_driver(mscc_ocelot_serdes);
MODULE_AUTHOR("Quentin Schulz <[email protected]>");
MODULE_DESCRIPTION("SerDes driver for Microsemi Ocelot");
MODULE_LICENSE("Dual MIT/GPL");
| linux-master | drivers/phy/mscc/phy-ocelot-serdes.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <soc/tegra/fuse.h>
#include "xusb.h"
#define FUSE_SKU_CALIB_HS_CURR_LEVEL_PADX_SHIFT(x) ((x) ? 15 : 0)
#define FUSE_SKU_CALIB_HS_CURR_LEVEL_PAD_MASK 0x3f
#define FUSE_SKU_CALIB_HS_IREF_CAP_SHIFT 13
#define FUSE_SKU_CALIB_HS_IREF_CAP_MASK 0x3
#define FUSE_SKU_CALIB_HS_SQUELCH_LEVEL_SHIFT 11
#define FUSE_SKU_CALIB_HS_SQUELCH_LEVEL_MASK 0x3
#define FUSE_SKU_CALIB_HS_TERM_RANGE_ADJ_SHIFT 7
#define FUSE_SKU_CALIB_HS_TERM_RANGE_ADJ_MASK 0xf
#define XUSB_PADCTL_USB2_PORT_CAP 0x008
#define XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_SHIFT(x) ((x) * 4)
#define XUSB_PADCTL_USB2_PORT_CAP_PORT_CAP_MASK 0x3
#define XUSB_PADCTL_USB2_PORT_CAP_DISABLED 0x0
#define XUSB_PADCTL_USB2_PORT_CAP_HOST 0x1
#define XUSB_PADCTL_USB2_PORT_CAP_DEVICE 0x2
#define XUSB_PADCTL_USB2_PORT_CAP_OTG 0x3
#define XUSB_PADCTL_SS_PORT_MAP 0x014
#define XUSB_PADCTL_SS_PORT_MAP_PORTX_INTERNAL(x) (1 << (((x) * 4) + 3))
#define XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP_SHIFT(x) ((x) * 4)
#define XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP_MASK(x) (0x7 << ((x) * 4))
#define XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP(x, v) (((v) & 0x7) << ((x) * 4))
#define XUSB_PADCTL_SS_PORT_MAP_PORT_MAP_MASK 0x7
#define XUSB_PADCTL_ELPG_PROGRAM 0x01c
#define XUSB_PADCTL_ELPG_PROGRAM_AUX_MUX_LP0_VCORE_DOWN (1 << 26)
#define XUSB_PADCTL_ELPG_PROGRAM_AUX_MUX_LP0_CLAMP_EN_EARLY (1 << 25)
#define XUSB_PADCTL_ELPG_PROGRAM_AUX_MUX_LP0_CLAMP_EN (1 << 24)
#define XUSB_PADCTL_ELPG_PROGRAM_SSPX_ELPG_VCORE_DOWN(x) (1 << (18 + (x) * 4))
#define XUSB_PADCTL_ELPG_PROGRAM_SSPX_ELPG_CLAMP_EN_EARLY(x) \
(1 << (17 + (x) * 4))
#define XUSB_PADCTL_ELPG_PROGRAM_SSPX_ELPG_CLAMP_EN(x) (1 << (16 + (x) * 4))
#define XUSB_PADCTL_IOPHY_PLL_P0_CTL1 0x040
#define XUSB_PADCTL_IOPHY_PLL_P0_CTL1_PLL0_LOCKDET (1 << 19)
#define XUSB_PADCTL_IOPHY_PLL_P0_CTL1_REFCLK_SEL_MASK (0xf << 12)
#define XUSB_PADCTL_IOPHY_PLL_P0_CTL1_PLL_RST (1 << 1)
#define XUSB_PADCTL_IOPHY_PLL_P0_CTL2 0x044
#define XUSB_PADCTL_IOPHY_PLL_P0_CTL2_REFCLKBUF_EN (1 << 6)
#define XUSB_PADCTL_IOPHY_PLL_P0_CTL2_TXCLKREF_EN (1 << 5)
#define XUSB_PADCTL_IOPHY_PLL_P0_CTL2_TXCLKREF_SEL (1 << 4)
#define XUSB_PADCTL_IOPHY_USB3_PADX_CTL2(x) (0x058 + (x) * 4)
#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_CDR_CNTL_SHIFT 24
#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_CDR_CNTL_MASK 0xff
#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_CDR_CNTL_VAL 0x24
#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_Z_SHIFT 16
#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_Z_MASK 0x3f
#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_G_SHIFT 8
#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_G_MASK 0x3f
#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_SHIFT 8
#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_MASK 0xffff
#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_VAL 0xf070
#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_WANDER_SHIFT 4
#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_WANDER_MASK 0xf
#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_WANDER_VAL 0xf
#define XUSB_PADCTL_IOPHY_USB3_PADX_CTL4(x) (0x068 + (x) * 4)
#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_TAP_SHIFT 24
#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_TAP_MASK 0x1f
#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_AMP_SHIFT 16
#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_AMP_MASK 0x7f
#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_VAL 0x002008ee
#define XUSB_PADCTL_IOPHY_MISC_PAD_PX_CTL2(x) ((x) < 2 ? 0x078 + (x) * 4 : \
0x0f8 + (x) * 4)
#define XUSB_PADCTL_IOPHY_MISC_PAD_CTL2_SPARE_IN_SHIFT 28
#define XUSB_PADCTL_IOPHY_MISC_PAD_CTL2_SPARE_IN_MASK 0x3
#define XUSB_PADCTL_IOPHY_MISC_PAD_CTL2_SPARE_IN_VAL 0x1
#define XUSB_PADCTL_IOPHY_MISC_PAD_PX_CTL5(x) ((x) < 2 ? 0x090 + (x) * 4 : \
0x11c + (x) * 4)
#define XUSB_PADCTL_IOPHY_MISC_PAD_CTL5_RX_QEYE_EN (1 << 8)
#define XUSB_PADCTL_IOPHY_MISC_PAD_PX_CTL6(x) ((x) < 2 ? 0x098 + (x) * 4 : \
0x128 + (x) * 4)
#define XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SHIFT 24
#define XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_G_Z_MASK 0x3f
#define XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_TAP_MASK 0x1f
#define XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_AMP_MASK 0x7f
#define XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_SHIFT 16
#define XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_MASK 0xff
#define XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_G_Z 0x21
#define XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_TAP 0x32
#define XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_AMP 0x33
#define XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_CTLE_Z 0x48
#define XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_LATCH_G_Z 0xa1
#define XUSB_PADCTL_USB2_OTG_PADX_CTL0(x) (0x0a0 + (x) * 4)
#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_PD_ZI (1 << 21)
#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_PD2 (1 << 20)
#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_PD (1 << 19)
#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_LS_RSLEW_SHIFT 14
#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_LS_RSLEW_MASK 0x3
#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_LS_RSLEW_VAL(x) ((x) ? 0x0 : 0x3)
#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_SLEW_SHIFT 6
#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_SLEW_MASK 0x3f
#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_SLEW_VAL 0x0e
#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_CURR_LEVEL_SHIFT 0
#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_CURR_LEVEL_MASK 0x3f
#define XUSB_PADCTL_USB2_OTG_PADX_CTL1(x) (0x0ac + (x) * 4)
#define XUSB_PADCTL_USB2_OTG_PAD_CTL1_HS_IREF_CAP_SHIFT 9
#define XUSB_PADCTL_USB2_OTG_PAD_CTL1_HS_IREF_CAP_MASK 0x3
#define XUSB_PADCTL_USB2_OTG_PAD_CTL1_TERM_RANGE_ADJ_SHIFT 3
#define XUSB_PADCTL_USB2_OTG_PAD_CTL1_TERM_RANGE_ADJ_MASK 0x7
#define XUSB_PADCTL_USB2_OTG_PAD_CTL1_PD_DR (1 << 2)
#define XUSB_PADCTL_USB2_OTG_PAD_CTL1_PD_DISC_FORCE_POWERUP (1 << 1)
#define XUSB_PADCTL_USB2_OTG_PAD_CTL1_PD_CHRP_FORCE_POWERUP (1 << 0)
#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0 0x0b8
#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0_PD (1 << 12)
#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_DISCON_LEVEL_SHIFT 2
#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_DISCON_LEVEL_MASK 0x7
#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_DISCON_LEVEL_VAL 0x5
#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_SQUELCH_LEVEL_SHIFT 0
#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_SQUELCH_LEVEL_MASK 0x3
#define XUSB_PADCTL_HSIC_PADX_CTL0(x) (0x0c0 + (x) * 4)
#define XUSB_PADCTL_HSIC_PAD_CTL0_TX_RSLEWN_SHIFT 12
#define XUSB_PADCTL_HSIC_PAD_CTL0_TX_RSLEWN_MASK 0x7
#define XUSB_PADCTL_HSIC_PAD_CTL0_TX_RSLEWP_SHIFT 8
#define XUSB_PADCTL_HSIC_PAD_CTL0_TX_RSLEWP_MASK 0x7
#define XUSB_PADCTL_HSIC_PAD_CTL0_TX_RTUNEN_SHIFT 4
#define XUSB_PADCTL_HSIC_PAD_CTL0_TX_RTUNEN_MASK 0x7
#define XUSB_PADCTL_HSIC_PAD_CTL0_TX_RTUNEP_SHIFT 0
#define XUSB_PADCTL_HSIC_PAD_CTL0_TX_RTUNEP_MASK 0x7
#define XUSB_PADCTL_HSIC_PADX_CTL1(x) (0x0c8 + (x) * 4)
#define XUSB_PADCTL_HSIC_PAD_CTL1_RPU_STROBE (1 << 10)
#define XUSB_PADCTL_HSIC_PAD_CTL1_RPU_DATA (1 << 9)
#define XUSB_PADCTL_HSIC_PAD_CTL1_RPD_STROBE (1 << 8)
#define XUSB_PADCTL_HSIC_PAD_CTL1_RPD_DATA (1 << 7)
#define XUSB_PADCTL_HSIC_PAD_CTL1_PD_ZI (1 << 5)
#define XUSB_PADCTL_HSIC_PAD_CTL1_PD_RX (1 << 4)
#define XUSB_PADCTL_HSIC_PAD_CTL1_PD_TRX (1 << 3)
#define XUSB_PADCTL_HSIC_PAD_CTL1_PD_TX (1 << 2)
#define XUSB_PADCTL_HSIC_PAD_CTL1_AUTO_TERM_EN (1 << 0)
#define XUSB_PADCTL_HSIC_PADX_CTL2(x) (0x0d0 + (x) * 4)
#define XUSB_PADCTL_HSIC_PAD_CTL2_RX_STROBE_TRIM_SHIFT 4
#define XUSB_PADCTL_HSIC_PAD_CTL2_RX_STROBE_TRIM_MASK 0x7
#define XUSB_PADCTL_HSIC_PAD_CTL2_RX_DATA_TRIM_SHIFT 0
#define XUSB_PADCTL_HSIC_PAD_CTL2_RX_DATA_TRIM_MASK 0x7
#define XUSB_PADCTL_HSIC_STRB_TRIM_CONTROL 0x0e0
#define XUSB_PADCTL_HSIC_STRB_TRIM_CONTROL_STRB_TRIM_MASK 0x1f
#define XUSB_PADCTL_USB3_PAD_MUX 0x134
#define XUSB_PADCTL_USB3_PAD_MUX_PCIE_IDDQ_DISABLE(x) (1 << (1 + (x)))
#define XUSB_PADCTL_USB3_PAD_MUX_SATA_IDDQ_DISABLE(x) (1 << (6 + (x)))
#define XUSB_PADCTL_IOPHY_PLL_S0_CTL1 0x138
#define XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL1_LOCKDET (1 << 27)
#define XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL1_MODE (1 << 24)
#define XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL0_REFCLK_NDIV_SHIFT 20
#define XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL0_REFCLK_NDIV_MASK 0x3
#define XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_PWR_OVRD (1 << 3)
#define XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_RST (1 << 1)
#define XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_IDDQ (1 << 0)
#define XUSB_PADCTL_IOPHY_PLL_S0_CTL2 0x13c
#define XUSB_PADCTL_IOPHY_PLL_S0_CTL2_PLL1_CP_CNTL_SHIFT 20
#define XUSB_PADCTL_IOPHY_PLL_S0_CTL2_PLL1_CP_CNTL_MASK 0xf
#define XUSB_PADCTL_IOPHY_PLL_S0_CTL2_PLL0_CP_CNTL_SHIFT 16
#define XUSB_PADCTL_IOPHY_PLL_S0_CTL2_PLL0_CP_CNTL_MASK 0xf
#define XUSB_PADCTL_IOPHY_PLL_S0_CTL2_TCLKOUT_EN (1 << 12)
#define XUSB_PADCTL_IOPHY_PLL_S0_CTL2_TXCLKREF_SEL (1 << 4)
#define XUSB_PADCTL_IOPHY_PLL_S0_CTL2_XDIGCLK_SEL_SHIFT 0
#define XUSB_PADCTL_IOPHY_PLL_S0_CTL2_XDIGCLK_SEL_MASK 0x7
#define XUSB_PADCTL_IOPHY_PLL_S0_CTL3 0x140
#define XUSB_PADCTL_IOPHY_PLL_S0_CTL3_RCAL_BYPASS (1 << 7)
#define XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1 0x148
#define XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1_IDDQ_OVRD (1 << 1)
#define XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1_IDDQ (1 << 0)
#define XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL2 0x14c
#define XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL5 0x158
#define XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL6 0x15c
struct tegra124_xusb_fuse_calibration {
u32 hs_curr_level[3];
u32 hs_iref_cap;
u32 hs_term_range_adj;
u32 hs_squelch_level;
};
struct tegra124_xusb_padctl {
struct tegra_xusb_padctl base;
struct tegra124_xusb_fuse_calibration fuse;
};
static inline struct tegra124_xusb_padctl *
to_tegra124_xusb_padctl(struct tegra_xusb_padctl *padctl)
{
return container_of(padctl, struct tegra124_xusb_padctl, base);
}
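/*
 * The pad controller is shared by all pads and lanes, so enabling is
 * reference counted: only the first user lifts the AUX_MUX_LP0 clamps,
 * with short settling delays between the individual steps.
 */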
static int tegra124_xusb_padctl_enable(struct tegra_xusb_padctl *padctl)
{
u32 value;
mutex_lock(&padctl->lock);
if (padctl->enable++ > 0)
goto out;
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
value &= ~XUSB_PADCTL_ELPG_PROGRAM_AUX_MUX_LP0_CLAMP_EN;
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
usleep_range(100, 200);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
value &= ~XUSB_PADCTL_ELPG_PROGRAM_AUX_MUX_LP0_CLAMP_EN_EARLY;
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
usleep_range(100, 200);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
value &= ~XUSB_PADCTL_ELPG_PROGRAM_AUX_MUX_LP0_VCORE_DOWN;
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
out:
mutex_unlock(&padctl->lock);
return 0;
}
static int tegra124_xusb_padctl_disable(struct tegra_xusb_padctl *padctl)
{
u32 value;
mutex_lock(&padctl->lock);
if (WARN_ON(padctl->enable == 0))
goto out;
if (--padctl->enable > 0)
goto out;
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
value |= XUSB_PADCTL_ELPG_PROGRAM_AUX_MUX_LP0_VCORE_DOWN;
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
usleep_range(100, 200);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
value |= XUSB_PADCTL_ELPG_PROGRAM_AUX_MUX_LP0_CLAMP_EN_EARLY;
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
usleep_range(100, 200);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
value |= XUSB_PADCTL_ELPG_PROGRAM_AUX_MUX_LP0_CLAMP_EN;
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
out:
mutex_unlock(&padctl->lock);
return 0;
}
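/*
 * Latch the DFE (TAP1/AMP) and CTLE (G/Z) calibration results by selecting
 * them one after another on the MISC_OUT readback mux, and write the saved
 * values back into the pad control registers so they survive ELPG.
 */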
static int tegra124_usb3_save_context(struct tegra_xusb_padctl *padctl,
unsigned int index)
{
struct tegra_xusb_usb3_port *port;
struct tegra_xusb_lane *lane;
u32 value, offset;
port = tegra_xusb_find_usb3_port(padctl, index);
if (!port)
return -ENODEV;
port->context_saved = true;
lane = port->base.lane;
if (lane->pad == padctl->pcie)
offset = XUSB_PADCTL_IOPHY_MISC_PAD_PX_CTL6(lane->index);
else
offset = XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL6;
value = padctl_readl(padctl, offset);
value &= ~(XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_MASK <<
XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_SHIFT);
value |= XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_TAP <<
XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_SHIFT;
padctl_writel(padctl, value, offset);
value = padctl_readl(padctl, offset) >>
XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SHIFT;
port->tap1 = value & XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_TAP_MASK;
value = padctl_readl(padctl, offset);
value &= ~(XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_MASK <<
XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_SHIFT);
value |= XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_AMP <<
XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_SHIFT;
padctl_writel(padctl, value, offset);
value = padctl_readl(padctl, offset) >>
XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SHIFT;
port->amp = value & XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_AMP_MASK;
value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_USB3_PADX_CTL4(index));
value &= ~((XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_TAP_MASK <<
XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_TAP_SHIFT) |
(XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_AMP_MASK <<
XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_AMP_SHIFT));
value |= (port->tap1 <<
XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_TAP_SHIFT) |
(port->amp <<
XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_AMP_SHIFT);
padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_USB3_PADX_CTL4(index));
value = padctl_readl(padctl, offset);
value &= ~(XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_MASK <<
XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_SHIFT);
value |= XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_LATCH_G_Z <<
XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_SHIFT;
padctl_writel(padctl, value, offset);
value = padctl_readl(padctl, offset);
value &= ~(XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_MASK <<
XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_SHIFT);
value |= XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_G_Z <<
XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_SHIFT;
padctl_writel(padctl, value, offset);
value = padctl_readl(padctl, offset) >>
XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SHIFT;
port->ctle_g = value &
XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_G_Z_MASK;
value = padctl_readl(padctl, offset);
value &= ~(XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_MASK <<
XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_SHIFT);
value |= XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_CTLE_Z <<
XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_SHIFT;
padctl_writel(padctl, value, offset);
value = padctl_readl(padctl, offset) >>
XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SHIFT;
port->ctle_z = value &
XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_G_Z_MASK;
value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_USB3_PADX_CTL2(index));
value &= ~((XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_G_MASK <<
XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_G_SHIFT) |
(XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_Z_MASK <<
XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_Z_SHIFT));
value |= (port->ctle_g <<
XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_G_SHIFT) |
(port->ctle_z <<
XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_Z_SHIFT);
padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_USB3_PADX_CTL2(index));
return 0;
}
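/*
 * Force the HSIC bus into (or out of) the idle state by pulling DATA low
 * and STROBE high.
 */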
static int tegra124_hsic_set_idle(struct tegra_xusb_padctl *padctl,
unsigned int index, bool idle)
{
u32 value;
value = padctl_readl(padctl, XUSB_PADCTL_HSIC_PADX_CTL1(index));
if (idle)
value |= XUSB_PADCTL_HSIC_PAD_CTL1_RPD_DATA |
XUSB_PADCTL_HSIC_PAD_CTL1_RPU_STROBE;
else
value &= ~(XUSB_PADCTL_HSIC_PAD_CTL1_RPD_DATA |
XUSB_PADCTL_HSIC_PAD_CTL1_RPU_STROBE);
padctl_writel(padctl, value, XUSB_PADCTL_HSIC_PADX_CTL1(index));
return 0;
}
#define TEGRA124_LANE(_name, _offset, _shift, _mask, _type) \
{ \
.name = _name, \
.offset = _offset, \
.shift = _shift, \
.mask = _mask, \
.num_funcs = ARRAY_SIZE(tegra124_##_type##_functions), \
.funcs = tegra124_##_type##_functions, \
}
static const char * const tegra124_usb2_functions[] = {
"snps",
"xusb",
"uart",
};
static const struct tegra_xusb_lane_soc tegra124_usb2_lanes[] = {
TEGRA124_LANE("usb2-0", 0x004, 0, 0x3, usb2),
TEGRA124_LANE("usb2-1", 0x004, 2, 0x3, usb2),
TEGRA124_LANE("usb2-2", 0x004, 4, 0x3, usb2),
};
static struct tegra_xusb_lane *
tegra124_usb2_lane_probe(struct tegra_xusb_pad *pad, struct device_node *np,
unsigned int index)
{
struct tegra_xusb_usb2_lane *usb2;
int err;
usb2 = kzalloc(sizeof(*usb2), GFP_KERNEL);
if (!usb2)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&usb2->base.list);
usb2->base.soc = &pad->soc->lanes[index];
usb2->base.index = index;
usb2->base.pad = pad;
usb2->base.np = np;
err = tegra_xusb_lane_parse_dt(&usb2->base, np);
if (err < 0) {
kfree(usb2);
return ERR_PTR(err);
}
return &usb2->base;
}
static void tegra124_usb2_lane_remove(struct tegra_xusb_lane *lane)
{
struct tegra_xusb_usb2_lane *usb2 = to_usb2_lane(lane);
kfree(usb2);
}
static const struct tegra_xusb_lane_ops tegra124_usb2_lane_ops = {
.probe = tegra124_usb2_lane_probe,
.remove = tegra124_usb2_lane_remove,
};
static int tegra124_usb2_phy_init(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
return tegra124_xusb_padctl_enable(lane->pad->padctl);
}
static int tegra124_usb2_phy_exit(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
return tegra124_xusb_padctl_disable(lane->pad->padctl);
}
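/*
 * Program the bias pad and the per-lane OTG pad with the calibration
 * values read from the fuses, switch the port into host mode, enable the
 * VBUS supply and power up the shared bias pad on first use.
 */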
static int tegra124_usb2_phy_power_on(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
struct tegra_xusb_usb2_lane *usb2 = to_usb2_lane(lane);
struct tegra_xusb_usb2_pad *pad = to_usb2_pad(lane->pad);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
struct tegra124_xusb_padctl *priv;
struct tegra_xusb_usb2_port *port;
unsigned int index = lane->index;
u32 value;
int err;
port = tegra_xusb_find_usb2_port(padctl, index);
if (!port) {
dev_err(&phy->dev, "no port found for USB2 lane %u\n", index);
return -ENODEV;
}
priv = to_tegra124_xusb_padctl(padctl);
value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL0);
value &= ~((XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_SQUELCH_LEVEL_MASK <<
XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_SQUELCH_LEVEL_SHIFT) |
(XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_DISCON_LEVEL_MASK <<
XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_DISCON_LEVEL_SHIFT));
value |= (priv->fuse.hs_squelch_level <<
XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_SQUELCH_LEVEL_SHIFT) |
(XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_DISCON_LEVEL_VAL <<
XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_DISCON_LEVEL_SHIFT);
padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL0);
value = padctl_readl(padctl, XUSB_PADCTL_USB2_PORT_CAP);
value &= ~(XUSB_PADCTL_USB2_PORT_CAP_PORT_CAP_MASK <<
XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_SHIFT(index));
value |= XUSB_PADCTL_USB2_PORT_CAP_HOST <<
XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_SHIFT(index);
padctl_writel(padctl, value, XUSB_PADCTL_USB2_PORT_CAP);
value = padctl_readl(padctl, XUSB_PADCTL_USB2_OTG_PADX_CTL0(index));
value &= ~((XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_CURR_LEVEL_MASK <<
XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_CURR_LEVEL_SHIFT) |
(XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_SLEW_MASK <<
XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_SLEW_SHIFT) |
(XUSB_PADCTL_USB2_OTG_PAD_CTL0_LS_RSLEW_MASK <<
XUSB_PADCTL_USB2_OTG_PAD_CTL0_LS_RSLEW_SHIFT) |
XUSB_PADCTL_USB2_OTG_PAD_CTL0_PD |
XUSB_PADCTL_USB2_OTG_PAD_CTL0_PD2 |
XUSB_PADCTL_USB2_OTG_PAD_CTL0_PD_ZI);
value |= (priv->fuse.hs_curr_level[index] +
usb2->hs_curr_level_offset) <<
XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_CURR_LEVEL_SHIFT;
value |= XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_SLEW_VAL <<
XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_SLEW_SHIFT;
value |= XUSB_PADCTL_USB2_OTG_PAD_CTL0_LS_RSLEW_VAL(index) <<
XUSB_PADCTL_USB2_OTG_PAD_CTL0_LS_RSLEW_SHIFT;
padctl_writel(padctl, value, XUSB_PADCTL_USB2_OTG_PADX_CTL0(index));
value = padctl_readl(padctl, XUSB_PADCTL_USB2_OTG_PADX_CTL1(index));
value &= ~((XUSB_PADCTL_USB2_OTG_PAD_CTL1_TERM_RANGE_ADJ_MASK <<
XUSB_PADCTL_USB2_OTG_PAD_CTL1_TERM_RANGE_ADJ_SHIFT) |
(XUSB_PADCTL_USB2_OTG_PAD_CTL1_HS_IREF_CAP_MASK <<
XUSB_PADCTL_USB2_OTG_PAD_CTL1_HS_IREF_CAP_SHIFT) |
XUSB_PADCTL_USB2_OTG_PAD_CTL1_PD_DR |
XUSB_PADCTL_USB2_OTG_PAD_CTL1_PD_CHRP_FORCE_POWERUP |
XUSB_PADCTL_USB2_OTG_PAD_CTL1_PD_DISC_FORCE_POWERUP);
value |= (priv->fuse.hs_term_range_adj <<
XUSB_PADCTL_USB2_OTG_PAD_CTL1_TERM_RANGE_ADJ_SHIFT) |
(priv->fuse.hs_iref_cap <<
XUSB_PADCTL_USB2_OTG_PAD_CTL1_HS_IREF_CAP_SHIFT);
padctl_writel(padctl, value, XUSB_PADCTL_USB2_OTG_PADX_CTL1(index));
err = regulator_enable(port->supply);
if (err)
return err;
mutex_lock(&pad->lock);
if (pad->enable++ > 0)
goto out;
value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL0);
value &= ~XUSB_PADCTL_USB2_BIAS_PAD_CTL0_PD;
padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL0);
out:
mutex_unlock(&pad->lock);
return 0;
}
static int tegra124_usb2_phy_power_off(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
struct tegra_xusb_usb2_pad *pad = to_usb2_pad(lane->pad);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
struct tegra_xusb_usb2_port *port;
u32 value;
port = tegra_xusb_find_usb2_port(padctl, lane->index);
if (!port) {
dev_err(&phy->dev, "no port found for USB2 lane %u\n",
lane->index);
return -ENODEV;
}
mutex_lock(&pad->lock);
if (WARN_ON(pad->enable == 0))
goto out;
if (--pad->enable > 0)
goto out;
value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL0);
value |= XUSB_PADCTL_USB2_BIAS_PAD_CTL0_PD;
padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL0);
out:
regulator_disable(port->supply);
mutex_unlock(&pad->lock);
return 0;
}
static const struct phy_ops tegra124_usb2_phy_ops = {
.init = tegra124_usb2_phy_init,
.exit = tegra124_usb2_phy_exit,
.power_on = tegra124_usb2_phy_power_on,
.power_off = tegra124_usb2_phy_power_off,
.owner = THIS_MODULE,
};
static struct tegra_xusb_pad *
tegra124_usb2_pad_probe(struct tegra_xusb_padctl *padctl,
const struct tegra_xusb_pad_soc *soc,
struct device_node *np)
{
struct tegra_xusb_usb2_pad *usb2;
struct tegra_xusb_pad *pad;
int err;
usb2 = kzalloc(sizeof(*usb2), GFP_KERNEL);
if (!usb2)
return ERR_PTR(-ENOMEM);
mutex_init(&usb2->lock);
pad = &usb2->base;
pad->ops = &tegra124_usb2_lane_ops;
pad->soc = soc;
err = tegra_xusb_pad_init(pad, padctl, np);
if (err < 0) {
kfree(usb2);
goto out;
}
err = tegra_xusb_pad_register(pad, &tegra124_usb2_phy_ops);
if (err < 0)
goto unregister;
dev_set_drvdata(&pad->dev, pad);
return pad;
unregister:
device_unregister(&pad->dev);
out:
return ERR_PTR(err);
}
static void tegra124_usb2_pad_remove(struct tegra_xusb_pad *pad)
{
struct tegra_xusb_usb2_pad *usb2 = to_usb2_pad(pad);
kfree(usb2);
}
static const struct tegra_xusb_pad_ops tegra124_usb2_ops = {
.probe = tegra124_usb2_pad_probe,
.remove = tegra124_usb2_pad_remove,
};
static const struct tegra_xusb_pad_soc tegra124_usb2_pad = {
.name = "usb2",
.num_lanes = ARRAY_SIZE(tegra124_usb2_lanes),
.lanes = tegra124_usb2_lanes,
.ops = &tegra124_usb2_ops,
};
static const char * const tegra124_ulpi_functions[] = {
"snps",
"xusb",
};
static const struct tegra_xusb_lane_soc tegra124_ulpi_lanes[] = {
TEGRA124_LANE("ulpi-0", 0x004, 12, 0x1, ulpi),
};
static struct tegra_xusb_lane *
tegra124_ulpi_lane_probe(struct tegra_xusb_pad *pad, struct device_node *np,
unsigned int index)
{
struct tegra_xusb_ulpi_lane *ulpi;
int err;
ulpi = kzalloc(sizeof(*ulpi), GFP_KERNEL);
if (!ulpi)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&ulpi->base.list);
ulpi->base.soc = &pad->soc->lanes[index];
ulpi->base.index = index;
ulpi->base.pad = pad;
ulpi->base.np = np;
err = tegra_xusb_lane_parse_dt(&ulpi->base, np);
if (err < 0) {
kfree(ulpi);
return ERR_PTR(err);
}
return &ulpi->base;
}
static void tegra124_ulpi_lane_remove(struct tegra_xusb_lane *lane)
{
struct tegra_xusb_ulpi_lane *ulpi = to_ulpi_lane(lane);
kfree(ulpi);
}
static const struct tegra_xusb_lane_ops tegra124_ulpi_lane_ops = {
.probe = tegra124_ulpi_lane_probe,
.remove = tegra124_ulpi_lane_remove,
};
static int tegra124_ulpi_phy_init(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
return tegra124_xusb_padctl_enable(lane->pad->padctl);
}
static int tegra124_ulpi_phy_exit(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
return tegra124_xusb_padctl_disable(lane->pad->padctl);
}
static int tegra124_ulpi_phy_power_on(struct phy *phy)
{
return 0;
}
static int tegra124_ulpi_phy_power_off(struct phy *phy)
{
return 0;
}
static const struct phy_ops tegra124_ulpi_phy_ops = {
.init = tegra124_ulpi_phy_init,
.exit = tegra124_ulpi_phy_exit,
.power_on = tegra124_ulpi_phy_power_on,
.power_off = tegra124_ulpi_phy_power_off,
.owner = THIS_MODULE,
};
static struct tegra_xusb_pad *
tegra124_ulpi_pad_probe(struct tegra_xusb_padctl *padctl,
const struct tegra_xusb_pad_soc *soc,
struct device_node *np)
{
struct tegra_xusb_ulpi_pad *ulpi;
struct tegra_xusb_pad *pad;
int err;
ulpi = kzalloc(sizeof(*ulpi), GFP_KERNEL);
if (!ulpi)
return ERR_PTR(-ENOMEM);
pad = &ulpi->base;
pad->ops = &tegra124_ulpi_lane_ops;
pad->soc = soc;
err = tegra_xusb_pad_init(pad, padctl, np);
if (err < 0) {
kfree(ulpi);
goto out;
}
err = tegra_xusb_pad_register(pad, &tegra124_ulpi_phy_ops);
if (err < 0)
goto unregister;
dev_set_drvdata(&pad->dev, pad);
return pad;
unregister:
device_unregister(&pad->dev);
out:
return ERR_PTR(err);
}
static void tegra124_ulpi_pad_remove(struct tegra_xusb_pad *pad)
{
struct tegra_xusb_ulpi_pad *ulpi = to_ulpi_pad(pad);
kfree(ulpi);
}
static const struct tegra_xusb_pad_ops tegra124_ulpi_ops = {
.probe = tegra124_ulpi_pad_probe,
.remove = tegra124_ulpi_pad_remove,
};
static const struct tegra_xusb_pad_soc tegra124_ulpi_pad = {
.name = "ulpi",
.num_lanes = ARRAY_SIZE(tegra124_ulpi_lanes),
.lanes = tegra124_ulpi_lanes,
.ops = &tegra124_ulpi_ops,
};
static const char * const tegra124_hsic_functions[] = {
"snps",
"xusb",
};
static const struct tegra_xusb_lane_soc tegra124_hsic_lanes[] = {
TEGRA124_LANE("hsic-0", 0x004, 14, 0x1, hsic),
TEGRA124_LANE("hsic-1", 0x004, 15, 0x1, hsic),
};
static struct tegra_xusb_lane *
tegra124_hsic_lane_probe(struct tegra_xusb_pad *pad, struct device_node *np,
unsigned int index)
{
struct tegra_xusb_hsic_lane *hsic;
int err;
hsic = kzalloc(sizeof(*hsic), GFP_KERNEL);
if (!hsic)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&hsic->base.list);
hsic->base.soc = &pad->soc->lanes[index];
hsic->base.index = index;
hsic->base.pad = pad;
hsic->base.np = np;
err = tegra_xusb_lane_parse_dt(&hsic->base, np);
if (err < 0) {
kfree(hsic);
return ERR_PTR(err);
}
return &hsic->base;
}
static void tegra124_hsic_lane_remove(struct tegra_xusb_lane *lane)
{
struct tegra_xusb_hsic_lane *hsic = to_hsic_lane(lane);
kfree(hsic);
}
static const struct tegra_xusb_lane_ops tegra124_hsic_lane_ops = {
.probe = tegra124_hsic_lane_probe,
.remove = tegra124_hsic_lane_remove,
};
static int tegra124_hsic_phy_init(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
return tegra124_xusb_padctl_enable(lane->pad->padctl);
}
static int tegra124_hsic_phy_exit(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
return tegra124_xusb_padctl_disable(lane->pad->padctl);
}
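/*
 * Power up an HSIC lane: enable its supply, program the strobe trimming
 * and the per-lane TX/RX trim values, and release the pad power-downs
 * with the idle bus termination applied.
 */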
static int tegra124_hsic_phy_power_on(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
struct tegra_xusb_hsic_lane *hsic = to_hsic_lane(lane);
struct tegra_xusb_hsic_pad *pad = to_hsic_pad(lane->pad);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
unsigned int index = lane->index;
u32 value;
int err;
err = regulator_enable(pad->supply);
if (err)
return err;
padctl_writel(padctl, hsic->strobe_trim,
XUSB_PADCTL_HSIC_STRB_TRIM_CONTROL);
value = padctl_readl(padctl, XUSB_PADCTL_HSIC_PADX_CTL1(index));
if (hsic->auto_term)
value |= XUSB_PADCTL_HSIC_PAD_CTL1_AUTO_TERM_EN;
else
value &= ~XUSB_PADCTL_HSIC_PAD_CTL1_AUTO_TERM_EN;
padctl_writel(padctl, value, XUSB_PADCTL_HSIC_PADX_CTL1(index));
value = padctl_readl(padctl, XUSB_PADCTL_HSIC_PADX_CTL0(index));
value &= ~((XUSB_PADCTL_HSIC_PAD_CTL0_TX_RTUNEN_MASK <<
XUSB_PADCTL_HSIC_PAD_CTL0_TX_RTUNEN_SHIFT) |
(XUSB_PADCTL_HSIC_PAD_CTL0_TX_RTUNEP_MASK <<
XUSB_PADCTL_HSIC_PAD_CTL0_TX_RTUNEP_SHIFT) |
(XUSB_PADCTL_HSIC_PAD_CTL0_TX_RSLEWN_MASK <<
XUSB_PADCTL_HSIC_PAD_CTL0_TX_RSLEWN_SHIFT) |
(XUSB_PADCTL_HSIC_PAD_CTL0_TX_RSLEWP_MASK <<
XUSB_PADCTL_HSIC_PAD_CTL0_TX_RSLEWP_SHIFT));
value |= (hsic->tx_rtune_n <<
XUSB_PADCTL_HSIC_PAD_CTL0_TX_RTUNEN_SHIFT) |
(hsic->tx_rtune_p <<
XUSB_PADCTL_HSIC_PAD_CTL0_TX_RTUNEP_SHIFT) |
(hsic->tx_rslew_n <<
XUSB_PADCTL_HSIC_PAD_CTL0_TX_RSLEWN_SHIFT) |
(hsic->tx_rslew_p <<
XUSB_PADCTL_HSIC_PAD_CTL0_TX_RSLEWP_SHIFT);
padctl_writel(padctl, value, XUSB_PADCTL_HSIC_PADX_CTL0(index));
value = padctl_readl(padctl, XUSB_PADCTL_HSIC_PADX_CTL2(index));
value &= ~((XUSB_PADCTL_HSIC_PAD_CTL2_RX_STROBE_TRIM_MASK <<
XUSB_PADCTL_HSIC_PAD_CTL2_RX_STROBE_TRIM_SHIFT) |
(XUSB_PADCTL_HSIC_PAD_CTL2_RX_DATA_TRIM_MASK <<
XUSB_PADCTL_HSIC_PAD_CTL2_RX_DATA_TRIM_SHIFT));
value |= (hsic->rx_strobe_trim <<
XUSB_PADCTL_HSIC_PAD_CTL2_RX_STROBE_TRIM_SHIFT) |
(hsic->rx_data_trim <<
XUSB_PADCTL_HSIC_PAD_CTL2_RX_DATA_TRIM_SHIFT);
padctl_writel(padctl, value, XUSB_PADCTL_HSIC_PADX_CTL2(index));
value = padctl_readl(padctl, XUSB_PADCTL_HSIC_PADX_CTL1(index));
value &= ~(XUSB_PADCTL_HSIC_PAD_CTL1_RPD_STROBE |
XUSB_PADCTL_HSIC_PAD_CTL1_RPU_DATA |
XUSB_PADCTL_HSIC_PAD_CTL1_PD_RX |
XUSB_PADCTL_HSIC_PAD_CTL1_PD_ZI |
XUSB_PADCTL_HSIC_PAD_CTL1_PD_TRX |
XUSB_PADCTL_HSIC_PAD_CTL1_PD_TX);
value |= XUSB_PADCTL_HSIC_PAD_CTL1_RPD_DATA |
XUSB_PADCTL_HSIC_PAD_CTL1_RPU_STROBE;
padctl_writel(padctl, value, XUSB_PADCTL_HSIC_PADX_CTL1(index));
return 0;
}
static int tegra124_hsic_phy_power_off(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
struct tegra_xusb_hsic_pad *pad = to_hsic_pad(lane->pad);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
unsigned int index = lane->index;
u32 value;
value = padctl_readl(padctl, XUSB_PADCTL_HSIC_PADX_CTL1(index));
value |= XUSB_PADCTL_HSIC_PAD_CTL1_PD_RX |
XUSB_PADCTL_HSIC_PAD_CTL1_PD_ZI |
XUSB_PADCTL_HSIC_PAD_CTL1_PD_TRX |
XUSB_PADCTL_HSIC_PAD_CTL1_PD_TX;
padctl_writel(padctl, value, XUSB_PADCTL_HSIC_PADX_CTL1(index));
regulator_disable(pad->supply);
return 0;
}
static const struct phy_ops tegra124_hsic_phy_ops = {
.init = tegra124_hsic_phy_init,
.exit = tegra124_hsic_phy_exit,
.power_on = tegra124_hsic_phy_power_on,
.power_off = tegra124_hsic_phy_power_off,
.owner = THIS_MODULE,
};
static struct tegra_xusb_pad *
tegra124_hsic_pad_probe(struct tegra_xusb_padctl *padctl,
const struct tegra_xusb_pad_soc *soc,
struct device_node *np)
{
struct tegra_xusb_hsic_pad *hsic;
struct tegra_xusb_pad *pad;
int err;
hsic = kzalloc(sizeof(*hsic), GFP_KERNEL);
if (!hsic)
return ERR_PTR(-ENOMEM);
pad = &hsic->base;
pad->ops = &tegra124_hsic_lane_ops;
pad->soc = soc;
err = tegra_xusb_pad_init(pad, padctl, np);
if (err < 0) {
kfree(hsic);
goto out;
}
err = tegra_xusb_pad_register(pad, &tegra124_hsic_phy_ops);
if (err < 0)
goto unregister;
dev_set_drvdata(&pad->dev, pad);
return pad;
unregister:
device_unregister(&pad->dev);
out:
return ERR_PTR(err);
}
static void tegra124_hsic_pad_remove(struct tegra_xusb_pad *pad)
{
struct tegra_xusb_hsic_pad *hsic = to_hsic_pad(pad);
kfree(hsic);
}
static const struct tegra_xusb_pad_ops tegra124_hsic_ops = {
.probe = tegra124_hsic_pad_probe,
.remove = tegra124_hsic_pad_remove,
};
static const struct tegra_xusb_pad_soc tegra124_hsic_pad = {
.name = "hsic",
.num_lanes = ARRAY_SIZE(tegra124_hsic_lanes),
.lanes = tegra124_hsic_lanes,
.ops = &tegra124_hsic_ops,
};
static const char * const tegra124_pcie_functions[] = {
"pcie",
"usb3-ss",
"sata",
};
static const struct tegra_xusb_lane_soc tegra124_pcie_lanes[] = {
TEGRA124_LANE("pcie-0", 0x134, 16, 0x3, pcie),
TEGRA124_LANE("pcie-1", 0x134, 18, 0x3, pcie),
TEGRA124_LANE("pcie-2", 0x134, 20, 0x3, pcie),
TEGRA124_LANE("pcie-3", 0x134, 22, 0x3, pcie),
TEGRA124_LANE("pcie-4", 0x134, 24, 0x3, pcie),
};
static struct tegra_xusb_lane *
tegra124_pcie_lane_probe(struct tegra_xusb_pad *pad, struct device_node *np,
unsigned int index)
{
struct tegra_xusb_pcie_lane *pcie;
int err;
pcie = kzalloc(sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&pcie->base.list);
pcie->base.soc = &pad->soc->lanes[index];
pcie->base.index = index;
pcie->base.pad = pad;
pcie->base.np = np;
err = tegra_xusb_lane_parse_dt(&pcie->base, np);
if (err < 0) {
kfree(pcie);
return ERR_PTR(err);
}
return &pcie->base;
}
static void tegra124_pcie_lane_remove(struct tegra_xusb_lane *lane)
{
struct tegra_xusb_pcie_lane *pcie = to_pcie_lane(lane);
kfree(pcie);
}
static const struct tegra_xusb_lane_ops tegra124_pcie_lane_ops = {
.probe = tegra124_pcie_lane_probe,
.remove = tegra124_pcie_lane_remove,
};
static int tegra124_pcie_phy_init(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
return tegra124_xusb_padctl_enable(lane->pad->padctl);
}
static int tegra124_pcie_phy_exit(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
return tegra124_xusb_padctl_disable(lane->pad->padctl);
}
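/*
 * Power up the PCIe pad PLL: select the reference clock, enable the clock
 * buffers, release the PLL reset and poll up to 50 ms for lock before
 * taking the lane out of IDDQ.
 */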
static int tegra124_pcie_phy_power_on(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
unsigned long timeout;
int err = -ETIMEDOUT;
u32 value;
value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_P0_CTL1);
value &= ~XUSB_PADCTL_IOPHY_PLL_P0_CTL1_REFCLK_SEL_MASK;
padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_P0_CTL1);
value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_P0_CTL2);
value |= XUSB_PADCTL_IOPHY_PLL_P0_CTL2_REFCLKBUF_EN |
XUSB_PADCTL_IOPHY_PLL_P0_CTL2_TXCLKREF_EN |
XUSB_PADCTL_IOPHY_PLL_P0_CTL2_TXCLKREF_SEL;
padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_P0_CTL2);
value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_P0_CTL1);
value |= XUSB_PADCTL_IOPHY_PLL_P0_CTL1_PLL_RST;
padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_P0_CTL1);
timeout = jiffies + msecs_to_jiffies(50);
while (time_before(jiffies, timeout)) {
value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_P0_CTL1);
if (value & XUSB_PADCTL_IOPHY_PLL_P0_CTL1_PLL0_LOCKDET) {
err = 0;
break;
}
usleep_range(100, 200);
}
value = padctl_readl(padctl, XUSB_PADCTL_USB3_PAD_MUX);
value |= XUSB_PADCTL_USB3_PAD_MUX_PCIE_IDDQ_DISABLE(lane->index);
padctl_writel(padctl, value, XUSB_PADCTL_USB3_PAD_MUX);
return err;
}
static int tegra124_pcie_phy_power_off(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
u32 value;
value = padctl_readl(padctl, XUSB_PADCTL_USB3_PAD_MUX);
value &= ~XUSB_PADCTL_USB3_PAD_MUX_PCIE_IDDQ_DISABLE(lane->index);
padctl_writel(padctl, value, XUSB_PADCTL_USB3_PAD_MUX);
value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_P0_CTL1);
value &= ~XUSB_PADCTL_IOPHY_PLL_P0_CTL1_PLL_RST;
padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_P0_CTL1);
return 0;
}
static const struct phy_ops tegra124_pcie_phy_ops = {
.init = tegra124_pcie_phy_init,
.exit = tegra124_pcie_phy_exit,
.power_on = tegra124_pcie_phy_power_on,
.power_off = tegra124_pcie_phy_power_off,
.owner = THIS_MODULE,
};
static struct tegra_xusb_pad *
tegra124_pcie_pad_probe(struct tegra_xusb_padctl *padctl,
const struct tegra_xusb_pad_soc *soc,
struct device_node *np)
{
struct tegra_xusb_pcie_pad *pcie;
struct tegra_xusb_pad *pad;
int err;
pcie = kzalloc(sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return ERR_PTR(-ENOMEM);
pad = &pcie->base;
pad->ops = &tegra124_pcie_lane_ops;
pad->soc = soc;
err = tegra_xusb_pad_init(pad, padctl, np);
if (err < 0) {
kfree(pcie);
goto out;
}
err = tegra_xusb_pad_register(pad, &tegra124_pcie_phy_ops);
if (err < 0)
goto unregister;
dev_set_drvdata(&pad->dev, pad);
return pad;
unregister:
device_unregister(&pad->dev);
out:
return ERR_PTR(err);
}
static void tegra124_pcie_pad_remove(struct tegra_xusb_pad *pad)
{
struct tegra_xusb_pcie_pad *pcie = to_pcie_pad(pad);
kfree(pcie);
}
static const struct tegra_xusb_pad_ops tegra124_pcie_ops = {
.probe = tegra124_pcie_pad_probe,
.remove = tegra124_pcie_pad_remove,
};
static const struct tegra_xusb_pad_soc tegra124_pcie_pad = {
.name = "pcie",
.num_lanes = ARRAY_SIZE(tegra124_pcie_lanes),
.lanes = tegra124_pcie_lanes,
.ops = &tegra124_pcie_ops,
};
static const struct tegra_xusb_lane_soc tegra124_sata_lanes[] = {
TEGRA124_LANE("sata-0", 0x134, 26, 0x3, pcie),
};
static struct tegra_xusb_lane *
tegra124_sata_lane_probe(struct tegra_xusb_pad *pad, struct device_node *np,
unsigned int index)
{
struct tegra_xusb_sata_lane *sata;
int err;
sata = kzalloc(sizeof(*sata), GFP_KERNEL);
if (!sata)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&sata->base.list);
sata->base.soc = &pad->soc->lanes[index];
sata->base.index = index;
sata->base.pad = pad;
sata->base.np = np;
err = tegra_xusb_lane_parse_dt(&sata->base, np);
if (err < 0) {
kfree(sata);
return ERR_PTR(err);
}
return &sata->base;
}
static void tegra124_sata_lane_remove(struct tegra_xusb_lane *lane)
{
struct tegra_xusb_sata_lane *sata = to_sata_lane(lane);
kfree(sata);
}
static const struct tegra_xusb_lane_ops tegra124_sata_lane_ops = {
.probe = tegra124_sata_lane_probe,
.remove = tegra124_sata_lane_remove,
};
static int tegra124_sata_phy_init(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
return tegra124_xusb_padctl_enable(lane->pad->padctl);
}
static int tegra124_sata_phy_exit(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
return tegra124_xusb_padctl_disable(lane->pad->padctl);
}
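/*
 * Power up the SATA pad PLL: clear the IDDQ overrides, switch the PLL to
 * normal mode, release its reset and poll up to 50 ms for lock before
 * taking the lane out of IDDQ.
 */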
static int tegra124_sata_phy_power_on(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
unsigned long timeout;
int err = -ETIMEDOUT;
u32 value;
value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1);
value &= ~XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1_IDDQ_OVRD;
value &= ~XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1_IDDQ;
padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1);
value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
value &= ~XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_PWR_OVRD;
value &= ~XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_IDDQ;
padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
value |= XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL1_MODE;
padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
value |= XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_RST;
padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
timeout = jiffies + msecs_to_jiffies(50);
while (time_before(jiffies, timeout)) {
value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
if (value & XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL1_LOCKDET) {
err = 0;
break;
}
usleep_range(100, 200);
}
value = padctl_readl(padctl, XUSB_PADCTL_USB3_PAD_MUX);
value |= XUSB_PADCTL_USB3_PAD_MUX_SATA_IDDQ_DISABLE(lane->index);
padctl_writel(padctl, value, XUSB_PADCTL_USB3_PAD_MUX);
return err;
}
static int tegra124_sata_phy_power_off(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
u32 value;
value = padctl_readl(padctl, XUSB_PADCTL_USB3_PAD_MUX);
value &= ~XUSB_PADCTL_USB3_PAD_MUX_SATA_IDDQ_DISABLE(lane->index);
padctl_writel(padctl, value, XUSB_PADCTL_USB3_PAD_MUX);
value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
value &= ~XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_RST;
padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
value &= ~XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL1_MODE;
padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
value |= XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_PWR_OVRD;
value |= XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_IDDQ;
padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1);
	value |= XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1_IDDQ_OVRD;
	value |= XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1_IDDQ;
padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1);
return 0;
}
static const struct phy_ops tegra124_sata_phy_ops = {
.init = tegra124_sata_phy_init,
.exit = tegra124_sata_phy_exit,
.power_on = tegra124_sata_phy_power_on,
.power_off = tegra124_sata_phy_power_off,
.owner = THIS_MODULE,
};
static struct tegra_xusb_pad *
tegra124_sata_pad_probe(struct tegra_xusb_padctl *padctl,
const struct tegra_xusb_pad_soc *soc,
struct device_node *np)
{
struct tegra_xusb_sata_pad *sata;
struct tegra_xusb_pad *pad;
int err;
sata = kzalloc(sizeof(*sata), GFP_KERNEL);
if (!sata)
return ERR_PTR(-ENOMEM);
pad = &sata->base;
pad->ops = &tegra124_sata_lane_ops;
pad->soc = soc;
err = tegra_xusb_pad_init(pad, padctl, np);
if (err < 0) {
kfree(sata);
goto out;
}
err = tegra_xusb_pad_register(pad, &tegra124_sata_phy_ops);
if (err < 0)
goto unregister;
dev_set_drvdata(&pad->dev, pad);
return pad;
unregister:
device_unregister(&pad->dev);
out:
return ERR_PTR(err);
}
static void tegra124_sata_pad_remove(struct tegra_xusb_pad *pad)
{
struct tegra_xusb_sata_pad *sata = to_sata_pad(pad);
kfree(sata);
}
static const struct tegra_xusb_pad_ops tegra124_sata_ops = {
.probe = tegra124_sata_pad_probe,
.remove = tegra124_sata_pad_remove,
};
static const struct tegra_xusb_pad_soc tegra124_sata_pad = {
.name = "sata",
.num_lanes = ARRAY_SIZE(tegra124_sata_lanes),
.lanes = tegra124_sata_lanes,
.ops = &tegra124_sata_ops,
};
static const struct tegra_xusb_pad_soc *tegra124_pads[] = {
&tegra124_usb2_pad,
&tegra124_ulpi_pad,
&tegra124_hsic_pad,
&tegra124_pcie_pad,
&tegra124_sata_pad,
};
static int tegra124_usb2_port_enable(struct tegra_xusb_port *port)
{
return 0;
}
static void tegra124_usb2_port_disable(struct tegra_xusb_port *port)
{
}
static struct tegra_xusb_lane *
tegra124_usb2_port_map(struct tegra_xusb_port *port)
{
return tegra_xusb_find_lane(port->padctl, "usb2", port->index);
}
static const struct tegra_xusb_port_ops tegra124_usb2_port_ops = {
.release = tegra_xusb_usb2_port_release,
.remove = tegra_xusb_usb2_port_remove,
.enable = tegra124_usb2_port_enable,
.disable = tegra124_usb2_port_disable,
.map = tegra124_usb2_port_map,
};
static int tegra124_ulpi_port_enable(struct tegra_xusb_port *port)
{
return 0;
}
static void tegra124_ulpi_port_disable(struct tegra_xusb_port *port)
{
}
static struct tegra_xusb_lane *
tegra124_ulpi_port_map(struct tegra_xusb_port *port)
{
return tegra_xusb_find_lane(port->padctl, "ulpi", port->index);
}
static const struct tegra_xusb_port_ops tegra124_ulpi_port_ops = {
.release = tegra_xusb_ulpi_port_release,
.enable = tegra124_ulpi_port_enable,
.disable = tegra124_ulpi_port_disable,
.map = tegra124_ulpi_port_map,
};
static int tegra124_hsic_port_enable(struct tegra_xusb_port *port)
{
return 0;
}
static void tegra124_hsic_port_disable(struct tegra_xusb_port *port)
{
}
static struct tegra_xusb_lane *
tegra124_hsic_port_map(struct tegra_xusb_port *port)
{
return tegra_xusb_find_lane(port->padctl, "hsic", port->index);
}
static const struct tegra_xusb_port_ops tegra124_hsic_port_ops = {
.release = tegra_xusb_hsic_port_release,
.enable = tegra124_hsic_port_enable,
.disable = tegra124_hsic_port_disable,
.map = tegra124_hsic_port_map,
};
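/*
 * Route the USB3 port to its SuperSpeed lane, program (or restore) the RX
 * equalizer and DFE settings, set up the SATA PLL when the SATA lane is
 * used and finally release the per-port ELPG clamps.
 */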
static int tegra124_usb3_port_enable(struct tegra_xusb_port *port)
{
struct tegra_xusb_usb3_port *usb3 = to_usb3_port(port);
struct tegra_xusb_padctl *padctl = port->padctl;
struct tegra_xusb_lane *lane = usb3->base.lane;
unsigned int index = port->index, offset;
u32 value;
value = padctl_readl(padctl, XUSB_PADCTL_SS_PORT_MAP);
if (!usb3->internal)
value &= ~XUSB_PADCTL_SS_PORT_MAP_PORTX_INTERNAL(index);
else
value |= XUSB_PADCTL_SS_PORT_MAP_PORTX_INTERNAL(index);
value &= ~XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP_MASK(index);
value |= XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP(index, usb3->port);
padctl_writel(padctl, value, XUSB_PADCTL_SS_PORT_MAP);
/*
* TODO: move this code into the PCIe/SATA PHY ->power_on() callbacks
* and conditionalize based on mux function? This seems to work, but
* might not be the exact proper sequence.
*/
value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_USB3_PADX_CTL2(index));
value &= ~((XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_WANDER_MASK <<
XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_WANDER_SHIFT) |
(XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_MASK <<
XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_SHIFT) |
(XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_CDR_CNTL_MASK <<
XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_CDR_CNTL_SHIFT));
value |= (XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_WANDER_VAL <<
XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_WANDER_SHIFT) |
(XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_CDR_CNTL_VAL <<
XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_CDR_CNTL_SHIFT) |
(XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_VAL <<
XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_SHIFT);
if (usb3->context_saved) {
value &= ~((XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_G_MASK <<
XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_G_SHIFT) |
(XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_Z_MASK <<
XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_Z_SHIFT));
value |= (usb3->ctle_g <<
XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_G_SHIFT) |
(usb3->ctle_z <<
XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_Z_SHIFT);
}
padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_USB3_PADX_CTL2(index));
value = XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_VAL;
if (usb3->context_saved) {
value &= ~((XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_TAP_MASK <<
XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_TAP_SHIFT) |
(XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_AMP_MASK <<
XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_AMP_SHIFT));
value |= (usb3->tap1 <<
XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_TAP_SHIFT) |
(usb3->amp <<
XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_AMP_SHIFT);
}
padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_USB3_PADX_CTL4(index));
if (lane->pad == padctl->pcie)
offset = XUSB_PADCTL_IOPHY_MISC_PAD_PX_CTL2(lane->index);
else
offset = XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL2;
value = padctl_readl(padctl, offset);
value &= ~(XUSB_PADCTL_IOPHY_MISC_PAD_CTL2_SPARE_IN_MASK <<
XUSB_PADCTL_IOPHY_MISC_PAD_CTL2_SPARE_IN_SHIFT);
value |= XUSB_PADCTL_IOPHY_MISC_PAD_CTL2_SPARE_IN_VAL <<
XUSB_PADCTL_IOPHY_MISC_PAD_CTL2_SPARE_IN_SHIFT;
padctl_writel(padctl, value, offset);
if (lane->pad == padctl->pcie)
offset = XUSB_PADCTL_IOPHY_MISC_PAD_PX_CTL5(lane->index);
else
offset = XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL5;
value = padctl_readl(padctl, offset);
value |= XUSB_PADCTL_IOPHY_MISC_PAD_CTL5_RX_QEYE_EN;
padctl_writel(padctl, value, offset);
/* Enable SATA PHY when SATA lane is used */
if (lane->pad == padctl->sata) {
value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
value &= ~(XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL0_REFCLK_NDIV_MASK <<
XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL0_REFCLK_NDIV_SHIFT);
value |= 0x2 <<
XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL0_REFCLK_NDIV_SHIFT;
padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL2);
value &= ~((XUSB_PADCTL_IOPHY_PLL_S0_CTL2_XDIGCLK_SEL_MASK <<
XUSB_PADCTL_IOPHY_PLL_S0_CTL2_XDIGCLK_SEL_SHIFT) |
(XUSB_PADCTL_IOPHY_PLL_S0_CTL2_PLL1_CP_CNTL_MASK <<
XUSB_PADCTL_IOPHY_PLL_S0_CTL2_PLL1_CP_CNTL_SHIFT) |
(XUSB_PADCTL_IOPHY_PLL_S0_CTL2_PLL0_CP_CNTL_MASK <<
XUSB_PADCTL_IOPHY_PLL_S0_CTL2_PLL0_CP_CNTL_SHIFT) |
XUSB_PADCTL_IOPHY_PLL_S0_CTL2_TCLKOUT_EN);
value |= (0x7 <<
XUSB_PADCTL_IOPHY_PLL_S0_CTL2_XDIGCLK_SEL_SHIFT) |
(0x8 <<
XUSB_PADCTL_IOPHY_PLL_S0_CTL2_PLL1_CP_CNTL_SHIFT) |
(0x8 <<
XUSB_PADCTL_IOPHY_PLL_S0_CTL2_PLL0_CP_CNTL_SHIFT) |
XUSB_PADCTL_IOPHY_PLL_S0_CTL2_TXCLKREF_SEL;
padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_S0_CTL2);
value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL3);
value &= ~XUSB_PADCTL_IOPHY_PLL_S0_CTL3_RCAL_BYPASS;
padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_S0_CTL3);
}
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
value &= ~XUSB_PADCTL_ELPG_PROGRAM_SSPX_ELPG_VCORE_DOWN(index);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
usleep_range(100, 200);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
value &= ~XUSB_PADCTL_ELPG_PROGRAM_SSPX_ELPG_CLAMP_EN_EARLY(index);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
usleep_range(100, 200);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
value &= ~XUSB_PADCTL_ELPG_PROGRAM_SSPX_ELPG_CLAMP_EN(index);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
return 0;
}
static void tegra124_usb3_port_disable(struct tegra_xusb_port *port)
{
struct tegra_xusb_padctl *padctl = port->padctl;
u32 value;
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
value |= XUSB_PADCTL_ELPG_PROGRAM_SSPX_ELPG_CLAMP_EN_EARLY(port->index);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
usleep_range(100, 200);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
value |= XUSB_PADCTL_ELPG_PROGRAM_SSPX_ELPG_CLAMP_EN(port->index);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
usleep_range(250, 350);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
value |= XUSB_PADCTL_ELPG_PROGRAM_SSPX_ELPG_VCORE_DOWN(port->index);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
value = padctl_readl(padctl, XUSB_PADCTL_SS_PORT_MAP);
value &= ~XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP_MASK(port->index);
value |= XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP(port->index, 0x7);
padctl_writel(padctl, value, XUSB_PADCTL_SS_PORT_MAP);
}
static const struct tegra_xusb_lane_map tegra124_usb3_map[] = {
{ 0, "pcie", 0 },
{ 1, "pcie", 1 },
{ 1, "sata", 0 },
{ 0, NULL, 0 },
};
static struct tegra_xusb_lane *
tegra124_usb3_port_map(struct tegra_xusb_port *port)
{
return tegra_xusb_port_find_lane(port, tegra124_usb3_map, "usb3-ss");
}
static const struct tegra_xusb_port_ops tegra124_usb3_port_ops = {
.release = tegra_xusb_usb3_port_release,
.enable = tegra124_usb3_port_enable,
.disable = tegra124_usb3_port_disable,
.map = tegra124_usb3_port_map,
};
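/*
 * Unpack the USB2 calibration values (HS current level per pad, IREF cap,
 * termination range adjustment and squelch level) from the SKU calibration
 * fuse word.
 */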
static int
tegra124_xusb_read_fuse_calibration(struct tegra124_xusb_fuse_calibration *fuse)
{
unsigned int i;
int err;
u32 value;
err = tegra_fuse_readl(TEGRA_FUSE_SKU_CALIB_0, &value);
if (err < 0)
return err;
for (i = 0; i < ARRAY_SIZE(fuse->hs_curr_level); i++) {
fuse->hs_curr_level[i] =
(value >> FUSE_SKU_CALIB_HS_CURR_LEVEL_PADX_SHIFT(i)) &
FUSE_SKU_CALIB_HS_CURR_LEVEL_PAD_MASK;
}
fuse->hs_iref_cap =
(value >> FUSE_SKU_CALIB_HS_IREF_CAP_SHIFT) &
FUSE_SKU_CALIB_HS_IREF_CAP_MASK;
fuse->hs_term_range_adj =
(value >> FUSE_SKU_CALIB_HS_TERM_RANGE_ADJ_SHIFT) &
FUSE_SKU_CALIB_HS_TERM_RANGE_ADJ_MASK;
fuse->hs_squelch_level =
(value >> FUSE_SKU_CALIB_HS_SQUELCH_LEVEL_SHIFT) &
FUSE_SKU_CALIB_HS_SQUELCH_LEVEL_MASK;
return 0;
}
static struct tegra_xusb_padctl *
tegra124_xusb_padctl_probe(struct device *dev,
const struct tegra_xusb_padctl_soc *soc)
{
struct tegra124_xusb_padctl *padctl;
int err;
padctl = devm_kzalloc(dev, sizeof(*padctl), GFP_KERNEL);
if (!padctl)
return ERR_PTR(-ENOMEM);
padctl->base.dev = dev;
padctl->base.soc = soc;
err = tegra124_xusb_read_fuse_calibration(&padctl->fuse);
if (err < 0)
return ERR_PTR(err);
return &padctl->base;
}
static void tegra124_xusb_padctl_remove(struct tegra_xusb_padctl *padctl)
{
}
static const struct tegra_xusb_padctl_ops tegra124_xusb_padctl_ops = {
.probe = tegra124_xusb_padctl_probe,
.remove = tegra124_xusb_padctl_remove,
.usb3_save_context = tegra124_usb3_save_context,
.hsic_set_idle = tegra124_hsic_set_idle,
};
static const char * const tegra124_xusb_padctl_supply_names[] = {
"avdd-pll-utmip",
"avdd-pll-erefe",
"avdd-pex-pll",
"hvdd-pex-pll-e",
};
const struct tegra_xusb_padctl_soc tegra124_xusb_padctl_soc = {
.num_pads = ARRAY_SIZE(tegra124_pads),
.pads = tegra124_pads,
.ports = {
.usb2 = {
.ops = &tegra124_usb2_port_ops,
.count = 3,
},
.ulpi = {
.ops = &tegra124_ulpi_port_ops,
.count = 1,
},
.hsic = {
.ops = &tegra124_hsic_port_ops,
.count = 2,
},
.usb3 = {
.ops = &tegra124_usb3_port_ops,
.count = 2,
},
},
.ops = &tegra124_xusb_padctl_ops,
.supply_names = tegra124_xusb_padctl_supply_names,
.num_supplies = ARRAY_SIZE(tegra124_xusb_padctl_supply_names),
};
EXPORT_SYMBOL_GPL(tegra124_xusb_padctl_soc);
MODULE_AUTHOR("Thierry Reding <[email protected]>");
MODULE_DESCRIPTION("NVIDIA Tegra 124 XUSB Pad Controller driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/phy/tegra/xusb-tegra124.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <soc/tegra/fuse.h>
#include "xusb.h"
/* FUSE USB_CALIB registers */
#define HS_CURR_LEVEL_PADX_SHIFT(x) ((x) ? (11 + (x - 1) * 6) : 0)
#define HS_CURR_LEVEL_PAD_MASK 0x3f
#define HS_TERM_RANGE_ADJ_SHIFT 7
#define HS_TERM_RANGE_ADJ_MASK 0xf
#define HS_SQUELCH_SHIFT 29
#define HS_SQUELCH_MASK 0x7
#define RPD_CTRL_SHIFT 0
#define RPD_CTRL_MASK 0x1f
/* XUSB PADCTL registers */
#define XUSB_PADCTL_USB2_PAD_MUX 0x4
#define USB2_PORT_SHIFT(x) ((x) * 2)
#define USB2_PORT_MASK 0x3
#define PORT_XUSB 1
#define HSIC_PORT_SHIFT(x) ((x) + 20)
#define HSIC_PORT_MASK 0x1
#define PORT_HSIC 0
#define XUSB_PADCTL_USB2_PORT_CAP 0x8
#define XUSB_PADCTL_SS_PORT_CAP 0xc
#define PORTX_CAP_SHIFT(x) ((x) * 4)
#define PORT_CAP_MASK 0x3
#define PORT_CAP_DISABLED 0x0
#define PORT_CAP_HOST 0x1
#define PORT_CAP_DEVICE 0x2
#define PORT_CAP_OTG 0x3
#define XUSB_PADCTL_ELPG_PROGRAM 0x20
#define USB2_PORT_WAKE_INTERRUPT_ENABLE(x) BIT(x)
#define USB2_PORT_WAKEUP_EVENT(x) BIT((x) + 7)
#define SS_PORT_WAKE_INTERRUPT_ENABLE(x) BIT((x) + 14)
#define SS_PORT_WAKEUP_EVENT(x) BIT((x) + 21)
#define USB2_HSIC_PORT_WAKE_INTERRUPT_ENABLE(x) BIT((x) + 28)
#define USB2_HSIC_PORT_WAKEUP_EVENT(x) BIT((x) + 30)
#define ALL_WAKE_EVENTS \
(USB2_PORT_WAKEUP_EVENT(0) | USB2_PORT_WAKEUP_EVENT(1) | \
USB2_PORT_WAKEUP_EVENT(2) | SS_PORT_WAKEUP_EVENT(0) | \
SS_PORT_WAKEUP_EVENT(1) | SS_PORT_WAKEUP_EVENT(2) | \
USB2_HSIC_PORT_WAKEUP_EVENT(0))
#define XUSB_PADCTL_ELPG_PROGRAM_1 0x24
#define SSPX_ELPG_CLAMP_EN(x) BIT(0 + (x) * 3)
#define SSPX_ELPG_CLAMP_EN_EARLY(x) BIT(1 + (x) * 3)
#define SSPX_ELPG_VCORE_DOWN(x) BIT(2 + (x) * 3)
#define XUSB_PADCTL_SS_PORT_CFG 0x2c
#define PORTX_SPEED_SUPPORT_SHIFT(x) ((x) * 4)
#define PORTX_SPEED_SUPPORT_MASK (0x3)
#define PORT_SPEED_SUPPORT_GEN1 (0x0)
#define XUSB_PADCTL_USB2_OTG_PADX_CTL0(x) (0x88 + (x) * 0x40)
#define HS_CURR_LEVEL(x) ((x) & 0x3f)
#define TERM_SEL BIT(25)
#define USB2_OTG_PD BIT(26)
#define USB2_OTG_PD2 BIT(27)
#define USB2_OTG_PD2_OVRD_EN BIT(28)
#define USB2_OTG_PD_ZI BIT(29)
#define XUSB_PADCTL_USB2_OTG_PADX_CTL1(x) (0x8c + (x) * 0x40)
#define USB2_OTG_PD_DR BIT(2)
#define TERM_RANGE_ADJ(x) (((x) & 0xf) << 3)
#define RPD_CTRL(x) (((x) & 0x1f) << 26)
#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0 0x284
#define BIAS_PAD_PD BIT(11)
#define HS_SQUELCH_LEVEL(x) (((x) & 0x7) << 0)
#define XUSB_PADCTL_USB2_BIAS_PAD_CTL1 0x288
#define USB2_TRK_START_TIMER(x) (((x) & 0x7f) << 12)
#define USB2_TRK_DONE_RESET_TIMER(x) (((x) & 0x7f) << 19)
#define USB2_PD_TRK BIT(26)
#define USB2_TRK_COMPLETED BIT(31)
#define XUSB_PADCTL_USB2_BIAS_PAD_CTL2 0x28c
#define USB2_TRK_HW_MODE BIT(0)
#define CYA_TRK_CODE_UPDATE_ON_IDLE BIT(31)
#define XUSB_PADCTL_HSIC_PADX_CTL0(x) (0x300 + (x) * 0x20)
#define HSIC_PD_TX_DATA0 BIT(1)
#define HSIC_PD_TX_STROBE BIT(3)
#define HSIC_PD_RX_DATA0 BIT(4)
#define HSIC_PD_RX_STROBE BIT(6)
#define HSIC_PD_ZI_DATA0 BIT(7)
#define HSIC_PD_ZI_STROBE BIT(9)
#define HSIC_RPD_DATA0 BIT(13)
#define HSIC_RPD_STROBE BIT(15)
#define HSIC_RPU_DATA0 BIT(16)
#define HSIC_RPU_STROBE BIT(18)
#define XUSB_PADCTL_HSIC_PAD_TRK_CTL0 0x340
#define HSIC_TRK_START_TIMER(x) (((x) & 0x7f) << 5)
#define HSIC_TRK_DONE_RESET_TIMER(x) (((x) & 0x7f) << 12)
#define HSIC_PD_TRK BIT(19)
#define USB2_VBUS_ID 0x360
#define VBUS_OVERRIDE BIT(14)
#define ID_OVERRIDE(x) (((x) & 0xf) << 18)
#define ID_OVERRIDE_FLOATING ID_OVERRIDE(8)
#define ID_OVERRIDE_GROUNDED ID_OVERRIDE(0)
/* XUSB AO registers */
#define XUSB_AO_USB_DEBOUNCE_DEL (0x4)
#define UHSIC_LINE_DEB_CNT(x) (((x) & 0xf) << 4)
#define UTMIP_LINE_DEB_CNT(x) ((x) & 0xf)
#define XUSB_AO_UTMIP_TRIGGERS(x) (0x40 + (x) * 4)
#define CLR_WALK_PTR BIT(0)
#define CAP_CFG BIT(1)
#define CLR_WAKE_ALARM BIT(3)
#define XUSB_AO_UHSIC_TRIGGERS(x) (0x60 + (x) * 4)
#define HSIC_CLR_WALK_PTR BIT(0)
#define HSIC_CLR_WAKE_ALARM BIT(3)
#define HSIC_CAP_CFG BIT(4)
#define XUSB_AO_UTMIP_SAVED_STATE(x) (0x70 + (x) * 4)
#define SPEED(x) ((x) & 0x3)
#define UTMI_HS SPEED(0)
#define UTMI_FS SPEED(1)
#define UTMI_LS SPEED(2)
#define UTMI_RST SPEED(3)
#define XUSB_AO_UHSIC_SAVED_STATE(x) (0x90 + (x) * 4)
#define MODE(x) ((x) & 0x1)
#define MODE_HS MODE(0)
#define MODE_RST MODE(1)
#define XUSB_AO_UTMIP_SLEEPWALK_STATUS(x) (0xa0 + (x) * 4)
#define XUSB_AO_UTMIP_SLEEPWALK_CFG(x) (0xd0 + (x) * 4)
#define XUSB_AO_UHSIC_SLEEPWALK_CFG(x) (0xf0 + (x) * 4)
#define FAKE_USBOP_VAL BIT(0)
#define FAKE_USBON_VAL BIT(1)
#define FAKE_USBOP_EN BIT(2)
#define FAKE_USBON_EN BIT(3)
#define FAKE_STROBE_VAL BIT(0)
#define FAKE_DATA_VAL BIT(1)
#define FAKE_STROBE_EN BIT(2)
#define FAKE_DATA_EN BIT(3)
#define WAKE_WALK_EN BIT(14)
#define MASTER_ENABLE BIT(15)
#define LINEVAL_WALK_EN BIT(16)
#define WAKE_VAL(x) (((x) & 0xf) << 17)
#define WAKE_VAL_NONE WAKE_VAL(12)
#define WAKE_VAL_ANY WAKE_VAL(15)
#define WAKE_VAL_DS10 WAKE_VAL(2)
#define LINE_WAKEUP_EN BIT(21)
#define MASTER_CFG_SEL BIT(22)
#define XUSB_AO_UTMIP_SLEEPWALK(x) (0x100 + (x) * 4)
/* phase A */
#define USBOP_RPD_A BIT(0)
#define USBON_RPD_A BIT(1)
#define AP_A BIT(4)
#define AN_A BIT(5)
#define HIGHZ_A BIT(6)
#define MASTER_ENABLE_A BIT(7)
/* phase B */
#define USBOP_RPD_B BIT(8)
#define USBON_RPD_B BIT(9)
#define AP_B BIT(12)
#define AN_B BIT(13)
#define HIGHZ_B BIT(14)
#define MASTER_ENABLE_B BIT(15)
/* phase C */
#define USBOP_RPD_C BIT(16)
#define USBON_RPD_C BIT(17)
#define AP_C BIT(20)
#define AN_C BIT(21)
#define HIGHZ_C BIT(22)
#define MASTER_ENABLE_C BIT(23)
/* phase D */
#define USBOP_RPD_D BIT(24)
#define USBON_RPD_D BIT(25)
#define AP_D BIT(28)
#define AN_D BIT(29)
#define HIGHZ_D BIT(30)
#define MASTER_ENABLE_D BIT(31)
#define MASTER_ENABLE_B_C_D \
(MASTER_ENABLE_B | MASTER_ENABLE_C | MASTER_ENABLE_D)
#define XUSB_AO_UHSIC_SLEEPWALK(x) (0x120 + (x) * 4)
/* phase A */
#define RPD_STROBE_A BIT(0)
#define RPD_DATA0_A BIT(1)
#define RPU_STROBE_A BIT(2)
#define RPU_DATA0_A BIT(3)
/* phase B */
#define RPD_STROBE_B BIT(8)
#define RPD_DATA0_B BIT(9)
#define RPU_STROBE_B BIT(10)
#define RPU_DATA0_B BIT(11)
/* phase C */
#define RPD_STROBE_C BIT(16)
#define RPD_DATA0_C BIT(17)
#define RPU_STROBE_C BIT(18)
#define RPU_DATA0_C BIT(19)
/* phase D */
#define RPD_STROBE_D BIT(24)
#define RPD_DATA0_D BIT(25)
#define RPU_STROBE_D BIT(26)
#define RPU_DATA0_D BIT(27)
#define XUSB_AO_UTMIP_PAD_CFG(x) (0x130 + (x) * 4)
#define FSLS_USE_XUSB_AO BIT(3)
#define TRK_CTRL_USE_XUSB_AO BIT(4)
#define RPD_CTRL_USE_XUSB_AO BIT(5)
#define RPU_USE_XUSB_AO BIT(6)
#define VREG_USE_XUSB_AO BIT(7)
#define USBOP_VAL_PD BIT(8)
#define USBON_VAL_PD BIT(9)
#define E_DPD_OVRD_EN BIT(10)
#define E_DPD_OVRD_VAL BIT(11)
#define XUSB_AO_UHSIC_PAD_CFG(x) (0x150 + (x) * 4)
#define STROBE_VAL_PD BIT(0)
#define DATA0_VAL_PD BIT(1)
#define USE_XUSB_AO BIT(4)
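/*
 * Describe a single lane for the XUSB padctl core: its name, the register
 * offset/shift/mask used to select its function, and the list of selectable
 * functions.
 */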
#define TEGRA186_LANE(_name, _offset, _shift, _mask, _type) \
{ \
.name = _name, \
.offset = _offset, \
.shift = _shift, \
.mask = _mask, \
.num_funcs = ARRAY_SIZE(tegra186_##_type##_functions), \
.funcs = tegra186_##_type##_functions, \
}
struct tegra_xusb_fuse_calibration {
u32 *hs_curr_level;
u32 hs_squelch;
u32 hs_term_range_adj;
u32 rpd_ctrl;
};
struct tegra186_xusb_padctl_context {
u32 vbus_id;
u32 usb2_pad_mux;
u32 usb2_port_cap;
u32 ss_port_cap;
};
struct tegra186_xusb_padctl {
struct tegra_xusb_padctl base;
void __iomem *ao_regs;
struct tegra_xusb_fuse_calibration calib;
/* UTMI bias and tracking */
struct clk *usb2_trk_clk;
unsigned int bias_pad_enable;
/* padctl context */
struct tegra186_xusb_padctl_context context;
};
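/* Accessors for the XUSB AO (always-on) register block. */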
static inline void ao_writel(struct tegra186_xusb_padctl *priv, u32 value, unsigned int offset)
{
writel(value, priv->ao_regs + offset);
}
static inline u32 ao_readl(struct tegra186_xusb_padctl *priv, unsigned int offset)
{
return readl(priv->ao_regs + offset);
}
static inline struct tegra186_xusb_padctl *
to_tegra186_xusb_padctl(struct tegra_xusb_padctl *padctl)
{
return container_of(padctl, struct tegra186_xusb_padctl, base);
}
/* USB 2.0 UTMI PHY support */
static struct tegra_xusb_lane *
tegra186_usb2_lane_probe(struct tegra_xusb_pad *pad, struct device_node *np,
unsigned int index)
{
struct tegra_xusb_usb2_lane *usb2;
int err;
usb2 = kzalloc(sizeof(*usb2), GFP_KERNEL);
if (!usb2)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&usb2->base.list);
usb2->base.soc = &pad->soc->lanes[index];
usb2->base.index = index;
usb2->base.pad = pad;
usb2->base.np = np;
err = tegra_xusb_lane_parse_dt(&usb2->base, np);
if (err < 0) {
kfree(usb2);
return ERR_PTR(err);
}
return &usb2->base;
}
static void tegra186_usb2_lane_remove(struct tegra_xusb_lane *lane)
{
struct tegra_xusb_usb2_lane *usb2 = to_usb2_lane(lane);
kfree(usb2);
}
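/*
 * Program the XUSB AO sleepwalk logic so the UTMI pad keeps responding to
 * line activity and can generate wake events while the controller is power
 * gated.
 */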
static int tegra186_utmi_enable_phy_sleepwalk(struct tegra_xusb_lane *lane,
enum usb_device_speed speed)
{
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
struct tegra186_xusb_padctl *priv = to_tegra186_xusb_padctl(padctl);
unsigned int index = lane->index;
u32 value;
mutex_lock(&padctl->lock);
/* ensure sleepwalk logic is disabled */
value = ao_readl(priv, XUSB_AO_UTMIP_SLEEPWALK_CFG(index));
value &= ~MASTER_ENABLE;
ao_writel(priv, value, XUSB_AO_UTMIP_SLEEPWALK_CFG(index));
	/* ensure sleepwalk logic is in low-power mode */
value = ao_readl(priv, XUSB_AO_UTMIP_SLEEPWALK_CFG(index));
value |= MASTER_CFG_SEL;
ao_writel(priv, value, XUSB_AO_UTMIP_SLEEPWALK_CFG(index));
/* set debounce time */
value = ao_readl(priv, XUSB_AO_USB_DEBOUNCE_DEL);
value &= ~UTMIP_LINE_DEB_CNT(~0);
value |= UTMIP_LINE_DEB_CNT(1);
ao_writel(priv, value, XUSB_AO_USB_DEBOUNCE_DEL);
	/* ensure fake events of sleepwalk logic are disabled */
value = ao_readl(priv, XUSB_AO_UTMIP_SLEEPWALK_CFG(index));
value &= ~(FAKE_USBOP_VAL | FAKE_USBON_VAL |
FAKE_USBOP_EN | FAKE_USBON_EN);
ao_writel(priv, value, XUSB_AO_UTMIP_SLEEPWALK_CFG(index));
/* ensure wake events of sleepwalk logic are not latched */
value = ao_readl(priv, XUSB_AO_UTMIP_SLEEPWALK_CFG(index));
value &= ~LINE_WAKEUP_EN;
ao_writel(priv, value, XUSB_AO_UTMIP_SLEEPWALK_CFG(index));
/* disable wake event triggers of sleepwalk logic */
value = ao_readl(priv, XUSB_AO_UTMIP_SLEEPWALK_CFG(index));
value &= ~WAKE_VAL(~0);
value |= WAKE_VAL_NONE;
ao_writel(priv, value, XUSB_AO_UTMIP_SLEEPWALK_CFG(index));
/* power down the line state detectors of the pad */
value = ao_readl(priv, XUSB_AO_UTMIP_PAD_CFG(index));
value |= (USBOP_VAL_PD | USBON_VAL_PD);
ao_writel(priv, value, XUSB_AO_UTMIP_PAD_CFG(index));
/* save state per speed */
value = ao_readl(priv, XUSB_AO_UTMIP_SAVED_STATE(index));
value &= ~SPEED(~0);
switch (speed) {
case USB_SPEED_HIGH:
value |= UTMI_HS;
break;
case USB_SPEED_FULL:
value |= UTMI_FS;
break;
case USB_SPEED_LOW:
value |= UTMI_LS;
break;
default:
value |= UTMI_RST;
break;
}
ao_writel(priv, value, XUSB_AO_UTMIP_SAVED_STATE(index));
/* enable the trigger of the sleepwalk logic */
value = ao_readl(priv, XUSB_AO_UTMIP_SLEEPWALK_CFG(index));
value |= LINEVAL_WALK_EN;
value &= ~WAKE_WALK_EN;
ao_writel(priv, value, XUSB_AO_UTMIP_SLEEPWALK_CFG(index));
	/*
	 * Reset the walk pointer and clear the alarm of the sleepwalk logic,
	 * as well as capture the configuration of the USB2.0 pad.
	 */
value = ao_readl(priv, XUSB_AO_UTMIP_TRIGGERS(index));
value |= (CLR_WALK_PTR | CLR_WAKE_ALARM | CAP_CFG);
ao_writel(priv, value, XUSB_AO_UTMIP_TRIGGERS(index));
	/*
	 * Set up the pull-ups and pull-downs of the signals during the four
	 * stages of sleepwalk.
	 *
	 * If a device is connected, program the sleepwalk logic to maintain a
	 * J state and to keep driving K upon seeing a remote wake.
	 */
value = USBOP_RPD_A | USBOP_RPD_B | USBOP_RPD_C | USBOP_RPD_D;
value |= USBON_RPD_A | USBON_RPD_B | USBON_RPD_C | USBON_RPD_D;
switch (speed) {
case USB_SPEED_HIGH:
case USB_SPEED_FULL:
/* J state: D+/D- = high/low, K state: D+/D- = low/high */
value |= HIGHZ_A;
value |= AP_A;
value |= AN_B | AN_C | AN_D;
if (padctl->soc->supports_lp_cfg_en)
value |= MASTER_ENABLE_B_C_D;
break;
case USB_SPEED_LOW:
/* J state: D+/D- = low/high, K state: D+/D- = high/low */
value |= HIGHZ_A;
value |= AN_A;
value |= AP_B | AP_C | AP_D;
if (padctl->soc->supports_lp_cfg_en)
value |= MASTER_ENABLE_B_C_D;
break;
default:
value |= HIGHZ_A | HIGHZ_B | HIGHZ_C | HIGHZ_D;
break;
}
ao_writel(priv, value, XUSB_AO_UTMIP_SLEEPWALK(index));
/* power up the line state detectors of the pad */
value = ao_readl(priv, XUSB_AO_UTMIP_PAD_CFG(index));
value &= ~(USBOP_VAL_PD | USBON_VAL_PD);
ao_writel(priv, value, XUSB_AO_UTMIP_PAD_CFG(index));
usleep_range(150, 200);
/* switch the electric control of the USB2.0 pad to XUSB_AO */
value = ao_readl(priv, XUSB_AO_UTMIP_PAD_CFG(index));
value |= FSLS_USE_XUSB_AO | TRK_CTRL_USE_XUSB_AO | RPD_CTRL_USE_XUSB_AO |
RPU_USE_XUSB_AO | VREG_USE_XUSB_AO;
ao_writel(priv, value, XUSB_AO_UTMIP_PAD_CFG(index));
/* set the wake signaling trigger events */
value = ao_readl(priv, XUSB_AO_UTMIP_SLEEPWALK_CFG(index));
value &= ~WAKE_VAL(~0);
value |= WAKE_VAL_ANY;
ao_writel(priv, value, XUSB_AO_UTMIP_SLEEPWALK_CFG(index));
/* enable the wake detection */
value = ao_readl(priv, XUSB_AO_UTMIP_SLEEPWALK_CFG(index));
value |= MASTER_ENABLE | LINE_WAKEUP_EN;
ao_writel(priv, value, XUSB_AO_UTMIP_SLEEPWALK_CFG(index));
mutex_unlock(&padctl->lock);
return 0;
}
static int tegra186_utmi_disable_phy_sleepwalk(struct tegra_xusb_lane *lane)
{
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
struct tegra186_xusb_padctl *priv = to_tegra186_xusb_padctl(padctl);
unsigned int index = lane->index;
u32 value;
mutex_lock(&padctl->lock);
/* disable the wake detection */
value = ao_readl(priv, XUSB_AO_UTMIP_SLEEPWALK_CFG(index));
value &= ~(MASTER_ENABLE | LINE_WAKEUP_EN);
ao_writel(priv, value, XUSB_AO_UTMIP_SLEEPWALK_CFG(index));
/* switch the electric control of the USB2.0 pad to XUSB vcore logic */
value = ao_readl(priv, XUSB_AO_UTMIP_PAD_CFG(index));
value &= ~(FSLS_USE_XUSB_AO | TRK_CTRL_USE_XUSB_AO | RPD_CTRL_USE_XUSB_AO |
RPU_USE_XUSB_AO | VREG_USE_XUSB_AO);
ao_writel(priv, value, XUSB_AO_UTMIP_PAD_CFG(index));
/* disable wake event triggers of sleepwalk logic */
value = ao_readl(priv, XUSB_AO_UTMIP_SLEEPWALK_CFG(index));
value &= ~WAKE_VAL(~0);
value |= WAKE_VAL_NONE;
ao_writel(priv, value, XUSB_AO_UTMIP_SLEEPWALK_CFG(index));
if (padctl->soc->supports_lp_cfg_en) {
/* disable the four stages of sleepwalk */
value = ao_readl(priv, XUSB_AO_UTMIP_SLEEPWALK(index));
value &= ~(MASTER_ENABLE_A | MASTER_ENABLE_B_C_D);
ao_writel(priv, value, XUSB_AO_UTMIP_SLEEPWALK(index));
}
/* power down the line state detectors of the port */
value = ao_readl(priv, XUSB_AO_UTMIP_PAD_CFG(index));
value |= USBOP_VAL_PD | USBON_VAL_PD;
ao_writel(priv, value, XUSB_AO_UTMIP_PAD_CFG(index));
/* clear alarm of the sleepwalk logic */
value = ao_readl(priv, XUSB_AO_UTMIP_TRIGGERS(index));
value |= CLR_WAKE_ALARM;
ao_writel(priv, value, XUSB_AO_UTMIP_TRIGGERS(index));
mutex_unlock(&padctl->lock);
return 0;
}
static int tegra186_utmi_enable_phy_wake(struct tegra_xusb_lane *lane)
{
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
unsigned int index = lane->index;
u32 value;
mutex_lock(&padctl->lock);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
value &= ~ALL_WAKE_EVENTS;
value |= USB2_PORT_WAKEUP_EVENT(index);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
usleep_range(10, 20);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
value &= ~ALL_WAKE_EVENTS;
value |= USB2_PORT_WAKE_INTERRUPT_ENABLE(index);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
mutex_unlock(&padctl->lock);
return 0;
}
static int tegra186_utmi_disable_phy_wake(struct tegra_xusb_lane *lane)
{
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
unsigned int index = lane->index;
u32 value;
mutex_lock(&padctl->lock);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
value &= ~ALL_WAKE_EVENTS;
value &= ~USB2_PORT_WAKE_INTERRUPT_ENABLE(index);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
usleep_range(10, 20);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
value &= ~ALL_WAKE_EVENTS;
value |= USB2_PORT_WAKEUP_EVENT(index);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
mutex_unlock(&padctl->lock);
return 0;
}
static bool tegra186_utmi_phy_remote_wake_detected(struct tegra_xusb_lane *lane)
{
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
unsigned int index = lane->index;
u32 value;
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
if ((value & USB2_PORT_WAKE_INTERRUPT_ENABLE(index)) &&
(value & USB2_PORT_WAKEUP_EVENT(index)))
return true;
return false;
}
static const struct tegra_xusb_lane_ops tegra186_usb2_lane_ops = {
.probe = tegra186_usb2_lane_probe,
.remove = tegra186_usb2_lane_remove,
.enable_phy_sleepwalk = tegra186_utmi_enable_phy_sleepwalk,
.disable_phy_sleepwalk = tegra186_utmi_disable_phy_sleepwalk,
.enable_phy_wake = tegra186_utmi_enable_phy_wake,
.disable_phy_wake = tegra186_utmi_disable_phy_wake,
.remote_wake_detected = tegra186_utmi_phy_remote_wake_detected,
};
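/*
 * The bias pad is shared by all UTMI pads: power it up and run the tracking
 * (calibration) sequence on first use, and keep a reference count so it is
 * only powered down again when the last user releases it.
 */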
static void tegra186_utmi_bias_pad_power_on(struct tegra_xusb_padctl *padctl)
{
struct tegra186_xusb_padctl *priv = to_tegra186_xusb_padctl(padctl);
struct device *dev = padctl->dev;
u32 value;
int err;
mutex_lock(&padctl->lock);
if (priv->bias_pad_enable++ > 0) {
mutex_unlock(&padctl->lock);
return;
}
err = clk_prepare_enable(priv->usb2_trk_clk);
if (err < 0)
dev_warn(dev, "failed to enable USB2 trk clock: %d\n", err);
value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL1);
value &= ~USB2_TRK_START_TIMER(~0);
value |= USB2_TRK_START_TIMER(0x1e);
value &= ~USB2_TRK_DONE_RESET_TIMER(~0);
value |= USB2_TRK_DONE_RESET_TIMER(0xa);
padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL1);
value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL0);
value &= ~BIAS_PAD_PD;
value &= ~HS_SQUELCH_LEVEL(~0);
value |= HS_SQUELCH_LEVEL(priv->calib.hs_squelch);
padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL0);
udelay(1);
value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL1);
value &= ~USB2_PD_TRK;
padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL1);
if (padctl->soc->poll_trk_completed) {
err = padctl_readl_poll(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL1,
USB2_TRK_COMPLETED, USB2_TRK_COMPLETED, 100);
if (err) {
			/*
			 * A failure to poll for tracking completion does not
			 * prevent the bias pad from powering on.
			 */
dev_warn(dev, "failed to poll USB2 trk completed: %d\n", err);
}
value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL1);
value |= USB2_TRK_COMPLETED;
padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL1);
} else {
udelay(100);
}
if (padctl->soc->trk_hw_mode) {
value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL2);
value |= USB2_TRK_HW_MODE;
value &= ~CYA_TRK_CODE_UPDATE_ON_IDLE;
padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL2);
} else {
clk_disable_unprepare(priv->usb2_trk_clk);
}
mutex_unlock(&padctl->lock);
}
static void tegra186_utmi_bias_pad_power_off(struct tegra_xusb_padctl *padctl)
{
struct tegra186_xusb_padctl *priv = to_tegra186_xusb_padctl(padctl);
u32 value;
mutex_lock(&padctl->lock);
if (WARN_ON(priv->bias_pad_enable == 0)) {
mutex_unlock(&padctl->lock);
return;
}
if (--priv->bias_pad_enable > 0) {
mutex_unlock(&padctl->lock);
return;
}
value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL1);
value |= USB2_PD_TRK;
padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL1);
if (padctl->soc->trk_hw_mode) {
value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL2);
value &= ~USB2_TRK_HW_MODE;
padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL2);
clk_disable_unprepare(priv->usb2_trk_clk);
}
mutex_unlock(&padctl->lock);
}
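/* Power up a single UTMI pad; the shared bias pad is brought up first. */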
static void tegra186_utmi_pad_power_on(struct phy *phy)
{
	struct tegra_xusb_lane *lane;
	struct tegra_xusb_padctl *padctl;
	struct tegra_xusb_usb2_port *port;
	struct device *dev;
	unsigned int index;
	u32 value;
	if (!phy)
		return;
	lane = phy_get_drvdata(phy);
	padctl = lane->pad->padctl;
	dev = padctl->dev;
	index = lane->index;
	port = tegra_xusb_find_usb2_port(padctl, index);
	if (!port) {
		dev_err(dev, "no port found for USB2 lane %u\n", index);
		return;
	}
dev_dbg(dev, "power on UTMI pad %u\n", index);
tegra186_utmi_bias_pad_power_on(padctl);
udelay(2);
value = padctl_readl(padctl, XUSB_PADCTL_USB2_OTG_PADX_CTL0(index));
value &= ~USB2_OTG_PD;
padctl_writel(padctl, value, XUSB_PADCTL_USB2_OTG_PADX_CTL0(index));
value = padctl_readl(padctl, XUSB_PADCTL_USB2_OTG_PADX_CTL1(index));
value &= ~USB2_OTG_PD_DR;
padctl_writel(padctl, value, XUSB_PADCTL_USB2_OTG_PADX_CTL1(index));
}
static void tegra186_utmi_pad_power_down(struct phy *phy)
{
	struct tegra_xusb_lane *lane;
	struct tegra_xusb_padctl *padctl;
	unsigned int index;
	u32 value;
	if (!phy)
		return;
	lane = phy_get_drvdata(phy);
	padctl = lane->pad->padctl;
	index = lane->index;
dev_dbg(padctl->dev, "power down UTMI pad %u\n", index);
value = padctl_readl(padctl, XUSB_PADCTL_USB2_OTG_PADX_CTL0(index));
value |= USB2_OTG_PD;
padctl_writel(padctl, value, XUSB_PADCTL_USB2_OTG_PADX_CTL0(index));
value = padctl_readl(padctl, XUSB_PADCTL_USB2_OTG_PADX_CTL1(index));
value |= USB2_OTG_PD_DR;
padctl_writel(padctl, value, XUSB_PADCTL_USB2_OTG_PADX_CTL1(index));
udelay(2);
tegra186_utmi_bias_pad_power_off(padctl);
}
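/* Override the VBUS and ID status bits used for OTG role detection. */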
static int tegra186_xusb_padctl_vbus_override(struct tegra_xusb_padctl *padctl,
bool status)
{
u32 value;
dev_dbg(padctl->dev, "%s vbus override\n", status ? "set" : "clear");
value = padctl_readl(padctl, USB2_VBUS_ID);
if (status) {
value |= VBUS_OVERRIDE;
value &= ~ID_OVERRIDE(~0);
value |= ID_OVERRIDE_FLOATING;
} else {
value &= ~VBUS_OVERRIDE;
}
padctl_writel(padctl, value, USB2_VBUS_ID);
return 0;
}
static int tegra186_xusb_padctl_id_override(struct tegra_xusb_padctl *padctl,
bool status)
{
u32 value;
dev_dbg(padctl->dev, "%s id override\n", status ? "set" : "clear");
value = padctl_readl(padctl, USB2_VBUS_ID);
if (status) {
if (value & VBUS_OVERRIDE) {
value &= ~VBUS_OVERRIDE;
padctl_writel(padctl, value, USB2_VBUS_ID);
usleep_range(1000, 2000);
value = padctl_readl(padctl, USB2_VBUS_ID);
}
value &= ~ID_OVERRIDE(~0);
value |= ID_OVERRIDE_GROUNDED;
} else {
value &= ~ID_OVERRIDE(~0);
value |= ID_OVERRIDE_FLOATING;
}
padctl_writel(padctl, value, USB2_VBUS_ID);
return 0;
}
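/*
 * Handle OTG role changes: ground the ID override and enable VBUS for the
 * host role, assert the VBUS override for the device role, and clear both
 * when no role is selected.
 */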
static int tegra186_utmi_phy_set_mode(struct phy *phy, enum phy_mode mode,
int submode)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
struct tegra_xusb_usb2_port *port = tegra_xusb_find_usb2_port(padctl,
lane->index);
int err = 0;
mutex_lock(&padctl->lock);
	dev_dbg(&port->base.dev, "%s: mode %d\n", __func__, mode);
if (mode == PHY_MODE_USB_OTG) {
if (submode == USB_ROLE_HOST) {
tegra186_xusb_padctl_id_override(padctl, true);
err = regulator_enable(port->supply);
} else if (submode == USB_ROLE_DEVICE) {
tegra186_xusb_padctl_vbus_override(padctl, true);
} else if (submode == USB_ROLE_NONE) {
			/*
			 * When the port is peripheral-only or the role
			 * transitions to USB_ROLE_NONE from USB_ROLE_DEVICE,
			 * the regulator is not enabled.
			 */
if (regulator_is_enabled(port->supply))
regulator_disable(port->supply);
tegra186_xusb_padctl_id_override(padctl, false);
tegra186_xusb_padctl_vbus_override(padctl, false);
}
}
mutex_unlock(&padctl->lock);
return err;
}
static int tegra186_utmi_phy_power_on(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
struct tegra_xusb_usb2_lane *usb2 = to_usb2_lane(lane);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
struct tegra186_xusb_padctl *priv = to_tegra186_xusb_padctl(padctl);
struct tegra_xusb_usb2_port *port;
unsigned int index = lane->index;
struct device *dev = padctl->dev;
u32 value;
port = tegra_xusb_find_usb2_port(padctl, index);
if (!port) {
dev_err(dev, "no port found for USB2 lane %u\n", index);
return -ENODEV;
}
value = padctl_readl(padctl, XUSB_PADCTL_USB2_PAD_MUX);
value &= ~(USB2_PORT_MASK << USB2_PORT_SHIFT(index));
value |= (PORT_XUSB << USB2_PORT_SHIFT(index));
padctl_writel(padctl, value, XUSB_PADCTL_USB2_PAD_MUX);
value = padctl_readl(padctl, XUSB_PADCTL_USB2_PORT_CAP);
value &= ~(PORT_CAP_MASK << PORTX_CAP_SHIFT(index));
if (port->mode == USB_DR_MODE_UNKNOWN)
value |= (PORT_CAP_DISABLED << PORTX_CAP_SHIFT(index));
else if (port->mode == USB_DR_MODE_PERIPHERAL)
value |= (PORT_CAP_DEVICE << PORTX_CAP_SHIFT(index));
else if (port->mode == USB_DR_MODE_HOST)
value |= (PORT_CAP_HOST << PORTX_CAP_SHIFT(index));
else if (port->mode == USB_DR_MODE_OTG)
value |= (PORT_CAP_OTG << PORTX_CAP_SHIFT(index));
padctl_writel(padctl, value, XUSB_PADCTL_USB2_PORT_CAP);
value = padctl_readl(padctl, XUSB_PADCTL_USB2_OTG_PADX_CTL0(index));
value &= ~USB2_OTG_PD_ZI;
value |= TERM_SEL;
value &= ~HS_CURR_LEVEL(~0);
if (usb2->hs_curr_level_offset) {
int hs_current_level;
hs_current_level = (int)priv->calib.hs_curr_level[index] +
usb2->hs_curr_level_offset;
if (hs_current_level < 0)
hs_current_level = 0;
if (hs_current_level > 0x3f)
hs_current_level = 0x3f;
value |= HS_CURR_LEVEL(hs_current_level);
} else {
value |= HS_CURR_LEVEL(priv->calib.hs_curr_level[index]);
}
padctl_writel(padctl, value, XUSB_PADCTL_USB2_OTG_PADX_CTL0(index));
value = padctl_readl(padctl, XUSB_PADCTL_USB2_OTG_PADX_CTL1(index));
value &= ~TERM_RANGE_ADJ(~0);
value |= TERM_RANGE_ADJ(priv->calib.hs_term_range_adj);
value &= ~RPD_CTRL(~0);
value |= RPD_CTRL(priv->calib.rpd_ctrl);
padctl_writel(padctl, value, XUSB_PADCTL_USB2_OTG_PADX_CTL1(index));
tegra186_utmi_pad_power_on(phy);
return 0;
}
static int tegra186_utmi_phy_power_off(struct phy *phy)
{
tegra186_utmi_pad_power_down(phy);
return 0;
}
static int tegra186_utmi_phy_init(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
struct tegra_xusb_usb2_port *port;
unsigned int index = lane->index;
struct device *dev = padctl->dev;
int err;
port = tegra_xusb_find_usb2_port(padctl, index);
if (!port) {
dev_err(dev, "no port found for USB2 lane %u\n", index);
return -ENODEV;
}
if (port->supply && port->mode == USB_DR_MODE_HOST) {
err = regulator_enable(port->supply);
if (err) {
dev_err(dev, "failed to enable port %u VBUS: %d\n",
index, err);
return err;
}
}
return 0;
}
static int tegra186_utmi_phy_exit(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
struct tegra_xusb_usb2_port *port;
unsigned int index = lane->index;
struct device *dev = padctl->dev;
int err;
port = tegra_xusb_find_usb2_port(padctl, index);
if (!port) {
dev_err(dev, "no port found for USB2 lane %u\n", index);
return -ENODEV;
}
if (port->supply && port->mode == USB_DR_MODE_HOST) {
err = regulator_disable(port->supply);
if (err) {
dev_err(dev, "failed to disable port %u VBUS: %d\n",
index, err);
return err;
}
}
return 0;
}
static const struct phy_ops utmi_phy_ops = {
.init = tegra186_utmi_phy_init,
.exit = tegra186_utmi_phy_exit,
.power_on = tegra186_utmi_phy_power_on,
.power_off = tegra186_utmi_phy_power_off,
.set_mode = tegra186_utmi_phy_set_mode,
.owner = THIS_MODULE,
};
static struct tegra_xusb_pad *
tegra186_usb2_pad_probe(struct tegra_xusb_padctl *padctl,
const struct tegra_xusb_pad_soc *soc,
struct device_node *np)
{
struct tegra186_xusb_padctl *priv = to_tegra186_xusb_padctl(padctl);
struct tegra_xusb_usb2_pad *usb2;
struct tegra_xusb_pad *pad;
int err;
usb2 = kzalloc(sizeof(*usb2), GFP_KERNEL);
if (!usb2)
return ERR_PTR(-ENOMEM);
pad = &usb2->base;
pad->ops = &tegra186_usb2_lane_ops;
pad->soc = soc;
err = tegra_xusb_pad_init(pad, padctl, np);
if (err < 0) {
kfree(usb2);
goto out;
}
priv->usb2_trk_clk = devm_clk_get(&pad->dev, "trk");
if (IS_ERR(priv->usb2_trk_clk)) {
err = PTR_ERR(priv->usb2_trk_clk);
dev_dbg(&pad->dev, "failed to get usb2 trk clock: %d\n", err);
goto unregister;
}
err = tegra_xusb_pad_register(pad, &utmi_phy_ops);
if (err < 0)
goto unregister;
dev_set_drvdata(&pad->dev, pad);
return pad;
unregister:
device_unregister(&pad->dev);
out:
return ERR_PTR(err);
}
static void tegra186_usb2_pad_remove(struct tegra_xusb_pad *pad)
{
struct tegra_xusb_usb2_pad *usb2 = to_usb2_pad(pad);
kfree(usb2);
}
static const struct tegra_xusb_pad_ops tegra186_usb2_pad_ops = {
.probe = tegra186_usb2_pad_probe,
.remove = tegra186_usb2_pad_remove,
};
static const char * const tegra186_usb2_functions[] = {
"xusb",
};
static int tegra186_usb2_port_enable(struct tegra_xusb_port *port)
{
return 0;
}
static void tegra186_usb2_port_disable(struct tegra_xusb_port *port)
{
}
static struct tegra_xusb_lane *
tegra186_usb2_port_map(struct tegra_xusb_port *port)
{
return tegra_xusb_find_lane(port->padctl, "usb2", port->index);
}
static const struct tegra_xusb_port_ops tegra186_usb2_port_ops = {
.release = tegra_xusb_usb2_port_release,
.remove = tegra_xusb_usb2_port_remove,
.enable = tegra186_usb2_port_enable,
.disable = tegra186_usb2_port_disable,
.map = tegra186_usb2_port_map,
};
/* SuperSpeed PHY support */
static struct tegra_xusb_lane *
tegra186_usb3_lane_probe(struct tegra_xusb_pad *pad, struct device_node *np,
unsigned int index)
{
struct tegra_xusb_usb3_lane *usb3;
int err;
usb3 = kzalloc(sizeof(*usb3), GFP_KERNEL);
if (!usb3)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&usb3->base.list);
usb3->base.soc = &pad->soc->lanes[index];
usb3->base.index = index;
usb3->base.pad = pad;
usb3->base.np = np;
err = tegra_xusb_lane_parse_dt(&usb3->base, np);
if (err < 0) {
kfree(usb3);
return ERR_PTR(err);
}
return &usb3->base;
}
static void tegra186_usb3_lane_remove(struct tegra_xusb_lane *lane)
{
struct tegra_xusb_usb3_lane *usb3 = to_usb3_lane(lane);
kfree(usb3);
}
static int tegra186_usb3_enable_phy_sleepwalk(struct tegra_xusb_lane *lane,
enum usb_device_speed speed)
{
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
unsigned int index = lane->index;
u32 value;
mutex_lock(&padctl->lock);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_1);
value |= SSPX_ELPG_CLAMP_EN_EARLY(index);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM_1);
usleep_range(100, 200);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_1);
value |= SSPX_ELPG_CLAMP_EN(index);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM_1);
usleep_range(250, 350);
mutex_unlock(&padctl->lock);
return 0;
}
static int tegra186_usb3_disable_phy_sleepwalk(struct tegra_xusb_lane *lane)
{
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
unsigned int index = lane->index;
u32 value;
mutex_lock(&padctl->lock);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_1);
value &= ~SSPX_ELPG_CLAMP_EN_EARLY(index);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM_1);
usleep_range(100, 200);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_1);
value &= ~SSPX_ELPG_CLAMP_EN(index);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM_1);
mutex_unlock(&padctl->lock);
return 0;
}
static int tegra186_usb3_enable_phy_wake(struct tegra_xusb_lane *lane)
{
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
unsigned int index = lane->index;
u32 value;
mutex_lock(&padctl->lock);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
value &= ~ALL_WAKE_EVENTS;
value |= SS_PORT_WAKEUP_EVENT(index);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
usleep_range(10, 20);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
value &= ~ALL_WAKE_EVENTS;
value |= SS_PORT_WAKE_INTERRUPT_ENABLE(index);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
mutex_unlock(&padctl->lock);
return 0;
}
static int tegra186_usb3_disable_phy_wake(struct tegra_xusb_lane *lane)
{
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
unsigned int index = lane->index;
u32 value;
mutex_lock(&padctl->lock);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
value &= ~ALL_WAKE_EVENTS;
value &= ~SS_PORT_WAKE_INTERRUPT_ENABLE(index);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
usleep_range(10, 20);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
value &= ~ALL_WAKE_EVENTS;
value |= SS_PORT_WAKEUP_EVENT(index);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
mutex_unlock(&padctl->lock);
return 0;
}
static bool tegra186_usb3_phy_remote_wake_detected(struct tegra_xusb_lane *lane)
{
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
unsigned int index = lane->index;
u32 value;
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
if ((value & SS_PORT_WAKE_INTERRUPT_ENABLE(index)) && (value & SS_PORT_WAKEUP_EVENT(index)))
return true;
return false;
}
static const struct tegra_xusb_lane_ops tegra186_usb3_lane_ops = {
.probe = tegra186_usb3_lane_probe,
.remove = tegra186_usb3_lane_remove,
.enable_phy_sleepwalk = tegra186_usb3_enable_phy_sleepwalk,
.disable_phy_sleepwalk = tegra186_usb3_disable_phy_sleepwalk,
.enable_phy_wake = tegra186_usb3_enable_phy_wake,
.disable_phy_wake = tegra186_usb3_disable_phy_wake,
.remote_wake_detected = tegra186_usb3_phy_remote_wake_detected,
};
static int tegra186_usb3_port_enable(struct tegra_xusb_port *port)
{
return 0;
}
static void tegra186_usb3_port_disable(struct tegra_xusb_port *port)
{
}
static struct tegra_xusb_lane *
tegra186_usb3_port_map(struct tegra_xusb_port *port)
{
return tegra_xusb_find_lane(port->padctl, "usb3", port->index);
}
static const struct tegra_xusb_port_ops tegra186_usb3_port_ops = {
.release = tegra_xusb_usb3_port_release,
.enable = tegra186_usb3_port_enable,
.disable = tegra186_usb3_port_disable,
.map = tegra186_usb3_port_map,
};
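/*
 * Program the SuperSpeed port capability from the companion USB2 port's
 * dual-role mode, then release the ELPG clamps to bring the port out of
 * power gating.
 */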
static int tegra186_usb3_phy_power_on(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
struct tegra_xusb_usb3_port *port;
struct tegra_xusb_usb2_port *usb2;
unsigned int index = lane->index;
struct device *dev = padctl->dev;
u32 value;
port = tegra_xusb_find_usb3_port(padctl, index);
if (!port) {
dev_err(dev, "no port found for USB3 lane %u\n", index);
return -ENODEV;
}
usb2 = tegra_xusb_find_usb2_port(padctl, port->port);
if (!usb2) {
dev_err(dev, "no companion port found for USB3 lane %u\n",
index);
return -ENODEV;
}
mutex_lock(&padctl->lock);
value = padctl_readl(padctl, XUSB_PADCTL_SS_PORT_CAP);
value &= ~(PORT_CAP_MASK << PORTX_CAP_SHIFT(index));
if (usb2->mode == USB_DR_MODE_UNKNOWN)
value |= (PORT_CAP_DISABLED << PORTX_CAP_SHIFT(index));
else if (usb2->mode == USB_DR_MODE_PERIPHERAL)
value |= (PORT_CAP_DEVICE << PORTX_CAP_SHIFT(index));
else if (usb2->mode == USB_DR_MODE_HOST)
value |= (PORT_CAP_HOST << PORTX_CAP_SHIFT(index));
else if (usb2->mode == USB_DR_MODE_OTG)
value |= (PORT_CAP_OTG << PORTX_CAP_SHIFT(index));
padctl_writel(padctl, value, XUSB_PADCTL_SS_PORT_CAP);
if (padctl->soc->supports_gen2 && port->disable_gen2) {
value = padctl_readl(padctl, XUSB_PADCTL_SS_PORT_CFG);
value &= ~(PORTX_SPEED_SUPPORT_MASK <<
PORTX_SPEED_SUPPORT_SHIFT(index));
value |= (PORT_SPEED_SUPPORT_GEN1 <<
PORTX_SPEED_SUPPORT_SHIFT(index));
padctl_writel(padctl, value, XUSB_PADCTL_SS_PORT_CFG);
}
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_1);
value &= ~SSPX_ELPG_VCORE_DOWN(index);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM_1);
usleep_range(100, 200);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_1);
value &= ~SSPX_ELPG_CLAMP_EN_EARLY(index);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM_1);
usleep_range(100, 200);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_1);
value &= ~SSPX_ELPG_CLAMP_EN(index);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM_1);
mutex_unlock(&padctl->lock);
return 0;
}
static int tegra186_usb3_phy_power_off(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
struct tegra_xusb_usb3_port *port;
unsigned int index = lane->index;
struct device *dev = padctl->dev;
u32 value;
port = tegra_xusb_find_usb3_port(padctl, index);
if (!port) {
dev_err(dev, "no port found for USB3 lane %u\n", index);
return -ENODEV;
}
mutex_lock(&padctl->lock);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_1);
value |= SSPX_ELPG_CLAMP_EN_EARLY(index);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM_1);
usleep_range(100, 200);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_1);
value |= SSPX_ELPG_CLAMP_EN(index);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM_1);
usleep_range(250, 350);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_1);
value |= SSPX_ELPG_VCORE_DOWN(index);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM_1);
mutex_unlock(&padctl->lock);
return 0;
}
static int tegra186_usb3_phy_init(struct phy *phy)
{
return 0;
}
static int tegra186_usb3_phy_exit(struct phy *phy)
{
return 0;
}
static const struct phy_ops usb3_phy_ops = {
.init = tegra186_usb3_phy_init,
.exit = tegra186_usb3_phy_exit,
.power_on = tegra186_usb3_phy_power_on,
.power_off = tegra186_usb3_phy_power_off,
.owner = THIS_MODULE,
};
static struct tegra_xusb_pad *
tegra186_usb3_pad_probe(struct tegra_xusb_padctl *padctl,
const struct tegra_xusb_pad_soc *soc,
struct device_node *np)
{
struct tegra_xusb_usb3_pad *usb3;
struct tegra_xusb_pad *pad;
int err;
usb3 = kzalloc(sizeof(*usb3), GFP_KERNEL);
if (!usb3)
return ERR_PTR(-ENOMEM);
pad = &usb3->base;
pad->ops = &tegra186_usb3_lane_ops;
pad->soc = soc;
err = tegra_xusb_pad_init(pad, padctl, np);
if (err < 0) {
kfree(usb3);
goto out;
}
err = tegra_xusb_pad_register(pad, &usb3_phy_ops);
if (err < 0)
goto unregister;
dev_set_drvdata(&pad->dev, pad);
return pad;
unregister:
device_unregister(&pad->dev);
out:
return ERR_PTR(err);
}
static void tegra186_usb3_pad_remove(struct tegra_xusb_pad *pad)
{
	struct tegra_xusb_usb3_pad *usb3 = container_of(pad, struct tegra_xusb_usb3_pad, base);
	kfree(usb3);
}
static const struct tegra_xusb_pad_ops tegra186_usb3_pad_ops = {
.probe = tegra186_usb3_pad_probe,
.remove = tegra186_usb3_pad_remove,
};
static const char * const tegra186_usb3_functions[] = {
"xusb",
};
static int
tegra186_xusb_read_fuse_calibration(struct tegra186_xusb_padctl *padctl)
{
struct device *dev = padctl->base.dev;
unsigned int i, count;
u32 value, *level;
int err;
count = padctl->base.soc->ports.usb2.count;
level = devm_kcalloc(dev, count, sizeof(u32), GFP_KERNEL);
if (!level)
return -ENOMEM;
err = tegra_fuse_readl(TEGRA_FUSE_SKU_CALIB_0, &value);
if (err)
return dev_err_probe(dev, err,
"failed to read calibration fuse\n");
dev_dbg(dev, "FUSE_USB_CALIB_0 %#x\n", value);
for (i = 0; i < count; i++)
level[i] = (value >> HS_CURR_LEVEL_PADX_SHIFT(i)) &
HS_CURR_LEVEL_PAD_MASK;
padctl->calib.hs_curr_level = level;
padctl->calib.hs_squelch = (value >> HS_SQUELCH_SHIFT) &
HS_SQUELCH_MASK;
padctl->calib.hs_term_range_adj = (value >> HS_TERM_RANGE_ADJ_SHIFT) &
HS_TERM_RANGE_ADJ_MASK;
err = tegra_fuse_readl(TEGRA_FUSE_USB_CALIB_EXT_0, &value);
if (err) {
dev_err(dev, "failed to read calibration fuse: %d\n", err);
return err;
}
dev_dbg(dev, "FUSE_USB_CALIB_EXT_0 %#x\n", value);
padctl->calib.rpd_ctrl = (value >> RPD_CTRL_SHIFT) & RPD_CTRL_MASK;
return 0;
}
static struct tegra_xusb_padctl *
tegra186_xusb_padctl_probe(struct device *dev,
const struct tegra_xusb_padctl_soc *soc)
{
struct platform_device *pdev = to_platform_device(dev);
struct tegra186_xusb_padctl *priv;
struct resource *res;
int err;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return ERR_PTR(-ENOMEM);
priv->base.dev = dev;
priv->base.soc = soc;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ao");
priv->ao_regs = devm_ioremap_resource(dev, res);
if (IS_ERR(priv->ao_regs))
return ERR_CAST(priv->ao_regs);
err = tegra186_xusb_read_fuse_calibration(priv);
if (err < 0)
return ERR_PTR(err);
return &priv->base;
}
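/* Pad controller context save/restore used by the noirq suspend/resume hooks. */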
static void tegra186_xusb_padctl_save(struct tegra_xusb_padctl *padctl)
{
struct tegra186_xusb_padctl *priv = to_tegra186_xusb_padctl(padctl);
priv->context.vbus_id = padctl_readl(padctl, USB2_VBUS_ID);
priv->context.usb2_pad_mux = padctl_readl(padctl, XUSB_PADCTL_USB2_PAD_MUX);
priv->context.usb2_port_cap = padctl_readl(padctl, XUSB_PADCTL_USB2_PORT_CAP);
priv->context.ss_port_cap = padctl_readl(padctl, XUSB_PADCTL_SS_PORT_CAP);
}
static void tegra186_xusb_padctl_restore(struct tegra_xusb_padctl *padctl)
{
struct tegra186_xusb_padctl *priv = to_tegra186_xusb_padctl(padctl);
padctl_writel(padctl, priv->context.usb2_pad_mux, XUSB_PADCTL_USB2_PAD_MUX);
padctl_writel(padctl, priv->context.usb2_port_cap, XUSB_PADCTL_USB2_PORT_CAP);
padctl_writel(padctl, priv->context.ss_port_cap, XUSB_PADCTL_SS_PORT_CAP);
padctl_writel(padctl, priv->context.vbus_id, USB2_VBUS_ID);
}
static int tegra186_xusb_padctl_suspend_noirq(struct tegra_xusb_padctl *padctl)
{
tegra186_xusb_padctl_save(padctl);
return 0;
}
static int tegra186_xusb_padctl_resume_noirq(struct tegra_xusb_padctl *padctl)
{
tegra186_xusb_padctl_restore(padctl);
return 0;
}
static void tegra186_xusb_padctl_remove(struct tegra_xusb_padctl *padctl)
{
}
static const struct tegra_xusb_padctl_ops tegra186_xusb_padctl_ops = {
.probe = tegra186_xusb_padctl_probe,
.remove = tegra186_xusb_padctl_remove,
.suspend_noirq = tegra186_xusb_padctl_suspend_noirq,
.resume_noirq = tegra186_xusb_padctl_resume_noirq,
.vbus_override = tegra186_xusb_padctl_vbus_override,
.utmi_pad_power_on = tegra186_utmi_pad_power_on,
.utmi_pad_power_down = tegra186_utmi_pad_power_down,
};
#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
static const char * const tegra186_xusb_padctl_supply_names[] = {
"avdd-pll-erefeut",
"avdd-usb",
"vclamp-usb",
"vddio-hsic",
};
static const struct tegra_xusb_lane_soc tegra186_usb2_lanes[] = {
TEGRA186_LANE("usb2-0", 0, 0, 0, usb2),
TEGRA186_LANE("usb2-1", 0, 0, 0, usb2),
TEGRA186_LANE("usb2-2", 0, 0, 0, usb2),
};
static const struct tegra_xusb_pad_soc tegra186_usb2_pad = {
.name = "usb2",
.num_lanes = ARRAY_SIZE(tegra186_usb2_lanes),
.lanes = tegra186_usb2_lanes,
.ops = &tegra186_usb2_pad_ops,
};
static const struct tegra_xusb_lane_soc tegra186_usb3_lanes[] = {
TEGRA186_LANE("usb3-0", 0, 0, 0, usb3),
TEGRA186_LANE("usb3-1", 0, 0, 0, usb3),
TEGRA186_LANE("usb3-2", 0, 0, 0, usb3),
};
static const struct tegra_xusb_pad_soc tegra186_usb3_pad = {
.name = "usb3",
.num_lanes = ARRAY_SIZE(tegra186_usb3_lanes),
.lanes = tegra186_usb3_lanes,
.ops = &tegra186_usb3_pad_ops,
};
static const struct tegra_xusb_pad_soc * const tegra186_pads[] = {
&tegra186_usb2_pad,
&tegra186_usb3_pad,
#if 0 /* TODO implement */
&tegra186_hsic_pad,
#endif
};
const struct tegra_xusb_padctl_soc tegra186_xusb_padctl_soc = {
.num_pads = ARRAY_SIZE(tegra186_pads),
.pads = tegra186_pads,
.ports = {
.usb2 = {
.ops = &tegra186_usb2_port_ops,
.count = 3,
},
#if 0 /* TODO implement */
.hsic = {
.ops = &tegra186_hsic_port_ops,
.count = 1,
},
#endif
.usb3 = {
.ops = &tegra186_usb3_port_ops,
.count = 3,
},
},
.ops = &tegra186_xusb_padctl_ops,
.supply_names = tegra186_xusb_padctl_supply_names,
.num_supplies = ARRAY_SIZE(tegra186_xusb_padctl_supply_names),
};
EXPORT_SYMBOL_GPL(tegra186_xusb_padctl_soc);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \
IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
static const char * const tegra194_xusb_padctl_supply_names[] = {
"avdd-usb",
"vclamp-usb",
};
static const struct tegra_xusb_lane_soc tegra194_usb2_lanes[] = {
TEGRA186_LANE("usb2-0", 0, 0, 0, usb2),
TEGRA186_LANE("usb2-1", 0, 0, 0, usb2),
TEGRA186_LANE("usb2-2", 0, 0, 0, usb2),
TEGRA186_LANE("usb2-3", 0, 0, 0, usb2),
};
static const struct tegra_xusb_pad_soc tegra194_usb2_pad = {
.name = "usb2",
.num_lanes = ARRAY_SIZE(tegra194_usb2_lanes),
.lanes = tegra194_usb2_lanes,
.ops = &tegra186_usb2_pad_ops,
};
static const struct tegra_xusb_lane_soc tegra194_usb3_lanes[] = {
TEGRA186_LANE("usb3-0", 0, 0, 0, usb3),
TEGRA186_LANE("usb3-1", 0, 0, 0, usb3),
TEGRA186_LANE("usb3-2", 0, 0, 0, usb3),
TEGRA186_LANE("usb3-3", 0, 0, 0, usb3),
};
static const struct tegra_xusb_pad_soc tegra194_usb3_pad = {
.name = "usb3",
.num_lanes = ARRAY_SIZE(tegra194_usb3_lanes),
.lanes = tegra194_usb3_lanes,
.ops = &tegra186_usb3_pad_ops,
};
static const struct tegra_xusb_pad_soc * const tegra194_pads[] = {
&tegra194_usb2_pad,
&tegra194_usb3_pad,
};
const struct tegra_xusb_padctl_soc tegra194_xusb_padctl_soc = {
.num_pads = ARRAY_SIZE(tegra194_pads),
.pads = tegra194_pads,
.ports = {
.usb2 = {
.ops = &tegra186_usb2_port_ops,
.count = 4,
},
.usb3 = {
.ops = &tegra186_usb3_port_ops,
.count = 4,
},
},
.ops = &tegra186_xusb_padctl_ops,
.supply_names = tegra194_xusb_padctl_supply_names,
.num_supplies = ARRAY_SIZE(tegra194_xusb_padctl_supply_names),
.supports_gen2 = true,
.poll_trk_completed = true,
};
EXPORT_SYMBOL_GPL(tegra194_xusb_padctl_soc);
const struct tegra_xusb_padctl_soc tegra234_xusb_padctl_soc = {
.num_pads = ARRAY_SIZE(tegra194_pads),
.pads = tegra194_pads,
.ports = {
.usb2 = {
.ops = &tegra186_usb2_port_ops,
.count = 4,
},
.usb3 = {
.ops = &tegra186_usb3_port_ops,
.count = 4,
},
},
.ops = &tegra186_xusb_padctl_ops,
.supply_names = tegra194_xusb_padctl_supply_names,
.num_supplies = ARRAY_SIZE(tegra194_xusb_padctl_supply_names),
.supports_gen2 = true,
.poll_trk_completed = true,
.trk_hw_mode = true,
.supports_lp_cfg_en = true,
};
EXPORT_SYMBOL_GPL(tegra234_xusb_padctl_soc);
#endif
MODULE_AUTHOR("JC Kuo <[email protected]>");
MODULE_DESCRIPTION("NVIDIA Tegra186 XUSB Pad Controller driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/phy/tegra/xusb-tegra186.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (C) 2015 Google, Inc.
*/
#include <linux/clk.h>
#include <linux/clk/tegra.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <soc/tegra/fuse.h>
#include "xusb.h"
#define FUSE_SKU_CALIB_HS_CURR_LEVEL_PADX_SHIFT(x) \
((x) ? (11 + ((x) - 1) * 6) : 0)
#define FUSE_SKU_CALIB_HS_CURR_LEVEL_PAD_MASK 0x3f
#define FUSE_SKU_CALIB_HS_TERM_RANGE_ADJ_SHIFT 7
#define FUSE_SKU_CALIB_HS_TERM_RANGE_ADJ_MASK 0xf
#define FUSE_USB_CALIB_EXT_RPD_CTRL_SHIFT 0
#define FUSE_USB_CALIB_EXT_RPD_CTRL_MASK 0x1f
#define XUSB_PADCTL_USB2_PAD_MUX 0x004
#define XUSB_PADCTL_USB2_PAD_MUX_HSIC_PAD_TRK_SHIFT 16
#define XUSB_PADCTL_USB2_PAD_MUX_HSIC_PAD_TRK_MASK 0x3
#define XUSB_PADCTL_USB2_PAD_MUX_HSIC_PAD_TRK_XUSB 0x1
#define XUSB_PADCTL_USB2_PAD_MUX_USB2_BIAS_PAD_SHIFT 18
#define XUSB_PADCTL_USB2_PAD_MUX_USB2_BIAS_PAD_MASK 0x3
#define XUSB_PADCTL_USB2_PAD_MUX_USB2_BIAS_PAD_XUSB 0x1
#define XUSB_PADCTL_USB2_PORT_CAP 0x008
#define XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_DISABLED(x) (0x0 << ((x) * 4))
#define XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_HOST(x) (0x1 << ((x) * 4))
#define XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_DEVICE(x) (0x2 << ((x) * 4))
#define XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_OTG(x) (0x3 << ((x) * 4))
#define XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_MASK(x) (0x3 << ((x) * 4))
#define XUSB_PADCTL_SS_PORT_MAP 0x014
#define XUSB_PADCTL_SS_PORT_MAP_PORTX_INTERNAL(x) (1 << (((x) * 5) + 4))
#define XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP_SHIFT(x) ((x) * 5)
#define XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP_MASK(x) (0x7 << ((x) * 5))
#define XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP(x, v) (((v) & 0x7) << ((x) * 5))
#define XUSB_PADCTL_SS_PORT_MAP_PORT_DISABLED 0x7
#define XUSB_PADCTL_ELPG_PROGRAM_0 0x20
#define USB2_PORT_WAKE_INTERRUPT_ENABLE(x) BIT((x))
#define USB2_PORT_WAKEUP_EVENT(x) BIT((x) + 7)
#define SS_PORT_WAKE_INTERRUPT_ENABLE(x) BIT((x) + 14)
#define SS_PORT_WAKEUP_EVENT(x) BIT((x) + 21)
#define USB2_HSIC_PORT_WAKE_INTERRUPT_ENABLE(x) BIT((x) + 28)
#define USB2_HSIC_PORT_WAKEUP_EVENT(x) BIT((x) + 30)
#define ALL_WAKE_EVENTS ( \
USB2_PORT_WAKEUP_EVENT(0) | USB2_PORT_WAKEUP_EVENT(1) | \
USB2_PORT_WAKEUP_EVENT(2) | USB2_PORT_WAKEUP_EVENT(3) | \
SS_PORT_WAKEUP_EVENT(0) | SS_PORT_WAKEUP_EVENT(1) | \
SS_PORT_WAKEUP_EVENT(2) | SS_PORT_WAKEUP_EVENT(3) | \
USB2_HSIC_PORT_WAKEUP_EVENT(0))
#define XUSB_PADCTL_ELPG_PROGRAM1 0x024
#define XUSB_PADCTL_ELPG_PROGRAM1_AUX_MUX_LP0_VCORE_DOWN (1 << 31)
#define XUSB_PADCTL_ELPG_PROGRAM1_AUX_MUX_LP0_CLAMP_EN_EARLY (1 << 30)
#define XUSB_PADCTL_ELPG_PROGRAM1_AUX_MUX_LP0_CLAMP_EN (1 << 29)
#define XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_VCORE_DOWN(x) (1 << (2 + (x) * 3))
#define XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN_EARLY(x) \
(1 << (1 + (x) * 3))
#define XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN(x) (1 << ((x) * 3))
#define XUSB_PADCTL_USB3_PAD_MUX 0x028
#define XUSB_PADCTL_USB3_PAD_MUX_PCIE_IDDQ_DISABLE(x) (1 << (1 + (x)))
#define XUSB_PADCTL_USB3_PAD_MUX_SATA_IDDQ_DISABLE(x) (1 << (8 + (x)))
#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPADX_CTL0(x) (0x080 + (x) * 0x40)
#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL0_ZIP (1 << 18)
#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL0_ZIN (1 << 22)
#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPADX_CTL1(x) (0x084 + (x) * 0x40)
#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_LEV_SHIFT 7
#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_LEV_MASK 0x3
#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_LEV_VAL 0x1
#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_FIX18 (1 << 6)
#define XUSB_PADCTL_USB2_OTG_PADX_CTL0(x) (0x088 + (x) * 0x40)
#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_PD_ZI (1 << 29)
#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_PD2 (1 << 27)
#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_PD (1 << 26)
#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_CURR_LEVEL_SHIFT 0
#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_CURR_LEVEL_MASK 0x3f
#define XUSB_PADCTL_USB2_OTG_PADX_CTL1(x) (0x08c + (x) * 0x40)
#define XUSB_PADCTL_USB2_OTG_PAD_CTL1_RPD_CTRL_SHIFT 26
#define XUSB_PADCTL_USB2_OTG_PAD_CTL1_RPD_CTRL_MASK 0x1f
#define XUSB_PADCTL_USB2_OTG_PAD_CTL1_TERM_RANGE_ADJ_SHIFT 3
#define XUSB_PADCTL_USB2_OTG_PAD_CTL1_TERM_RANGE_ADJ_MASK 0xf
#define XUSB_PADCTL_USB2_OTG_PAD_CTL1_PD_DR (1 << 2)
#define XUSB_PADCTL_USB2_OTG_PAD_CTL1_PD_DISC_OVRD (1 << 1)
#define XUSB_PADCTL_USB2_OTG_PAD_CTL1_PD_CHRP_OVRD (1 << 0)
#define RPD_CTRL(x) (((x) & 0x1f) << 26)
#define RPD_CTRL_VALUE(x) (((x) >> 26) & 0x1f)
#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0 0x284
#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0_PD (1 << 11)
#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_DISCON_LEVEL_SHIFT 3
#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_DISCON_LEVEL_MASK 0x7
#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_DISCON_LEVEL_VAL 0x7
#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_SQUELCH_LEVEL_SHIFT 0
#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_SQUELCH_LEVEL_MASK 0x7
#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_SQUELCH_LEVEL_VAL 0x2
#define XUSB_PADCTL_USB2_BIAS_PAD_CTL1 0x288
#define XUSB_PADCTL_USB2_BIAS_PAD_CTL1_PD_TRK (1 << 26)
#define XUSB_PADCTL_USB2_BIAS_PAD_CTL1_TRK_DONE_RESET_TIMER_SHIFT 19
#define XUSB_PADCTL_USB2_BIAS_PAD_CTL1_TRK_DONE_RESET_TIMER_MASK 0x7f
#define XUSB_PADCTL_USB2_BIAS_PAD_CTL1_TRK_DONE_RESET_TIMER_VAL 0x0a
#define XUSB_PADCTL_USB2_BIAS_PAD_CTL1_TRK_START_TIMER_SHIFT 12
#define XUSB_PADCTL_USB2_BIAS_PAD_CTL1_TRK_START_TIMER_MASK 0x7f
#define XUSB_PADCTL_USB2_BIAS_PAD_CTL1_TRK_START_TIMER_VAL 0x1e
#define TCTRL_VALUE(x) (((x) & 0x3f) >> 0)
#define PCTRL_VALUE(x) (((x) >> 6) & 0x3f)
#define XUSB_PADCTL_HSIC_PADX_CTL0(x) (0x300 + (x) * 0x20)
#define XUSB_PADCTL_HSIC_PAD_CTL0_RPU_STROBE (1 << 18)
#define XUSB_PADCTL_HSIC_PAD_CTL0_RPU_DATA1 (1 << 17)
#define XUSB_PADCTL_HSIC_PAD_CTL0_RPU_DATA0 (1 << 16)
#define XUSB_PADCTL_HSIC_PAD_CTL0_RPD_STROBE (1 << 15)
#define XUSB_PADCTL_HSIC_PAD_CTL0_RPD_DATA1 (1 << 14)
#define XUSB_PADCTL_HSIC_PAD_CTL0_RPD_DATA0 (1 << 13)
#define XUSB_PADCTL_HSIC_PAD_CTL0_PD_ZI_STROBE (1 << 9)
#define XUSB_PADCTL_HSIC_PAD_CTL0_PD_ZI_DATA1 (1 << 8)
#define XUSB_PADCTL_HSIC_PAD_CTL0_PD_ZI_DATA0 (1 << 7)
#define XUSB_PADCTL_HSIC_PAD_CTL0_PD_RX_STROBE (1 << 6)
#define XUSB_PADCTL_HSIC_PAD_CTL0_PD_RX_DATA1 (1 << 5)
#define XUSB_PADCTL_HSIC_PAD_CTL0_PD_RX_DATA0 (1 << 4)
#define XUSB_PADCTL_HSIC_PAD_CTL0_PD_TX_STROBE (1 << 3)
#define XUSB_PADCTL_HSIC_PAD_CTL0_PD_TX_DATA1 (1 << 2)
#define XUSB_PADCTL_HSIC_PAD_CTL0_PD_TX_DATA0 (1 << 1)
#define XUSB_PADCTL_HSIC_PADX_CTL1(x) (0x304 + (x) * 0x20)
#define XUSB_PADCTL_HSIC_PAD_CTL1_TX_RTUNEP_SHIFT 0
#define XUSB_PADCTL_HSIC_PAD_CTL1_TX_RTUNEP_MASK 0xf
#define XUSB_PADCTL_HSIC_PADX_CTL2(x) (0x308 + (x) * 0x20)
#define XUSB_PADCTL_HSIC_PAD_CTL2_RX_STROBE_TRIM_SHIFT 8
#define XUSB_PADCTL_HSIC_PAD_CTL2_RX_STROBE_TRIM_MASK 0xf
#define XUSB_PADCTL_HSIC_PAD_CTL2_RX_DATA_TRIM_SHIFT 0
#define XUSB_PADCTL_HSIC_PAD_CTL2_RX_DATA_TRIM_MASK 0xff
#define XUSB_PADCTL_HSIC_PAD_TRK_CTL 0x340
#define XUSB_PADCTL_HSIC_PAD_TRK_CTL_PD_TRK (1 << 19)
#define XUSB_PADCTL_HSIC_PAD_TRK_CTL_TRK_DONE_RESET_TIMER_SHIFT 12
#define XUSB_PADCTL_HSIC_PAD_TRK_CTL_TRK_DONE_RESET_TIMER_MASK 0x7f
#define XUSB_PADCTL_HSIC_PAD_TRK_CTL_TRK_DONE_RESET_TIMER_VAL 0x0a
#define XUSB_PADCTL_HSIC_PAD_TRK_CTL_TRK_START_TIMER_SHIFT 5
#define XUSB_PADCTL_HSIC_PAD_TRK_CTL_TRK_START_TIMER_MASK 0x7f
#define XUSB_PADCTL_HSIC_PAD_TRK_CTL_TRK_START_TIMER_VAL 0x1e
#define XUSB_PADCTL_HSIC_STRB_TRIM_CONTROL 0x344
#define XUSB_PADCTL_UPHY_PLL_P0_CTL1 0x360
#define XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_NDIV_SHIFT 20
#define XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_NDIV_MASK 0xff
#define XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_NDIV_USB_VAL 0x19
#define XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_NDIV_SATA_VAL 0x1e
#define XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_MDIV_SHIFT 16
#define XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_MDIV_MASK 0x3
#define XUSB_PADCTL_UPHY_PLL_CTL1_LOCKDET_STATUS (1 << 15)
#define XUSB_PADCTL_UPHY_PLL_CTL1_PWR_OVRD (1 << 4)
#define XUSB_PADCTL_UPHY_PLL_CTL1_ENABLE (1 << 3)
#define XUSB_PADCTL_UPHY_PLL_CTL1_SLEEP_SHIFT 1
#define XUSB_PADCTL_UPHY_PLL_CTL1_SLEEP_MASK 0x3
#define XUSB_PADCTL_UPHY_PLL_CTL1_IDDQ (1 << 0)
#define XUSB_PADCTL_UPHY_PLL_P0_CTL2 0x364
#define XUSB_PADCTL_UPHY_PLL_CTL2_CAL_CTRL_SHIFT 4
#define XUSB_PADCTL_UPHY_PLL_CTL2_CAL_CTRL_MASK 0xffffff
#define XUSB_PADCTL_UPHY_PLL_CTL2_CAL_CTRL_VAL 0x136
#define XUSB_PADCTL_UPHY_PLL_CTL2_CAL_OVRD (1 << 2)
#define XUSB_PADCTL_UPHY_PLL_CTL2_CAL_DONE (1 << 1)
#define XUSB_PADCTL_UPHY_PLL_CTL2_CAL_EN (1 << 0)
#define XUSB_PADCTL_UPHY_PLL_P0_CTL4 0x36c
#define XUSB_PADCTL_UPHY_PLL_CTL4_XDIGCLK_EN (1 << 19)
#define XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_EN (1 << 15)
#define XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_SEL_SHIFT 12
#define XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_SEL_MASK 0x3
#define XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_SEL_USB_VAL 0x2
#define XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_SEL_SATA_VAL 0x0
#define XUSB_PADCTL_UPHY_PLL_CTL4_REFCLKBUF_EN (1 << 8)
#define XUSB_PADCTL_UPHY_PLL_CTL4_REFCLK_SEL_SHIFT 4
#define XUSB_PADCTL_UPHY_PLL_CTL4_REFCLK_SEL_MASK 0xf
#define XUSB_PADCTL_UPHY_PLL_P0_CTL5 0x370
#define XUSB_PADCTL_UPHY_PLL_CTL5_DCO_CTRL_SHIFT 16
#define XUSB_PADCTL_UPHY_PLL_CTL5_DCO_CTRL_MASK 0xff
#define XUSB_PADCTL_UPHY_PLL_CTL5_DCO_CTRL_VAL 0x2a
#define XUSB_PADCTL_UPHY_PLL_P0_CTL8 0x37c
#define XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_DONE (1 << 31)
#define XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_OVRD (1 << 15)
#define XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_CLK_EN (1 << 13)
#define XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_EN (1 << 12)
#define XUSB_PADCTL_UPHY_MISC_PAD_PX_CTL1(x) (0x460 + (x) * 0x40)
#define XUSB_PADCTL_UPHY_MISC_PAD_CTL1_AUX_RX_IDLE_MODE_SHIFT 20
#define XUSB_PADCTL_UPHY_MISC_PAD_CTL1_AUX_RX_IDLE_MODE_MASK 0x3
#define XUSB_PADCTL_UPHY_MISC_PAD_CTL1_AUX_RX_IDLE_MODE_VAL 0x1
#define XUSB_PADCTL_UPHY_MISC_PAD_CTL1_AUX_RX_TERM_EN BIT(18)
#define XUSB_PADCTL_UPHY_MISC_PAD_CTL1_AUX_RX_MODE_OVRD BIT(13)
#define XUSB_PADCTL_UPHY_MISC_PAD_PX_CTL2(x) (0x464 + (x) * 0x40)
#define XUSB_PADCTL_UPHY_MISC_PAD_CTL2_TX_IDDQ BIT(0)
#define XUSB_PADCTL_UPHY_MISC_PAD_CTL2_TX_IDDQ_OVRD BIT(1)
#define XUSB_PADCTL_UPHY_MISC_PAD_CTL2_TX_SLEEP_MASK GENMASK(5, 4)
#define XUSB_PADCTL_UPHY_MISC_PAD_CTL2_TX_SLEEP_VAL GENMASK(5, 4)
#define XUSB_PADCTL_UPHY_MISC_PAD_CTL2_TX_PWR_OVRD BIT(24)
#define XUSB_PADCTL_UPHY_MISC_PAD_CTL2_RX_IDDQ BIT(8)
#define XUSB_PADCTL_UPHY_MISC_PAD_CTL2_RX_IDDQ_OVRD BIT(9)
#define XUSB_PADCTL_UPHY_MISC_PAD_CTL2_RX_SLEEP_MASK GENMASK(13, 12)
#define XUSB_PADCTL_UPHY_MISC_PAD_CTL2_RX_SLEEP_VAL GENMASK(13, 12)
#define XUSB_PADCTL_UPHY_MISC_PAD_CTL2_RX_PWR_OVRD BIT(25)
#define XUSB_PADCTL_UPHY_PLL_S0_CTL1 0x860
#define XUSB_PADCTL_UPHY_PLL_S0_CTL2 0x864
#define XUSB_PADCTL_UPHY_PLL_S0_CTL4 0x86c
#define XUSB_PADCTL_UPHY_PLL_S0_CTL5 0x870
#define XUSB_PADCTL_UPHY_PLL_S0_CTL8 0x87c
#define XUSB_PADCTL_UPHY_MISC_PAD_S0_CTL1 0x960
#define XUSB_PADCTL_UPHY_MISC_PAD_S0_CTL2 0x964
#define XUSB_PADCTL_UPHY_USB3_PADX_ECTL1(x) (0xa60 + (x) * 0x40)
#define XUSB_PADCTL_UPHY_USB3_PAD_ECTL1_TX_TERM_CTRL_SHIFT 16
#define XUSB_PADCTL_UPHY_USB3_PAD_ECTL1_TX_TERM_CTRL_MASK 0x3
#define XUSB_PADCTL_UPHY_USB3_PAD_ECTL1_TX_TERM_CTRL_VAL 0x2
#define XUSB_PADCTL_UPHY_USB3_PADX_ECTL2(x) (0xa64 + (x) * 0x40)
#define XUSB_PADCTL_UPHY_USB3_PAD_ECTL2_RX_CTLE_SHIFT 0
#define XUSB_PADCTL_UPHY_USB3_PAD_ECTL2_RX_CTLE_MASK 0xffff
#define XUSB_PADCTL_UPHY_USB3_PAD_ECTL2_RX_CTLE_VAL 0x00fc
#define XUSB_PADCTL_UPHY_USB3_PADX_ECTL3(x) (0xa68 + (x) * 0x40)
#define XUSB_PADCTL_UPHY_USB3_PAD_ECTL3_RX_DFE_VAL 0xc0077f1f
#define XUSB_PADCTL_UPHY_USB3_PADX_ECTL4(x) (0xa6c + (x) * 0x40)
#define XUSB_PADCTL_UPHY_USB3_PAD_ECTL4_RX_CDR_CTRL_SHIFT 16
#define XUSB_PADCTL_UPHY_USB3_PAD_ECTL4_RX_CDR_CTRL_MASK 0xffff
#define XUSB_PADCTL_UPHY_USB3_PAD_ECTL4_RX_CDR_CTRL_VAL 0x01c7
#define XUSB_PADCTL_UPHY_USB3_PADX_ECTL6(x) (0xa74 + (x) * 0x40)
#define XUSB_PADCTL_UPHY_USB3_PAD_ECTL6_RX_EQ_CTRL_H_VAL 0xfcf01368
#define XUSB_PADCTL_USB2_VBUS_ID 0xc60
#define XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_VBUS_ON (1 << 14)
#define XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_SHIFT 18
#define XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_MASK 0xf
#define XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_FLOATING 8
#define XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_GROUNDED 0
/* USB2 SLEEPWALK registers */
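/*
 * Ports 0..2 share packed per-port fields in one PMC register, while port 3
 * has a register of its own; UTMIP() selects the offset for a given port.
 */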
#define UTMIP(_port, _offset1, _offset2) \
(((_port) <= 2) ? (_offset1) : (_offset2))
#define PMC_UTMIP_UHSIC_SLEEP_CFG(x) UTMIP(x, 0x1fc, 0x4d0)
#define UTMIP_MASTER_ENABLE(x) UTMIP(x, BIT(8 * (x)), BIT(0))
#define UTMIP_FSLS_USE_PMC(x) UTMIP(x, BIT(8 * (x) + 1), \
BIT(1))
#define UTMIP_PCTRL_USE_PMC(x) UTMIP(x, BIT(8 * (x) + 2), \
BIT(2))
#define UTMIP_TCTRL_USE_PMC(x) UTMIP(x, BIT(8 * (x) + 3), \
BIT(3))
#define UTMIP_WAKE_VAL(_port, _value) (((_value) & 0xf) << \
(UTMIP(_port, 8 * (_port) + 4, 4)))
#define UTMIP_WAKE_VAL_NONE(_port) UTMIP_WAKE_VAL(_port, 12)
#define UTMIP_WAKE_VAL_ANY(_port) UTMIP_WAKE_VAL(_port, 15)
#define PMC_UTMIP_UHSIC_SLEEP_CFG1 (0x4d0)
#define UTMIP_RPU_SWITC_LOW_USE_PMC_PX(x) BIT((x) + 8)
#define UTMIP_RPD_CTRL_USE_PMC_PX(x) BIT((x) + 16)
#define PMC_UTMIP_MASTER_CONFIG (0x274)
#define UTMIP_PWR(x) UTMIP(x, BIT(x), BIT(4))
#define UHSIC_PWR BIT(3)
#define PMC_USB_DEBOUNCE_DEL (0xec)
#define DEBOUNCE_VAL(x) (((x) & 0xffff) << 0)
#define UTMIP_LINE_DEB_CNT(x) (((x) & 0xf) << 16)
#define UHSIC_LINE_DEB_CNT(x) (((x) & 0xf) << 20)
#define PMC_UTMIP_UHSIC_FAKE(x) UTMIP(x, 0x218, 0x294)
#define UTMIP_FAKE_USBOP_VAL(x) UTMIP(x, BIT(4 * (x)), BIT(8))
#define UTMIP_FAKE_USBON_VAL(x) UTMIP(x, BIT(4 * (x) + 1), \
BIT(9))
#define UTMIP_FAKE_USBOP_EN(x) UTMIP(x, BIT(4 * (x) + 2), \
BIT(10))
#define UTMIP_FAKE_USBON_EN(x) UTMIP(x, BIT(4 * (x) + 3), \
BIT(11))
#define PMC_UTMIP_UHSIC_SLEEPWALK_CFG(x) UTMIP(x, 0x200, 0x288)
#define UTMIP_LINEVAL_WALK_EN(x) UTMIP(x, BIT(8 * (x) + 7), \
BIT(15))
#define PMC_USB_AO (0xf0)
#define USBOP_VAL_PD(x) UTMIP(x, BIT(4 * (x)), BIT(20))
#define USBON_VAL_PD(x) UTMIP(x, BIT(4 * (x) + 1), \
BIT(21))
#define STROBE_VAL_PD BIT(12)
#define DATA0_VAL_PD BIT(13)
#define DATA1_VAL_PD BIT(24)
#define PMC_UTMIP_UHSIC_SAVED_STATE(x) UTMIP(x, 0x1f0, 0x280)
#define SPEED(_port, _value) (((_value) & 0x3) << \
(UTMIP(_port, 8 * (_port), 8)))
#define UTMI_HS(_port) SPEED(_port, 0)
#define UTMI_FS(_port) SPEED(_port, 1)
#define UTMI_LS(_port) SPEED(_port, 2)
#define UTMI_RST(_port) SPEED(_port, 3)
#define PMC_UTMIP_UHSIC_TRIGGERS (0x1ec)
#define UTMIP_CLR_WALK_PTR(x) UTMIP(x, BIT(x), BIT(16))
#define UTMIP_CAP_CFG(x) UTMIP(x, BIT((x) + 4), BIT(17))
#define UTMIP_CLR_WAKE_ALARM(x) UTMIP(x, BIT((x) + 12), \
BIT(19))
#define UHSIC_CLR_WALK_PTR BIT(3)
#define UHSIC_CLR_WAKE_ALARM BIT(15)
#define PMC_UTMIP_SLEEPWALK_PX(x) UTMIP(x, 0x204 + (4 * (x)), \
0x4e0)
/* phase A */
#define UTMIP_USBOP_RPD_A BIT(0)
#define UTMIP_USBON_RPD_A BIT(1)
#define UTMIP_AP_A BIT(4)
#define UTMIP_AN_A BIT(5)
#define UTMIP_HIGHZ_A BIT(6)
/* phase B */
#define UTMIP_USBOP_RPD_B BIT(8)
#define UTMIP_USBON_RPD_B BIT(9)
#define UTMIP_AP_B BIT(12)
#define UTMIP_AN_B BIT(13)
#define UTMIP_HIGHZ_B BIT(14)
/* phase C */
#define UTMIP_USBOP_RPD_C BIT(16)
#define UTMIP_USBON_RPD_C BIT(17)
#define UTMIP_AP_C BIT(20)
#define UTMIP_AN_C BIT(21)
#define UTMIP_HIGHZ_C BIT(22)
/* phase D */
#define UTMIP_USBOP_RPD_D BIT(24)
#define UTMIP_USBON_RPD_D BIT(25)
#define UTMIP_AP_D BIT(28)
#define UTMIP_AN_D BIT(29)
#define UTMIP_HIGHZ_D BIT(30)
#define PMC_UTMIP_UHSIC_LINE_WAKEUP (0x26c)
#define UTMIP_LINE_WAKEUP_EN(x) UTMIP(x, BIT(x), BIT(4))
#define UHSIC_LINE_WAKEUP_EN BIT(3)
#define PMC_UTMIP_TERM_PAD_CFG (0x1f8)
#define PCTRL_VAL(x) (((x) & 0x3f) << 1)
#define TCTRL_VAL(x) (((x) & 0x3f) << 7)
#define PMC_UTMIP_PAD_CFGX(x) (0x4c0 + (4 * (x)))
#define RPD_CTRL_PX(x) (((x) & 0x1f) << 22)
#define PMC_UHSIC_SLEEP_CFG PMC_UTMIP_UHSIC_SLEEP_CFG(0)
#define UHSIC_MASTER_ENABLE BIT(24)
#define UHSIC_WAKE_VAL(_value) (((_value) & 0xf) << 28)
#define UHSIC_WAKE_VAL_SD10 UHSIC_WAKE_VAL(2)
#define UHSIC_WAKE_VAL_NONE UHSIC_WAKE_VAL(12)
#define PMC_UHSIC_FAKE PMC_UTMIP_UHSIC_FAKE(0)
#define UHSIC_FAKE_STROBE_VAL BIT(12)
#define UHSIC_FAKE_DATA_VAL BIT(13)
#define UHSIC_FAKE_STROBE_EN BIT(14)
#define UHSIC_FAKE_DATA_EN BIT(15)
#define PMC_UHSIC_SAVED_STATE PMC_UTMIP_UHSIC_SAVED_STATE(0)
#define UHSIC_MODE(_value) (((_value) & 0x1) << 24)
#define UHSIC_HS UHSIC_MODE(0)
#define UHSIC_RST UHSIC_MODE(1)
#define PMC_UHSIC_SLEEPWALK_CFG PMC_UTMIP_UHSIC_SLEEPWALK_CFG(0)
#define UHSIC_WAKE_WALK_EN BIT(30)
#define UHSIC_LINEVAL_WALK_EN BIT(31)
#define PMC_UHSIC_SLEEPWALK_P0 (0x210)
#define UHSIC_DATA0_RPD_A BIT(1)
#define UHSIC_DATA0_RPU_B BIT(11)
#define UHSIC_DATA0_RPU_C BIT(19)
#define UHSIC_DATA0_RPU_D BIT(27)
#define UHSIC_STROBE_RPU_A BIT(2)
#define UHSIC_STROBE_RPD_B BIT(8)
#define UHSIC_STROBE_RPD_C BIT(16)
#define UHSIC_STROBE_RPD_D BIT(24)
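/* Per-SoC calibration values read back from on-chip fuses. */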
struct tegra210_xusb_fuse_calibration {
u32 hs_curr_level[4];
u32 hs_term_range_adj;
u32 rpd_ctrl;
};
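/* Saved copies of the pad mux, port capability and port map registers. */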
struct tegra210_xusb_padctl_context {
u32 usb2_pad_mux;
u32 usb2_port_cap;
u32 ss_port_map;
u32 usb3_pad_mux;
};
struct tegra210_xusb_padctl {
struct tegra_xusb_padctl base;
struct regmap *regmap;
struct tegra210_xusb_fuse_calibration fuse;
struct tegra210_xusb_padctl_context context;
};
static inline struct tegra210_xusb_padctl *
to_tegra210_xusb_padctl(struct tegra_xusb_padctl *padctl)
{
return container_of(padctl, struct tegra210_xusb_padctl, base);
}
static const struct tegra_xusb_lane_map tegra210_usb3_map[] = {
{ 0, "pcie", 6 },
{ 1, "pcie", 5 },
{ 2, "pcie", 0 },
{ 2, "pcie", 3 },
{ 3, "pcie", 4 },
{ 3, "sata", 0 },
{ 0, NULL, 0 }
};
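/*
 * Look up which USB3 port the given PCIe/SATA lane is routed to, based on
 * the tegra210_usb3_map table above. Returns the port number, or -EINVAL if
 * the lane does not back any USB3 port.
 */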
static int tegra210_usb3_lane_map(struct tegra_xusb_lane *lane)
{
const struct tegra_xusb_lane_map *map;
for (map = tegra210_usb3_map; map->type; map++) {
if (map->index == lane->index &&
strcmp(map->type, lane->pad->soc->name) == 0) {
dev_dbg(lane->pad->padctl->dev, "lane = %s maps to port = usb3-%d\n",
lane->pad->soc->lanes[lane->index].name, map->port);
return map->port;
}
}
return -EINVAL;
}
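/*
 * Bring up the PEX (PCIe) UPHY PLL: run the calibration and resistor
 * calibration sequences, hand the PLL over to hardware power sequencing and
 * take the PCIe lanes out of IDDQ. The PLL programming is skipped entirely
 * if PLLE is already under hardware control.
 */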
/* must be called under padctl->lock */
static int tegra210_pex_uphy_enable(struct tegra_xusb_padctl *padctl)
{
struct tegra_xusb_pcie_pad *pcie = to_pcie_pad(padctl->pcie);
unsigned long timeout;
u32 value;
unsigned int i;
int err;
if (pcie->enable)
return 0;
err = clk_prepare_enable(pcie->pll);
if (err < 0)
return err;
if (tegra210_plle_hw_sequence_is_enabled())
goto skip_pll_init;
err = reset_control_deassert(pcie->rst);
if (err < 0)
goto disable;
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL2);
value &= ~(XUSB_PADCTL_UPHY_PLL_CTL2_CAL_CTRL_MASK <<
XUSB_PADCTL_UPHY_PLL_CTL2_CAL_CTRL_SHIFT);
value |= XUSB_PADCTL_UPHY_PLL_CTL2_CAL_CTRL_VAL <<
XUSB_PADCTL_UPHY_PLL_CTL2_CAL_CTRL_SHIFT;
padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL2);
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL5);
value &= ~(XUSB_PADCTL_UPHY_PLL_CTL5_DCO_CTRL_MASK <<
XUSB_PADCTL_UPHY_PLL_CTL5_DCO_CTRL_SHIFT);
value |= XUSB_PADCTL_UPHY_PLL_CTL5_DCO_CTRL_VAL <<
XUSB_PADCTL_UPHY_PLL_CTL5_DCO_CTRL_SHIFT;
padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL5);
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL1);
value |= XUSB_PADCTL_UPHY_PLL_CTL1_PWR_OVRD;
padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL1);
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL2);
value |= XUSB_PADCTL_UPHY_PLL_CTL2_CAL_OVRD;
padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL2);
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL8);
value |= XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_OVRD;
padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL8);
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL4);
value &= ~((XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_SEL_MASK <<
XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_SEL_SHIFT) |
(XUSB_PADCTL_UPHY_PLL_CTL4_REFCLK_SEL_MASK <<
XUSB_PADCTL_UPHY_PLL_CTL4_REFCLK_SEL_SHIFT));
value |= (XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_SEL_USB_VAL <<
XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_SEL_SHIFT) |
XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_EN;
padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL4);
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL1);
value &= ~((XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_MDIV_MASK <<
XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_MDIV_SHIFT) |
(XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_NDIV_MASK <<
XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_NDIV_SHIFT));
value |= XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_NDIV_USB_VAL <<
XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_NDIV_SHIFT;
padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL1);
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL1);
value &= ~XUSB_PADCTL_UPHY_PLL_CTL1_IDDQ;
padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL1);
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL1);
value &= ~(XUSB_PADCTL_UPHY_PLL_CTL1_SLEEP_MASK <<
XUSB_PADCTL_UPHY_PLL_CTL1_SLEEP_SHIFT);
padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL1);
usleep_range(10, 20);
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL4);
value |= XUSB_PADCTL_UPHY_PLL_CTL4_REFCLKBUF_EN;
padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL4);
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL2);
value |= XUSB_PADCTL_UPHY_PLL_CTL2_CAL_EN;
padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL2);
timeout = jiffies + msecs_to_jiffies(100);
while (time_before(jiffies, timeout)) {
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL2);
if (value & XUSB_PADCTL_UPHY_PLL_CTL2_CAL_DONE)
break;
usleep_range(10, 20);
}
if (time_after_eq(jiffies, timeout)) {
err = -ETIMEDOUT;
goto reset;
}
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL2);
value &= ~XUSB_PADCTL_UPHY_PLL_CTL2_CAL_EN;
padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL2);
timeout = jiffies + msecs_to_jiffies(100);
while (time_before(jiffies, timeout)) {
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL2);
if (!(value & XUSB_PADCTL_UPHY_PLL_CTL2_CAL_DONE))
break;
usleep_range(10, 20);
}
if (time_after_eq(jiffies, timeout)) {
err = -ETIMEDOUT;
goto reset;
}
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL1);
value |= XUSB_PADCTL_UPHY_PLL_CTL1_ENABLE;
padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL1);
timeout = jiffies + msecs_to_jiffies(100);
while (time_before(jiffies, timeout)) {
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL1);
if (value & XUSB_PADCTL_UPHY_PLL_CTL1_LOCKDET_STATUS)
break;
usleep_range(10, 20);
}
if (time_after_eq(jiffies, timeout)) {
err = -ETIMEDOUT;
goto reset;
}
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL8);
value |= XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_EN |
XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_CLK_EN;
padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL8);
timeout = jiffies + msecs_to_jiffies(100);
while (time_before(jiffies, timeout)) {
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL8);
if (value & XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_DONE)
break;
usleep_range(10, 20);
}
if (time_after_eq(jiffies, timeout)) {
err = -ETIMEDOUT;
goto reset;
}
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL8);
value &= ~XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_EN;
padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL8);
timeout = jiffies + msecs_to_jiffies(100);
while (time_before(jiffies, timeout)) {
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL8);
if (!(value & XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_DONE))
break;
usleep_range(10, 20);
}
if (time_after_eq(jiffies, timeout)) {
err = -ETIMEDOUT;
goto reset;
}
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL8);
value &= ~XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_CLK_EN;
padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL8);
tegra210_xusb_pll_hw_control_enable();
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL1);
value &= ~XUSB_PADCTL_UPHY_PLL_CTL1_PWR_OVRD;
padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL1);
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL2);
value &= ~XUSB_PADCTL_UPHY_PLL_CTL2_CAL_OVRD;
padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL2);
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL8);
value &= ~XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_OVRD;
padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL8);
usleep_range(10, 20);
tegra210_xusb_pll_hw_sequence_start();
skip_pll_init:
pcie->enable = true;
for (i = 0; i < padctl->pcie->soc->num_lanes; i++) {
value = padctl_readl(padctl, XUSB_PADCTL_USB3_PAD_MUX);
value |= XUSB_PADCTL_USB3_PAD_MUX_PCIE_IDDQ_DISABLE(i);
padctl_writel(padctl, value, XUSB_PADCTL_USB3_PAD_MUX);
}
return 0;
reset:
reset_control_assert(pcie->rst);
disable:
clk_disable_unprepare(pcie->pll);
return err;
}
static void tegra210_pex_uphy_disable(struct tegra_xusb_padctl *padctl)
{
struct tegra_xusb_pcie_pad *pcie = to_pcie_pad(padctl->pcie);
u32 value;
unsigned int i;
if (WARN_ON(!pcie->enable))
return;
pcie->enable = false;
for (i = 0; i < padctl->pcie->soc->num_lanes; i++) {
value = padctl_readl(padctl, XUSB_PADCTL_USB3_PAD_MUX);
value &= ~XUSB_PADCTL_USB3_PAD_MUX_PCIE_IDDQ_DISABLE(i);
padctl_writel(padctl, value, XUSB_PADCTL_USB3_PAD_MUX);
}
clk_disable_unprepare(pcie->pll);
}
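/*
 * Bring up the SATA UPHY PLL. The sequence mirrors the PEX PLL above, except
 * that the reference/TX clock selection and feedback divider depend on
 * whether the SATA lane is actually used as a USB3 lane ("usb3-ss").
 */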
/* must be called under padctl->lock */
static int tegra210_sata_uphy_enable(struct tegra_xusb_padctl *padctl)
{
struct tegra_xusb_sata_pad *sata = to_sata_pad(padctl->sata);
struct tegra_xusb_lane *lane = tegra_xusb_find_lane(padctl, "sata", 0);
unsigned long timeout;
u32 value;
unsigned int i;
int err;
bool usb;
if (sata->enable)
return 0;
if (IS_ERR(lane))
return 0;
if (tegra210_plle_hw_sequence_is_enabled())
goto skip_pll_init;
usb = tegra_xusb_lane_check(lane, "usb3-ss");
err = clk_prepare_enable(sata->pll);
if (err < 0)
return err;
err = reset_control_deassert(sata->rst);
if (err < 0)
goto disable;
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL2);
value &= ~(XUSB_PADCTL_UPHY_PLL_CTL2_CAL_CTRL_MASK <<
XUSB_PADCTL_UPHY_PLL_CTL2_CAL_CTRL_SHIFT);
value |= XUSB_PADCTL_UPHY_PLL_CTL2_CAL_CTRL_VAL <<
XUSB_PADCTL_UPHY_PLL_CTL2_CAL_CTRL_SHIFT;
padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL2);
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL5);
value &= ~(XUSB_PADCTL_UPHY_PLL_CTL5_DCO_CTRL_MASK <<
XUSB_PADCTL_UPHY_PLL_CTL5_DCO_CTRL_SHIFT);
value |= XUSB_PADCTL_UPHY_PLL_CTL5_DCO_CTRL_VAL <<
XUSB_PADCTL_UPHY_PLL_CTL5_DCO_CTRL_SHIFT;
padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL5);
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL1);
value |= XUSB_PADCTL_UPHY_PLL_CTL1_PWR_OVRD;
padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL1);
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL2);
value |= XUSB_PADCTL_UPHY_PLL_CTL2_CAL_OVRD;
padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL2);
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL8);
value |= XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_OVRD;
padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL8);
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL4);
value &= ~((XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_SEL_MASK <<
XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_SEL_SHIFT) |
(XUSB_PADCTL_UPHY_PLL_CTL4_REFCLK_SEL_MASK <<
XUSB_PADCTL_UPHY_PLL_CTL4_REFCLK_SEL_SHIFT));
value |= XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_EN;
if (usb)
value |= (XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_SEL_USB_VAL <<
XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_SEL_SHIFT);
else
value |= (XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_SEL_SATA_VAL <<
XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_SEL_SHIFT);
value &= ~XUSB_PADCTL_UPHY_PLL_CTL4_XDIGCLK_EN;
padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL4);
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL1);
value &= ~((XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_MDIV_MASK <<
XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_MDIV_SHIFT) |
(XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_NDIV_MASK <<
XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_NDIV_SHIFT));
if (usb)
value |= XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_NDIV_USB_VAL <<
XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_NDIV_SHIFT;
else
value |= XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_NDIV_SATA_VAL <<
XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_NDIV_SHIFT;
padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL1);
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL1);
value &= ~XUSB_PADCTL_UPHY_PLL_CTL1_IDDQ;
padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL1);
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL1);
value &= ~(XUSB_PADCTL_UPHY_PLL_CTL1_SLEEP_MASK <<
XUSB_PADCTL_UPHY_PLL_CTL1_SLEEP_SHIFT);
padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL1);
usleep_range(10, 20);
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL4);
value |= XUSB_PADCTL_UPHY_PLL_CTL4_REFCLKBUF_EN;
padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL4);
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL2);
value |= XUSB_PADCTL_UPHY_PLL_CTL2_CAL_EN;
padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL2);
timeout = jiffies + msecs_to_jiffies(100);
while (time_before(jiffies, timeout)) {
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL2);
if (value & XUSB_PADCTL_UPHY_PLL_CTL2_CAL_DONE)
break;
usleep_range(10, 20);
}
if (time_after_eq(jiffies, timeout)) {
err = -ETIMEDOUT;
goto reset;
}
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL2);
value &= ~XUSB_PADCTL_UPHY_PLL_CTL2_CAL_EN;
padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL2);
timeout = jiffies + msecs_to_jiffies(100);
while (time_before(jiffies, timeout)) {
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL2);
if (!(value & XUSB_PADCTL_UPHY_PLL_CTL2_CAL_DONE))
break;
usleep_range(10, 20);
}
if (time_after_eq(jiffies, timeout)) {
err = -ETIMEDOUT;
goto reset;
}
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL1);
value |= XUSB_PADCTL_UPHY_PLL_CTL1_ENABLE;
padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL1);
timeout = jiffies + msecs_to_jiffies(100);
while (time_before(jiffies, timeout)) {
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL1);
if (value & XUSB_PADCTL_UPHY_PLL_CTL1_LOCKDET_STATUS)
break;
usleep_range(10, 20);
}
if (time_after_eq(jiffies, timeout)) {
err = -ETIMEDOUT;
goto reset;
}
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL8);
value |= XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_EN |
XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_CLK_EN;
padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL8);
timeout = jiffies + msecs_to_jiffies(100);
while (time_before(jiffies, timeout)) {
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL8);
if (value & XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_DONE)
break;
usleep_range(10, 20);
}
if (time_after_eq(jiffies, timeout)) {
err = -ETIMEDOUT;
goto reset;
}
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL8);
value &= ~XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_EN;
padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL8);
timeout = jiffies + msecs_to_jiffies(100);
while (time_before(jiffies, timeout)) {
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL8);
if (!(value & XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_DONE))
break;
usleep_range(10, 20);
}
if (time_after_eq(jiffies, timeout)) {
err = -ETIMEDOUT;
goto reset;
}
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL8);
value &= ~XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_CLK_EN;
padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL8);
tegra210_sata_pll_hw_control_enable();
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL1);
value &= ~XUSB_PADCTL_UPHY_PLL_CTL1_PWR_OVRD;
padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL1);
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL2);
value &= ~XUSB_PADCTL_UPHY_PLL_CTL2_CAL_OVRD;
padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL2);
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL8);
value &= ~XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_OVRD;
padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL8);
usleep_range(10, 20);
tegra210_sata_pll_hw_sequence_start();
skip_pll_init:
sata->enable = true;
for (i = 0; i < padctl->sata->soc->num_lanes; i++) {
value = padctl_readl(padctl, XUSB_PADCTL_USB3_PAD_MUX);
value |= XUSB_PADCTL_USB3_PAD_MUX_SATA_IDDQ_DISABLE(i);
padctl_writel(padctl, value, XUSB_PADCTL_USB3_PAD_MUX);
}
return 0;
reset:
reset_control_assert(sata->rst);
disable:
clk_disable_unprepare(sata->pll);
return err;
}
static void tegra210_sata_uphy_disable(struct tegra_xusb_padctl *padctl)
{
struct tegra_xusb_sata_pad *sata = to_sata_pad(padctl->sata);
u32 value;
unsigned int i;
if (WARN_ON(!sata->enable))
return;
sata->enable = false;
for (i = 0; i < padctl->sata->soc->num_lanes; i++) {
value = padctl_readl(padctl, XUSB_PADCTL_USB3_PAD_MUX);
value &= ~XUSB_PADCTL_USB3_PAD_MUX_SATA_IDDQ_DISABLE(i);
padctl_writel(padctl, value, XUSB_PADCTL_USB3_PAD_MUX);
}
clk_disable_unprepare(sata->pll);
}
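/*
 * Release the LP0 clamps on the AUX mux: clear the clamp enables first and
 * the VCORE_DOWN indication last, with settling delays in between. The
 * enable path below applies them in the opposite order.
 */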
static void tegra210_aux_mux_lp0_clamp_disable(struct tegra_xusb_padctl *padctl)
{
u32 value;
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
value &= ~XUSB_PADCTL_ELPG_PROGRAM1_AUX_MUX_LP0_CLAMP_EN;
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
usleep_range(100, 200);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
value &= ~XUSB_PADCTL_ELPG_PROGRAM1_AUX_MUX_LP0_CLAMP_EN_EARLY;
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
usleep_range(100, 200);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
value &= ~XUSB_PADCTL_ELPG_PROGRAM1_AUX_MUX_LP0_VCORE_DOWN;
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
}
static void tegra210_aux_mux_lp0_clamp_enable(struct tegra_xusb_padctl *padctl)
{
u32 value;
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
value |= XUSB_PADCTL_ELPG_PROGRAM1_AUX_MUX_LP0_VCORE_DOWN;
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
usleep_range(100, 200);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
value |= XUSB_PADCTL_ELPG_PROGRAM1_AUX_MUX_LP0_CLAMP_EN_EARLY;
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
usleep_range(100, 200);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
value |= XUSB_PADCTL_ELPG_PROGRAM1_AUX_MUX_LP0_CLAMP_EN;
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
}
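/*
 * Top-level UPHY bring-up: enable whichever of the PEX and SATA UPHY blocks
 * are present, make sure PLLE is under hardware control, then release the
 * LP0 clamps.
 */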
static int tegra210_uphy_init(struct tegra_xusb_padctl *padctl)
{
if (padctl->pcie)
tegra210_pex_uphy_enable(padctl);
if (padctl->sata)
tegra210_sata_uphy_enable(padctl);
if (!tegra210_plle_hw_sequence_is_enabled())
tegra210_plle_hw_sequence_start();
else
dev_dbg(padctl->dev, "PLLE is already in HW control\n");
tegra210_aux_mux_lp0_clamp_disable(padctl);
return 0;
}
static void __maybe_unused
tegra210_uphy_deinit(struct tegra_xusb_padctl *padctl)
{
tegra210_aux_mux_lp0_clamp_enable(padctl);
if (padctl->sata)
tegra210_sata_uphy_disable(padctl);
if (padctl->pcie)
tegra210_pex_uphy_disable(padctl);
}
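/*
 * Drive the HSIC bus into (or out of) the IDLE state: pull-downs on DATA0/1
 * and a pull-up on STROBE when idle, no forced pulls otherwise.
 */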
static int tegra210_hsic_set_idle(struct tegra_xusb_padctl *padctl,
unsigned int index, bool idle)
{
u32 value;
value = padctl_readl(padctl, XUSB_PADCTL_HSIC_PADX_CTL0(index));
value &= ~(XUSB_PADCTL_HSIC_PAD_CTL0_RPU_DATA0 |
XUSB_PADCTL_HSIC_PAD_CTL0_RPU_DATA1 |
XUSB_PADCTL_HSIC_PAD_CTL0_RPD_STROBE);
if (idle)
value |= XUSB_PADCTL_HSIC_PAD_CTL0_RPD_DATA0 |
XUSB_PADCTL_HSIC_PAD_CTL0_RPD_DATA1 |
XUSB_PADCTL_HSIC_PAD_CTL0_RPU_STROBE;
else
value &= ~(XUSB_PADCTL_HSIC_PAD_CTL0_RPD_DATA0 |
XUSB_PADCTL_HSIC_PAD_CTL0_RPD_DATA1 |
XUSB_PADCTL_HSIC_PAD_CTL0_RPU_STROBE);
padctl_writel(padctl, value, XUSB_PADCTL_HSIC_PADX_CTL0(index));
return 0;
}
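/*
 * USB3 "sleepwalk" on Tegra210 amounts to clamping the SuperSpeed port in
 * ELPG: assert the early clamp, wait, then the full clamp. The disable path
 * releases them in the same order.
 */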
static int tegra210_usb3_enable_phy_sleepwalk(struct tegra_xusb_lane *lane,
enum usb_device_speed speed)
{
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
int port = tegra210_usb3_lane_map(lane);
struct device *dev = padctl->dev;
u32 value;
if (port < 0) {
dev_err(dev, "invalid usb3 port number\n");
return -EINVAL;
}
mutex_lock(&padctl->lock);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
value |= XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN_EARLY(port);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
usleep_range(100, 200);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
value |= XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN(port);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
usleep_range(250, 350);
mutex_unlock(&padctl->lock);
return 0;
}
static int tegra210_usb3_disable_phy_sleepwalk(struct tegra_xusb_lane *lane)
{
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
int port = tegra210_usb3_lane_map(lane);
struct device *dev = padctl->dev;
u32 value;
if (port < 0) {
dev_err(dev, "invalid usb3 port number\n");
return -EINVAL;
}
mutex_lock(&padctl->lock);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
value &= ~XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN_EARLY(port);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
usleep_range(100, 200);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
value &= ~XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN(port);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
mutex_unlock(&padctl->lock);
return 0;
}
static int tegra210_usb3_enable_phy_wake(struct tegra_xusb_lane *lane)
{
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
int port = tegra210_usb3_lane_map(lane);
struct device *dev = padctl->dev;
u32 value;
if (port < 0) {
dev_err(dev, "invalid usb3 port number\n");
return -EINVAL;
}
mutex_lock(&padctl->lock);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_0);
value &= ~ALL_WAKE_EVENTS;
value |= SS_PORT_WAKEUP_EVENT(port);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM_0);
usleep_range(10, 20);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_0);
value &= ~ALL_WAKE_EVENTS;
value |= SS_PORT_WAKE_INTERRUPT_ENABLE(port);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM_0);
mutex_unlock(&padctl->lock);
return 0;
}
static int tegra210_usb3_disable_phy_wake(struct tegra_xusb_lane *lane)
{
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
int port = tegra210_usb3_lane_map(lane);
struct device *dev = padctl->dev;
u32 value;
if (port < 0) {
dev_err(dev, "invalid usb3 port number\n");
return -EINVAL;
}
mutex_lock(&padctl->lock);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_0);
value &= ~ALL_WAKE_EVENTS;
value &= ~SS_PORT_WAKE_INTERRUPT_ENABLE(port);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM_0);
usleep_range(10, 20);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_0);
value &= ~ALL_WAKE_EVENTS;
value |= SS_PORT_WAKEUP_EVENT(port);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM_0);
mutex_unlock(&padctl->lock);
return 0;
}
static bool tegra210_usb3_phy_remote_wake_detected(struct tegra_xusb_lane *lane)
{
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
int index = tegra210_usb3_lane_map(lane);
u32 value;
if (index < 0)
return false;
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_0);
if ((value & SS_PORT_WAKE_INTERRUPT_ENABLE(index)) && (value & SS_PORT_WAKEUP_EVENT(index)))
return true;
return false;
}
static int tegra210_utmi_enable_phy_wake(struct tegra_xusb_lane *lane)
{
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
unsigned int index = lane->index;
u32 value;
mutex_lock(&padctl->lock);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_0);
value &= ~ALL_WAKE_EVENTS;
value |= USB2_PORT_WAKEUP_EVENT(index);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM_0);
usleep_range(10, 20);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_0);
value &= ~ALL_WAKE_EVENTS;
value |= USB2_PORT_WAKE_INTERRUPT_ENABLE(index);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM_0);
mutex_unlock(&padctl->lock);
return 0;
}
static int tegra210_utmi_disable_phy_wake(struct tegra_xusb_lane *lane)
{
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
unsigned int index = lane->index;
u32 value;
mutex_lock(&padctl->lock);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_0);
value &= ~ALL_WAKE_EVENTS;
value &= ~USB2_PORT_WAKE_INTERRUPT_ENABLE(index);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM_0);
usleep_range(10, 20);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_0);
value &= ~ALL_WAKE_EVENTS;
value |= USB2_PORT_WAKEUP_EVENT(index);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM_0);
mutex_unlock(&padctl->lock);
return 0;
}
static bool tegra210_utmi_phy_remote_wake_detected(struct tegra_xusb_lane *lane)
{
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
unsigned int index = lane->index;
u32 value;
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_0);
if ((value & USB2_PORT_WAKE_INTERRUPT_ENABLE(index)) &&
(value & USB2_PORT_WAKEUP_EVENT(index)))
return true;
return false;
}
static int tegra210_hsic_enable_phy_wake(struct tegra_xusb_lane *lane)
{
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
unsigned int index = lane->index;
u32 value;
mutex_lock(&padctl->lock);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_0);
value &= ~ALL_WAKE_EVENTS;
value |= USB2_HSIC_PORT_WAKEUP_EVENT(index);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM_0);
usleep_range(10, 20);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_0);
value &= ~ALL_WAKE_EVENTS;
value |= USB2_HSIC_PORT_WAKE_INTERRUPT_ENABLE(index);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM_0);
mutex_unlock(&padctl->lock);
return 0;
}
static int tegra210_hsic_disable_phy_wake(struct tegra_xusb_lane *lane)
{
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
unsigned int index = lane->index;
u32 value;
mutex_lock(&padctl->lock);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_0);
value &= ~ALL_WAKE_EVENTS;
value &= ~USB2_HSIC_PORT_WAKE_INTERRUPT_ENABLE(index);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM_0);
usleep_range(10, 20);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_0);
value &= ~ALL_WAKE_EVENTS;
value |= USB2_HSIC_PORT_WAKEUP_EVENT(index);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM_0);
mutex_unlock(&padctl->lock);
return 0;
}
static bool tegra210_hsic_phy_remote_wake_detected(struct tegra_xusb_lane *lane)
{
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
unsigned int index = lane->index;
u32 value;
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_0);
if ((value & USB2_HSIC_PORT_WAKE_INTERRUPT_ENABLE(index)) &&
(value & USB2_HSIC_PORT_WAKEUP_EVENT(index)))
return true;
return false;
}
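/*
 * Accessors for the PMC sleepwalk registers via the PMC regmap. A failed
 * regmap access is reported with WARN() but otherwise ignored.
 */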
#define padctl_pmc_readl(_priv, _offset) \
({ \
u32 value; \
WARN(regmap_read(_priv->regmap, _offset, &value), "read %s failed\n", #_offset);\
value; \
})
#define padctl_pmc_writel(_priv, _value, _offset) \
WARN(regmap_write(_priv->regmap, _offset, _value), "write %s failed\n", #_offset)
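/*
 * Hand a UTMI pad over to the PMC sleepwalk logic: capture the pad's
 * tracking/termination settings, program the four-phase sleepwalk table for
 * the current bus speed and arm wake detection, so the PMC can hold the
 * line state and respond to remote wake while the controller is powergated.
 */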
static int tegra210_pmc_utmi_enable_phy_sleepwalk(struct tegra_xusb_lane *lane,
enum usb_device_speed speed)
{
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
struct tegra210_xusb_padctl *priv = to_tegra210_xusb_padctl(padctl);
unsigned int port = lane->index;
u32 value, tctrl, pctrl, rpd_ctrl;
if (!priv->regmap)
return -EOPNOTSUPP;
if (speed > USB_SPEED_HIGH)
return -EINVAL;
value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL1);
tctrl = TCTRL_VALUE(value);
pctrl = PCTRL_VALUE(value);
value = padctl_readl(padctl, XUSB_PADCTL_USB2_OTG_PADX_CTL1(port));
rpd_ctrl = RPD_CTRL_VALUE(value);
/* ensure sleepwalk logic is disabled */
value = padctl_pmc_readl(priv, PMC_UTMIP_UHSIC_SLEEP_CFG(port));
value &= ~UTMIP_MASTER_ENABLE(port);
padctl_pmc_writel(priv, value, PMC_UTMIP_UHSIC_SLEEP_CFG(port));
/* ensure sleepwalk logic is in low-power mode */
value = padctl_pmc_readl(priv, PMC_UTMIP_MASTER_CONFIG);
value |= UTMIP_PWR(port);
padctl_pmc_writel(priv, value, PMC_UTMIP_MASTER_CONFIG);
/* set debounce time */
value = padctl_pmc_readl(priv, PMC_USB_DEBOUNCE_DEL);
value &= ~UTMIP_LINE_DEB_CNT(~0);
value |= UTMIP_LINE_DEB_CNT(0x1);
padctl_pmc_writel(priv, value, PMC_USB_DEBOUNCE_DEL);
/* ensure fake events of sleepwalk logic are disabled */
value = padctl_pmc_readl(priv, PMC_UTMIP_UHSIC_FAKE(port));
value &= ~(UTMIP_FAKE_USBOP_VAL(port) | UTMIP_FAKE_USBON_VAL(port) |
UTMIP_FAKE_USBOP_EN(port) | UTMIP_FAKE_USBON_EN(port));
padctl_pmc_writel(priv, value, PMC_UTMIP_UHSIC_FAKE(port));
/* ensure wake events of sleepwalk logic are not latched */
value = padctl_pmc_readl(priv, PMC_UTMIP_UHSIC_LINE_WAKEUP);
value &= ~UTMIP_LINE_WAKEUP_EN(port);
padctl_pmc_writel(priv, value, PMC_UTMIP_UHSIC_LINE_WAKEUP);
/* disable wake event triggers of sleepwalk logic */
value = padctl_pmc_readl(priv, PMC_UTMIP_UHSIC_SLEEP_CFG(port));
value &= ~UTMIP_WAKE_VAL(port, ~0);
value |= UTMIP_WAKE_VAL_NONE(port);
padctl_pmc_writel(priv, value, PMC_UTMIP_UHSIC_SLEEP_CFG(port));
/* power down the line state detectors of the pad */
value = padctl_pmc_readl(priv, PMC_USB_AO);
value |= (USBOP_VAL_PD(port) | USBON_VAL_PD(port));
padctl_pmc_writel(priv, value, PMC_USB_AO);
/* save state per speed */
value = padctl_pmc_readl(priv, PMC_UTMIP_UHSIC_SAVED_STATE(port));
value &= ~SPEED(port, ~0);
switch (speed) {
case USB_SPEED_HIGH:
value |= UTMI_HS(port);
break;
case USB_SPEED_FULL:
value |= UTMI_FS(port);
break;
case USB_SPEED_LOW:
value |= UTMI_LS(port);
break;
default:
value |= UTMI_RST(port);
break;
}
padctl_pmc_writel(priv, value, PMC_UTMIP_UHSIC_SAVED_STATE(port));
/* enable the trigger of the sleepwalk logic */
value = padctl_pmc_readl(priv, PMC_UTMIP_UHSIC_SLEEPWALK_CFG(port));
value |= UTMIP_LINEVAL_WALK_EN(port);
padctl_pmc_writel(priv, value, PMC_UTMIP_UHSIC_SLEEPWALK_CFG(port));
/*
* Reset the walk pointer and clear the alarm of the sleepwalk logic,
* as well as capture the configuration of the USB2.0 pad.
*/
value = padctl_pmc_readl(priv, PMC_UTMIP_UHSIC_TRIGGERS);
value |= UTMIP_CLR_WALK_PTR(port) | UTMIP_CLR_WAKE_ALARM(port) | UTMIP_CAP_CFG(port);
padctl_pmc_writel(priv, value, PMC_UTMIP_UHSIC_TRIGGERS);
/* program electrical parameters read from XUSB PADCTL */
value = padctl_pmc_readl(priv, PMC_UTMIP_TERM_PAD_CFG);
value &= ~(TCTRL_VAL(~0) | PCTRL_VAL(~0));
value |= (TCTRL_VAL(tctrl) | PCTRL_VAL(pctrl));
padctl_pmc_writel(priv, value, PMC_UTMIP_TERM_PAD_CFG);
value = padctl_pmc_readl(priv, PMC_UTMIP_PAD_CFGX(port));
value &= ~RPD_CTRL_PX(~0);
value |= RPD_CTRL_PX(rpd_ctrl);
padctl_pmc_writel(priv, value, PMC_UTMIP_PAD_CFGX(port));
/*
* Set up the pull-ups and pull-downs of the signals during the four
* stages of sleepwalk. If a device is connected, program sleepwalk
* logic to maintain a J and keep driving K upon seeing remote wake.
*/
value = padctl_pmc_readl(priv, PMC_UTMIP_SLEEPWALK_PX(port));
value = UTMIP_USBOP_RPD_A | UTMIP_USBOP_RPD_B | UTMIP_USBOP_RPD_C | UTMIP_USBOP_RPD_D;
value |= UTMIP_USBON_RPD_A | UTMIP_USBON_RPD_B | UTMIP_USBON_RPD_C | UTMIP_USBON_RPD_D;
switch (speed) {
case USB_SPEED_HIGH:
case USB_SPEED_FULL:
/* J state: D+/D- = high/low, K state: D+/D- = low/high */
value |= UTMIP_HIGHZ_A;
value |= UTMIP_AP_A;
value |= UTMIP_AN_B | UTMIP_AN_C | UTMIP_AN_D;
break;
case USB_SPEED_LOW:
/* J state: D+/D- = low/high, K state: D+/D- = high/low */
value |= UTMIP_HIGHZ_A;
value |= UTMIP_AN_A;
value |= UTMIP_AP_B | UTMIP_AP_C | UTMIP_AP_D;
break;
default:
value |= UTMIP_HIGHZ_A | UTMIP_HIGHZ_B | UTMIP_HIGHZ_C | UTMIP_HIGHZ_D;
break;
}
padctl_pmc_writel(priv, value, PMC_UTMIP_SLEEPWALK_PX(port));
/* power up the line state detectors of the pad */
value = padctl_pmc_readl(priv, PMC_USB_AO);
value &= ~(USBOP_VAL_PD(port) | USBON_VAL_PD(port));
padctl_pmc_writel(priv, value, PMC_USB_AO);
usleep_range(50, 100);
/* switch the electric control of the USB2.0 pad to PMC */
value = padctl_pmc_readl(priv, PMC_UTMIP_UHSIC_SLEEP_CFG(port));
value |= UTMIP_FSLS_USE_PMC(port) | UTMIP_PCTRL_USE_PMC(port) | UTMIP_TCTRL_USE_PMC(port);
padctl_pmc_writel(priv, value, PMC_UTMIP_UHSIC_SLEEP_CFG(port));
value = padctl_pmc_readl(priv, PMC_UTMIP_UHSIC_SLEEP_CFG1);
value |= UTMIP_RPD_CTRL_USE_PMC_PX(port) | UTMIP_RPU_SWITC_LOW_USE_PMC_PX(port);
padctl_pmc_writel(priv, value, PMC_UTMIP_UHSIC_SLEEP_CFG1);
/* set the wake signaling trigger events */
value = padctl_pmc_readl(priv, PMC_UTMIP_UHSIC_SLEEP_CFG(port));
value &= ~UTMIP_WAKE_VAL(port, ~0);
value |= UTMIP_WAKE_VAL_ANY(port);
padctl_pmc_writel(priv, value, PMC_UTMIP_UHSIC_SLEEP_CFG(port));
/* enable the wake detection */
value = padctl_pmc_readl(priv, PMC_UTMIP_UHSIC_SLEEP_CFG(port));
value |= UTMIP_MASTER_ENABLE(port);
padctl_pmc_writel(priv, value, PMC_UTMIP_UHSIC_SLEEP_CFG(port));
value = padctl_pmc_readl(priv, PMC_UTMIP_UHSIC_LINE_WAKEUP);
value |= UTMIP_LINE_WAKEUP_EN(port);
padctl_pmc_writel(priv, value, PMC_UTMIP_UHSIC_LINE_WAKEUP);
return 0;
}
static int tegra210_pmc_utmi_disable_phy_sleepwalk(struct tegra_xusb_lane *lane)
{
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
struct tegra210_xusb_padctl *priv = to_tegra210_xusb_padctl(padctl);
unsigned int port = lane->index;
u32 value;
if (!priv->regmap)
return -EOPNOTSUPP;
/* disable the wake detection */
value = padctl_pmc_readl(priv, PMC_UTMIP_UHSIC_SLEEP_CFG(port));
value &= ~UTMIP_MASTER_ENABLE(port);
padctl_pmc_writel(priv, value, PMC_UTMIP_UHSIC_SLEEP_CFG(port));
value = padctl_pmc_readl(priv, PMC_UTMIP_UHSIC_LINE_WAKEUP);
value &= ~UTMIP_LINE_WAKEUP_EN(port);
padctl_pmc_writel(priv, value, PMC_UTMIP_UHSIC_LINE_WAKEUP);
/* switch the electric control of the USB2.0 pad to XUSB or USB2 */
value = padctl_pmc_readl(priv, PMC_UTMIP_UHSIC_SLEEP_CFG(port));
value &= ~(UTMIP_FSLS_USE_PMC(port) | UTMIP_PCTRL_USE_PMC(port) |
UTMIP_TCTRL_USE_PMC(port));
padctl_pmc_writel(priv, value, PMC_UTMIP_UHSIC_SLEEP_CFG(port));
value = padctl_pmc_readl(priv, PMC_UTMIP_UHSIC_SLEEP_CFG1);
value &= ~(UTMIP_RPD_CTRL_USE_PMC_PX(port) | UTMIP_RPU_SWITC_LOW_USE_PMC_PX(port));
padctl_pmc_writel(priv, value, PMC_UTMIP_UHSIC_SLEEP_CFG1);
/* disable wake event triggers of sleepwalk logic */
value = padctl_pmc_readl(priv, PMC_UTMIP_UHSIC_SLEEP_CFG(port));
value &= ~UTMIP_WAKE_VAL(port, ~0);
value |= UTMIP_WAKE_VAL_NONE(port);
padctl_pmc_writel(priv, value, PMC_UTMIP_UHSIC_SLEEP_CFG(port));
/* power down the line state detectors of the port */
value = padctl_pmc_readl(priv, PMC_USB_AO);
value |= (USBOP_VAL_PD(port) | USBON_VAL_PD(port));
padctl_pmc_writel(priv, value, PMC_USB_AO);
/* clear alarm of the sleepwalk logic */
value = padctl_pmc_readl(priv, PMC_UTMIP_UHSIC_TRIGGERS);
value |= UTMIP_CLR_WAKE_ALARM(port);
padctl_pmc_writel(priv, value, PMC_UTMIP_UHSIC_TRIGGERS);
return 0;
}
static int tegra210_pmc_hsic_enable_phy_sleepwalk(struct tegra_xusb_lane *lane,
enum usb_device_speed speed)
{
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
struct tegra210_xusb_padctl *priv = to_tegra210_xusb_padctl(padctl);
u32 value;
if (!priv->regmap)
return -EOPNOTSUPP;
/* ensure sleepwalk logic is disabled */
value = padctl_pmc_readl(priv, PMC_UHSIC_SLEEP_CFG);
value &= ~UHSIC_MASTER_ENABLE;
padctl_pmc_writel(priv, value, PMC_UHSIC_SLEEP_CFG);
/* ensure sleepwalk logic is in low-power mode */
value = padctl_pmc_readl(priv, PMC_UTMIP_MASTER_CONFIG);
value |= UHSIC_PWR;
padctl_pmc_writel(priv, value, PMC_UTMIP_MASTER_CONFIG);
/* set debounce time */
value = padctl_pmc_readl(priv, PMC_USB_DEBOUNCE_DEL);
value &= ~UHSIC_LINE_DEB_CNT(~0);
value |= UHSIC_LINE_DEB_CNT(0x1);
padctl_pmc_writel(priv, value, PMC_USB_DEBOUNCE_DEL);
/* ensure fake events of sleepwalk logic are disabled */
value = padctl_pmc_readl(priv, PMC_UHSIC_FAKE);
value &= ~(UHSIC_FAKE_STROBE_VAL | UHSIC_FAKE_DATA_VAL |
UHSIC_FAKE_STROBE_EN | UHSIC_FAKE_DATA_EN);
padctl_pmc_writel(priv, value, PMC_UHSIC_FAKE);
/* ensure wake events of sleepwalk logic are not latched */
value = padctl_pmc_readl(priv, PMC_UTMIP_UHSIC_LINE_WAKEUP);
value &= ~UHSIC_LINE_WAKEUP_EN;
padctl_pmc_writel(priv, value, PMC_UTMIP_UHSIC_LINE_WAKEUP);
/* disable wake event triggers of sleepwalk logic */
value = padctl_pmc_readl(priv, PMC_UHSIC_SLEEP_CFG);
value &= ~UHSIC_WAKE_VAL(~0);
value |= UHSIC_WAKE_VAL_NONE;
padctl_pmc_writel(priv, value, PMC_UHSIC_SLEEP_CFG);
/* power down the line state detectors of the port */
value = padctl_pmc_readl(priv, PMC_USB_AO);
value |= STROBE_VAL_PD | DATA0_VAL_PD | DATA1_VAL_PD;
padctl_pmc_writel(priv, value, PMC_USB_AO);
/* save state, HSIC always comes up as HS */
value = padctl_pmc_readl(priv, PMC_UHSIC_SAVED_STATE);
value &= ~UHSIC_MODE(~0);
value |= UHSIC_HS;
padctl_pmc_writel(priv, value, PMC_UHSIC_SAVED_STATE);
/* enable the trigger of the sleepwalk logic */
value = padctl_pmc_readl(priv, PMC_UHSIC_SLEEPWALK_CFG);
value |= UHSIC_WAKE_WALK_EN | UHSIC_LINEVAL_WALK_EN;
padctl_pmc_writel(priv, value, PMC_UHSIC_SLEEPWALK_CFG);
/*
* Reset the walk pointer and clear the alarm of the sleepwalk logic,
* as well as capture the configuration of the USB2.0 port.
*/
value = padctl_pmc_readl(priv, PMC_UTMIP_UHSIC_TRIGGERS);
value |= UHSIC_CLR_WALK_PTR | UHSIC_CLR_WAKE_ALARM;
padctl_pmc_writel(priv, value, PMC_UTMIP_UHSIC_TRIGGERS);
/*
* Set up the pull-ups and pull-downs of the signals during the four
* stages of sleepwalk. Maintain a HSIC IDLE and keep driving HSIC
* RESUME upon remote wake.
*/
value = padctl_pmc_readl(priv, PMC_UHSIC_SLEEPWALK_P0);
value = UHSIC_DATA0_RPD_A | UHSIC_DATA0_RPU_B | UHSIC_DATA0_RPU_C | UHSIC_DATA0_RPU_D |
UHSIC_STROBE_RPU_A | UHSIC_STROBE_RPD_B | UHSIC_STROBE_RPD_C | UHSIC_STROBE_RPD_D;
padctl_pmc_writel(priv, value, PMC_UHSIC_SLEEPWALK_P0);
/* power up the line state detectors of the port */
value = padctl_pmc_readl(priv, PMC_USB_AO);
value &= ~(STROBE_VAL_PD | DATA0_VAL_PD | DATA1_VAL_PD);
padctl_pmc_writel(priv, value, PMC_USB_AO);
usleep_range(50, 100);
/* set the wake signaling trigger events */
value = padctl_pmc_readl(priv, PMC_UHSIC_SLEEP_CFG);
value &= ~UHSIC_WAKE_VAL(~0);
value |= UHSIC_WAKE_VAL_SD10;
padctl_pmc_writel(priv, value, PMC_UHSIC_SLEEP_CFG);
/* enable the wake detection */
value = padctl_pmc_readl(priv, PMC_UHSIC_SLEEP_CFG);
value |= UHSIC_MASTER_ENABLE;
padctl_pmc_writel(priv, value, PMC_UHSIC_SLEEP_CFG);
value = padctl_pmc_readl(priv, PMC_UTMIP_UHSIC_LINE_WAKEUP);
value |= UHSIC_LINE_WAKEUP_EN;
padctl_pmc_writel(priv, value, PMC_UTMIP_UHSIC_LINE_WAKEUP);
return 0;
}
static int tegra210_pmc_hsic_disable_phy_sleepwalk(struct tegra_xusb_lane *lane)
{
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
struct tegra210_xusb_padctl *priv = to_tegra210_xusb_padctl(padctl);
u32 value;
if (!priv->regmap)
return -EOPNOTSUPP;
/* disable the wake detection */
value = padctl_pmc_readl(priv, PMC_UHSIC_SLEEP_CFG);
value &= ~UHSIC_MASTER_ENABLE;
padctl_pmc_writel(priv, value, PMC_UHSIC_SLEEP_CFG);
value = padctl_pmc_readl(priv, PMC_UTMIP_UHSIC_LINE_WAKEUP);
value &= ~UHSIC_LINE_WAKEUP_EN;
padctl_pmc_writel(priv, value, PMC_UTMIP_UHSIC_LINE_WAKEUP);
/* disable wake event triggers of sleepwalk logic */
value = padctl_pmc_readl(priv, PMC_UHSIC_SLEEP_CFG);
value &= ~UHSIC_WAKE_VAL(~0);
value |= UHSIC_WAKE_VAL_NONE;
padctl_pmc_writel(priv, value, PMC_UHSIC_SLEEP_CFG);
/* power down the line state detectors of the port */
value = padctl_pmc_readl(priv, PMC_USB_AO);
value |= STROBE_VAL_PD | DATA0_VAL_PD | DATA1_VAL_PD;
padctl_pmc_writel(priv, value, PMC_USB_AO);
/* clear alarm of the sleepwalk logic */
value = padctl_pmc_readl(priv, PMC_UTMIP_UHSIC_TRIGGERS);
value |= UHSIC_CLR_WAKE_ALARM;
padctl_pmc_writel(priv, value, PMC_UTMIP_UHSIC_TRIGGERS);
return 0;
}
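/*
 * Enable or disable LFPS receiver detection on a USB3 port by clearing or
 * setting the AUX RX idle-mode/termination overrides on the PCIe or SATA
 * UPHY lane backing that port.
 */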
static int tegra210_usb3_set_lfps_detect(struct tegra_xusb_padctl *padctl,
unsigned int index, bool enable)
{
struct tegra_xusb_port *port;
struct tegra_xusb_lane *lane;
u32 value, offset;
port = tegra_xusb_find_port(padctl, "usb3", index);
if (!port)
return -ENODEV;
lane = port->lane;
if (lane->pad == padctl->pcie)
offset = XUSB_PADCTL_UPHY_MISC_PAD_PX_CTL1(lane->index);
else
offset = XUSB_PADCTL_UPHY_MISC_PAD_S0_CTL1;
value = padctl_readl(padctl, offset);
value &= ~((XUSB_PADCTL_UPHY_MISC_PAD_CTL1_AUX_RX_IDLE_MODE_MASK <<
XUSB_PADCTL_UPHY_MISC_PAD_CTL1_AUX_RX_IDLE_MODE_SHIFT) |
XUSB_PADCTL_UPHY_MISC_PAD_CTL1_AUX_RX_TERM_EN |
XUSB_PADCTL_UPHY_MISC_PAD_CTL1_AUX_RX_MODE_OVRD);
if (!enable) {
value |= (XUSB_PADCTL_UPHY_MISC_PAD_CTL1_AUX_RX_IDLE_MODE_VAL <<
XUSB_PADCTL_UPHY_MISC_PAD_CTL1_AUX_RX_IDLE_MODE_SHIFT) |
XUSB_PADCTL_UPHY_MISC_PAD_CTL1_AUX_RX_TERM_EN |
XUSB_PADCTL_UPHY_MISC_PAD_CTL1_AUX_RX_MODE_OVRD;
}
padctl_writel(padctl, value, offset);
return 0;
}
#define TEGRA210_LANE(_name, _offset, _shift, _mask, _type) \
{ \
.name = _name, \
.offset = _offset, \
.shift = _shift, \
.mask = _mask, \
.num_funcs = ARRAY_SIZE(tegra210_##_type##_functions), \
.funcs = tegra210_##_type##_functions, \
}
static const char *tegra210_usb2_functions[] = {
"snps",
"xusb",
"uart"
};
static const struct tegra_xusb_lane_soc tegra210_usb2_lanes[] = {
TEGRA210_LANE("usb2-0", 0x004, 0, 0x3, usb2),
TEGRA210_LANE("usb2-1", 0x004, 2, 0x3, usb2),
TEGRA210_LANE("usb2-2", 0x004, 4, 0x3, usb2),
TEGRA210_LANE("usb2-3", 0x004, 6, 0x3, usb2),
};
static struct tegra_xusb_lane *
tegra210_usb2_lane_probe(struct tegra_xusb_pad *pad, struct device_node *np,
unsigned int index)
{
struct tegra_xusb_usb2_lane *usb2;
int err;
usb2 = kzalloc(sizeof(*usb2), GFP_KERNEL);
if (!usb2)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&usb2->base.list);
usb2->base.soc = &pad->soc->lanes[index];
usb2->base.index = index;
usb2->base.pad = pad;
usb2->base.np = np;
err = tegra_xusb_lane_parse_dt(&usb2->base, np);
if (err < 0) {
kfree(usb2);
return ERR_PTR(err);
}
return &usb2->base;
}
static void tegra210_usb2_lane_remove(struct tegra_xusb_lane *lane)
{
struct tegra_xusb_usb2_lane *usb2 = to_usb2_lane(lane);
kfree(usb2);
}
static const struct tegra_xusb_lane_ops tegra210_usb2_lane_ops = {
.probe = tegra210_usb2_lane_probe,
.remove = tegra210_usb2_lane_remove,
.enable_phy_sleepwalk = tegra210_pmc_utmi_enable_phy_sleepwalk,
.disable_phy_sleepwalk = tegra210_pmc_utmi_disable_phy_sleepwalk,
.enable_phy_wake = tegra210_utmi_enable_phy_wake,
.disable_phy_wake = tegra210_utmi_disable_phy_wake,
.remote_wake_detected = tegra210_utmi_phy_remote_wake_detected,
};
static int tegra210_usb2_phy_init(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
unsigned int index = lane->index;
struct tegra_xusb_usb2_port *port;
int err;
u32 value;
port = tegra_xusb_find_usb2_port(padctl, index);
if (!port) {
dev_err(&phy->dev, "no port found for USB2 lane %u\n", index);
return -ENODEV;
}
if (port->supply && port->mode == USB_DR_MODE_HOST) {
err = regulator_enable(port->supply);
if (err)
return err;
}
mutex_lock(&padctl->lock);
value = padctl_readl(padctl, XUSB_PADCTL_USB2_PAD_MUX);
value &= ~(XUSB_PADCTL_USB2_PAD_MUX_USB2_BIAS_PAD_MASK <<
XUSB_PADCTL_USB2_PAD_MUX_USB2_BIAS_PAD_SHIFT);
value |= XUSB_PADCTL_USB2_PAD_MUX_USB2_BIAS_PAD_XUSB <<
XUSB_PADCTL_USB2_PAD_MUX_USB2_BIAS_PAD_SHIFT;
padctl_writel(padctl, value, XUSB_PADCTL_USB2_PAD_MUX);
mutex_unlock(&padctl->lock);
return 0;
}
static int tegra210_usb2_phy_exit(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
struct tegra_xusb_usb2_port *port;
int err;
port = tegra_xusb_find_usb2_port(padctl, lane->index);
if (!port) {
dev_err(&phy->dev, "no port found for USB2 lane %u\n", lane->index);
return -ENODEV;
}
if (port->supply && port->mode == USB_DR_MODE_HOST) {
err = regulator_disable(port->supply);
if (err)
return err;
}
return 0;
}
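/*
 * VBUS and ID overrides, used for OTG role switching: the VBUS override
 * forces a "VBUS present" indication with the ID pin floating (device mode),
 * while the ID override grounds or floats the ID pin (host mode or neither).
 */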
static int tegra210_xusb_padctl_vbus_override(struct tegra_xusb_padctl *padctl,
bool status)
{
u32 value;
dev_dbg(padctl->dev, "%s vbus override\n", status ? "set" : "clear");
value = padctl_readl(padctl, XUSB_PADCTL_USB2_VBUS_ID);
if (status) {
value |= XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_VBUS_ON;
value &= ~(XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_MASK <<
XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_SHIFT);
value |= XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_FLOATING <<
XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_SHIFT;
} else {
value &= ~XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_VBUS_ON;
}
padctl_writel(padctl, value, XUSB_PADCTL_USB2_VBUS_ID);
return 0;
}
static int tegra210_xusb_padctl_id_override(struct tegra_xusb_padctl *padctl,
bool status)
{
u32 value;
dev_dbg(padctl->dev, "%s id override\n", status ? "set" : "clear");
value = padctl_readl(padctl, XUSB_PADCTL_USB2_VBUS_ID);
if (status) {
if (value & XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_VBUS_ON) {
value &= ~XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_VBUS_ON;
padctl_writel(padctl, value, XUSB_PADCTL_USB2_VBUS_ID);
usleep_range(1000, 2000);
value = padctl_readl(padctl, XUSB_PADCTL_USB2_VBUS_ID);
}
value &= ~(XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_MASK <<
XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_SHIFT);
value |= XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_GROUNDED <<
XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_SHIFT;
} else {
value &= ~(XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_MASK <<
XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_SHIFT);
value |= XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_FLOATING <<
XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_SHIFT;
}
padctl_writel(padctl, value, XUSB_PADCTL_USB2_VBUS_ID);
return 0;
}
static int tegra210_usb2_phy_set_mode(struct phy *phy, enum phy_mode mode,
int submode)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
struct tegra_xusb_usb2_port *port = tegra_xusb_find_usb2_port(padctl,
lane->index);
int err = 0;
mutex_lock(&padctl->lock);
dev_dbg(&port->base.dev, "%s: mode %d", __func__, mode);
if (mode == PHY_MODE_USB_OTG) {
if (submode == USB_ROLE_HOST) {
tegra210_xusb_padctl_id_override(padctl, true);
err = regulator_enable(port->supply);
} else if (submode == USB_ROLE_DEVICE) {
tegra210_xusb_padctl_vbus_override(padctl, true);
} else if (submode == USB_ROLE_NONE) {
/*
* When port is peripheral only or role transitions to
* USB_ROLE_NONE from USB_ROLE_DEVICE, regulator is not
* be enabled.
*/
if (regulator_is_enabled(port->supply))
regulator_disable(port->supply);
tegra210_xusb_padctl_id_override(padctl, false);
tegra210_xusb_padctl_vbus_override(padctl, false);
}
}
mutex_unlock(&padctl->lock);
return err;
}
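/*
 * Power on a USB2 pad: map the optional fake USB3 port, program squelch and
 * disconnect levels, per-port capability and the fuse calibration values,
 * and run the bias pad tracking sequence once when the first port powers up.
 */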
static int tegra210_usb2_phy_power_on(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
struct tegra_xusb_usb2_lane *usb2 = to_usb2_lane(lane);
struct tegra_xusb_usb2_pad *pad = to_usb2_pad(lane->pad);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
struct tegra210_xusb_padctl *priv;
struct tegra_xusb_usb2_port *port;
unsigned int index = lane->index;
u32 value;
int err;
port = tegra_xusb_find_usb2_port(padctl, index);
if (!port) {
dev_err(&phy->dev, "no port found for USB2 lane %u\n", index);
return -ENODEV;
}
priv = to_tegra210_xusb_padctl(padctl);
mutex_lock(&padctl->lock);
if (port->usb3_port_fake != -1) {
value = padctl_readl(padctl, XUSB_PADCTL_SS_PORT_MAP);
value &= ~XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP_MASK(
port->usb3_port_fake);
value |= XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP(
port->usb3_port_fake, index);
padctl_writel(padctl, value, XUSB_PADCTL_SS_PORT_MAP);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
value &= ~XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_VCORE_DOWN(
port->usb3_port_fake);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
usleep_range(100, 200);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
value &= ~XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN_EARLY(
port->usb3_port_fake);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
usleep_range(100, 200);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
value &= ~XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN(
port->usb3_port_fake);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
}
value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL0);
value &= ~((XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_SQUELCH_LEVEL_MASK <<
XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_SQUELCH_LEVEL_SHIFT) |
(XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_DISCON_LEVEL_MASK <<
XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_DISCON_LEVEL_SHIFT));
value |= (XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_DISCON_LEVEL_VAL <<
XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_DISCON_LEVEL_SHIFT);
if (tegra_sku_info.revision < TEGRA_REVISION_A02)
value |=
(XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_SQUELCH_LEVEL_VAL <<
XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_SQUELCH_LEVEL_SHIFT);
padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL0);
value = padctl_readl(padctl, XUSB_PADCTL_USB2_PORT_CAP);
value &= ~XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_MASK(index);
if (port->mode == USB_DR_MODE_UNKNOWN)
value |= XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_DISABLED(index);
else if (port->mode == USB_DR_MODE_PERIPHERAL)
value |= XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_DEVICE(index);
else if (port->mode == USB_DR_MODE_HOST)
value |= XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_HOST(index);
else if (port->mode == USB_DR_MODE_OTG)
value |= XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_OTG(index);
padctl_writel(padctl, value, XUSB_PADCTL_USB2_PORT_CAP);
value = padctl_readl(padctl, XUSB_PADCTL_USB2_OTG_PADX_CTL0(index));
value &= ~((XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_CURR_LEVEL_MASK <<
XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_CURR_LEVEL_SHIFT) |
XUSB_PADCTL_USB2_OTG_PAD_CTL0_PD |
XUSB_PADCTL_USB2_OTG_PAD_CTL0_PD2 |
XUSB_PADCTL_USB2_OTG_PAD_CTL0_PD_ZI);
value |= (priv->fuse.hs_curr_level[index] +
usb2->hs_curr_level_offset) <<
XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_CURR_LEVEL_SHIFT;
padctl_writel(padctl, value, XUSB_PADCTL_USB2_OTG_PADX_CTL0(index));
value = padctl_readl(padctl, XUSB_PADCTL_USB2_OTG_PADX_CTL1(index));
value &= ~((XUSB_PADCTL_USB2_OTG_PAD_CTL1_TERM_RANGE_ADJ_MASK <<
XUSB_PADCTL_USB2_OTG_PAD_CTL1_TERM_RANGE_ADJ_SHIFT) |
(XUSB_PADCTL_USB2_OTG_PAD_CTL1_RPD_CTRL_MASK <<
XUSB_PADCTL_USB2_OTG_PAD_CTL1_RPD_CTRL_SHIFT) |
XUSB_PADCTL_USB2_OTG_PAD_CTL1_PD_DR |
XUSB_PADCTL_USB2_OTG_PAD_CTL1_PD_CHRP_OVRD |
XUSB_PADCTL_USB2_OTG_PAD_CTL1_PD_DISC_OVRD);
value |= (priv->fuse.hs_term_range_adj <<
XUSB_PADCTL_USB2_OTG_PAD_CTL1_TERM_RANGE_ADJ_SHIFT) |
(priv->fuse.rpd_ctrl <<
XUSB_PADCTL_USB2_OTG_PAD_CTL1_RPD_CTRL_SHIFT);
padctl_writel(padctl, value, XUSB_PADCTL_USB2_OTG_PADX_CTL1(index));
value = padctl_readl(padctl,
XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPADX_CTL1(index));
value &= ~(XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_LEV_MASK <<
XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_LEV_SHIFT);
if (port->mode == USB_DR_MODE_HOST)
value |= XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_FIX18;
else
value |=
XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_LEV_VAL <<
XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_LEV_SHIFT;
padctl_writel(padctl, value,
XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPADX_CTL1(index));
if (pad->enable > 0) {
pad->enable++;
mutex_unlock(&padctl->lock);
return 0;
}
err = clk_prepare_enable(pad->clk);
if (err)
goto out;
value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL1);
value &= ~((XUSB_PADCTL_USB2_BIAS_PAD_CTL1_TRK_START_TIMER_MASK <<
XUSB_PADCTL_USB2_BIAS_PAD_CTL1_TRK_START_TIMER_SHIFT) |
(XUSB_PADCTL_USB2_BIAS_PAD_CTL1_TRK_DONE_RESET_TIMER_MASK <<
XUSB_PADCTL_USB2_BIAS_PAD_CTL1_TRK_DONE_RESET_TIMER_SHIFT));
value |= (XUSB_PADCTL_USB2_BIAS_PAD_CTL1_TRK_START_TIMER_VAL <<
XUSB_PADCTL_USB2_BIAS_PAD_CTL1_TRK_START_TIMER_SHIFT) |
(XUSB_PADCTL_USB2_BIAS_PAD_CTL1_TRK_DONE_RESET_TIMER_VAL <<
XUSB_PADCTL_USB2_BIAS_PAD_CTL1_TRK_DONE_RESET_TIMER_SHIFT);
padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL1);
value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL0);
value &= ~XUSB_PADCTL_USB2_BIAS_PAD_CTL0_PD;
padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL0);
udelay(1);
value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL1);
value &= ~XUSB_PADCTL_USB2_BIAS_PAD_CTL1_PD_TRK;
padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL1);
udelay(50);
clk_disable_unprepare(pad->clk);
pad->enable++;
mutex_unlock(&padctl->lock);
return 0;
out:
mutex_unlock(&padctl->lock);
return err;
}
static int tegra210_usb2_phy_power_off(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
struct tegra_xusb_usb2_pad *pad = to_usb2_pad(lane->pad);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
struct tegra_xusb_usb2_port *port;
u32 value;
port = tegra_xusb_find_usb2_port(padctl, lane->index);
if (!port) {
dev_err(&phy->dev, "no port found for USB2 lane %u\n",
lane->index);
return -ENODEV;
}
mutex_lock(&padctl->lock);
if (port->usb3_port_fake != -1) {
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
value |= XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN_EARLY(
port->usb3_port_fake);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
usleep_range(100, 200);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
value |= XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN(
port->usb3_port_fake);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
usleep_range(250, 350);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
value |= XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_VCORE_DOWN(
port->usb3_port_fake);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
value = padctl_readl(padctl, XUSB_PADCTL_SS_PORT_MAP);
value |= XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP(port->usb3_port_fake,
XUSB_PADCTL_SS_PORT_MAP_PORT_DISABLED);
padctl_writel(padctl, value, XUSB_PADCTL_SS_PORT_MAP);
}
if (WARN_ON(pad->enable == 0))
goto out;
if (--pad->enable > 0)
goto out;
value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL0);
value |= XUSB_PADCTL_USB2_BIAS_PAD_CTL0_PD;
padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL0);
out:
mutex_unlock(&padctl->lock);
return 0;
}
static const struct phy_ops tegra210_usb2_phy_ops = {
.init = tegra210_usb2_phy_init,
.exit = tegra210_usb2_phy_exit,
.power_on = tegra210_usb2_phy_power_on,
.power_off = tegra210_usb2_phy_power_off,
.set_mode = tegra210_usb2_phy_set_mode,
.owner = THIS_MODULE,
};
static struct tegra_xusb_pad *
tegra210_usb2_pad_probe(struct tegra_xusb_padctl *padctl,
const struct tegra_xusb_pad_soc *soc,
struct device_node *np)
{
struct tegra_xusb_usb2_pad *usb2;
struct tegra_xusb_pad *pad;
int err;
usb2 = kzalloc(sizeof(*usb2), GFP_KERNEL);
if (!usb2)
return ERR_PTR(-ENOMEM);
pad = &usb2->base;
pad->ops = &tegra210_usb2_lane_ops;
pad->soc = soc;
err = tegra_xusb_pad_init(pad, padctl, np);
if (err < 0) {
kfree(usb2);
goto out;
}
usb2->clk = devm_clk_get(&pad->dev, "trk");
if (IS_ERR(usb2->clk)) {
err = PTR_ERR(usb2->clk);
dev_err(&pad->dev, "failed to get trk clock: %d\n", err);
goto unregister;
}
err = tegra_xusb_pad_register(pad, &tegra210_usb2_phy_ops);
if (err < 0)
goto unregister;
dev_set_drvdata(&pad->dev, pad);
return pad;
unregister:
device_unregister(&pad->dev);
out:
return ERR_PTR(err);
}
static void tegra210_usb2_pad_remove(struct tegra_xusb_pad *pad)
{
struct tegra_xusb_usb2_pad *usb2 = to_usb2_pad(pad);
kfree(usb2);
}
static const struct tegra_xusb_pad_ops tegra210_usb2_ops = {
.probe = tegra210_usb2_pad_probe,
.remove = tegra210_usb2_pad_remove,
};
static const struct tegra_xusb_pad_soc tegra210_usb2_pad = {
.name = "usb2",
.num_lanes = ARRAY_SIZE(tegra210_usb2_lanes),
.lanes = tegra210_usb2_lanes,
.ops = &tegra210_usb2_ops,
};
static const char *tegra210_hsic_functions[] = {
"snps",
"xusb",
};
static const struct tegra_xusb_lane_soc tegra210_hsic_lanes[] = {
TEGRA210_LANE("hsic-0", 0x004, 14, 0x1, hsic),
};
static struct tegra_xusb_lane *
tegra210_hsic_lane_probe(struct tegra_xusb_pad *pad, struct device_node *np,
unsigned int index)
{
struct tegra_xusb_hsic_lane *hsic;
int err;
hsic = kzalloc(sizeof(*hsic), GFP_KERNEL);
if (!hsic)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&hsic->base.list);
hsic->base.soc = &pad->soc->lanes[index];
hsic->base.index = index;
hsic->base.pad = pad;
hsic->base.np = np;
err = tegra_xusb_lane_parse_dt(&hsic->base, np);
if (err < 0) {
kfree(hsic);
return ERR_PTR(err);
}
return &hsic->base;
}
static void tegra210_hsic_lane_remove(struct tegra_xusb_lane *lane)
{
struct tegra_xusb_hsic_lane *hsic = to_hsic_lane(lane);
kfree(hsic);
}
static const struct tegra_xusb_lane_ops tegra210_hsic_lane_ops = {
.probe = tegra210_hsic_lane_probe,
.remove = tegra210_hsic_lane_remove,
.enable_phy_sleepwalk = tegra210_pmc_hsic_enable_phy_sleepwalk,
.disable_phy_sleepwalk = tegra210_pmc_hsic_disable_phy_sleepwalk,
.enable_phy_wake = tegra210_hsic_enable_phy_wake,
.disable_phy_wake = tegra210_hsic_disable_phy_wake,
.remote_wake_detected = tegra210_hsic_phy_remote_wake_detected,
};
static int tegra210_hsic_phy_init(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
u32 value;
value = padctl_readl(padctl, XUSB_PADCTL_USB2_PAD_MUX);
value &= ~(XUSB_PADCTL_USB2_PAD_MUX_HSIC_PAD_TRK_MASK <<
XUSB_PADCTL_USB2_PAD_MUX_HSIC_PAD_TRK_SHIFT);
value |= XUSB_PADCTL_USB2_PAD_MUX_HSIC_PAD_TRK_XUSB <<
XUSB_PADCTL_USB2_PAD_MUX_HSIC_PAD_TRK_SHIFT;
padctl_writel(padctl, value, XUSB_PADCTL_USB2_PAD_MUX);
return 0;
}
static int tegra210_hsic_phy_exit(struct phy *phy)
{
return 0;
}
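/*
 * HSIC pad power-on: enable the pad supply, program the strobe/data trimmers
 * and TX impedance tuning, switch the pad from pull-ups to pull-downs, then
 * run one impedance tracking cycle on the "trk" clock before turning that
 * clock off again.
 */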
static int tegra210_hsic_phy_power_on(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
struct tegra_xusb_hsic_lane *hsic = to_hsic_lane(lane);
struct tegra_xusb_hsic_pad *pad = to_hsic_pad(lane->pad);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
unsigned int index = lane->index;
u32 value;
int err;
err = regulator_enable(pad->supply);
if (err)
return err;
padctl_writel(padctl, hsic->strobe_trim,
XUSB_PADCTL_HSIC_STRB_TRIM_CONTROL);
value = padctl_readl(padctl, XUSB_PADCTL_HSIC_PADX_CTL1(index));
value &= ~(XUSB_PADCTL_HSIC_PAD_CTL1_TX_RTUNEP_MASK <<
XUSB_PADCTL_HSIC_PAD_CTL1_TX_RTUNEP_SHIFT);
value |= (hsic->tx_rtune_p <<
XUSB_PADCTL_HSIC_PAD_CTL1_TX_RTUNEP_SHIFT);
padctl_writel(padctl, value, XUSB_PADCTL_HSIC_PADX_CTL1(index));
value = padctl_readl(padctl, XUSB_PADCTL_HSIC_PADX_CTL2(index));
value &= ~((XUSB_PADCTL_HSIC_PAD_CTL2_RX_STROBE_TRIM_MASK <<
XUSB_PADCTL_HSIC_PAD_CTL2_RX_STROBE_TRIM_SHIFT) |
(XUSB_PADCTL_HSIC_PAD_CTL2_RX_DATA_TRIM_MASK <<
XUSB_PADCTL_HSIC_PAD_CTL2_RX_DATA_TRIM_SHIFT));
value |= (hsic->rx_strobe_trim <<
XUSB_PADCTL_HSIC_PAD_CTL2_RX_STROBE_TRIM_SHIFT) |
(hsic->rx_data_trim <<
XUSB_PADCTL_HSIC_PAD_CTL2_RX_DATA_TRIM_SHIFT);
padctl_writel(padctl, value, XUSB_PADCTL_HSIC_PADX_CTL2(index));
value = padctl_readl(padctl, XUSB_PADCTL_HSIC_PADX_CTL0(index));
value &= ~(XUSB_PADCTL_HSIC_PAD_CTL0_RPU_DATA0 |
XUSB_PADCTL_HSIC_PAD_CTL0_RPU_DATA1 |
XUSB_PADCTL_HSIC_PAD_CTL0_RPU_STROBE |
XUSB_PADCTL_HSIC_PAD_CTL0_PD_RX_DATA0 |
XUSB_PADCTL_HSIC_PAD_CTL0_PD_RX_DATA1 |
XUSB_PADCTL_HSIC_PAD_CTL0_PD_RX_STROBE |
XUSB_PADCTL_HSIC_PAD_CTL0_PD_ZI_DATA0 |
XUSB_PADCTL_HSIC_PAD_CTL0_PD_ZI_DATA1 |
XUSB_PADCTL_HSIC_PAD_CTL0_PD_ZI_STROBE |
XUSB_PADCTL_HSIC_PAD_CTL0_PD_TX_DATA0 |
XUSB_PADCTL_HSIC_PAD_CTL0_PD_TX_DATA1 |
XUSB_PADCTL_HSIC_PAD_CTL0_PD_TX_STROBE);
value |= XUSB_PADCTL_HSIC_PAD_CTL0_RPD_DATA0 |
XUSB_PADCTL_HSIC_PAD_CTL0_RPD_DATA1 |
XUSB_PADCTL_HSIC_PAD_CTL0_RPD_STROBE;
padctl_writel(padctl, value, XUSB_PADCTL_HSIC_PADX_CTL0(index));
err = clk_prepare_enable(pad->clk);
if (err)
goto disable;
value = padctl_readl(padctl, XUSB_PADCTL_HSIC_PAD_TRK_CTL);
value &= ~((XUSB_PADCTL_HSIC_PAD_TRK_CTL_TRK_START_TIMER_MASK <<
XUSB_PADCTL_HSIC_PAD_TRK_CTL_TRK_START_TIMER_SHIFT) |
(XUSB_PADCTL_HSIC_PAD_TRK_CTL_TRK_DONE_RESET_TIMER_MASK <<
XUSB_PADCTL_HSIC_PAD_TRK_CTL_TRK_DONE_RESET_TIMER_SHIFT));
value |= (XUSB_PADCTL_HSIC_PAD_TRK_CTL_TRK_START_TIMER_VAL <<
XUSB_PADCTL_HSIC_PAD_TRK_CTL_TRK_START_TIMER_SHIFT) |
(XUSB_PADCTL_HSIC_PAD_TRK_CTL_TRK_DONE_RESET_TIMER_VAL <<
XUSB_PADCTL_HSIC_PAD_TRK_CTL_TRK_DONE_RESET_TIMER_SHIFT);
padctl_writel(padctl, value, XUSB_PADCTL_HSIC_PAD_TRK_CTL);
udelay(1);
value = padctl_readl(padctl, XUSB_PADCTL_HSIC_PAD_TRK_CTL);
value &= ~XUSB_PADCTL_HSIC_PAD_TRK_CTL_PD_TRK;
padctl_writel(padctl, value, XUSB_PADCTL_HSIC_PAD_TRK_CTL);
udelay(50);
clk_disable_unprepare(pad->clk);
return 0;
disable:
regulator_disable(pad->supply);
return err;
}
static int tegra210_hsic_phy_power_off(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
struct tegra_xusb_hsic_pad *pad = to_hsic_pad(lane->pad);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
unsigned int index = lane->index;
u32 value;
value = padctl_readl(padctl, XUSB_PADCTL_HSIC_PADX_CTL0(index));
value |= XUSB_PADCTL_HSIC_PAD_CTL0_PD_RX_DATA0 |
XUSB_PADCTL_HSIC_PAD_CTL0_PD_RX_DATA1 |
XUSB_PADCTL_HSIC_PAD_CTL0_PD_RX_STROBE |
XUSB_PADCTL_HSIC_PAD_CTL0_PD_ZI_DATA0 |
XUSB_PADCTL_HSIC_PAD_CTL0_PD_ZI_DATA1 |
XUSB_PADCTL_HSIC_PAD_CTL0_PD_ZI_STROBE |
XUSB_PADCTL_HSIC_PAD_CTL0_PD_TX_DATA0 |
XUSB_PADCTL_HSIC_PAD_CTL0_PD_TX_DATA1 |
XUSB_PADCTL_HSIC_PAD_CTL0_PD_TX_STROBE;
	padctl_writel(padctl, value, XUSB_PADCTL_HSIC_PADX_CTL0(index));
regulator_disable(pad->supply);
return 0;
}
static const struct phy_ops tegra210_hsic_phy_ops = {
.init = tegra210_hsic_phy_init,
.exit = tegra210_hsic_phy_exit,
.power_on = tegra210_hsic_phy_power_on,
.power_off = tegra210_hsic_phy_power_off,
.owner = THIS_MODULE,
};
static struct tegra_xusb_pad *
tegra210_hsic_pad_probe(struct tegra_xusb_padctl *padctl,
const struct tegra_xusb_pad_soc *soc,
struct device_node *np)
{
struct tegra_xusb_hsic_pad *hsic;
struct tegra_xusb_pad *pad;
int err;
hsic = kzalloc(sizeof(*hsic), GFP_KERNEL);
if (!hsic)
return ERR_PTR(-ENOMEM);
pad = &hsic->base;
pad->ops = &tegra210_hsic_lane_ops;
pad->soc = soc;
err = tegra_xusb_pad_init(pad, padctl, np);
if (err < 0) {
kfree(hsic);
goto out;
}
hsic->clk = devm_clk_get(&pad->dev, "trk");
if (IS_ERR(hsic->clk)) {
err = PTR_ERR(hsic->clk);
dev_err(&pad->dev, "failed to get trk clock: %d\n", err);
goto unregister;
}
err = tegra_xusb_pad_register(pad, &tegra210_hsic_phy_ops);
if (err < 0)
goto unregister;
dev_set_drvdata(&pad->dev, pad);
return pad;
unregister:
device_unregister(&pad->dev);
out:
return ERR_PTR(err);
}
static void tegra210_hsic_pad_remove(struct tegra_xusb_pad *pad)
{
struct tegra_xusb_hsic_pad *hsic = to_hsic_pad(pad);
kfree(hsic);
}
static const struct tegra_xusb_pad_ops tegra210_hsic_ops = {
.probe = tegra210_hsic_pad_probe,
.remove = tegra210_hsic_pad_remove,
};
static const struct tegra_xusb_pad_soc tegra210_hsic_pad = {
.name = "hsic",
.num_lanes = ARRAY_SIZE(tegra210_hsic_lanes),
.lanes = tegra210_hsic_lanes,
.ops = &tegra210_hsic_ops,
};
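/*
 * UPHY (PCIe/SATA) lane IDDQ control: the enable path forces TX/RX into IDDQ
 * and sleep through the override bits, while the disable path releases the
 * overrides so the hardware state machines control the lane again.
 */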
static void tegra210_uphy_lane_iddq_enable(struct tegra_xusb_lane *lane)
{
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
u32 value;
value = padctl_readl(padctl, lane->soc->regs.misc_ctl2);
value |= XUSB_PADCTL_UPHY_MISC_PAD_CTL2_TX_IDDQ_OVRD;
value |= XUSB_PADCTL_UPHY_MISC_PAD_CTL2_RX_IDDQ_OVRD;
value |= XUSB_PADCTL_UPHY_MISC_PAD_CTL2_TX_PWR_OVRD;
value |= XUSB_PADCTL_UPHY_MISC_PAD_CTL2_RX_PWR_OVRD;
value |= XUSB_PADCTL_UPHY_MISC_PAD_CTL2_TX_IDDQ;
value &= ~XUSB_PADCTL_UPHY_MISC_PAD_CTL2_TX_SLEEP_MASK;
value |= XUSB_PADCTL_UPHY_MISC_PAD_CTL2_TX_SLEEP_VAL;
value |= XUSB_PADCTL_UPHY_MISC_PAD_CTL2_RX_IDDQ;
value &= ~XUSB_PADCTL_UPHY_MISC_PAD_CTL2_RX_SLEEP_MASK;
value |= XUSB_PADCTL_UPHY_MISC_PAD_CTL2_RX_SLEEP_VAL;
padctl_writel(padctl, value, lane->soc->regs.misc_ctl2);
}
static void tegra210_uphy_lane_iddq_disable(struct tegra_xusb_lane *lane)
{
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
u32 value;
value = padctl_readl(padctl, lane->soc->regs.misc_ctl2);
value &= ~XUSB_PADCTL_UPHY_MISC_PAD_CTL2_TX_IDDQ_OVRD;
value &= ~XUSB_PADCTL_UPHY_MISC_PAD_CTL2_RX_IDDQ_OVRD;
value &= ~XUSB_PADCTL_UPHY_MISC_PAD_CTL2_TX_PWR_OVRD;
value &= ~XUSB_PADCTL_UPHY_MISC_PAD_CTL2_RX_PWR_OVRD;
value |= XUSB_PADCTL_UPHY_MISC_PAD_CTL2_TX_IDDQ;
value &= ~XUSB_PADCTL_UPHY_MISC_PAD_CTL2_TX_SLEEP_MASK;
value |= XUSB_PADCTL_UPHY_MISC_PAD_CTL2_TX_SLEEP_VAL;
value |= XUSB_PADCTL_UPHY_MISC_PAD_CTL2_RX_IDDQ;
value &= ~XUSB_PADCTL_UPHY_MISC_PAD_CTL2_RX_SLEEP_MASK;
value |= XUSB_PADCTL_UPHY_MISC_PAD_CTL2_RX_SLEEP_VAL;
padctl_writel(padctl, value, lane->soc->regs.misc_ctl2);
}
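/*
 * Describe a UPHY lane: the pin-mux register offset/shift/mask, the function
 * table the lane selects from and the UPHY_MISC_PAD_*_CTL2 register used for
 * IDDQ control of that lane.
 */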
#define TEGRA210_UPHY_LANE(_name, _offset, _shift, _mask, _type, _misc) \
{ \
.name = _name, \
.offset = _offset, \
.shift = _shift, \
.mask = _mask, \
.num_funcs = ARRAY_SIZE(tegra210_##_type##_functions), \
.funcs = tegra210_##_type##_functions, \
.regs.misc_ctl2 = _misc, \
}
static const char *tegra210_pcie_functions[] = {
"pcie-x1",
"usb3-ss",
"sata",
"pcie-x4",
};
static const struct tegra_xusb_lane_soc tegra210_pcie_lanes[] = {
TEGRA210_UPHY_LANE("pcie-0", 0x028, 12, 0x3, pcie, XUSB_PADCTL_UPHY_MISC_PAD_PX_CTL2(0)),
TEGRA210_UPHY_LANE("pcie-1", 0x028, 14, 0x3, pcie, XUSB_PADCTL_UPHY_MISC_PAD_PX_CTL2(1)),
TEGRA210_UPHY_LANE("pcie-2", 0x028, 16, 0x3, pcie, XUSB_PADCTL_UPHY_MISC_PAD_PX_CTL2(2)),
TEGRA210_UPHY_LANE("pcie-3", 0x028, 18, 0x3, pcie, XUSB_PADCTL_UPHY_MISC_PAD_PX_CTL2(3)),
TEGRA210_UPHY_LANE("pcie-4", 0x028, 20, 0x3, pcie, XUSB_PADCTL_UPHY_MISC_PAD_PX_CTL2(4)),
TEGRA210_UPHY_LANE("pcie-5", 0x028, 22, 0x3, pcie, XUSB_PADCTL_UPHY_MISC_PAD_PX_CTL2(5)),
TEGRA210_UPHY_LANE("pcie-6", 0x028, 24, 0x3, pcie, XUSB_PADCTL_UPHY_MISC_PAD_PX_CTL2(6)),
};
static struct tegra_xusb_usb3_port *
tegra210_lane_to_usb3_port(struct tegra_xusb_lane *lane)
{
int port;
if (!lane || !lane->pad || !lane->pad->padctl)
return NULL;
port = tegra210_usb3_lane_map(lane);
if (port < 0)
return NULL;
return tegra_xusb_find_usb3_port(lane->pad->padctl, port);
}
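/*
 * Bring up a USB3 (SuperSpeed) port: map it to its USB2 companion port in
 * SS_PORT_MAP, program the UPHY ECTL1/2/3/4/6 termination and equalizer
 * defaults for the pad, then release the ELPG clamps and vcore power-down
 * for the port with the required settling delays in between.
 */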
static int tegra210_usb3_phy_power_on(struct phy *phy)
{
struct device *dev = &phy->dev;
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
struct tegra_xusb_usb3_port *usb3 = tegra210_lane_to_usb3_port(lane);
unsigned int index;
u32 value;
if (!usb3) {
dev_err(dev, "no USB3 port found for lane %u\n", lane->index);
return -ENODEV;
}
index = usb3->base.index;
value = padctl_readl(padctl, XUSB_PADCTL_SS_PORT_MAP);
if (!usb3->internal)
value &= ~XUSB_PADCTL_SS_PORT_MAP_PORTX_INTERNAL(index);
else
value |= XUSB_PADCTL_SS_PORT_MAP_PORTX_INTERNAL(index);
value &= ~XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP_MASK(index);
value |= XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP(index, usb3->port);
padctl_writel(padctl, value, XUSB_PADCTL_SS_PORT_MAP);
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_USB3_PADX_ECTL1(index));
value &= ~(XUSB_PADCTL_UPHY_USB3_PAD_ECTL1_TX_TERM_CTRL_MASK <<
XUSB_PADCTL_UPHY_USB3_PAD_ECTL1_TX_TERM_CTRL_SHIFT);
value |= XUSB_PADCTL_UPHY_USB3_PAD_ECTL1_TX_TERM_CTRL_VAL <<
XUSB_PADCTL_UPHY_USB3_PAD_ECTL1_TX_TERM_CTRL_SHIFT;
padctl_writel(padctl, value, XUSB_PADCTL_UPHY_USB3_PADX_ECTL1(index));
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_USB3_PADX_ECTL2(index));
value &= ~(XUSB_PADCTL_UPHY_USB3_PAD_ECTL2_RX_CTLE_MASK <<
XUSB_PADCTL_UPHY_USB3_PAD_ECTL2_RX_CTLE_SHIFT);
value |= XUSB_PADCTL_UPHY_USB3_PAD_ECTL2_RX_CTLE_VAL <<
XUSB_PADCTL_UPHY_USB3_PAD_ECTL2_RX_CTLE_SHIFT;
padctl_writel(padctl, value, XUSB_PADCTL_UPHY_USB3_PADX_ECTL2(index));
padctl_writel(padctl, XUSB_PADCTL_UPHY_USB3_PAD_ECTL3_RX_DFE_VAL,
XUSB_PADCTL_UPHY_USB3_PADX_ECTL3(index));
value = padctl_readl(padctl, XUSB_PADCTL_UPHY_USB3_PADX_ECTL4(index));
value &= ~(XUSB_PADCTL_UPHY_USB3_PAD_ECTL4_RX_CDR_CTRL_MASK <<
XUSB_PADCTL_UPHY_USB3_PAD_ECTL4_RX_CDR_CTRL_SHIFT);
value |= XUSB_PADCTL_UPHY_USB3_PAD_ECTL4_RX_CDR_CTRL_VAL <<
XUSB_PADCTL_UPHY_USB3_PAD_ECTL4_RX_CDR_CTRL_SHIFT;
padctl_writel(padctl, value, XUSB_PADCTL_UPHY_USB3_PADX_ECTL4(index));
padctl_writel(padctl, XUSB_PADCTL_UPHY_USB3_PAD_ECTL6_RX_EQ_CTRL_H_VAL,
XUSB_PADCTL_UPHY_USB3_PADX_ECTL6(index));
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
value &= ~XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_VCORE_DOWN(index);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
usleep_range(100, 200);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
value &= ~XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN_EARLY(index);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
usleep_range(100, 200);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
value &= ~XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN(index);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
return 0;
}
static int tegra210_usb3_phy_power_off(struct phy *phy)
{
struct device *dev = &phy->dev;
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
struct tegra_xusb_usb3_port *usb3 = tegra210_lane_to_usb3_port(lane);
unsigned int index;
u32 value;
if (!usb3) {
dev_err(dev, "no USB3 port found for lane %u\n", lane->index);
return -ENODEV;
}
index = usb3->base.index;
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
value |= XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN_EARLY(index);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
usleep_range(100, 200);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
value |= XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN(index);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
usleep_range(250, 350);
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
value |= XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_VCORE_DOWN(index);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
return 0;
}
static struct tegra_xusb_lane *
tegra210_pcie_lane_probe(struct tegra_xusb_pad *pad, struct device_node *np,
unsigned int index)
{
struct tegra_xusb_pcie_lane *pcie;
int err;
pcie = kzalloc(sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&pcie->base.list);
pcie->base.soc = &pad->soc->lanes[index];
pcie->base.index = index;
pcie->base.pad = pad;
pcie->base.np = np;
err = tegra_xusb_lane_parse_dt(&pcie->base, np);
if (err < 0) {
kfree(pcie);
return ERR_PTR(err);
}
return &pcie->base;
}
static void tegra210_pcie_lane_remove(struct tegra_xusb_lane *lane)
{
struct tegra_xusb_pcie_lane *pcie = to_pcie_lane(lane);
kfree(pcie);
}
static const struct tegra_xusb_lane_ops tegra210_pcie_lane_ops = {
.probe = tegra210_pcie_lane_probe,
.remove = tegra210_pcie_lane_remove,
.iddq_enable = tegra210_uphy_lane_iddq_enable,
.iddq_disable = tegra210_uphy_lane_iddq_disable,
.enable_phy_sleepwalk = tegra210_usb3_enable_phy_sleepwalk,
.disable_phy_sleepwalk = tegra210_usb3_disable_phy_sleepwalk,
.enable_phy_wake = tegra210_usb3_enable_phy_wake,
.disable_phy_wake = tegra210_usb3_disable_phy_wake,
.remote_wake_detected = tegra210_usb3_phy_remote_wake_detected,
};
static int tegra210_pcie_phy_init(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
mutex_lock(&padctl->lock);
tegra210_uphy_init(padctl);
mutex_unlock(&padctl->lock);
return 0;
}
static int tegra210_pcie_phy_power_on(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
int err = 0;
mutex_lock(&padctl->lock);
if (tegra_xusb_lane_check(lane, "usb3-ss"))
err = tegra210_usb3_phy_power_on(phy);
mutex_unlock(&padctl->lock);
return err;
}
static int tegra210_pcie_phy_power_off(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
int err = 0;
mutex_lock(&padctl->lock);
if (tegra_xusb_lane_check(lane, "usb3-ss"))
err = tegra210_usb3_phy_power_off(phy);
mutex_unlock(&padctl->lock);
return err;
}
static const struct phy_ops tegra210_pcie_phy_ops = {
.init = tegra210_pcie_phy_init,
.power_on = tegra210_pcie_phy_power_on,
.power_off = tegra210_pcie_phy_power_off,
.owner = THIS_MODULE,
};
static struct tegra_xusb_pad *
tegra210_pcie_pad_probe(struct tegra_xusb_padctl *padctl,
const struct tegra_xusb_pad_soc *soc,
struct device_node *np)
{
struct tegra_xusb_pcie_pad *pcie;
struct tegra_xusb_pad *pad;
int err;
pcie = kzalloc(sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return ERR_PTR(-ENOMEM);
pad = &pcie->base;
pad->ops = &tegra210_pcie_lane_ops;
pad->soc = soc;
err = tegra_xusb_pad_init(pad, padctl, np);
if (err < 0) {
kfree(pcie);
goto out;
}
pcie->pll = devm_clk_get(&pad->dev, "pll");
if (IS_ERR(pcie->pll)) {
err = PTR_ERR(pcie->pll);
dev_err(&pad->dev, "failed to get PLL: %d\n", err);
goto unregister;
}
pcie->rst = devm_reset_control_get(&pad->dev, "phy");
if (IS_ERR(pcie->rst)) {
err = PTR_ERR(pcie->rst);
dev_err(&pad->dev, "failed to get PCIe pad reset: %d\n", err);
goto unregister;
}
err = tegra_xusb_pad_register(pad, &tegra210_pcie_phy_ops);
if (err < 0)
goto unregister;
dev_set_drvdata(&pad->dev, pad);
return pad;
unregister:
device_unregister(&pad->dev);
out:
return ERR_PTR(err);
}
static void tegra210_pcie_pad_remove(struct tegra_xusb_pad *pad)
{
struct tegra_xusb_pcie_pad *pcie = to_pcie_pad(pad);
kfree(pcie);
}
static const struct tegra_xusb_pad_ops tegra210_pcie_ops = {
.probe = tegra210_pcie_pad_probe,
.remove = tegra210_pcie_pad_remove,
};
static const struct tegra_xusb_pad_soc tegra210_pcie_pad = {
.name = "pcie",
.num_lanes = ARRAY_SIZE(tegra210_pcie_lanes),
.lanes = tegra210_pcie_lanes,
.ops = &tegra210_pcie_ops,
};
static const struct tegra_xusb_lane_soc tegra210_sata_lanes[] = {
TEGRA210_UPHY_LANE("sata-0", 0x028, 30, 0x3, pcie, XUSB_PADCTL_UPHY_MISC_PAD_S0_CTL2),
};
static struct tegra_xusb_lane *
tegra210_sata_lane_probe(struct tegra_xusb_pad *pad, struct device_node *np,
unsigned int index)
{
struct tegra_xusb_sata_lane *sata;
int err;
sata = kzalloc(sizeof(*sata), GFP_KERNEL);
if (!sata)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&sata->base.list);
sata->base.soc = &pad->soc->lanes[index];
sata->base.index = index;
sata->base.pad = pad;
sata->base.np = np;
err = tegra_xusb_lane_parse_dt(&sata->base, np);
if (err < 0) {
kfree(sata);
return ERR_PTR(err);
}
return &sata->base;
}
static void tegra210_sata_lane_remove(struct tegra_xusb_lane *lane)
{
struct tegra_xusb_sata_lane *sata = to_sata_lane(lane);
kfree(sata);
}
static const struct tegra_xusb_lane_ops tegra210_sata_lane_ops = {
.probe = tegra210_sata_lane_probe,
.remove = tegra210_sata_lane_remove,
.iddq_enable = tegra210_uphy_lane_iddq_enable,
.iddq_disable = tegra210_uphy_lane_iddq_disable,
.enable_phy_sleepwalk = tegra210_usb3_enable_phy_sleepwalk,
.disable_phy_sleepwalk = tegra210_usb3_disable_phy_sleepwalk,
.enable_phy_wake = tegra210_usb3_enable_phy_wake,
.disable_phy_wake = tegra210_usb3_disable_phy_wake,
.remote_wake_detected = tegra210_usb3_phy_remote_wake_detected,
};
static int tegra210_sata_phy_init(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
mutex_lock(&padctl->lock);
tegra210_uphy_init(padctl);
mutex_unlock(&padctl->lock);
return 0;
}
static int tegra210_sata_phy_power_on(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
int err = 0;
mutex_lock(&padctl->lock);
if (tegra_xusb_lane_check(lane, "usb3-ss"))
err = tegra210_usb3_phy_power_on(phy);
mutex_unlock(&padctl->lock);
return err;
}
static int tegra210_sata_phy_power_off(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
int err = 0;
mutex_lock(&padctl->lock);
if (tegra_xusb_lane_check(lane, "usb3-ss"))
err = tegra210_usb3_phy_power_off(phy);
mutex_unlock(&padctl->lock);
return err;
}
static const struct phy_ops tegra210_sata_phy_ops = {
.init = tegra210_sata_phy_init,
.power_on = tegra210_sata_phy_power_on,
.power_off = tegra210_sata_phy_power_off,
.owner = THIS_MODULE,
};
static struct tegra_xusb_pad *
tegra210_sata_pad_probe(struct tegra_xusb_padctl *padctl,
const struct tegra_xusb_pad_soc *soc,
struct device_node *np)
{
struct tegra_xusb_sata_pad *sata;
struct tegra_xusb_pad *pad;
int err;
sata = kzalloc(sizeof(*sata), GFP_KERNEL);
if (!sata)
return ERR_PTR(-ENOMEM);
pad = &sata->base;
pad->ops = &tegra210_sata_lane_ops;
pad->soc = soc;
err = tegra_xusb_pad_init(pad, padctl, np);
if (err < 0) {
kfree(sata);
goto out;
}
sata->rst = devm_reset_control_get(&pad->dev, "phy");
if (IS_ERR(sata->rst)) {
err = PTR_ERR(sata->rst);
dev_err(&pad->dev, "failed to get SATA pad reset: %d\n", err);
goto unregister;
}
err = tegra_xusb_pad_register(pad, &tegra210_sata_phy_ops);
if (err < 0)
goto unregister;
dev_set_drvdata(&pad->dev, pad);
return pad;
unregister:
device_unregister(&pad->dev);
out:
return ERR_PTR(err);
}
static void tegra210_sata_pad_remove(struct tegra_xusb_pad *pad)
{
struct tegra_xusb_sata_pad *sata = to_sata_pad(pad);
kfree(sata);
}
static const struct tegra_xusb_pad_ops tegra210_sata_ops = {
.probe = tegra210_sata_pad_probe,
.remove = tegra210_sata_pad_remove,
};
static const struct tegra_xusb_pad_soc tegra210_sata_pad = {
.name = "sata",
.num_lanes = ARRAY_SIZE(tegra210_sata_lanes),
.lanes = tegra210_sata_lanes,
.ops = &tegra210_sata_ops,
};
static const struct tegra_xusb_pad_soc * const tegra210_pads[] = {
&tegra210_usb2_pad,
&tegra210_hsic_pad,
&tegra210_pcie_pad,
&tegra210_sata_pad,
};
static int tegra210_usb2_port_enable(struct tegra_xusb_port *port)
{
return 0;
}
static void tegra210_usb2_port_disable(struct tegra_xusb_port *port)
{
}
static struct tegra_xusb_lane *
tegra210_usb2_port_map(struct tegra_xusb_port *port)
{
return tegra_xusb_find_lane(port->padctl, "usb2", port->index);
}
static const struct tegra_xusb_port_ops tegra210_usb2_port_ops = {
.release = tegra_xusb_usb2_port_release,
.remove = tegra_xusb_usb2_port_remove,
.enable = tegra210_usb2_port_enable,
.disable = tegra210_usb2_port_disable,
.map = tegra210_usb2_port_map,
};
static int tegra210_hsic_port_enable(struct tegra_xusb_port *port)
{
return 0;
}
static void tegra210_hsic_port_disable(struct tegra_xusb_port *port)
{
}
static struct tegra_xusb_lane *
tegra210_hsic_port_map(struct tegra_xusb_port *port)
{
return tegra_xusb_find_lane(port->padctl, "hsic", port->index);
}
static const struct tegra_xusb_port_ops tegra210_hsic_port_ops = {
.release = tegra_xusb_hsic_port_release,
.enable = tegra210_hsic_port_enable,
.disable = tegra210_hsic_port_disable,
.map = tegra210_hsic_port_map,
};
static int tegra210_usb3_port_enable(struct tegra_xusb_port *port)
{
return 0;
}
static void tegra210_usb3_port_disable(struct tegra_xusb_port *port)
{
}
static struct tegra_xusb_lane *
tegra210_usb3_port_map(struct tegra_xusb_port *port)
{
return tegra_xusb_port_find_lane(port, tegra210_usb3_map, "usb3-ss");
}
static const struct tegra_xusb_port_ops tegra210_usb3_port_ops = {
.release = tegra_xusb_usb3_port_release,
.enable = tegra210_usb3_port_enable,
.disable = tegra210_usb3_port_disable,
.map = tegra210_usb3_port_map,
};
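/*
 * If either the ZIP or ZIN status bit is set in the pad's battery charger
 * control register, toggle the VBUS override off and back on and return 1 to
 * signal that the UTMI port was reset.
 */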
static int tegra210_utmi_port_reset(struct phy *phy)
{
struct tegra_xusb_padctl *padctl;
struct tegra_xusb_lane *lane;
u32 value;
lane = phy_get_drvdata(phy);
padctl = lane->pad->padctl;
value = padctl_readl(padctl,
XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPADX_CTL0(lane->index));
if ((value & XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL0_ZIP) ||
(value & XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL0_ZIN)) {
tegra210_xusb_padctl_vbus_override(padctl, false);
tegra210_xusb_padctl_vbus_override(padctl, true);
return 1;
}
return 0;
}
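/*
 * Extract the per-pad HS current level, the HS termination range adjustment
 * and the RPD control value from the SKU and USB calibration fuses.
 */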
static int
tegra210_xusb_read_fuse_calibration(struct tegra210_xusb_fuse_calibration *fuse)
{
unsigned int i;
u32 value;
int err;
err = tegra_fuse_readl(TEGRA_FUSE_SKU_CALIB_0, &value);
if (err < 0)
return err;
for (i = 0; i < ARRAY_SIZE(fuse->hs_curr_level); i++) {
fuse->hs_curr_level[i] =
(value >> FUSE_SKU_CALIB_HS_CURR_LEVEL_PADX_SHIFT(i)) &
FUSE_SKU_CALIB_HS_CURR_LEVEL_PAD_MASK;
}
fuse->hs_term_range_adj =
(value >> FUSE_SKU_CALIB_HS_TERM_RANGE_ADJ_SHIFT) &
FUSE_SKU_CALIB_HS_TERM_RANGE_ADJ_MASK;
err = tegra_fuse_readl(TEGRA_FUSE_USB_CALIB_EXT_0, &value);
if (err < 0)
return err;
fuse->rpd_ctrl =
(value >> FUSE_USB_CALIB_EXT_RPD_CTRL_SHIFT) &
FUSE_USB_CALIB_EXT_RPD_CTRL_MASK;
return 0;
}
static struct tegra_xusb_padctl *
tegra210_xusb_padctl_probe(struct device *dev,
const struct tegra_xusb_padctl_soc *soc)
{
struct tegra210_xusb_padctl *padctl;
struct platform_device *pdev;
struct device_node *np;
int err;
padctl = devm_kzalloc(dev, sizeof(*padctl), GFP_KERNEL);
if (!padctl)
return ERR_PTR(-ENOMEM);
padctl->base.dev = dev;
padctl->base.soc = soc;
err = tegra210_xusb_read_fuse_calibration(&padctl->fuse);
if (err < 0)
return ERR_PTR(err);
np = of_parse_phandle(dev->of_node, "nvidia,pmc", 0);
if (!np) {
dev_warn(dev, "nvidia,pmc property is missing\n");
goto out;
}
pdev = of_find_device_by_node(np);
if (!pdev) {
dev_warn(dev, "PMC device is not available\n");
goto out;
}
if (!platform_get_drvdata(pdev))
return ERR_PTR(-EPROBE_DEFER);
padctl->regmap = dev_get_regmap(&pdev->dev, "usb_sleepwalk");
if (!padctl->regmap)
dev_info(dev, "failed to find PMC regmap\n");
out:
return &padctl->base;
}
static void tegra210_xusb_padctl_remove(struct tegra_xusb_padctl *padctl)
{
}
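/*
 * Save and restore the pad mux and port mapping registers around system
 * suspend. On restore, the UPHY lanes are held in IDDQ while USB3_PAD_MUX is
 * rewritten and released again afterwards.
 */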
static void tegra210_xusb_padctl_save(struct tegra_xusb_padctl *padctl)
{
struct tegra210_xusb_padctl *priv = to_tegra210_xusb_padctl(padctl);
priv->context.usb2_pad_mux =
padctl_readl(padctl, XUSB_PADCTL_USB2_PAD_MUX);
priv->context.usb2_port_cap =
padctl_readl(padctl, XUSB_PADCTL_USB2_PORT_CAP);
priv->context.ss_port_map =
padctl_readl(padctl, XUSB_PADCTL_SS_PORT_MAP);
priv->context.usb3_pad_mux =
padctl_readl(padctl, XUSB_PADCTL_USB3_PAD_MUX);
}
static void tegra210_xusb_padctl_restore(struct tegra_xusb_padctl *padctl)
{
struct tegra210_xusb_padctl *priv = to_tegra210_xusb_padctl(padctl);
struct tegra_xusb_lane *lane;
padctl_writel(padctl, priv->context.usb2_pad_mux,
XUSB_PADCTL_USB2_PAD_MUX);
padctl_writel(padctl, priv->context.usb2_port_cap,
XUSB_PADCTL_USB2_PORT_CAP);
padctl_writel(padctl, priv->context.ss_port_map,
XUSB_PADCTL_SS_PORT_MAP);
list_for_each_entry(lane, &padctl->lanes, list) {
if (lane->pad->ops->iddq_enable)
tegra210_uphy_lane_iddq_enable(lane);
}
padctl_writel(padctl, priv->context.usb3_pad_mux,
XUSB_PADCTL_USB3_PAD_MUX);
list_for_each_entry(lane, &padctl->lanes, list) {
if (lane->pad->ops->iddq_disable)
tegra210_uphy_lane_iddq_disable(lane);
}
}
static int tegra210_xusb_padctl_suspend_noirq(struct tegra_xusb_padctl *padctl)
{
mutex_lock(&padctl->lock);
tegra210_uphy_deinit(padctl);
tegra210_xusb_padctl_save(padctl);
mutex_unlock(&padctl->lock);
return 0;
}
static int tegra210_xusb_padctl_resume_noirq(struct tegra_xusb_padctl *padctl)
{
mutex_lock(&padctl->lock);
tegra210_xusb_padctl_restore(padctl);
tegra210_uphy_init(padctl);
mutex_unlock(&padctl->lock);
return 0;
}
static const struct tegra_xusb_padctl_ops tegra210_xusb_padctl_ops = {
.probe = tegra210_xusb_padctl_probe,
.remove = tegra210_xusb_padctl_remove,
.suspend_noirq = tegra210_xusb_padctl_suspend_noirq,
.resume_noirq = tegra210_xusb_padctl_resume_noirq,
.usb3_set_lfps_detect = tegra210_usb3_set_lfps_detect,
.hsic_set_idle = tegra210_hsic_set_idle,
.vbus_override = tegra210_xusb_padctl_vbus_override,
.utmi_port_reset = tegra210_utmi_port_reset,
};
static const char * const tegra210_xusb_padctl_supply_names[] = {
"avdd-pll-utmip",
"avdd-pll-uerefe",
"dvdd-pex-pll",
"hvdd-pex-pll-e",
};
const struct tegra_xusb_padctl_soc tegra210_xusb_padctl_soc = {
.num_pads = ARRAY_SIZE(tegra210_pads),
.pads = tegra210_pads,
.ports = {
.usb2 = {
.ops = &tegra210_usb2_port_ops,
.count = 4,
},
.hsic = {
.ops = &tegra210_hsic_port_ops,
.count = 1,
},
.usb3 = {
.ops = &tegra210_usb3_port_ops,
.count = 4,
},
},
.ops = &tegra210_xusb_padctl_ops,
.supply_names = tegra210_xusb_padctl_supply_names,
.num_supplies = ARRAY_SIZE(tegra210_xusb_padctl_supply_names),
.need_fake_usb3_port = true,
};
EXPORT_SYMBOL_GPL(tegra210_xusb_padctl_soc);
MODULE_AUTHOR("Andrew Bresticker <[email protected]>");
MODULE_DESCRIPTION("NVIDIA Tegra 210 XUSB Pad Controller driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/phy/tegra/xusb-tegra210.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/phy/tegra/xusb.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <soc/tegra/fuse.h>
#include "xusb.h"
static struct phy *tegra_xusb_pad_of_xlate(struct device *dev,
struct of_phandle_args *args)
{
struct tegra_xusb_pad *pad = dev_get_drvdata(dev);
struct phy *phy = NULL;
unsigned int i;
if (args->args_count != 0)
return ERR_PTR(-EINVAL);
for (i = 0; i < pad->soc->num_lanes; i++) {
if (!pad->lanes[i])
continue;
if (pad->lanes[i]->dev.of_node == args->np) {
phy = pad->lanes[i];
break;
}
}
if (phy == NULL)
phy = ERR_PTR(-ENODEV);
return phy;
}
static const struct of_device_id tegra_xusb_padctl_of_match[] = {
#if defined(CONFIG_ARCH_TEGRA_124_SOC) || defined(CONFIG_ARCH_TEGRA_132_SOC)
{
.compatible = "nvidia,tegra124-xusb-padctl",
.data = &tegra124_xusb_padctl_soc,
},
#endif
#if defined(CONFIG_ARCH_TEGRA_210_SOC)
{
.compatible = "nvidia,tegra210-xusb-padctl",
.data = &tegra210_xusb_padctl_soc,
},
#endif
#if defined(CONFIG_ARCH_TEGRA_186_SOC)
{
.compatible = "nvidia,tegra186-xusb-padctl",
.data = &tegra186_xusb_padctl_soc,
},
#endif
#if defined(CONFIG_ARCH_TEGRA_194_SOC)
{
.compatible = "nvidia,tegra194-xusb-padctl",
.data = &tegra194_xusb_padctl_soc,
},
#endif
#if defined(CONFIG_ARCH_TEGRA_234_SOC)
{
.compatible = "nvidia,tegra234-xusb-padctl",
.data = &tegra234_xusb_padctl_soc,
},
#endif
{ }
};
MODULE_DEVICE_TABLE(of, tegra_xusb_padctl_of_match);
static struct device_node *
tegra_xusb_find_pad_node(struct tegra_xusb_padctl *padctl, const char *name)
{
struct device_node *pads, *np;
pads = of_get_child_by_name(padctl->dev->of_node, "pads");
if (!pads)
return NULL;
np = of_get_child_by_name(pads, name);
of_node_put(pads);
return np;
}
static struct device_node *
tegra_xusb_pad_find_phy_node(struct tegra_xusb_pad *pad, unsigned int index)
{
struct device_node *np, *lanes;
lanes = of_get_child_by_name(pad->dev.of_node, "lanes");
if (!lanes)
return NULL;
np = of_get_child_by_name(lanes, pad->soc->lanes[index].name);
of_node_put(lanes);
return np;
}
int tegra_xusb_lane_parse_dt(struct tegra_xusb_lane *lane,
struct device_node *np)
{
struct device *dev = &lane->pad->dev;
const char *function;
int err;
err = of_property_read_string(np, "nvidia,function", &function);
if (err < 0)
return err;
err = match_string(lane->soc->funcs, lane->soc->num_funcs, function);
if (err < 0) {
dev_err(dev, "invalid function \"%s\" for lane \"%pOFn\"\n",
function, np);
return err;
}
lane->function = err;
return 0;
}
static void tegra_xusb_lane_destroy(struct phy *phy)
{
if (phy) {
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
lane->pad->ops->remove(lane);
phy_destroy(phy);
}
}
static void tegra_xusb_pad_release(struct device *dev)
{
struct tegra_xusb_pad *pad = to_tegra_xusb_pad(dev);
pad->soc->ops->remove(pad);
}
static const struct device_type tegra_xusb_pad_type = {
.release = tegra_xusb_pad_release,
};
int tegra_xusb_pad_init(struct tegra_xusb_pad *pad,
struct tegra_xusb_padctl *padctl,
struct device_node *np)
{
int err;
device_initialize(&pad->dev);
INIT_LIST_HEAD(&pad->list);
pad->dev.parent = padctl->dev;
pad->dev.type = &tegra_xusb_pad_type;
pad->dev.of_node = np;
pad->padctl = padctl;
err = dev_set_name(&pad->dev, "%s", pad->soc->name);
if (err < 0)
goto unregister;
err = device_add(&pad->dev);
if (err < 0)
goto unregister;
return 0;
unregister:
device_unregister(&pad->dev);
return err;
}
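/*
 * Create a PHY for every enabled lane listed under the pad's "lanes" node,
 * bind the SoC-specific lane state to it and register the pad as a PHY
 * provider using the xlate helper above.
 */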
int tegra_xusb_pad_register(struct tegra_xusb_pad *pad,
const struct phy_ops *ops)
{
struct device_node *children;
struct phy *lane;
unsigned int i;
int err;
children = of_get_child_by_name(pad->dev.of_node, "lanes");
if (!children)
return -ENODEV;
pad->lanes = devm_kcalloc(&pad->dev, pad->soc->num_lanes, sizeof(lane),
GFP_KERNEL);
if (!pad->lanes) {
of_node_put(children);
return -ENOMEM;
}
for (i = 0; i < pad->soc->num_lanes; i++) {
struct device_node *np = tegra_xusb_pad_find_phy_node(pad, i);
struct tegra_xusb_lane *lane;
/* skip disabled lanes */
if (!np || !of_device_is_available(np)) {
of_node_put(np);
continue;
}
pad->lanes[i] = phy_create(&pad->dev, np, ops);
if (IS_ERR(pad->lanes[i])) {
err = PTR_ERR(pad->lanes[i]);
of_node_put(np);
goto remove;
}
lane = pad->ops->probe(pad, np, i);
if (IS_ERR(lane)) {
phy_destroy(pad->lanes[i]);
err = PTR_ERR(lane);
goto remove;
}
list_add_tail(&lane->list, &pad->padctl->lanes);
phy_set_drvdata(pad->lanes[i], lane);
}
pad->provider = of_phy_provider_register_full(&pad->dev, children,
tegra_xusb_pad_of_xlate);
if (IS_ERR(pad->provider)) {
err = PTR_ERR(pad->provider);
goto remove;
}
return 0;
remove:
while (i--)
tegra_xusb_lane_destroy(pad->lanes[i]);
of_node_put(children);
return err;
}
void tegra_xusb_pad_unregister(struct tegra_xusb_pad *pad)
{
unsigned int i = pad->soc->num_lanes;
of_phy_provider_unregister(pad->provider);
while (i--)
tegra_xusb_lane_destroy(pad->lanes[i]);
device_unregister(&pad->dev);
}
static struct tegra_xusb_pad *
tegra_xusb_pad_create(struct tegra_xusb_padctl *padctl,
const struct tegra_xusb_pad_soc *soc)
{
struct tegra_xusb_pad *pad;
struct device_node *np;
int err;
np = tegra_xusb_find_pad_node(padctl, soc->name);
if (!np || !of_device_is_available(np))
return NULL;
pad = soc->ops->probe(padctl, soc, np);
if (IS_ERR(pad)) {
err = PTR_ERR(pad);
dev_err(padctl->dev, "failed to create pad %s: %d\n",
soc->name, err);
return ERR_PTR(err);
}
/* XXX move this into ->probe() to avoid string comparison */
if (strcmp(soc->name, "pcie") == 0)
padctl->pcie = pad;
if (strcmp(soc->name, "sata") == 0)
padctl->sata = pad;
if (strcmp(soc->name, "usb2") == 0)
padctl->usb2 = pad;
if (strcmp(soc->name, "ulpi") == 0)
padctl->ulpi = pad;
if (strcmp(soc->name, "hsic") == 0)
padctl->hsic = pad;
return pad;
}
static void __tegra_xusb_remove_pads(struct tegra_xusb_padctl *padctl)
{
struct tegra_xusb_pad *pad, *tmp;
list_for_each_entry_safe_reverse(pad, tmp, &padctl->pads, list) {
list_del(&pad->list);
tegra_xusb_pad_unregister(pad);
}
}
static void tegra_xusb_remove_pads(struct tegra_xusb_padctl *padctl)
{
mutex_lock(&padctl->lock);
__tegra_xusb_remove_pads(padctl);
mutex_unlock(&padctl->lock);
}
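/*
 * Program the pin-mux function for a lane. Lanes supporting only a single
 * function are skipped; multi-function lanes are put into IDDQ (where
 * supported) while the function field is updated.
 */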
static void tegra_xusb_lane_program(struct tegra_xusb_lane *lane)
{
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
const struct tegra_xusb_lane_soc *soc = lane->soc;
u32 value;
/* skip single function lanes */
if (soc->num_funcs < 2)
return;
if (lane->pad->ops->iddq_enable)
lane->pad->ops->iddq_enable(lane);
/* choose function */
value = padctl_readl(padctl, soc->offset);
value &= ~(soc->mask << soc->shift);
value |= lane->function << soc->shift;
padctl_writel(padctl, value, soc->offset);
if (lane->pad->ops->iddq_disable)
lane->pad->ops->iddq_disable(lane);
}
static void tegra_xusb_pad_program(struct tegra_xusb_pad *pad)
{
unsigned int i;
for (i = 0; i < pad->soc->num_lanes; i++) {
struct tegra_xusb_lane *lane;
if (pad->lanes[i]) {
lane = phy_get_drvdata(pad->lanes[i]);
tegra_xusb_lane_program(lane);
}
}
}
static int tegra_xusb_setup_pads(struct tegra_xusb_padctl *padctl)
{
struct tegra_xusb_pad *pad;
unsigned int i;
mutex_lock(&padctl->lock);
for (i = 0; i < padctl->soc->num_pads; i++) {
const struct tegra_xusb_pad_soc *soc = padctl->soc->pads[i];
int err;
pad = tegra_xusb_pad_create(padctl, soc);
if (IS_ERR(pad)) {
err = PTR_ERR(pad);
dev_err(padctl->dev, "failed to create pad %s: %d\n",
soc->name, err);
__tegra_xusb_remove_pads(padctl);
mutex_unlock(&padctl->lock);
return err;
}
if (!pad)
continue;
list_add_tail(&pad->list, &padctl->pads);
}
list_for_each_entry(pad, &padctl->pads, list)
tegra_xusb_pad_program(pad);
mutex_unlock(&padctl->lock);
return 0;
}
bool tegra_xusb_lane_check(struct tegra_xusb_lane *lane,
const char *function)
{
const char *func = lane->soc->funcs[lane->function];
return strcmp(function, func) == 0;
}
struct tegra_xusb_lane *tegra_xusb_find_lane(struct tegra_xusb_padctl *padctl,
const char *type,
unsigned int index)
{
struct tegra_xusb_lane *lane, *hit = ERR_PTR(-ENODEV);
char *name;
name = kasprintf(GFP_KERNEL, "%s-%u", type, index);
if (!name)
return ERR_PTR(-ENOMEM);
list_for_each_entry(lane, &padctl->lanes, list) {
if (strcmp(lane->soc->name, name) == 0) {
hit = lane;
break;
}
}
kfree(name);
return hit;
}
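/*
 * Find the lane mapped to a port by walking the SoC lane map, considering
 * only entries that match the port index and are muxed to the requested
 * function; conflicting matches are reported and the first match wins.
 */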
struct tegra_xusb_lane *
tegra_xusb_port_find_lane(struct tegra_xusb_port *port,
const struct tegra_xusb_lane_map *map,
const char *function)
{
struct tegra_xusb_lane *lane, *match = ERR_PTR(-ENODEV);
for (; map->type; map++) {
if (port->index != map->port)
continue;
lane = tegra_xusb_find_lane(port->padctl, map->type,
map->index);
if (IS_ERR(lane))
continue;
if (!tegra_xusb_lane_check(lane, function))
continue;
if (!IS_ERR(match))
dev_err(&port->dev, "conflicting match: %s-%u / %s\n",
map->type, map->index, match->soc->name);
else
match = lane;
}
return match;
}
static struct device_node *
tegra_xusb_find_port_node(struct tegra_xusb_padctl *padctl, const char *type,
unsigned int index)
{
struct device_node *ports, *np;
char *name;
ports = of_get_child_by_name(padctl->dev->of_node, "ports");
if (!ports)
return NULL;
name = kasprintf(GFP_KERNEL, "%s-%u", type, index);
if (!name) {
of_node_put(ports);
return NULL;
}
np = of_get_child_by_name(ports, name);
kfree(name);
of_node_put(ports);
return np;
}
struct tegra_xusb_port *
tegra_xusb_find_port(struct tegra_xusb_padctl *padctl, const char *type,
unsigned int index)
{
struct tegra_xusb_port *port;
struct device_node *np;
np = tegra_xusb_find_port_node(padctl, type, index);
if (!np)
return NULL;
list_for_each_entry(port, &padctl->ports, list) {
if (np == port->dev.of_node) {
of_node_put(np);
return port;
}
}
of_node_put(np);
return NULL;
}
struct tegra_xusb_usb2_port *
tegra_xusb_find_usb2_port(struct tegra_xusb_padctl *padctl, unsigned int index)
{
struct tegra_xusb_port *port;
port = tegra_xusb_find_port(padctl, "usb2", index);
if (port)
return to_usb2_port(port);
return NULL;
}
struct tegra_xusb_usb3_port *
tegra_xusb_find_usb3_port(struct tegra_xusb_padctl *padctl, unsigned int index)
{
struct tegra_xusb_port *port;
port = tegra_xusb_find_port(padctl, "usb3", index);
if (port)
return to_usb3_port(port);
return NULL;
}
static void tegra_xusb_port_release(struct device *dev)
{
struct tegra_xusb_port *port = to_tegra_xusb_port(dev);
if (port->ops->release)
port->ops->release(port);
}
static const struct device_type tegra_xusb_port_type = {
.release = tegra_xusb_port_release,
};
static int tegra_xusb_port_init(struct tegra_xusb_port *port,
struct tegra_xusb_padctl *padctl,
struct device_node *np,
const char *name,
unsigned int index)
{
int err;
INIT_LIST_HEAD(&port->list);
port->padctl = padctl;
port->index = index;
device_initialize(&port->dev);
port->dev.type = &tegra_xusb_port_type;
port->dev.of_node = of_node_get(np);
port->dev.parent = padctl->dev;
err = dev_set_name(&port->dev, "%s-%u", name, index);
if (err < 0)
goto unregister;
err = device_add(&port->dev);
if (err < 0)
goto unregister;
return 0;
unregister:
device_unregister(&port->dev);
return err;
}
static void tegra_xusb_port_unregister(struct tegra_xusb_port *port)
{
if (!IS_ERR_OR_NULL(port->usb_role_sw)) {
of_platform_depopulate(&port->dev);
usb_role_switch_unregister(port->usb_role_sw);
cancel_work_sync(&port->usb_phy_work);
usb_remove_phy(&port->usb_phy);
port->usb_phy.dev->driver = NULL;
}
if (port->ops->remove)
port->ops->remove(port);
device_unregister(&port->dev);
}
static const char *const modes[] = {
[USB_DR_MODE_UNKNOWN] = "",
[USB_DR_MODE_HOST] = "host",
[USB_DR_MODE_PERIPHERAL] = "peripheral",
[USB_DR_MODE_OTG] = "otg",
};
static const char * const usb_roles[] = {
[USB_ROLE_NONE] = "none",
[USB_ROLE_HOST] = "host",
[USB_ROLE_DEVICE] = "device",
};
static enum usb_phy_events to_usb_phy_event(enum usb_role role)
{
switch (role) {
case USB_ROLE_DEVICE:
return USB_EVENT_VBUS;
case USB_ROLE_HOST:
return USB_EVENT_ID;
default:
return USB_EVENT_NONE;
}
}
static void tegra_xusb_usb_phy_work(struct work_struct *work)
{
struct tegra_xusb_port *port = container_of(work,
struct tegra_xusb_port,
usb_phy_work);
enum usb_role role = usb_role_switch_get_role(port->usb_role_sw);
usb_phy_set_event(&port->usb_phy, to_usb_phy_event(role));
dev_dbg(&port->dev, "%s(): calling notifier for role %s\n", __func__,
usb_roles[role]);
atomic_notifier_call_chain(&port->usb_phy.notifier, 0, &port->usb_phy);
}
static int tegra_xusb_role_sw_set(struct usb_role_switch *sw,
enum usb_role role)
{
struct tegra_xusb_port *port = usb_role_switch_get_drvdata(sw);
dev_dbg(&port->dev, "%s(): role %s\n", __func__, usb_roles[role]);
schedule_work(&port->usb_phy_work);
return 0;
}
static int tegra_xusb_set_peripheral(struct usb_otg *otg,
struct usb_gadget *gadget)
{
struct tegra_xusb_port *port = container_of(otg->usb_phy,
struct tegra_xusb_port,
usb_phy);
if (gadget != NULL)
schedule_work(&port->usb_phy_work);
return 0;
}
static int tegra_xusb_set_host(struct usb_otg *otg, struct usb_bus *host)
{
struct tegra_xusb_port *port = container_of(otg->usb_phy,
struct tegra_xusb_port,
usb_phy);
if (host != NULL)
schedule_work(&port->usb_phy_work);
return 0;
}
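/*
 * Register a USB role switch for an OTG-capable USB2 port and wire up a
 * minimal USB PHY so that host and device controllers are notified of role
 * changes through the usb_phy notifier chain.
 */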
static int tegra_xusb_setup_usb_role_switch(struct tegra_xusb_port *port)
{
struct tegra_xusb_lane *lane;
struct usb_role_switch_desc role_sx_desc = {
.fwnode = dev_fwnode(&port->dev),
.set = tegra_xusb_role_sw_set,
.allow_userspace_control = true,
};
int err = 0;
	/*
	 * The USB role switch driver needs the parent driver's owner info.
	 * This is a suboptimal solution. TODO: revisit this in a follow-up
	 * patch once changes to the USB role switch driver make an optimal
	 * solution possible.
	 */
port->dev.driver = devm_kzalloc(&port->dev,
sizeof(struct device_driver),
GFP_KERNEL);
if (!port->dev.driver)
return -ENOMEM;
port->dev.driver->owner = THIS_MODULE;
port->usb_role_sw = usb_role_switch_register(&port->dev,
&role_sx_desc);
if (IS_ERR(port->usb_role_sw)) {
err = PTR_ERR(port->usb_role_sw);
dev_err(&port->dev, "failed to register USB role switch: %d",
err);
return err;
}
INIT_WORK(&port->usb_phy_work, tegra_xusb_usb_phy_work);
usb_role_switch_set_drvdata(port->usb_role_sw, port);
port->usb_phy.otg = devm_kzalloc(&port->dev, sizeof(struct usb_otg),
GFP_KERNEL);
if (!port->usb_phy.otg)
return -ENOMEM;
lane = tegra_xusb_find_lane(port->padctl, "usb2", port->index);
/*
* Assign phy dev to usb-phy dev. Host/device drivers can use phy
* reference to retrieve usb-phy details.
*/
port->usb_phy.dev = &lane->pad->lanes[port->index]->dev;
port->usb_phy.dev->driver = port->dev.driver;
port->usb_phy.otg->usb_phy = &port->usb_phy;
port->usb_phy.otg->set_peripheral = tegra_xusb_set_peripheral;
port->usb_phy.otg->set_host = tegra_xusb_set_host;
err = usb_add_phy_dev(&port->usb_phy);
if (err < 0) {
dev_err(&port->dev, "Failed to add USB PHY: %d\n", err);
return err;
}
/* populate connector entry */
of_platform_populate(port->dev.of_node, NULL, NULL, &port->dev);
return err;
}
static void tegra_xusb_parse_usb_role_default_mode(struct tegra_xusb_port *port)
{
enum usb_role role = USB_ROLE_NONE;
enum usb_dr_mode mode = usb_get_role_switch_default_mode(&port->dev);
if (mode == USB_DR_MODE_HOST)
role = USB_ROLE_HOST;
else if (mode == USB_DR_MODE_PERIPHERAL)
role = USB_ROLE_DEVICE;
if (role != USB_ROLE_NONE) {
usb_role_switch_set_role(port->usb_role_sw, role);
dev_dbg(&port->dev, "usb role default mode is %s", modes[mode]);
}
}
static int tegra_xusb_usb2_port_parse_dt(struct tegra_xusb_usb2_port *usb2)
{
struct tegra_xusb_port *port = &usb2->base;
struct device_node *np = port->dev.of_node;
const char *mode;
int err;
usb2->internal = of_property_read_bool(np, "nvidia,internal");
if (!of_property_read_string(np, "mode", &mode)) {
int err = match_string(modes, ARRAY_SIZE(modes), mode);
if (err < 0) {
dev_err(&port->dev, "invalid value %s for \"mode\"\n",
mode);
usb2->mode = USB_DR_MODE_UNKNOWN;
} else {
usb2->mode = err;
}
} else {
usb2->mode = USB_DR_MODE_HOST;
}
/* usb-role-switch property is mandatory for OTG/Peripheral modes */
if (usb2->mode == USB_DR_MODE_PERIPHERAL ||
usb2->mode == USB_DR_MODE_OTG) {
if (of_property_read_bool(np, "usb-role-switch")) {
err = tegra_xusb_setup_usb_role_switch(port);
if (err < 0)
return err;
tegra_xusb_parse_usb_role_default_mode(port);
} else {
dev_err(&port->dev, "usb-role-switch not found for %s mode",
modes[usb2->mode]);
return -EINVAL;
}
}
usb2->supply = regulator_get(&port->dev, "vbus");
return PTR_ERR_OR_ZERO(usb2->supply);
}
static int tegra_xusb_add_usb2_port(struct tegra_xusb_padctl *padctl,
unsigned int index)
{
struct tegra_xusb_usb2_port *usb2;
struct device_node *np;
int err = 0;
/*
* USB2 ports don't require additional properties, but if the port is
* marked as disabled there is no reason to register it.
*/
np = tegra_xusb_find_port_node(padctl, "usb2", index);
if (!np || !of_device_is_available(np))
goto out;
usb2 = kzalloc(sizeof(*usb2), GFP_KERNEL);
if (!usb2) {
err = -ENOMEM;
goto out;
}
err = tegra_xusb_port_init(&usb2->base, padctl, np, "usb2", index);
if (err < 0)
goto out;
usb2->base.ops = padctl->soc->ports.usb2.ops;
usb2->base.lane = usb2->base.ops->map(&usb2->base);
if (IS_ERR(usb2->base.lane)) {
err = PTR_ERR(usb2->base.lane);
tegra_xusb_port_unregister(&usb2->base);
goto out;
}
err = tegra_xusb_usb2_port_parse_dt(usb2);
if (err < 0) {
tegra_xusb_port_unregister(&usb2->base);
goto out;
}
list_add_tail(&usb2->base.list, &padctl->ports);
out:
of_node_put(np);
return err;
}
void tegra_xusb_usb2_port_release(struct tegra_xusb_port *port)
{
struct tegra_xusb_usb2_port *usb2 = to_usb2_port(port);
kfree(usb2);
}
void tegra_xusb_usb2_port_remove(struct tegra_xusb_port *port)
{
struct tegra_xusb_usb2_port *usb2 = to_usb2_port(port);
regulator_put(usb2->supply);
}
static int tegra_xusb_ulpi_port_parse_dt(struct tegra_xusb_ulpi_port *ulpi)
{
struct tegra_xusb_port *port = &ulpi->base;
struct device_node *np = port->dev.of_node;
ulpi->internal = of_property_read_bool(np, "nvidia,internal");
return 0;
}
static int tegra_xusb_add_ulpi_port(struct tegra_xusb_padctl *padctl,
unsigned int index)
{
struct tegra_xusb_ulpi_port *ulpi;
struct device_node *np;
int err = 0;
np = tegra_xusb_find_port_node(padctl, "ulpi", index);
if (!np || !of_device_is_available(np))
goto out;
ulpi = kzalloc(sizeof(*ulpi), GFP_KERNEL);
if (!ulpi) {
err = -ENOMEM;
goto out;
}
err = tegra_xusb_port_init(&ulpi->base, padctl, np, "ulpi", index);
if (err < 0)
goto out;
ulpi->base.ops = padctl->soc->ports.ulpi.ops;
ulpi->base.lane = ulpi->base.ops->map(&ulpi->base);
if (IS_ERR(ulpi->base.lane)) {
err = PTR_ERR(ulpi->base.lane);
tegra_xusb_port_unregister(&ulpi->base);
goto out;
}
err = tegra_xusb_ulpi_port_parse_dt(ulpi);
if (err < 0) {
tegra_xusb_port_unregister(&ulpi->base);
goto out;
}
list_add_tail(&ulpi->base.list, &padctl->ports);
out:
of_node_put(np);
return err;
}
void tegra_xusb_ulpi_port_release(struct tegra_xusb_port *port)
{
struct tegra_xusb_ulpi_port *ulpi = to_ulpi_port(port);
kfree(ulpi);
}
static int tegra_xusb_hsic_port_parse_dt(struct tegra_xusb_hsic_port *hsic)
{
/* XXX */
return 0;
}
static int tegra_xusb_add_hsic_port(struct tegra_xusb_padctl *padctl,
unsigned int index)
{
struct tegra_xusb_hsic_port *hsic;
struct device_node *np;
int err = 0;
np = tegra_xusb_find_port_node(padctl, "hsic", index);
if (!np || !of_device_is_available(np))
goto out;
hsic = kzalloc(sizeof(*hsic), GFP_KERNEL);
if (!hsic) {
err = -ENOMEM;
goto out;
}
err = tegra_xusb_port_init(&hsic->base, padctl, np, "hsic", index);
if (err < 0)
goto out;
hsic->base.ops = padctl->soc->ports.hsic.ops;
hsic->base.lane = hsic->base.ops->map(&hsic->base);
	if (IS_ERR(hsic->base.lane)) {
		err = PTR_ERR(hsic->base.lane);
		tegra_xusb_port_unregister(&hsic->base);
		goto out;
	}
err = tegra_xusb_hsic_port_parse_dt(hsic);
if (err < 0) {
tegra_xusb_port_unregister(&hsic->base);
goto out;
}
list_add_tail(&hsic->base.list, &padctl->ports);
out:
of_node_put(np);
return err;
}
void tegra_xusb_hsic_port_release(struct tegra_xusb_port *port)
{
struct tegra_xusb_hsic_port *hsic = to_hsic_port(port);
kfree(hsic);
}
static int tegra_xusb_usb3_port_parse_dt(struct tegra_xusb_usb3_port *usb3)
{
struct tegra_xusb_port *port = &usb3->base;
struct device_node *np = port->dev.of_node;
enum usb_device_speed maximum_speed;
u32 value;
int err;
err = of_property_read_u32(np, "nvidia,usb2-companion", &value);
if (err < 0) {
dev_err(&port->dev, "failed to read port: %d\n", err);
return err;
}
usb3->port = value;
usb3->internal = of_property_read_bool(np, "nvidia,internal");
if (device_property_present(&port->dev, "maximum-speed")) {
maximum_speed = usb_get_maximum_speed(&port->dev);
if (maximum_speed == USB_SPEED_SUPER)
usb3->disable_gen2 = true;
else if (maximum_speed == USB_SPEED_SUPER_PLUS)
usb3->disable_gen2 = false;
else
return -EINVAL;
}
return 0;
}
static int tegra_xusb_add_usb3_port(struct tegra_xusb_padctl *padctl,
unsigned int index)
{
struct tegra_xusb_usb3_port *usb3;
struct device_node *np;
int err = 0;
/*
* If there is no supplemental configuration in the device tree the
* port is unusable. But it is valid to configure only a single port,
* hence return 0 instead of an error to allow ports to be optional.
*/
np = tegra_xusb_find_port_node(padctl, "usb3", index);
if (!np || !of_device_is_available(np))
goto out;
usb3 = kzalloc(sizeof(*usb3), GFP_KERNEL);
if (!usb3) {
err = -ENOMEM;
goto out;
}
err = tegra_xusb_port_init(&usb3->base, padctl, np, "usb3", index);
if (err < 0)
goto out;
usb3->base.ops = padctl->soc->ports.usb3.ops;
usb3->base.lane = usb3->base.ops->map(&usb3->base);
	if (IS_ERR(usb3->base.lane)) {
		err = PTR_ERR(usb3->base.lane);
		tegra_xusb_port_unregister(&usb3->base);
		goto out;
	}
err = tegra_xusb_usb3_port_parse_dt(usb3);
if (err < 0) {
tegra_xusb_port_unregister(&usb3->base);
goto out;
}
list_add_tail(&usb3->base.list, &padctl->ports);
out:
of_node_put(np);
return err;
}
void tegra_xusb_usb3_port_release(struct tegra_xusb_port *port)
{
struct tegra_xusb_usb3_port *usb3 = to_usb3_port(port);
kfree(usb3);
}
static void __tegra_xusb_remove_ports(struct tegra_xusb_padctl *padctl)
{
struct tegra_xusb_port *port, *tmp;
list_for_each_entry_safe_reverse(port, tmp, &padctl->ports, list) {
list_del(&port->list);
tegra_xusb_port_unregister(port);
}
}
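/*
 * For USB2 ports in OTG or peripheral mode that have no real USB3 companion,
 * the helpers below pick a USB3 port left unused (disabled or absent in DT)
 * and record it as a fake companion, which the Tegra210 UTMI PHY code uses
 * when powering the pad on and off.
 */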
static int tegra_xusb_find_unused_usb3_port(struct tegra_xusb_padctl *padctl)
{
struct device_node *np;
unsigned int i;
for (i = 0; i < padctl->soc->ports.usb3.count; i++) {
np = tegra_xusb_find_port_node(padctl, "usb3", i);
if (!np || !of_device_is_available(np))
return i;
}
return -ENODEV;
}
static bool tegra_xusb_port_is_companion(struct tegra_xusb_usb2_port *usb2)
{
unsigned int i;
struct tegra_xusb_usb3_port *usb3;
struct tegra_xusb_padctl *padctl = usb2->base.padctl;
for (i = 0; i < padctl->soc->ports.usb3.count; i++) {
usb3 = tegra_xusb_find_usb3_port(padctl, i);
if (usb3 && usb3->port == usb2->base.index)
return true;
}
return false;
}
static int tegra_xusb_update_usb3_fake_port(struct tegra_xusb_usb2_port *usb2)
{
int fake;
/* Disable usb3_port_fake usage by default and assign if needed */
usb2->usb3_port_fake = -1;
if ((usb2->mode == USB_DR_MODE_OTG ||
usb2->mode == USB_DR_MODE_PERIPHERAL) &&
!tegra_xusb_port_is_companion(usb2)) {
fake = tegra_xusb_find_unused_usb3_port(usb2->base.padctl);
if (fake < 0) {
dev_err(&usb2->base.dev, "no unused USB3 ports available\n");
return -ENODEV;
}
dev_dbg(&usb2->base.dev, "Found unused usb3 port: %d\n", fake);
usb2->usb3_port_fake = fake;
}
return 0;
}
static int tegra_xusb_setup_ports(struct tegra_xusb_padctl *padctl)
{
struct tegra_xusb_port *port;
struct tegra_xusb_usb2_port *usb2;
unsigned int i;
int err = 0;
mutex_lock(&padctl->lock);
for (i = 0; i < padctl->soc->ports.usb2.count; i++) {
err = tegra_xusb_add_usb2_port(padctl, i);
if (err < 0)
goto remove_ports;
}
for (i = 0; i < padctl->soc->ports.ulpi.count; i++) {
err = tegra_xusb_add_ulpi_port(padctl, i);
if (err < 0)
goto remove_ports;
}
for (i = 0; i < padctl->soc->ports.hsic.count; i++) {
err = tegra_xusb_add_hsic_port(padctl, i);
if (err < 0)
goto remove_ports;
}
for (i = 0; i < padctl->soc->ports.usb3.count; i++) {
err = tegra_xusb_add_usb3_port(padctl, i);
if (err < 0)
goto remove_ports;
}
if (padctl->soc->need_fake_usb3_port) {
for (i = 0; i < padctl->soc->ports.usb2.count; i++) {
usb2 = tegra_xusb_find_usb2_port(padctl, i);
if (!usb2)
continue;
err = tegra_xusb_update_usb3_fake_port(usb2);
if (err < 0)
goto remove_ports;
}
}
list_for_each_entry(port, &padctl->ports, list) {
err = port->ops->enable(port);
if (err < 0)
dev_err(padctl->dev, "failed to enable port %s: %d\n",
dev_name(&port->dev), err);
}
goto unlock;
remove_ports:
__tegra_xusb_remove_ports(padctl);
unlock:
mutex_unlock(&padctl->lock);
return err;
}
static void tegra_xusb_remove_ports(struct tegra_xusb_padctl *padctl)
{
mutex_lock(&padctl->lock);
__tegra_xusb_remove_ports(padctl);
mutex_unlock(&padctl->lock);
}
static int tegra_xusb_padctl_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
const struct tegra_xusb_padctl_soc *soc;
struct tegra_xusb_padctl *padctl;
const struct of_device_id *match;
int err;
/* for backwards compatibility with old device trees */
np = of_get_child_by_name(np, "pads");
if (!np) {
dev_warn(&pdev->dev, "deprecated DT, using legacy driver\n");
return tegra_xusb_padctl_legacy_probe(pdev);
}
of_node_put(np);
match = of_match_node(tegra_xusb_padctl_of_match, pdev->dev.of_node);
soc = match->data;
padctl = soc->ops->probe(&pdev->dev, soc);
if (IS_ERR(padctl))
return PTR_ERR(padctl);
platform_set_drvdata(pdev, padctl);
INIT_LIST_HEAD(&padctl->ports);
INIT_LIST_HEAD(&padctl->lanes);
INIT_LIST_HEAD(&padctl->pads);
mutex_init(&padctl->lock);
padctl->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(padctl->regs)) {
err = PTR_ERR(padctl->regs);
goto remove;
}
padctl->rst = devm_reset_control_get(&pdev->dev, NULL);
if (IS_ERR(padctl->rst)) {
err = PTR_ERR(padctl->rst);
goto remove;
}
padctl->supplies = devm_kcalloc(&pdev->dev, padctl->soc->num_supplies,
sizeof(*padctl->supplies), GFP_KERNEL);
if (!padctl->supplies) {
err = -ENOMEM;
goto remove;
}
regulator_bulk_set_supply_names(padctl->supplies,
padctl->soc->supply_names,
padctl->soc->num_supplies);
err = devm_regulator_bulk_get(&pdev->dev, padctl->soc->num_supplies,
padctl->supplies);
if (err < 0) {
dev_err_probe(&pdev->dev, err, "failed to get regulators\n");
goto remove;
}
err = reset_control_deassert(padctl->rst);
if (err < 0)
goto remove;
err = regulator_bulk_enable(padctl->soc->num_supplies,
padctl->supplies);
if (err < 0) {
dev_err(&pdev->dev, "failed to enable supplies: %d\n", err);
goto reset;
}
err = tegra_xusb_setup_pads(padctl);
if (err < 0) {
dev_err(&pdev->dev, "failed to setup pads: %d\n", err);
goto power_down;
}
err = tegra_xusb_setup_ports(padctl);
if (err) {
const char *level = KERN_ERR;
if (err == -EPROBE_DEFER)
level = KERN_DEBUG;
dev_printk(level, &pdev->dev,
dev_fmt("failed to setup XUSB ports: %d\n"), err);
goto remove_pads;
}
return 0;
remove_pads:
tegra_xusb_remove_pads(padctl);
power_down:
regulator_bulk_disable(padctl->soc->num_supplies, padctl->supplies);
reset:
reset_control_assert(padctl->rst);
remove:
platform_set_drvdata(pdev, NULL);
soc->ops->remove(padctl);
return err;
}
static void tegra_xusb_padctl_remove(struct platform_device *pdev)
{
struct tegra_xusb_padctl *padctl = platform_get_drvdata(pdev);
int err;
tegra_xusb_remove_ports(padctl);
tegra_xusb_remove_pads(padctl);
err = regulator_bulk_disable(padctl->soc->num_supplies,
padctl->supplies);
if (err < 0)
dev_err(&pdev->dev, "failed to disable supplies: %d\n", err);
err = reset_control_assert(padctl->rst);
if (err < 0)
dev_err(&pdev->dev, "failed to assert reset: %d\n", err);
padctl->soc->ops->remove(padctl);
}
static __maybe_unused int tegra_xusb_padctl_suspend_noirq(struct device *dev)
{
struct tegra_xusb_padctl *padctl = dev_get_drvdata(dev);
if (padctl->soc && padctl->soc->ops && padctl->soc->ops->suspend_noirq)
return padctl->soc->ops->suspend_noirq(padctl);
return 0;
}
static __maybe_unused int tegra_xusb_padctl_resume_noirq(struct device *dev)
{
struct tegra_xusb_padctl *padctl = dev_get_drvdata(dev);
if (padctl->soc && padctl->soc->ops && padctl->soc->ops->resume_noirq)
return padctl->soc->ops->resume_noirq(padctl);
return 0;
}
static const struct dev_pm_ops tegra_xusb_padctl_pm_ops = {
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_xusb_padctl_suspend_noirq,
tegra_xusb_padctl_resume_noirq)
};
static struct platform_driver tegra_xusb_padctl_driver = {
.driver = {
.name = "tegra-xusb-padctl",
.of_match_table = tegra_xusb_padctl_of_match,
.pm = &tegra_xusb_padctl_pm_ops,
},
.probe = tegra_xusb_padctl_probe,
.remove_new = tegra_xusb_padctl_remove,
};
module_platform_driver(tegra_xusb_padctl_driver);
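/*
 * The exported functions below form the interface used by the Tegra XUSB
 * host/device controller drivers. tegra_xusb_padctl_get() resolves the
 * "nvidia,xusb-padctl" phandle and takes a reference on the pad controller
 * device, which callers drop again with tegra_xusb_padctl_put(). A minimal
 * consumer sketch (illustrative only, not taken from this file):
 *
 * padctl = tegra_xusb_padctl_get(dev);
 * if (IS_ERR(padctl))
 * return PTR_ERR(padctl);
 * ...
 * tegra_xusb_padctl_put(padctl);
 */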
struct tegra_xusb_padctl *tegra_xusb_padctl_get(struct device *dev)
{
struct tegra_xusb_padctl *padctl;
struct platform_device *pdev;
struct device_node *np;
np = of_parse_phandle(dev->of_node, "nvidia,xusb-padctl", 0);
if (!np)
return ERR_PTR(-EINVAL);
/*
* This is slightly ugly. A better implementation would be to keep a
* registry of pad controllers, but since there will almost certainly
* only ever be one per SoC that would be a little overkill.
*/
pdev = of_find_device_by_node(np);
if (!pdev) {
of_node_put(np);
return ERR_PTR(-ENODEV);
}
of_node_put(np);
padctl = platform_get_drvdata(pdev);
if (!padctl) {
put_device(&pdev->dev);
return ERR_PTR(-EPROBE_DEFER);
}
return padctl;
}
EXPORT_SYMBOL_GPL(tegra_xusb_padctl_get);
void tegra_xusb_padctl_put(struct tegra_xusb_padctl *padctl)
{
if (padctl)
put_device(padctl->dev);
}
EXPORT_SYMBOL_GPL(tegra_xusb_padctl_put);
int tegra_xusb_padctl_usb3_save_context(struct tegra_xusb_padctl *padctl,
unsigned int port)
{
if (padctl->soc->ops->usb3_save_context)
return padctl->soc->ops->usb3_save_context(padctl, port);
return -ENOSYS;
}
EXPORT_SYMBOL_GPL(tegra_xusb_padctl_usb3_save_context);
int tegra_xusb_padctl_hsic_set_idle(struct tegra_xusb_padctl *padctl,
unsigned int port, bool idle)
{
if (padctl->soc->ops->hsic_set_idle)
return padctl->soc->ops->hsic_set_idle(padctl, port, idle);
return -ENOSYS;
}
EXPORT_SYMBOL_GPL(tegra_xusb_padctl_hsic_set_idle);
int tegra_xusb_padctl_enable_phy_sleepwalk(struct tegra_xusb_padctl *padctl, struct phy *phy,
enum usb_device_speed speed)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
if (lane->pad->ops->enable_phy_sleepwalk)
return lane->pad->ops->enable_phy_sleepwalk(lane, speed);
return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(tegra_xusb_padctl_enable_phy_sleepwalk);
int tegra_xusb_padctl_disable_phy_sleepwalk(struct tegra_xusb_padctl *padctl, struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
if (lane->pad->ops->disable_phy_sleepwalk)
return lane->pad->ops->disable_phy_sleepwalk(lane);
return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(tegra_xusb_padctl_disable_phy_sleepwalk);
int tegra_xusb_padctl_enable_phy_wake(struct tegra_xusb_padctl *padctl, struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
if (lane->pad->ops->enable_phy_wake)
return lane->pad->ops->enable_phy_wake(lane);
return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(tegra_xusb_padctl_enable_phy_wake);
int tegra_xusb_padctl_disable_phy_wake(struct tegra_xusb_padctl *padctl, struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
if (lane->pad->ops->disable_phy_wake)
return lane->pad->ops->disable_phy_wake(lane);
return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(tegra_xusb_padctl_disable_phy_wake);
bool tegra_xusb_padctl_remote_wake_detected(struct tegra_xusb_padctl *padctl, struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
if (lane->pad->ops->remote_wake_detected)
return lane->pad->ops->remote_wake_detected(lane);
return false;
}
EXPORT_SYMBOL_GPL(tegra_xusb_padctl_remote_wake_detected);
int tegra_xusb_padctl_usb3_set_lfps_detect(struct tegra_xusb_padctl *padctl,
unsigned int port, bool enable)
{
if (padctl->soc->ops->usb3_set_lfps_detect)
return padctl->soc->ops->usb3_set_lfps_detect(padctl, port,
enable);
return -ENOSYS;
}
EXPORT_SYMBOL_GPL(tegra_xusb_padctl_usb3_set_lfps_detect);
int tegra_xusb_padctl_set_vbus_override(struct tegra_xusb_padctl *padctl,
bool val)
{
if (padctl->soc->ops->vbus_override)
return padctl->soc->ops->vbus_override(padctl, val);
return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(tegra_xusb_padctl_set_vbus_override);
int tegra_phy_xusb_utmi_port_reset(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
if (padctl->soc->ops->utmi_port_reset)
return padctl->soc->ops->utmi_port_reset(phy);
return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(tegra_phy_xusb_utmi_port_reset);
void tegra_phy_xusb_utmi_pad_power_on(struct phy *phy)
{
struct tegra_xusb_lane *lane;
struct tegra_xusb_padctl *padctl;
if (!phy)
return;
lane = phy_get_drvdata(phy);
padctl = lane->pad->padctl;
if (padctl->soc->ops->utmi_pad_power_on)
padctl->soc->ops->utmi_pad_power_on(phy);
}
EXPORT_SYMBOL_GPL(tegra_phy_xusb_utmi_pad_power_on);
void tegra_phy_xusb_utmi_pad_power_down(struct phy *phy)
{
struct tegra_xusb_lane *lane;
struct tegra_xusb_padctl *padctl;
if (!phy)
return;
lane = phy_get_drvdata(phy);
padctl = lane->pad->padctl;
if (padctl->soc->ops->utmi_pad_power_down)
padctl->soc->ops->utmi_pad_power_down(phy);
}
EXPORT_SYMBOL_GPL(tegra_phy_xusb_utmi_pad_power_down);
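/*
 * Find the USB3 port, if any, whose companion mapping points at the given
 * USB2 port, so callers can pair the two pads behind a single connector.
 */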
int tegra_xusb_padctl_get_usb3_companion(struct tegra_xusb_padctl *padctl,
unsigned int port)
{
struct tegra_xusb_usb2_port *usb2;
struct tegra_xusb_usb3_port *usb3;
int i;
usb2 = tegra_xusb_find_usb2_port(padctl, port);
if (!usb2)
return -EINVAL;
for (i = 0; i < padctl->soc->ports.usb3.count; i++) {
usb3 = tegra_xusb_find_usb3_port(padctl, i);
if (usb3 && usb3->port == usb2->base.index)
return usb3->base.index;
}
return -ENODEV;
}
EXPORT_SYMBOL_GPL(tegra_xusb_padctl_get_usb3_companion);
MODULE_AUTHOR("Thierry Reding <[email protected]>");
MODULE_DESCRIPTION("Tegra XUSB Pad Controller driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/phy/tegra/xusb.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* P2U (PIPE to UPHY) driver for Tegra T194 SoC
*
* Copyright (C) 2019-2022 NVIDIA Corporation.
*
* Author: Vidya Sagar <[email protected]>
*/
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#define P2U_CONTROL_CMN 0x74
#define P2U_CONTROL_CMN_ENABLE_L2_EXIT_RATE_CHANGE BIT(13)
#define P2U_CONTROL_CMN_SKP_SIZE_PROTECTION_EN BIT(20)
#define P2U_PERIODIC_EQ_CTRL_GEN3 0xc0
#define P2U_PERIODIC_EQ_CTRL_GEN3_PERIODIC_EQ_EN BIT(0)
#define P2U_PERIODIC_EQ_CTRL_GEN3_INIT_PRESET_EQ_TRAIN_EN BIT(1)
#define P2U_PERIODIC_EQ_CTRL_GEN4 0xc4
#define P2U_PERIODIC_EQ_CTRL_GEN4_INIT_PRESET_EQ_TRAIN_EN BIT(1)
#define P2U_RX_DEBOUNCE_TIME 0xa4
#define P2U_RX_DEBOUNCE_TIME_DEBOUNCE_TIMER_MASK 0xffff
#define P2U_RX_DEBOUNCE_TIME_DEBOUNCE_TIMER_VAL 160
#define P2U_DIR_SEARCH_CTRL 0xd4
#define P2U_DIR_SEARCH_CTRL_GEN4_FINE_GRAIN_SEARCH_TWICE BIT(18)
struct tegra_p2u_of_data {
bool one_dir_search;
};
struct tegra_p2u {
void __iomem *base;
bool skip_sz_protection_en; /* Needed to support two retimers */
struct tegra_p2u_of_data *of_data;
};
static inline void p2u_writel(struct tegra_p2u *phy, const u32 value,
const u32 reg)
{
writel_relaxed(value, phy->base + reg);
}
static inline u32 p2u_readl(struct tegra_p2u *phy, const u32 reg)
{
return readl_relaxed(phy->base + reg);
}
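/*
 * One-time link setup performed when the PCIe controller powers the PHY on:
 * optionally relax SKP symbol size protection (needed with two retimers),
 * enable Gen3/Gen4 preset-based equalization training, program the RX
 * debounce timer and, on SoCs that request it, restrict the Gen4 fine-grain
 * direction search to a single pass.
 */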
static int tegra_p2u_power_on(struct phy *x)
{
struct tegra_p2u *phy = phy_get_drvdata(x);
u32 val;
if (phy->skip_sz_protection_en) {
val = p2u_readl(phy, P2U_CONTROL_CMN);
val |= P2U_CONTROL_CMN_SKP_SIZE_PROTECTION_EN;
p2u_writel(phy, val, P2U_CONTROL_CMN);
}
val = p2u_readl(phy, P2U_PERIODIC_EQ_CTRL_GEN3);
val &= ~P2U_PERIODIC_EQ_CTRL_GEN3_PERIODIC_EQ_EN;
val |= P2U_PERIODIC_EQ_CTRL_GEN3_INIT_PRESET_EQ_TRAIN_EN;
p2u_writel(phy, val, P2U_PERIODIC_EQ_CTRL_GEN3);
val = p2u_readl(phy, P2U_PERIODIC_EQ_CTRL_GEN4);
val |= P2U_PERIODIC_EQ_CTRL_GEN4_INIT_PRESET_EQ_TRAIN_EN;
p2u_writel(phy, val, P2U_PERIODIC_EQ_CTRL_GEN4);
val = p2u_readl(phy, P2U_RX_DEBOUNCE_TIME);
val &= ~P2U_RX_DEBOUNCE_TIME_DEBOUNCE_TIMER_MASK;
val |= P2U_RX_DEBOUNCE_TIME_DEBOUNCE_TIMER_VAL;
p2u_writel(phy, val, P2U_RX_DEBOUNCE_TIME);
if (phy->of_data->one_dir_search) {
val = p2u_readl(phy, P2U_DIR_SEARCH_CTRL);
val &= ~P2U_DIR_SEARCH_CTRL_GEN4_FINE_GRAIN_SEARCH_TWICE;
p2u_writel(phy, val, P2U_DIR_SEARCH_CTRL);
}
return 0;
}
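/* Calibration step: permit a link rate change on exit from the L2 power state. */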
static int tegra_p2u_calibrate(struct phy *x)
{
struct tegra_p2u *phy = phy_get_drvdata(x);
u32 val;
val = p2u_readl(phy, P2U_CONTROL_CMN);
val |= P2U_CONTROL_CMN_ENABLE_L2_EXIT_RATE_CHANGE;
p2u_writel(phy, val, P2U_CONTROL_CMN);
return 0;
}
static const struct phy_ops ops = {
.power_on = tegra_p2u_power_on,
.calibrate = tegra_p2u_calibrate,
.owner = THIS_MODULE,
};
static int tegra_p2u_probe(struct platform_device *pdev)
{
struct phy_provider *phy_provider;
struct device *dev = &pdev->dev;
struct phy *generic_phy;
struct tegra_p2u *phy;
phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
if (!phy)
return -ENOMEM;
phy->of_data =
(struct tegra_p2u_of_data *)of_device_get_match_data(dev);
if (!phy->of_data)
return -EINVAL;
phy->base = devm_platform_ioremap_resource_byname(pdev, "ctl");
if (IS_ERR(phy->base))
return PTR_ERR(phy->base);
phy->skip_sz_protection_en =
of_property_read_bool(dev->of_node,
"nvidia,skip-sz-protect-en");
platform_set_drvdata(pdev, phy);
generic_phy = devm_phy_create(dev, NULL, &ops);
if (IS_ERR(generic_phy))
return PTR_ERR(generic_phy);
phy_set_drvdata(generic_phy, phy);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
if (IS_ERR(phy_provider))
return PTR_ERR(phy_provider);
return 0;
}
static const struct tegra_p2u_of_data tegra194_p2u_of_data = {
.one_dir_search = false,
};
static const struct tegra_p2u_of_data tegra234_p2u_of_data = {
.one_dir_search = true,
};
static const struct of_device_id tegra_p2u_id_table[] = {
{
.compatible = "nvidia,tegra194-p2u",
.data = &tegra194_p2u_of_data,
},
{
.compatible = "nvidia,tegra234-p2u",
.data = &tegra234_p2u_of_data,
},
{}
};
MODULE_DEVICE_TABLE(of, tegra_p2u_id_table);
static struct platform_driver tegra_p2u_driver = {
.probe = tegra_p2u_probe,
.driver = {
.name = "tegra194-p2u",
.of_match_table = tegra_p2u_id_table,
},
};
module_platform_driver(tegra_p2u_driver);
MODULE_AUTHOR("Vidya Sagar <[email protected]>");
MODULE_DESCRIPTION("NVIDIA Tegra194 PIPE2UPHY PHY driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/phy/tegra/phy-tegra194-p2u.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2014 STMicroelectronics
*
* STMicroelectronics PHY driver MiPHY28lp (for SoC STiH407).
*
* Author: Alexandre Torgue <[email protected]>
*/
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/clk.h>
#include <linux/phy/phy.h>
#include <linux/delay.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <dt-bindings/phy/phy.h>
/* MiPHY registers */
#define MIPHY_CONF_RESET 0x00
#define RST_APPLI_SW BIT(0)
#define RST_CONF_SW BIT(1)
#define RST_MACRO_SW BIT(2)
#define MIPHY_RESET 0x01
#define RST_PLL_SW BIT(0)
#define RST_COMP_SW BIT(2)
#define MIPHY_STATUS_1 0x02
#define PHY_RDY BIT(0)
#define HFC_RDY BIT(1)
#define HFC_PLL BIT(2)
#define MIPHY_CONTROL 0x04
#define TERM_EN_SW BIT(2)
#define DIS_LINK_RST BIT(3)
#define AUTO_RST_RX BIT(4)
#define PX_RX_POL BIT(5)
#define MIPHY_BOUNDARY_SEL 0x0a
#define TX_SEL BIT(6)
#define SSC_SEL BIT(4)
#define GENSEL_SEL BIT(0)
#define MIPHY_BOUNDARY_1 0x0b
#define MIPHY_BOUNDARY_2 0x0c
#define SSC_EN_SW BIT(2)
#define MIPHY_PLL_CLKREF_FREQ 0x0d
#define MIPHY_SPEED 0x0e
#define TX_SPDSEL_80DEC 0
#define TX_SPDSEL_40DEC 1
#define TX_SPDSEL_20DEC 2
#define RX_SPDSEL_80DEC 0
#define RX_SPDSEL_40DEC (1 << 2)
#define RX_SPDSEL_20DEC (2 << 2)
#define MIPHY_CONF 0x0f
#define MIPHY_CTRL_TEST_SEL 0x20
#define MIPHY_CTRL_TEST_1 0x21
#define MIPHY_CTRL_TEST_2 0x22
#define MIPHY_CTRL_TEST_3 0x23
#define MIPHY_CTRL_TEST_4 0x24
#define MIPHY_FEEDBACK_TEST 0x25
#define MIPHY_DEBUG_BUS 0x26
#define MIPHY_DEBUG_STATUS_MSB 0x27
#define MIPHY_DEBUG_STATUS_LSB 0x28
#define MIPHY_PWR_RAIL_1 0x29
#define MIPHY_PWR_RAIL_2 0x2a
#define MIPHY_SYNCHAR_CONTROL 0x30
#define MIPHY_COMP_FSM_1 0x3a
#define COMP_START BIT(6)
#define MIPHY_COMP_FSM_6 0x3f
#define COMP_DONE BIT(7)
#define MIPHY_COMP_POSTP 0x42
#define MIPHY_TX_CTRL_1 0x49
#define TX_REG_STEP_0V 0
#define TX_REG_STEP_P_25MV 1
#define TX_REG_STEP_P_50MV 2
#define TX_REG_STEP_N_25MV 7
#define TX_REG_STEP_N_50MV 6
#define TX_REG_STEP_N_75MV 5
#define MIPHY_TX_CTRL_2 0x4a
#define TX_SLEW_SW_40_PS 0
#define TX_SLEW_SW_80_PS 1
#define TX_SLEW_SW_120_PS 2
#define MIPHY_TX_CTRL_3 0x4b
#define MIPHY_TX_CAL_MAN 0x4e
#define TX_SLEW_CAL_MAN_EN BIT(0)
#define MIPHY_TST_BIAS_BOOST_2 0x62
#define MIPHY_BIAS_BOOST_1 0x63
#define MIPHY_BIAS_BOOST_2 0x64
#define MIPHY_RX_DESBUFF_FDB_2 0x67
#define MIPHY_RX_DESBUFF_FDB_3 0x68
#define MIPHY_SIGDET_COMPENS1 0x69
#define MIPHY_SIGDET_COMPENS2 0x6a
#define MIPHY_JITTER_PERIOD 0x6b
#define MIPHY_JITTER_AMPLITUDE_1 0x6c
#define MIPHY_JITTER_AMPLITUDE_2 0x6d
#define MIPHY_JITTER_AMPLITUDE_3 0x6e
#define MIPHY_RX_K_GAIN 0x78
#define MIPHY_RX_BUFFER_CTRL 0x7a
#define VGA_GAIN BIT(0)
#define EQ_DC_GAIN BIT(2)
#define EQ_BOOST_GAIN BIT(3)
#define MIPHY_RX_VGA_GAIN 0x7b
#define MIPHY_RX_EQU_GAIN_1 0x7f
#define MIPHY_RX_EQU_GAIN_2 0x80
#define MIPHY_RX_EQU_GAIN_3 0x81
#define MIPHY_RX_CAL_CTRL_1 0x97
#define MIPHY_RX_CAL_CTRL_2 0x98
#define MIPHY_RX_CAL_OFFSET_CTRL 0x99
#define CAL_OFFSET_VGA_64 (0x03 << 0)
#define CAL_OFFSET_THRESHOLD_64 (0x03 << 2)
#define VGA_OFFSET_POLARITY BIT(4)
#define OFFSET_COMPENSATION_EN BIT(6)
#define MIPHY_RX_CAL_VGA_STEP 0x9a
#define MIPHY_RX_CAL_EYE_MIN 0x9d
#define MIPHY_RX_CAL_OPT_LENGTH 0x9f
#define MIPHY_RX_LOCK_CTRL_1 0xc1
#define MIPHY_RX_LOCK_SETTINGS_OPT 0xc2
#define MIPHY_RX_LOCK_STEP 0xc4
#define MIPHY_RX_SIGDET_SLEEP_OA 0xc9
#define MIPHY_RX_SIGDET_SLEEP_SEL 0xca
#define MIPHY_RX_SIGDET_WAIT_SEL 0xcb
#define MIPHY_RX_SIGDET_DATA_SEL 0xcc
#define EN_ULTRA_LOW_POWER BIT(0)
#define EN_FIRST_HALF BIT(1)
#define EN_SECOND_HALF BIT(2)
#define EN_DIGIT_SIGNAL_CHECK BIT(3)
#define MIPHY_RX_POWER_CTRL_1 0xcd
#define MIPHY_RX_POWER_CTRL_2 0xce
#define MIPHY_PLL_CALSET_CTRL 0xd3
#define MIPHY_PLL_CALSET_1 0xd4
#define MIPHY_PLL_CALSET_2 0xd5
#define MIPHY_PLL_CALSET_3 0xd6
#define MIPHY_PLL_CALSET_4 0xd7
#define MIPHY_PLL_SBR_1 0xe3
#define SET_NEW_CHANGE BIT(1)
#define MIPHY_PLL_SBR_2 0xe4
#define MIPHY_PLL_SBR_3 0xe5
#define MIPHY_PLL_SBR_4 0xe6
#define MIPHY_PLL_COMMON_MISC_2 0xe9
#define START_ACT_FILT BIT(6)
#define MIPHY_PLL_SPAREIN 0xeb
/*
* On STiH407 the glue logic can be different among MiPHY devices; for example:
* MiPHY0: OSC_FORCE_EXT means:
* 0: 30MHz crystal clk - 1: 100MHz ext clk routed through MiPHY1
* MiPHY1: OSC_FORCE_EXT means:
* 1: 30MHz crystal clk - 0: 100MHz ext clk routed through MiPHY1
* Some devices do not provide a way to check whether the oscillator is ready.
*/
#define MIPHY_OSC_FORCE_EXT BIT(3)
#define MIPHY_OSC_RDY BIT(5)
#define MIPHY_CTRL_MASK 0x0f
#define MIPHY_CTRL_DEFAULT 0
#define MIPHY_CTRL_SYNC_D_EN BIT(2)
/* SATA / PCIe defines */
#define SATA_CTRL_MASK 0x07
#define PCIE_CTRL_MASK 0xff
#define SATA_CTRL_SELECT_SATA 1
#define SATA_CTRL_SELECT_PCIE 0
#define SYSCFG_PCIE_PCIE_VAL 0x80
#define SATA_SPDMODE 1
#define MIPHY_SATA_BANK_NB 3
#define MIPHY_PCIE_BANK_NB 2
enum {
SYSCFG_CTRL,
SYSCFG_STATUS,
SYSCFG_PCI,
SYSCFG_SATA,
SYSCFG_REG_MAX,
};
struct miphy28lp_phy {
struct phy *phy;
struct miphy28lp_dev *phydev;
void __iomem *base;
void __iomem *pipebase;
bool osc_force_ext;
bool osc_rdy;
bool px_rx_pol_inv;
bool ssc;
bool tx_impedance;
struct reset_control *miphy_rst;
u32 sata_gen;
/* Sysconfig registers offsets needed to configure the device */
u32 syscfg_reg[SYSCFG_REG_MAX];
u8 type;
};
struct miphy28lp_dev {
struct device *dev;
struct regmap *regmap;
struct mutex miphy_mutex;
struct miphy28lp_phy **phys;
int nphys;
};
struct miphy_initval {
u16 reg;
u16 val;
};
enum miphy_sata_gen { SATA_GEN1, SATA_GEN2, SATA_GEN3 };
static char *PHY_TYPE_name[] = { "sata-up", "pcie-up", "", "usb3-up" };
struct pll_ratio {
int clk_ref;
int calset_1;
int calset_2;
int calset_3;
int calset_4;
int cal_ctrl;
};
static struct pll_ratio sata_pll_ratio = {
.clk_ref = 0x1e,
.calset_1 = 0xc8,
.calset_2 = 0x00,
.calset_3 = 0x00,
.calset_4 = 0x00,
.cal_ctrl = 0x00,
};
static struct pll_ratio pcie_pll_ratio = {
.clk_ref = 0x1e,
.calset_1 = 0xa6,
.calset_2 = 0xaa,
.calset_3 = 0xaa,
.calset_4 = 0x00,
.cal_ctrl = 0x00,
};
static struct pll_ratio usb3_pll_ratio = {
.clk_ref = 0x1e,
.calset_1 = 0xa6,
.calset_2 = 0xaa,
.calset_3 = 0xaa,
.calset_4 = 0x04,
.cal_ctrl = 0x00,
};
struct miphy28lp_pll_gen {
int bank;
int speed;
int bias_boost_1;
int bias_boost_2;
int tx_ctrl_1;
int tx_ctrl_2;
int tx_ctrl_3;
int rx_k_gain;
int rx_vga_gain;
int rx_equ_gain_1;
int rx_equ_gain_2;
int rx_equ_gain_3;
int rx_buff_ctrl;
};
static struct miphy28lp_pll_gen sata_pll_gen[] = {
{
.bank = 0x00,
.speed = TX_SPDSEL_80DEC | RX_SPDSEL_80DEC,
.bias_boost_1 = 0x00,
.bias_boost_2 = 0xae,
.tx_ctrl_2 = 0x53,
.tx_ctrl_3 = 0x00,
.rx_buff_ctrl = EQ_BOOST_GAIN | EQ_DC_GAIN | VGA_GAIN,
.rx_vga_gain = 0x00,
.rx_equ_gain_1 = 0x7d,
.rx_equ_gain_2 = 0x56,
.rx_equ_gain_3 = 0x00,
},
{
.bank = 0x01,
.speed = TX_SPDSEL_40DEC | RX_SPDSEL_40DEC,
.bias_boost_1 = 0x00,
.bias_boost_2 = 0xae,
.tx_ctrl_2 = 0x72,
.tx_ctrl_3 = 0x20,
.rx_buff_ctrl = EQ_BOOST_GAIN | EQ_DC_GAIN | VGA_GAIN,
.rx_vga_gain = 0x00,
.rx_equ_gain_1 = 0x7d,
.rx_equ_gain_2 = 0x56,
.rx_equ_gain_3 = 0x00,
},
{
.bank = 0x02,
.speed = TX_SPDSEL_20DEC | RX_SPDSEL_20DEC,
.bias_boost_1 = 0x00,
.bias_boost_2 = 0xae,
.tx_ctrl_2 = 0xc0,
.tx_ctrl_3 = 0x20,
.rx_buff_ctrl = EQ_BOOST_GAIN | EQ_DC_GAIN | VGA_GAIN,
.rx_vga_gain = 0x00,
.rx_equ_gain_1 = 0x7d,
.rx_equ_gain_2 = 0x56,
.rx_equ_gain_3 = 0x00,
},
};
static struct miphy28lp_pll_gen pcie_pll_gen[] = {
{
.bank = 0x00,
.speed = TX_SPDSEL_40DEC | RX_SPDSEL_40DEC,
.bias_boost_1 = 0x00,
.bias_boost_2 = 0xa5,
.tx_ctrl_1 = TX_REG_STEP_N_25MV,
.tx_ctrl_2 = 0x71,
.tx_ctrl_3 = 0x60,
.rx_k_gain = 0x98,
.rx_buff_ctrl = EQ_BOOST_GAIN | EQ_DC_GAIN | VGA_GAIN,
.rx_vga_gain = 0x00,
.rx_equ_gain_1 = 0x79,
.rx_equ_gain_2 = 0x56,
},
{
.bank = 0x01,
.speed = TX_SPDSEL_20DEC | RX_SPDSEL_20DEC,
.bias_boost_1 = 0x00,
.bias_boost_2 = 0xa5,
.tx_ctrl_1 = TX_REG_STEP_N_25MV,
.tx_ctrl_2 = 0x70,
.tx_ctrl_3 = 0x60,
.rx_k_gain = 0xcc,
.rx_buff_ctrl = EQ_BOOST_GAIN | EQ_DC_GAIN | VGA_GAIN,
.rx_vga_gain = 0x00,
.rx_equ_gain_1 = 0x78,
.rx_equ_gain_2 = 0x07,
},
};
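/*
 * The tables above hold per-bank (per link speed) analog tuning values; the
 * *_config_gen() helpers below walk them and program one register bank per
 * supported SATA or PCIe generation.
 */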
static inline void miphy28lp_set_reset(struct miphy28lp_phy *miphy_phy)
{
void __iomem *base = miphy_phy->base;
u8 val;
/* Putting Macro in reset */
writeb_relaxed(RST_APPLI_SW, base + MIPHY_CONF_RESET);
val = RST_APPLI_SW | RST_CONF_SW;
writeb_relaxed(val, base + MIPHY_CONF_RESET);
writeb_relaxed(RST_APPLI_SW, base + MIPHY_CONF_RESET);
/* Bringing the MIPHY-CPU registers out of reset */
if (miphy_phy->type == PHY_TYPE_PCIE) {
val = AUTO_RST_RX | TERM_EN_SW;
writeb_relaxed(val, base + MIPHY_CONTROL);
} else {
val = AUTO_RST_RX | TERM_EN_SW | DIS_LINK_RST;
writeb_relaxed(val, base + MIPHY_CONTROL);
}
}
static inline void miphy28lp_pll_calibration(struct miphy28lp_phy *miphy_phy,
struct pll_ratio *pll_ratio)
{
void __iomem *base = miphy_phy->base;
u8 val;
/* Applying PLL Settings */
writeb_relaxed(0x1d, base + MIPHY_PLL_SPAREIN);
writeb_relaxed(pll_ratio->clk_ref, base + MIPHY_PLL_CLKREF_FREQ);
/* PLL Ratio */
writeb_relaxed(pll_ratio->calset_1, base + MIPHY_PLL_CALSET_1);
writeb_relaxed(pll_ratio->calset_2, base + MIPHY_PLL_CALSET_2);
writeb_relaxed(pll_ratio->calset_3, base + MIPHY_PLL_CALSET_3);
writeb_relaxed(pll_ratio->calset_4, base + MIPHY_PLL_CALSET_4);
writeb_relaxed(pll_ratio->cal_ctrl, base + MIPHY_PLL_CALSET_CTRL);
writeb_relaxed(TX_SEL, base + MIPHY_BOUNDARY_SEL);
val = (0x68 << 1) | TX_SLEW_CAL_MAN_EN;
writeb_relaxed(val, base + MIPHY_TX_CAL_MAN);
val = VGA_OFFSET_POLARITY | CAL_OFFSET_THRESHOLD_64 | CAL_OFFSET_VGA_64;
if (miphy_phy->type != PHY_TYPE_SATA)
val |= OFFSET_COMPENSATION_EN;
writeb_relaxed(val, base + MIPHY_RX_CAL_OFFSET_CTRL);
if (miphy_phy->type == PHY_TYPE_USB3) {
writeb_relaxed(0x00, base + MIPHY_CONF);
writeb_relaxed(0x70, base + MIPHY_RX_LOCK_STEP);
writeb_relaxed(EN_FIRST_HALF, base + MIPHY_RX_SIGDET_SLEEP_OA);
writeb_relaxed(EN_FIRST_HALF, base + MIPHY_RX_SIGDET_SLEEP_SEL);
writeb_relaxed(EN_FIRST_HALF, base + MIPHY_RX_SIGDET_WAIT_SEL);
val = EN_DIGIT_SIGNAL_CHECK | EN_FIRST_HALF;
writeb_relaxed(val, base + MIPHY_RX_SIGDET_DATA_SEL);
}
}
static inline void miphy28lp_sata_config_gen(struct miphy28lp_phy *miphy_phy)
{
void __iomem *base = miphy_phy->base;
int i;
for (i = 0; i < ARRAY_SIZE(sata_pll_gen); i++) {
struct miphy28lp_pll_gen *gen = &sata_pll_gen[i];
/* Banked settings */
writeb_relaxed(gen->bank, base + MIPHY_CONF);
writeb_relaxed(gen->speed, base + MIPHY_SPEED);
writeb_relaxed(gen->bias_boost_1, base + MIPHY_BIAS_BOOST_1);
writeb_relaxed(gen->bias_boost_2, base + MIPHY_BIAS_BOOST_2);
/* TX buffer Settings */
writeb_relaxed(gen->tx_ctrl_2, base + MIPHY_TX_CTRL_2);
writeb_relaxed(gen->tx_ctrl_3, base + MIPHY_TX_CTRL_3);
/* RX Buffer Settings */
writeb_relaxed(gen->rx_buff_ctrl, base + MIPHY_RX_BUFFER_CTRL);
writeb_relaxed(gen->rx_vga_gain, base + MIPHY_RX_VGA_GAIN);
writeb_relaxed(gen->rx_equ_gain_1, base + MIPHY_RX_EQU_GAIN_1);
writeb_relaxed(gen->rx_equ_gain_2, base + MIPHY_RX_EQU_GAIN_2);
writeb_relaxed(gen->rx_equ_gain_3, base + MIPHY_RX_EQU_GAIN_3);
}
}
static inline void miphy28lp_pcie_config_gen(struct miphy28lp_phy *miphy_phy)
{
void __iomem *base = miphy_phy->base;
int i;
for (i = 0; i < ARRAY_SIZE(pcie_pll_gen); i++) {
struct miphy28lp_pll_gen *gen = &pcie_pll_gen[i];
/* Banked settings */
writeb_relaxed(gen->bank, base + MIPHY_CONF);
writeb_relaxed(gen->speed, base + MIPHY_SPEED);
writeb_relaxed(gen->bias_boost_1, base + MIPHY_BIAS_BOOST_1);
writeb_relaxed(gen->bias_boost_2, base + MIPHY_BIAS_BOOST_2);
/* TX buffer Settings */
writeb_relaxed(gen->tx_ctrl_1, base + MIPHY_TX_CTRL_1);
writeb_relaxed(gen->tx_ctrl_2, base + MIPHY_TX_CTRL_2);
writeb_relaxed(gen->tx_ctrl_3, base + MIPHY_TX_CTRL_3);
writeb_relaxed(gen->rx_k_gain, base + MIPHY_RX_K_GAIN);
/* RX Buffer Settings */
writeb_relaxed(gen->rx_buff_ctrl, base + MIPHY_RX_BUFFER_CTRL);
writeb_relaxed(gen->rx_vga_gain, base + MIPHY_RX_VGA_GAIN);
writeb_relaxed(gen->rx_equ_gain_1, base + MIPHY_RX_EQU_GAIN_1);
writeb_relaxed(gen->rx_equ_gain_2, base + MIPHY_RX_EQU_GAIN_2);
}
}
static inline int miphy28lp_wait_compensation(struct miphy28lp_phy *miphy_phy)
{
u8 val;
/* Waiting for Compensation to complete */
return readb_relaxed_poll_timeout(miphy_phy->base + MIPHY_COMP_FSM_6,
val, val & COMP_DONE, 1, 5 * USEC_PER_SEC);
}
static inline int miphy28lp_compensation(struct miphy28lp_phy *miphy_phy,
struct pll_ratio *pll_ratio)
{
void __iomem *base = miphy_phy->base;
/* Poll for HFC ready after reset release */
/* Compensation measurement */
writeb_relaxed(RST_PLL_SW | RST_COMP_SW, base + MIPHY_RESET);
writeb_relaxed(0x00, base + MIPHY_PLL_COMMON_MISC_2);
writeb_relaxed(pll_ratio->clk_ref, base + MIPHY_PLL_CLKREF_FREQ);
writeb_relaxed(COMP_START, base + MIPHY_COMP_FSM_1);
if (miphy_phy->type == PHY_TYPE_PCIE)
writeb_relaxed(RST_PLL_SW, base + MIPHY_RESET);
writeb_relaxed(0x00, base + MIPHY_RESET);
writeb_relaxed(START_ACT_FILT, base + MIPHY_PLL_COMMON_MISC_2);
writeb_relaxed(SET_NEW_CHANGE, base + MIPHY_PLL_SBR_1);
/* TX compensation offset to re-center TX impedance */
writeb_relaxed(0x00, base + MIPHY_COMP_POSTP);
if (miphy_phy->type == PHY_TYPE_PCIE)
return miphy28lp_wait_compensation(miphy_phy);
return 0;
}
static inline void miphy28_usb3_miphy_reset(struct miphy28lp_phy *miphy_phy)
{
void __iomem *base = miphy_phy->base;
u8 val;
/* MIPHY Reset */
writeb_relaxed(RST_APPLI_SW, base + MIPHY_CONF_RESET);
writeb_relaxed(0x00, base + MIPHY_CONF_RESET);
writeb_relaxed(RST_COMP_SW, base + MIPHY_RESET);
val = RST_COMP_SW | RST_PLL_SW;
writeb_relaxed(val, base + MIPHY_RESET);
writeb_relaxed(0x00, base + MIPHY_PLL_COMMON_MISC_2);
writeb_relaxed(0x1e, base + MIPHY_PLL_CLKREF_FREQ);
writeb_relaxed(COMP_START, base + MIPHY_COMP_FSM_1);
writeb_relaxed(RST_PLL_SW, base + MIPHY_RESET);
writeb_relaxed(0x00, base + MIPHY_RESET);
writeb_relaxed(START_ACT_FILT, base + MIPHY_PLL_COMMON_MISC_2);
writeb_relaxed(0x00, base + MIPHY_CONF);
writeb_relaxed(0x00, base + MIPHY_BOUNDARY_1);
writeb_relaxed(0x00, base + MIPHY_TST_BIAS_BOOST_2);
writeb_relaxed(0x00, base + MIPHY_CONF);
writeb_relaxed(SET_NEW_CHANGE, base + MIPHY_PLL_SBR_1);
writeb_relaxed(0xa5, base + MIPHY_DEBUG_BUS);
writeb_relaxed(0x00, base + MIPHY_CONF);
}
static void miphy_sata_tune_ssc(struct miphy28lp_phy *miphy_phy)
{
void __iomem *base = miphy_phy->base;
u8 val;
/* Compensate Tx impedance to avoid out of range values */
/*
* Enable the SSC on PLL for all banks
* SSC Modulation @ 31 KHz and 4000 ppm modulation amp
*/
val = readb_relaxed(base + MIPHY_BOUNDARY_2);
val |= SSC_EN_SW;
writeb_relaxed(val, base + MIPHY_BOUNDARY_2);
val = readb_relaxed(base + MIPHY_BOUNDARY_SEL);
val |= SSC_SEL;
writeb_relaxed(val, base + MIPHY_BOUNDARY_SEL);
for (val = 0; val < MIPHY_SATA_BANK_NB; val++) {
writeb_relaxed(val, base + MIPHY_CONF);
/* Add value to each reference clock cycle */
/* and define the period length of the SSC */
writeb_relaxed(0x3c, base + MIPHY_PLL_SBR_2);
writeb_relaxed(0x6c, base + MIPHY_PLL_SBR_3);
writeb_relaxed(0x81, base + MIPHY_PLL_SBR_4);
/* Clear any previous request */
writeb_relaxed(0x00, base + MIPHY_PLL_SBR_1);
/* Request the PLL to take the new parameters into account */
writeb_relaxed(SET_NEW_CHANGE, base + MIPHY_PLL_SBR_1);
/* Make sure there are no other pending requests */
writeb_relaxed(0x00, base + MIPHY_PLL_SBR_1);
}
}
static void miphy_pcie_tune_ssc(struct miphy28lp_phy *miphy_phy)
{
void __iomem *base = miphy_phy->base;
u8 val;
/* Compensate Tx impedance to avoid out of range values */
/*
* Enable the SSC on PLL for all banks
* SSC Modulation @ 31 KHz and 4000 ppm modulation amp
*/
val = readb_relaxed(base + MIPHY_BOUNDARY_2);
val |= SSC_EN_SW;
writeb_relaxed(val, base + MIPHY_BOUNDARY_2);
val = readb_relaxed(base + MIPHY_BOUNDARY_SEL);
val |= SSC_SEL;
writeb_relaxed(val, base + MIPHY_BOUNDARY_SEL);
for (val = 0; val < MIPHY_PCIE_BANK_NB; val++) {
writeb_relaxed(val, base + MIPHY_CONF);
/* Validate Step component */
writeb_relaxed(0x69, base + MIPHY_PLL_SBR_3);
writeb_relaxed(0x21, base + MIPHY_PLL_SBR_4);
/* Validate Period component */
writeb_relaxed(0x3c, base + MIPHY_PLL_SBR_2);
writeb_relaxed(0x21, base + MIPHY_PLL_SBR_4);
/* Clear any previous request */
writeb_relaxed(0x00, base + MIPHY_PLL_SBR_1);
/* Request the PLL to take the new parameters into account */
writeb_relaxed(SET_NEW_CHANGE, base + MIPHY_PLL_SBR_1);
/* Make sure there are no other pending requests */
writeb_relaxed(0x00, base + MIPHY_PLL_SBR_1);
}
}
static inline void miphy_tune_tx_impedance(struct miphy28lp_phy *miphy_phy)
{
/* Compensate Tx impedance to avoid out of range values */
writeb_relaxed(0x02, miphy_phy->base + MIPHY_COMP_POSTP);
}
static inline int miphy28lp_configure_sata(struct miphy28lp_phy *miphy_phy)
{
void __iomem *base = miphy_phy->base;
int err;
u8 val;
/* Putting Macro in reset */
miphy28lp_set_reset(miphy_phy);
/* PLL calibration */
miphy28lp_pll_calibration(miphy_phy, &sata_pll_ratio);
/* Banked settings Gen1/Gen2/Gen3 */
miphy28lp_sata_config_gen(miphy_phy);
/* Power control */
/* Input bridge enable, manual input bridge control */
writeb_relaxed(0x21, base + MIPHY_RX_POWER_CTRL_1);
/* Macro out of reset */
writeb_relaxed(0x00, base + MIPHY_CONF_RESET);
/* Poll for HFC ready after reset release */
/* Compensation measurement */
err = miphy28lp_compensation(miphy_phy, &sata_pll_ratio);
if (err)
return err;
if (miphy_phy->px_rx_pol_inv) {
/* Invert Rx polarity */
val = readb_relaxed(miphy_phy->base + MIPHY_CONTROL);
val |= PX_RX_POL;
writeb_relaxed(val, miphy_phy->base + MIPHY_CONTROL);
}
if (miphy_phy->ssc)
miphy_sata_tune_ssc(miphy_phy);
if (miphy_phy->tx_impedance)
miphy_tune_tx_impedance(miphy_phy);
return 0;
}
static inline int miphy28lp_configure_pcie(struct miphy28lp_phy *miphy_phy)
{
void __iomem *base = miphy_phy->base;
int err;
/* Putting Macro in reset */
miphy28lp_set_reset(miphy_phy);
/* PLL calibration */
miphy28lp_pll_calibration(miphy_phy, &pcie_pll_ratio);
/* Banked settings Gen1/Gen2 */
miphy28lp_pcie_config_gen(miphy_phy);
/* Power control */
/* Input bridge enable, manual input bridge control */
writeb_relaxed(0x21, base + MIPHY_RX_POWER_CTRL_1);
/* Macro out of reset */
writeb_relaxed(0x00, base + MIPHY_CONF_RESET);
/* Poll for HFC ready after reset release */
/* Compensation measurement */
err = miphy28lp_compensation(miphy_phy, &pcie_pll_ratio);
if (err)
return err;
if (miphy_phy->ssc)
miphy_pcie_tune_ssc(miphy_phy);
if (miphy_phy->tx_impedance)
miphy_tune_tx_impedance(miphy_phy);
return 0;
}
static inline void miphy28lp_configure_usb3(struct miphy28lp_phy *miphy_phy)
{
void __iomem *base = miphy_phy->base;
u8 val;
/* Putting Macro in reset */
miphy28lp_set_reset(miphy_phy);
/* PLL calibration */
miphy28lp_pll_calibration(miphy_phy, &usb3_pll_ratio);
/* Writing The Speed Rate */
writeb_relaxed(0x00, base + MIPHY_CONF);
val = RX_SPDSEL_20DEC | TX_SPDSEL_20DEC;
writeb_relaxed(val, base + MIPHY_SPEED);
/* RX Channel compensation and calibration */
writeb_relaxed(0x1c, base + MIPHY_RX_LOCK_SETTINGS_OPT);
writeb_relaxed(0x51, base + MIPHY_RX_CAL_CTRL_1);
writeb_relaxed(0x70, base + MIPHY_RX_CAL_CTRL_2);
val = OFFSET_COMPENSATION_EN | VGA_OFFSET_POLARITY |
CAL_OFFSET_THRESHOLD_64 | CAL_OFFSET_VGA_64;
writeb_relaxed(val, base + MIPHY_RX_CAL_OFFSET_CTRL);
writeb_relaxed(0x22, base + MIPHY_RX_CAL_VGA_STEP);
writeb_relaxed(0x0e, base + MIPHY_RX_CAL_OPT_LENGTH);
val = EQ_DC_GAIN | VGA_GAIN;
writeb_relaxed(val, base + MIPHY_RX_BUFFER_CTRL);
writeb_relaxed(0x78, base + MIPHY_RX_EQU_GAIN_1);
writeb_relaxed(0x1b, base + MIPHY_SYNCHAR_CONTROL);
/* TX compensation offset to re-center TX impedance */
writeb_relaxed(0x02, base + MIPHY_COMP_POSTP);
/* Enable GENSEL_SEL and SSC */
/* TX_SEL=0: swing and pre-emphasis forced by the PIPE registers */
val = SSC_SEL | GENSEL_SEL;
writeb_relaxed(val, base + MIPHY_BOUNDARY_SEL);
/* MIPHY Bias boost */
writeb_relaxed(0x00, base + MIPHY_BIAS_BOOST_1);
writeb_relaxed(0xa7, base + MIPHY_BIAS_BOOST_2);
/* SSC modulation */
writeb_relaxed(SSC_EN_SW, base + MIPHY_BOUNDARY_2);
/* MIPHY TX control */
writeb_relaxed(0x00, base + MIPHY_CONF);
/* Validate Step component */
writeb_relaxed(0x5a, base + MIPHY_PLL_SBR_3);
writeb_relaxed(0xa0, base + MIPHY_PLL_SBR_4);
/* Validate Period component */
writeb_relaxed(0x3c, base + MIPHY_PLL_SBR_2);
writeb_relaxed(0xa1, base + MIPHY_PLL_SBR_4);
/* Clear any previous request */
writeb_relaxed(0x00, base + MIPHY_PLL_SBR_1);
/* Request the PLL to take the new parameters into account */
writeb_relaxed(0x02, base + MIPHY_PLL_SBR_1);
/* Make sure there are no other pending requests */
writeb_relaxed(0x00, base + MIPHY_PLL_SBR_1);
/* Rx PI controller settings */
writeb_relaxed(0xca, base + MIPHY_RX_K_GAIN);
/* MIPHY RX input bridge control */
/* INPUT_BRIDGE_EN_SW=1, manual input bridge control[0]=1 */
writeb_relaxed(0x21, base + MIPHY_RX_POWER_CTRL_1);
writeb_relaxed(0x29, base + MIPHY_RX_POWER_CTRL_1);
writeb_relaxed(0x1a, base + MIPHY_RX_POWER_CTRL_2);
/* MIPHY Reset for usb3 */
miphy28_usb3_miphy_reset(miphy_phy);
}
static inline int miphy_is_ready(struct miphy28lp_phy *miphy_phy)
{
u8 mask = HFC_PLL | HFC_RDY;
u8 val;
/*
* For PCIe and USB3 check only that PLL and HFC are ready
* For SATA check also that phy is ready!
*/
if (miphy_phy->type == PHY_TYPE_SATA)
mask |= PHY_RDY;
return readb_relaxed_poll_timeout(miphy_phy->base + MIPHY_STATUS_1,
val, (val & mask) == mask, 1,
5 * USEC_PER_SEC);
}
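/*
 * When the instance is flagged with "st,osc-rdy", wait for the external
 * oscillator ready bit in the sysconf status register before continuing.
 */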
static int miphy_osc_is_ready(struct miphy28lp_phy *miphy_phy)
{
struct miphy28lp_dev *miphy_dev = miphy_phy->phydev;
u32 val;
if (!miphy_phy->osc_rdy)
return 0;
if (!miphy_phy->syscfg_reg[SYSCFG_STATUS])
return -EINVAL;
return regmap_read_poll_timeout(miphy_dev->regmap,
miphy_phy->syscfg_reg[SYSCFG_STATUS],
val, val & MIPHY_OSC_RDY, 1,
5 * USEC_PER_SEC);
}
static int miphy28lp_get_resource_byname(struct device_node *child,
char *rname, struct resource *res)
{
int index;
index = of_property_match_string(child, "reg-names", rname);
if (index < 0)
return -ENODEV;
return of_address_to_resource(child, index, res);
}
static int miphy28lp_get_one_addr(struct device *dev,
struct device_node *child, char *rname,
void __iomem **base)
{
struct resource res;
int ret;
ret = miphy28lp_get_resource_byname(child, rname, &res);
if (!ret) {
*base = devm_ioremap(dev, res.start, resource_size(&res));
if (!*base) {
dev_err(dev, "failed to ioremap %s address region\n"
, rname);
return -ENOENT;
}
}
return 0;
}
/* MiPHY reset and sysconf setup */
static int miphy28lp_setup(struct miphy28lp_phy *miphy_phy, u32 miphy_val)
{
int err;
struct miphy28lp_dev *miphy_dev = miphy_phy->phydev;
if (!miphy_phy->syscfg_reg[SYSCFG_CTRL])
return -EINVAL;
err = reset_control_assert(miphy_phy->miphy_rst);
if (err) {
dev_err(miphy_dev->dev, "unable to bring out of miphy reset\n");
return err;
}
if (miphy_phy->osc_force_ext)
miphy_val |= MIPHY_OSC_FORCE_EXT;
regmap_update_bits(miphy_dev->regmap,
miphy_phy->syscfg_reg[SYSCFG_CTRL],
MIPHY_CTRL_MASK, miphy_val);
err = reset_control_deassert(miphy_phy->miphy_rst);
if (err) {
dev_err(miphy_dev->dev, "unable to bring out of miphy reset\n");
return err;
}
return miphy_osc_is_ready(miphy_phy);
}
static int miphy28lp_init_sata(struct miphy28lp_phy *miphy_phy)
{
struct miphy28lp_dev *miphy_dev = miphy_phy->phydev;
int err, sata_conf = SATA_CTRL_SELECT_SATA;
if ((!miphy_phy->syscfg_reg[SYSCFG_SATA]) ||
(!miphy_phy->syscfg_reg[SYSCFG_PCI]) ||
(!miphy_phy->base))
return -EINVAL;
dev_info(miphy_dev->dev, "sata-up mode, addr 0x%p\n", miphy_phy->base);
/* Configure the glue-logic */
sata_conf |= ((miphy_phy->sata_gen - SATA_GEN1) << SATA_SPDMODE);
regmap_update_bits(miphy_dev->regmap,
miphy_phy->syscfg_reg[SYSCFG_SATA],
SATA_CTRL_MASK, sata_conf);
regmap_update_bits(miphy_dev->regmap, miphy_phy->syscfg_reg[SYSCFG_PCI],
PCIE_CTRL_MASK, SATA_CTRL_SELECT_PCIE);
/* MiPHY path and clocking init */
err = miphy28lp_setup(miphy_phy, MIPHY_CTRL_DEFAULT);
if (err) {
dev_err(miphy_dev->dev, "SATA phy setup failed\n");
return err;
}
/* initialize miphy */
miphy28lp_configure_sata(miphy_phy);
return miphy_is_ready(miphy_phy);
}
static int miphy28lp_init_pcie(struct miphy28lp_phy *miphy_phy)
{
struct miphy28lp_dev *miphy_dev = miphy_phy->phydev;
int err;
if ((!miphy_phy->syscfg_reg[SYSCFG_SATA]) ||
(!miphy_phy->syscfg_reg[SYSCFG_PCI])
|| (!miphy_phy->base) || (!miphy_phy->pipebase))
return -EINVAL;
dev_info(miphy_dev->dev, "pcie-up mode, addr 0x%p\n", miphy_phy->base);
/* Configure the glue-logic */
regmap_update_bits(miphy_dev->regmap,
miphy_phy->syscfg_reg[SYSCFG_SATA],
SATA_CTRL_MASK, SATA_CTRL_SELECT_PCIE);
regmap_update_bits(miphy_dev->regmap, miphy_phy->syscfg_reg[SYSCFG_PCI],
PCIE_CTRL_MASK, SYSCFG_PCIE_PCIE_VAL);
/* MiPHY path and clocking init */
err = miphy28lp_setup(miphy_phy, MIPHY_CTRL_DEFAULT);
if (err) {
dev_err(miphy_dev->dev, "PCIe phy setup failed\n");
return err;
}
/* initialize miphy */
err = miphy28lp_configure_pcie(miphy_phy);
if (err)
return err;
/* PIPE Wrapper Configuration */
writeb_relaxed(0x68, miphy_phy->pipebase + 0x104); /* Rise_0 */
writeb_relaxed(0x61, miphy_phy->pipebase + 0x105); /* Rise_1 */
writeb_relaxed(0x68, miphy_phy->pipebase + 0x108); /* Fall_0 */
writeb_relaxed(0x61, miphy_phy->pipebase + 0x109); /* Fall_1 */
writeb_relaxed(0x68, miphy_phy->pipebase + 0x10c); /* Threshold_0 */
writeb_relaxed(0x60, miphy_phy->pipebase + 0x10d); /* Threshold_1 */
/* Wait for phy_ready */
return miphy_is_ready(miphy_phy);
}
static int miphy28lp_init_usb3(struct miphy28lp_phy *miphy_phy)
{
struct miphy28lp_dev *miphy_dev = miphy_phy->phydev;
int err;
if ((!miphy_phy->base) || (!miphy_phy->pipebase))
return -EINVAL;
dev_info(miphy_dev->dev, "usb3-up mode, addr 0x%p\n", miphy_phy->base);
/* MiPHY path and clocking init */
err = miphy28lp_setup(miphy_phy, MIPHY_CTRL_SYNC_D_EN);
if (err) {
dev_err(miphy_dev->dev, "USB3 phy setup failed\n");
return err;
}
/* initialize miphy */
miphy28lp_configure_usb3(miphy_phy);
/* PIPE Wrapper Configuration */
writeb_relaxed(0x68, miphy_phy->pipebase + 0x23);
writeb_relaxed(0x61, miphy_phy->pipebase + 0x24);
writeb_relaxed(0x68, miphy_phy->pipebase + 0x26);
writeb_relaxed(0x61, miphy_phy->pipebase + 0x27);
writeb_relaxed(0x18, miphy_phy->pipebase + 0x29);
writeb_relaxed(0x61, miphy_phy->pipebase + 0x2a);
/* pipe Wrapper usb3 TX swing de-emph margin PREEMPH[7:4], SWING[3:0] */
writeb_relaxed(0x67, miphy_phy->pipebase + 0x68);
writeb_relaxed(0x0d, miphy_phy->pipebase + 0x69);
writeb_relaxed(0x67, miphy_phy->pipebase + 0x6a);
writeb_relaxed(0x0d, miphy_phy->pipebase + 0x6b);
writeb_relaxed(0x67, miphy_phy->pipebase + 0x6c);
writeb_relaxed(0x0d, miphy_phy->pipebase + 0x6d);
writeb_relaxed(0x67, miphy_phy->pipebase + 0x6e);
writeb_relaxed(0x0d, miphy_phy->pipebase + 0x6f);
return miphy_is_ready(miphy_phy);
}
static int miphy28lp_init(struct phy *phy)
{
struct miphy28lp_phy *miphy_phy = phy_get_drvdata(phy);
struct miphy28lp_dev *miphy_dev = miphy_phy->phydev;
int ret;
mutex_lock(&miphy_dev->miphy_mutex);
switch (miphy_phy->type) {
case PHY_TYPE_SATA:
ret = miphy28lp_init_sata(miphy_phy);
break;
case PHY_TYPE_PCIE:
ret = miphy28lp_init_pcie(miphy_phy);
break;
case PHY_TYPE_USB3:
ret = miphy28lp_init_usb3(miphy_phy);
break;
default:
ret = -EINVAL;
break;
}
mutex_unlock(&miphy_dev->miphy_mutex);
return ret;
}
static int miphy28lp_get_addr(struct miphy28lp_phy *miphy_phy)
{
struct miphy28lp_dev *miphy_dev = miphy_phy->phydev;
struct device_node *phynode = miphy_phy->phy->dev.of_node;
int err;
if ((miphy_phy->type != PHY_TYPE_SATA) &&
(miphy_phy->type != PHY_TYPE_PCIE) &&
(miphy_phy->type != PHY_TYPE_USB3)) {
return -EINVAL;
}
err = miphy28lp_get_one_addr(miphy_dev->dev, phynode,
PHY_TYPE_name[miphy_phy->type - PHY_TYPE_SATA],
&miphy_phy->base);
if (err)
return err;
if ((miphy_phy->type == PHY_TYPE_PCIE) ||
(miphy_phy->type == PHY_TYPE_USB3)) {
err = miphy28lp_get_one_addr(miphy_dev->dev, phynode, "pipew",
&miphy_phy->pipebase);
if (err)
return err;
}
return 0;
}
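/*
 * Translate a #phy-cells = <1> specifier into one of the child PHYs: the
 * single argument selects the PHY type (PHY_TYPE_SATA/PCIE/USB3 from
 * dt-bindings), and the matching register regions are resolved here.
 */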
static struct phy *miphy28lp_xlate(struct device *dev,
struct of_phandle_args *args)
{
struct miphy28lp_dev *miphy_dev = dev_get_drvdata(dev);
struct miphy28lp_phy *miphy_phy = NULL;
struct device_node *phynode = args->np;
int ret, index = 0;
if (args->args_count != 1) {
dev_err(dev, "Invalid number of cells in 'phy' property\n");
return ERR_PTR(-EINVAL);
}
for (index = 0; index < miphy_dev->nphys; index++)
if (phynode == miphy_dev->phys[index]->phy->dev.of_node) {
miphy_phy = miphy_dev->phys[index];
break;
}
if (!miphy_phy) {
dev_err(dev, "Failed to find appropriate phy\n");
return ERR_PTR(-EINVAL);
}
miphy_phy->type = args->args[0];
ret = miphy28lp_get_addr(miphy_phy);
if (ret < 0)
return ERR_PTR(ret);
return miphy_phy->phy;
}
static const struct phy_ops miphy28lp_ops = {
.init = miphy28lp_init,
.owner = THIS_MODULE,
};
static int miphy28lp_probe_resets(struct device_node *node,
struct miphy28lp_phy *miphy_phy)
{
struct miphy28lp_dev *miphy_dev = miphy_phy->phydev;
int err;
miphy_phy->miphy_rst =
of_reset_control_get_shared(node, "miphy-sw-rst");
if (IS_ERR(miphy_phy->miphy_rst)) {
dev_err(miphy_dev->dev,
"miphy soft reset control not defined\n");
return PTR_ERR(miphy_phy->miphy_rst);
}
err = reset_control_deassert(miphy_phy->miphy_rst);
if (err) {
dev_err(miphy_dev->dev, "unable to bring out of miphy reset\n");
return err;
}
return 0;
}
static int miphy28lp_of_probe(struct device_node *np,
struct miphy28lp_phy *miphy_phy)
{
int i;
u32 ctrlreg;
miphy_phy->osc_force_ext =
of_property_read_bool(np, "st,osc-force-ext");
miphy_phy->osc_rdy = of_property_read_bool(np, "st,osc-rdy");
miphy_phy->px_rx_pol_inv =
of_property_read_bool(np, "st,px_rx_pol_inv");
miphy_phy->ssc = of_property_read_bool(np, "st,ssc-on");
miphy_phy->tx_impedance =
of_property_read_bool(np, "st,tx-impedance-comp");
of_property_read_u32(np, "st,sata-gen", &miphy_phy->sata_gen);
if (!miphy_phy->sata_gen)
miphy_phy->sata_gen = SATA_GEN1;
for (i = 0; i < SYSCFG_REG_MAX; i++) {
if (!of_property_read_u32_index(np, "st,syscfg", i, &ctrlreg))
miphy_phy->syscfg_reg[i] = ctrlreg;
}
return 0;
}
static int miphy28lp_probe(struct platform_device *pdev)
{
struct device_node *child, *np = pdev->dev.of_node;
struct miphy28lp_dev *miphy_dev;
struct phy_provider *provider;
struct phy *phy;
int ret, port = 0;
miphy_dev = devm_kzalloc(&pdev->dev, sizeof(*miphy_dev), GFP_KERNEL);
if (!miphy_dev)
return -ENOMEM;
miphy_dev->nphys = of_get_child_count(np);
miphy_dev->phys = devm_kcalloc(&pdev->dev, miphy_dev->nphys,
sizeof(*miphy_dev->phys), GFP_KERNEL);
if (!miphy_dev->phys)
return -ENOMEM;
miphy_dev->regmap = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
if (IS_ERR(miphy_dev->regmap)) {
dev_err(miphy_dev->dev, "No syscfg phandle specified\n");
return PTR_ERR(miphy_dev->regmap);
}
miphy_dev->dev = &pdev->dev;
dev_set_drvdata(&pdev->dev, miphy_dev);
mutex_init(&miphy_dev->miphy_mutex);
for_each_child_of_node(np, child) {
struct miphy28lp_phy *miphy_phy;
miphy_phy = devm_kzalloc(&pdev->dev, sizeof(*miphy_phy),
GFP_KERNEL);
if (!miphy_phy) {
ret = -ENOMEM;
goto put_child;
}
miphy_dev->phys[port] = miphy_phy;
phy = devm_phy_create(&pdev->dev, child, &miphy28lp_ops);
if (IS_ERR(phy)) {
dev_err(&pdev->dev, "failed to create PHY\n");
ret = PTR_ERR(phy);
goto put_child;
}
miphy_dev->phys[port]->phy = phy;
miphy_dev->phys[port]->phydev = miphy_dev;
ret = miphy28lp_of_probe(child, miphy_phy);
if (ret)
goto put_child;
ret = miphy28lp_probe_resets(child, miphy_dev->phys[port]);
if (ret)
goto put_child;
phy_set_drvdata(phy, miphy_dev->phys[port]);
port++;
}
provider = devm_of_phy_provider_register(&pdev->dev, miphy28lp_xlate);
return PTR_ERR_OR_ZERO(provider);
put_child:
of_node_put(child);
return ret;
}
static const struct of_device_id miphy28lp_of_match[] = {
{.compatible = "st,miphy28lp-phy", },
{},
};
MODULE_DEVICE_TABLE(of, miphy28lp_of_match);
static struct platform_driver miphy28lp_driver = {
.probe = miphy28lp_probe,
.driver = {
.name = "miphy28lp-phy",
.of_match_table = miphy28lp_of_match,
}
};
module_platform_driver(miphy28lp_driver);
MODULE_AUTHOR("Alexandre Torgue <[email protected]>");
MODULE_DESCRIPTION("STMicroelectronics miphy28lp driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/phy/st/phy-miphy28lp.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ST SPEAr1310-miphy driver
*
* Copyright (C) 2014 ST Microelectronics
* Pratyush Anand <[email protected]>
* Mohit Kumar <[email protected]>
*/
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
/* SPEAr1310 Registers */
#define SPEAR1310_PCIE_SATA_CFG 0x3A4
#define SPEAR1310_PCIE_SATA2_SEL_PCIE (0 << 31)
#define SPEAR1310_PCIE_SATA1_SEL_PCIE (0 << 30)
#define SPEAR1310_PCIE_SATA0_SEL_PCIE (0 << 29)
#define SPEAR1310_PCIE_SATA2_SEL_SATA BIT(31)
#define SPEAR1310_PCIE_SATA1_SEL_SATA BIT(30)
#define SPEAR1310_PCIE_SATA0_SEL_SATA BIT(29)
#define SPEAR1310_SATA2_CFG_TX_CLK_EN BIT(27)
#define SPEAR1310_SATA2_CFG_RX_CLK_EN BIT(26)
#define SPEAR1310_SATA2_CFG_POWERUP_RESET BIT(25)
#define SPEAR1310_SATA2_CFG_PM_CLK_EN BIT(24)
#define SPEAR1310_SATA1_CFG_TX_CLK_EN BIT(23)
#define SPEAR1310_SATA1_CFG_RX_CLK_EN BIT(22)
#define SPEAR1310_SATA1_CFG_POWERUP_RESET BIT(21)
#define SPEAR1310_SATA1_CFG_PM_CLK_EN BIT(20)
#define SPEAR1310_SATA0_CFG_TX_CLK_EN BIT(19)
#define SPEAR1310_SATA0_CFG_RX_CLK_EN BIT(18)
#define SPEAR1310_SATA0_CFG_POWERUP_RESET BIT(17)
#define SPEAR1310_SATA0_CFG_PM_CLK_EN BIT(16)
#define SPEAR1310_PCIE2_CFG_DEVICE_PRESENT BIT(11)
#define SPEAR1310_PCIE2_CFG_POWERUP_RESET BIT(10)
#define SPEAR1310_PCIE2_CFG_CORE_CLK_EN BIT(9)
#define SPEAR1310_PCIE2_CFG_AUX_CLK_EN BIT(8)
#define SPEAR1310_PCIE1_CFG_DEVICE_PRESENT BIT(7)
#define SPEAR1310_PCIE1_CFG_POWERUP_RESET BIT(6)
#define SPEAR1310_PCIE1_CFG_CORE_CLK_EN BIT(5)
#define SPEAR1310_PCIE1_CFG_AUX_CLK_EN BIT(4)
#define SPEAR1310_PCIE0_CFG_DEVICE_PRESENT BIT(3)
#define SPEAR1310_PCIE0_CFG_POWERUP_RESET BIT(2)
#define SPEAR1310_PCIE0_CFG_CORE_CLK_EN BIT(1)
#define SPEAR1310_PCIE0_CFG_AUX_CLK_EN BIT(0)
#define SPEAR1310_PCIE_CFG_MASK(x) ((0xF << (x * 4)) | BIT((x + 29)))
#define SPEAR1310_SATA_CFG_MASK(x) ((0xF << (x * 4 + 16)) | \
BIT((x + 29)))
#define SPEAR1310_PCIE_CFG_VAL(x) \
(SPEAR1310_PCIE_SATA##x##_SEL_PCIE | \
SPEAR1310_PCIE##x##_CFG_AUX_CLK_EN | \
SPEAR1310_PCIE##x##_CFG_CORE_CLK_EN | \
SPEAR1310_PCIE##x##_CFG_POWERUP_RESET | \
SPEAR1310_PCIE##x##_CFG_DEVICE_PRESENT)
#define SPEAR1310_SATA_CFG_VAL(x) \
(SPEAR1310_PCIE_SATA##x##_SEL_SATA | \
SPEAR1310_SATA##x##_CFG_PM_CLK_EN | \
SPEAR1310_SATA##x##_CFG_POWERUP_RESET | \
SPEAR1310_SATA##x##_CFG_RX_CLK_EN | \
SPEAR1310_SATA##x##_CFG_TX_CLK_EN)
#define SPEAR1310_PCIE_MIPHY_CFG_1 0x3A8
#define SPEAR1310_MIPHY_DUAL_OSC_BYPASS_EXT BIT(31)
#define SPEAR1310_MIPHY_DUAL_CLK_REF_DIV2 BIT(28)
#define SPEAR1310_MIPHY_DUAL_PLL_RATIO_TOP(x) (x << 16)
#define SPEAR1310_MIPHY_SINGLE_OSC_BYPASS_EXT BIT(15)
#define SPEAR1310_MIPHY_SINGLE_CLK_REF_DIV2 BIT(12)
#define SPEAR1310_MIPHY_SINGLE_PLL_RATIO_TOP(x) (x << 0)
#define SPEAR1310_PCIE_SATA_MIPHY_CFG_SATA_MASK (0xFFFF)
#define SPEAR1310_PCIE_SATA_MIPHY_CFG_PCIE_MASK (0xFFFF << 16)
#define SPEAR1310_PCIE_SATA_MIPHY_CFG_SATA \
(SPEAR1310_MIPHY_DUAL_OSC_BYPASS_EXT | \
SPEAR1310_MIPHY_DUAL_CLK_REF_DIV2 | \
SPEAR1310_MIPHY_DUAL_PLL_RATIO_TOP(60) | \
SPEAR1310_MIPHY_SINGLE_OSC_BYPASS_EXT | \
SPEAR1310_MIPHY_SINGLE_CLK_REF_DIV2 | \
SPEAR1310_MIPHY_SINGLE_PLL_RATIO_TOP(60))
#define SPEAR1310_PCIE_SATA_MIPHY_CFG_SATA_25M_CRYSTAL_CLK \
(SPEAR1310_MIPHY_SINGLE_PLL_RATIO_TOP(120))
#define SPEAR1310_PCIE_SATA_MIPHY_CFG_PCIE \
(SPEAR1310_MIPHY_DUAL_OSC_BYPASS_EXT | \
SPEAR1310_MIPHY_DUAL_PLL_RATIO_TOP(25) | \
SPEAR1310_MIPHY_SINGLE_OSC_BYPASS_EXT | \
SPEAR1310_MIPHY_SINGLE_PLL_RATIO_TOP(25))
#define SPEAR1310_PCIE_MIPHY_CFG_2 0x3AC
enum spear1310_miphy_mode {
SATA,
PCIE,
};
struct spear1310_miphy_priv {
/* instance id of this phy */
u32 id;
/* phy mode: 0 for SATA 1 for PCIe */
enum spear1310_miphy_mode mode;
/* regmap for any soc specific misc registers */
struct regmap *misc;
/* phy struct pointer */
struct phy *phy;
};
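/*
 * Route this MiPHY instance to the PCIe controller: program the shared MiPHY
 * PLL configuration, then set the per-instance clock, reset and
 * device-present bits in the PCIE_SATA_CFG glue register.
 */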
static int spear1310_miphy_pcie_init(struct spear1310_miphy_priv *priv)
{
u32 val;
regmap_update_bits(priv->misc, SPEAR1310_PCIE_MIPHY_CFG_1,
SPEAR1310_PCIE_SATA_MIPHY_CFG_PCIE_MASK,
SPEAR1310_PCIE_SATA_MIPHY_CFG_PCIE);
switch (priv->id) {
case 0:
val = SPEAR1310_PCIE_CFG_VAL(0);
break;
case 1:
val = SPEAR1310_PCIE_CFG_VAL(1);
break;
case 2:
val = SPEAR1310_PCIE_CFG_VAL(2);
break;
default:
return -EINVAL;
}
regmap_update_bits(priv->misc, SPEAR1310_PCIE_SATA_CFG,
SPEAR1310_PCIE_CFG_MASK(priv->id), val);
return 0;
}
static int spear1310_miphy_pcie_exit(struct spear1310_miphy_priv *priv)
{
regmap_update_bits(priv->misc, SPEAR1310_PCIE_SATA_CFG,
SPEAR1310_PCIE_CFG_MASK(priv->id), 0);
regmap_update_bits(priv->misc, SPEAR1310_PCIE_MIPHY_CFG_1,
SPEAR1310_PCIE_SATA_MIPHY_CFG_PCIE_MASK, 0);
return 0;
}
static int spear1310_miphy_init(struct phy *phy)
{
struct spear1310_miphy_priv *priv = phy_get_drvdata(phy);
int ret = 0;
if (priv->mode == PCIE)
ret = spear1310_miphy_pcie_init(priv);
return ret;
}
static int spear1310_miphy_exit(struct phy *phy)
{
struct spear1310_miphy_priv *priv = phy_get_drvdata(phy);
int ret = 0;
if (priv->mode == PCIE)
ret = spear1310_miphy_pcie_exit(priv);
return ret;
}
static const struct of_device_id spear1310_miphy_of_match[] = {
{ .compatible = "st,spear1310-miphy" },
{ },
};
MODULE_DEVICE_TABLE(of, spear1310_miphy_of_match);
static const struct phy_ops spear1310_miphy_ops = {
.init = spear1310_miphy_init,
.exit = spear1310_miphy_exit,
.owner = THIS_MODULE,
};
static struct phy *spear1310_miphy_xlate(struct device *dev,
struct of_phandle_args *args)
{
struct spear1310_miphy_priv *priv = dev_get_drvdata(dev);
if (args->args_count < 1) {
dev_err(dev, "DT did not pass correct no of args\n");
return ERR_PTR(-ENODEV);
}
priv->mode = args->args[0];
if (priv->mode != SATA && priv->mode != PCIE) {
dev_err(dev, "DT did not pass correct phy mode\n");
return ERR_PTR(-ENODEV);
}
return priv->phy;
}
static int spear1310_miphy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct spear1310_miphy_priv *priv;
struct phy_provider *phy_provider;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->misc =
syscon_regmap_lookup_by_phandle(dev->of_node, "misc");
if (IS_ERR(priv->misc)) {
dev_err(dev, "failed to find misc regmap\n");
return PTR_ERR(priv->misc);
}
if (of_property_read_u32(dev->of_node, "phy-id", &priv->id)) {
dev_err(dev, "failed to find phy id\n");
return -EINVAL;
}
priv->phy = devm_phy_create(dev, NULL, &spear1310_miphy_ops);
if (IS_ERR(priv->phy)) {
dev_err(dev, "failed to create SATA PCIe PHY\n");
return PTR_ERR(priv->phy);
}
dev_set_drvdata(dev, priv);
phy_set_drvdata(priv->phy, priv);
phy_provider =
devm_of_phy_provider_register(dev, spear1310_miphy_xlate);
if (IS_ERR(phy_provider)) {
dev_err(dev, "failed to register phy provider\n");
return PTR_ERR(phy_provider);
}
return 0;
}
static struct platform_driver spear1310_miphy_driver = {
.probe = spear1310_miphy_probe,
.driver = {
.name = "spear1310-miphy",
.of_match_table = spear1310_miphy_of_match,
},
};
module_platform_driver(spear1310_miphy_driver);
MODULE_DESCRIPTION("ST SPEAR1310-MIPHY driver");
MODULE_AUTHOR("Pratyush Anand <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/phy/st/phy-spear1310-miphy.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ST spear1340-miphy driver
*
* Copyright (C) 2014 ST Microelectronics
* Pratyush Anand <[email protected]>
* Mohit Kumar <[email protected]>
*/
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
/* SPEAr1340 Registers */
/* Power Management Registers */
#define SPEAR1340_PCM_CFG 0x100
#define SPEAR1340_PCM_CFG_SATA_POWER_EN BIT(11)
#define SPEAR1340_PCM_WKUP_CFG 0x104
#define SPEAR1340_SWITCH_CTR 0x108
#define SPEAR1340_PERIP1_SW_RST 0x318
#define SPEAR1340_PERIP1_SW_RSATA BIT(12)
#define SPEAR1340_PERIP2_SW_RST 0x31C
#define SPEAR1340_PERIP3_SW_RST 0x320
/* PCIE - SATA configuration registers */
#define SPEAR1340_PCIE_SATA_CFG 0x424
/* PCIE CFG MASks */
#define SPEAR1340_PCIE_CFG_DEVICE_PRESENT BIT(11)
#define SPEAR1340_PCIE_CFG_POWERUP_RESET BIT(10)
#define SPEAR1340_PCIE_CFG_CORE_CLK_EN BIT(9)
#define SPEAR1340_PCIE_CFG_AUX_CLK_EN BIT(8)
#define SPEAR1340_SATA_CFG_TX_CLK_EN BIT(4)
#define SPEAR1340_SATA_CFG_RX_CLK_EN BIT(3)
#define SPEAR1340_SATA_CFG_POWERUP_RESET BIT(2)
#define SPEAR1340_SATA_CFG_PM_CLK_EN BIT(1)
#define SPEAR1340_PCIE_SATA_SEL_PCIE (0)
#define SPEAR1340_PCIE_SATA_SEL_SATA (1)
#define SPEAR1340_PCIE_SATA_CFG_MASK 0xF1F
#define SPEAR1340_PCIE_CFG_VAL (SPEAR1340_PCIE_SATA_SEL_PCIE | \
SPEAR1340_PCIE_CFG_AUX_CLK_EN | \
SPEAR1340_PCIE_CFG_CORE_CLK_EN | \
SPEAR1340_PCIE_CFG_POWERUP_RESET | \
SPEAR1340_PCIE_CFG_DEVICE_PRESENT)
#define SPEAR1340_SATA_CFG_VAL (SPEAR1340_PCIE_SATA_SEL_SATA | \
SPEAR1340_SATA_CFG_PM_CLK_EN | \
SPEAR1340_SATA_CFG_POWERUP_RESET | \
SPEAR1340_SATA_CFG_RX_CLK_EN | \
SPEAR1340_SATA_CFG_TX_CLK_EN)
#define SPEAR1340_PCIE_MIPHY_CFG 0x428
#define SPEAR1340_MIPHY_OSC_BYPASS_EXT BIT(31)
#define SPEAR1340_MIPHY_CLK_REF_DIV2 BIT(27)
#define SPEAR1340_MIPHY_CLK_REF_DIV4 (2 << 27)
#define SPEAR1340_MIPHY_CLK_REF_DIV8 (3 << 27)
#define SPEAR1340_MIPHY_PLL_RATIO_TOP(x) (x << 0)
#define SPEAR1340_PCIE_MIPHY_CFG_MASK 0xF80000FF
#define SPEAR1340_PCIE_SATA_MIPHY_CFG_SATA \
(SPEAR1340_MIPHY_OSC_BYPASS_EXT | \
SPEAR1340_MIPHY_CLK_REF_DIV2 | \
SPEAR1340_MIPHY_PLL_RATIO_TOP(60))
#define SPEAR1340_PCIE_SATA_MIPHY_CFG_SATA_25M_CRYSTAL_CLK \
(SPEAR1340_MIPHY_PLL_RATIO_TOP(120))
#define SPEAR1340_PCIE_SATA_MIPHY_CFG_PCIE \
(SPEAR1340_MIPHY_OSC_BYPASS_EXT | \
SPEAR1340_MIPHY_PLL_RATIO_TOP(25))
enum spear1340_miphy_mode {
SATA,
PCIE,
};
struct spear1340_miphy_priv {
/* phy mode: 0 for SATA 1 for PCIe */
enum spear1340_miphy_mode mode;
/* regmap for any soc specific misc registers */
struct regmap *misc;
/* phy struct pointer */
struct phy *phy;
};
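/*
 * SATA bring-up: select SATA in the glue logic, program the PLL ratio for the
 * 25 MHz crystal reference, switch the SATA power island on and release the
 * controller reset, with settle delays between the steps.
 */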
static int spear1340_miphy_sata_init(struct spear1340_miphy_priv *priv)
{
regmap_update_bits(priv->misc, SPEAR1340_PCIE_SATA_CFG,
SPEAR1340_PCIE_SATA_CFG_MASK,
SPEAR1340_SATA_CFG_VAL);
regmap_update_bits(priv->misc, SPEAR1340_PCIE_MIPHY_CFG,
SPEAR1340_PCIE_MIPHY_CFG_MASK,
SPEAR1340_PCIE_SATA_MIPHY_CFG_SATA_25M_CRYSTAL_CLK);
/* Switch on sata power domain */
regmap_update_bits(priv->misc, SPEAR1340_PCM_CFG,
SPEAR1340_PCM_CFG_SATA_POWER_EN,
SPEAR1340_PCM_CFG_SATA_POWER_EN);
/* Wait for SATA power domain on */
msleep(20);
/* Disable PCIE SATA Controller reset */
regmap_update_bits(priv->misc, SPEAR1340_PERIP1_SW_RST,
SPEAR1340_PERIP1_SW_RSATA, 0);
/* Wait for SATA reset de-assert completion */
msleep(20);
return 0;
}
static int spear1340_miphy_sata_exit(struct spear1340_miphy_priv *priv)
{
regmap_update_bits(priv->misc, SPEAR1340_PCIE_SATA_CFG,
SPEAR1340_PCIE_SATA_CFG_MASK, 0);
regmap_update_bits(priv->misc, SPEAR1340_PCIE_MIPHY_CFG,
SPEAR1340_PCIE_MIPHY_CFG_MASK, 0);
/* Enable PCIE SATA Controller reset */
regmap_update_bits(priv->misc, SPEAR1340_PERIP1_SW_RST,
SPEAR1340_PERIP1_SW_RSATA,
SPEAR1340_PERIP1_SW_RSATA);
/* Wait for SATA reset assert completion */
msleep(20);
/* Switch off sata power domain */
regmap_update_bits(priv->misc, SPEAR1340_PCM_CFG,
SPEAR1340_PCM_CFG_SATA_POWER_EN, 0);
/* Wait for SATA power domain off */
msleep(20);
return 0;
}
static int spear1340_miphy_pcie_init(struct spear1340_miphy_priv *priv)
{
regmap_update_bits(priv->misc, SPEAR1340_PCIE_MIPHY_CFG,
SPEAR1340_PCIE_MIPHY_CFG_MASK,
SPEAR1340_PCIE_SATA_MIPHY_CFG_PCIE);
regmap_update_bits(priv->misc, SPEAR1340_PCIE_SATA_CFG,
SPEAR1340_PCIE_SATA_CFG_MASK,
SPEAR1340_PCIE_CFG_VAL);
return 0;
}
static int spear1340_miphy_pcie_exit(struct spear1340_miphy_priv *priv)
{
regmap_update_bits(priv->misc, SPEAR1340_PCIE_MIPHY_CFG,
SPEAR1340_PCIE_MIPHY_CFG_MASK, 0);
regmap_update_bits(priv->misc, SPEAR1340_PCIE_SATA_CFG,
SPEAR1340_PCIE_SATA_CFG_MASK, 0);
return 0;
}
static int spear1340_miphy_init(struct phy *phy)
{
struct spear1340_miphy_priv *priv = phy_get_drvdata(phy);
int ret = 0;
if (priv->mode == SATA)
ret = spear1340_miphy_sata_init(priv);
else if (priv->mode == PCIE)
ret = spear1340_miphy_pcie_init(priv);
return ret;
}
static int spear1340_miphy_exit(struct phy *phy)
{
struct spear1340_miphy_priv *priv = phy_get_drvdata(phy);
int ret = 0;
if (priv->mode == SATA)
ret = spear1340_miphy_sata_exit(priv);
else if (priv->mode == PCIE)
ret = spear1340_miphy_pcie_exit(priv);
return ret;
}
static const struct of_device_id spear1340_miphy_of_match[] = {
{ .compatible = "st,spear1340-miphy" },
{ },
};
MODULE_DEVICE_TABLE(of, spear1340_miphy_of_match);
static const struct phy_ops spear1340_miphy_ops = {
.init = spear1340_miphy_init,
.exit = spear1340_miphy_exit,
.owner = THIS_MODULE,
};
#ifdef CONFIG_PM_SLEEP
static int spear1340_miphy_suspend(struct device *dev)
{
struct spear1340_miphy_priv *priv = dev_get_drvdata(dev);
int ret = 0;
if (priv->mode == SATA)
ret = spear1340_miphy_sata_exit(priv);
return ret;
}
static int spear1340_miphy_resume(struct device *dev)
{
struct spear1340_miphy_priv *priv = dev_get_drvdata(dev);
int ret = 0;
if (priv->mode == SATA)
ret = spear1340_miphy_sata_init(priv);
return ret;
}
#endif
static SIMPLE_DEV_PM_OPS(spear1340_miphy_pm_ops, spear1340_miphy_suspend,
spear1340_miphy_resume);
static struct phy *spear1340_miphy_xlate(struct device *dev,
struct of_phandle_args *args)
{
struct spear1340_miphy_priv *priv = dev_get_drvdata(dev);
if (args->args_count < 1) {
dev_err(dev, "DT did not pass correct no of args\n");
return ERR_PTR(-ENODEV);
}
priv->mode = args->args[0];
if (priv->mode != SATA && priv->mode != PCIE) {
dev_err(dev, "DT did not pass correct phy mode\n");
return ERR_PTR(-ENODEV);
}
return priv->phy;
}
static int spear1340_miphy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct spear1340_miphy_priv *priv;
struct phy_provider *phy_provider;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->misc =
syscon_regmap_lookup_by_phandle(dev->of_node, "misc");
if (IS_ERR(priv->misc)) {
dev_err(dev, "failed to find misc regmap\n");
return PTR_ERR(priv->misc);
}
priv->phy = devm_phy_create(dev, NULL, &spear1340_miphy_ops);
if (IS_ERR(priv->phy)) {
dev_err(dev, "failed to create SATA PCIe PHY\n");
return PTR_ERR(priv->phy);
}
dev_set_drvdata(dev, priv);
phy_set_drvdata(priv->phy, priv);
phy_provider =
devm_of_phy_provider_register(dev, spear1340_miphy_xlate);
if (IS_ERR(phy_provider)) {
dev_err(dev, "failed to register phy provider\n");
return PTR_ERR(phy_provider);
}
return 0;
}
static struct platform_driver spear1340_miphy_driver = {
.probe = spear1340_miphy_probe,
.driver = {
.name = "spear1340-miphy",
.pm = &spear1340_miphy_pm_ops,
.of_match_table = spear1340_miphy_of_match,
},
};
module_platform_driver(spear1340_miphy_driver);
MODULE_DESCRIPTION("ST SPEAR1340-MIPHY driver");
MODULE_AUTHOR("Pratyush Anand <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/phy/st/phy-spear1340-miphy.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2014 STMicroelectronics
*
* STMicroelectronics Generic PHY driver for STiH407 USB2.
*
* Author: Giuseppe Cavallaro <[email protected]>
*/
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/mfd/syscon.h>
#include <linux/phy/phy.h>
#define PHYPARAM_REG 1
#define PHYCTRL_REG 2
/* Default PHY_SEL and REFCLKSEL configuration */
#define STIH407_USB_PICOPHY_CTRL_PORT_CONF 0x6
#define STIH407_USB_PICOPHY_CTRL_PORT_MASK 0x1f
/* ports parameters overriding */
#define STIH407_USB_PICOPHY_PARAM_DEF 0x39a4dc
#define STIH407_USB_PICOPHY_PARAM_MASK 0xffffffff
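/*
 * Illustrative device tree usage (editorial note; offsets are placeholders
 * and the reset properties required by probe are omitted):
 *
 *	usb2_picophy0: phy1 {
 *		compatible = "st,stih407-usb2-phy";
 *		st,syscfg = <&syscfg_core 0x100 0xf4>;
 *		#phy-cells = <0>;
 *	};
 *
 * Index 0 of "st,syscfg" is the syscon phandle, index PHYPARAM_REG (1) holds
 * the phyparam register offset and index PHYCTRL_REG (2) the phyctrl offset,
 * matching the of_property_read_u32_index() calls in the probe function.
 */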
struct stih407_usb2_picophy {
struct phy *phy;
struct regmap *regmap;
struct device *dev;
struct reset_control *rstc;
struct reset_control *rstport;
int ctrl;
int param;
};
static int stih407_usb2_pico_ctrl(struct stih407_usb2_picophy *phy_dev)
{
reset_control_deassert(phy_dev->rstc);
return regmap_update_bits(phy_dev->regmap, phy_dev->ctrl,
STIH407_USB_PICOPHY_CTRL_PORT_MASK,
STIH407_USB_PICOPHY_CTRL_PORT_CONF);
}
static int stih407_usb2_init_port(struct phy *phy)
{
int ret;
struct stih407_usb2_picophy *phy_dev = phy_get_drvdata(phy);
stih407_usb2_pico_ctrl(phy_dev);
ret = regmap_update_bits(phy_dev->regmap,
phy_dev->param,
STIH407_USB_PICOPHY_PARAM_MASK,
STIH407_USB_PICOPHY_PARAM_DEF);
if (ret)
return ret;
return reset_control_deassert(phy_dev->rstport);
}
static int stih407_usb2_exit_port(struct phy *phy)
{
struct stih407_usb2_picophy *phy_dev = phy_get_drvdata(phy);
/*
* Only port reset is asserted, phy global reset is kept untouched
* as other ports may still be active. When all ports are in reset
* state, assumption is made that power will be cut off on the phy, in
* case of suspend for instance. Theoretically, asserting individual
* reset (like here) or global reset should be equivalent.
*/
return reset_control_assert(phy_dev->rstport);
}
static const struct phy_ops stih407_usb2_picophy_data = {
.init = stih407_usb2_init_port,
.exit = stih407_usb2_exit_port,
.owner = THIS_MODULE,
};
static int stih407_usb2_picophy_probe(struct platform_device *pdev)
{
struct stih407_usb2_picophy *phy_dev;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct phy_provider *phy_provider;
struct phy *phy;
int ret;
phy_dev = devm_kzalloc(dev, sizeof(*phy_dev), GFP_KERNEL);
if (!phy_dev)
return -ENOMEM;
phy_dev->dev = dev;
dev_set_drvdata(dev, phy_dev);
phy_dev->rstc = devm_reset_control_get_shared(dev, "global");
if (IS_ERR(phy_dev->rstc)) {
dev_err(dev, "failed to ctrl picoPHY reset\n");
return PTR_ERR(phy_dev->rstc);
}
phy_dev->rstport = devm_reset_control_get_exclusive(dev, "port");
if (IS_ERR(phy_dev->rstport)) {
dev_err(dev, "failed to ctrl picoPHY reset\n");
return PTR_ERR(phy_dev->rstport);
}
/* Reset port by default: only deassert it in phy init */
reset_control_assert(phy_dev->rstport);
phy_dev->regmap = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
if (IS_ERR(phy_dev->regmap)) {
dev_err(dev, "No syscfg phandle specified\n");
return PTR_ERR(phy_dev->regmap);
}
ret = of_property_read_u32_index(np, "st,syscfg", PHYPARAM_REG,
&phy_dev->param);
if (ret) {
dev_err(dev, "can't get phyparam offset (%d)\n", ret);
return ret;
}
ret = of_property_read_u32_index(np, "st,syscfg", PHYCTRL_REG,
&phy_dev->ctrl);
if (ret) {
dev_err(dev, "can't get phyctrl offset (%d)\n", ret);
return ret;
}
phy = devm_phy_create(dev, NULL, &stih407_usb2_picophy_data);
if (IS_ERR(phy)) {
dev_err(dev, "failed to create Display Port PHY\n");
return PTR_ERR(phy);
}
phy_dev->phy = phy;
phy_set_drvdata(phy, phy_dev);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
if (IS_ERR(phy_provider))
return PTR_ERR(phy_provider);
dev_info(dev, "STiH407 USB Generic picoPHY driver probed!");
return 0;
}
static const struct of_device_id stih407_usb2_picophy_of_match[] = {
{ .compatible = "st,stih407-usb2-phy" },
{ /*sentinel */ },
};
MODULE_DEVICE_TABLE(of, stih407_usb2_picophy_of_match);
static struct platform_driver stih407_usb2_picophy_driver = {
.probe = stih407_usb2_picophy_probe,
.driver = {
.name = "stih407-usb-genphy",
.of_match_table = stih407_usb2_picophy_of_match,
}
};
module_platform_driver(stih407_usb2_picophy_driver);
MODULE_AUTHOR("Giuseppe Cavallaro <[email protected]>");
MODULE_DESCRIPTION("STMicroelectronics Generic picoPHY driver for STiH407");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/phy/st/phy-stih407-usb.c |
// SPDX-License-Identifier: GPL-2.0
/*
* STMicroelectronics STM32 USB PHY Controller driver
*
* Copyright (C) 2018 STMicroelectronics
* Author(s): Amelie Delaunay <[email protected]>.
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/units.h>
#define STM32_USBPHYC_PLL 0x0
#define STM32_USBPHYC_MISC 0x8
#define STM32_USBPHYC_MONITOR(X) (0x108 + ((X) * 0x100))
#define STM32_USBPHYC_TUNE(X) (0x10C + ((X) * 0x100))
#define STM32_USBPHYC_VERSION 0x3F4
/* STM32_USBPHYC_PLL bit fields */
#define PLLNDIV GENMASK(6, 0)
#define PLLFRACIN GENMASK(25, 10)
#define PLLEN BIT(26)
#define PLLSTRB BIT(27)
#define PLLSTRBYP BIT(28)
#define PLLFRACCTL BIT(29)
#define PLLDITHEN0 BIT(30)
#define PLLDITHEN1 BIT(31)
/* STM32_USBPHYC_MISC bit fields */
#define SWITHOST BIT(0)
/* STM32_USBPHYC_MONITOR bit fields */
#define STM32_USBPHYC_MON_OUT GENMASK(3, 0)
#define STM32_USBPHYC_MON_SEL GENMASK(8, 4)
#define STM32_USBPHYC_MON_SEL_LOCKP 0x1F
#define STM32_USBPHYC_MON_OUT_LOCKP BIT(3)
/* STM32_USBPHYC_TUNE bit fields */
#define INCURREN BIT(0)
#define INCURRINT BIT(1)
#define LFSCAPEN BIT(2)
#define HSDRVSLEW BIT(3)
#define HSDRVDCCUR BIT(4)
#define HSDRVDCLEV BIT(5)
#define HSDRVCURINCR BIT(6)
#define FSDRVRFADJ BIT(7)
#define HSDRVRFRED BIT(8)
#define HSDRVCHKITRM GENMASK(12, 9)
#define HSDRVCHKZTRM GENMASK(14, 13)
#define OTPCOMP GENMASK(19, 15)
#define SQLCHCTL GENMASK(21, 20)
#define HDRXGNEQEN BIT(22)
#define HSRXOFF GENMASK(24, 23)
#define HSFALLPREEM BIT(25)
#define SHTCCTCTLPROT BIT(26)
#define STAGSEL BIT(27)
enum boosting_vals {
BOOST_1000_UA = 1000,
BOOST_2000_UA = 2000,
};
enum dc_level_vals {
DC_NOMINAL,
DC_PLUS_5_TO_7_MV,
DC_PLUS_10_TO_14_MV,
DC_MINUS_5_TO_7_MV,
DC_MAX,
};
enum current_trim {
CUR_NOMINAL,
CUR_PLUS_1_56_PCT,
CUR_PLUS_3_12_PCT,
CUR_PLUS_4_68_PCT,
CUR_PLUS_6_24_PCT,
CUR_PLUS_7_8_PCT,
CUR_PLUS_9_36_PCT,
CUR_PLUS_10_92_PCT,
CUR_PLUS_12_48_PCT,
CUR_PLUS_14_04_PCT,
CUR_PLUS_15_6_PCT,
CUR_PLUS_17_16_PCT,
CUR_PLUS_19_01_PCT,
CUR_PLUS_20_58_PCT,
CUR_PLUS_22_16_PCT,
CUR_PLUS_23_73_PCT,
CUR_MAX,
};
enum impedance_trim {
IMP_NOMINAL,
IMP_MINUS_2_OHMS,
IMP_MINUS_4_OHMS,
IMP_MINUS_6_OHMS,
IMP_MAX,
};
enum squelch_level {
SQLCH_NOMINAL,
SQLCH_PLUS_7_MV,
SQLCH_MINUS_5_MV,
SQLCH_PLUS_14_MV,
SQLCH_MAX,
};
enum rx_offset {
NO_RX_OFFSET,
RX_OFFSET_PLUS_5_MV,
RX_OFFSET_PLUS_10_MV,
RX_OFFSET_MINUS_5_MV,
RX_OFFSET_MAX,
};
/* STM32_USBPHYC_VERSION bit fields */
#define MINREV GENMASK(3, 0)
#define MAJREV GENMASK(7, 4)
#define PLL_FVCO_MHZ 2880
#define PLL_INFF_MIN_RATE_HZ 19200000
#define PLL_INFF_MAX_RATE_HZ 38400000
struct pll_params {
u8 ndiv;
u16 frac;
};
struct stm32_usbphyc_phy {
struct phy *phy;
struct stm32_usbphyc *usbphyc;
struct regulator *vbus;
u32 index;
bool active;
u32 tune;
};
struct stm32_usbphyc {
struct device *dev;
void __iomem *base;
struct clk *clk;
struct reset_control *rst;
struct stm32_usbphyc_phy **phys;
int nphys;
struct regulator *vdda1v1;
struct regulator *vdda1v8;
atomic_t n_pll_cons;
struct clk_hw clk48_hw;
int switch_setup;
};
static inline void stm32_usbphyc_set_bits(void __iomem *reg, u32 bits)
{
writel_relaxed(readl_relaxed(reg) | bits, reg);
}
static inline void stm32_usbphyc_clr_bits(void __iomem *reg, u32 bits)
{
writel_relaxed(readl_relaxed(reg) & ~bits, reg);
}
static int stm32_usbphyc_regulators_enable(struct stm32_usbphyc *usbphyc)
{
int ret;
ret = regulator_enable(usbphyc->vdda1v1);
if (ret)
return ret;
ret = regulator_enable(usbphyc->vdda1v8);
if (ret)
goto vdda1v1_disable;
return 0;
vdda1v1_disable:
regulator_disable(usbphyc->vdda1v1);
return ret;
}
static int stm32_usbphyc_regulators_disable(struct stm32_usbphyc *usbphyc)
{
int ret;
ret = regulator_disable(usbphyc->vdda1v8);
if (ret)
return ret;
ret = regulator_disable(usbphyc->vdda1v1);
if (ret)
return ret;
return 0;
}
static void stm32_usbphyc_get_pll_params(u32 clk_rate,
struct pll_params *pll_params)
{
unsigned long long fvco, ndiv, frac;
/* _
* | FVCO = INFF*2*(NDIV + FRACT/2^16) when DITHER_DISABLE[1] = 1
* | FVCO = 2880MHz
* <
* | NDIV = integer part of input bits to set the LDF
* |_FRACT = fractional part of input bits to set the LDF
* => PLLNDIV = integer part of (FVCO / (INFF*2))
* => PLLFRACIN = fractional part of(FVCO / INFF*2) * 2^16
* <=> PLLFRACIN = ((FVCO / (INFF*2)) - PLLNDIV) * 2^16
*/
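/*
 * Worked example (editorial, not from the original source): with a 24 MHz
 * reference clock FVCO / (INFF * 2) = 2880 / 48 = 60, so PLLNDIV = 60 and
 * PLLFRACIN = 0; with a 38.4 MHz reference the ratio is 37.5, giving
 * PLLNDIV = 37 and PLLFRACIN = 0.5 * 2^16 = 32768.
 */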
fvco = (unsigned long long)PLL_FVCO_MHZ * HZ_PER_MHZ;
ndiv = fvco;
do_div(ndiv, (clk_rate * 2));
pll_params->ndiv = (u8)ndiv;
frac = fvco * (1 << 16);
do_div(frac, (clk_rate * 2));
frac = frac - (ndiv * (1 << 16));
pll_params->frac = (u16)frac;
}
static int stm32_usbphyc_pll_init(struct stm32_usbphyc *usbphyc)
{
struct pll_params pll_params;
u32 clk_rate = clk_get_rate(usbphyc->clk);
u32 ndiv, frac;
u32 usbphyc_pll;
if ((clk_rate < PLL_INFF_MIN_RATE_HZ) ||
(clk_rate > PLL_INFF_MAX_RATE_HZ)) {
dev_err(usbphyc->dev, "input clk freq (%dHz) out of range\n",
clk_rate);
return -EINVAL;
}
stm32_usbphyc_get_pll_params(clk_rate, &pll_params);
ndiv = FIELD_PREP(PLLNDIV, pll_params.ndiv);
frac = FIELD_PREP(PLLFRACIN, pll_params.frac);
usbphyc_pll = PLLDITHEN1 | PLLDITHEN0 | PLLSTRBYP | ndiv;
if (pll_params.frac)
usbphyc_pll |= PLLFRACCTL | frac;
writel_relaxed(usbphyc_pll, usbphyc->base + STM32_USBPHYC_PLL);
dev_dbg(usbphyc->dev, "input clk freq=%dHz, ndiv=%lu, frac=%lu\n",
clk_rate, FIELD_GET(PLLNDIV, usbphyc_pll),
FIELD_GET(PLLFRACIN, usbphyc_pll));
return 0;
}
static int __stm32_usbphyc_pll_disable(struct stm32_usbphyc *usbphyc)
{
void __iomem *pll_reg = usbphyc->base + STM32_USBPHYC_PLL;
u32 pllen;
stm32_usbphyc_clr_bits(pll_reg, PLLEN);
/* Wait for minimum width of powerdown pulse (ENABLE = Low) */
if (readl_relaxed_poll_timeout(pll_reg, pllen, !(pllen & PLLEN), 5, 50))
dev_err(usbphyc->dev, "PLL not reset\n");
return stm32_usbphyc_regulators_disable(usbphyc);
}
static int stm32_usbphyc_pll_disable(struct stm32_usbphyc *usbphyc)
{
/* Check if a phy port is still active or clk48 in use */
if (atomic_dec_return(&usbphyc->n_pll_cons) > 0)
return 0;
return __stm32_usbphyc_pll_disable(usbphyc);
}
static int stm32_usbphyc_pll_enable(struct stm32_usbphyc *usbphyc)
{
void __iomem *pll_reg = usbphyc->base + STM32_USBPHYC_PLL;
bool pllen = readl_relaxed(pll_reg) & PLLEN;
int ret;
/*
* Check if a phy port or clk48 prepare has configured the pll
* and ensure the PLL is enabled
*/
if (atomic_inc_return(&usbphyc->n_pll_cons) > 1 && pllen)
return 0;
if (pllen) {
/*
* PLL shouldn't be enabled without known consumer,
* disable it and reinit n_pll_cons
*/
dev_warn(usbphyc->dev, "PLL enabled without known consumers\n");
ret = __stm32_usbphyc_pll_disable(usbphyc);
if (ret)
goto dec_n_pll_cons;
}
ret = stm32_usbphyc_regulators_enable(usbphyc);
if (ret)
goto dec_n_pll_cons;
ret = stm32_usbphyc_pll_init(usbphyc);
if (ret)
goto reg_disable;
stm32_usbphyc_set_bits(pll_reg, PLLEN);
/* Wait for maximum lock time */
usleep_range(200, 300);
return 0;
reg_disable:
stm32_usbphyc_regulators_disable(usbphyc);
dec_n_pll_cons:
atomic_dec(&usbphyc->n_pll_cons);
return ret;
}
static int stm32_usbphyc_phy_init(struct phy *phy)
{
struct stm32_usbphyc_phy *usbphyc_phy = phy_get_drvdata(phy);
struct stm32_usbphyc *usbphyc = usbphyc_phy->usbphyc;
u32 reg_mon = STM32_USBPHYC_MONITOR(usbphyc_phy->index);
u32 monsel = FIELD_PREP(STM32_USBPHYC_MON_SEL,
STM32_USBPHYC_MON_SEL_LOCKP);
u32 monout;
int ret;
ret = stm32_usbphyc_pll_enable(usbphyc);
if (ret)
return ret;
/* Check that PLL Lock input to PHY is High */
writel_relaxed(monsel, usbphyc->base + reg_mon);
ret = readl_relaxed_poll_timeout(usbphyc->base + reg_mon, monout,
(monout & STM32_USBPHYC_MON_OUT_LOCKP),
100, 1000);
if (ret) {
dev_err(usbphyc->dev, "PLL Lock input to PHY is Low (val=%x)\n",
(u32)(monout & STM32_USBPHYC_MON_OUT));
goto pll_disable;
}
usbphyc_phy->active = true;
return 0;
pll_disable:
stm32_usbphyc_pll_disable(usbphyc);
return ret;
}
static int stm32_usbphyc_phy_exit(struct phy *phy)
{
struct stm32_usbphyc_phy *usbphyc_phy = phy_get_drvdata(phy);
struct stm32_usbphyc *usbphyc = usbphyc_phy->usbphyc;
usbphyc_phy->active = false;
return stm32_usbphyc_pll_disable(usbphyc);
}
static int stm32_usbphyc_phy_power_on(struct phy *phy)
{
struct stm32_usbphyc_phy *usbphyc_phy = phy_get_drvdata(phy);
if (usbphyc_phy->vbus)
return regulator_enable(usbphyc_phy->vbus);
return 0;
}
static int stm32_usbphyc_phy_power_off(struct phy *phy)
{
struct stm32_usbphyc_phy *usbphyc_phy = phy_get_drvdata(phy);
if (usbphyc_phy->vbus)
return regulator_disable(usbphyc_phy->vbus);
return 0;
}
static const struct phy_ops stm32_usbphyc_phy_ops = {
.init = stm32_usbphyc_phy_init,
.exit = stm32_usbphyc_phy_exit,
.power_on = stm32_usbphyc_phy_power_on,
.power_off = stm32_usbphyc_phy_power_off,
.owner = THIS_MODULE,
};
static int stm32_usbphyc_clk48_prepare(struct clk_hw *hw)
{
struct stm32_usbphyc *usbphyc = container_of(hw, struct stm32_usbphyc, clk48_hw);
return stm32_usbphyc_pll_enable(usbphyc);
}
static void stm32_usbphyc_clk48_unprepare(struct clk_hw *hw)
{
struct stm32_usbphyc *usbphyc = container_of(hw, struct stm32_usbphyc, clk48_hw);
stm32_usbphyc_pll_disable(usbphyc);
}
static unsigned long stm32_usbphyc_clk48_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
return 48000000;
}
static const struct clk_ops usbphyc_clk48_ops = {
.prepare = stm32_usbphyc_clk48_prepare,
.unprepare = stm32_usbphyc_clk48_unprepare,
.recalc_rate = stm32_usbphyc_clk48_recalc_rate,
};
static void stm32_usbphyc_clk48_unregister(void *data)
{
struct stm32_usbphyc *usbphyc = data;
of_clk_del_provider(usbphyc->dev->of_node);
clk_hw_unregister(&usbphyc->clk48_hw);
}
static int stm32_usbphyc_clk48_register(struct stm32_usbphyc *usbphyc)
{
struct device_node *node = usbphyc->dev->of_node;
struct clk_init_data init = { };
int ret = 0;
init.name = "ck_usbo_48m";
init.ops = &usbphyc_clk48_ops;
usbphyc->clk48_hw.init = &init;
ret = clk_hw_register(usbphyc->dev, &usbphyc->clk48_hw);
if (ret)
return ret;
ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, &usbphyc->clk48_hw);
if (ret)
clk_hw_unregister(&usbphyc->clk48_hw);
return ret;
}
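/*
 * Illustrative tuning properties (editorial note; values are examples only):
 * a child PHY node may carry optional "st,..." properties parsed below, e.g.
 *
 *	st,current-boost-microamp = <1000>;
 *	st,tune-hs-dc-level = <2>;
 *	st,trim-hs-current = <15>;
 *	st,tune-squelch-level = <3>;
 *	st,decrease-hs-slew-rate;
 *
 * Out-of-range values only trigger a warning and are otherwise ignored.
 */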
static void stm32_usbphyc_phy_tuning(struct stm32_usbphyc *usbphyc,
struct device_node *np, u32 index)
{
struct stm32_usbphyc_phy *usbphyc_phy = usbphyc->phys[index];
u32 reg = STM32_USBPHYC_TUNE(index);
u32 otpcomp, val;
int ret;
/* Backup OTP compensation code */
otpcomp = FIELD_GET(OTPCOMP, readl_relaxed(usbphyc->base + reg));
ret = of_property_read_u32(np, "st,current-boost-microamp", &val);
if (ret != -EINVAL) {
if (!ret && (val == BOOST_1000_UA || val == BOOST_2000_UA)) {
val = (val == BOOST_2000_UA) ? 1 : 0;
usbphyc_phy->tune |= INCURREN | FIELD_PREP(INCURRINT, val);
} else {
dev_warn(usbphyc->dev, "phy%d: invalid st,current-boost-microamp\n", index);
}
}
if (!of_property_read_bool(np, "st,no-lsfs-fb-cap"))
usbphyc_phy->tune |= LFSCAPEN;
if (of_property_read_bool(np, "st,decrease-hs-slew-rate"))
usbphyc_phy->tune |= HSDRVSLEW;
ret = of_property_read_u32(np, "st,tune-hs-dc-level", &val);
if (ret != -EINVAL) {
if (!ret && val < DC_MAX) {
if (val == DC_MINUS_5_TO_7_MV) {/* Decreases HS driver DC level */
usbphyc_phy->tune |= HSDRVDCCUR;
} else if (val > 0) { /* Increases HS driver DC level */
val = (val == DC_PLUS_10_TO_14_MV) ? 1 : 0;
usbphyc_phy->tune |= HSDRVCURINCR | FIELD_PREP(HSDRVDCLEV, val);
}
} else {
dev_warn(usbphyc->dev, "phy%d: invalid st,tune-hs-dc-level\n", index);
}
}
if (of_property_read_bool(np, "st,enable-fs-rftime-tuning"))
usbphyc_phy->tune |= FSDRVRFADJ;
if (of_property_read_bool(np, "st,enable-hs-rftime-reduction"))
usbphyc_phy->tune |= HSDRVRFRED;
ret = of_property_read_u32(np, "st,trim-hs-current", &val);
if (ret != -EINVAL) {
if (!ret && val < CUR_MAX)
usbphyc_phy->tune |= FIELD_PREP(HSDRVCHKITRM, val);
else
dev_warn(usbphyc->dev, "phy%d: invalid st,trim-hs-current\n", index);
}
ret = of_property_read_u32(np, "st,trim-hs-impedance", &val);
if (ret != -EINVAL) {
if (!ret && val < IMP_MAX)
usbphyc_phy->tune |= FIELD_PREP(HSDRVCHKZTRM, val);
else
dev_warn(usbphyc->dev, "phy%d: invalid st,trim-hs-impedance\n", index);
}
ret = of_property_read_u32(np, "st,tune-squelch-level", &val);
if (ret != -EINVAL) {
if (!ret && val < SQLCH_MAX)
usbphyc_phy->tune |= FIELD_PREP(SQLCHCTL, val);
else
dev_warn(usbphyc->dev, "phy%d: invalid st,tune-squelch\n", index);
}
if (of_property_read_bool(np, "st,enable-hs-rx-gain-eq"))
usbphyc_phy->tune |= HDRXGNEQEN;
ret = of_property_read_u32(np, "st,tune-hs-rx-offset", &val);
if (ret != -EINVAL) {
if (!ret && val < RX_OFFSET_MAX)
usbphyc_phy->tune |= FIELD_PREP(HSRXOFF, val);
else
dev_warn(usbphyc->dev, "phy%d: invalid st,tune-hs-rx-offset\n", index);
}
if (of_property_read_bool(np, "st,no-hs-ftime-ctrl"))
usbphyc_phy->tune |= HSFALLPREEM;
if (!of_property_read_bool(np, "st,no-lsfs-sc"))
usbphyc_phy->tune |= SHTCCTCTLPROT;
if (of_property_read_bool(np, "st,enable-hs-tx-staggering"))
usbphyc_phy->tune |= STAGSEL;
/* Restore OTP compensation code */
usbphyc_phy->tune |= FIELD_PREP(OTPCOMP, otpcomp);
/*
* By default, if no st,xxx tuning property is used, usbphyc_phy->tune is equal to
* STM32_USBPHYC_TUNE reset value (LFSCAPEN | SHTCCTCTLPROT | OTPCOMP).
*/
writel_relaxed(usbphyc_phy->tune, usbphyc->base + reg);
}
static void stm32_usbphyc_switch_setup(struct stm32_usbphyc *usbphyc,
u32 utmi_switch)
{
if (!utmi_switch)
stm32_usbphyc_clr_bits(usbphyc->base + STM32_USBPHYC_MISC,
SWITHOST);
else
stm32_usbphyc_set_bits(usbphyc->base + STM32_USBPHYC_MISC,
SWITHOST);
usbphyc->switch_setup = utmi_switch;
}
static struct phy *stm32_usbphyc_of_xlate(struct device *dev,
struct of_phandle_args *args)
{
struct stm32_usbphyc *usbphyc = dev_get_drvdata(dev);
struct stm32_usbphyc_phy *usbphyc_phy = NULL;
struct device_node *phynode = args->np;
int port = 0;
for (port = 0; port < usbphyc->nphys; port++) {
if (phynode == usbphyc->phys[port]->phy->dev.of_node) {
usbphyc_phy = usbphyc->phys[port];
break;
}
}
if (!usbphyc_phy) {
dev_err(dev, "failed to find phy\n");
return ERR_PTR(-EINVAL);
}
if (((usbphyc_phy->index == 0) && (args->args_count != 0)) ||
((usbphyc_phy->index == 1) && (args->args_count != 1))) {
dev_err(dev, "invalid number of cells for phy port%d\n",
usbphyc_phy->index);
return ERR_PTR(-EINVAL);
}
/* Configure the UTMI switch for PHY port#2 */
if (usbphyc_phy->index == 1) {
if (usbphyc->switch_setup < 0) {
stm32_usbphyc_switch_setup(usbphyc, args->args[0]);
} else {
if (args->args[0] != usbphyc->switch_setup) {
dev_err(dev, "phy port1 already used\n");
return ERR_PTR(-EBUSY);
}
}
}
return usbphyc_phy->phy;
}
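/*
 * Editorial note on the xlate above: PHY port 1 is shared between the host
 * and OTG controllers through the SWITHOST bit, and the consumer's extra PHY
 * cell (e.g. phys = <&usbphyc_port1 0>; as an illustrative form) selects the
 * routing; a second consumer asking for a different routing gets -EBUSY.
 */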
static int stm32_usbphyc_probe(struct platform_device *pdev)
{
struct stm32_usbphyc *usbphyc;
struct device *dev = &pdev->dev;
struct device_node *child, *np = dev->of_node;
struct phy_provider *phy_provider;
u32 pllen, version;
int ret, port = 0;
usbphyc = devm_kzalloc(dev, sizeof(*usbphyc), GFP_KERNEL);
if (!usbphyc)
return -ENOMEM;
usbphyc->dev = dev;
dev_set_drvdata(dev, usbphyc);
usbphyc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(usbphyc->base))
return PTR_ERR(usbphyc->base);
usbphyc->clk = devm_clk_get(dev, NULL);
if (IS_ERR(usbphyc->clk))
return dev_err_probe(dev, PTR_ERR(usbphyc->clk), "clk get_failed\n");
ret = clk_prepare_enable(usbphyc->clk);
if (ret) {
dev_err(dev, "clk enable failed: %d\n", ret);
return ret;
}
usbphyc->rst = devm_reset_control_get(dev, NULL);
if (!IS_ERR(usbphyc->rst)) {
reset_control_assert(usbphyc->rst);
udelay(2);
reset_control_deassert(usbphyc->rst);
} else {
ret = PTR_ERR(usbphyc->rst);
if (ret == -EPROBE_DEFER)
goto clk_disable;
stm32_usbphyc_clr_bits(usbphyc->base + STM32_USBPHYC_PLL, PLLEN);
}
/*
* Wait for minimum width of powerdown pulse (ENABLE = Low):
* we have to ensure the PLL is disabled before phys initialization.
*/
if (readl_relaxed_poll_timeout(usbphyc->base + STM32_USBPHYC_PLL,
pllen, !(pllen & PLLEN), 5, 50)) {
dev_warn(usbphyc->dev, "PLL not reset\n");
ret = -EPROBE_DEFER;
goto clk_disable;
}
usbphyc->switch_setup = -EINVAL;
usbphyc->nphys = of_get_child_count(np);
usbphyc->phys = devm_kcalloc(dev, usbphyc->nphys,
sizeof(*usbphyc->phys), GFP_KERNEL);
if (!usbphyc->phys) {
ret = -ENOMEM;
goto clk_disable;
}
usbphyc->vdda1v1 = devm_regulator_get(dev, "vdda1v1");
if (IS_ERR(usbphyc->vdda1v1)) {
ret = dev_err_probe(dev, PTR_ERR(usbphyc->vdda1v1),
"failed to get vdda1v1 supply\n");
goto clk_disable;
}
usbphyc->vdda1v8 = devm_regulator_get(dev, "vdda1v8");
if (IS_ERR(usbphyc->vdda1v8)) {
ret = dev_err_probe(dev, PTR_ERR(usbphyc->vdda1v8),
"failed to get vdda1v8 supply\n");
goto clk_disable;
}
for_each_child_of_node(np, child) {
struct stm32_usbphyc_phy *usbphyc_phy;
struct phy *phy;
u32 index;
phy = devm_phy_create(dev, child, &stm32_usbphyc_phy_ops);
if (IS_ERR(phy)) {
ret = PTR_ERR(phy);
if (ret != -EPROBE_DEFER)
dev_err(dev, "failed to create phy%d: %d\n",
port, ret);
goto put_child;
}
usbphyc_phy = devm_kzalloc(dev, sizeof(*usbphyc_phy),
GFP_KERNEL);
if (!usbphyc_phy) {
ret = -ENOMEM;
goto put_child;
}
ret = of_property_read_u32(child, "reg", &index);
if (ret || index > usbphyc->nphys) {
dev_err(&phy->dev, "invalid reg property: %d\n", ret);
if (!ret)
ret = -EINVAL;
goto put_child;
}
usbphyc->phys[port] = usbphyc_phy;
phy_set_bus_width(phy, 8);
phy_set_drvdata(phy, usbphyc_phy);
usbphyc->phys[port]->phy = phy;
usbphyc->phys[port]->usbphyc = usbphyc;
usbphyc->phys[port]->index = index;
usbphyc->phys[port]->active = false;
usbphyc->phys[port]->vbus = devm_regulator_get_optional(&phy->dev, "vbus");
if (IS_ERR(usbphyc->phys[port]->vbus)) {
ret = PTR_ERR(usbphyc->phys[port]->vbus);
if (ret == -EPROBE_DEFER)
goto put_child;
usbphyc->phys[port]->vbus = NULL;
}
/* Configure phy tuning */
stm32_usbphyc_phy_tuning(usbphyc, child, index);
port++;
}
phy_provider = devm_of_phy_provider_register(dev,
stm32_usbphyc_of_xlate);
if (IS_ERR(phy_provider)) {
ret = PTR_ERR(phy_provider);
dev_err(dev, "failed to register phy provider: %d\n", ret);
goto clk_disable;
}
ret = stm32_usbphyc_clk48_register(usbphyc);
if (ret) {
dev_err(dev, "failed to register ck_usbo_48m clock: %d\n", ret);
goto clk_disable;
}
version = readl_relaxed(usbphyc->base + STM32_USBPHYC_VERSION);
dev_info(dev, "registered rev:%lu.%lu\n",
FIELD_GET(MAJREV, version), FIELD_GET(MINREV, version));
return 0;
put_child:
of_node_put(child);
clk_disable:
clk_disable_unprepare(usbphyc->clk);
return ret;
}
static void stm32_usbphyc_remove(struct platform_device *pdev)
{
struct stm32_usbphyc *usbphyc = dev_get_drvdata(&pdev->dev);
int port;
/* Ensure PHYs are not active, to allow PLL disabling */
for (port = 0; port < usbphyc->nphys; port++)
if (usbphyc->phys[port]->active)
stm32_usbphyc_phy_exit(usbphyc->phys[port]->phy);
stm32_usbphyc_clk48_unregister(usbphyc);
clk_disable_unprepare(usbphyc->clk);
}
static int __maybe_unused stm32_usbphyc_resume(struct device *dev)
{
struct stm32_usbphyc *usbphyc = dev_get_drvdata(dev);
struct stm32_usbphyc_phy *usbphyc_phy;
int port;
if (usbphyc->switch_setup >= 0)
stm32_usbphyc_switch_setup(usbphyc, usbphyc->switch_setup);
for (port = 0; port < usbphyc->nphys; port++) {
usbphyc_phy = usbphyc->phys[port];
writel_relaxed(usbphyc_phy->tune, usbphyc->base + STM32_USBPHYC_TUNE(port));
}
return 0;
}
static SIMPLE_DEV_PM_OPS(stm32_usbphyc_pm_ops, NULL, stm32_usbphyc_resume);
static const struct of_device_id stm32_usbphyc_of_match[] = {
{ .compatible = "st,stm32mp1-usbphyc", },
{ },
};
MODULE_DEVICE_TABLE(of, stm32_usbphyc_of_match);
static struct platform_driver stm32_usbphyc_driver = {
.probe = stm32_usbphyc_probe,
.remove_new = stm32_usbphyc_remove,
.driver = {
.of_match_table = stm32_usbphyc_of_match,
.name = "stm32-usbphyc",
.pm = &stm32_usbphyc_pm_ops,
}
};
module_platform_driver(stm32_usbphyc_driver);
MODULE_DESCRIPTION("STMicroelectronics STM32 USBPHYC driver");
MODULE_AUTHOR("Amelie Delaunay <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/phy/st/phy-stm32-usbphyc.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Allwinner sun50i(H6) USB 3.0 phy driver
*
* Copyright (C) 2017 Icenowy Zheng <[email protected]>
*
* Based on phy-sun9i-usb.c, which is:
*
* Copyright (C) 2014-2015 Chen-Yu Tsai <[email protected]>
*
* Based on code from Allwinner BSP, which is:
*
* Copyright (c) 2010-2015 Allwinner Technology Co., Ltd.
*/
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
/* Interface Status and Control Registers */
#define SUNXI_ISCR 0x00
#define SUNXI_PIPE_CLOCK_CONTROL 0x14
#define SUNXI_PHY_TUNE_LOW 0x18
#define SUNXI_PHY_TUNE_HIGH 0x1c
#define SUNXI_PHY_EXTERNAL_CONTROL 0x20
/* USB2.0 Interface Status and Control Register */
#define SUNXI_ISCR_FORCE_VBUS (3 << 12)
/* PIPE Clock Control Register */
#define SUNXI_PCC_PIPE_CLK_OPEN (1 << 6)
/* PHY External Control Register */
#define SUNXI_PEC_EXTERN_VBUS (3 << 1)
#define SUNXI_PEC_SSC_EN (1 << 24)
#define SUNXI_PEC_REF_SSP_EN (1 << 26)
/* PHY Tune High Register */
#define SUNXI_TX_DEEMPH_3P5DB(n) ((n) << 19)
#define SUNXI_TX_DEEMPH_3P5DB_MASK GENMASK(24, 19)
#define SUNXI_TX_DEEMPH_6DB(n) ((n) << 13)
#define SUNXI_TX_DEEMPH_6GB_MASK GENMASK(18, 13)
#define SUNXI_TX_SWING_FULL(n) ((n) << 6)
#define SUNXI_TX_SWING_FULL_MASK GENMASK(12, 6)
#define SUNXI_LOS_BIAS(n) ((n) << 3)
#define SUNXI_LOS_BIAS_MASK GENMASK(5, 3)
#define SUNXI_TXVBOOSTLVL(n) ((n) << 0)
#define SUNXI_TXVBOOSTLVL_MASK GENMASK(2, 0)
struct sun50i_usb3_phy {
struct phy *phy;
void __iomem *regs;
struct reset_control *reset;
struct clk *clk;
};
static void sun50i_usb3_phy_open(struct sun50i_usb3_phy *phy)
{
u32 val;
val = readl(phy->regs + SUNXI_PHY_EXTERNAL_CONTROL);
val |= SUNXI_PEC_EXTERN_VBUS;
val |= SUNXI_PEC_SSC_EN | SUNXI_PEC_REF_SSP_EN;
writel(val, phy->regs + SUNXI_PHY_EXTERNAL_CONTROL);
val = readl(phy->regs + SUNXI_PIPE_CLOCK_CONTROL);
val |= SUNXI_PCC_PIPE_CLK_OPEN;
writel(val, phy->regs + SUNXI_PIPE_CLOCK_CONTROL);
val = readl(phy->regs + SUNXI_ISCR);
val |= SUNXI_ISCR_FORCE_VBUS;
writel(val, phy->regs + SUNXI_ISCR);
/*
* All the magic numbers written to the PHY_TUNE_{LOW,HIGH}
* registers are directly taken from the BSP USB3 driver from
* Allwinner.
*/
writel(0x0047fc87, phy->regs + SUNXI_PHY_TUNE_LOW);
val = readl(phy->regs + SUNXI_PHY_TUNE_HIGH);
val &= ~(SUNXI_TXVBOOSTLVL_MASK | SUNXI_LOS_BIAS_MASK |
SUNXI_TX_SWING_FULL_MASK | SUNXI_TX_DEEMPH_6GB_MASK |
SUNXI_TX_DEEMPH_3P5DB_MASK);
val |= SUNXI_TXVBOOSTLVL(0x7);
val |= SUNXI_LOS_BIAS(0x7);
val |= SUNXI_TX_SWING_FULL(0x55);
val |= SUNXI_TX_DEEMPH_6DB(0x20);
val |= SUNXI_TX_DEEMPH_3P5DB(0x15);
writel(val, phy->regs + SUNXI_PHY_TUNE_HIGH);
}
static int sun50i_usb3_phy_init(struct phy *_phy)
{
struct sun50i_usb3_phy *phy = phy_get_drvdata(_phy);
int ret;
ret = clk_prepare_enable(phy->clk);
if (ret)
return ret;
ret = reset_control_deassert(phy->reset);
if (ret) {
clk_disable_unprepare(phy->clk);
return ret;
}
sun50i_usb3_phy_open(phy);
return 0;
}
static int sun50i_usb3_phy_exit(struct phy *_phy)
{
struct sun50i_usb3_phy *phy = phy_get_drvdata(_phy);
reset_control_assert(phy->reset);
clk_disable_unprepare(phy->clk);
return 0;
}
static const struct phy_ops sun50i_usb3_phy_ops = {
.init = sun50i_usb3_phy_init,
.exit = sun50i_usb3_phy_exit,
.owner = THIS_MODULE,
};
static int sun50i_usb3_phy_probe(struct platform_device *pdev)
{
struct sun50i_usb3_phy *phy;
struct device *dev = &pdev->dev;
struct phy_provider *phy_provider;
phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
if (!phy)
return -ENOMEM;
phy->clk = devm_clk_get(dev, NULL);
if (IS_ERR(phy->clk)) {
if (PTR_ERR(phy->clk) != -EPROBE_DEFER)
dev_err(dev, "failed to get phy clock\n");
return PTR_ERR(phy->clk);
}
phy->reset = devm_reset_control_get(dev, NULL);
if (IS_ERR(phy->reset)) {
dev_err(dev, "failed to get reset control\n");
return PTR_ERR(phy->reset);
}
phy->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(phy->regs))
return PTR_ERR(phy->regs);
phy->phy = devm_phy_create(dev, NULL, &sun50i_usb3_phy_ops);
if (IS_ERR(phy->phy)) {
dev_err(dev, "failed to create PHY\n");
return PTR_ERR(phy->phy);
}
phy_set_drvdata(phy->phy, phy);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
return PTR_ERR_OR_ZERO(phy_provider);
}
static const struct of_device_id sun50i_usb3_phy_of_match[] = {
{ .compatible = "allwinner,sun50i-h6-usb3-phy" },
{ },
};
MODULE_DEVICE_TABLE(of, sun50i_usb3_phy_of_match);
static struct platform_driver sun50i_usb3_phy_driver = {
.probe = sun50i_usb3_phy_probe,
.driver = {
.of_match_table = sun50i_usb3_phy_of_match,
.name = "sun50i-usb3-phy",
}
};
module_platform_driver(sun50i_usb3_phy_driver);
MODULE_DESCRIPTION("Allwinner H6 USB 3.0 phy driver");
MODULE_AUTHOR("Icenowy Zheng <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/phy/allwinner/phy-sun50i-usb3.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 2016 Allwinnertech Co., Ltd.
* Copyright (C) 2017-2018 Bootlin
*
* Maxime Ripard <[email protected]>
*/
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/phy/phy.h>
#include <linux/phy/phy-mipi-dphy.h>
#define SUN6I_DPHY_GCTL_REG 0x00
#define SUN6I_DPHY_GCTL_LANE_NUM(n) ((((n) - 1) & 3) << 4)
#define SUN6I_DPHY_GCTL_EN BIT(0)
#define SUN6I_DPHY_TX_CTL_REG 0x04
#define SUN6I_DPHY_TX_CTL_HS_TX_CLK_CONT BIT(28)
#define SUN6I_DPHY_RX_CTL_REG 0x08
#define SUN6I_DPHY_RX_CTL_EN_DBC BIT(31)
#define SUN6I_DPHY_RX_CTL_RX_CLK_FORCE BIT(24)
#define SUN6I_DPHY_RX_CTL_RX_D3_FORCE BIT(23)
#define SUN6I_DPHY_RX_CTL_RX_D2_FORCE BIT(22)
#define SUN6I_DPHY_RX_CTL_RX_D1_FORCE BIT(21)
#define SUN6I_DPHY_RX_CTL_RX_D0_FORCE BIT(20)
#define SUN6I_DPHY_TX_TIME0_REG 0x10
#define SUN6I_DPHY_TX_TIME0_HS_TRAIL(n) (((n) & 0xff) << 24)
#define SUN6I_DPHY_TX_TIME0_HS_PREPARE(n) (((n) & 0xff) << 16)
#define SUN6I_DPHY_TX_TIME0_LP_CLK_DIV(n) ((n) & 0xff)
#define SUN6I_DPHY_TX_TIME1_REG 0x14
#define SUN6I_DPHY_TX_TIME1_CLK_POST(n) (((n) & 0xff) << 24)
#define SUN6I_DPHY_TX_TIME1_CLK_PRE(n) (((n) & 0xff) << 16)
#define SUN6I_DPHY_TX_TIME1_CLK_ZERO(n) (((n) & 0xff) << 8)
#define SUN6I_DPHY_TX_TIME1_CLK_PREPARE(n) ((n) & 0xff)
#define SUN6I_DPHY_TX_TIME2_REG 0x18
#define SUN6I_DPHY_TX_TIME2_CLK_TRAIL(n) ((n) & 0xff)
#define SUN6I_DPHY_TX_TIME3_REG 0x1c
#define SUN6I_DPHY_TX_TIME4_REG 0x20
#define SUN6I_DPHY_TX_TIME4_HS_TX_ANA1(n) (((n) & 0xff) << 8)
#define SUN6I_DPHY_TX_TIME4_HS_TX_ANA0(n) ((n) & 0xff)
#define SUN6I_DPHY_RX_TIME0_REG 0x30
#define SUN6I_DPHY_RX_TIME0_HS_RX_SYNC(n) (((n) & 0xff) << 24)
#define SUN6I_DPHY_RX_TIME0_HS_RX_CLK_MISS(n) (((n) & 0xff) << 16)
#define SUN6I_DPHY_RX_TIME0_LP_RX(n) (((n) & 0xff) << 8)
#define SUN6I_DPHY_RX_TIME1_REG 0x34
#define SUN6I_DPHY_RX_TIME1_RX_DLY(n) (((n) & 0xfff) << 20)
#define SUN6I_DPHY_RX_TIME1_LP_RX_ULPS_WP(n) ((n) & 0xfffff)
#define SUN6I_DPHY_RX_TIME2_REG 0x38
#define SUN6I_DPHY_RX_TIME2_HS_RX_ANA1(n) (((n) & 0xff) << 8)
#define SUN6I_DPHY_RX_TIME2_HS_RX_ANA0(n) ((n) & 0xff)
#define SUN6I_DPHY_RX_TIME3_REG 0x40
#define SUN6I_DPHY_RX_TIME3_LPRST_DLY(n) (((n) & 0xffff) << 16)
#define SUN6I_DPHY_ANA0_REG 0x4c
#define SUN6I_DPHY_ANA0_REG_PWS BIT(31)
#define SUN6I_DPHY_ANA0_REG_PWEND BIT(30)
#define SUN6I_DPHY_ANA0_REG_PWENC BIT(29)
#define SUN6I_DPHY_ANA0_REG_DMPC BIT(28)
#define SUN6I_DPHY_ANA0_REG_DMPD(n) (((n) & 0xf) << 24)
#define SUN6I_DPHY_ANA0_REG_SRXDT(n) (((n) & 0xf) << 20)
#define SUN6I_DPHY_ANA0_REG_SRXCK(n) (((n) & 0xf) << 16)
#define SUN6I_DPHY_ANA0_REG_SDIV2 BIT(15)
#define SUN6I_DPHY_ANA0_REG_SLV(n) (((n) & 7) << 12)
#define SUN6I_DPHY_ANA0_REG_DEN(n) (((n) & 0xf) << 8)
#define SUN6I_DPHY_ANA0_REG_PLR(n) (((n) & 0xf) << 4)
#define SUN6I_DPHY_ANA0_REG_SFB(n) (((n) & 3) << 2)
#define SUN6I_DPHY_ANA0_REG_RSD BIT(1)
#define SUN6I_DPHY_ANA0_REG_SELSCK BIT(0)
#define SUN6I_DPHY_ANA1_REG 0x50
#define SUN6I_DPHY_ANA1_REG_VTTMODE BIT(31)
#define SUN6I_DPHY_ANA1_REG_CSMPS(n) (((n) & 3) << 28)
#define SUN6I_DPHY_ANA1_REG_SVTT(n) (((n) & 0xf) << 24)
#define SUN6I_DPHY_ANA2_REG 0x54
#define SUN6I_DPHY_ANA2_EN_P2S_CPU(n) (((n) & 0xf) << 24)
#define SUN6I_DPHY_ANA2_EN_P2S_CPU_MASK GENMASK(27, 24)
#define SUN6I_DPHY_ANA2_EN_CK_CPU BIT(4)
#define SUN6I_DPHY_ANA2_REG_ENIB BIT(1)
#define SUN6I_DPHY_ANA3_REG 0x58
#define SUN6I_DPHY_ANA3_EN_VTTD(n) (((n) & 0xf) << 28)
#define SUN6I_DPHY_ANA3_EN_VTTD_MASK GENMASK(31, 28)
#define SUN6I_DPHY_ANA3_EN_VTTC BIT(27)
#define SUN6I_DPHY_ANA3_EN_DIV BIT(26)
#define SUN6I_DPHY_ANA3_EN_LDOC BIT(25)
#define SUN6I_DPHY_ANA3_EN_LDOD BIT(24)
#define SUN6I_DPHY_ANA3_EN_LDOR BIT(18)
#define SUN6I_DPHY_ANA4_REG 0x5c
#define SUN6I_DPHY_ANA4_REG_EN_MIPI BIT(31)
#define SUN6I_DPHY_ANA4_REG_EN_COMTEST BIT(30)
#define SUN6I_DPHY_ANA4_REG_COMTEST(n) (((n) & 3) << 28)
#define SUN6I_DPHY_ANA4_REG_IB(n) (((n) & 3) << 25)
#define SUN6I_DPHY_ANA4_REG_DMPLVC BIT(24)
#define SUN6I_DPHY_ANA4_REG_DMPLVD(n) (((n) & 0xf) << 20)
#define SUN6I_DPHY_ANA4_REG_VTT_SET(n) (((n) & 0x7) << 17)
#define SUN6I_DPHY_ANA4_REG_CKDV(n) (((n) & 0x1f) << 12)
#define SUN6I_DPHY_ANA4_REG_TMSC(n) (((n) & 3) << 10)
#define SUN6I_DPHY_ANA4_REG_TMSD(n) (((n) & 3) << 8)
#define SUN6I_DPHY_ANA4_REG_TXDNSC(n) (((n) & 3) << 6)
#define SUN6I_DPHY_ANA4_REG_TXDNSD(n) (((n) & 3) << 4)
#define SUN6I_DPHY_ANA4_REG_TXPUSC(n) (((n) & 3) << 2)
#define SUN6I_DPHY_ANA4_REG_TXPUSD(n) ((n) & 3)
#define SUN6I_DPHY_DBG5_REG 0xf4
#define SUN50I_DPHY_TX_SLEW_REG0 0xf8
#define SUN50I_DPHY_TX_SLEW_REG1 0xfc
#define SUN50I_DPHY_TX_SLEW_REG2 0x100
#define SUN50I_DPHY_PLL_REG0 0x104
#define SUN50I_DPHY_PLL_REG0_CP36_EN BIT(23)
#define SUN50I_DPHY_PLL_REG0_LDO_EN BIT(22)
#define SUN50I_DPHY_PLL_REG0_EN_LVS BIT(21)
#define SUN50I_DPHY_PLL_REG0_PLL_EN BIT(20)
#define SUN50I_DPHY_PLL_REG0_P(n) (((n) & 0xf) << 16)
#define SUN50I_DPHY_PLL_REG0_N(n) (((n) & 0xff) << 8)
#define SUN50I_DPHY_PLL_REG0_NDET BIT(7)
#define SUN50I_DPHY_PLL_REG0_TDIV BIT(6)
#define SUN50I_DPHY_PLL_REG0_M0(n) (((n) & 3) << 4)
#define SUN50I_DPHY_PLL_REG0_M1(n) ((n) & 0xf)
#define SUN50I_DPHY_PLL_REG1 0x108
#define SUN50I_DPHY_PLL_REG1_UNLOCK_MDSEL(n) (((n) & 3) << 14)
#define SUN50I_DPHY_PLL_REG1_LOCKMDSEL BIT(13)
#define SUN50I_DPHY_PLL_REG1_LOCKDET_EN BIT(12)
#define SUN50I_DPHY_PLL_REG1_VSETA(n) (((n) & 0x7) << 9)
#define SUN50I_DPHY_PLL_REG1_VSETD(n) (((n) & 0x7) << 6)
#define SUN50I_DPHY_PLL_REG1_LPF_SW BIT(5)
#define SUN50I_DPHY_PLL_REG1_ICP_SEL(n) (((n) & 3) << 3)
#define SUN50I_DPHY_PLL_REG1_ATEST_SEL(n) (((n) & 3) << 1)
#define SUN50I_DPHY_PLL_REG1_TEST_EN BIT(0)
#define SUN50I_DPHY_PLL_REG2 0x10c
#define SUN50I_DPHY_PLL_REG2_SDM_EN BIT(31)
#define SUN50I_DPHY_PLL_REG2_FF_EN BIT(30)
#define SUN50I_DPHY_PLL_REG2_SS_EN BIT(29)
#define SUN50I_DPHY_PLL_REG2_SS_FRAC(n) (((n) & 0x1ff) << 20)
#define SUN50I_DPHY_PLL_REG2_SS_INT(n) (((n) & 0xff) << 12)
#define SUN50I_DPHY_PLL_REG2_FRAC(n) ((n) & 0xfff)
#define SUN50I_COMBO_PHY_REG0 0x110
#define SUN50I_COMBO_PHY_REG0_EN_TEST_COMBOLDO BIT(5)
#define SUN50I_COMBO_PHY_REG0_EN_TEST_0P8 BIT(4)
#define SUN50I_COMBO_PHY_REG0_EN_MIPI BIT(3)
#define SUN50I_COMBO_PHY_REG0_EN_LVDS BIT(2)
#define SUN50I_COMBO_PHY_REG0_EN_COMBOLDO BIT(1)
#define SUN50I_COMBO_PHY_REG0_EN_CP BIT(0)
#define SUN50I_COMBO_PHY_REG1 0x114
#define SUN50I_COMBO_PHY_REG2_REG_VREF1P6(n) (((n) & 0x7) << 4)
#define SUN50I_COMBO_PHY_REG2_REG_VREF0P8(n) ((n) & 0x7)
#define SUN50I_COMBO_PHY_REG2 0x118
#define SUN50I_COMBO_PHY_REG2_HS_STOP_DLY(n) ((n) & 0xff)
enum sun6i_dphy_direction {
SUN6I_DPHY_DIRECTION_TX,
SUN6I_DPHY_DIRECTION_RX,
};
struct sun6i_dphy;
struct sun6i_dphy_variant {
void (*tx_power_on)(struct sun6i_dphy *dphy);
bool rx_supported;
};
struct sun6i_dphy {
struct clk *bus_clk;
struct clk *mod_clk;
struct regmap *regs;
struct reset_control *reset;
struct phy *phy;
struct phy_configure_opts_mipi_dphy config;
const struct sun6i_dphy_variant *variant;
enum sun6i_dphy_direction direction;
};
static int sun6i_dphy_init(struct phy *phy)
{
struct sun6i_dphy *dphy = phy_get_drvdata(phy);
reset_control_deassert(dphy->reset);
clk_prepare_enable(dphy->mod_clk);
clk_set_rate_exclusive(dphy->mod_clk, 150000000);
return 0;
}
static int sun6i_dphy_configure(struct phy *phy, union phy_configure_opts *opts)
{
struct sun6i_dphy *dphy = phy_get_drvdata(phy);
int ret;
ret = phy_mipi_dphy_config_validate(&opts->mipi_dphy);
if (ret)
return ret;
memcpy(&dphy->config, opts, sizeof(dphy->config));
return 0;
}
static void sun6i_a31_mipi_dphy_tx_power_on(struct sun6i_dphy *dphy)
{
u8 lanes_mask = GENMASK(dphy->config.lanes - 1, 0);
regmap_write(dphy->regs, SUN6I_DPHY_ANA0_REG,
SUN6I_DPHY_ANA0_REG_PWS |
SUN6I_DPHY_ANA0_REG_DMPC |
SUN6I_DPHY_ANA0_REG_SLV(7) |
SUN6I_DPHY_ANA0_REG_DMPD(lanes_mask) |
SUN6I_DPHY_ANA0_REG_DEN(lanes_mask));
regmap_write(dphy->regs, SUN6I_DPHY_ANA1_REG,
SUN6I_DPHY_ANA1_REG_CSMPS(1) |
SUN6I_DPHY_ANA1_REG_SVTT(7));
regmap_write(dphy->regs, SUN6I_DPHY_ANA4_REG,
SUN6I_DPHY_ANA4_REG_CKDV(1) |
SUN6I_DPHY_ANA4_REG_TMSC(1) |
SUN6I_DPHY_ANA4_REG_TMSD(1) |
SUN6I_DPHY_ANA4_REG_TXDNSC(1) |
SUN6I_DPHY_ANA4_REG_TXDNSD(1) |
SUN6I_DPHY_ANA4_REG_TXPUSC(1) |
SUN6I_DPHY_ANA4_REG_TXPUSD(1) |
SUN6I_DPHY_ANA4_REG_DMPLVC |
SUN6I_DPHY_ANA4_REG_DMPLVD(lanes_mask));
regmap_write(dphy->regs, SUN6I_DPHY_ANA2_REG,
SUN6I_DPHY_ANA2_REG_ENIB);
udelay(5);
regmap_write(dphy->regs, SUN6I_DPHY_ANA3_REG,
SUN6I_DPHY_ANA3_EN_LDOR |
SUN6I_DPHY_ANA3_EN_LDOC |
SUN6I_DPHY_ANA3_EN_LDOD);
udelay(1);
}
static void sun50i_a100_mipi_dphy_tx_power_on(struct sun6i_dphy *dphy)
{
unsigned long mipi_symbol_rate = dphy->config.hs_clk_rate;
unsigned int div, n;
regmap_write(dphy->regs, SUN6I_DPHY_ANA4_REG,
SUN6I_DPHY_ANA4_REG_IB(2) |
SUN6I_DPHY_ANA4_REG_DMPLVD(4) |
SUN6I_DPHY_ANA4_REG_VTT_SET(3) |
SUN6I_DPHY_ANA4_REG_CKDV(3) |
SUN6I_DPHY_ANA4_REG_TMSD(1) |
SUN6I_DPHY_ANA4_REG_TMSC(1) |
SUN6I_DPHY_ANA4_REG_TXPUSD(2) |
SUN6I_DPHY_ANA4_REG_TXPUSC(3) |
SUN6I_DPHY_ANA4_REG_TXDNSD(2) |
SUN6I_DPHY_ANA4_REG_TXDNSC(3));
regmap_update_bits(dphy->regs, SUN6I_DPHY_ANA2_REG,
SUN6I_DPHY_ANA2_EN_CK_CPU,
SUN6I_DPHY_ANA2_EN_CK_CPU);
regmap_update_bits(dphy->regs, SUN6I_DPHY_ANA2_REG,
SUN6I_DPHY_ANA2_REG_ENIB,
SUN6I_DPHY_ANA2_REG_ENIB);
regmap_write(dphy->regs, SUN6I_DPHY_ANA3_REG,
SUN6I_DPHY_ANA3_EN_LDOR |
SUN6I_DPHY_ANA3_EN_LDOC |
SUN6I_DPHY_ANA3_EN_LDOD);
regmap_write(dphy->regs, SUN6I_DPHY_ANA0_REG,
SUN6I_DPHY_ANA0_REG_PLR(4) |
SUN6I_DPHY_ANA0_REG_SFB(1));
regmap_write(dphy->regs, SUN50I_COMBO_PHY_REG0,
SUN50I_COMBO_PHY_REG0_EN_CP);
/* Choose a divider to limit the VCO frequency to around 2 GHz. */
div = 16 >> order_base_2(DIV_ROUND_UP(mipi_symbol_rate, 264000000));
n = mipi_symbol_rate * div / 24000000;
regmap_write(dphy->regs, SUN50I_DPHY_PLL_REG0,
SUN50I_DPHY_PLL_REG0_CP36_EN |
SUN50I_DPHY_PLL_REG0_LDO_EN |
SUN50I_DPHY_PLL_REG0_EN_LVS |
SUN50I_DPHY_PLL_REG0_PLL_EN |
SUN50I_DPHY_PLL_REG0_NDET |
SUN50I_DPHY_PLL_REG0_P((div - 1) % 8) |
SUN50I_DPHY_PLL_REG0_N(n) |
SUN50I_DPHY_PLL_REG0_M0((div - 1) / 8) |
SUN50I_DPHY_PLL_REG0_M1(2));
/* Disable sigma-delta modulation. */
regmap_write(dphy->regs, SUN50I_DPHY_PLL_REG2, 0);
regmap_update_bits(dphy->regs, SUN6I_DPHY_ANA4_REG,
SUN6I_DPHY_ANA4_REG_EN_MIPI,
SUN6I_DPHY_ANA4_REG_EN_MIPI);
regmap_update_bits(dphy->regs, SUN50I_COMBO_PHY_REG0,
SUN50I_COMBO_PHY_REG0_EN_MIPI |
SUN50I_COMBO_PHY_REG0_EN_COMBOLDO,
SUN50I_COMBO_PHY_REG0_EN_MIPI |
SUN50I_COMBO_PHY_REG0_EN_COMBOLDO);
regmap_write(dphy->regs, SUN50I_COMBO_PHY_REG2,
SUN50I_COMBO_PHY_REG2_HS_STOP_DLY(20));
udelay(1);
}
static int sun6i_dphy_tx_power_on(struct sun6i_dphy *dphy)
{
u8 lanes_mask = GENMASK(dphy->config.lanes - 1, 0);
regmap_write(dphy->regs, SUN6I_DPHY_TX_CTL_REG,
SUN6I_DPHY_TX_CTL_HS_TX_CLK_CONT);
regmap_write(dphy->regs, SUN6I_DPHY_TX_TIME0_REG,
SUN6I_DPHY_TX_TIME0_LP_CLK_DIV(14) |
SUN6I_DPHY_TX_TIME0_HS_PREPARE(6) |
SUN6I_DPHY_TX_TIME0_HS_TRAIL(10));
regmap_write(dphy->regs, SUN6I_DPHY_TX_TIME1_REG,
SUN6I_DPHY_TX_TIME1_CLK_PREPARE(7) |
SUN6I_DPHY_TX_TIME1_CLK_ZERO(50) |
SUN6I_DPHY_TX_TIME1_CLK_PRE(3) |
SUN6I_DPHY_TX_TIME1_CLK_POST(10));
regmap_write(dphy->regs, SUN6I_DPHY_TX_TIME2_REG,
SUN6I_DPHY_TX_TIME2_CLK_TRAIL(30));
regmap_write(dphy->regs, SUN6I_DPHY_TX_TIME3_REG, 0);
regmap_write(dphy->regs, SUN6I_DPHY_TX_TIME4_REG,
SUN6I_DPHY_TX_TIME4_HS_TX_ANA0(3) |
SUN6I_DPHY_TX_TIME4_HS_TX_ANA1(3));
dphy->variant->tx_power_on(dphy);
regmap_update_bits(dphy->regs, SUN6I_DPHY_ANA3_REG,
SUN6I_DPHY_ANA3_EN_VTTC |
SUN6I_DPHY_ANA3_EN_VTTD_MASK,
SUN6I_DPHY_ANA3_EN_VTTC |
SUN6I_DPHY_ANA3_EN_VTTD(lanes_mask));
udelay(1);
regmap_update_bits(dphy->regs, SUN6I_DPHY_ANA3_REG,
SUN6I_DPHY_ANA3_EN_DIV,
SUN6I_DPHY_ANA3_EN_DIV);
udelay(1);
regmap_update_bits(dphy->regs, SUN6I_DPHY_ANA2_REG,
SUN6I_DPHY_ANA2_EN_CK_CPU,
SUN6I_DPHY_ANA2_EN_CK_CPU);
udelay(1);
regmap_update_bits(dphy->regs, SUN6I_DPHY_ANA1_REG,
SUN6I_DPHY_ANA1_REG_VTTMODE,
SUN6I_DPHY_ANA1_REG_VTTMODE);
regmap_update_bits(dphy->regs, SUN6I_DPHY_ANA2_REG,
SUN6I_DPHY_ANA2_EN_P2S_CPU_MASK,
SUN6I_DPHY_ANA2_EN_P2S_CPU(lanes_mask));
regmap_write(dphy->regs, SUN6I_DPHY_GCTL_REG,
SUN6I_DPHY_GCTL_LANE_NUM(dphy->config.lanes) |
SUN6I_DPHY_GCTL_EN);
return 0;
}
static int sun6i_dphy_rx_power_on(struct sun6i_dphy *dphy)
{
/* Physical clock rate is actually half of symbol rate with DDR. */
unsigned long mipi_symbol_rate = dphy->config.hs_clk_rate;
unsigned long dphy_clk_rate;
unsigned int rx_dly;
unsigned int lprst_dly;
u32 value;
dphy_clk_rate = clk_get_rate(dphy->mod_clk);
if (!dphy_clk_rate)
return -EINVAL;
/* Hardcoded timing parameters from the Allwinner BSP. */
regmap_write(dphy->regs, SUN6I_DPHY_RX_TIME0_REG,
SUN6I_DPHY_RX_TIME0_HS_RX_SYNC(255) |
SUN6I_DPHY_RX_TIME0_HS_RX_CLK_MISS(255) |
SUN6I_DPHY_RX_TIME0_LP_RX(255));
/*
* Formula from the Allwinner BSP, with hardcoded coefficients
* (probably internal divider/multiplier).
*/
rx_dly = 8 * (unsigned int)(dphy_clk_rate / (mipi_symbol_rate / 8));
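/*
 * Editorial example: with the 150 MHz mod clock set in sun6i_dphy_init() and
 * a 600 Mbit/s per-lane symbol rate this evaluates to
 * 8 * (150000000 / 75000000), i.e. a delay value of 16.
 */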
/*
* The Allwinner BSP has an alternative formula for LP_RX_ULPS_WP:
* lp_ulps_wp_cnt = lp_ulps_wp_ms * lp_clk / 1000
* but does not use it and hardcodes 255 instead.
*/
regmap_write(dphy->regs, SUN6I_DPHY_RX_TIME1_REG,
SUN6I_DPHY_RX_TIME1_RX_DLY(rx_dly) |
SUN6I_DPHY_RX_TIME1_LP_RX_ULPS_WP(255));
/* HS_RX_ANA0 value is hardcoded in the Allwinner BSP. */
regmap_write(dphy->regs, SUN6I_DPHY_RX_TIME2_REG,
SUN6I_DPHY_RX_TIME2_HS_RX_ANA0(4));
/*
* Formula from the Allwinner BSP, with hardcoded coefficients
* (probably internal divider/multiplier).
*/
lprst_dly = 4 * (unsigned int)(dphy_clk_rate / (mipi_symbol_rate / 2));
regmap_write(dphy->regs, SUN6I_DPHY_RX_TIME3_REG,
SUN6I_DPHY_RX_TIME3_LPRST_DLY(lprst_dly));
/* Analog parameters are hardcoded in the Allwinner BSP. */
regmap_write(dphy->regs, SUN6I_DPHY_ANA0_REG,
SUN6I_DPHY_ANA0_REG_PWS |
SUN6I_DPHY_ANA0_REG_SLV(7) |
SUN6I_DPHY_ANA0_REG_SFB(2));
regmap_write(dphy->regs, SUN6I_DPHY_ANA1_REG,
SUN6I_DPHY_ANA1_REG_SVTT(4));
regmap_write(dphy->regs, SUN6I_DPHY_ANA4_REG,
SUN6I_DPHY_ANA4_REG_DMPLVC |
SUN6I_DPHY_ANA4_REG_DMPLVD(1));
regmap_write(dphy->regs, SUN6I_DPHY_ANA2_REG,
SUN6I_DPHY_ANA2_REG_ENIB);
regmap_write(dphy->regs, SUN6I_DPHY_ANA3_REG,
SUN6I_DPHY_ANA3_EN_LDOR |
SUN6I_DPHY_ANA3_EN_LDOC |
SUN6I_DPHY_ANA3_EN_LDOD);
/*
* Delay comes from the Allwinner BSP, likely for internal regulator
* ramp-up.
*/
udelay(3);
value = SUN6I_DPHY_RX_CTL_EN_DBC | SUN6I_DPHY_RX_CTL_RX_CLK_FORCE;
/*
* Rx data lane force-enable bits are used as regular RX enable by the
* Allwinner BSP.
*/
if (dphy->config.lanes >= 1)
value |= SUN6I_DPHY_RX_CTL_RX_D0_FORCE;
if (dphy->config.lanes >= 2)
value |= SUN6I_DPHY_RX_CTL_RX_D1_FORCE;
if (dphy->config.lanes >= 3)
value |= SUN6I_DPHY_RX_CTL_RX_D2_FORCE;
if (dphy->config.lanes == 4)
value |= SUN6I_DPHY_RX_CTL_RX_D3_FORCE;
regmap_write(dphy->regs, SUN6I_DPHY_RX_CTL_REG, value);
regmap_write(dphy->regs, SUN6I_DPHY_GCTL_REG,
SUN6I_DPHY_GCTL_LANE_NUM(dphy->config.lanes) |
SUN6I_DPHY_GCTL_EN);
return 0;
}
static int sun6i_dphy_power_on(struct phy *phy)
{
struct sun6i_dphy *dphy = phy_get_drvdata(phy);
switch (dphy->direction) {
case SUN6I_DPHY_DIRECTION_TX:
return sun6i_dphy_tx_power_on(dphy);
case SUN6I_DPHY_DIRECTION_RX:
return sun6i_dphy_rx_power_on(dphy);
default:
return -EINVAL;
}
}
static int sun6i_dphy_power_off(struct phy *phy)
{
struct sun6i_dphy *dphy = phy_get_drvdata(phy);
regmap_write(dphy->regs, SUN6I_DPHY_GCTL_REG, 0);
regmap_write(dphy->regs, SUN6I_DPHY_ANA0_REG, 0);
regmap_write(dphy->regs, SUN6I_DPHY_ANA1_REG, 0);
regmap_write(dphy->regs, SUN6I_DPHY_ANA2_REG, 0);
regmap_write(dphy->regs, SUN6I_DPHY_ANA3_REG, 0);
regmap_write(dphy->regs, SUN6I_DPHY_ANA4_REG, 0);
return 0;
}
static int sun6i_dphy_exit(struct phy *phy)
{
struct sun6i_dphy *dphy = phy_get_drvdata(phy);
clk_rate_exclusive_put(dphy->mod_clk);
clk_disable_unprepare(dphy->mod_clk);
reset_control_assert(dphy->reset);
return 0;
}
static const struct phy_ops sun6i_dphy_ops = {
.configure = sun6i_dphy_configure,
.power_on = sun6i_dphy_power_on,
.power_off = sun6i_dphy_power_off,
.init = sun6i_dphy_init,
.exit = sun6i_dphy_exit,
};
static const struct regmap_config sun6i_dphy_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.max_register = SUN50I_COMBO_PHY_REG2,
.name = "mipi-dphy",
};
static int sun6i_dphy_probe(struct platform_device *pdev)
{
struct phy_provider *phy_provider;
struct sun6i_dphy *dphy;
const char *direction;
void __iomem *regs;
int ret;
dphy = devm_kzalloc(&pdev->dev, sizeof(*dphy), GFP_KERNEL);
if (!dphy)
return -ENOMEM;
dphy->variant = device_get_match_data(&pdev->dev);
if (!dphy->variant)
return -EINVAL;
regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs)) {
dev_err(&pdev->dev, "Couldn't map the DPHY encoder registers\n");
return PTR_ERR(regs);
}
dphy->regs = devm_regmap_init_mmio_clk(&pdev->dev, "bus",
regs, &sun6i_dphy_regmap_config);
if (IS_ERR(dphy->regs)) {
dev_err(&pdev->dev, "Couldn't create the DPHY encoder regmap\n");
return PTR_ERR(dphy->regs);
}
dphy->reset = devm_reset_control_get_shared(&pdev->dev, NULL);
if (IS_ERR(dphy->reset)) {
dev_err(&pdev->dev, "Couldn't get our reset line\n");
return PTR_ERR(dphy->reset);
}
dphy->mod_clk = devm_clk_get(&pdev->dev, "mod");
if (IS_ERR(dphy->mod_clk)) {
dev_err(&pdev->dev, "Couldn't get the DPHY mod clock\n");
return PTR_ERR(dphy->mod_clk);
}
dphy->phy = devm_phy_create(&pdev->dev, NULL, &sun6i_dphy_ops);
if (IS_ERR(dphy->phy)) {
dev_err(&pdev->dev, "failed to create PHY\n");
return PTR_ERR(dphy->phy);
}
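/*
 * The direction defaults to TX below; an illustrative (editorial) DT usage
 * for selecting the receive path on variants that support it:
 *
 *	allwinner,direction = "rx";
 */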
dphy->direction = SUN6I_DPHY_DIRECTION_TX;
ret = of_property_read_string(pdev->dev.of_node, "allwinner,direction",
&direction);
if (!ret && !strncmp(direction, "rx", 2)) {
if (!dphy->variant->rx_supported) {
dev_err(&pdev->dev, "RX not supported on this variant\n");
return -EOPNOTSUPP;
}
dphy->direction = SUN6I_DPHY_DIRECTION_RX;
}
phy_set_drvdata(dphy->phy, dphy);
phy_provider = devm_of_phy_provider_register(&pdev->dev, of_phy_simple_xlate);
return PTR_ERR_OR_ZERO(phy_provider);
}
static const struct sun6i_dphy_variant sun6i_a31_mipi_dphy_variant = {
.tx_power_on = sun6i_a31_mipi_dphy_tx_power_on,
.rx_supported = true,
};
static const struct sun6i_dphy_variant sun50i_a100_mipi_dphy_variant = {
.tx_power_on = sun50i_a100_mipi_dphy_tx_power_on,
};
static const struct of_device_id sun6i_dphy_of_table[] = {
{
.compatible = "allwinner,sun6i-a31-mipi-dphy",
.data = &sun6i_a31_mipi_dphy_variant,
},
{
.compatible = "allwinner,sun50i-a100-mipi-dphy",
.data = &sun50i_a100_mipi_dphy_variant,
},
{ }
};
MODULE_DEVICE_TABLE(of, sun6i_dphy_of_table);
static struct platform_driver sun6i_dphy_platform_driver = {
.probe = sun6i_dphy_probe,
.driver = {
.name = "sun6i-mipi-dphy",
.of_match_table = sun6i_dphy_of_table,
},
};
module_platform_driver(sun6i_dphy_platform_driver);
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@bootlin>");
MODULE_DESCRIPTION("Allwinner A31 MIPI D-PHY Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/phy/allwinner/phy-sun6i-mipi-dphy.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Allwinner sun4i USB phy driver
*
* Copyright (C) 2014-2015 Hans de Goede <[email protected]>
*
* Based on code from
* Allwinner Technology Co., Ltd. <www.allwinnertech.com>
*
* Modelled after: Samsung S5P/Exynos SoC series MIPI CSIS/DSIM DPHY driver
* Copyright (C) 2013 Samsung Electronics Co., Ltd.
* Author: Sylwester Nawrocki <[email protected]>
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/extcon-provider.h>
#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/phy/phy.h>
#include <linux/phy/phy-sun4i-usb.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/spinlock.h>
#include <linux/usb/of.h>
#include <linux/workqueue.h>
#define REG_ISCR 0x00
#define REG_PHYCTL_A10 0x04
#define REG_PHYBIST 0x08
#define REG_PHYTUNE 0x0c
#define REG_PHYCTL_A33 0x10
#define REG_PHY_OTGCTL 0x20
#define REG_HCI_PHY_CTL 0x10
#define PHYCTL_DATA BIT(7)
#define OTGCTL_ROUTE_MUSB BIT(0)
#define SUNXI_AHB_ICHR8_EN BIT(10)
#define SUNXI_AHB_INCR4_BURST_EN BIT(9)
#define SUNXI_AHB_INCRX_ALIGN_EN BIT(8)
#define SUNXI_ULPI_BYPASS_EN BIT(0)
/* ISCR, Interface Status and Control bits */
#define ISCR_ID_PULLUP_EN (1 << 17)
#define ISCR_DPDM_PULLUP_EN (1 << 16)
/* sunxi has the phy id/vbus pins not connected, so we use the force bits */
#define ISCR_FORCE_ID_MASK (3 << 14)
#define ISCR_FORCE_ID_LOW (2 << 14)
#define ISCR_FORCE_ID_HIGH (3 << 14)
#define ISCR_FORCE_VBUS_MASK (3 << 12)
#define ISCR_FORCE_VBUS_LOW (2 << 12)
#define ISCR_FORCE_VBUS_HIGH (3 << 12)
/* Common Control Bits for Both PHYs */
#define PHY_PLL_BW 0x03
#define PHY_RES45_CAL_EN 0x0c
/* Private Control Bits for Each PHY */
#define PHY_TX_AMPLITUDE_TUNE 0x20
#define PHY_TX_SLEWRATE_TUNE 0x22
#define PHY_VBUSVALID_TH_SEL 0x25
#define PHY_PULLUP_RES_SEL 0x27
#define PHY_OTG_FUNC_EN 0x28
#define PHY_VBUS_DET_EN 0x29
#define PHY_DISCON_TH_SEL 0x2a
#define PHY_SQUELCH_DETECT 0x3c
/* A83T specific control bits for PHY0 */
#define PHY_CTL_VBUSVLDEXT BIT(5)
#define PHY_CTL_SIDDQ BIT(3)
#define PHY_CTL_H3_SIDDQ BIT(1)
/* A83T specific control bits for PHY2 HSIC */
#define SUNXI_EHCI_HS_FORCE BIT(20)
#define SUNXI_HSIC_CONNECT_DET BIT(17)
#define SUNXI_HSIC_CONNECT_INT BIT(16)
#define SUNXI_HSIC BIT(1)
#define MAX_PHYS 4
/*
* Note do not raise the debounce time, we must report Vusb high within 100ms
* otherwise we get Vbus errors
*/
#define DEBOUNCE_TIME msecs_to_jiffies(50)
#define POLL_TIME msecs_to_jiffies(250)
struct sun4i_usb_phy_cfg {
int num_phys;
int hsic_index;
u32 disc_thresh;
u32 hci_phy_ctl_clear;
u8 phyctl_offset;
bool dedicated_clocks;
bool phy0_dual_route;
bool needs_phy2_siddq;
bool siddq_in_base;
bool poll_vbusen;
int missing_phys;
};
struct sun4i_usb_phy_data {
void __iomem *base;
const struct sun4i_usb_phy_cfg *cfg;
enum usb_dr_mode dr_mode;
spinlock_t reg_lock; /* guard access to phyctl reg */
struct sun4i_usb_phy {
struct phy *phy;
void __iomem *pmu;
struct regulator *vbus;
struct reset_control *reset;
struct clk *clk;
struct clk *clk2;
bool regulator_on;
int index;
} phys[MAX_PHYS];
/* phy0 / otg related variables */
struct extcon_dev *extcon;
bool phy0_init;
struct gpio_desc *id_det_gpio;
struct gpio_desc *vbus_det_gpio;
struct power_supply *vbus_power_supply;
struct notifier_block vbus_power_nb;
bool vbus_power_nb_registered;
bool force_session_end;
int id_det_irq;
int vbus_det_irq;
int id_det;
int vbus_det;
struct delayed_work detect;
};
#define to_sun4i_usb_phy_data(phy) \
container_of((phy), struct sun4i_usb_phy_data, phys[(phy)->index])
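/*
 * to_sun4i_usb_phy_data() above maps an individual struct sun4i_usb_phy back
 * to its containing sun4i_usb_phy_data: phy->index names the phys[] slot the
 * pointer lives in, so container_of() subtracts the matching offset.
 */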
static void sun4i_usb_phy0_update_iscr(struct phy *_phy, u32 clr, u32 set)
{
struct sun4i_usb_phy *phy = phy_get_drvdata(_phy);
struct sun4i_usb_phy_data *data = to_sun4i_usb_phy_data(phy);
u32 iscr;
iscr = readl(data->base + REG_ISCR);
iscr &= ~clr;
iscr |= set;
writel(iscr, data->base + REG_ISCR);
}
static void sun4i_usb_phy0_set_id_detect(struct phy *phy, u32 val)
{
if (val)
val = ISCR_FORCE_ID_HIGH;
else
val = ISCR_FORCE_ID_LOW;
sun4i_usb_phy0_update_iscr(phy, ISCR_FORCE_ID_MASK, val);
}
static void sun4i_usb_phy0_set_vbus_detect(struct phy *phy, u32 val)
{
if (val)
val = ISCR_FORCE_VBUS_HIGH;
else
val = ISCR_FORCE_VBUS_LOW;
sun4i_usb_phy0_update_iscr(phy, ISCR_FORCE_VBUS_MASK, val);
}
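/*
 * Bit-banged access to the PHY tuning registers: each loop iteration below
 * latches the register address (addr + i) into PHYCTL, drives one bit of
 * 'data' on the data line and pulses this PHY's usbc strobe bit, so 'len'
 * bits are shifted out LSB first starting at 'addr'.
 */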
static void sun4i_usb_phy_write(struct sun4i_usb_phy *phy, u32 addr, u32 data,
int len)
{
struct sun4i_usb_phy_data *phy_data = to_sun4i_usb_phy_data(phy);
u32 temp, usbc_bit = BIT(phy->index * 2);
void __iomem *phyctl = phy_data->base + phy_data->cfg->phyctl_offset;
unsigned long flags;
int i;
spin_lock_irqsave(&phy_data->reg_lock, flags);
if (phy_data->cfg->phyctl_offset == REG_PHYCTL_A33) {
/* SoCs newer than A33 need us to set phyctl to 0 explicitly */
writel(0, phyctl);
}
for (i = 0; i < len; i++) {
temp = readl(phyctl);
/* clear the address portion */
temp &= ~(0xff << 8);
/* set the address */
temp |= ((addr + i) << 8);
writel(temp, phyctl);
/* set the data bit and clear usbc bit*/
temp = readb(phyctl);
if (data & 0x1)
temp |= PHYCTL_DATA;
else
temp &= ~PHYCTL_DATA;
temp &= ~usbc_bit;
writeb(temp, phyctl);
/* pulse usbc_bit */
temp = readb(phyctl);
temp |= usbc_bit;
writeb(temp, phyctl);
temp = readb(phyctl);
temp &= ~usbc_bit;
writeb(temp, phyctl);
data >>= 1;
}
spin_unlock_irqrestore(&phy_data->reg_lock, flags);
}
static void sun4i_usb_phy_passby(struct sun4i_usb_phy *phy, int enable)
{
struct sun4i_usb_phy_data *phy_data = to_sun4i_usb_phy_data(phy);
u32 bits, reg_value;
if (!phy->pmu)
return;
bits = SUNXI_AHB_ICHR8_EN | SUNXI_AHB_INCR4_BURST_EN |
SUNXI_AHB_INCRX_ALIGN_EN | SUNXI_ULPI_BYPASS_EN;
/* A83T USB2 is HSIC */
if (phy_data->cfg->hsic_index &&
phy->index == phy_data->cfg->hsic_index)
bits |= SUNXI_EHCI_HS_FORCE | SUNXI_HSIC_CONNECT_INT |
SUNXI_HSIC;
reg_value = readl(phy->pmu);
if (enable)
reg_value |= bits;
else
reg_value &= ~bits;
writel(reg_value, phy->pmu);
}
static int sun4i_usb_phy_init(struct phy *_phy)
{
struct sun4i_usb_phy *phy = phy_get_drvdata(_phy);
struct sun4i_usb_phy_data *data = to_sun4i_usb_phy_data(phy);
int ret;
u32 val;
ret = clk_prepare_enable(phy->clk);
if (ret)
return ret;
ret = clk_prepare_enable(phy->clk2);
if (ret) {
clk_disable_unprepare(phy->clk);
return ret;
}
ret = reset_control_deassert(phy->reset);
if (ret) {
clk_disable_unprepare(phy->clk2);
clk_disable_unprepare(phy->clk);
return ret;
}
/* Some PHYs on some SoCs need the help of PHY2 to work. */
if (data->cfg->needs_phy2_siddq && phy->index != 2) {
struct sun4i_usb_phy *phy2 = &data->phys[2];
ret = clk_prepare_enable(phy2->clk);
if (ret) {
reset_control_assert(phy->reset);
clk_disable_unprepare(phy->clk2);
clk_disable_unprepare(phy->clk);
return ret;
}
ret = reset_control_deassert(phy2->reset);
if (ret) {
clk_disable_unprepare(phy2->clk);
reset_control_assert(phy->reset);
clk_disable_unprepare(phy->clk2);
clk_disable_unprepare(phy->clk);
return ret;
}
/*
* This extra clock is just needed to access the
* REG_HCI_PHY_CTL PMU register for PHY2.
*/
ret = clk_prepare_enable(phy2->clk2);
if (ret) {
reset_control_assert(phy2->reset);
clk_disable_unprepare(phy2->clk);
reset_control_assert(phy->reset);
clk_disable_unprepare(phy->clk2);
clk_disable_unprepare(phy->clk);
return ret;
}
if (phy2->pmu && data->cfg->hci_phy_ctl_clear) {
val = readl(phy2->pmu + REG_HCI_PHY_CTL);
val &= ~data->cfg->hci_phy_ctl_clear;
writel(val, phy2->pmu + REG_HCI_PHY_CTL);
}
clk_disable_unprepare(phy->clk2);
}
if (phy->pmu && data->cfg->hci_phy_ctl_clear) {
val = readl(phy->pmu + REG_HCI_PHY_CTL);
val &= ~data->cfg->hci_phy_ctl_clear;
writel(val, phy->pmu + REG_HCI_PHY_CTL);
}
if (data->cfg->siddq_in_base) {
if (phy->index == 0) {
val = readl(data->base + data->cfg->phyctl_offset);
val |= PHY_CTL_VBUSVLDEXT;
val &= ~PHY_CTL_SIDDQ;
writel(val, data->base + data->cfg->phyctl_offset);
}
} else {
/* Enable USB 45 Ohm resistor calibration */
if (phy->index == 0)
sun4i_usb_phy_write(phy, PHY_RES45_CAL_EN, 0x01, 1);
/* Adjust PHY's magnitude and rate */
sun4i_usb_phy_write(phy, PHY_TX_AMPLITUDE_TUNE, 0x14, 5);
/* Disconnect threshold adjustment */
sun4i_usb_phy_write(phy, PHY_DISCON_TH_SEL,
data->cfg->disc_thresh, 2);
}
sun4i_usb_phy_passby(phy, 1);
if (phy->index == 0) {
data->phy0_init = true;
/* Enable pull-ups */
sun4i_usb_phy0_update_iscr(_phy, 0, ISCR_DPDM_PULLUP_EN);
sun4i_usb_phy0_update_iscr(_phy, 0, ISCR_ID_PULLUP_EN);
/* Force ISCR and cable state updates */
data->id_det = -1;
data->vbus_det = -1;
queue_delayed_work(system_wq, &data->detect, 0);
}
return 0;
}
static int sun4i_usb_phy_exit(struct phy *_phy)
{
struct sun4i_usb_phy *phy = phy_get_drvdata(_phy);
struct sun4i_usb_phy_data *data = to_sun4i_usb_phy_data(phy);
if (phy->index == 0) {
if (data->cfg->siddq_in_base) {
void __iomem *phyctl = data->base +
data->cfg->phyctl_offset;
writel(readl(phyctl) | PHY_CTL_SIDDQ, phyctl);
}
/* Disable pull-ups */
sun4i_usb_phy0_update_iscr(_phy, ISCR_DPDM_PULLUP_EN, 0);
sun4i_usb_phy0_update_iscr(_phy, ISCR_ID_PULLUP_EN, 0);
data->phy0_init = false;
}
if (data->cfg->needs_phy2_siddq && phy->index != 2) {
struct sun4i_usb_phy *phy2 = &data->phys[2];
clk_disable_unprepare(phy2->clk);
reset_control_assert(phy2->reset);
}
sun4i_usb_phy_passby(phy, 0);
reset_control_assert(phy->reset);
clk_disable_unprepare(phy->clk2);
clk_disable_unprepare(phy->clk);
return 0;
}
static int sun4i_usb_phy0_get_id_det(struct sun4i_usb_phy_data *data)
{
switch (data->dr_mode) {
case USB_DR_MODE_OTG:
if (data->id_det_gpio)
return gpiod_get_value_cansleep(data->id_det_gpio);
else
return 1; /* Fallback to peripheral mode */
case USB_DR_MODE_HOST:
return 0;
case USB_DR_MODE_PERIPHERAL:
default:
return 1;
}
}
static int sun4i_usb_phy0_get_vbus_det(struct sun4i_usb_phy_data *data)
{
if (data->vbus_det_gpio)
return gpiod_get_value_cansleep(data->vbus_det_gpio);
if (data->vbus_power_supply) {
union power_supply_propval val;
int r;
r = power_supply_get_property(data->vbus_power_supply,
POWER_SUPPLY_PROP_PRESENT, &val);
if (r == 0)
return val.intval;
}
/* Fallback: report vbus as high */
return 1;
}
static bool sun4i_usb_phy0_have_vbus_det(struct sun4i_usb_phy_data *data)
{
return data->vbus_det_gpio || data->vbus_power_supply;
}
static bool sun4i_usb_phy0_poll(struct sun4i_usb_phy_data *data)
{
if ((data->id_det_gpio && data->id_det_irq <= 0) ||
(data->vbus_det_gpio && data->vbus_det_irq <= 0))
return true;
/*
* The A31/A23/A33 companion pmics (AXP221/AXP223) do not
* generate vbus change interrupts when the board is driving
* vbus using the N_VBUSEN pin on the pmic, so we must poll
* when using the pmic for vbus-det _and_ we're driving vbus.
*/
if (data->cfg->poll_vbusen && data->vbus_power_supply &&
data->phys[0].regulator_on)
return true;
return false;
}
static int sun4i_usb_phy_power_on(struct phy *_phy)
{
struct sun4i_usb_phy *phy = phy_get_drvdata(_phy);
struct sun4i_usb_phy_data *data = to_sun4i_usb_phy_data(phy);
int ret;
if (!phy->vbus || phy->regulator_on)
return 0;
/* For phy0 only turn on Vbus if we don't have an ext. Vbus */
if (phy->index == 0 && sun4i_usb_phy0_have_vbus_det(data) &&
data->vbus_det) {
dev_warn(&_phy->dev, "External vbus detected, not enabling our own vbus\n");
return 0;
}
ret = regulator_enable(phy->vbus);
if (ret)
return ret;
phy->regulator_on = true;
/* We must report Vbus high within OTG_TIME_A_WAIT_VRISE msec. */
if (phy->index == 0 && sun4i_usb_phy0_poll(data))
mod_delayed_work(system_wq, &data->detect, DEBOUNCE_TIME);
return 0;
}
static int sun4i_usb_phy_power_off(struct phy *_phy)
{
struct sun4i_usb_phy *phy = phy_get_drvdata(_phy);
struct sun4i_usb_phy_data *data = to_sun4i_usb_phy_data(phy);
if (!phy->vbus || !phy->regulator_on)
return 0;
regulator_disable(phy->vbus);
phy->regulator_on = false;
/*
	 * phy0 vbus typically discharges slowly; sometimes this causes the
	 * Vbus gpio to not trigger an edge irq on Vbus off, so force a rescan.
*/
if (phy->index == 0 && !sun4i_usb_phy0_poll(data))
mod_delayed_work(system_wq, &data->detect, POLL_TIME);
return 0;
}
static int sun4i_usb_phy_set_mode(struct phy *_phy,
enum phy_mode mode, int submode)
{
struct sun4i_usb_phy *phy = phy_get_drvdata(_phy);
struct sun4i_usb_phy_data *data = to_sun4i_usb_phy_data(phy);
int new_mode;
if (phy->index != 0) {
if (mode == PHY_MODE_USB_HOST)
return 0;
return -EINVAL;
}
switch (mode) {
case PHY_MODE_USB_HOST:
new_mode = USB_DR_MODE_HOST;
break;
case PHY_MODE_USB_DEVICE:
new_mode = USB_DR_MODE_PERIPHERAL;
break;
case PHY_MODE_USB_OTG:
new_mode = USB_DR_MODE_OTG;
break;
default:
return -EINVAL;
}
if (new_mode != data->dr_mode) {
dev_info(&_phy->dev, "Changing dr_mode to %d\n", new_mode);
data->dr_mode = new_mode;
}
data->id_det = -1; /* Force reprocessing of id */
data->force_session_end = true;
queue_delayed_work(system_wq, &data->detect, 0);
return 0;
}
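/**
 * sun4i_usb_phy_set_squelch_detect() - Enable/disable squelch detect
 * @_phy: reference to a sun4i usb phy
 * @enabled: whether to enable or disable squelch detect
 */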
void sun4i_usb_phy_set_squelch_detect(struct phy *_phy, bool enabled)
{
struct sun4i_usb_phy *phy = phy_get_drvdata(_phy);
sun4i_usb_phy_write(phy, PHY_SQUELCH_DETECT, enabled ? 0 : 2, 2);
}
EXPORT_SYMBOL_GPL(sun4i_usb_phy_set_squelch_detect);
static const struct phy_ops sun4i_usb_phy_ops = {
.init = sun4i_usb_phy_init,
.exit = sun4i_usb_phy_exit,
.power_on = sun4i_usb_phy_power_on,
.power_off = sun4i_usb_phy_power_off,
.set_mode = sun4i_usb_phy_set_mode,
.owner = THIS_MODULE,
};
static void sun4i_usb_phy0_reroute(struct sun4i_usb_phy_data *data, int id_det)
{
u32 regval;
regval = readl(data->base + REG_PHY_OTGCTL);
if (id_det == 0) {
/* Host mode. Route phy0 to EHCI/OHCI */
regval &= ~OTGCTL_ROUTE_MUSB;
} else {
/* Peripheral mode. Route phy0 to MUSB */
regval |= OTGCTL_ROUTE_MUSB;
}
writel(regval, data->base + REG_PHY_OTGCTL);
}
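/*
 * Delayed work that re-reads the ID and VBUS state, updates the ISCR force
 * bits, notifies extcon listeners and, on SoCs with a dual-routed PHY0,
 * re-routes it between MUSB and EHCI/OHCI. It re-queues itself as long as
 * polling is required.
 */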
static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work)
{
struct sun4i_usb_phy_data *data =
container_of(work, struct sun4i_usb_phy_data, detect.work);
struct phy *phy0 = data->phys[0].phy;
struct sun4i_usb_phy *phy;
bool force_session_end, id_notify = false, vbus_notify = false;
int id_det, vbus_det;
if (!phy0)
return;
phy = phy_get_drvdata(phy0);
id_det = sun4i_usb_phy0_get_id_det(data);
vbus_det = sun4i_usb_phy0_get_vbus_det(data);
mutex_lock(&phy0->mutex);
if (!data->phy0_init) {
mutex_unlock(&phy0->mutex);
return;
}
force_session_end = data->force_session_end;
data->force_session_end = false;
if (id_det != data->id_det) {
/* id-change, force session end if we've no vbus detection */
if (data->dr_mode == USB_DR_MODE_OTG &&
!sun4i_usb_phy0_have_vbus_det(data))
force_session_end = true;
/* When entering host mode (id = 0) force end the session now */
if (force_session_end && id_det == 0) {
sun4i_usb_phy0_set_vbus_detect(phy0, 0);
msleep(200);
sun4i_usb_phy0_set_vbus_detect(phy0, 1);
}
sun4i_usb_phy0_set_id_detect(phy0, id_det);
data->id_det = id_det;
id_notify = true;
}
if (vbus_det != data->vbus_det) {
sun4i_usb_phy0_set_vbus_detect(phy0, vbus_det);
data->vbus_det = vbus_det;
vbus_notify = true;
}
mutex_unlock(&phy0->mutex);
if (id_notify) {
extcon_set_state_sync(data->extcon, EXTCON_USB_HOST,
!id_det);
/* When leaving host mode force end the session here */
if (force_session_end && id_det == 1) {
mutex_lock(&phy0->mutex);
sun4i_usb_phy0_set_vbus_detect(phy0, 0);
msleep(1000);
sun4i_usb_phy0_set_vbus_detect(phy0, 1);
mutex_unlock(&phy0->mutex);
}
/* Enable PHY0 passby for host mode only. */
sun4i_usb_phy_passby(phy, !id_det);
/* Re-route PHY0 if necessary */
if (data->cfg->phy0_dual_route)
sun4i_usb_phy0_reroute(data, id_det);
}
if (vbus_notify)
extcon_set_state_sync(data->extcon, EXTCON_USB, vbus_det);
if (sun4i_usb_phy0_poll(data))
queue_delayed_work(system_wq, &data->detect, POLL_TIME);
}
static irqreturn_t sun4i_usb_phy0_id_vbus_det_irq(int irq, void *dev_id)
{
struct sun4i_usb_phy_data *data = dev_id;
/* vbus or id changed, let the pins settle and then scan them */
mod_delayed_work(system_wq, &data->detect, DEBOUNCE_TIME);
return IRQ_HANDLED;
}
static int sun4i_usb_phy0_vbus_notify(struct notifier_block *nb,
unsigned long val, void *v)
{
struct sun4i_usb_phy_data *data =
container_of(nb, struct sun4i_usb_phy_data, vbus_power_nb);
struct power_supply *psy = v;
/* Properties on the vbus_power_supply changed, scan vbus_det */
if (val == PSY_EVENT_PROP_CHANGED && psy == data->vbus_power_supply)
mod_delayed_work(system_wq, &data->detect, DEBOUNCE_TIME);
return NOTIFY_OK;
}
static struct phy *sun4i_usb_phy_xlate(struct device *dev,
struct of_phandle_args *args)
{
struct sun4i_usb_phy_data *data = dev_get_drvdata(dev);
if (args->args[0] >= data->cfg->num_phys)
return ERR_PTR(-ENODEV);
if (data->cfg->missing_phys & BIT(args->args[0]))
return ERR_PTR(-ENODEV);
return data->phys[args->args[0]].phy;
}
static void sun4i_usb_phy_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct sun4i_usb_phy_data *data = dev_get_drvdata(dev);
if (data->vbus_power_nb_registered)
power_supply_unreg_notifier(&data->vbus_power_nb);
if (data->id_det_irq > 0)
devm_free_irq(dev, data->id_det_irq, data);
if (data->vbus_det_irq > 0)
devm_free_irq(dev, data->vbus_det_irq, data);
cancel_delayed_work_sync(&data->detect);
}
static const unsigned int sun4i_usb_phy0_cable[] = {
EXTCON_USB,
EXTCON_USB_HOST,
EXTCON_NONE,
};
static int sun4i_usb_phy_probe(struct platform_device *pdev)
{
struct sun4i_usb_phy_data *data;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct phy_provider *phy_provider;
int i, ret;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
spin_lock_init(&data->reg_lock);
INIT_DELAYED_WORK(&data->detect, sun4i_usb_phy0_id_vbus_det_scan);
dev_set_drvdata(dev, data);
data->cfg = of_device_get_match_data(dev);
if (!data->cfg)
return -EINVAL;
data->base = devm_platform_ioremap_resource_byname(pdev, "phy_ctrl");
if (IS_ERR(data->base))
return PTR_ERR(data->base);
data->id_det_gpio = devm_gpiod_get_optional(dev, "usb0_id_det",
GPIOD_IN);
if (IS_ERR(data->id_det_gpio)) {
dev_err(dev, "Couldn't request ID GPIO\n");
return PTR_ERR(data->id_det_gpio);
}
data->vbus_det_gpio = devm_gpiod_get_optional(dev, "usb0_vbus_det",
GPIOD_IN);
if (IS_ERR(data->vbus_det_gpio)) {
dev_err(dev, "Couldn't request VBUS detect GPIO\n");
return PTR_ERR(data->vbus_det_gpio);
}
if (of_property_present(np, "usb0_vbus_power-supply")) {
data->vbus_power_supply = devm_power_supply_get_by_phandle(dev,
"usb0_vbus_power-supply");
if (IS_ERR(data->vbus_power_supply)) {
dev_err(dev, "Couldn't get the VBUS power supply\n");
return PTR_ERR(data->vbus_power_supply);
}
if (!data->vbus_power_supply)
return -EPROBE_DEFER;
}
data->dr_mode = of_usb_get_dr_mode_by_phy(np, 0);
data->extcon = devm_extcon_dev_allocate(dev, sun4i_usb_phy0_cable);
if (IS_ERR(data->extcon)) {
dev_err(dev, "Couldn't allocate our extcon device\n");
return PTR_ERR(data->extcon);
}
ret = devm_extcon_dev_register(dev, data->extcon);
if (ret) {
dev_err(dev, "failed to register extcon: %d\n", ret);
return ret;
}
for (i = 0; i < data->cfg->num_phys; i++) {
struct sun4i_usb_phy *phy = data->phys + i;
char name[16];
if (data->cfg->missing_phys & BIT(i))
continue;
snprintf(name, sizeof(name), "usb%d_vbus", i);
phy->vbus = devm_regulator_get_optional(dev, name);
if (IS_ERR(phy->vbus)) {
if (PTR_ERR(phy->vbus) == -EPROBE_DEFER) {
dev_err(dev,
"Couldn't get regulator %s... Deferring probe\n",
name);
return -EPROBE_DEFER;
}
phy->vbus = NULL;
}
if (data->cfg->dedicated_clocks)
snprintf(name, sizeof(name), "usb%d_phy", i);
else
strscpy(name, "usb_phy", sizeof(name));
phy->clk = devm_clk_get(dev, name);
if (IS_ERR(phy->clk)) {
dev_err(dev, "failed to get clock %s\n", name);
return PTR_ERR(phy->clk);
}
/* The first PHY is always tied to OTG, and never HSIC */
if (data->cfg->hsic_index && i == data->cfg->hsic_index) {
/* HSIC needs secondary clock */
snprintf(name, sizeof(name), "usb%d_hsic_12M", i);
phy->clk2 = devm_clk_get(dev, name);
if (IS_ERR(phy->clk2)) {
dev_err(dev, "failed to get clock %s\n", name);
return PTR_ERR(phy->clk2);
}
} else {
snprintf(name, sizeof(name), "pmu%d_clk", i);
phy->clk2 = devm_clk_get_optional(dev, name);
if (IS_ERR(phy->clk2)) {
dev_err(dev, "failed to get clock %s\n", name);
return PTR_ERR(phy->clk2);
}
}
snprintf(name, sizeof(name), "usb%d_reset", i);
phy->reset = devm_reset_control_get(dev, name);
if (IS_ERR(phy->reset)) {
dev_err(dev, "failed to get reset %s\n", name);
return PTR_ERR(phy->reset);
}
if (i || data->cfg->phy0_dual_route) { /* No pmu for musb */
snprintf(name, sizeof(name), "pmu%d", i);
phy->pmu = devm_platform_ioremap_resource_byname(pdev, name);
if (IS_ERR(phy->pmu))
return PTR_ERR(phy->pmu);
}
phy->phy = devm_phy_create(dev, NULL, &sun4i_usb_phy_ops);
if (IS_ERR(phy->phy)) {
dev_err(dev, "failed to create PHY %d\n", i);
return PTR_ERR(phy->phy);
}
phy->index = i;
phy_set_drvdata(phy->phy, &data->phys[i]);
}
data->id_det_irq = gpiod_to_irq(data->id_det_gpio);
if (data->id_det_irq > 0) {
ret = devm_request_irq(dev, data->id_det_irq,
sun4i_usb_phy0_id_vbus_det_irq,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
"usb0-id-det", data);
if (ret) {
dev_err(dev, "Err requesting id-det-irq: %d\n", ret);
return ret;
}
}
data->vbus_det_irq = gpiod_to_irq(data->vbus_det_gpio);
if (data->vbus_det_irq > 0) {
ret = devm_request_irq(dev, data->vbus_det_irq,
sun4i_usb_phy0_id_vbus_det_irq,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
"usb0-vbus-det", data);
if (ret) {
dev_err(dev, "Err requesting vbus-det-irq: %d\n", ret);
data->vbus_det_irq = -1;
sun4i_usb_phy_remove(pdev); /* Stop detect work */
return ret;
}
}
if (data->vbus_power_supply) {
data->vbus_power_nb.notifier_call = sun4i_usb_phy0_vbus_notify;
data->vbus_power_nb.priority = 0;
ret = power_supply_reg_notifier(&data->vbus_power_nb);
if (ret) {
sun4i_usb_phy_remove(pdev); /* Stop detect work */
return ret;
}
data->vbus_power_nb_registered = true;
}
phy_provider = devm_of_phy_provider_register(dev, sun4i_usb_phy_xlate);
if (IS_ERR(phy_provider)) {
sun4i_usb_phy_remove(pdev); /* Stop detect work */
return PTR_ERR(phy_provider);
}
dev_dbg(dev, "successfully loaded\n");
return 0;
}
static const struct sun4i_usb_phy_cfg suniv_f1c100s_cfg = {
.num_phys = 1,
.disc_thresh = 3,
.phyctl_offset = REG_PHYCTL_A10,
.dedicated_clocks = true,
};
static const struct sun4i_usb_phy_cfg sun4i_a10_cfg = {
.num_phys = 3,
.disc_thresh = 3,
.phyctl_offset = REG_PHYCTL_A10,
.dedicated_clocks = false,
};
static const struct sun4i_usb_phy_cfg sun5i_a13_cfg = {
.num_phys = 2,
.disc_thresh = 2,
.phyctl_offset = REG_PHYCTL_A10,
.dedicated_clocks = false,
};
static const struct sun4i_usb_phy_cfg sun6i_a31_cfg = {
.num_phys = 3,
.disc_thresh = 3,
.phyctl_offset = REG_PHYCTL_A10,
.dedicated_clocks = true,
.poll_vbusen = true,
};
static const struct sun4i_usb_phy_cfg sun7i_a20_cfg = {
.num_phys = 3,
.disc_thresh = 2,
.phyctl_offset = REG_PHYCTL_A10,
.dedicated_clocks = false,
};
static const struct sun4i_usb_phy_cfg sun8i_a23_cfg = {
.num_phys = 2,
.disc_thresh = 3,
.phyctl_offset = REG_PHYCTL_A10,
.dedicated_clocks = true,
.poll_vbusen = true,
};
static const struct sun4i_usb_phy_cfg sun8i_a33_cfg = {
.num_phys = 2,
.disc_thresh = 3,
.phyctl_offset = REG_PHYCTL_A33,
.dedicated_clocks = true,
.poll_vbusen = true,
};
static const struct sun4i_usb_phy_cfg sun8i_a83t_cfg = {
.num_phys = 3,
.hsic_index = 2,
.phyctl_offset = REG_PHYCTL_A33,
.dedicated_clocks = true,
.siddq_in_base = true,
};
static const struct sun4i_usb_phy_cfg sun8i_h3_cfg = {
.num_phys = 4,
.disc_thresh = 3,
.phyctl_offset = REG_PHYCTL_A33,
.dedicated_clocks = true,
.hci_phy_ctl_clear = PHY_CTL_H3_SIDDQ,
.phy0_dual_route = true,
};
static const struct sun4i_usb_phy_cfg sun8i_r40_cfg = {
.num_phys = 3,
.disc_thresh = 3,
.phyctl_offset = REG_PHYCTL_A33,
.dedicated_clocks = true,
.hci_phy_ctl_clear = PHY_CTL_H3_SIDDQ,
.phy0_dual_route = true,
};
static const struct sun4i_usb_phy_cfg sun8i_v3s_cfg = {
.num_phys = 1,
.disc_thresh = 3,
.phyctl_offset = REG_PHYCTL_A33,
.dedicated_clocks = true,
.hci_phy_ctl_clear = PHY_CTL_H3_SIDDQ,
.phy0_dual_route = true,
};
static const struct sun4i_usb_phy_cfg sun20i_d1_cfg = {
.num_phys = 2,
.phyctl_offset = REG_PHYCTL_A33,
.dedicated_clocks = true,
.hci_phy_ctl_clear = PHY_CTL_SIDDQ,
.phy0_dual_route = true,
.siddq_in_base = true,
};
static const struct sun4i_usb_phy_cfg sun50i_a64_cfg = {
.num_phys = 2,
.disc_thresh = 3,
.phyctl_offset = REG_PHYCTL_A33,
.dedicated_clocks = true,
.hci_phy_ctl_clear = PHY_CTL_H3_SIDDQ,
.phy0_dual_route = true,
};
static const struct sun4i_usb_phy_cfg sun50i_h6_cfg = {
.num_phys = 4,
.phyctl_offset = REG_PHYCTL_A33,
.dedicated_clocks = true,
.phy0_dual_route = true,
.missing_phys = BIT(1) | BIT(2),
.siddq_in_base = true,
};
static const struct sun4i_usb_phy_cfg sun50i_h616_cfg = {
.num_phys = 4,
.disc_thresh = 3,
.phyctl_offset = REG_PHYCTL_A33,
.dedicated_clocks = true,
.phy0_dual_route = true,
.hci_phy_ctl_clear = PHY_CTL_SIDDQ,
.needs_phy2_siddq = true,
.siddq_in_base = true,
};
static const struct of_device_id sun4i_usb_phy_of_match[] = {
{ .compatible = "allwinner,sun4i-a10-usb-phy", .data = &sun4i_a10_cfg },
{ .compatible = "allwinner,sun5i-a13-usb-phy", .data = &sun5i_a13_cfg },
{ .compatible = "allwinner,sun6i-a31-usb-phy", .data = &sun6i_a31_cfg },
{ .compatible = "allwinner,sun7i-a20-usb-phy", .data = &sun7i_a20_cfg },
{ .compatible = "allwinner,sun8i-a23-usb-phy", .data = &sun8i_a23_cfg },
{ .compatible = "allwinner,sun8i-a33-usb-phy", .data = &sun8i_a33_cfg },
{ .compatible = "allwinner,sun8i-a83t-usb-phy", .data = &sun8i_a83t_cfg },
{ .compatible = "allwinner,sun8i-h3-usb-phy", .data = &sun8i_h3_cfg },
{ .compatible = "allwinner,sun8i-r40-usb-phy", .data = &sun8i_r40_cfg },
{ .compatible = "allwinner,sun8i-v3s-usb-phy", .data = &sun8i_v3s_cfg },
{ .compatible = "allwinner,sun20i-d1-usb-phy", .data = &sun20i_d1_cfg },
{ .compatible = "allwinner,sun50i-a64-usb-phy",
.data = &sun50i_a64_cfg},
{ .compatible = "allwinner,sun50i-h6-usb-phy", .data = &sun50i_h6_cfg },
{ .compatible = "allwinner,sun50i-h616-usb-phy", .data = &sun50i_h616_cfg },
{ .compatible = "allwinner,suniv-f1c100s-usb-phy",
.data = &suniv_f1c100s_cfg },
{ },
};
MODULE_DEVICE_TABLE(of, sun4i_usb_phy_of_match);
static struct platform_driver sun4i_usb_phy_driver = {
.probe = sun4i_usb_phy_probe,
.remove_new = sun4i_usb_phy_remove,
.driver = {
.of_match_table = sun4i_usb_phy_of_match,
.name = "sun4i-usb-phy",
}
};
module_platform_driver(sun4i_usb_phy_driver);
MODULE_DESCRIPTION("Allwinner sun4i USB phy driver");
MODULE_AUTHOR("Hans de Goede <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/phy/allwinner/phy-sun4i-usb.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Allwinner sun9i USB phy driver
*
* Copyright (C) 2014-2015 Chen-Yu Tsai <[email protected]>
*
* Based on phy-sun4i-usb.c from
* Hans de Goede <[email protected]>
*
* and code from
* Allwinner Technology Co., Ltd. <www.allwinnertech.com>
*/
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/phy/phy.h>
#include <linux/usb/of.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#define SUNXI_AHB_INCR16_BURST_EN BIT(11)
#define SUNXI_AHB_INCR8_BURST_EN BIT(10)
#define SUNXI_AHB_INCR4_BURST_EN BIT(9)
#define SUNXI_AHB_INCRX_ALIGN_EN BIT(8)
#define SUNXI_ULPI_BYPASS_EN BIT(0)
/* usb1 HSIC specific bits */
#define SUNXI_EHCI_HS_FORCE BIT(20)
#define SUNXI_HSIC_CONNECT_DET BIT(17)
#define SUNXI_HSIC_CONNECT_INT BIT(16)
#define SUNXI_HSIC BIT(1)
struct sun9i_usb_phy {
struct phy *phy;
void __iomem *pmu;
struct reset_control *reset;
struct clk *clk;
struct clk *hsic_clk;
enum usb_phy_interface type;
};
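/*
 * As on sun4i, "passby" enables the AHB burst/alignment options and the ULPI
 * bypass in the PHY's PMU register so the EHCI/OHCI controllers can drive the
 * PHY; the HSIC interface additionally needs the HSIC-specific bits.
 */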
static void sun9i_usb_phy_passby(struct sun9i_usb_phy *phy, int enable)
{
u32 bits, reg_value;
bits = SUNXI_AHB_INCR16_BURST_EN | SUNXI_AHB_INCR8_BURST_EN |
SUNXI_AHB_INCR4_BURST_EN | SUNXI_AHB_INCRX_ALIGN_EN |
SUNXI_ULPI_BYPASS_EN;
if (phy->type == USBPHY_INTERFACE_MODE_HSIC)
bits |= SUNXI_HSIC | SUNXI_EHCI_HS_FORCE |
SUNXI_HSIC_CONNECT_DET | SUNXI_HSIC_CONNECT_INT;
reg_value = readl(phy->pmu);
if (enable)
reg_value |= bits;
else
reg_value &= ~bits;
writel(reg_value, phy->pmu);
}
static int sun9i_usb_phy_init(struct phy *_phy)
{
struct sun9i_usb_phy *phy = phy_get_drvdata(_phy);
int ret;
ret = clk_prepare_enable(phy->clk);
if (ret)
goto err_clk;
ret = clk_prepare_enable(phy->hsic_clk);
if (ret)
goto err_hsic_clk;
ret = reset_control_deassert(phy->reset);
if (ret)
goto err_reset;
sun9i_usb_phy_passby(phy, 1);
return 0;
err_reset:
clk_disable_unprepare(phy->hsic_clk);
err_hsic_clk:
clk_disable_unprepare(phy->clk);
err_clk:
return ret;
}
static int sun9i_usb_phy_exit(struct phy *_phy)
{
struct sun9i_usb_phy *phy = phy_get_drvdata(_phy);
sun9i_usb_phy_passby(phy, 0);
reset_control_assert(phy->reset);
clk_disable_unprepare(phy->hsic_clk);
clk_disable_unprepare(phy->clk);
return 0;
}
static const struct phy_ops sun9i_usb_phy_ops = {
.init = sun9i_usb_phy_init,
.exit = sun9i_usb_phy_exit,
.owner = THIS_MODULE,
};
static int sun9i_usb_phy_probe(struct platform_device *pdev)
{
struct sun9i_usb_phy *phy;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct phy_provider *phy_provider;
phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
if (!phy)
return -ENOMEM;
phy->type = of_usb_get_phy_mode(np);
if (phy->type == USBPHY_INTERFACE_MODE_HSIC) {
phy->clk = devm_clk_get(dev, "hsic_480M");
if (IS_ERR(phy->clk)) {
dev_err(dev, "failed to get hsic_480M clock\n");
return PTR_ERR(phy->clk);
}
phy->hsic_clk = devm_clk_get(dev, "hsic_12M");
if (IS_ERR(phy->hsic_clk)) {
dev_err(dev, "failed to get hsic_12M clock\n");
return PTR_ERR(phy->hsic_clk);
}
phy->reset = devm_reset_control_get(dev, "hsic");
if (IS_ERR(phy->reset)) {
dev_err(dev, "failed to get reset control\n");
return PTR_ERR(phy->reset);
}
} else {
phy->clk = devm_clk_get(dev, "phy");
if (IS_ERR(phy->clk)) {
dev_err(dev, "failed to get phy clock\n");
return PTR_ERR(phy->clk);
}
phy->reset = devm_reset_control_get(dev, "phy");
if (IS_ERR(phy->reset)) {
dev_err(dev, "failed to get reset control\n");
return PTR_ERR(phy->reset);
}
}
phy->pmu = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(phy->pmu))
return PTR_ERR(phy->pmu);
phy->phy = devm_phy_create(dev, NULL, &sun9i_usb_phy_ops);
if (IS_ERR(phy->phy)) {
dev_err(dev, "failed to create PHY\n");
return PTR_ERR(phy->phy);
}
phy_set_drvdata(phy->phy, phy);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
return PTR_ERR_OR_ZERO(phy_provider);
}
static const struct of_device_id sun9i_usb_phy_of_match[] = {
{ .compatible = "allwinner,sun9i-a80-usb-phy" },
{ },
};
MODULE_DEVICE_TABLE(of, sun9i_usb_phy_of_match);
static struct platform_driver sun9i_usb_phy_driver = {
.probe = sun9i_usb_phy_probe,
.driver = {
.of_match_table = sun9i_usb_phy_of_match,
.name = "sun9i-usb-phy",
}
};
module_platform_driver(sun9i_usb_phy_driver);
MODULE_DESCRIPTION("Allwinner sun9i USB phy driver");
MODULE_AUTHOR("Chen-Yu Tsai <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/phy/allwinner/phy-sun9i-usb.c |
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/phy.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <dt-bindings/phy/phy-lan966x-serdes.h>
#include "lan966x_serdes_regs.h"
#define PLL_CONF_MASK GENMASK(4, 3)
#define PLL_CONF_25MHZ 0
#define PLL_CONF_125MHZ 1
#define PLL_CONF_SERDES_125MHZ 2
#define PLL_CONF_BYPASS 3
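/*
 * lan_offset_() flattens the (target, group, register) coordinates used by
 * the generated register macros into a single byte offset; lan_rmw() then
 * does a masked read-modify-write at that offset.
 */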
#define lan_offset_(id, tinst, tcnt, \
gbase, ginst, gcnt, gwidth, \
raddr, rinst, rcnt, rwidth) \
(gbase + ((ginst) * gwidth) + raddr + ((rinst) * rwidth))
#define lan_offset(...) lan_offset_(__VA_ARGS__)
#define lan_rmw(val, mask, reg, off) \
lan_rmw_(val, mask, reg, lan_offset(off))
#define SERDES_MUX(_idx, _port, _mode, _submode, _mask, _mux) { \
.idx = _idx, \
.port = _port, \
.mode = _mode, \
.submode = _submode, \
.mask = _mask, \
.mux = _mux, \
}
#define SERDES_MUX_GMII(i, p, m, c) \
SERDES_MUX(i, p, PHY_MODE_ETHERNET, PHY_INTERFACE_MODE_GMII, m, c)
#define SERDES_MUX_SGMII(i, p, m, c) \
SERDES_MUX(i, p, PHY_MODE_ETHERNET, PHY_INTERFACE_MODE_SGMII, m, c)
#define SERDES_MUX_QSGMII(i, p, m, c) \
SERDES_MUX(i, p, PHY_MODE_ETHERNET, PHY_INTERFACE_MODE_QSGMII, m, c)
#define SERDES_MUX_RGMII(i, p, m, c) \
SERDES_MUX(i, p, PHY_MODE_ETHERNET, PHY_INTERFACE_MODE_RGMII, m, c), \
SERDES_MUX(i, p, PHY_MODE_ETHERNET, PHY_INTERFACE_MODE_RGMII_TXID, m, c), \
SERDES_MUX(i, p, PHY_MODE_ETHERNET, PHY_INTERFACE_MODE_RGMII_RXID, m, c), \
SERDES_MUX(i, p, PHY_MODE_ETHERNET, PHY_INTERFACE_MODE_RGMII_ID, m, c)
static void lan_rmw_(u32 val, u32 mask, void __iomem *mem, u32 offset)
{
u32 v;
v = readl(mem + offset);
v = (v & ~mask) | (val & mask);
writel(v, mem + offset);
}
struct serdes_mux {
u8 idx;
u8 port;
enum phy_mode mode;
int submode;
u32 mask;
u32 mux;
};
static const struct serdes_mux lan966x_serdes_muxes[] = {
SERDES_MUX_QSGMII(SERDES6G(1), 0, HSIO_HW_CFG_QSGMII_ENA,
HSIO_HW_CFG_QSGMII_ENA_SET(BIT(0))),
SERDES_MUX_QSGMII(SERDES6G(1), 1, HSIO_HW_CFG_QSGMII_ENA,
HSIO_HW_CFG_QSGMII_ENA_SET(BIT(0))),
SERDES_MUX_QSGMII(SERDES6G(1), 2, HSIO_HW_CFG_QSGMII_ENA,
HSIO_HW_CFG_QSGMII_ENA_SET(BIT(0))),
SERDES_MUX_QSGMII(SERDES6G(1), 3, HSIO_HW_CFG_QSGMII_ENA,
HSIO_HW_CFG_QSGMII_ENA_SET(BIT(0))),
SERDES_MUX_QSGMII(SERDES6G(2), 4, HSIO_HW_CFG_QSGMII_ENA,
HSIO_HW_CFG_QSGMII_ENA_SET(BIT(1))),
SERDES_MUX_QSGMII(SERDES6G(2), 5, HSIO_HW_CFG_QSGMII_ENA,
HSIO_HW_CFG_QSGMII_ENA_SET(BIT(1))),
SERDES_MUX_QSGMII(SERDES6G(2), 6, HSIO_HW_CFG_QSGMII_ENA,
HSIO_HW_CFG_QSGMII_ENA_SET(BIT(1))),
SERDES_MUX_QSGMII(SERDES6G(2), 7, HSIO_HW_CFG_QSGMII_ENA,
HSIO_HW_CFG_QSGMII_ENA_SET(BIT(1))),
SERDES_MUX_GMII(CU(0), 0, HSIO_HW_CFG_GMII_ENA,
HSIO_HW_CFG_GMII_ENA_SET(BIT(0))),
SERDES_MUX_GMII(CU(1), 1, HSIO_HW_CFG_GMII_ENA,
HSIO_HW_CFG_GMII_ENA_SET(BIT(1))),
SERDES_MUX_SGMII(SERDES6G(0), 0, HSIO_HW_CFG_SD6G_0_CFG, 0),
SERDES_MUX_SGMII(SERDES6G(1), 1, HSIO_HW_CFG_SD6G_1_CFG, 0),
SERDES_MUX_SGMII(SERDES6G(0), 2, HSIO_HW_CFG_SD6G_0_CFG,
HSIO_HW_CFG_SD6G_0_CFG_SET(1)),
SERDES_MUX_SGMII(SERDES6G(1), 3, HSIO_HW_CFG_SD6G_1_CFG,
HSIO_HW_CFG_SD6G_1_CFG_SET(1)),
SERDES_MUX_RGMII(RGMII(0), 2, HSIO_HW_CFG_RGMII_0_CFG |
HSIO_HW_CFG_RGMII_ENA |
HSIO_HW_CFG_GMII_ENA,
HSIO_HW_CFG_RGMII_0_CFG_SET(0) |
HSIO_HW_CFG_RGMII_ENA_SET(BIT(0)) |
HSIO_HW_CFG_GMII_ENA_SET(BIT(2))),
SERDES_MUX_RGMII(RGMII(1), 3, HSIO_HW_CFG_RGMII_1_CFG |
HSIO_HW_CFG_RGMII_ENA |
HSIO_HW_CFG_GMII_ENA,
HSIO_HW_CFG_RGMII_1_CFG_SET(0) |
HSIO_HW_CFG_RGMII_ENA_SET(BIT(1)) |
HSIO_HW_CFG_GMII_ENA_SET(BIT(3))),
SERDES_MUX_RGMII(RGMII(0), 5, HSIO_HW_CFG_RGMII_0_CFG |
HSIO_HW_CFG_RGMII_ENA |
HSIO_HW_CFG_GMII_ENA,
HSIO_HW_CFG_RGMII_0_CFG_SET(BIT(0)) |
HSIO_HW_CFG_RGMII_ENA_SET(BIT(0)) |
HSIO_HW_CFG_GMII_ENA_SET(BIT(5))),
SERDES_MUX_RGMII(RGMII(1), 6, HSIO_HW_CFG_RGMII_1_CFG |
HSIO_HW_CFG_RGMII_ENA |
HSIO_HW_CFG_GMII_ENA,
HSIO_HW_CFG_RGMII_1_CFG_SET(BIT(0)) |
HSIO_HW_CFG_RGMII_ENA_SET(BIT(1)) |
HSIO_HW_CFG_GMII_ENA_SET(BIT(6))),
};
struct serdes_ctrl {
void __iomem *regs;
struct device *dev;
struct phy *phys[SERDES_MAX];
int ref125;
};
struct serdes_macro {
u8 idx;
int port;
struct serdes_ctrl *ctrl;
int speed;
phy_interface_t mode;
};
enum lan966x_sd6g40_mode {
LAN966X_SD6G40_MODE_QSGMII,
LAN966X_SD6G40_MODE_SGMII,
};
enum lan966x_sd6g40_ltx2rx {
LAN966X_SD6G40_TX2RX_LOOP_NONE,
LAN966X_SD6G40_LTX2RX
};
struct lan966x_sd6g40_setup_args {
enum lan966x_sd6g40_mode mode;
enum lan966x_sd6g40_ltx2rx tx2rx_loop;
bool txinvert;
bool rxinvert;
bool refclk125M;
bool mute;
};
struct lan966x_sd6g40_mode_args {
enum lan966x_sd6g40_mode mode;
u8 lane_10bit_sel;
u8 mpll_multiplier;
u8 ref_clkdiv2;
u8 tx_rate;
u8 rx_rate;
};
struct lan966x_sd6g40_setup {
u8 rx_term_en;
u8 lane_10bit_sel;
u8 tx_invert;
u8 rx_invert;
u8 mpll_multiplier;
u8 lane_loopbk_en;
u8 ref_clkdiv2;
u8 tx_rate;
u8 rx_rate;
};
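/*
 * Apply the computed lane settings and step through the SerDes6G bring-up:
 * program the lane and MPLL configuration, enable the reference/SSP, release
 * the PHY reset, then enable the MPLL, TX common mode and RX PLL/TX while
 * checking the matching status bit after each step, and finally enable the
 * TX/RX data paths.
 */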
static int lan966x_sd6g40_reg_cfg(struct serdes_macro *macro,
struct lan966x_sd6g40_setup *res_struct,
u32 idx)
{
u32 value;
/* Note: SerDes HSIO is configured in 1G_LAN mode */
lan_rmw(HSIO_SD_CFG_LANE_10BIT_SEL_SET(res_struct->lane_10bit_sel) |
HSIO_SD_CFG_RX_RATE_SET(res_struct->rx_rate) |
HSIO_SD_CFG_TX_RATE_SET(res_struct->tx_rate) |
HSIO_SD_CFG_TX_INVERT_SET(res_struct->tx_invert) |
HSIO_SD_CFG_RX_INVERT_SET(res_struct->rx_invert) |
HSIO_SD_CFG_LANE_LOOPBK_EN_SET(res_struct->lane_loopbk_en) |
HSIO_SD_CFG_RX_RESET_SET(0) |
HSIO_SD_CFG_TX_RESET_SET(0),
HSIO_SD_CFG_LANE_10BIT_SEL |
HSIO_SD_CFG_RX_RATE |
HSIO_SD_CFG_TX_RATE |
HSIO_SD_CFG_TX_INVERT |
HSIO_SD_CFG_RX_INVERT |
HSIO_SD_CFG_LANE_LOOPBK_EN |
HSIO_SD_CFG_RX_RESET |
HSIO_SD_CFG_TX_RESET,
macro->ctrl->regs, HSIO_SD_CFG(idx));
lan_rmw(HSIO_MPLL_CFG_MPLL_MULTIPLIER_SET(res_struct->mpll_multiplier) |
HSIO_MPLL_CFG_REF_CLKDIV2_SET(res_struct->ref_clkdiv2),
HSIO_MPLL_CFG_MPLL_MULTIPLIER |
HSIO_MPLL_CFG_REF_CLKDIV2,
macro->ctrl->regs, HSIO_MPLL_CFG(idx));
lan_rmw(HSIO_SD_CFG_RX_TERM_EN_SET(res_struct->rx_term_en),
HSIO_SD_CFG_RX_TERM_EN,
macro->ctrl->regs, HSIO_SD_CFG(idx));
lan_rmw(HSIO_MPLL_CFG_REF_SSP_EN_SET(1),
HSIO_MPLL_CFG_REF_SSP_EN,
macro->ctrl->regs, HSIO_MPLL_CFG(idx));
usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
lan_rmw(HSIO_SD_CFG_PHY_RESET_SET(0),
HSIO_SD_CFG_PHY_RESET,
macro->ctrl->regs, HSIO_SD_CFG(idx));
usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
lan_rmw(HSIO_MPLL_CFG_MPLL_EN_SET(1),
HSIO_MPLL_CFG_MPLL_EN,
macro->ctrl->regs, HSIO_MPLL_CFG(idx));
usleep_range(7 * USEC_PER_MSEC, 8 * USEC_PER_MSEC);
value = readl(macro->ctrl->regs + lan_offset(HSIO_SD_STAT(idx)));
value = HSIO_SD_STAT_MPLL_STATE_GET(value);
if (value != 0x1) {
dev_err(macro->ctrl->dev,
"Unexpected sd_sd_stat[%u] mpll_state was 0x1 but is 0x%x\n",
idx, value);
return -EIO;
}
lan_rmw(HSIO_SD_CFG_TX_CM_EN_SET(1),
HSIO_SD_CFG_TX_CM_EN,
macro->ctrl->regs, HSIO_SD_CFG(idx));
usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
value = readl(macro->ctrl->regs + lan_offset(HSIO_SD_STAT(idx)));
value = HSIO_SD_STAT_TX_CM_STATE_GET(value);
if (value != 0x1) {
dev_err(macro->ctrl->dev,
"Unexpected sd_sd_stat[%u] tx_cm_state was 0x1 but is 0x%x\n",
idx, value);
return -EIO;
}
lan_rmw(HSIO_SD_CFG_RX_PLL_EN_SET(1) |
HSIO_SD_CFG_TX_EN_SET(1),
HSIO_SD_CFG_RX_PLL_EN |
HSIO_SD_CFG_TX_EN,
macro->ctrl->regs, HSIO_SD_CFG(idx));
usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
	/* Check that the serdes rx DPLL has locked */
value = readl(macro->ctrl->regs + lan_offset(HSIO_SD_STAT(idx)));
value = HSIO_SD_STAT_RX_PLL_STATE_GET(value);
if (value != 0x1) {
dev_err(macro->ctrl->dev,
"Unexpected sd_sd_stat[%u] rx_pll_state was 0x1 but is 0x%x\n",
idx, value);
return -EIO;
}
	/* Check that the serdes tx is operational */
value = readl(macro->ctrl->regs + lan_offset(HSIO_SD_STAT(idx)));
value = HSIO_SD_STAT_TX_STATE_GET(value);
if (value != 0x1) {
dev_err(macro->ctrl->dev,
"Unexpected sd_sd_stat[%u] tx_state was 0x1 but is 0x%x\n",
idx, value);
return -EIO;
}
lan_rmw(HSIO_SD_CFG_TX_DATA_EN_SET(1) |
HSIO_SD_CFG_RX_DATA_EN_SET(1),
HSIO_SD_CFG_TX_DATA_EN |
HSIO_SD_CFG_RX_DATA_EN,
macro->ctrl->regs, HSIO_SD_CFG(idx));
return 0;
}
static int lan966x_sd6g40_get_conf_from_mode(struct serdes_macro *macro,
enum lan966x_sd6g40_mode f_mode,
bool ref125M,
struct lan966x_sd6g40_mode_args *ret_val)
{
switch (f_mode) {
case LAN966X_SD6G40_MODE_QSGMII:
ret_val->lane_10bit_sel = 0;
if (ref125M) {
ret_val->mpll_multiplier = 40;
ret_val->ref_clkdiv2 = 0x1;
ret_val->tx_rate = 0x0;
ret_val->rx_rate = 0x0;
} else {
ret_val->mpll_multiplier = 100;
ret_val->ref_clkdiv2 = 0x0;
ret_val->tx_rate = 0x0;
ret_val->rx_rate = 0x0;
}
break;
case LAN966X_SD6G40_MODE_SGMII:
ret_val->lane_10bit_sel = 1;
if (ref125M) {
ret_val->mpll_multiplier = macro->speed == SPEED_2500 ? 50 : 40;
ret_val->ref_clkdiv2 = 0x1;
ret_val->tx_rate = macro->speed == SPEED_2500 ? 0x1 : 0x2;
ret_val->rx_rate = macro->speed == SPEED_2500 ? 0x1 : 0x2;
} else {
ret_val->mpll_multiplier = macro->speed == SPEED_2500 ? 125 : 100;
ret_val->ref_clkdiv2 = 0x0;
ret_val->tx_rate = macro->speed == SPEED_2500 ? 0x1 : 0x2;
ret_val->rx_rate = macro->speed == SPEED_2500 ? 0x1 : 0x2;
}
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
static int lan966x_calc_sd6g40_setup_lane(struct serdes_macro *macro,
struct lan966x_sd6g40_setup_args config,
struct lan966x_sd6g40_setup *ret_val)
{
struct lan966x_sd6g40_mode_args sd6g40_mode;
struct lan966x_sd6g40_mode_args *mode_args = &sd6g40_mode;
int ret;
ret = lan966x_sd6g40_get_conf_from_mode(macro, config.mode,
config.refclk125M, mode_args);
if (ret)
return ret;
ret_val->lane_10bit_sel = mode_args->lane_10bit_sel;
ret_val->rx_rate = mode_args->rx_rate;
ret_val->tx_rate = mode_args->tx_rate;
ret_val->mpll_multiplier = mode_args->mpll_multiplier;
ret_val->ref_clkdiv2 = mode_args->ref_clkdiv2;
ret_val->rx_term_en = 0;
if (config.tx2rx_loop == LAN966X_SD6G40_LTX2RX)
ret_val->lane_loopbk_en = 1;
else
ret_val->lane_loopbk_en = 0;
ret_val->tx_invert = !!config.txinvert;
ret_val->rx_invert = !!config.rxinvert;
return 0;
}
static int lan966x_sd6g40_setup_lane(struct serdes_macro *macro,
struct lan966x_sd6g40_setup_args config,
u32 idx)
{
struct lan966x_sd6g40_setup calc_results = {};
int ret;
ret = lan966x_calc_sd6g40_setup_lane(macro, config, &calc_results);
if (ret)
return ret;
return lan966x_sd6g40_reg_cfg(macro, &calc_results, idx);
}
static int lan966x_sd6g40_setup(struct serdes_macro *macro, u32 idx, int mode)
{
struct lan966x_sd6g40_setup_args conf = {};
conf.refclk125M = macro->ctrl->ref125;
if (mode == PHY_INTERFACE_MODE_QSGMII)
conf.mode = LAN966X_SD6G40_MODE_QSGMII;
else
conf.mode = LAN966X_SD6G40_MODE_SGMII;
return lan966x_sd6g40_setup_lane(macro, conf, idx);
}
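/*
 * Configure an RGMII interface: select the TX clock for the current link
 * speed, release the RGMII resets and enable the RX/TX DLL delay lines
 * according to the requested RGMII internal-delay variant.
 */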
static int lan966x_rgmii_setup(struct serdes_macro *macro, u32 idx, int mode)
{
bool tx_delay = false;
bool rx_delay = false;
/* Configure RGMII */
lan_rmw(HSIO_RGMII_CFG_RGMII_RX_RST_SET(0) |
HSIO_RGMII_CFG_RGMII_TX_RST_SET(0) |
HSIO_RGMII_CFG_TX_CLK_CFG_SET(macro->speed == SPEED_1000 ? 1 :
macro->speed == SPEED_100 ? 2 :
macro->speed == SPEED_10 ? 3 : 0),
HSIO_RGMII_CFG_RGMII_RX_RST |
HSIO_RGMII_CFG_RGMII_TX_RST |
HSIO_RGMII_CFG_TX_CLK_CFG,
macro->ctrl->regs, HSIO_RGMII_CFG(idx));
if (mode == PHY_INTERFACE_MODE_RGMII ||
mode == PHY_INTERFACE_MODE_RGMII_TXID)
rx_delay = true;
if (mode == PHY_INTERFACE_MODE_RGMII ||
mode == PHY_INTERFACE_MODE_RGMII_RXID)
tx_delay = true;
/* Setup DLL configuration */
lan_rmw(HSIO_DLL_CFG_DLL_RST_SET(0) |
HSIO_DLL_CFG_DLL_ENA_SET(rx_delay),
HSIO_DLL_CFG_DLL_RST |
HSIO_DLL_CFG_DLL_ENA,
macro->ctrl->regs, HSIO_DLL_CFG(idx == 0 ? 0x0 : 0x2));
lan_rmw(HSIO_DLL_CFG_DELAY_ENA_SET(rx_delay),
HSIO_DLL_CFG_DELAY_ENA,
macro->ctrl->regs, HSIO_DLL_CFG(idx == 0 ? 0x0 : 0x2));
lan_rmw(HSIO_DLL_CFG_DLL_RST_SET(0) |
HSIO_DLL_CFG_DLL_ENA_SET(tx_delay),
HSIO_DLL_CFG_DLL_RST |
HSIO_DLL_CFG_DLL_ENA,
macro->ctrl->regs, HSIO_DLL_CFG(idx == 0 ? 0x1 : 0x3));
lan_rmw(HSIO_DLL_CFG_DELAY_ENA_SET(tx_delay),
HSIO_DLL_CFG_DELAY_ENA,
macro->ctrl->regs, HSIO_DLL_CFG(idx == 0 ? 0x1 : 0x3));
return 0;
}
static int serdes_set_speed(struct phy *phy, int speed)
{
struct serdes_macro *macro = phy_get_drvdata(phy);
if (!phy_interface_mode_is_rgmii(macro->mode))
return 0;
macro->speed = speed;
lan966x_rgmii_setup(macro, macro->idx - (SERDES6G_MAX + 1), macro->mode);
return 0;
}
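/*
 * Look up the (macro, port, mode, submode) tuple in the mux table, program
 * the HSIO_HW_CFG muxing and run the per-macro setup: nothing more for the
 * copper ports, SerDes6G40 setup for the SerDes macros and RGMII setup for
 * the RGMII macros.
 */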
static int serdes_set_mode(struct phy *phy, enum phy_mode mode, int submode)
{
struct serdes_macro *macro = phy_get_drvdata(phy);
unsigned int i;
int val;
/* As of now only PHY_MODE_ETHERNET is supported */
if (mode != PHY_MODE_ETHERNET)
return -EOPNOTSUPP;
if (submode == PHY_INTERFACE_MODE_2500BASEX)
macro->speed = SPEED_2500;
else
macro->speed = SPEED_1000;
if (submode == PHY_INTERFACE_MODE_1000BASEX ||
submode == PHY_INTERFACE_MODE_2500BASEX)
submode = PHY_INTERFACE_MODE_SGMII;
if (submode == PHY_INTERFACE_MODE_QUSGMII)
submode = PHY_INTERFACE_MODE_QSGMII;
for (i = 0; i < ARRAY_SIZE(lan966x_serdes_muxes); i++) {
if (macro->idx != lan966x_serdes_muxes[i].idx ||
mode != lan966x_serdes_muxes[i].mode ||
submode != lan966x_serdes_muxes[i].submode ||
macro->port != lan966x_serdes_muxes[i].port)
continue;
val = readl(macro->ctrl->regs + lan_offset(HSIO_HW_CFG));
val |= lan966x_serdes_muxes[i].mux;
lan_rmw(val, lan966x_serdes_muxes[i].mask,
macro->ctrl->regs, HSIO_HW_CFG);
macro->mode = lan966x_serdes_muxes[i].submode;
if (macro->idx < CU_MAX)
return 0;
if (macro->idx < SERDES6G_MAX)
return lan966x_sd6g40_setup(macro,
macro->idx - (CU_MAX + 1),
macro->mode);
if (macro->idx < RGMII_MAX)
return lan966x_rgmii_setup(macro,
macro->idx - (SERDES6G_MAX + 1),
macro->mode);
return -EOPNOTSUPP;
}
return -EINVAL;
}
static const struct phy_ops serdes_ops = {
.set_mode = serdes_set_mode,
.set_speed = serdes_set_speed,
.owner = THIS_MODULE,
};
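/*
 * Translate a two-cell phandle (port, macro index) into the matching PHY and
 * record the port on the macro.
 */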
static struct phy *serdes_simple_xlate(struct device *dev,
struct of_phandle_args *args)
{
struct serdes_ctrl *ctrl = dev_get_drvdata(dev);
unsigned int port, idx, i;
if (args->args_count != 2)
return ERR_PTR(-EINVAL);
port = args->args[0];
idx = args->args[1];
for (i = 0; i < SERDES_MAX; i++) {
struct serdes_macro *macro = phy_get_drvdata(ctrl->phys[i]);
if (idx != macro->idx)
continue;
macro->port = port;
return ctrl->phys[i];
}
return ERR_PTR(-ENODEV);
}
static int serdes_phy_create(struct serdes_ctrl *ctrl, u8 idx, struct phy **phy)
{
struct serdes_macro *macro;
*phy = devm_phy_create(ctrl->dev, NULL, &serdes_ops);
if (IS_ERR(*phy))
return PTR_ERR(*phy);
macro = devm_kzalloc(ctrl->dev, sizeof(*macro), GFP_KERNEL);
if (!macro)
return -ENOMEM;
macro->idx = idx;
macro->ctrl = ctrl;
macro->port = -1;
phy_set_drvdata(*phy, macro);
return 0;
}
static int serdes_probe(struct platform_device *pdev)
{
struct phy_provider *provider;
struct serdes_ctrl *ctrl;
void __iomem *hw_stat;
unsigned int i;
u32 val;
int ret;
ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
if (!ctrl)
return -ENOMEM;
ctrl->dev = &pdev->dev;
ctrl->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(ctrl->regs))
return PTR_ERR(ctrl->regs);
hw_stat = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
if (IS_ERR(hw_stat))
return PTR_ERR(hw_stat);
for (i = 0; i < SERDES_MAX; i++) {
ret = serdes_phy_create(ctrl, i, &ctrl->phys[i]);
if (ret)
return ret;
}
val = readl(hw_stat);
val = FIELD_GET(PLL_CONF_MASK, val);
ctrl->ref125 = (val == PLL_CONF_125MHZ ||
val == PLL_CONF_SERDES_125MHZ);
dev_set_drvdata(&pdev->dev, ctrl);
provider = devm_of_phy_provider_register(ctrl->dev,
serdes_simple_xlate);
return PTR_ERR_OR_ZERO(provider);
}
static const struct of_device_id serdes_ids[] = {
{ .compatible = "microchip,lan966x-serdes", },
{},
};
MODULE_DEVICE_TABLE(of, serdes_ids);
static struct platform_driver mscc_lan966x_serdes = {
.probe = serdes_probe,
.driver = {
.name = "microchip,lan966x-serdes",
.of_match_table = of_match_ptr(serdes_ids),
},
};
module_platform_driver(mscc_lan966x_serdes);
MODULE_DESCRIPTION("Microchip lan966x switch serdes driver");
MODULE_AUTHOR("Horatiu Vultur <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/phy/microchip/lan966x_serdes.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Microchip Sparx5 Switch SerDes driver
*
* Copyright (c) 2020 Microchip Technology Inc. and its subsidiaries.
*
* The Sparx5 Chip Register Model can be browsed at this location:
* https://github.com/microchip-ung/sparx-5_reginfo
* and the datasheet is available here:
* https://ww1.microchip.com/downloads/en/DeviceDoc/SparX-5_Family_L2L3_Enterprise_10G_Ethernet_Switches_Datasheet_00003822B.pdf
*/
#include <linux/printk.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/phy.h>
#include <linux/phy/phy.h>
#include "sparx5_serdes.h"
#define SPX5_CMU_MAX 14
#define SPX5_SERDES_10G_START 13
#define SPX5_SERDES_25G_START 25
#define SPX5_SERDES_6G10G_CNT SPX5_SERDES_25G_START
/* Optimal power settings from GUC */
#define SPX5_SERDES_QUIET_MODE_VAL 0x01ef4e0c
enum sparx5_10g28cmu_mode {
SPX5_SD10G28_CMU_MAIN = 0,
SPX5_SD10G28_CMU_AUX1 = 1,
SPX5_SD10G28_CMU_AUX2 = 3,
SPX5_SD10G28_CMU_NONE = 4,
SPX5_SD10G28_CMU_MAX,
};
enum sparx5_sd25g28_mode_preset_type {
SPX5_SD25G28_MODE_PRESET_25000,
SPX5_SD25G28_MODE_PRESET_10000,
SPX5_SD25G28_MODE_PRESET_5000,
SPX5_SD25G28_MODE_PRESET_SD_2G5,
SPX5_SD25G28_MODE_PRESET_1000BASEX,
};
enum sparx5_sd10g28_mode_preset_type {
SPX5_SD10G28_MODE_PRESET_10000,
SPX5_SD10G28_MODE_PRESET_SFI_5000_6G,
SPX5_SD10G28_MODE_PRESET_SFI_5000_10G,
SPX5_SD10G28_MODE_PRESET_QSGMII,
SPX5_SD10G28_MODE_PRESET_SD_2G5,
SPX5_SD10G28_MODE_PRESET_1000BASEX,
};
struct sparx5_serdes_io_resource {
enum sparx5_serdes_target id;
phys_addr_t offset;
};
struct sparx5_sd25g28_mode_preset {
u8 bitwidth;
u8 tx_pre_div;
u8 fifo_ck_div;
u8 pre_divsel;
u8 vco_div_mode;
u8 sel_div;
u8 ck_bitwidth;
u8 subrate;
u8 com_txcal_en;
u8 com_tx_reserve_msb;
u8 com_tx_reserve_lsb;
u8 cfg_itx_ipcml_base;
u8 tx_reserve_lsb;
u8 tx_reserve_msb;
u8 bw;
u8 rxterm;
u8 dfe_tap;
u8 dfe_enable;
bool txmargin;
u8 cfg_ctle_rstn;
u8 r_dfe_rstn;
u8 cfg_pi_bw_3_0;
u8 tx_tap_dly;
u8 tx_tap_adv;
};
struct sparx5_sd25g28_media_preset {
u8 cfg_eq_c_force_3_0;
u8 cfg_vga_ctrl_byp_4_0;
u8 cfg_eq_r_force_3_0;
u8 cfg_en_adv;
u8 cfg_en_main;
u8 cfg_en_dly;
u8 cfg_tap_adv_3_0;
u8 cfg_tap_main;
u8 cfg_tap_dly_4_0;
u8 cfg_alos_thr_2_0;
};
struct sparx5_sd25g28_args {
u8 if_width; /* UDL if-width: 10/16/20/32/64 */
bool skip_cmu_cfg:1; /* Enable/disable CMU cfg */
enum sparx5_10g28cmu_mode cmu_sel; /* Device/Mode serdes uses */
bool no_pwrcycle:1; /* Omit initial power-cycle */
bool txinvert:1; /* Enable inversion of output data */
bool rxinvert:1; /* Enable inversion of input data */
u16 txswing; /* Set output level */
u8 rate; /* Rate of network interface */
u8 pi_bw_gen1;
u8 duty_cycle; /* Set output level to half/full */
bool mute:1; /* Mute Output Buffer */
bool reg_rst:1;
u8 com_pll_reserve;
};
struct sparx5_sd25g28_params {
u8 reg_rst;
u8 cfg_jc_byp;
u8 cfg_common_reserve_7_0;
u8 r_reg_manual;
u8 r_d_width_ctrl_from_hwt;
u8 r_d_width_ctrl_2_0;
u8 r_txfifo_ck_div_pmad_2_0;
u8 r_rxfifo_ck_div_pmad_2_0;
u8 cfg_pll_lol_set;
u8 cfg_vco_div_mode_1_0;
u8 cfg_pre_divsel_1_0;
u8 cfg_sel_div_3_0;
u8 cfg_vco_start_code_3_0;
u8 cfg_pma_tx_ck_bitwidth_2_0;
u8 cfg_tx_prediv_1_0;
u8 cfg_rxdiv_sel_2_0;
u8 cfg_tx_subrate_2_0;
u8 cfg_rx_subrate_2_0;
u8 r_multi_lane_mode;
u8 cfg_cdrck_en;
u8 cfg_dfeck_en;
u8 cfg_dfe_pd;
u8 cfg_dfedmx_pd;
u8 cfg_dfetap_en_5_1;
u8 cfg_dmux_pd;
u8 cfg_dmux_clk_pd;
u8 cfg_erramp_pd;
u8 cfg_pi_dfe_en;
u8 cfg_pi_en;
u8 cfg_pd_ctle;
u8 cfg_summer_en;
u8 cfg_pmad_ck_pd;
u8 cfg_pd_clk;
u8 cfg_pd_cml;
u8 cfg_pd_driver;
u8 cfg_rx_reg_pu;
u8 cfg_pd_rms_det;
u8 cfg_dcdr_pd;
u8 cfg_ecdr_pd;
u8 cfg_pd_sq;
u8 cfg_itx_ipdriver_base_2_0;
u8 cfg_tap_dly_4_0;
u8 cfg_tap_main;
u8 cfg_en_main;
u8 cfg_tap_adv_3_0;
u8 cfg_en_adv;
u8 cfg_en_dly;
u8 cfg_iscan_en;
u8 l1_pcs_en_fast_iscan;
u8 l0_cfg_bw_1_0;
u8 l0_cfg_txcal_en;
u8 cfg_en_dummy;
u8 cfg_pll_reserve_3_0;
u8 l0_cfg_tx_reserve_15_8;
u8 l0_cfg_tx_reserve_7_0;
u8 cfg_tx_reserve_15_8;
u8 cfg_tx_reserve_7_0;
u8 cfg_bw_1_0;
u8 cfg_txcal_man_en;
u8 cfg_phase_man_4_0;
u8 cfg_quad_man_1_0;
u8 cfg_txcal_shift_code_5_0;
u8 cfg_txcal_valid_sel_3_0;
u8 cfg_txcal_en;
u8 cfg_cdr_kf_2_0;
u8 cfg_cdr_m_7_0;
u8 cfg_pi_bw_3_0;
u8 cfg_pi_steps_1_0;
u8 cfg_dis_2ndorder;
u8 cfg_ctle_rstn;
u8 r_dfe_rstn;
u8 cfg_alos_thr_2_0;
u8 cfg_itx_ipcml_base_1_0;
u8 cfg_rx_reserve_7_0;
u8 cfg_rx_reserve_15_8;
u8 cfg_rxterm_2_0;
u8 cfg_fom_selm;
u8 cfg_rx_sp_ctle_1_0;
u8 cfg_isel_ctle_1_0;
u8 cfg_vga_ctrl_byp_4_0;
u8 cfg_vga_byp;
u8 cfg_agc_adpt_byp;
u8 cfg_eqr_byp;
u8 cfg_eqr_force_3_0;
u8 cfg_eqc_force_3_0;
u8 cfg_sum_setcm_en;
u8 cfg_init_pos_iscan_6_0;
u8 cfg_init_pos_ipi_6_0;
u8 cfg_dfedig_m_2_0;
u8 cfg_en_dfedig;
u8 cfg_pi_DFE_en;
u8 cfg_tx2rx_lp_en;
u8 cfg_txlb_en;
u8 cfg_rx2tx_lp_en;
u8 cfg_rxlb_en;
u8 r_tx_pol_inv;
u8 r_rx_pol_inv;
};
struct sparx5_sd10g28_media_preset {
u8 cfg_en_adv;
u8 cfg_en_main;
u8 cfg_en_dly;
u8 cfg_tap_adv_3_0;
u8 cfg_tap_main;
u8 cfg_tap_dly_4_0;
u8 cfg_vga_ctrl_3_0;
u8 cfg_vga_cp_2_0;
u8 cfg_eq_res_3_0;
u8 cfg_eq_r_byp;
u8 cfg_eq_c_force_3_0;
u8 cfg_alos_thr_3_0;
};
struct sparx5_sd10g28_mode_preset {
u8 bwidth; /* interface width: 10/16/20/32/64 */
enum sparx5_10g28cmu_mode cmu_sel; /* Device/Mode serdes uses */
u8 rate; /* Rate of network interface */
u8 dfe_tap;
u8 dfe_enable;
u8 pi_bw_gen1;
u8 duty_cycle; /* Set output level to half/full */
};
struct sparx5_sd10g28_args {
bool skip_cmu_cfg:1; /* Enable/disable CMU cfg */
bool no_pwrcycle:1; /* Omit initial power-cycle */
bool txinvert:1; /* Enable inversion of output data */
bool rxinvert:1; /* Enable inversion of input data */
bool txmargin:1; /* Set output level to half/full */
u16 txswing; /* Set output level */
bool mute:1; /* Mute Output Buffer */
bool is_6g:1;
bool reg_rst:1;
};
struct sparx5_sd10g28_params {
u8 cmu_sel;
u8 is_6g;
u8 skip_cmu_cfg;
u8 cfg_lane_reserve_7_0;
u8 cfg_ssc_rtl_clk_sel;
u8 cfg_lane_reserve_15_8;
u8 cfg_txrate_1_0;
u8 cfg_rxrate_1_0;
u8 r_d_width_ctrl_2_0;
u8 cfg_pma_tx_ck_bitwidth_2_0;
u8 cfg_rxdiv_sel_2_0;
u8 r_pcs2pma_phymode_4_0;
u8 cfg_lane_id_2_0;
u8 cfg_cdrck_en;
u8 cfg_dfeck_en;
u8 cfg_dfe_pd;
u8 cfg_dfetap_en_5_1;
u8 cfg_erramp_pd;
u8 cfg_pi_DFE_en;
u8 cfg_pi_en;
u8 cfg_pd_ctle;
u8 cfg_summer_en;
u8 cfg_pd_rx_cktree;
u8 cfg_pd_clk;
u8 cfg_pd_cml;
u8 cfg_pd_driver;
u8 cfg_rx_reg_pu;
u8 cfg_d_cdr_pd;
u8 cfg_pd_sq;
u8 cfg_rxdet_en;
u8 cfg_rxdet_str;
u8 r_multi_lane_mode;
u8 cfg_en_adv;
u8 cfg_en_main;
u8 cfg_en_dly;
u8 cfg_tap_adv_3_0;
u8 cfg_tap_main;
u8 cfg_tap_dly_4_0;
u8 cfg_vga_ctrl_3_0;
u8 cfg_vga_cp_2_0;
u8 cfg_eq_res_3_0;
u8 cfg_eq_r_byp;
u8 cfg_eq_c_force_3_0;
u8 cfg_en_dfedig;
u8 cfg_sum_setcm_en;
u8 cfg_en_preemph;
u8 cfg_itx_ippreemp_base_1_0;
u8 cfg_itx_ipdriver_base_2_0;
u8 cfg_ibias_tune_reserve_5_0;
u8 cfg_txswing_half;
u8 cfg_dis_2nd_order;
u8 cfg_rx_ssc_lh;
u8 cfg_pi_floop_steps_1_0;
u8 cfg_pi_ext_dac_23_16;
u8 cfg_pi_ext_dac_15_8;
u8 cfg_iscan_ext_dac_7_0;
u8 cfg_cdr_kf_gen1_2_0;
u8 cfg_cdr_kf_gen2_2_0;
u8 cfg_cdr_kf_gen3_2_0;
u8 cfg_cdr_kf_gen4_2_0;
u8 r_cdr_m_gen1_7_0;
u8 cfg_pi_bw_gen1_3_0;
u8 cfg_pi_bw_gen2;
u8 cfg_pi_bw_gen3;
u8 cfg_pi_bw_gen4;
u8 cfg_pi_ext_dac_7_0;
u8 cfg_pi_steps;
u8 cfg_mp_max_3_0;
u8 cfg_rstn_dfedig;
u8 cfg_alos_thr_3_0;
u8 cfg_predrv_slewrate_1_0;
u8 cfg_itx_ipcml_base_1_0;
u8 cfg_ip_pre_base_1_0;
u8 r_cdr_m_gen2_7_0;
u8 r_cdr_m_gen3_7_0;
u8 r_cdr_m_gen4_7_0;
u8 r_en_auto_cdr_rstn;
u8 cfg_oscal_afe;
u8 cfg_pd_osdac_afe;
u8 cfg_resetb_oscal_afe[2];
u8 cfg_center_spreading;
u8 cfg_m_cnt_maxval_4_0;
u8 cfg_ncnt_maxval_7_0;
u8 cfg_ncnt_maxval_10_8;
u8 cfg_ssc_en;
u8 cfg_tx2rx_lp_en;
u8 cfg_txlb_en;
u8 cfg_rx2tx_lp_en;
u8 cfg_rxlb_en;
u8 r_tx_pol_inv;
u8 r_rx_pol_inv;
u8 fx_100;
};
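/* RX/TX equalization presets per media type: default, SR optics and DAC */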
static struct sparx5_sd25g28_media_preset media_presets_25g[] = {
{ /* ETH_MEDIA_DEFAULT */
.cfg_en_adv = 0,
.cfg_en_main = 1,
.cfg_en_dly = 0,
.cfg_tap_adv_3_0 = 0,
.cfg_tap_main = 1,
.cfg_tap_dly_4_0 = 0,
.cfg_eq_c_force_3_0 = 0xf,
.cfg_vga_ctrl_byp_4_0 = 4,
.cfg_eq_r_force_3_0 = 12,
.cfg_alos_thr_2_0 = 7,
},
{ /* ETH_MEDIA_SR */
.cfg_en_adv = 1,
.cfg_en_main = 1,
.cfg_en_dly = 1,
.cfg_tap_adv_3_0 = 0,
.cfg_tap_main = 1,
.cfg_tap_dly_4_0 = 0x10,
.cfg_eq_c_force_3_0 = 0xf,
.cfg_vga_ctrl_byp_4_0 = 8,
.cfg_eq_r_force_3_0 = 4,
.cfg_alos_thr_2_0 = 0,
},
{ /* ETH_MEDIA_DAC */
.cfg_en_adv = 0,
.cfg_en_main = 1,
.cfg_en_dly = 0,
.cfg_tap_adv_3_0 = 0,
.cfg_tap_main = 1,
.cfg_tap_dly_4_0 = 0,
.cfg_eq_c_force_3_0 = 0xf,
.cfg_vga_ctrl_byp_4_0 = 8,
.cfg_eq_r_force_3_0 = 0xc,
.cfg_alos_thr_2_0 = 0,
},
};
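/* Rate-dependent presets, indexed by enum sparx5_sd25g28_mode_preset_type */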
static struct sparx5_sd25g28_mode_preset mode_presets_25g[] = {
{ /* SPX5_SD25G28_MODE_PRESET_25000 */
.bitwidth = 40,
.tx_pre_div = 0,
.fifo_ck_div = 0,
.pre_divsel = 1,
.vco_div_mode = 0,
.sel_div = 15,
.ck_bitwidth = 3,
.subrate = 0,
.com_txcal_en = 0,
.com_tx_reserve_msb = (0x26 << 1),
.com_tx_reserve_lsb = 0xf0,
.cfg_itx_ipcml_base = 0,
.tx_reserve_msb = 0xcc,
.tx_reserve_lsb = 0xfe,
.bw = 3,
.rxterm = 0,
.dfe_enable = 1,
.dfe_tap = 0x1f,
.txmargin = 1,
.cfg_ctle_rstn = 1,
.r_dfe_rstn = 1,
.cfg_pi_bw_3_0 = 0,
.tx_tap_dly = 8,
.tx_tap_adv = 0xc,
},
{ /* SPX5_SD25G28_MODE_PRESET_10000 */
.bitwidth = 64,
.tx_pre_div = 0,
.fifo_ck_div = 2,
.pre_divsel = 0,
.vco_div_mode = 1,
.sel_div = 9,
.ck_bitwidth = 0,
.subrate = 0,
.com_txcal_en = 1,
.com_tx_reserve_msb = (0x20 << 1),
.com_tx_reserve_lsb = 0x40,
.cfg_itx_ipcml_base = 0,
.tx_reserve_msb = 0x4c,
.tx_reserve_lsb = 0x44,
.bw = 3,
.cfg_pi_bw_3_0 = 0,
.rxterm = 3,
.dfe_enable = 1,
.dfe_tap = 0x1f,
.txmargin = 0,
.cfg_ctle_rstn = 1,
.r_dfe_rstn = 1,
.tx_tap_dly = 0,
.tx_tap_adv = 0,
},
{ /* SPX5_SD25G28_MODE_PRESET_5000 */
.bitwidth = 64,
.tx_pre_div = 0,
.fifo_ck_div = 2,
.pre_divsel = 0,
.vco_div_mode = 2,
.sel_div = 9,
.ck_bitwidth = 0,
.subrate = 0,
.com_txcal_en = 1,
.com_tx_reserve_msb = (0x20 << 1),
.com_tx_reserve_lsb = 0,
.cfg_itx_ipcml_base = 0,
.tx_reserve_msb = 0xe,
.tx_reserve_lsb = 0x80,
.bw = 0,
.rxterm = 0,
.cfg_pi_bw_3_0 = 6,
.dfe_enable = 0,
.dfe_tap = 0,
.tx_tap_dly = 0,
.tx_tap_adv = 0,
},
{ /* SPX5_SD25G28_MODE_PRESET_SD_2G5 */
.bitwidth = 10,
.tx_pre_div = 0,
.fifo_ck_div = 0,
.pre_divsel = 0,
.vco_div_mode = 1,
.sel_div = 6,
.ck_bitwidth = 3,
.subrate = 2,
.com_txcal_en = 1,
.com_tx_reserve_msb = (0x26 << 1),
.com_tx_reserve_lsb = (0xf << 4),
.cfg_itx_ipcml_base = 2,
.tx_reserve_msb = 0x8,
.tx_reserve_lsb = 0x8a,
.bw = 0,
.cfg_pi_bw_3_0 = 0,
.rxterm = (1 << 2),
.dfe_enable = 0,
.dfe_tap = 0,
.tx_tap_dly = 0,
.tx_tap_adv = 0,
},
{ /* SPX5_SD25G28_MODE_PRESET_1000BASEX */
.bitwidth = 10,
.tx_pre_div = 0,
.fifo_ck_div = 1,
.pre_divsel = 0,
.vco_div_mode = 1,
.sel_div = 8,
.ck_bitwidth = 3,
.subrate = 3,
.com_txcal_en = 1,
.com_tx_reserve_msb = (0x26 << 1),
.com_tx_reserve_lsb = 0xf0,
.cfg_itx_ipcml_base = 0,
.tx_reserve_msb = 0x8,
.tx_reserve_lsb = 0xce,
.bw = 0,
.rxterm = 0,
.cfg_pi_bw_3_0 = 0,
.dfe_enable = 0,
.dfe_tap = 0,
.tx_tap_dly = 0,
.tx_tap_adv = 0,
},
};
static struct sparx5_sd10g28_media_preset media_presets_10g[] = {
{ /* ETH_MEDIA_DEFAULT */
.cfg_en_adv = 0,
.cfg_en_main = 1,
.cfg_en_dly = 0,
.cfg_tap_adv_3_0 = 0,
.cfg_tap_main = 1,
.cfg_tap_dly_4_0 = 0,
.cfg_vga_ctrl_3_0 = 5,
.cfg_vga_cp_2_0 = 0,
.cfg_eq_res_3_0 = 0xa,
.cfg_eq_r_byp = 1,
.cfg_eq_c_force_3_0 = 0x8,
.cfg_alos_thr_3_0 = 0x3,
},
{ /* ETH_MEDIA_SR */
.cfg_en_adv = 1,
.cfg_en_main = 1,
.cfg_en_dly = 1,
.cfg_tap_adv_3_0 = 0,
.cfg_tap_main = 1,
.cfg_tap_dly_4_0 = 0xc,
.cfg_vga_ctrl_3_0 = 0xa,
.cfg_vga_cp_2_0 = 0x4,
.cfg_eq_res_3_0 = 0xa,
.cfg_eq_r_byp = 1,
.cfg_eq_c_force_3_0 = 0xF,
.cfg_alos_thr_3_0 = 0x3,
},
{ /* ETH_MEDIA_DAC */
.cfg_en_adv = 1,
.cfg_en_main = 1,
.cfg_en_dly = 1,
.cfg_tap_adv_3_0 = 12,
.cfg_tap_main = 1,
.cfg_tap_dly_4_0 = 8,
.cfg_vga_ctrl_3_0 = 0xa,
.cfg_vga_cp_2_0 = 4,
.cfg_eq_res_3_0 = 0xa,
.cfg_eq_r_byp = 1,
.cfg_eq_c_force_3_0 = 0xf,
.cfg_alos_thr_3_0 = 0x0,
}
};
static struct sparx5_sd10g28_mode_preset mode_presets_10g[] = {
{ /* SPX5_SD10G28_MODE_PRESET_10000 */
.bwidth = 64,
.cmu_sel = SPX5_SD10G28_CMU_MAIN,
.rate = 0x0,
.dfe_enable = 1,
.dfe_tap = 0x1f,
.pi_bw_gen1 = 0x0,
.duty_cycle = 0x2,
},
{ /* SPX5_SD10G28_MODE_PRESET_SFI_5000_6G */
.bwidth = 16,
.cmu_sel = SPX5_SD10G28_CMU_MAIN,
.rate = 0x1,
.dfe_enable = 0,
.dfe_tap = 0,
.pi_bw_gen1 = 0x5,
.duty_cycle = 0x0,
},
{ /* SPX5_SD10G28_MODE_PRESET_SFI_5000_10G */
.bwidth = 64,
.cmu_sel = SPX5_SD10G28_CMU_MAIN,
.rate = 0x1,
.dfe_enable = 0,
.dfe_tap = 0,
.pi_bw_gen1 = 0x5,
.duty_cycle = 0x0,
},
{ /* SPX5_SD10G28_MODE_PRESET_QSGMII */
.bwidth = 20,
.cmu_sel = SPX5_SD10G28_CMU_AUX1,
.rate = 0x1,
.dfe_enable = 0,
.dfe_tap = 0,
.pi_bw_gen1 = 0x5,
.duty_cycle = 0x0,
},
{ /* SPX5_SD10G28_MODE_PRESET_SD_2G5 */
.bwidth = 10,
.cmu_sel = SPX5_SD10G28_CMU_AUX2,
.rate = 0x2,
.dfe_enable = 0,
.dfe_tap = 0,
.pi_bw_gen1 = 0x7,
.duty_cycle = 0x0,
},
{ /* SPX5_SD10G28_MODE_PRESET_1000BASEX */
.bwidth = 10,
.cmu_sel = SPX5_SD10G28_CMU_AUX1,
.rate = 0x3,
.dfe_enable = 0,
.dfe_tap = 0,
.pi_bw_gen1 = 0x7,
.duty_cycle = 0x0,
},
};
/* map from SD25G28 interface width to configuration value */
static u8 sd25g28_get_iw_setting(struct device *dev, const u8 interface_width)
{
switch (interface_width) {
case 10: return 0;
case 16: return 1;
case 32: return 3;
case 40: return 4;
case 64: return 5;
default:
dev_err(dev, "%s: Illegal value %d for interface width\n",
__func__, interface_width);
}
return 0;
}
/* map from SD10G28 interface width to configuration value */
static u8 sd10g28_get_iw_setting(struct device *dev, const u8 interface_width)
{
switch (interface_width) {
case 10: return 0;
case 16: return 1;
case 20: return 2;
case 32: return 3;
case 40: return 4;
case 64: return 7;
default:
dev_err(dev, "%s: Illegal value %d for interface width\n", __func__,
interface_width);
return 0;
}
}
static int sparx5_sd10g25_get_mode_preset(struct sparx5_serdes_macro *macro,
struct sparx5_sd25g28_mode_preset *mode)
{
switch (macro->serdesmode) {
case SPX5_SD_MODE_SFI:
if (macro->speed == SPEED_25000)
*mode = mode_presets_25g[SPX5_SD25G28_MODE_PRESET_25000];
else if (macro->speed == SPEED_10000)
*mode = mode_presets_25g[SPX5_SD25G28_MODE_PRESET_10000];
else if (macro->speed == SPEED_5000)
*mode = mode_presets_25g[SPX5_SD25G28_MODE_PRESET_5000];
break;
case SPX5_SD_MODE_2G5:
*mode = mode_presets_25g[SPX5_SD25G28_MODE_PRESET_SD_2G5];
break;
case SPX5_SD_MODE_1000BASEX:
*mode = mode_presets_25g[SPX5_SD25G28_MODE_PRESET_1000BASEX];
break;
case SPX5_SD_MODE_100FX:
/* Not supported */
return -EINVAL;
default:
*mode = mode_presets_25g[SPX5_SD25G28_MODE_PRESET_25000];
break;
}
return 0;
}
static int sparx5_sd10g28_get_mode_preset(struct sparx5_serdes_macro *macro,
struct sparx5_sd10g28_mode_preset *mode,
struct sparx5_sd10g28_args *args)
{
switch (macro->serdesmode) {
case SPX5_SD_MODE_SFI:
if (macro->speed == SPEED_10000) {
*mode = mode_presets_10g[SPX5_SD10G28_MODE_PRESET_10000];
} else if (macro->speed == SPEED_5000) {
if (args->is_6g)
*mode = mode_presets_10g[SPX5_SD10G28_MODE_PRESET_SFI_5000_6G];
else
*mode = mode_presets_10g[SPX5_SD10G28_MODE_PRESET_SFI_5000_10G];
} else {
dev_err(macro->priv->dev, "%s: Illegal speed: %02u, sidx: %02u, mode (%u)",
__func__, macro->speed, macro->sidx,
macro->serdesmode);
return -EINVAL;
}
break;
case SPX5_SD_MODE_QSGMII:
*mode = mode_presets_10g[SPX5_SD10G28_MODE_PRESET_QSGMII];
break;
case SPX5_SD_MODE_2G5:
*mode = mode_presets_10g[SPX5_SD10G28_MODE_PRESET_SD_2G5];
break;
case SPX5_SD_MODE_100FX:
case SPX5_SD_MODE_1000BASEX:
*mode = mode_presets_10g[SPX5_SD10G28_MODE_PRESET_1000BASEX];
break;
default:
*mode = mode_presets_10g[SPX5_SD10G28_MODE_PRESET_10000];
break;
}
return 0;
}
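/*
 * Merge the media preset (equalization/amplitude tuning), the mode preset
 * (rate, dividers, interface width) and the caller's arguments into the flat
 * per-field parameter set that is later applied to the 25G28 SerDes registers.
 */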
static void sparx5_sd25g28_get_params(struct sparx5_serdes_macro *macro,
struct sparx5_sd25g28_media_preset *media,
struct sparx5_sd25g28_mode_preset *mode,
struct sparx5_sd25g28_args *args,
struct sparx5_sd25g28_params *params)
{
u8 iw = sd25g28_get_iw_setting(macro->priv->dev, mode->bitwidth);
struct sparx5_sd25g28_params init = {
.r_d_width_ctrl_2_0 = iw,
.r_txfifo_ck_div_pmad_2_0 = mode->fifo_ck_div,
.r_rxfifo_ck_div_pmad_2_0 = mode->fifo_ck_div,
.cfg_vco_div_mode_1_0 = mode->vco_div_mode,
.cfg_pre_divsel_1_0 = mode->pre_divsel,
.cfg_sel_div_3_0 = mode->sel_div,
.cfg_vco_start_code_3_0 = 0,
.cfg_pma_tx_ck_bitwidth_2_0 = mode->ck_bitwidth,
.cfg_tx_prediv_1_0 = mode->tx_pre_div,
.cfg_rxdiv_sel_2_0 = mode->ck_bitwidth,
.cfg_tx_subrate_2_0 = mode->subrate,
.cfg_rx_subrate_2_0 = mode->subrate,
.r_multi_lane_mode = 0,
.cfg_cdrck_en = 1,
.cfg_dfeck_en = mode->dfe_enable,
.cfg_dfe_pd = mode->dfe_enable == 1 ? 0 : 1,
.cfg_dfedmx_pd = 1,
.cfg_dfetap_en_5_1 = mode->dfe_tap,
.cfg_dmux_pd = 0,
.cfg_dmux_clk_pd = 1,
.cfg_erramp_pd = mode->dfe_enable == 1 ? 0 : 1,
.cfg_pi_DFE_en = mode->dfe_enable,
.cfg_pi_en = 1,
.cfg_pd_ctle = 0,
.cfg_summer_en = 1,
.cfg_pmad_ck_pd = 0,
.cfg_pd_clk = 0,
.cfg_pd_cml = 0,
.cfg_pd_driver = 0,
.cfg_rx_reg_pu = 1,
.cfg_pd_rms_det = 1,
.cfg_dcdr_pd = 0,
.cfg_ecdr_pd = 1,
.cfg_pd_sq = 1,
.cfg_itx_ipdriver_base_2_0 = mode->txmargin,
.cfg_tap_dly_4_0 = media->cfg_tap_dly_4_0,
.cfg_tap_main = media->cfg_tap_main,
.cfg_en_main = media->cfg_en_main,
.cfg_tap_adv_3_0 = media->cfg_tap_adv_3_0,
.cfg_en_adv = media->cfg_en_adv,
.cfg_en_dly = media->cfg_en_dly,
.cfg_iscan_en = 0,
.l1_pcs_en_fast_iscan = 0,
.l0_cfg_bw_1_0 = 0,
.cfg_en_dummy = 0,
.cfg_pll_reserve_3_0 = args->com_pll_reserve,
.l0_cfg_txcal_en = mode->com_txcal_en,
.l0_cfg_tx_reserve_15_8 = mode->com_tx_reserve_msb,
.l0_cfg_tx_reserve_7_0 = mode->com_tx_reserve_lsb,
.cfg_tx_reserve_15_8 = mode->tx_reserve_msb,
.cfg_tx_reserve_7_0 = mode->tx_reserve_lsb,
.cfg_bw_1_0 = mode->bw,
.cfg_txcal_man_en = 1,
.cfg_phase_man_4_0 = 0,
.cfg_quad_man_1_0 = 0,
.cfg_txcal_shift_code_5_0 = 2,
.cfg_txcal_valid_sel_3_0 = 4,
.cfg_txcal_en = 0,
.cfg_cdr_kf_2_0 = 1,
.cfg_cdr_m_7_0 = 6,
.cfg_pi_bw_3_0 = mode->cfg_pi_bw_3_0,
.cfg_pi_steps_1_0 = 0,
.cfg_dis_2ndorder = 1,
.cfg_ctle_rstn = mode->cfg_ctle_rstn,
.r_dfe_rstn = mode->r_dfe_rstn,
.cfg_alos_thr_2_0 = media->cfg_alos_thr_2_0,
.cfg_itx_ipcml_base_1_0 = mode->cfg_itx_ipcml_base,
.cfg_rx_reserve_7_0 = 0xbf,
.cfg_rx_reserve_15_8 = 0x61,
.cfg_rxterm_2_0 = mode->rxterm,
.cfg_fom_selm = 0,
.cfg_rx_sp_ctle_1_0 = 0,
.cfg_isel_ctle_1_0 = 0,
.cfg_vga_ctrl_byp_4_0 = media->cfg_vga_ctrl_byp_4_0,
.cfg_vga_byp = 1,
.cfg_agc_adpt_byp = 1,
.cfg_eqr_byp = 1,
.cfg_eqr_force_3_0 = media->cfg_eq_r_force_3_0,
.cfg_eqc_force_3_0 = media->cfg_eq_c_force_3_0,
.cfg_sum_setcm_en = 1,
.cfg_pi_dfe_en = 1,
.cfg_init_pos_iscan_6_0 = 6,
.cfg_init_pos_ipi_6_0 = 9,
.cfg_dfedig_m_2_0 = 6,
.cfg_en_dfedig = mode->dfe_enable,
.r_d_width_ctrl_from_hwt = 0,
.r_reg_manual = 1,
.reg_rst = args->reg_rst,
.cfg_jc_byp = 1,
.cfg_common_reserve_7_0 = 1,
.cfg_pll_lol_set = 1,
.cfg_tx2rx_lp_en = 0,
.cfg_txlb_en = 0,
.cfg_rx2tx_lp_en = 0,
.cfg_rxlb_en = 0,
.r_tx_pol_inv = args->txinvert,
.r_rx_pol_inv = args->rxinvert,
};
*params = init;
}
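/*
 * Combine the media preset, the mode preset and the per-call arguments into
 * one 6G/10G lane parameter set.
 */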
static void sparx5_sd10g28_get_params(struct sparx5_serdes_macro *macro,
struct sparx5_sd10g28_media_preset *media,
struct sparx5_sd10g28_mode_preset *mode,
struct sparx5_sd10g28_args *args,
struct sparx5_sd10g28_params *params)
{
u8 iw = sd10g28_get_iw_setting(macro->priv->dev, mode->bwidth);
struct sparx5_sd10g28_params init = {
.skip_cmu_cfg = args->skip_cmu_cfg,
.is_6g = args->is_6g,
.cmu_sel = mode->cmu_sel,
.cfg_lane_reserve_7_0 = (mode->cmu_sel % 2) << 6,
.cfg_ssc_rtl_clk_sel = (mode->cmu_sel / 2),
.cfg_lane_reserve_15_8 = mode->duty_cycle,
.cfg_txrate_1_0 = mode->rate,
.cfg_rxrate_1_0 = mode->rate,
.fx_100 = macro->serdesmode == SPX5_SD_MODE_100FX,
.r_d_width_ctrl_2_0 = iw,
.cfg_pma_tx_ck_bitwidth_2_0 = iw,
.cfg_rxdiv_sel_2_0 = iw,
.r_pcs2pma_phymode_4_0 = 0,
.cfg_lane_id_2_0 = 0,
.cfg_cdrck_en = 1,
.cfg_dfeck_en = mode->dfe_enable,
.cfg_dfe_pd = (mode->dfe_enable == 1) ? 0 : 1,
.cfg_dfetap_en_5_1 = mode->dfe_tap,
.cfg_erramp_pd = (mode->dfe_enable == 1) ? 0 : 1,
.cfg_pi_DFE_en = mode->dfe_enable,
.cfg_pi_en = 1,
.cfg_pd_ctle = 0,
.cfg_summer_en = 1,
.cfg_pd_rx_cktree = 0,
.cfg_pd_clk = 0,
.cfg_pd_cml = 0,
.cfg_pd_driver = 0,
.cfg_rx_reg_pu = 1,
.cfg_d_cdr_pd = 0,
.cfg_pd_sq = mode->dfe_enable,
.cfg_rxdet_en = 0,
.cfg_rxdet_str = 0,
.r_multi_lane_mode = 0,
.cfg_en_adv = media->cfg_en_adv,
.cfg_en_main = 1,
.cfg_en_dly = media->cfg_en_dly,
.cfg_tap_adv_3_0 = media->cfg_tap_adv_3_0,
.cfg_tap_main = media->cfg_tap_main,
.cfg_tap_dly_4_0 = media->cfg_tap_dly_4_0,
.cfg_vga_ctrl_3_0 = media->cfg_vga_ctrl_3_0,
.cfg_vga_cp_2_0 = media->cfg_vga_cp_2_0,
.cfg_eq_res_3_0 = media->cfg_eq_res_3_0,
.cfg_eq_r_byp = media->cfg_eq_r_byp,
.cfg_eq_c_force_3_0 = media->cfg_eq_c_force_3_0,
.cfg_en_dfedig = mode->dfe_enable,
.cfg_sum_setcm_en = 1,
.cfg_en_preemph = 0,
.cfg_itx_ippreemp_base_1_0 = 0,
.cfg_itx_ipdriver_base_2_0 = (args->txswing >> 6),
.cfg_ibias_tune_reserve_5_0 = (args->txswing & 63),
.cfg_txswing_half = (args->txmargin),
.cfg_dis_2nd_order = 0x1,
.cfg_rx_ssc_lh = 0x0,
.cfg_pi_floop_steps_1_0 = 0x0,
.cfg_pi_ext_dac_23_16 = (1 << 5),
.cfg_pi_ext_dac_15_8 = (0 << 6),
.cfg_iscan_ext_dac_7_0 = (1 << 7) + 9,
.cfg_cdr_kf_gen1_2_0 = 1,
.cfg_cdr_kf_gen2_2_0 = 1,
.cfg_cdr_kf_gen3_2_0 = 1,
.cfg_cdr_kf_gen4_2_0 = 1,
.r_cdr_m_gen1_7_0 = 4,
.cfg_pi_bw_gen1_3_0 = mode->pi_bw_gen1,
.cfg_pi_bw_gen2 = mode->pi_bw_gen1,
.cfg_pi_bw_gen3 = mode->pi_bw_gen1,
.cfg_pi_bw_gen4 = mode->pi_bw_gen1,
.cfg_pi_ext_dac_7_0 = 3,
.cfg_pi_steps = 0,
.cfg_mp_max_3_0 = 1,
.cfg_rstn_dfedig = mode->dfe_enable,
.cfg_alos_thr_3_0 = media->cfg_alos_thr_3_0,
.cfg_predrv_slewrate_1_0 = 3,
.cfg_itx_ipcml_base_1_0 = 0,
.cfg_ip_pre_base_1_0 = 0,
.r_cdr_m_gen2_7_0 = 2,
.r_cdr_m_gen3_7_0 = 2,
.r_cdr_m_gen4_7_0 = 2,
.r_en_auto_cdr_rstn = 0,
.cfg_oscal_afe = 1,
.cfg_pd_osdac_afe = 0,
.cfg_resetb_oscal_afe[0] = 0,
.cfg_resetb_oscal_afe[1] = 1,
.cfg_center_spreading = 0,
.cfg_m_cnt_maxval_4_0 = 15,
.cfg_ncnt_maxval_7_0 = 32,
.cfg_ncnt_maxval_10_8 = 6,
.cfg_ssc_en = 1,
.cfg_tx2rx_lp_en = 0,
.cfg_txlb_en = 0,
.cfg_rx2tx_lp_en = 0,
.cfg_rxlb_en = 0,
.r_tx_pol_inv = args->txinvert,
.r_rx_pol_inv = args->rxinvert,
};
*params = init;
}
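/*
 * Configure one CMU: pulse the external configuration and CMU resets,
 * program the clock configuration, release the PLL reset and verify PLL
 * lock before clearing the PMA TX clock power-down.
 */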
static int sparx5_cmu_apply_cfg(struct sparx5_serdes_private *priv,
u32 cmu_idx,
void __iomem *cmu_tgt,
void __iomem *cmu_cfg_tgt,
u32 spd10g)
{
void __iomem **regs = priv->regs;
struct device *dev = priv->dev;
int value;
cmu_tgt = sdx5_inst_get(priv, TARGET_SD_CMU, cmu_idx);
cmu_cfg_tgt = sdx5_inst_get(priv, TARGET_SD_CMU_CFG, cmu_idx);
if (cmu_idx == 1 || cmu_idx == 4 || cmu_idx == 7 ||
cmu_idx == 10 || cmu_idx == 13) {
spd10g = 0;
}
sdx5_inst_rmw(SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST_SET(1),
SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST,
cmu_cfg_tgt,
SD_CMU_CFG_SD_CMU_CFG(cmu_idx));
sdx5_inst_rmw(SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST_SET(0),
SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST,
cmu_cfg_tgt,
SD_CMU_CFG_SD_CMU_CFG(cmu_idx));
sdx5_inst_rmw(SD_CMU_CFG_SD_CMU_CFG_CMU_RST_SET(1),
SD_CMU_CFG_SD_CMU_CFG_CMU_RST,
cmu_cfg_tgt,
SD_CMU_CFG_SD_CMU_CFG(cmu_idx));
sdx5_inst_rmw(SD_CMU_CMU_45_R_DWIDTHCTRL_FROM_HWT_SET(0x1) |
SD_CMU_CMU_45_R_REFCK_SSC_EN_FROM_HWT_SET(0x1) |
SD_CMU_CMU_45_R_LINK_BUF_EN_FROM_HWT_SET(0x1) |
SD_CMU_CMU_45_R_BIAS_EN_FROM_HWT_SET(0x1) |
SD_CMU_CMU_45_R_EN_RATECHG_CTRL_SET(0x0),
SD_CMU_CMU_45_R_DWIDTHCTRL_FROM_HWT |
SD_CMU_CMU_45_R_REFCK_SSC_EN_FROM_HWT |
SD_CMU_CMU_45_R_LINK_BUF_EN_FROM_HWT |
SD_CMU_CMU_45_R_BIAS_EN_FROM_HWT |
SD_CMU_CMU_45_R_EN_RATECHG_CTRL,
cmu_tgt,
SD_CMU_CMU_45(cmu_idx));
sdx5_inst_rmw(SD_CMU_CMU_47_R_PCS2PMA_PHYMODE_4_0_SET(0),
SD_CMU_CMU_47_R_PCS2PMA_PHYMODE_4_0,
cmu_tgt,
SD_CMU_CMU_47(cmu_idx));
sdx5_inst_rmw(SD_CMU_CMU_1B_CFG_RESERVE_7_0_SET(0),
SD_CMU_CMU_1B_CFG_RESERVE_7_0,
cmu_tgt,
SD_CMU_CMU_1B(cmu_idx));
sdx5_inst_rmw(SD_CMU_CMU_0D_CFG_JC_BYP_SET(0x1),
SD_CMU_CMU_0D_CFG_JC_BYP,
cmu_tgt,
SD_CMU_CMU_0D(cmu_idx));
sdx5_inst_rmw(SD_CMU_CMU_1F_CFG_VTUNE_SEL_SET(1),
SD_CMU_CMU_1F_CFG_VTUNE_SEL,
cmu_tgt,
SD_CMU_CMU_1F(cmu_idx));
sdx5_inst_rmw(SD_CMU_CMU_00_CFG_PLL_TP_SEL_1_0_SET(3),
SD_CMU_CMU_00_CFG_PLL_TP_SEL_1_0,
cmu_tgt,
SD_CMU_CMU_00(cmu_idx));
sdx5_inst_rmw(SD_CMU_CMU_05_CFG_BIAS_TP_SEL_1_0_SET(3),
SD_CMU_CMU_05_CFG_BIAS_TP_SEL_1_0,
cmu_tgt,
SD_CMU_CMU_05(cmu_idx));
sdx5_inst_rmw(SD_CMU_CMU_30_R_PLL_DLOL_EN_SET(1),
SD_CMU_CMU_30_R_PLL_DLOL_EN,
cmu_tgt,
SD_CMU_CMU_30(cmu_idx));
sdx5_inst_rmw(SD_CMU_CMU_09_CFG_SW_10G_SET(spd10g),
SD_CMU_CMU_09_CFG_SW_10G,
cmu_tgt,
SD_CMU_CMU_09(cmu_idx));
sdx5_inst_rmw(SD_CMU_CFG_SD_CMU_CFG_CMU_RST_SET(0),
SD_CMU_CFG_SD_CMU_CFG_CMU_RST,
cmu_cfg_tgt,
SD_CMU_CFG_SD_CMU_CFG(cmu_idx));
msleep(20);
sdx5_inst_rmw(SD_CMU_CMU_44_R_PLL_RSTN_SET(0),
SD_CMU_CMU_44_R_PLL_RSTN,
cmu_tgt,
SD_CMU_CMU_44(cmu_idx));
sdx5_inst_rmw(SD_CMU_CMU_44_R_PLL_RSTN_SET(1),
SD_CMU_CMU_44_R_PLL_RSTN,
cmu_tgt,
SD_CMU_CMU_44(cmu_idx));
msleep(20);
value = readl(sdx5_addr(regs, SD_CMU_CMU_E0(cmu_idx)));
value = SD_CMU_CMU_E0_PLL_LOL_UDL_GET(value);
if (value) {
dev_err(dev, "CMU PLL Loss of Lock: 0x%x\n", value);
return -EINVAL;
}
sdx5_inst_rmw(SD_CMU_CMU_0D_CFG_PMA_TX_CK_PD_SET(0),
SD_CMU_CMU_0D_CFG_PMA_TX_CK_PD,
cmu_tgt,
SD_CMU_CMU_0D(cmu_idx));
return 0;
}
static int sparx5_cmu_cfg(struct sparx5_serdes_private *priv, u32 cmu_idx)
{
void __iomem *cmu_tgt, *cmu_cfg_tgt;
u32 spd10g = 1;
if (cmu_idx == 1 || cmu_idx == 4 || cmu_idx == 7 ||
cmu_idx == 10 || cmu_idx == 13) {
spd10g = 0;
}
cmu_tgt = sdx5_inst_get(priv, TARGET_SD_CMU, cmu_idx);
cmu_cfg_tgt = sdx5_inst_get(priv, TARGET_SD_CMU_CFG, cmu_idx);
return sparx5_cmu_apply_cfg(priv, cmu_idx, cmu_tgt, cmu_cfg_tgt, spd10g);
}
/* Map of 6G/10G serdes mode and index to CMU index. */
static const int
sparx5_serdes_cmu_map[SPX5_SD10G28_CMU_MAX][SPX5_SERDES_6G10G_CNT] = {
[SPX5_SD10G28_CMU_MAIN] = { 2, 2, 2, 2, 2,
2, 2, 2, 5, 5,
5, 5, 5, 5, 5,
5, 8, 11, 11, 11,
11, 11, 11, 11, 11 },
[SPX5_SD10G28_CMU_AUX1] = { 0, 0, 3, 3, 3,
3, 3, 3, 3, 3,
6, 6, 6, 6, 6,
6, 6, 9, 9, 12,
12, 12, 12, 12, 12 },
[SPX5_SD10G28_CMU_AUX2] = { 1, 1, 1, 1, 4,
4, 4, 4, 4, 4,
4, 4, 7, 7, 7,
7, 7, 10, 10, 10,
10, 13, 13, 13, 13 },
[SPX5_SD10G28_CMU_NONE] = { 1, 1, 1, 1, 4,
4, 4, 4, 4, 4,
4, 4, 7, 7, 7,
7, 7, 10, 10, 10,
10, 13, 13, 13, 13 },
};
/* Get the index of the CMU which provides the clock for the specified serdes
* mode and index.
*/
static int sparx5_serdes_cmu_get(enum sparx5_10g28cmu_mode mode, int sd_index)
{
return sparx5_serdes_cmu_map[mode][sd_index];
}
static void sparx5_serdes_cmu_power_off(struct sparx5_serdes_private *priv)
{
void __iomem *cmu_inst, *cmu_cfg_inst;
int i;
/* Power down each CMU */
for (i = 0; i < SPX5_CMU_MAX; i++) {
cmu_inst = sdx5_inst_get(priv, TARGET_SD_CMU, i);
cmu_cfg_inst = sdx5_inst_get(priv, TARGET_SD_CMU_CFG, i);
sdx5_inst_rmw(SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST_SET(0),
SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST, cmu_cfg_inst,
SD_CMU_CFG_SD_CMU_CFG(0));
sdx5_inst_rmw(SD_CMU_CMU_05_CFG_REFCK_TERM_EN_SET(0),
SD_CMU_CMU_05_CFG_REFCK_TERM_EN, cmu_inst,
SD_CMU_CMU_05(0));
sdx5_inst_rmw(SD_CMU_CMU_09_CFG_EN_TX_CK_DN_SET(0),
SD_CMU_CMU_09_CFG_EN_TX_CK_DN, cmu_inst,
SD_CMU_CMU_09(0));
sdx5_inst_rmw(SD_CMU_CMU_06_CFG_VCO_PD_SET(1),
SD_CMU_CMU_06_CFG_VCO_PD, cmu_inst,
SD_CMU_CMU_06(0));
sdx5_inst_rmw(SD_CMU_CMU_09_CFG_EN_TX_CK_UP_SET(0),
SD_CMU_CMU_09_CFG_EN_TX_CK_UP, cmu_inst,
SD_CMU_CMU_09(0));
sdx5_inst_rmw(SD_CMU_CMU_08_CFG_CK_TREE_PD_SET(1),
SD_CMU_CMU_08_CFG_CK_TREE_PD, cmu_inst,
SD_CMU_CMU_08(0));
sdx5_inst_rmw(SD_CMU_CMU_0D_CFG_REFCK_PD_SET(1) |
SD_CMU_CMU_0D_CFG_PD_DIV64_SET(1) |
SD_CMU_CMU_0D_CFG_PD_DIV66_SET(1),
SD_CMU_CMU_0D_CFG_REFCK_PD |
SD_CMU_CMU_0D_CFG_PD_DIV64 |
SD_CMU_CMU_0D_CFG_PD_DIV66, cmu_inst,
SD_CMU_CMU_0D(0));
sdx5_inst_rmw(SD_CMU_CMU_06_CFG_CTRL_LOGIC_PD_SET(1),
SD_CMU_CMU_06_CFG_CTRL_LOGIC_PD, cmu_inst,
SD_CMU_CMU_06(0));
}
}
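/*
 * Pulse the external configuration reset of a 25G lane, but only when a
 * register reset was requested (params->reg_rst).
 */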
static void sparx5_sd25g28_reset(void __iomem *regs[],
struct sparx5_sd25g28_params *params,
u32 sd_index)
{
if (params->reg_rst == 1) {
sdx5_rmw_addr(SD_LANE_25G_SD_LANE_CFG_EXT_CFG_RST_SET(1),
SD_LANE_25G_SD_LANE_CFG_EXT_CFG_RST,
sdx5_addr(regs, SD_LANE_25G_SD_LANE_CFG(sd_index)));
usleep_range(1000, 2000);
sdx5_rmw_addr(SD_LANE_25G_SD_LANE_CFG_EXT_CFG_RST_SET(0),
SD_LANE_25G_SD_LANE_CFG_EXT_CFG_RST,
sdx5_addr(regs, SD_LANE_25G_SD_LANE_CFG(sd_index)));
}
}
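/*
 * Program the prepared parameter set into the 25G CMU and lane registers
 * while the macro is held in reset, then release the resets and check PLL
 * lock and PMA reset status before clearing the serializer and deserializer
 * resets.
 */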
static int sparx5_sd25g28_apply_params(struct sparx5_serdes_macro *macro,
struct sparx5_sd25g28_params *params)
{
struct sparx5_serdes_private *priv = macro->priv;
void __iomem **regs = priv->regs;
struct device *dev = priv->dev;
u32 sd_index = macro->stpidx;
u32 value;
sdx5_rmw(SD_LANE_25G_SD_LANE_CFG_MACRO_RST_SET(1),
SD_LANE_25G_SD_LANE_CFG_MACRO_RST,
priv,
SD_LANE_25G_SD_LANE_CFG(sd_index));
sdx5_rmw(SD25G_LANE_CMU_FF_REGISTER_TABLE_INDEX_SET(0xFF),
SD25G_LANE_CMU_FF_REGISTER_TABLE_INDEX,
priv,
SD25G_LANE_CMU_FF(sd_index));
sdx5_rmw(SD25G_LANE_CMU_1A_R_DWIDTHCTRL_FROM_HWT_SET
(params->r_d_width_ctrl_from_hwt) |
SD25G_LANE_CMU_1A_R_REG_MANUAL_SET(params->r_reg_manual),
SD25G_LANE_CMU_1A_R_DWIDTHCTRL_FROM_HWT |
SD25G_LANE_CMU_1A_R_REG_MANUAL,
priv,
SD25G_LANE_CMU_1A(sd_index));
sdx5_rmw(SD25G_LANE_CMU_31_CFG_COMMON_RESERVE_7_0_SET
(params->cfg_common_reserve_7_0),
SD25G_LANE_CMU_31_CFG_COMMON_RESERVE_7_0,
priv,
SD25G_LANE_CMU_31(sd_index));
sdx5_rmw(SD25G_LANE_CMU_09_CFG_EN_DUMMY_SET(params->cfg_en_dummy),
SD25G_LANE_CMU_09_CFG_EN_DUMMY,
priv,
SD25G_LANE_CMU_09(sd_index));
sdx5_rmw(SD25G_LANE_CMU_13_CFG_PLL_RESERVE_3_0_SET
(params->cfg_pll_reserve_3_0),
SD25G_LANE_CMU_13_CFG_PLL_RESERVE_3_0,
priv,
SD25G_LANE_CMU_13(sd_index));
sdx5_rmw(SD25G_LANE_CMU_40_L0_CFG_TXCAL_EN_SET(params->l0_cfg_txcal_en),
SD25G_LANE_CMU_40_L0_CFG_TXCAL_EN,
priv,
SD25G_LANE_CMU_40(sd_index));
sdx5_rmw(SD25G_LANE_CMU_46_L0_CFG_TX_RESERVE_15_8_SET
(params->l0_cfg_tx_reserve_15_8),
SD25G_LANE_CMU_46_L0_CFG_TX_RESERVE_15_8,
priv,
SD25G_LANE_CMU_46(sd_index));
sdx5_rmw(SD25G_LANE_CMU_45_L0_CFG_TX_RESERVE_7_0_SET
(params->l0_cfg_tx_reserve_7_0),
SD25G_LANE_CMU_45_L0_CFG_TX_RESERVE_7_0,
priv,
SD25G_LANE_CMU_45(sd_index));
sdx5_rmw(SD25G_LANE_CMU_0B_CFG_VCO_CAL_RESETN_SET(0),
SD25G_LANE_CMU_0B_CFG_VCO_CAL_RESETN,
priv,
SD25G_LANE_CMU_0B(sd_index));
sdx5_rmw(SD25G_LANE_CMU_0B_CFG_VCO_CAL_RESETN_SET(1),
SD25G_LANE_CMU_0B_CFG_VCO_CAL_RESETN,
priv,
SD25G_LANE_CMU_0B(sd_index));
sdx5_rmw(SD25G_LANE_CMU_19_R_CK_RESETB_SET(0),
SD25G_LANE_CMU_19_R_CK_RESETB,
priv,
SD25G_LANE_CMU_19(sd_index));
sdx5_rmw(SD25G_LANE_CMU_19_R_CK_RESETB_SET(1),
SD25G_LANE_CMU_19_R_CK_RESETB,
priv,
SD25G_LANE_CMU_19(sd_index));
sdx5_rmw(SD25G_LANE_CMU_18_R_PLL_RSTN_SET(0),
SD25G_LANE_CMU_18_R_PLL_RSTN,
priv,
SD25G_LANE_CMU_18(sd_index));
sdx5_rmw(SD25G_LANE_CMU_18_R_PLL_RSTN_SET(1),
SD25G_LANE_CMU_18_R_PLL_RSTN,
priv,
SD25G_LANE_CMU_18(sd_index));
sdx5_rmw(SD25G_LANE_CMU_1A_R_DWIDTHCTRL_2_0_SET(params->r_d_width_ctrl_2_0),
SD25G_LANE_CMU_1A_R_DWIDTHCTRL_2_0,
priv,
SD25G_LANE_CMU_1A(sd_index));
sdx5_rmw(SD25G_LANE_CMU_30_R_TXFIFO_CK_DIV_PMAD_2_0_SET
(params->r_txfifo_ck_div_pmad_2_0) |
SD25G_LANE_CMU_30_R_RXFIFO_CK_DIV_PMAD_2_0_SET
(params->r_rxfifo_ck_div_pmad_2_0),
SD25G_LANE_CMU_30_R_TXFIFO_CK_DIV_PMAD_2_0 |
SD25G_LANE_CMU_30_R_RXFIFO_CK_DIV_PMAD_2_0,
priv,
SD25G_LANE_CMU_30(sd_index));
sdx5_rmw(SD25G_LANE_CMU_0C_CFG_PLL_LOL_SET_SET(params->cfg_pll_lol_set) |
SD25G_LANE_CMU_0C_CFG_VCO_DIV_MODE_1_0_SET
(params->cfg_vco_div_mode_1_0),
SD25G_LANE_CMU_0C_CFG_PLL_LOL_SET |
SD25G_LANE_CMU_0C_CFG_VCO_DIV_MODE_1_0,
priv,
SD25G_LANE_CMU_0C(sd_index));
sdx5_rmw(SD25G_LANE_CMU_0D_CFG_PRE_DIVSEL_1_0_SET
(params->cfg_pre_divsel_1_0),
SD25G_LANE_CMU_0D_CFG_PRE_DIVSEL_1_0,
priv,
SD25G_LANE_CMU_0D(sd_index));
sdx5_rmw(SD25G_LANE_CMU_0E_CFG_SEL_DIV_3_0_SET(params->cfg_sel_div_3_0),
SD25G_LANE_CMU_0E_CFG_SEL_DIV_3_0,
priv,
SD25G_LANE_CMU_0E(sd_index));
sdx5_rmw(SD25G_LANE_CMU_FF_REGISTER_TABLE_INDEX_SET(0x00),
SD25G_LANE_CMU_FF_REGISTER_TABLE_INDEX,
priv,
SD25G_LANE_CMU_FF(sd_index));
sdx5_rmw(SD25G_LANE_LANE_0C_LN_CFG_PMA_TX_CK_BITWIDTH_2_0_SET
(params->cfg_pma_tx_ck_bitwidth_2_0),
SD25G_LANE_LANE_0C_LN_CFG_PMA_TX_CK_BITWIDTH_2_0,
priv,
SD25G_LANE_LANE_0C(sd_index));
sdx5_rmw(SD25G_LANE_LANE_01_LN_CFG_TX_PREDIV_1_0_SET
(params->cfg_tx_prediv_1_0),
SD25G_LANE_LANE_01_LN_CFG_TX_PREDIV_1_0,
priv,
SD25G_LANE_LANE_01(sd_index));
sdx5_rmw(SD25G_LANE_LANE_18_LN_CFG_RXDIV_SEL_2_0_SET
(params->cfg_rxdiv_sel_2_0),
SD25G_LANE_LANE_18_LN_CFG_RXDIV_SEL_2_0,
priv,
SD25G_LANE_LANE_18(sd_index));
sdx5_rmw(SD25G_LANE_LANE_2C_LN_CFG_TX_SUBRATE_2_0_SET
(params->cfg_tx_subrate_2_0),
SD25G_LANE_LANE_2C_LN_CFG_TX_SUBRATE_2_0,
priv,
SD25G_LANE_LANE_2C(sd_index));
sdx5_rmw(SD25G_LANE_LANE_28_LN_CFG_RX_SUBRATE_2_0_SET
(params->cfg_rx_subrate_2_0),
SD25G_LANE_LANE_28_LN_CFG_RX_SUBRATE_2_0,
priv,
SD25G_LANE_LANE_28(sd_index));
sdx5_rmw(SD25G_LANE_LANE_18_LN_CFG_CDRCK_EN_SET(params->cfg_cdrck_en),
SD25G_LANE_LANE_18_LN_CFG_CDRCK_EN,
priv,
SD25G_LANE_LANE_18(sd_index));
sdx5_rmw(SD25G_LANE_LANE_0F_LN_CFG_DFETAP_EN_5_1_SET
(params->cfg_dfetap_en_5_1),
SD25G_LANE_LANE_0F_LN_CFG_DFETAP_EN_5_1,
priv,
SD25G_LANE_LANE_0F(sd_index));
sdx5_rmw(SD25G_LANE_LANE_18_LN_CFG_ERRAMP_PD_SET(params->cfg_erramp_pd),
SD25G_LANE_LANE_18_LN_CFG_ERRAMP_PD,
priv,
SD25G_LANE_LANE_18(sd_index));
sdx5_rmw(SD25G_LANE_LANE_1D_LN_CFG_PI_DFE_EN_SET(params->cfg_pi_dfe_en),
SD25G_LANE_LANE_1D_LN_CFG_PI_DFE_EN,
priv,
SD25G_LANE_LANE_1D(sd_index));
sdx5_rmw(SD25G_LANE_LANE_19_LN_CFG_ECDR_PD_SET(params->cfg_ecdr_pd),
SD25G_LANE_LANE_19_LN_CFG_ECDR_PD,
priv,
SD25G_LANE_LANE_19(sd_index));
sdx5_rmw(SD25G_LANE_LANE_01_LN_CFG_ITX_IPDRIVER_BASE_2_0_SET
(params->cfg_itx_ipdriver_base_2_0),
SD25G_LANE_LANE_01_LN_CFG_ITX_IPDRIVER_BASE_2_0,
priv,
SD25G_LANE_LANE_01(sd_index));
sdx5_rmw(SD25G_LANE_LANE_03_LN_CFG_TAP_DLY_4_0_SET(params->cfg_tap_dly_4_0),
SD25G_LANE_LANE_03_LN_CFG_TAP_DLY_4_0,
priv,
SD25G_LANE_LANE_03(sd_index));
sdx5_rmw(SD25G_LANE_LANE_06_LN_CFG_TAP_ADV_3_0_SET(params->cfg_tap_adv_3_0),
SD25G_LANE_LANE_06_LN_CFG_TAP_ADV_3_0,
priv,
SD25G_LANE_LANE_06(sd_index));
sdx5_rmw(SD25G_LANE_LANE_07_LN_CFG_EN_ADV_SET(params->cfg_en_adv) |
SD25G_LANE_LANE_07_LN_CFG_EN_DLY_SET(params->cfg_en_dly),
SD25G_LANE_LANE_07_LN_CFG_EN_ADV |
SD25G_LANE_LANE_07_LN_CFG_EN_DLY,
priv,
SD25G_LANE_LANE_07(sd_index));
sdx5_rmw(SD25G_LANE_LANE_43_LN_CFG_TX_RESERVE_15_8_SET
(params->cfg_tx_reserve_15_8),
SD25G_LANE_LANE_43_LN_CFG_TX_RESERVE_15_8,
priv,
SD25G_LANE_LANE_43(sd_index));
sdx5_rmw(SD25G_LANE_LANE_42_LN_CFG_TX_RESERVE_7_0_SET
(params->cfg_tx_reserve_7_0),
SD25G_LANE_LANE_42_LN_CFG_TX_RESERVE_7_0,
priv,
SD25G_LANE_LANE_42(sd_index));
sdx5_rmw(SD25G_LANE_LANE_05_LN_CFG_BW_1_0_SET(params->cfg_bw_1_0),
SD25G_LANE_LANE_05_LN_CFG_BW_1_0,
priv,
SD25G_LANE_LANE_05(sd_index));
sdx5_rmw(SD25G_LANE_LANE_0B_LN_CFG_TXCAL_MAN_EN_SET
(params->cfg_txcal_man_en),
SD25G_LANE_LANE_0B_LN_CFG_TXCAL_MAN_EN,
priv,
SD25G_LANE_LANE_0B(sd_index));
sdx5_rmw(SD25G_LANE_LANE_0A_LN_CFG_TXCAL_SHIFT_CODE_5_0_SET
(params->cfg_txcal_shift_code_5_0),
SD25G_LANE_LANE_0A_LN_CFG_TXCAL_SHIFT_CODE_5_0,
priv,
SD25G_LANE_LANE_0A(sd_index));
sdx5_rmw(SD25G_LANE_LANE_09_LN_CFG_TXCAL_VALID_SEL_3_0_SET
(params->cfg_txcal_valid_sel_3_0),
SD25G_LANE_LANE_09_LN_CFG_TXCAL_VALID_SEL_3_0,
priv,
SD25G_LANE_LANE_09(sd_index));
sdx5_rmw(SD25G_LANE_LANE_1A_LN_CFG_CDR_KF_2_0_SET(params->cfg_cdr_kf_2_0),
SD25G_LANE_LANE_1A_LN_CFG_CDR_KF_2_0,
priv,
SD25G_LANE_LANE_1A(sd_index));
sdx5_rmw(SD25G_LANE_LANE_1B_LN_CFG_CDR_M_7_0_SET(params->cfg_cdr_m_7_0),
SD25G_LANE_LANE_1B_LN_CFG_CDR_M_7_0,
priv,
SD25G_LANE_LANE_1B(sd_index));
sdx5_rmw(SD25G_LANE_LANE_2B_LN_CFG_PI_BW_3_0_SET(params->cfg_pi_bw_3_0),
SD25G_LANE_LANE_2B_LN_CFG_PI_BW_3_0,
priv,
SD25G_LANE_LANE_2B(sd_index));
sdx5_rmw(SD25G_LANE_LANE_2C_LN_CFG_DIS_2NDORDER_SET
(params->cfg_dis_2ndorder),
SD25G_LANE_LANE_2C_LN_CFG_DIS_2NDORDER,
priv,
SD25G_LANE_LANE_2C(sd_index));
sdx5_rmw(SD25G_LANE_LANE_2E_LN_CFG_CTLE_RSTN_SET(params->cfg_ctle_rstn),
SD25G_LANE_LANE_2E_LN_CFG_CTLE_RSTN,
priv,
SD25G_LANE_LANE_2E(sd_index));
sdx5_rmw(SD25G_LANE_LANE_00_LN_CFG_ITX_IPCML_BASE_1_0_SET
(params->cfg_itx_ipcml_base_1_0),
SD25G_LANE_LANE_00_LN_CFG_ITX_IPCML_BASE_1_0,
priv,
SD25G_LANE_LANE_00(sd_index));
sdx5_rmw(SD25G_LANE_LANE_44_LN_CFG_RX_RESERVE_7_0_SET
(params->cfg_rx_reserve_7_0),
SD25G_LANE_LANE_44_LN_CFG_RX_RESERVE_7_0,
priv,
SD25G_LANE_LANE_44(sd_index));
sdx5_rmw(SD25G_LANE_LANE_45_LN_CFG_RX_RESERVE_15_8_SET
(params->cfg_rx_reserve_15_8),
SD25G_LANE_LANE_45_LN_CFG_RX_RESERVE_15_8,
priv,
SD25G_LANE_LANE_45(sd_index));
sdx5_rmw(SD25G_LANE_LANE_0D_LN_CFG_DFECK_EN_SET(params->cfg_dfeck_en) |
SD25G_LANE_LANE_0D_LN_CFG_RXTERM_2_0_SET(params->cfg_rxterm_2_0),
SD25G_LANE_LANE_0D_LN_CFG_DFECK_EN |
SD25G_LANE_LANE_0D_LN_CFG_RXTERM_2_0,
priv,
SD25G_LANE_LANE_0D(sd_index));
sdx5_rmw(SD25G_LANE_LANE_21_LN_CFG_VGA_CTRL_BYP_4_0_SET
(params->cfg_vga_ctrl_byp_4_0),
SD25G_LANE_LANE_21_LN_CFG_VGA_CTRL_BYP_4_0,
priv,
SD25G_LANE_LANE_21(sd_index));
sdx5_rmw(SD25G_LANE_LANE_22_LN_CFG_EQR_FORCE_3_0_SET
(params->cfg_eqr_force_3_0),
SD25G_LANE_LANE_22_LN_CFG_EQR_FORCE_3_0,
priv,
SD25G_LANE_LANE_22(sd_index));
sdx5_rmw(SD25G_LANE_LANE_1C_LN_CFG_EQC_FORCE_3_0_SET
(params->cfg_eqc_force_3_0) |
SD25G_LANE_LANE_1C_LN_CFG_DFE_PD_SET(params->cfg_dfe_pd),
SD25G_LANE_LANE_1C_LN_CFG_EQC_FORCE_3_0 |
SD25G_LANE_LANE_1C_LN_CFG_DFE_PD,
priv,
SD25G_LANE_LANE_1C(sd_index));
sdx5_rmw(SD25G_LANE_LANE_1E_LN_CFG_SUM_SETCM_EN_SET
(params->cfg_sum_setcm_en),
SD25G_LANE_LANE_1E_LN_CFG_SUM_SETCM_EN,
priv,
SD25G_LANE_LANE_1E(sd_index));
sdx5_rmw(SD25G_LANE_LANE_25_LN_CFG_INIT_POS_ISCAN_6_0_SET
(params->cfg_init_pos_iscan_6_0),
SD25G_LANE_LANE_25_LN_CFG_INIT_POS_ISCAN_6_0,
priv,
SD25G_LANE_LANE_25(sd_index));
sdx5_rmw(SD25G_LANE_LANE_26_LN_CFG_INIT_POS_IPI_6_0_SET
(params->cfg_init_pos_ipi_6_0),
SD25G_LANE_LANE_26_LN_CFG_INIT_POS_IPI_6_0,
priv,
SD25G_LANE_LANE_26(sd_index));
sdx5_rmw(SD25G_LANE_LANE_18_LN_CFG_ERRAMP_PD_SET(params->cfg_erramp_pd),
SD25G_LANE_LANE_18_LN_CFG_ERRAMP_PD,
priv,
SD25G_LANE_LANE_18(sd_index));
sdx5_rmw(SD25G_LANE_LANE_0E_LN_CFG_DFEDIG_M_2_0_SET
(params->cfg_dfedig_m_2_0),
SD25G_LANE_LANE_0E_LN_CFG_DFEDIG_M_2_0,
priv,
SD25G_LANE_LANE_0E(sd_index));
sdx5_rmw(SD25G_LANE_LANE_0E_LN_CFG_EN_DFEDIG_SET(params->cfg_en_dfedig),
SD25G_LANE_LANE_0E_LN_CFG_EN_DFEDIG,
priv,
SD25G_LANE_LANE_0E(sd_index));
sdx5_rmw(SD25G_LANE_LANE_40_LN_R_TX_POL_INV_SET(params->r_tx_pol_inv) |
SD25G_LANE_LANE_40_LN_R_RX_POL_INV_SET(params->r_rx_pol_inv),
SD25G_LANE_LANE_40_LN_R_TX_POL_INV |
SD25G_LANE_LANE_40_LN_R_RX_POL_INV,
priv,
SD25G_LANE_LANE_40(sd_index));
sdx5_rmw(SD25G_LANE_LANE_04_LN_CFG_RX2TX_LP_EN_SET(params->cfg_rx2tx_lp_en) |
SD25G_LANE_LANE_04_LN_CFG_TX2RX_LP_EN_SET(params->cfg_tx2rx_lp_en),
SD25G_LANE_LANE_04_LN_CFG_RX2TX_LP_EN |
SD25G_LANE_LANE_04_LN_CFG_TX2RX_LP_EN,
priv,
SD25G_LANE_LANE_04(sd_index));
sdx5_rmw(SD25G_LANE_LANE_1E_LN_CFG_RXLB_EN_SET(params->cfg_rxlb_en),
SD25G_LANE_LANE_1E_LN_CFG_RXLB_EN,
priv,
SD25G_LANE_LANE_1E(sd_index));
sdx5_rmw(SD25G_LANE_LANE_19_LN_CFG_TXLB_EN_SET(params->cfg_txlb_en),
SD25G_LANE_LANE_19_LN_CFG_TXLB_EN,
priv,
SD25G_LANE_LANE_19(sd_index));
sdx5_rmw(SD25G_LANE_LANE_2E_LN_CFG_RSTN_DFEDIG_SET(0),
SD25G_LANE_LANE_2E_LN_CFG_RSTN_DFEDIG,
priv,
SD25G_LANE_LANE_2E(sd_index));
sdx5_rmw(SD25G_LANE_LANE_2E_LN_CFG_RSTN_DFEDIG_SET(1),
SD25G_LANE_LANE_2E_LN_CFG_RSTN_DFEDIG,
priv,
SD25G_LANE_LANE_2E(sd_index));
sdx5_rmw(SD_LANE_25G_SD_LANE_CFG_MACRO_RST_SET(0),
SD_LANE_25G_SD_LANE_CFG_MACRO_RST,
priv,
SD_LANE_25G_SD_LANE_CFG(sd_index));
sdx5_rmw(SD25G_LANE_LANE_1C_LN_CFG_CDR_RSTN_SET(0),
SD25G_LANE_LANE_1C_LN_CFG_CDR_RSTN,
priv,
SD25G_LANE_LANE_1C(sd_index));
usleep_range(1000, 2000);
sdx5_rmw(SD25G_LANE_LANE_1C_LN_CFG_CDR_RSTN_SET(1),
SD25G_LANE_LANE_1C_LN_CFG_CDR_RSTN,
priv,
SD25G_LANE_LANE_1C(sd_index));
usleep_range(10000, 20000);
sdx5_rmw(SD25G_LANE_CMU_FF_REGISTER_TABLE_INDEX_SET(0xff),
SD25G_LANE_CMU_FF_REGISTER_TABLE_INDEX,
priv,
SD25G_LANE_CMU_FF(sd_index));
value = readl(sdx5_addr(regs, SD25G_LANE_CMU_C0(sd_index)));
value = SD25G_LANE_CMU_C0_PLL_LOL_UDL_GET(value);
if (value) {
dev_err(dev, "25G PLL Loss of Lock: 0x%x\n", value);
return -EINVAL;
}
value = readl(sdx5_addr(regs, SD_LANE_25G_SD_LANE_STAT(sd_index)));
value = SD_LANE_25G_SD_LANE_STAT_PMA_RST_DONE_GET(value);
if (value != 0x1) {
dev_err(dev, "25G PMA Reset failed: 0x%x\n", value);
return -EINVAL;
}
sdx5_rmw(SD25G_LANE_CMU_2A_R_DBG_LOL_STATUS_SET(0x1),
SD25G_LANE_CMU_2A_R_DBG_LOL_STATUS,
priv,
SD25G_LANE_CMU_2A(sd_index));
sdx5_rmw(SD_LANE_25G_SD_SER_RST_SER_RST_SET(0x0),
SD_LANE_25G_SD_SER_RST_SER_RST,
priv,
SD_LANE_25G_SD_SER_RST(sd_index));
sdx5_rmw(SD_LANE_25G_SD_DES_RST_DES_RST_SET(0x0),
SD_LANE_25G_SD_DES_RST_DES_RST,
priv,
SD_LANE_25G_SD_DES_RST(sd_index));
sdx5_rmw(SD25G_LANE_CMU_FF_REGISTER_TABLE_INDEX_SET(0),
SD25G_LANE_CMU_FF_REGISTER_TABLE_INDEX,
priv,
SD25G_LANE_CMU_FF(sd_index));
sdx5_rmw(SD25G_LANE_LANE_2D_LN_CFG_ALOS_THR_2_0_SET
(params->cfg_alos_thr_2_0),
SD25G_LANE_LANE_2D_LN_CFG_ALOS_THR_2_0,
priv,
SD25G_LANE_LANE_2D(sd_index));
sdx5_rmw(SD25G_LANE_LANE_2E_LN_CFG_DIS_SQ_SET(0),
SD25G_LANE_LANE_2E_LN_CFG_DIS_SQ,
priv,
SD25G_LANE_LANE_2E(sd_index));
sdx5_rmw(SD25G_LANE_LANE_2E_LN_CFG_PD_SQ_SET(0),
SD25G_LANE_LANE_2E_LN_CFG_PD_SQ,
priv,
SD25G_LANE_LANE_2E(sd_index));
return 0;
}
static void sparx5_sd10g28_reset(void __iomem *regs[], u32 lane_index)
{
/* Note: SerDes SD10G_LANE_1 is configured in 10G_LAN mode */
sdx5_rmw_addr(SD_LANE_SD_LANE_CFG_EXT_CFG_RST_SET(1),
SD_LANE_SD_LANE_CFG_EXT_CFG_RST,
sdx5_addr(regs, SD_LANE_SD_LANE_CFG(lane_index)));
usleep_range(1000, 2000);
sdx5_rmw_addr(SD_LANE_SD_LANE_CFG_EXT_CFG_RST_SET(0),
SD_LANE_SD_LANE_CFG_EXT_CFG_RST,
sdx5_addr(regs, SD_LANE_SD_LANE_CFG(lane_index)));
}
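/*
 * Program the prepared parameter set into a 6G/10G lane. The CMU feeding the
 * lane is configured first; the lane registers are then written while the
 * macro is held in reset, and the PMA reset status is checked before the
 * serializer and deserializer resets are released. Nothing is done when the
 * CMU configuration is to be skipped.
 */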
static int sparx5_sd10g28_apply_params(struct sparx5_serdes_macro *macro,
struct sparx5_sd10g28_params *params)
{
struct sparx5_serdes_private *priv = macro->priv;
void __iomem **regs = priv->regs;
struct device *dev = priv->dev;
u32 lane_index = macro->sidx;
u32 sd_index = macro->stpidx;
void __iomem *sd_inst;
u32 value, cmu_idx;
int err;
/* Do not configure serdes if CMU is not to be configured too */
if (params->skip_cmu_cfg)
return 0;
cmu_idx = sparx5_serdes_cmu_get(params->cmu_sel, lane_index);
err = sparx5_cmu_cfg(priv, cmu_idx);
if (err)
return err;
if (params->is_6g)
sd_inst = sdx5_inst_get(priv, TARGET_SD6G_LANE, sd_index);
else
sd_inst = sdx5_inst_get(priv, TARGET_SD10G_LANE, sd_index);
sdx5_rmw(SD_LANE_SD_LANE_CFG_MACRO_RST_SET(1),
SD_LANE_SD_LANE_CFG_MACRO_RST,
priv,
SD_LANE_SD_LANE_CFG(lane_index));
sdx5_inst_rmw(SD10G_LANE_LANE_93_R_DWIDTHCTRL_FROM_HWT_SET(0x0) |
SD10G_LANE_LANE_93_R_REG_MANUAL_SET(0x1) |
SD10G_LANE_LANE_93_R_AUXCKSEL_FROM_HWT_SET(0x1) |
SD10G_LANE_LANE_93_R_LANE_ID_FROM_HWT_SET(0x1) |
SD10G_LANE_LANE_93_R_EN_RATECHG_CTRL_SET(0x0),
SD10G_LANE_LANE_93_R_DWIDTHCTRL_FROM_HWT |
SD10G_LANE_LANE_93_R_REG_MANUAL |
SD10G_LANE_LANE_93_R_AUXCKSEL_FROM_HWT |
SD10G_LANE_LANE_93_R_LANE_ID_FROM_HWT |
SD10G_LANE_LANE_93_R_EN_RATECHG_CTRL,
sd_inst,
SD10G_LANE_LANE_93(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_94_R_ISCAN_REG_SET(0x1) |
SD10G_LANE_LANE_94_R_TXEQ_REG_SET(0x1) |
SD10G_LANE_LANE_94_R_MISC_REG_SET(0x1) |
SD10G_LANE_LANE_94_R_SWING_REG_SET(0x1),
SD10G_LANE_LANE_94_R_ISCAN_REG |
SD10G_LANE_LANE_94_R_TXEQ_REG |
SD10G_LANE_LANE_94_R_MISC_REG |
SD10G_LANE_LANE_94_R_SWING_REG,
sd_inst,
SD10G_LANE_LANE_94(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_9E_R_RXEQ_REG_SET(0x1),
SD10G_LANE_LANE_9E_R_RXEQ_REG,
sd_inst,
SD10G_LANE_LANE_9E(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_A1_R_SSC_FROM_HWT_SET(0x0) |
SD10G_LANE_LANE_A1_R_CDR_FROM_HWT_SET(0x0) |
SD10G_LANE_LANE_A1_R_PCLK_GATING_FROM_HWT_SET(0x1),
SD10G_LANE_LANE_A1_R_SSC_FROM_HWT |
SD10G_LANE_LANE_A1_R_CDR_FROM_HWT |
SD10G_LANE_LANE_A1_R_PCLK_GATING_FROM_HWT,
sd_inst,
SD10G_LANE_LANE_A1(sd_index));
sdx5_rmw(SD_LANE_SD_LANE_CFG_RX_REF_SEL_SET(params->cmu_sel) |
SD_LANE_SD_LANE_CFG_TX_REF_SEL_SET(params->cmu_sel),
SD_LANE_SD_LANE_CFG_RX_REF_SEL |
SD_LANE_SD_LANE_CFG_TX_REF_SEL,
priv,
SD_LANE_SD_LANE_CFG(lane_index));
sdx5_inst_rmw(SD10G_LANE_LANE_40_CFG_LANE_RESERVE_7_0_SET
(params->cfg_lane_reserve_7_0),
SD10G_LANE_LANE_40_CFG_LANE_RESERVE_7_0,
sd_inst,
SD10G_LANE_LANE_40(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_50_CFG_SSC_RTL_CLK_SEL_SET
(params->cfg_ssc_rtl_clk_sel),
SD10G_LANE_LANE_50_CFG_SSC_RTL_CLK_SEL,
sd_inst,
SD10G_LANE_LANE_50(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_35_CFG_TXRATE_1_0_SET
(params->cfg_txrate_1_0) |
SD10G_LANE_LANE_35_CFG_RXRATE_1_0_SET
(params->cfg_rxrate_1_0),
SD10G_LANE_LANE_35_CFG_TXRATE_1_0 |
SD10G_LANE_LANE_35_CFG_RXRATE_1_0,
sd_inst,
SD10G_LANE_LANE_35(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_94_R_DWIDTHCTRL_2_0_SET
(params->r_d_width_ctrl_2_0),
SD10G_LANE_LANE_94_R_DWIDTHCTRL_2_0,
sd_inst,
SD10G_LANE_LANE_94(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_01_CFG_PMA_TX_CK_BITWIDTH_2_0_SET
(params->cfg_pma_tx_ck_bitwidth_2_0),
SD10G_LANE_LANE_01_CFG_PMA_TX_CK_BITWIDTH_2_0,
sd_inst,
SD10G_LANE_LANE_01(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_30_CFG_RXDIV_SEL_2_0_SET
(params->cfg_rxdiv_sel_2_0),
SD10G_LANE_LANE_30_CFG_RXDIV_SEL_2_0,
sd_inst,
SD10G_LANE_LANE_30(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_A2_R_PCS2PMA_PHYMODE_4_0_SET
(params->r_pcs2pma_phymode_4_0),
SD10G_LANE_LANE_A2_R_PCS2PMA_PHYMODE_4_0,
sd_inst,
SD10G_LANE_LANE_A2(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_13_CFG_CDRCK_EN_SET(params->cfg_cdrck_en),
SD10G_LANE_LANE_13_CFG_CDRCK_EN,
sd_inst,
SD10G_LANE_LANE_13(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_23_CFG_DFECK_EN_SET
(params->cfg_dfeck_en) |
SD10G_LANE_LANE_23_CFG_DFE_PD_SET(params->cfg_dfe_pd) |
SD10G_LANE_LANE_23_CFG_ERRAMP_PD_SET
(params->cfg_erramp_pd),
SD10G_LANE_LANE_23_CFG_DFECK_EN |
SD10G_LANE_LANE_23_CFG_DFE_PD |
SD10G_LANE_LANE_23_CFG_ERRAMP_PD,
sd_inst,
SD10G_LANE_LANE_23(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_22_CFG_DFETAP_EN_5_1_SET
(params->cfg_dfetap_en_5_1),
SD10G_LANE_LANE_22_CFG_DFETAP_EN_5_1,
sd_inst,
SD10G_LANE_LANE_22(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_1A_CFG_PI_DFE_EN_SET
(params->cfg_pi_DFE_en),
SD10G_LANE_LANE_1A_CFG_PI_DFE_EN,
sd_inst,
SD10G_LANE_LANE_1A(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_02_CFG_EN_ADV_SET(params->cfg_en_adv) |
SD10G_LANE_LANE_02_CFG_EN_MAIN_SET(params->cfg_en_main) |
SD10G_LANE_LANE_02_CFG_EN_DLY_SET(params->cfg_en_dly) |
SD10G_LANE_LANE_02_CFG_TAP_ADV_3_0_SET
(params->cfg_tap_adv_3_0),
SD10G_LANE_LANE_02_CFG_EN_ADV |
SD10G_LANE_LANE_02_CFG_EN_MAIN |
SD10G_LANE_LANE_02_CFG_EN_DLY |
SD10G_LANE_LANE_02_CFG_TAP_ADV_3_0,
sd_inst,
SD10G_LANE_LANE_02(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_03_CFG_TAP_MAIN_SET(params->cfg_tap_main),
SD10G_LANE_LANE_03_CFG_TAP_MAIN,
sd_inst,
SD10G_LANE_LANE_03(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_04_CFG_TAP_DLY_4_0_SET
(params->cfg_tap_dly_4_0),
SD10G_LANE_LANE_04_CFG_TAP_DLY_4_0,
sd_inst,
SD10G_LANE_LANE_04(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_2F_CFG_VGA_CTRL_3_0_SET
(params->cfg_vga_ctrl_3_0),
SD10G_LANE_LANE_2F_CFG_VGA_CTRL_3_0,
sd_inst,
SD10G_LANE_LANE_2F(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_2F_CFG_VGA_CP_2_0_SET
(params->cfg_vga_cp_2_0),
SD10G_LANE_LANE_2F_CFG_VGA_CP_2_0,
sd_inst,
SD10G_LANE_LANE_2F(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_0B_CFG_EQ_RES_3_0_SET
(params->cfg_eq_res_3_0),
SD10G_LANE_LANE_0B_CFG_EQ_RES_3_0,
sd_inst,
SD10G_LANE_LANE_0B(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_0D_CFG_EQR_BYP_SET(params->cfg_eq_r_byp),
SD10G_LANE_LANE_0D_CFG_EQR_BYP,
sd_inst,
SD10G_LANE_LANE_0D(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_0E_CFG_EQC_FORCE_3_0_SET
(params->cfg_eq_c_force_3_0) |
SD10G_LANE_LANE_0E_CFG_SUM_SETCM_EN_SET
(params->cfg_sum_setcm_en),
SD10G_LANE_LANE_0E_CFG_EQC_FORCE_3_0 |
SD10G_LANE_LANE_0E_CFG_SUM_SETCM_EN,
sd_inst,
SD10G_LANE_LANE_0E(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_23_CFG_EN_DFEDIG_SET
(params->cfg_en_dfedig),
SD10G_LANE_LANE_23_CFG_EN_DFEDIG,
sd_inst,
SD10G_LANE_LANE_23(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_06_CFG_EN_PREEMPH_SET
(params->cfg_en_preemph),
SD10G_LANE_LANE_06_CFG_EN_PREEMPH,
sd_inst,
SD10G_LANE_LANE_06(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_33_CFG_ITX_IPPREEMP_BASE_1_0_SET
(params->cfg_itx_ippreemp_base_1_0) |
SD10G_LANE_LANE_33_CFG_ITX_IPDRIVER_BASE_2_0_SET
(params->cfg_itx_ipdriver_base_2_0),
SD10G_LANE_LANE_33_CFG_ITX_IPPREEMP_BASE_1_0 |
SD10G_LANE_LANE_33_CFG_ITX_IPDRIVER_BASE_2_0,
sd_inst,
SD10G_LANE_LANE_33(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_52_CFG_IBIAS_TUNE_RESERVE_5_0_SET
(params->cfg_ibias_tune_reserve_5_0),
SD10G_LANE_LANE_52_CFG_IBIAS_TUNE_RESERVE_5_0,
sd_inst,
SD10G_LANE_LANE_52(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_37_CFG_TXSWING_HALF_SET
(params->cfg_txswing_half),
SD10G_LANE_LANE_37_CFG_TXSWING_HALF,
sd_inst,
SD10G_LANE_LANE_37(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_3C_CFG_DIS_2NDORDER_SET
(params->cfg_dis_2nd_order),
SD10G_LANE_LANE_3C_CFG_DIS_2NDORDER,
sd_inst,
SD10G_LANE_LANE_3C(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_39_CFG_RX_SSC_LH_SET
(params->cfg_rx_ssc_lh),
SD10G_LANE_LANE_39_CFG_RX_SSC_LH,
sd_inst,
SD10G_LANE_LANE_39(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_1A_CFG_PI_FLOOP_STEPS_1_0_SET
(params->cfg_pi_floop_steps_1_0),
SD10G_LANE_LANE_1A_CFG_PI_FLOOP_STEPS_1_0,
sd_inst,
SD10G_LANE_LANE_1A(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_16_CFG_PI_EXT_DAC_23_16_SET
(params->cfg_pi_ext_dac_23_16),
SD10G_LANE_LANE_16_CFG_PI_EXT_DAC_23_16,
sd_inst,
SD10G_LANE_LANE_16(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_15_CFG_PI_EXT_DAC_15_8_SET
(params->cfg_pi_ext_dac_15_8),
SD10G_LANE_LANE_15_CFG_PI_EXT_DAC_15_8,
sd_inst,
SD10G_LANE_LANE_15(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_26_CFG_ISCAN_EXT_DAC_7_0_SET
(params->cfg_iscan_ext_dac_7_0),
SD10G_LANE_LANE_26_CFG_ISCAN_EXT_DAC_7_0,
sd_inst,
SD10G_LANE_LANE_26(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_42_CFG_CDR_KF_GEN1_2_0_SET
(params->cfg_cdr_kf_gen1_2_0),
SD10G_LANE_LANE_42_CFG_CDR_KF_GEN1_2_0,
sd_inst,
SD10G_LANE_LANE_42(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_0F_R_CDR_M_GEN1_7_0_SET
(params->r_cdr_m_gen1_7_0),
SD10G_LANE_LANE_0F_R_CDR_M_GEN1_7_0,
sd_inst,
SD10G_LANE_LANE_0F(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_24_CFG_PI_BW_GEN1_3_0_SET
(params->cfg_pi_bw_gen1_3_0),
SD10G_LANE_LANE_24_CFG_PI_BW_GEN1_3_0,
sd_inst,
SD10G_LANE_LANE_24(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_14_CFG_PI_EXT_DAC_7_0_SET
(params->cfg_pi_ext_dac_7_0),
SD10G_LANE_LANE_14_CFG_PI_EXT_DAC_7_0,
sd_inst,
SD10G_LANE_LANE_14(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_1A_CFG_PI_STEPS_SET(params->cfg_pi_steps),
SD10G_LANE_LANE_1A_CFG_PI_STEPS,
sd_inst,
SD10G_LANE_LANE_1A(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_3A_CFG_MP_MAX_3_0_SET
(params->cfg_mp_max_3_0),
SD10G_LANE_LANE_3A_CFG_MP_MAX_3_0,
sd_inst,
SD10G_LANE_LANE_3A(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_31_CFG_RSTN_DFEDIG_SET
(params->cfg_rstn_dfedig),
SD10G_LANE_LANE_31_CFG_RSTN_DFEDIG,
sd_inst,
SD10G_LANE_LANE_31(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_48_CFG_ALOS_THR_3_0_SET
(params->cfg_alos_thr_3_0),
SD10G_LANE_LANE_48_CFG_ALOS_THR_3_0,
sd_inst,
SD10G_LANE_LANE_48(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_36_CFG_PREDRV_SLEWRATE_1_0_SET
(params->cfg_predrv_slewrate_1_0),
SD10G_LANE_LANE_36_CFG_PREDRV_SLEWRATE_1_0,
sd_inst,
SD10G_LANE_LANE_36(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_32_CFG_ITX_IPCML_BASE_1_0_SET
(params->cfg_itx_ipcml_base_1_0),
SD10G_LANE_LANE_32_CFG_ITX_IPCML_BASE_1_0,
sd_inst,
SD10G_LANE_LANE_32(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_37_CFG_IP_PRE_BASE_1_0_SET
(params->cfg_ip_pre_base_1_0),
SD10G_LANE_LANE_37_CFG_IP_PRE_BASE_1_0,
sd_inst,
SD10G_LANE_LANE_37(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_41_CFG_LANE_RESERVE_15_8_SET
(params->cfg_lane_reserve_15_8),
SD10G_LANE_LANE_41_CFG_LANE_RESERVE_15_8,
sd_inst,
SD10G_LANE_LANE_41(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_9E_R_EN_AUTO_CDR_RSTN_SET
(params->r_en_auto_cdr_rstn),
SD10G_LANE_LANE_9E_R_EN_AUTO_CDR_RSTN,
sd_inst,
SD10G_LANE_LANE_9E(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_0C_CFG_OSCAL_AFE_SET
(params->cfg_oscal_afe) |
SD10G_LANE_LANE_0C_CFG_PD_OSDAC_AFE_SET
(params->cfg_pd_osdac_afe),
SD10G_LANE_LANE_0C_CFG_OSCAL_AFE |
SD10G_LANE_LANE_0C_CFG_PD_OSDAC_AFE,
sd_inst,
SD10G_LANE_LANE_0C(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_0B_CFG_RESETB_OSCAL_AFE_SET
(params->cfg_resetb_oscal_afe[0]),
SD10G_LANE_LANE_0B_CFG_RESETB_OSCAL_AFE,
sd_inst,
SD10G_LANE_LANE_0B(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_0B_CFG_RESETB_OSCAL_AFE_SET
(params->cfg_resetb_oscal_afe[1]),
SD10G_LANE_LANE_0B_CFG_RESETB_OSCAL_AFE,
sd_inst,
SD10G_LANE_LANE_0B(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_83_R_TX_POL_INV_SET
(params->r_tx_pol_inv) |
SD10G_LANE_LANE_83_R_RX_POL_INV_SET
(params->r_rx_pol_inv),
SD10G_LANE_LANE_83_R_TX_POL_INV |
SD10G_LANE_LANE_83_R_RX_POL_INV,
sd_inst,
SD10G_LANE_LANE_83(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_06_CFG_RX2TX_LP_EN_SET
(params->cfg_rx2tx_lp_en) |
SD10G_LANE_LANE_06_CFG_TX2RX_LP_EN_SET
(params->cfg_tx2rx_lp_en),
SD10G_LANE_LANE_06_CFG_RX2TX_LP_EN |
SD10G_LANE_LANE_06_CFG_TX2RX_LP_EN,
sd_inst,
SD10G_LANE_LANE_06(sd_index));
sdx5_inst_rmw(SD10G_LANE_LANE_0E_CFG_RXLB_EN_SET(params->cfg_rxlb_en) |
SD10G_LANE_LANE_0E_CFG_TXLB_EN_SET(params->cfg_txlb_en),
SD10G_LANE_LANE_0E_CFG_RXLB_EN |
SD10G_LANE_LANE_0E_CFG_TXLB_EN,
sd_inst,
SD10G_LANE_LANE_0E(sd_index));
sdx5_rmw(SD_LANE_SD_LANE_CFG_MACRO_RST_SET(0),
SD_LANE_SD_LANE_CFG_MACRO_RST,
priv,
SD_LANE_SD_LANE_CFG(lane_index));
sdx5_inst_rmw(SD10G_LANE_LANE_50_CFG_SSC_RESETB_SET(1),
SD10G_LANE_LANE_50_CFG_SSC_RESETB,
sd_inst,
SD10G_LANE_LANE_50(sd_index));
sdx5_rmw(SD10G_LANE_LANE_50_CFG_SSC_RESETB_SET(1),
SD10G_LANE_LANE_50_CFG_SSC_RESETB,
priv,
SD10G_LANE_LANE_50(sd_index));
sdx5_rmw(SD_LANE_MISC_SD_125_RST_DIS_SET(params->fx_100),
SD_LANE_MISC_SD_125_RST_DIS,
priv,
SD_LANE_MISC(lane_index));
sdx5_rmw(SD_LANE_MISC_RX_ENA_SET(params->fx_100),
SD_LANE_MISC_RX_ENA,
priv,
SD_LANE_MISC(lane_index));
sdx5_rmw(SD_LANE_MISC_MUX_ENA_SET(params->fx_100),
SD_LANE_MISC_MUX_ENA,
priv,
SD_LANE_MISC(lane_index));
usleep_range(3000, 6000);
value = readl(sdx5_addr(regs, SD_LANE_SD_LANE_STAT(lane_index)));
value = SD_LANE_SD_LANE_STAT_PMA_RST_DONE_GET(value);
if (value != 1) {
dev_err(dev, "10G PMA Reset failed: 0x%x\n", value);
return -EINVAL;
}
sdx5_rmw(SD_LANE_SD_SER_RST_SER_RST_SET(0x0),
SD_LANE_SD_SER_RST_SER_RST,
priv,
SD_LANE_SD_SER_RST(lane_index));
sdx5_rmw(SD_LANE_SD_DES_RST_DES_RST_SET(0x0),
SD_LANE_SD_DES_RST_DES_RST,
priv,
SD_LANE_SD_DES_RST(lane_index));
return 0;
}
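/*
 * Configure a 25G macro: select media and mode presets, derive the parameter
 * set, optionally reset the lane and apply the parameters.
 */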
static int sparx5_sd25g28_config(struct sparx5_serdes_macro *macro, bool reset)
{
struct sparx5_sd25g28_media_preset media = media_presets_25g[macro->media];
struct sparx5_sd25g28_mode_preset mode;
struct sparx5_sd25g28_args args = {
.rxinvert = 1,
.txinvert = 0,
.txswing = 240,
.com_pll_reserve = 0xf,
.reg_rst = reset,
};
struct sparx5_sd25g28_params params;
int err;
err = sparx5_sd10g25_get_mode_preset(macro, &mode);
if (err)
return err;
sparx5_sd25g28_get_params(macro, &media, &mode, &args, &params);
sparx5_sd25g28_reset(macro->priv->regs, &params, macro->stpidx);
return sparx5_sd25g28_apply_params(macro, &params);
}
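/*
 * Configure a 6G/10G macro: select media and mode presets, derive the
 * parameter set, pulse the lane reset and apply the parameters.
 */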
static int sparx5_sd10g28_config(struct sparx5_serdes_macro *macro, bool reset)
{
struct sparx5_sd10g28_media_preset media = media_presets_10g[macro->media];
struct sparx5_sd10g28_mode_preset mode;
struct sparx5_sd10g28_params params;
struct sparx5_sd10g28_args args = {
.is_6g = (macro->serdestype == SPX5_SDT_6G),
.txinvert = 0,
.rxinvert = 1,
.txswing = 240,
.reg_rst = reset,
.skip_cmu_cfg = reset,
};
int err;
err = sparx5_sd10g28_get_mode_preset(macro, &mode, &args);
if (err)
return err;
sparx5_sd10g28_get_params(macro, &media, &mode, &args, &params);
sparx5_sd10g28_reset(macro->priv->regs, macro->sidx);
return sparx5_sd10g28_apply_params(macro, &params);
}
/* Power down serdes TX driver */
static int sparx5_serdes_power_save(struct sparx5_serdes_macro *macro, u32 pwdn)
{
struct sparx5_serdes_private *priv = macro->priv;
void __iomem *sd_inst, *sd_lane_inst;
if (macro->serdestype == SPX5_SDT_6G)
sd_inst = sdx5_inst_get(priv, TARGET_SD6G_LANE, macro->stpidx);
else if (macro->serdestype == SPX5_SDT_10G)
sd_inst = sdx5_inst_get(priv, TARGET_SD10G_LANE, macro->stpidx);
else
sd_inst = sdx5_inst_get(priv, TARGET_SD25G_LANE, macro->stpidx);
if (macro->serdestype == SPX5_SDT_25G) {
sd_lane_inst = sdx5_inst_get(priv, TARGET_SD_LANE_25G,
macro->stpidx);
/* Take serdes out of reset */
sdx5_inst_rmw(SD_LANE_25G_SD_LANE_CFG_EXT_CFG_RST_SET(0),
SD_LANE_25G_SD_LANE_CFG_EXT_CFG_RST, sd_lane_inst,
SD_LANE_25G_SD_LANE_CFG(0));
/* Configure optimal settings for quiet mode */
sdx5_inst_rmw(SD_LANE_25G_QUIET_MODE_6G_QUIET_MODE_SET(SPX5_SERDES_QUIET_MODE_VAL),
SD_LANE_25G_QUIET_MODE_6G_QUIET_MODE,
sd_lane_inst, SD_LANE_25G_QUIET_MODE_6G(0));
sdx5_inst_rmw(SD25G_LANE_LANE_04_LN_CFG_PD_DRIVER_SET(pwdn),
SD25G_LANE_LANE_04_LN_CFG_PD_DRIVER,
sd_inst,
SD25G_LANE_LANE_04(0));
} else {
/* 6G and 10G */
sd_lane_inst = sdx5_inst_get(priv, TARGET_SD_LANE, macro->sidx);
/* Take serdes out of reset */
sdx5_inst_rmw(SD_LANE_SD_LANE_CFG_EXT_CFG_RST_SET(0),
SD_LANE_SD_LANE_CFG_EXT_CFG_RST, sd_lane_inst,
SD_LANE_SD_LANE_CFG(0));
/* Configure optimal settings for quiet mode */
sdx5_inst_rmw(SD_LANE_QUIET_MODE_6G_QUIET_MODE_SET(SPX5_SERDES_QUIET_MODE_VAL),
SD_LANE_QUIET_MODE_6G_QUIET_MODE, sd_lane_inst,
SD_LANE_QUIET_MODE_6G(0));
sdx5_inst_rmw(SD10G_LANE_LANE_06_CFG_PD_DRIVER_SET(pwdn),
SD10G_LANE_LANE_06_CFG_PD_DRIVER,
sd_inst,
SD10G_LANE_LANE_06(0));
}
return 0;
}
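/*
 * Tell the lane logic which core clock frequency is in use; only needed in
 * 100FX mode.
 */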
static int sparx5_serdes_clock_config(struct sparx5_serdes_macro *macro)
{
struct sparx5_serdes_private *priv = macro->priv;
if (macro->serdesmode == SPX5_SD_MODE_100FX) {
u32 freq = priv->coreclock == 250000000 ? 2 :
priv->coreclock == 500000000 ? 1 : 0;
sdx5_rmw(SD_LANE_MISC_CORE_CLK_FREQ_SET(freq),
SD_LANE_MISC_CORE_CLK_FREQ,
priv,
SD_LANE_MISC(macro->sidx));
}
return 0;
}
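/*
 * Map a phy interface mode and speed to the internal serdes mode; SGMII
 * reuses the 1000BASEX mode and 10GBASE-R maps to SFI.
 */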
static int sparx5_serdes_get_serdesmode(phy_interface_t portmode, int speed)
{
switch (portmode) {
case PHY_INTERFACE_MODE_1000BASEX:
case PHY_INTERFACE_MODE_2500BASEX:
if (speed == SPEED_2500)
return SPX5_SD_MODE_2G5;
if (speed == SPEED_100)
return SPX5_SD_MODE_100FX;
return SPX5_SD_MODE_1000BASEX;
case PHY_INTERFACE_MODE_SGMII:
/* The same Serdes mode is used for both SGMII and 1000BaseX */
return SPX5_SD_MODE_1000BASEX;
case PHY_INTERFACE_MODE_QSGMII:
return SPX5_SD_MODE_QSGMII;
case PHY_INTERFACE_MODE_10GBASER:
return SPX5_SD_MODE_SFI;
default:
return -EINVAL;
}
}
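/*
 * (Re)configure a macro for its current port mode and speed: resolve the
 * serdes mode, apply the clock configuration and run the 25G or 6G/10G
 * specific setup.
 */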
static int sparx5_serdes_config(struct sparx5_serdes_macro *macro)
{
struct device *dev = macro->priv->dev;
int serdesmode;
int err;
serdesmode = sparx5_serdes_get_serdesmode(macro->portmode, macro->speed);
if (serdesmode < 0) {
dev_err(dev, "SerDes %u, interface not supported: %s\n",
macro->sidx,
phy_modes(macro->portmode));
return serdesmode;
}
macro->serdesmode = serdesmode;
sparx5_serdes_clock_config(macro);
if (macro->serdestype == SPX5_SDT_25G)
err = sparx5_sd25g28_config(macro, false);
else
err = sparx5_sd10g28_config(macro, false);
if (err) {
dev_err(dev, "SerDes %u, config error: %d\n",
macro->sidx, err);
}
return err;
}
static int sparx5_serdes_power_on(struct phy *phy)
{
struct sparx5_serdes_macro *macro = phy_get_drvdata(phy);
return sparx5_serdes_power_save(macro, false);
}
static int sparx5_serdes_power_off(struct phy *phy)
{
struct sparx5_serdes_macro *macro = phy_get_drvdata(phy);
return sparx5_serdes_power_save(macro, true);
}
static int sparx5_serdes_set_mode(struct phy *phy, enum phy_mode mode, int submode)
{
struct sparx5_serdes_macro *macro;
if (mode != PHY_MODE_ETHERNET)
return -EINVAL;
switch (submode) {
case PHY_INTERFACE_MODE_1000BASEX:
case PHY_INTERFACE_MODE_2500BASEX:
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_QSGMII:
case PHY_INTERFACE_MODE_10GBASER:
macro = phy_get_drvdata(phy);
macro->portmode = submode;
sparx5_serdes_config(macro);
return 0;
default:
return -EINVAL;
}
}
static int sparx5_serdes_set_media(struct phy *phy, enum phy_media media)
{
struct sparx5_serdes_macro *macro = phy_get_drvdata(phy);
if (media != macro->media) {
macro->media = media;
if (macro->serdesmode != SPX5_SD_MODE_NONE)
sparx5_serdes_config(macro);
}
return 0;
}
static int sparx5_serdes_set_speed(struct phy *phy, int speed)
{
struct sparx5_serdes_macro *macro = phy_get_drvdata(phy);
if (macro->sidx < SPX5_SERDES_10G_START && speed > SPEED_5000)
return -EINVAL;
if (macro->sidx < SPX5_SERDES_25G_START && speed > SPEED_10000)
return -EINVAL;
if (speed != macro->speed) {
macro->speed = speed;
if (macro->serdesmode != SPX5_SD_MODE_NONE)
sparx5_serdes_config(macro);
}
return 0;
}
static int sparx5_serdes_reset(struct phy *phy)
{
struct sparx5_serdes_macro *macro = phy_get_drvdata(phy);
int err;
if (macro->serdestype == SPX5_SDT_25G)
err = sparx5_sd25g28_config(macro, true);
else
err = sparx5_sd10g28_config(macro, true);
if (err) {
dev_err(&phy->dev, "SerDes %u, reset error: %d\n",
macro->sidx, err);
}
return err;
}
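/*
 * Check that the requested submode is supported by this macro at its
 * currently configured speed.
 */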
static int sparx5_serdes_validate(struct phy *phy, enum phy_mode mode,
int submode,
union phy_configure_opts *opts)
{
struct sparx5_serdes_macro *macro = phy_get_drvdata(phy);
if (mode != PHY_MODE_ETHERNET)
return -EINVAL;
if (macro->speed == 0)
return -EINVAL;
if (macro->sidx < SPX5_SERDES_10G_START && macro->speed > SPEED_5000)
return -EINVAL;
if (macro->sidx < SPX5_SERDES_25G_START && macro->speed > SPEED_10000)
return -EINVAL;
switch (submode) {
case PHY_INTERFACE_MODE_1000BASEX:
if (macro->speed != SPEED_100 && /* This is for 100BASE-FX */
macro->speed != SPEED_1000)
return -EINVAL;
break;
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_2500BASEX:
case PHY_INTERFACE_MODE_QSGMII:
if (macro->speed >= SPEED_5000)
return -EINVAL;
break;
case PHY_INTERFACE_MODE_10GBASER:
if (macro->speed < SPEED_5000)
return -EINVAL;
break;
default:
return -EINVAL;
}
return 0;
}
static const struct phy_ops sparx5_serdes_ops = {
.power_on = sparx5_serdes_power_on,
.power_off = sparx5_serdes_power_off,
.set_mode = sparx5_serdes_set_mode,
.set_media = sparx5_serdes_set_media,
.set_speed = sparx5_serdes_set_speed,
.reset = sparx5_serdes_reset,
.validate = sparx5_serdes_validate,
.owner = THIS_MODULE,
};
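/*
 * Create one PHY and its macro state; the serdes type (6G, 10G or 25G) and
 * the index within that type are derived from the global serdes index. The
 * serdes is powered off by default.
 */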
static int sparx5_phy_create(struct sparx5_serdes_private *priv,
int idx, struct phy **phy)
{
struct sparx5_serdes_macro *macro;
*phy = devm_phy_create(priv->dev, NULL, &sparx5_serdes_ops);
if (IS_ERR(*phy))
return PTR_ERR(*phy);
macro = devm_kzalloc(priv->dev, sizeof(*macro), GFP_KERNEL);
if (!macro)
return -ENOMEM;
macro->sidx = idx;
macro->priv = priv;
macro->speed = SPEED_UNKNOWN;
if (idx < SPX5_SERDES_10G_START) {
macro->serdestype = SPX5_SDT_6G;
macro->stpidx = macro->sidx;
} else if (idx < SPX5_SERDES_25G_START) {
macro->serdestype = SPX5_SDT_10G;
macro->stpidx = macro->sidx - SPX5_SERDES_10G_START;
} else {
macro->serdestype = SPX5_SDT_25G;
macro->stpidx = macro->sidx - SPX5_SERDES_25G_START;
}
phy_set_drvdata(*phy, macro);
/* Power off serdes by default */
sparx5_serdes_power_off(*phy);
return 0;
}
static struct sparx5_serdes_io_resource sparx5_serdes_iomap[] = {
{ TARGET_SD_CMU, 0x0 }, /* 0x610808000: sd_cmu_0 */
{ TARGET_SD_CMU + 1, 0x8000 }, /* 0x610810000: sd_cmu_1 */
{ TARGET_SD_CMU + 2, 0x10000 }, /* 0x610818000: sd_cmu_2 */
{ TARGET_SD_CMU + 3, 0x18000 }, /* 0x610820000: sd_cmu_3 */
{ TARGET_SD_CMU + 4, 0x20000 }, /* 0x610828000: sd_cmu_4 */
{ TARGET_SD_CMU + 5, 0x28000 }, /* 0x610830000: sd_cmu_5 */
{ TARGET_SD_CMU + 6, 0x30000 }, /* 0x610838000: sd_cmu_6 */
{ TARGET_SD_CMU + 7, 0x38000 }, /* 0x610840000: sd_cmu_7 */
{ TARGET_SD_CMU + 8, 0x40000 }, /* 0x610848000: sd_cmu_8 */
{ TARGET_SD_CMU_CFG, 0x48000 }, /* 0x610850000: sd_cmu_cfg_0 */
{ TARGET_SD_CMU_CFG + 1, 0x50000 }, /* 0x610858000: sd_cmu_cfg_1 */
{ TARGET_SD_CMU_CFG + 2, 0x58000 }, /* 0x610860000: sd_cmu_cfg_2 */
{ TARGET_SD_CMU_CFG + 3, 0x60000 }, /* 0x610868000: sd_cmu_cfg_3 */
{ TARGET_SD_CMU_CFG + 4, 0x68000 }, /* 0x610870000: sd_cmu_cfg_4 */
{ TARGET_SD_CMU_CFG + 5, 0x70000 }, /* 0x610878000: sd_cmu_cfg_5 */
{ TARGET_SD_CMU_CFG + 6, 0x78000 }, /* 0x610880000: sd_cmu_cfg_6 */
{ TARGET_SD_CMU_CFG + 7, 0x80000 }, /* 0x610888000: sd_cmu_cfg_7 */
{ TARGET_SD_CMU_CFG + 8, 0x88000 }, /* 0x610890000: sd_cmu_cfg_8 */
{ TARGET_SD6G_LANE, 0x90000 }, /* 0x610898000: sd6g_lane_0 */
{ TARGET_SD6G_LANE + 1, 0x98000 }, /* 0x6108a0000: sd6g_lane_1 */
{ TARGET_SD6G_LANE + 2, 0xa0000 }, /* 0x6108a8000: sd6g_lane_2 */
{ TARGET_SD6G_LANE + 3, 0xa8000 }, /* 0x6108b0000: sd6g_lane_3 */
{ TARGET_SD6G_LANE + 4, 0xb0000 }, /* 0x6108b8000: sd6g_lane_4 */
{ TARGET_SD6G_LANE + 5, 0xb8000 }, /* 0x6108c0000: sd6g_lane_5 */
{ TARGET_SD6G_LANE + 6, 0xc0000 }, /* 0x6108c8000: sd6g_lane_6 */
{ TARGET_SD6G_LANE + 7, 0xc8000 }, /* 0x6108d0000: sd6g_lane_7 */
{ TARGET_SD6G_LANE + 8, 0xd0000 }, /* 0x6108d8000: sd6g_lane_8 */
{ TARGET_SD6G_LANE + 9, 0xd8000 }, /* 0x6108e0000: sd6g_lane_9 */
{ TARGET_SD6G_LANE + 10, 0xe0000 }, /* 0x6108e8000: sd6g_lane_10 */
{ TARGET_SD6G_LANE + 11, 0xe8000 }, /* 0x6108f0000: sd6g_lane_11 */
{ TARGET_SD6G_LANE + 12, 0xf0000 }, /* 0x6108f8000: sd6g_lane_12 */
{ TARGET_SD10G_LANE, 0xf8000 }, /* 0x610900000: sd10g_lane_0 */
{ TARGET_SD10G_LANE + 1, 0x100000 }, /* 0x610908000: sd10g_lane_1 */
{ TARGET_SD10G_LANE + 2, 0x108000 }, /* 0x610910000: sd10g_lane_2 */
{ TARGET_SD10G_LANE + 3, 0x110000 }, /* 0x610918000: sd10g_lane_3 */
{ TARGET_SD_LANE, 0x1a0000 }, /* 0x6109a8000: sd_lane_0 */
{ TARGET_SD_LANE + 1, 0x1a8000 }, /* 0x6109b0000: sd_lane_1 */
{ TARGET_SD_LANE + 2, 0x1b0000 }, /* 0x6109b8000: sd_lane_2 */
{ TARGET_SD_LANE + 3, 0x1b8000 }, /* 0x6109c0000: sd_lane_3 */
{ TARGET_SD_LANE + 4, 0x1c0000 }, /* 0x6109c8000: sd_lane_4 */
{ TARGET_SD_LANE + 5, 0x1c8000 }, /* 0x6109d0000: sd_lane_5 */
{ TARGET_SD_LANE + 6, 0x1d0000 }, /* 0x6109d8000: sd_lane_6 */
{ TARGET_SD_LANE + 7, 0x1d8000 }, /* 0x6109e0000: sd_lane_7 */
{ TARGET_SD_LANE + 8, 0x1e0000 }, /* 0x6109e8000: sd_lane_8 */
{ TARGET_SD_LANE + 9, 0x1e8000 }, /* 0x6109f0000: sd_lane_9 */
{ TARGET_SD_LANE + 10, 0x1f0000 }, /* 0x6109f8000: sd_lane_10 */
{ TARGET_SD_LANE + 11, 0x1f8000 }, /* 0x610a00000: sd_lane_11 */
{ TARGET_SD_LANE + 12, 0x200000 }, /* 0x610a08000: sd_lane_12 */
{ TARGET_SD_LANE + 13, 0x208000 }, /* 0x610a10000: sd_lane_13 */
{ TARGET_SD_LANE + 14, 0x210000 }, /* 0x610a18000: sd_lane_14 */
{ TARGET_SD_LANE + 15, 0x218000 }, /* 0x610a20000: sd_lane_15 */
{ TARGET_SD_LANE + 16, 0x220000 }, /* 0x610a28000: sd_lane_16 */
{ TARGET_SD_CMU + 9, 0x400000 }, /* 0x610c08000: sd_cmu_9 */
{ TARGET_SD_CMU + 10, 0x408000 }, /* 0x610c10000: sd_cmu_10 */
{ TARGET_SD_CMU + 11, 0x410000 }, /* 0x610c18000: sd_cmu_11 */
{ TARGET_SD_CMU + 12, 0x418000 }, /* 0x610c20000: sd_cmu_12 */
{ TARGET_SD_CMU + 13, 0x420000 }, /* 0x610c28000: sd_cmu_13 */
{ TARGET_SD_CMU_CFG + 9, 0x428000 }, /* 0x610c30000: sd_cmu_cfg_9 */
{ TARGET_SD_CMU_CFG + 10, 0x430000 }, /* 0x610c38000: sd_cmu_cfg_10 */
{ TARGET_SD_CMU_CFG + 11, 0x438000 }, /* 0x610c40000: sd_cmu_cfg_11 */
{ TARGET_SD_CMU_CFG + 12, 0x440000 }, /* 0x610c48000: sd_cmu_cfg_12 */
{ TARGET_SD_CMU_CFG + 13, 0x448000 }, /* 0x610c50000: sd_cmu_cfg_13 */
{ TARGET_SD10G_LANE + 4, 0x450000 }, /* 0x610c58000: sd10g_lane_4 */
{ TARGET_SD10G_LANE + 5, 0x458000 }, /* 0x610c60000: sd10g_lane_5 */
{ TARGET_SD10G_LANE + 6, 0x460000 }, /* 0x610c68000: sd10g_lane_6 */
{ TARGET_SD10G_LANE + 7, 0x468000 }, /* 0x610c70000: sd10g_lane_7 */
{ TARGET_SD10G_LANE + 8, 0x470000 }, /* 0x610c78000: sd10g_lane_8 */
{ TARGET_SD10G_LANE + 9, 0x478000 }, /* 0x610c80000: sd10g_lane_9 */
{ TARGET_SD10G_LANE + 10, 0x480000 }, /* 0x610c88000: sd10g_lane_10 */
{ TARGET_SD10G_LANE + 11, 0x488000 }, /* 0x610c90000: sd10g_lane_11 */
{ TARGET_SD25G_LANE, 0x490000 }, /* 0x610c98000: sd25g_lane_0 */
{ TARGET_SD25G_LANE + 1, 0x498000 }, /* 0x610ca0000: sd25g_lane_1 */
{ TARGET_SD25G_LANE + 2, 0x4a0000 }, /* 0x610ca8000: sd25g_lane_2 */
{ TARGET_SD25G_LANE + 3, 0x4a8000 }, /* 0x610cb0000: sd25g_lane_3 */
{ TARGET_SD25G_LANE + 4, 0x4b0000 }, /* 0x610cb8000: sd25g_lane_4 */
{ TARGET_SD25G_LANE + 5, 0x4b8000 }, /* 0x610cc0000: sd25g_lane_5 */
{ TARGET_SD25G_LANE + 6, 0x4c0000 }, /* 0x610cc8000: sd25g_lane_6 */
{ TARGET_SD25G_LANE + 7, 0x4c8000 }, /* 0x610cd0000: sd25g_lane_7 */
{ TARGET_SD_LANE + 17, 0x550000 }, /* 0x610d58000: sd_lane_17 */
{ TARGET_SD_LANE + 18, 0x558000 }, /* 0x610d60000: sd_lane_18 */
{ TARGET_SD_LANE + 19, 0x560000 }, /* 0x610d68000: sd_lane_19 */
{ TARGET_SD_LANE + 20, 0x568000 }, /* 0x610d70000: sd_lane_20 */
{ TARGET_SD_LANE + 21, 0x570000 }, /* 0x610d78000: sd_lane_21 */
{ TARGET_SD_LANE + 22, 0x578000 }, /* 0x610d80000: sd_lane_22 */
{ TARGET_SD_LANE + 23, 0x580000 }, /* 0x610d88000: sd_lane_23 */
{ TARGET_SD_LANE + 24, 0x588000 }, /* 0x610d90000: sd_lane_24 */
{ TARGET_SD_LANE_25G, 0x590000 }, /* 0x610d98000: sd_lane_25g_25 */
{ TARGET_SD_LANE_25G + 1, 0x598000 }, /* 0x610da0000: sd_lane_25g_26 */
{ TARGET_SD_LANE_25G + 2, 0x5a0000 }, /* 0x610da8000: sd_lane_25g_27 */
{ TARGET_SD_LANE_25G + 3, 0x5a8000 }, /* 0x610db0000: sd_lane_25g_28 */
{ TARGET_SD_LANE_25G + 4, 0x5b0000 }, /* 0x610db8000: sd_lane_25g_29 */
{ TARGET_SD_LANE_25G + 5, 0x5b8000 }, /* 0x610dc0000: sd_lane_25g_30 */
{ TARGET_SD_LANE_25G + 6, 0x5c0000 }, /* 0x610dc8000: sd_lane_25g_31 */
{ TARGET_SD_LANE_25G + 7, 0x5c8000 }, /* 0x610dd0000: sd_lane_25g_32 */
};
/* Client lookup function, uses serdes index */
static struct phy *sparx5_serdes_xlate(struct device *dev,
struct of_phandle_args *args)
{
struct sparx5_serdes_private *priv = dev_get_drvdata(dev);
int idx;
unsigned int sidx;
if (args->args_count != 1)
return ERR_PTR(-EINVAL);
sidx = args->args[0];
/* Check validity: ERR_PTR(-ENODEV) if not valid */
for (idx = 0; idx < SPX5_SERDES_MAX; idx++) {
struct sparx5_serdes_macro *macro =
phy_get_drvdata(priv->phys[idx]);
if (sidx != macro->sidx)
continue;
return priv->phys[idx];
}
return ERR_PTR(-ENODEV);
}
static int sparx5_serdes_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct sparx5_serdes_private *priv;
struct phy_provider *provider;
struct resource *iores;
void __iomem *iomem;
unsigned long clock;
struct clk *clk;
int idx;
int err;
if (!np && !pdev->dev.platform_data)
return -ENODEV;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
platform_set_drvdata(pdev, priv);
priv->dev = &pdev->dev;
/* Get coreclock */
clk = devm_clk_get(priv->dev, NULL);
if (IS_ERR(clk)) {
dev_err(priv->dev, "Failed to get coreclock\n");
return PTR_ERR(clk);
}
clock = clk_get_rate(clk);
if (clock == 0) {
dev_err(priv->dev, "Invalid coreclock %lu\n", clock);
return -EINVAL;
}
priv->coreclock = clock;
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!iores) {
dev_err(priv->dev, "Invalid resource\n");
return -EINVAL;
}
iomem = devm_ioremap(priv->dev, iores->start, resource_size(iores));
if (!iomem) {
dev_err(priv->dev, "Unable to get serdes registers: %s\n",
iores->name);
return -ENOMEM;
}
for (idx = 0; idx < ARRAY_SIZE(sparx5_serdes_iomap); idx++) {
struct sparx5_serdes_io_resource *iomap = &sparx5_serdes_iomap[idx];
priv->regs[iomap->id] = iomem + iomap->offset;
}
for (idx = 0; idx < SPX5_SERDES_MAX; idx++) {
err = sparx5_phy_create(priv, idx, &priv->phys[idx]);
if (err)
return err;
}
/* Power down all CMUs by default */
sparx5_serdes_cmu_power_off(priv);
provider = devm_of_phy_provider_register(priv->dev, sparx5_serdes_xlate);
return PTR_ERR_OR_ZERO(provider);
}
static const struct of_device_id sparx5_serdes_match[] = {
{ .compatible = "microchip,sparx5-serdes" },
{ }
};
MODULE_DEVICE_TABLE(of, sparx5_serdes_match);
static struct platform_driver sparx5_serdes_driver = {
.probe = sparx5_serdes_probe,
.driver = {
.name = "sparx5-serdes",
.of_match_table = sparx5_serdes_match,
},
};
module_platform_driver(sparx5_serdes_driver);
MODULE_DESCRIPTION("Microchip Sparx5 switch serdes driver");
MODULE_AUTHOR("Steen Hegelund <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/phy/microchip/sparx5_serdes.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Lantiq XWAY SoC RCU module based USB 1.1/2.0 PHY driver
*
* Copyright (C) 2016 Martin Blumenstingl <[email protected]>
* Copyright (C) 2017 Hauke Mehrtens <[email protected]>
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/reset.h>
/* Transmitter HS Pre-Emphasis Enable */
#define RCU_CFG1_TX_PEE BIT(0)
/* Disconnect Threshold */
#define RCU_CFG1_DIS_THR_MASK 0x00038000
#define RCU_CFG1_DIS_THR_SHIFT 15
struct ltq_rcu_usb2_bits {
u8 hostmode;
u8 slave_endianness;
u8 host_endianness;
bool have_ana_cfg;
};
struct ltq_rcu_usb2_priv {
struct regmap *regmap;
unsigned int phy_reg_offset;
unsigned int ana_cfg1_reg_offset;
const struct ltq_rcu_usb2_bits *reg_bits;
struct device *dev;
struct phy *phy;
struct clk *phy_gate_clk;
struct reset_control *ctrl_reset;
struct reset_control *phy_reset;
};
static const struct ltq_rcu_usb2_bits xway_rcu_usb2_reg_bits = {
.hostmode = 11,
.slave_endianness = 9,
.host_endianness = 10,
.have_ana_cfg = false,
};
static const struct ltq_rcu_usb2_bits xrx100_rcu_usb2_reg_bits = {
.hostmode = 11,
.slave_endianness = 17,
.host_endianness = 10,
.have_ana_cfg = false,
};
static const struct ltq_rcu_usb2_bits xrx200_rcu_usb2_reg_bits = {
.hostmode = 11,
.slave_endianness = 9,
.host_endianness = 10,
.have_ana_cfg = true,
};
static const struct of_device_id ltq_rcu_usb2_phy_of_match[] = {
{
.compatible = "lantiq,ase-usb2-phy",
.data = &xway_rcu_usb2_reg_bits,
},
{
.compatible = "lantiq,danube-usb2-phy",
.data = &xway_rcu_usb2_reg_bits,
},
{
.compatible = "lantiq,xrx100-usb2-phy",
.data = &xrx100_rcu_usb2_reg_bits,
},
{
.compatible = "lantiq,xrx200-usb2-phy",
.data = &xrx200_rcu_usb2_reg_bits,
},
{
.compatible = "lantiq,xrx300-usb2-phy",
.data = &xrx200_rcu_usb2_reg_bits,
},
{ },
};
MODULE_DEVICE_TABLE(of, ltq_rcu_usb2_phy_of_match);
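/*
 * Basic PHY setup: optional analog tuning (TX pre-emphasis and disconnect
 * threshold), host mode selection and DMA endianness configuration.
 */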
static int ltq_rcu_usb2_phy_init(struct phy *phy)
{
struct ltq_rcu_usb2_priv *priv = phy_get_drvdata(phy);
if (priv->reg_bits->have_ana_cfg) {
regmap_update_bits(priv->regmap, priv->ana_cfg1_reg_offset,
RCU_CFG1_TX_PEE, RCU_CFG1_TX_PEE);
regmap_update_bits(priv->regmap, priv->ana_cfg1_reg_offset,
RCU_CFG1_DIS_THR_MASK, 7 << RCU_CFG1_DIS_THR_SHIFT);
}
/* Configure core to host mode */
regmap_update_bits(priv->regmap, priv->phy_reg_offset,
BIT(priv->reg_bits->hostmode), 0);
/* Select DMA endianness (Host-endian: big-endian) */
regmap_update_bits(priv->regmap, priv->phy_reg_offset,
BIT(priv->reg_bits->slave_endianness), 0);
regmap_update_bits(priv->regmap, priv->phy_reg_offset,
BIT(priv->reg_bits->host_endianness),
BIT(priv->reg_bits->host_endianness));
return 0;
}
static int ltq_rcu_usb2_phy_power_on(struct phy *phy)
{
struct ltq_rcu_usb2_priv *priv = phy_get_drvdata(phy);
struct device *dev = priv->dev;
int ret;
reset_control_deassert(priv->phy_reset);
ret = clk_prepare_enable(priv->phy_gate_clk);
if (ret) {
dev_err(dev, "failed to enable PHY gate\n");
return ret;
}
/*
* at least the xrx200 usb2 phy requires some extra time to be
* operational after enabling the clock
*/
usleep_range(100, 200);
return ret;
}
static int ltq_rcu_usb2_phy_power_off(struct phy *phy)
{
struct ltq_rcu_usb2_priv *priv = phy_get_drvdata(phy);
reset_control_assert(priv->phy_reset);
clk_disable_unprepare(priv->phy_gate_clk);
return 0;
}
static const struct phy_ops ltq_rcu_usb2_phy_ops = {
.init = ltq_rcu_usb2_phy_init,
.power_on = ltq_rcu_usb2_phy_power_on,
.power_off = ltq_rcu_usb2_phy_power_off,
.owner = THIS_MODULE,
};
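/*
 * Consumer-side usage sketch (illustrative only, not part of this driver):
 * a USB controller node pointing at this PHY would typically end up calling
 *
 *	phy = devm_phy_get(dev, "usb2-phy");
 *	ret = phy_init(phy);		- invokes ltq_rcu_usb2_phy_init()
 *	if (!ret)
 *		ret = phy_power_on(phy);	- invokes ltq_rcu_usb2_phy_power_on()
 *
 * The "usb2-phy" consumer name is only an assumption made for this example.
 */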
static int ltq_rcu_usb2_of_parse(struct ltq_rcu_usb2_priv *priv,
struct platform_device *pdev)
{
struct device *dev = priv->dev;
const __be32 *offset;
priv->reg_bits = of_device_get_match_data(dev);
priv->regmap = syscon_node_to_regmap(dev->of_node->parent);
if (IS_ERR(priv->regmap)) {
dev_err(dev, "Failed to lookup RCU regmap\n");
return PTR_ERR(priv->regmap);
}
offset = of_get_address(dev->of_node, 0, NULL, NULL);
if (!offset) {
dev_err(dev, "Failed to get RCU PHY reg offset\n");
return -ENOENT;
}
priv->phy_reg_offset = __be32_to_cpu(*offset);
if (priv->reg_bits->have_ana_cfg) {
offset = of_get_address(dev->of_node, 1, NULL, NULL);
if (!offset) {
dev_err(dev, "Failed to get RCU ANA CFG1 reg offset\n");
return -ENOENT;
}
priv->ana_cfg1_reg_offset = __be32_to_cpu(*offset);
}
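	/*
	 * Note: the offsets parsed above come from the node's "reg" entries,
	 * interpreted as offsets into the parent RCU syscon rather than CPU
	 * addresses, i.e. roughly (placeholder values, layout only):
	 *
	 *	reg = <phy_ctrl_offset 0x4>, <ana_cfg1_offset 0x4>;
	 */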
priv->phy_gate_clk = devm_clk_get(dev, "phy");
if (IS_ERR(priv->phy_gate_clk)) {
dev_err(dev, "Unable to get USB phy gate clk\n");
return PTR_ERR(priv->phy_gate_clk);
}
priv->ctrl_reset = devm_reset_control_get_shared(dev, "ctrl");
if (IS_ERR(priv->ctrl_reset)) {
if (PTR_ERR(priv->ctrl_reset) != -EPROBE_DEFER)
dev_err(dev, "failed to get 'ctrl' reset\n");
return PTR_ERR(priv->ctrl_reset);
}
priv->phy_reset = devm_reset_control_get_optional(dev, "phy");
return PTR_ERR_OR_ZERO(priv->phy_reset);
}
static int ltq_rcu_usb2_phy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ltq_rcu_usb2_priv *priv;
struct phy_provider *provider;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->dev = dev;
ret = ltq_rcu_usb2_of_parse(priv, pdev);
if (ret)
return ret;
/* Reset USB core through reset controller */
reset_control_deassert(priv->ctrl_reset);
reset_control_assert(priv->phy_reset);
	priv->phy = devm_phy_create(dev, dev->of_node, &ltq_rcu_usb2_phy_ops);
if (IS_ERR(priv->phy)) {
dev_err(dev, "failed to create PHY\n");
return PTR_ERR(priv->phy);
}
phy_set_drvdata(priv->phy, priv);
provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
if (IS_ERR(provider))
return PTR_ERR(provider);
dev_set_drvdata(priv->dev, priv);
return 0;
}
static struct platform_driver ltq_rcu_usb2_phy_driver = {
.probe = ltq_rcu_usb2_phy_probe,
.driver = {
.name = "lantiq-rcu-usb2-phy",
.of_match_table = ltq_rcu_usb2_phy_of_match,
}
};
module_platform_driver(ltq_rcu_usb2_phy_driver);
MODULE_AUTHOR("Martin Blumenstingl <[email protected]>");
MODULE_DESCRIPTION("Lantiq XWAY USB2 PHY driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/phy/lantiq/phy-lantiq-rcu-usb2.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* PCIe PHY driver for Lantiq VRX200 and ARX300 SoCs.
*
* Copyright (C) 2019 Martin Blumenstingl <[email protected]>
*
* Based on the BSP (called "UGW") driver:
* Copyright (C) 2009-2015 Lei Chuanhua <[email protected]>
* Copyright (C) 2016 Intel Corporation
*
* TODO: PHY modes other than 36MHz (without "SSC")
*/
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <dt-bindings/phy/phy-lantiq-vrx200-pcie.h>
#define PCIE_PHY_PLL_CTRL1 0x44
#define PCIE_PHY_PLL_CTRL2 0x46
#define PCIE_PHY_PLL_CTRL2_CONST_SDM_MASK GENMASK(7, 0)
#define PCIE_PHY_PLL_CTRL2_CONST_SDM_EN BIT(8)
#define PCIE_PHY_PLL_CTRL2_PLL_SDM_EN BIT(9)
#define PCIE_PHY_PLL_CTRL3 0x48
#define PCIE_PHY_PLL_CTRL3_EXT_MMD_DIV_RATIO_EN BIT(1)
#define PCIE_PHY_PLL_CTRL3_EXT_MMD_DIV_RATIO_MASK GENMASK(6, 4)
#define PCIE_PHY_PLL_CTRL4 0x4a
#define PCIE_PHY_PLL_CTRL5 0x4c
#define PCIE_PHY_PLL_CTRL6 0x4e
#define PCIE_PHY_PLL_CTRL7 0x50
#define PCIE_PHY_PLL_A_CTRL1 0x52
#define PCIE_PHY_PLL_A_CTRL2 0x54
#define PCIE_PHY_PLL_A_CTRL2_LF_MODE_EN BIT(14)
#define PCIE_PHY_PLL_A_CTRL3 0x56
#define PCIE_PHY_PLL_A_CTRL3_MMD_MASK GENMASK(15, 13)
#define PCIE_PHY_PLL_STATUS 0x58
#define PCIE_PHY_TX1_CTRL1 0x60
#define PCIE_PHY_TX1_CTRL1_FORCE_EN BIT(3)
#define PCIE_PHY_TX1_CTRL1_LOAD_EN BIT(4)
#define PCIE_PHY_TX1_CTRL2 0x62
#define PCIE_PHY_TX1_CTRL3 0x64
#define PCIE_PHY_TX1_A_CTRL1 0x66
#define PCIE_PHY_TX1_A_CTRL2 0x68
#define PCIE_PHY_TX1_MOD1 0x6a
#define PCIE_PHY_TX1_MOD2 0x6c
#define PCIE_PHY_TX1_MOD3 0x6e
#define PCIE_PHY_TX2_CTRL1 0x70
#define PCIE_PHY_TX2_CTRL1_LOAD_EN BIT(4)
#define PCIE_PHY_TX2_CTRL2 0x72
#define PCIE_PHY_TX2_A_CTRL1 0x76
#define PCIE_PHY_TX2_A_CTRL2 0x78
#define PCIE_PHY_TX2_MOD1 0x7a
#define PCIE_PHY_TX2_MOD2 0x7c
#define PCIE_PHY_TX2_MOD3 0x7e
#define PCIE_PHY_RX1_CTRL1 0xa0
#define PCIE_PHY_RX1_CTRL1_LOAD_EN BIT(1)
#define PCIE_PHY_RX1_CTRL2 0xa2
#define PCIE_PHY_RX1_CDR 0xa4
#define PCIE_PHY_RX1_EI 0xa6
#define PCIE_PHY_RX1_A_CTRL 0xaa
struct ltq_vrx200_pcie_phy_priv {
struct phy *phy;
unsigned int mode;
struct device *dev;
struct regmap *phy_regmap;
struct regmap *rcu_regmap;
struct clk *pdi_clk;
struct clk *phy_clk;
struct reset_control *phy_reset;
struct reset_control *pcie_reset;
u32 rcu_ahb_endian_offset;
u32 rcu_ahb_endian_big_endian_mask;
};
static void ltq_vrx200_pcie_phy_common_setup(struct phy *phy)
{
struct ltq_vrx200_pcie_phy_priv *priv = phy_get_drvdata(phy);
/* PLL Setting */
regmap_write(priv->phy_regmap, PCIE_PHY_PLL_A_CTRL1, 0x120e);
/* increase the bias reference voltage */
regmap_write(priv->phy_regmap, PCIE_PHY_PLL_A_CTRL2, 0x39d7);
regmap_write(priv->phy_regmap, PCIE_PHY_PLL_A_CTRL3, 0x0900);
/* Endcnt */
regmap_write(priv->phy_regmap, PCIE_PHY_RX1_EI, 0x0004);
regmap_write(priv->phy_regmap, PCIE_PHY_RX1_A_CTRL, 0x6803);
regmap_update_bits(priv->phy_regmap, PCIE_PHY_TX1_CTRL1,
PCIE_PHY_TX1_CTRL1_FORCE_EN,
PCIE_PHY_TX1_CTRL1_FORCE_EN);
/* predrv_ser_en */
regmap_write(priv->phy_regmap, PCIE_PHY_TX1_A_CTRL2, 0x0706);
/* ctrl_lim */
regmap_write(priv->phy_regmap, PCIE_PHY_TX1_CTRL3, 0x1fff);
/* ctrl */
regmap_write(priv->phy_regmap, PCIE_PHY_TX1_A_CTRL1, 0x0810);
/* predrv_ser_en */
regmap_update_bits(priv->phy_regmap, PCIE_PHY_TX2_A_CTRL2, 0x7f00,
0x4700);
/* RTERM */
regmap_write(priv->phy_regmap, PCIE_PHY_TX1_CTRL2, 0x2e00);
/* Improved 100MHz clock output */
regmap_write(priv->phy_regmap, PCIE_PHY_TX2_CTRL2, 0x3096);
regmap_write(priv->phy_regmap, PCIE_PHY_TX2_A_CTRL2, 0x4707);
/* Reduced CDR BW to avoid glitches */
regmap_write(priv->phy_regmap, PCIE_PHY_RX1_CDR, 0x0235);
}
static void pcie_phy_36mhz_mode_setup(struct phy *phy)
{
struct ltq_vrx200_pcie_phy_priv *priv = phy_get_drvdata(phy);
regmap_update_bits(priv->phy_regmap, PCIE_PHY_PLL_CTRL3,
PCIE_PHY_PLL_CTRL3_EXT_MMD_DIV_RATIO_EN, 0x0000);
regmap_update_bits(priv->phy_regmap, PCIE_PHY_PLL_CTRL3,
PCIE_PHY_PLL_CTRL3_EXT_MMD_DIV_RATIO_MASK, 0x0000);
regmap_update_bits(priv->phy_regmap, PCIE_PHY_PLL_CTRL2,
PCIE_PHY_PLL_CTRL2_PLL_SDM_EN,
PCIE_PHY_PLL_CTRL2_PLL_SDM_EN);
regmap_update_bits(priv->phy_regmap, PCIE_PHY_PLL_CTRL2,
PCIE_PHY_PLL_CTRL2_CONST_SDM_EN,
PCIE_PHY_PLL_CTRL2_CONST_SDM_EN);
regmap_update_bits(priv->phy_regmap, PCIE_PHY_PLL_A_CTRL3,
PCIE_PHY_PLL_A_CTRL3_MMD_MASK,
FIELD_PREP(PCIE_PHY_PLL_A_CTRL3_MMD_MASK, 0x1));
regmap_update_bits(priv->phy_regmap, PCIE_PHY_PLL_A_CTRL2,
PCIE_PHY_PLL_A_CTRL2_LF_MODE_EN, 0x0000);
/* const_sdm */
regmap_write(priv->phy_regmap, PCIE_PHY_PLL_CTRL1, 0x38e4);
regmap_update_bits(priv->phy_regmap, PCIE_PHY_PLL_CTRL2,
PCIE_PHY_PLL_CTRL2_CONST_SDM_MASK,
FIELD_PREP(PCIE_PHY_PLL_CTRL2_CONST_SDM_MASK,
0xee));
/* pllmod */
regmap_write(priv->phy_regmap, PCIE_PHY_PLL_CTRL7, 0x0002);
regmap_write(priv->phy_regmap, PCIE_PHY_PLL_CTRL6, 0x3a04);
regmap_write(priv->phy_regmap, PCIE_PHY_PLL_CTRL5, 0xfae3);
regmap_write(priv->phy_regmap, PCIE_PHY_PLL_CTRL4, 0x1b72);
}
static int ltq_vrx200_pcie_phy_wait_for_pll(struct phy *phy)
{
struct ltq_vrx200_pcie_phy_priv *priv = phy_get_drvdata(phy);
unsigned int tmp;
int ret;
ret = regmap_read_poll_timeout(priv->phy_regmap, PCIE_PHY_PLL_STATUS,
tmp, ((tmp & 0x0070) == 0x0070), 10,
10000);
if (ret) {
dev_err(priv->dev, "PLL Link timeout, PLL status = 0x%04x\n",
tmp);
return ret;
}
return 0;
}
static void ltq_vrx200_pcie_phy_apply_workarounds(struct phy *phy)
{
struct ltq_vrx200_pcie_phy_priv *priv = phy_get_drvdata(phy);
static const struct reg_default slices[] = {
{
.reg = PCIE_PHY_TX1_CTRL1,
.def = PCIE_PHY_TX1_CTRL1_LOAD_EN,
},
{
.reg = PCIE_PHY_TX2_CTRL1,
.def = PCIE_PHY_TX2_CTRL1_LOAD_EN,
},
{
.reg = PCIE_PHY_RX1_CTRL1,
.def = PCIE_PHY_RX1_CTRL1_LOAD_EN,
}
};
int i;
for (i = 0; i < ARRAY_SIZE(slices); i++) {
/* enable load_en */
regmap_update_bits(priv->phy_regmap, slices[i].reg,
slices[i].def, slices[i].def);
udelay(1);
/* disable load_en */
regmap_update_bits(priv->phy_regmap, slices[i].reg,
slices[i].def, 0x0);
}
for (i = 0; i < 5; i++) {
/* TX2 modulation */
regmap_write(priv->phy_regmap, PCIE_PHY_TX2_MOD1, 0x1ffe);
regmap_write(priv->phy_regmap, PCIE_PHY_TX2_MOD2, 0xfffe);
regmap_write(priv->phy_regmap, PCIE_PHY_TX2_MOD3, 0x0601);
usleep_range(1000, 2000);
regmap_write(priv->phy_regmap, PCIE_PHY_TX2_MOD3, 0x0001);
/* TX1 modulation */
regmap_write(priv->phy_regmap, PCIE_PHY_TX1_MOD1, 0x1ffe);
regmap_write(priv->phy_regmap, PCIE_PHY_TX1_MOD2, 0xfffe);
regmap_write(priv->phy_regmap, PCIE_PHY_TX1_MOD3, 0x0601);
usleep_range(1000, 2000);
regmap_write(priv->phy_regmap, PCIE_PHY_TX1_MOD3, 0x0001);
}
}
static int ltq_vrx200_pcie_phy_init(struct phy *phy)
{
struct ltq_vrx200_pcie_phy_priv *priv = phy_get_drvdata(phy);
int ret;
if (of_device_is_big_endian(priv->dev->of_node))
regmap_update_bits(priv->rcu_regmap,
priv->rcu_ahb_endian_offset,
priv->rcu_ahb_endian_big_endian_mask,
priv->rcu_ahb_endian_big_endian_mask);
else
regmap_update_bits(priv->rcu_regmap,
priv->rcu_ahb_endian_offset,
priv->rcu_ahb_endian_big_endian_mask, 0x0);
ret = reset_control_assert(priv->phy_reset);
if (ret)
goto err;
udelay(1);
ret = reset_control_deassert(priv->phy_reset);
if (ret)
goto err;
udelay(1);
ret = reset_control_deassert(priv->pcie_reset);
if (ret)
goto err_assert_phy_reset;
/* Make sure PHY PLL is stable */
usleep_range(20, 40);
return 0;
err_assert_phy_reset:
reset_control_assert(priv->phy_reset);
err:
return ret;
}
static int ltq_vrx200_pcie_phy_exit(struct phy *phy)
{
struct ltq_vrx200_pcie_phy_priv *priv = phy_get_drvdata(phy);
int ret;
ret = reset_control_assert(priv->pcie_reset);
if (ret)
return ret;
ret = reset_control_assert(priv->phy_reset);
if (ret)
return ret;
return 0;
}
static int ltq_vrx200_pcie_phy_power_on(struct phy *phy)
{
struct ltq_vrx200_pcie_phy_priv *priv = phy_get_drvdata(phy);
int ret;
/* Enable PDI to access PCIe PHY register */
ret = clk_prepare_enable(priv->pdi_clk);
if (ret)
goto err;
/* Configure PLL and PHY clock */
ltq_vrx200_pcie_phy_common_setup(phy);
pcie_phy_36mhz_mode_setup(phy);
/* Enable the PCIe PHY and make PLL setting take effect */
ret = clk_prepare_enable(priv->phy_clk);
if (ret)
goto err_disable_pdi_clk;
/* Check if we are in "startup ready" status */
ret = ltq_vrx200_pcie_phy_wait_for_pll(phy);
if (ret)
goto err_disable_phy_clk;
ltq_vrx200_pcie_phy_apply_workarounds(phy);
return 0;
err_disable_phy_clk:
clk_disable_unprepare(priv->phy_clk);
err_disable_pdi_clk:
clk_disable_unprepare(priv->pdi_clk);
err:
return ret;
}
static int ltq_vrx200_pcie_phy_power_off(struct phy *phy)
{
struct ltq_vrx200_pcie_phy_priv *priv = phy_get_drvdata(phy);
clk_disable_unprepare(priv->phy_clk);
clk_disable_unprepare(priv->pdi_clk);
return 0;
}
static const struct phy_ops ltq_vrx200_pcie_phy_ops = {
.init = ltq_vrx200_pcie_phy_init,
.exit = ltq_vrx200_pcie_phy_exit,
.power_on = ltq_vrx200_pcie_phy_power_on,
.power_off = ltq_vrx200_pcie_phy_power_off,
.owner = THIS_MODULE,
};
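/*
 * Bring-up order implemented by these ops: .init programs the RCU AHB
 * endianness, toggles the PHY reset and releases the PCIe reset; .power_on
 * then enables the PDI clock so the PHY registers become accessible,
 * programs the PLL for the 36 MHz mode, enables the PHY clock, waits for
 * PLL lock and finally applies the TX/RX load and modulation workarounds.
 */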
static struct phy *ltq_vrx200_pcie_phy_xlate(struct device *dev,
struct of_phandle_args *args)
{
struct ltq_vrx200_pcie_phy_priv *priv = dev_get_drvdata(dev);
unsigned int mode;
if (args->args_count != 1) {
dev_err(dev, "invalid number of arguments\n");
return ERR_PTR(-EINVAL);
}
mode = args->args[0];
switch (mode) {
case LANTIQ_PCIE_PHY_MODE_36MHZ:
priv->mode = mode;
break;
case LANTIQ_PCIE_PHY_MODE_25MHZ:
case LANTIQ_PCIE_PHY_MODE_25MHZ_SSC:
case LANTIQ_PCIE_PHY_MODE_36MHZ_SSC:
case LANTIQ_PCIE_PHY_MODE_100MHZ:
case LANTIQ_PCIE_PHY_MODE_100MHZ_SSC:
dev_err(dev, "PHY mode not implemented yet: %u\n", mode);
return ERR_PTR(-EINVAL);
default:
dev_err(dev, "invalid PHY mode %u\n", mode);
return ERR_PTR(-EINVAL);
}
return priv->phy;
}
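/*
 * Illustrative device tree reference (sketch only, the node names are
 * assumptions): a PCIe controller would select the PHY mode through the
 * xlate cell, e.g.
 *
 *	phys = <&pcie0_phy LANTIQ_PCIE_PHY_MODE_36MHZ>;
 *	phy-names = "pcie";
 *
 * Only LANTIQ_PCIE_PHY_MODE_36MHZ is accepted by the xlate callback so far.
 */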
static int ltq_vrx200_pcie_phy_probe(struct platform_device *pdev)
{
static const struct regmap_config regmap_config = {
.reg_bits = 8,
.val_bits = 16,
.reg_stride = 2,
.max_register = PCIE_PHY_RX1_A_CTRL,
};
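	/* 16-bit PHY registers at a 2-byte stride, the last one at 0xaa */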
struct ltq_vrx200_pcie_phy_priv *priv;
struct device *dev = &pdev->dev;
struct phy_provider *provider;
void __iomem *base;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
	priv->phy_regmap = devm_regmap_init_mmio(dev, base, &regmap_config);
if (IS_ERR(priv->phy_regmap))
return PTR_ERR(priv->phy_regmap);
priv->rcu_regmap = syscon_regmap_lookup_by_phandle(dev->of_node,
"lantiq,rcu");
if (IS_ERR(priv->rcu_regmap))
return PTR_ERR(priv->rcu_regmap);
ret = device_property_read_u32(dev, "lantiq,rcu-endian-offset",
&priv->rcu_ahb_endian_offset);
if (ret) {
dev_err(dev,
"failed to parse the 'lantiq,rcu-endian-offset' property\n");
return ret;
}
ret = device_property_read_u32(dev, "lantiq,rcu-big-endian-mask",
&priv->rcu_ahb_endian_big_endian_mask);
if (ret) {
dev_err(dev,
"failed to parse the 'lantiq,rcu-big-endian-mask' property\n");
return ret;
}
priv->pdi_clk = devm_clk_get(dev, "pdi");
if (IS_ERR(priv->pdi_clk))
return PTR_ERR(priv->pdi_clk);
priv->phy_clk = devm_clk_get(dev, "phy");
if (IS_ERR(priv->phy_clk))
return PTR_ERR(priv->phy_clk);
priv->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
if (IS_ERR(priv->phy_reset))
return PTR_ERR(priv->phy_reset);
priv->pcie_reset = devm_reset_control_get_shared(dev, "pcie");
if (IS_ERR(priv->pcie_reset))
return PTR_ERR(priv->pcie_reset);
priv->dev = dev;
priv->phy = devm_phy_create(dev, dev->of_node,
				    &ltq_vrx200_pcie_phy_ops);
if (IS_ERR(priv->phy)) {
dev_err(dev, "failed to create PHY\n");
return PTR_ERR(priv->phy);
}
phy_set_drvdata(priv->phy, priv);
dev_set_drvdata(dev, priv);
provider = devm_of_phy_provider_register(dev,
ltq_vrx200_pcie_phy_xlate);
return PTR_ERR_OR_ZERO(provider);
}
static const struct of_device_id ltq_vrx200_pcie_phy_of_match[] = {
{ .compatible = "lantiq,vrx200-pcie-phy", },
{ .compatible = "lantiq,arx300-pcie-phy", },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, ltq_vrx200_pcie_phy_of_match);
static struct platform_driver ltq_vrx200_pcie_phy_driver = {
.probe = ltq_vrx200_pcie_phy_probe,
.driver = {
.name = "ltq-vrx200-pcie-phy",
.of_match_table = ltq_vrx200_pcie_phy_of_match,
}
};
module_platform_driver(ltq_vrx200_pcie_phy_driver);
MODULE_AUTHOR("Martin Blumenstingl <[email protected]>");
MODULE_DESCRIPTION("Lantiq VRX200 and ARX300 PCIe PHY driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Motorola Mapphone MDM6600 modem GPIO controlled USB PHY driver
* Copyright (C) 2018 Tony Lindgren <[email protected]>
*/
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/gpio/consumer.h>
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/pinctrl/consumer.h>
#define PHY_MDM6600_PHY_DELAY_MS 4000 /* PHY enable 2.2s to 3.5s */
#define PHY_MDM6600_ENABLED_DELAY_MS 8000 /* 8s more total for MDM6600 */
#define PHY_MDM6600_WAKE_KICK_MS 600 /* time on after GPIO toggle */
#define MDM6600_MODEM_IDLE_DELAY_MS 1000 /* modem after USB suspend */
#define MDM6600_MODEM_WAKE_DELAY_MS 200 /* modem response after idle */
enum phy_mdm6600_ctrl_lines {
PHY_MDM6600_ENABLE, /* USB PHY enable */
PHY_MDM6600_POWER, /* Device power */
PHY_MDM6600_RESET, /* Device reset */
PHY_MDM6600_NR_CTRL_LINES,
};
enum phy_mdm6600_bootmode_lines {
PHY_MDM6600_MODE0, /* out USB mode0 and OOB wake */
PHY_MDM6600_MODE1, /* out USB mode1, in OOB wake */
PHY_MDM6600_NR_MODE_LINES,
};
enum phy_mdm6600_cmd_lines {
PHY_MDM6600_CMD0,
PHY_MDM6600_CMD1,
PHY_MDM6600_CMD2,
PHY_MDM6600_NR_CMD_LINES,
};
enum phy_mdm6600_status_lines {
PHY_MDM6600_STATUS0,
PHY_MDM6600_STATUS1,
PHY_MDM6600_STATUS2,
PHY_MDM6600_NR_STATUS_LINES,
};
/*
* MDM6600 command codes. These are based on Motorola Mapphone Linux
* kernel tree.
*/
enum phy_mdm6600_cmd {
PHY_MDM6600_CMD_BP_PANIC_ACK,
PHY_MDM6600_CMD_DATA_ONLY_BYPASS, /* Reroute USB to CPCAP PHY */
PHY_MDM6600_CMD_FULL_BYPASS, /* Reroute USB to CPCAP PHY */
PHY_MDM6600_CMD_NO_BYPASS, /* Request normal USB mode */
PHY_MDM6600_CMD_BP_SHUTDOWN_REQ, /* Request device power off */
PHY_MDM6600_CMD_BP_UNKNOWN_5,
PHY_MDM6600_CMD_BP_UNKNOWN_6,
PHY_MDM6600_CMD_UNDEFINED,
};
/*
* MDM6600 status codes. These are based on Motorola Mapphone Linux
* kernel tree.
*/
enum phy_mdm6600_status {
PHY_MDM6600_STATUS_PANIC, /* Seems to be really off */
PHY_MDM6600_STATUS_PANIC_BUSY_WAIT,
PHY_MDM6600_STATUS_QC_DLOAD,
PHY_MDM6600_STATUS_RAM_DOWNLOADER, /* MDM6600 USB flashing mode */
PHY_MDM6600_STATUS_PHONE_CODE_AWAKE, /* MDM6600 normal USB mode */
PHY_MDM6600_STATUS_PHONE_CODE_ASLEEP,
PHY_MDM6600_STATUS_SHUTDOWN_ACK,
PHY_MDM6600_STATUS_UNDEFINED,
};
static const char * const
phy_mdm6600_status_name[] = {
"off", "busy", "qc_dl", "ram_dl", "awake",
"asleep", "shutdown", "undefined",
};
struct phy_mdm6600 {
struct device *dev;
struct phy *generic_phy;
struct phy_provider *phy_provider;
struct gpio_desc *ctrl_gpios[PHY_MDM6600_NR_CTRL_LINES];
struct gpio_descs *mode_gpios;
struct gpio_descs *status_gpios;
struct gpio_descs *cmd_gpios;
struct delayed_work bootup_work;
struct delayed_work status_work;
struct delayed_work modem_wake_work;
struct completion ack;
bool enabled; /* mdm6600 phy enabled */
bool running; /* mdm6600 boot done */
	bool awake;				/* mdm6600 responds on n_gsm */
int status;
};
static int phy_mdm6600_init(struct phy *x)
{
struct phy_mdm6600 *ddata = phy_get_drvdata(x);
struct gpio_desc *enable_gpio = ddata->ctrl_gpios[PHY_MDM6600_ENABLE];
if (!ddata->enabled)
return -EPROBE_DEFER;
gpiod_set_value_cansleep(enable_gpio, 0);
return 0;
}
static int phy_mdm6600_power_on(struct phy *x)
{
struct phy_mdm6600 *ddata = phy_get_drvdata(x);
struct gpio_desc *enable_gpio = ddata->ctrl_gpios[PHY_MDM6600_ENABLE];
int error;
if (!ddata->enabled)
return -ENODEV;
error = pinctrl_pm_select_default_state(ddata->dev);
if (error)
dev_warn(ddata->dev, "%s: error with default_state: %i\n",
__func__, error);
gpiod_set_value_cansleep(enable_gpio, 1);
/* Allow aggressive PM for USB, it's only needed for n_gsm port */
if (pm_runtime_enabled(&x->dev))
phy_pm_runtime_put(x);
return 0;
}
static int phy_mdm6600_power_off(struct phy *x)
{
struct phy_mdm6600 *ddata = phy_get_drvdata(x);
struct gpio_desc *enable_gpio = ddata->ctrl_gpios[PHY_MDM6600_ENABLE];
int error;
if (!ddata->enabled)
return -ENODEV;
/* Paired with phy_pm_runtime_put() in phy_mdm6600_power_on() */
if (pm_runtime_enabled(&x->dev)) {
error = phy_pm_runtime_get(x);
if (error < 0 && error != -EINPROGRESS)
dev_warn(ddata->dev, "%s: phy_pm_runtime_get: %i\n",
__func__, error);
}
gpiod_set_value_cansleep(enable_gpio, 0);
error = pinctrl_pm_select_sleep_state(ddata->dev);
if (error)
dev_warn(ddata->dev, "%s: error with sleep_state: %i\n",
__func__, error);
return 0;
}
static const struct phy_ops gpio_usb_ops = {
.init = phy_mdm6600_init,
.power_on = phy_mdm6600_power_on,
.power_off = phy_mdm6600_power_off,
.owner = THIS_MODULE,
};
/**
* phy_mdm6600_cmd() - send a command request to mdm6600
* @ddata: device driver data
* @val: value of cmd to be set
*
* Configures the three command request GPIOs to the specified value.
*/
static void phy_mdm6600_cmd(struct phy_mdm6600 *ddata, int val)
{
DECLARE_BITMAP(values, PHY_MDM6600_NR_CMD_LINES);
values[0] = val;
gpiod_set_array_value_cansleep(PHY_MDM6600_NR_CMD_LINES,
ddata->cmd_gpios->desc,
ddata->cmd_gpios->info, values);
}
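/*
 * Example: PHY_MDM6600_CMD_NO_BYPASS (3, binary 011) drives CMD0 and CMD1
 * high and CMD2 low, since bit i of the value maps to command line i.
 */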
/**
* phy_mdm6600_status() - read mdm6600 status lines
* @work: work structure
*/
static void phy_mdm6600_status(struct work_struct *work)
{
struct phy_mdm6600 *ddata;
struct device *dev;
DECLARE_BITMAP(values, PHY_MDM6600_NR_STATUS_LINES);
int error;
ddata = container_of(work, struct phy_mdm6600, status_work.work);
dev = ddata->dev;
error = gpiod_get_array_value_cansleep(PHY_MDM6600_NR_STATUS_LINES,
ddata->status_gpios->desc,
ddata->status_gpios->info,
values);
if (error)
return;
ddata->status = values[0] & ((1 << PHY_MDM6600_NR_STATUS_LINES) - 1);
dev_info(dev, "modem status: %i %s\n",
ddata->status,
phy_mdm6600_status_name[ddata->status]);
complete(&ddata->ack);
}
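/*
 * Example: STATUS2 high with STATUS0/STATUS1 low reads back as 4, i.e.
 * PHY_MDM6600_STATUS_PHONE_CODE_AWAKE, logged as "awake" above.
 */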
static irqreturn_t phy_mdm6600_irq_thread(int irq, void *data)
{
struct phy_mdm6600 *ddata = data;
schedule_delayed_work(&ddata->status_work, msecs_to_jiffies(10));
return IRQ_HANDLED;
}
/**
* phy_mdm6600_wakeirq_thread - handle mode1 line OOB wake after booting
* @irq: interrupt
* @data: interrupt handler data
*
 * GPIO mode1 is initially used as an output to configure the USB boot
 * mode for mdm6600. After booting it is used as an input for the OOB wake
 * signal from mdm6600 to the SoC. For now it is only used for debug info.
*/
static irqreturn_t phy_mdm6600_wakeirq_thread(int irq, void *data)
{
struct phy_mdm6600 *ddata = data;
struct gpio_desc *mode_gpio1;
int error, wakeup;
mode_gpio1 = ddata->mode_gpios->desc[PHY_MDM6600_MODE1];
wakeup = gpiod_get_value(mode_gpio1);
if (!wakeup)
return IRQ_NONE;
dev_dbg(ddata->dev, "OOB wake on mode_gpio1: %i\n", wakeup);
error = pm_runtime_get_sync(ddata->dev);
if (error < 0) {
pm_runtime_put_noidle(ddata->dev);
return IRQ_NONE;
}
/* Just wake-up and kick the autosuspend timer */
pm_runtime_mark_last_busy(ddata->dev);
pm_runtime_put_autosuspend(ddata->dev);
return IRQ_HANDLED;
}
/**
* phy_mdm6600_init_irq() - initialize mdm6600 status IRQ lines
* @ddata: device driver data
*/
static void phy_mdm6600_init_irq(struct phy_mdm6600 *ddata)
{
struct device *dev = ddata->dev;
int i, error, irq;
for (i = PHY_MDM6600_STATUS0;
i <= PHY_MDM6600_STATUS2; i++) {
struct gpio_desc *gpio = ddata->status_gpios->desc[i];
irq = gpiod_to_irq(gpio);
if (irq <= 0)
continue;
error = devm_request_threaded_irq(dev, irq, NULL,
phy_mdm6600_irq_thread,
IRQF_TRIGGER_RISING |
IRQF_TRIGGER_FALLING |
IRQF_ONESHOT,
"mdm6600",
ddata);
if (error)
dev_warn(dev, "no modem status irq%i: %i\n",
irq, error);
}
}
struct phy_mdm6600_map {
const char *name;
int direction;
};
static const struct phy_mdm6600_map
phy_mdm6600_ctrl_gpio_map[PHY_MDM6600_NR_CTRL_LINES] = {
{ "enable", GPIOD_OUT_LOW, }, /* low = phy disabled */
{ "power", GPIOD_OUT_LOW, }, /* low = off */
{ "reset", GPIOD_OUT_HIGH, }, /* high = reset */
};
/**
* phy_mdm6600_init_lines() - initialize mdm6600 GPIO lines
* @ddata: device driver data
*/
static int phy_mdm6600_init_lines(struct phy_mdm6600 *ddata)
{
struct device *dev = ddata->dev;
int i;
/* MDM6600 control lines */
for (i = 0; i < ARRAY_SIZE(phy_mdm6600_ctrl_gpio_map); i++) {
const struct phy_mdm6600_map *map =
&phy_mdm6600_ctrl_gpio_map[i];
struct gpio_desc **gpio = &ddata->ctrl_gpios[i];
*gpio = devm_gpiod_get(dev, map->name, map->direction);
if (IS_ERR(*gpio)) {
dev_info(dev, "gpio %s error %li\n",
map->name, PTR_ERR(*gpio));
return PTR_ERR(*gpio);
}
}
/* MDM6600 USB start-up mode output lines */
ddata->mode_gpios = devm_gpiod_get_array(dev, "motorola,mode",
GPIOD_OUT_LOW);
if (IS_ERR(ddata->mode_gpios))
return PTR_ERR(ddata->mode_gpios);
if (ddata->mode_gpios->ndescs != PHY_MDM6600_NR_MODE_LINES)
return -EINVAL;
/* MDM6600 status input lines */
ddata->status_gpios = devm_gpiod_get_array(dev, "motorola,status",
GPIOD_IN);
if (IS_ERR(ddata->status_gpios))
return PTR_ERR(ddata->status_gpios);
if (ddata->status_gpios->ndescs != PHY_MDM6600_NR_STATUS_LINES)
return -EINVAL;
/* MDM6600 cmd output lines */
ddata->cmd_gpios = devm_gpiod_get_array(dev, "motorola,cmd",
GPIOD_OUT_LOW);
if (IS_ERR(ddata->cmd_gpios))
return PTR_ERR(ddata->cmd_gpios);
if (ddata->cmd_gpios->ndescs != PHY_MDM6600_NR_CMD_LINES)
return -EINVAL;
return 0;
}
/**
* phy_mdm6600_device_power_on() - power on mdm6600 device
* @ddata: device driver data
*
 * Getting the integrated USB PHY in the MDM6600 up takes some hoops. We must
 * ensure the shared USB bootmode GPIOs are configured, then request modem
 * start-up, reset and power-up. After that we need to recycle the shared USB
 * bootmode GPIOs, as they are also used for Out of Band (OOB) wake for the
 * USB and TS 27.010 serial mux.
*/
static int phy_mdm6600_device_power_on(struct phy_mdm6600 *ddata)
{
struct gpio_desc *mode_gpio0, *mode_gpio1, *reset_gpio, *power_gpio;
int error = 0, wakeirq;
mode_gpio0 = ddata->mode_gpios->desc[PHY_MDM6600_MODE0];
mode_gpio1 = ddata->mode_gpios->desc[PHY_MDM6600_MODE1];
reset_gpio = ddata->ctrl_gpios[PHY_MDM6600_RESET];
power_gpio = ddata->ctrl_gpios[PHY_MDM6600_POWER];
/*
* Shared GPIOs must be low for normal USB mode. After booting
* they are used for OOB wake signaling. These can be also used
* to configure USB flashing mode later on based on a module
* parameter.
*/
gpiod_set_value_cansleep(mode_gpio0, 0);
gpiod_set_value_cansleep(mode_gpio1, 0);
/* Request start-up mode */
phy_mdm6600_cmd(ddata, PHY_MDM6600_CMD_NO_BYPASS);
/* Request a reset first */
gpiod_set_value_cansleep(reset_gpio, 0);
msleep(100);
/* Toggle power GPIO to request mdm6600 to start */
gpiod_set_value_cansleep(power_gpio, 1);
msleep(100);
gpiod_set_value_cansleep(power_gpio, 0);
/*
* Looks like the USB PHY needs between 2.2 to 4 seconds.
* If we try to use it before that, we will get L3 errors
* from omap-usb-host trying to access the PHY. See also
* phy_mdm6600_init() for -EPROBE_DEFER.
*/
msleep(PHY_MDM6600_PHY_DELAY_MS);
ddata->enabled = true;
/* Booting up the rest of MDM6600 will take total about 8 seconds */
	dev_info(ddata->dev, "Waiting for power up request to complete...\n");
if (wait_for_completion_timeout(&ddata->ack,
msecs_to_jiffies(PHY_MDM6600_ENABLED_DELAY_MS))) {
if (ddata->status > PHY_MDM6600_STATUS_PANIC &&
ddata->status < PHY_MDM6600_STATUS_SHUTDOWN_ACK)
dev_info(ddata->dev, "Powered up OK\n");
} else {
ddata->enabled = false;
error = -ETIMEDOUT;
dev_err(ddata->dev, "Timed out powering up\n");
}
/* Reconfigure mode1 GPIO as input for OOB wake */
gpiod_direction_input(mode_gpio1);
wakeirq = gpiod_to_irq(mode_gpio1);
if (wakeirq <= 0)
return wakeirq;
error = devm_request_threaded_irq(ddata->dev, wakeirq, NULL,
phy_mdm6600_wakeirq_thread,
IRQF_TRIGGER_RISING |
IRQF_TRIGGER_FALLING |
IRQF_ONESHOT,
"mdm6600-wake",
ddata);
if (error)
dev_warn(ddata->dev, "no modem wakeirq irq%i: %i\n",
wakeirq, error);
ddata->running = true;
return error;
}
/**
* phy_mdm6600_device_power_off() - power off mdm6600 device
* @ddata: device driver data
*/
static void phy_mdm6600_device_power_off(struct phy_mdm6600 *ddata)
{
struct gpio_desc *reset_gpio =
ddata->ctrl_gpios[PHY_MDM6600_RESET];
ddata->enabled = false;
phy_mdm6600_cmd(ddata, PHY_MDM6600_CMD_BP_SHUTDOWN_REQ);
msleep(100);
gpiod_set_value_cansleep(reset_gpio, 1);
	dev_info(ddata->dev, "Waiting for power down request to complete... ");
if (wait_for_completion_timeout(&ddata->ack,
msecs_to_jiffies(5000))) {
if (ddata->status == PHY_MDM6600_STATUS_PANIC)
dev_info(ddata->dev, "Powered down OK\n");
} else {
dev_err(ddata->dev, "Timed out powering down\n");
}
}
static void phy_mdm6600_deferred_power_on(struct work_struct *work)
{
struct phy_mdm6600 *ddata;
int error;
ddata = container_of(work, struct phy_mdm6600, bootup_work.work);
error = phy_mdm6600_device_power_on(ddata);
if (error)
dev_err(ddata->dev, "Device not functional\n");
}
/*
 * USB suspend puts mdm6600 into low power mode. For any apps using n_gsm,
 * we need to keep the modem awake by kicking its mode0 GPIO. This will
 * keep the modem awake for about 1.2 seconds. When no n_gsm apps are using
 * the modem, runtime PM auto mode can be enabled so the modem can enter
 * low power mode.
*/
static void phy_mdm6600_wake_modem(struct phy_mdm6600 *ddata)
{
struct gpio_desc *mode_gpio0;
mode_gpio0 = ddata->mode_gpios->desc[PHY_MDM6600_MODE0];
gpiod_set_value_cansleep(mode_gpio0, 1);
usleep_range(5, 15);
gpiod_set_value_cansleep(mode_gpio0, 0);
if (ddata->awake)
usleep_range(5, 15);
else
msleep(MDM6600_MODEM_WAKE_DELAY_MS);
}
static void phy_mdm6600_modem_wake(struct work_struct *work)
{
struct phy_mdm6600 *ddata;
ddata = container_of(work, struct phy_mdm6600, modem_wake_work.work);
phy_mdm6600_wake_modem(ddata);
/*
* The modem does not always stay awake 1.2 seconds after toggling
	 * the wake GPIO, and sometimes it idles after about 600 ms,
	 * making writes time out.
*/
schedule_delayed_work(&ddata->modem_wake_work,
msecs_to_jiffies(PHY_MDM6600_WAKE_KICK_MS));
}
static int __maybe_unused phy_mdm6600_runtime_suspend(struct device *dev)
{
struct phy_mdm6600 *ddata = dev_get_drvdata(dev);
cancel_delayed_work_sync(&ddata->modem_wake_work);
ddata->awake = false;
return 0;
}
static int __maybe_unused phy_mdm6600_runtime_resume(struct device *dev)
{
struct phy_mdm6600 *ddata = dev_get_drvdata(dev);
phy_mdm6600_modem_wake(&ddata->modem_wake_work.work);
ddata->awake = true;
return 0;
}
static const struct dev_pm_ops phy_mdm6600_pm_ops = {
SET_RUNTIME_PM_OPS(phy_mdm6600_runtime_suspend,
phy_mdm6600_runtime_resume, NULL)
};
static const struct of_device_id phy_mdm6600_id_table[] = {
{ .compatible = "motorola,mapphone-mdm6600", },
{},
};
MODULE_DEVICE_TABLE(of, phy_mdm6600_id_table);
static int phy_mdm6600_probe(struct platform_device *pdev)
{
struct phy_mdm6600 *ddata;
int error;
ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
if (!ddata)
return -ENOMEM;
INIT_DELAYED_WORK(&ddata->bootup_work,
phy_mdm6600_deferred_power_on);
INIT_DELAYED_WORK(&ddata->status_work, phy_mdm6600_status);
INIT_DELAYED_WORK(&ddata->modem_wake_work, phy_mdm6600_modem_wake);
init_completion(&ddata->ack);
ddata->dev = &pdev->dev;
platform_set_drvdata(pdev, ddata);
/* Active state selected in phy_mdm6600_power_on() */
error = pinctrl_pm_select_sleep_state(ddata->dev);
if (error)
dev_warn(ddata->dev, "%s: error with sleep_state: %i\n",
__func__, error);
error = phy_mdm6600_init_lines(ddata);
if (error)
return error;
phy_mdm6600_init_irq(ddata);
schedule_delayed_work(&ddata->bootup_work, 0);
/*
* See phy_mdm6600_device_power_on(). We should be able
* to remove this eventually when ohci-platform can deal
* with -EPROBE_DEFER.
*/
msleep(PHY_MDM6600_PHY_DELAY_MS + 500);
/*
* Enable PM runtime only after PHY has been powered up properly.
* It is currently only needed after USB suspends mdm6600 and n_gsm
* needs to access the device. We don't want to do this earlier as
* gpio mode0 pin doubles as mdm6600 wake-up gpio.
*/
pm_runtime_use_autosuspend(ddata->dev);
pm_runtime_set_autosuspend_delay(ddata->dev,
MDM6600_MODEM_IDLE_DELAY_MS);
pm_runtime_enable(ddata->dev);
error = pm_runtime_get_sync(ddata->dev);
if (error < 0) {
dev_warn(ddata->dev, "failed to wake modem: %i\n", error);
pm_runtime_put_noidle(ddata->dev);
goto cleanup;
}
ddata->generic_phy = devm_phy_create(ddata->dev, NULL, &gpio_usb_ops);
if (IS_ERR(ddata->generic_phy)) {
error = PTR_ERR(ddata->generic_phy);
goto idle;
}
phy_set_drvdata(ddata->generic_phy, ddata);
ddata->phy_provider =
devm_of_phy_provider_register(ddata->dev,
of_phy_simple_xlate);
if (IS_ERR(ddata->phy_provider))
error = PTR_ERR(ddata->phy_provider);
idle:
pm_runtime_mark_last_busy(ddata->dev);
pm_runtime_put_autosuspend(ddata->dev);
cleanup:
if (error < 0)
phy_mdm6600_device_power_off(ddata);
pm_runtime_disable(ddata->dev);
pm_runtime_dont_use_autosuspend(ddata->dev);
return error;
}
static void phy_mdm6600_remove(struct platform_device *pdev)
{
struct phy_mdm6600 *ddata = platform_get_drvdata(pdev);
struct gpio_desc *reset_gpio = ddata->ctrl_gpios[PHY_MDM6600_RESET];
pm_runtime_dont_use_autosuspend(ddata->dev);
pm_runtime_put_sync(ddata->dev);
pm_runtime_disable(ddata->dev);
if (!ddata->running)
wait_for_completion_timeout(&ddata->ack,
msecs_to_jiffies(PHY_MDM6600_ENABLED_DELAY_MS));
gpiod_set_value_cansleep(reset_gpio, 1);
phy_mdm6600_device_power_off(ddata);
cancel_delayed_work_sync(&ddata->modem_wake_work);
cancel_delayed_work_sync(&ddata->bootup_work);
cancel_delayed_work_sync(&ddata->status_work);
}
static struct platform_driver phy_mdm6600_driver = {
.probe = phy_mdm6600_probe,
.remove_new = phy_mdm6600_remove,
.driver = {
.name = "phy-mapphone-mdm6600",
.pm = &phy_mdm6600_pm_ops,
.of_match_table = of_match_ptr(phy_mdm6600_id_table),
},
};
module_platform_driver(phy_mdm6600_driver);
MODULE_ALIAS("platform:gpio_usb");
MODULE_AUTHOR("Tony Lindgren <[email protected]>");
MODULE_DESCRIPTION("mdm6600 gpio usb phy driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/phy/motorola/phy-mapphone-mdm6600.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Motorola CPCAP PMIC USB PHY driver
* Copyright (C) 2017 Tony Lindgren <[email protected]>
*
* Some parts based on earlier Motorola Linux kernel tree code in
* board-mapphone-usb.c and cpcap-usb-det.c:
* Copyright (C) 2007 - 2011 Motorola, Inc.
*/
#include <linux/atomic.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/iio/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/gpio/consumer.h>
#include <linux/mfd/motorola-cpcap.h>
#include <linux/phy/omap_usb.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
#include <linux/usb/musb.h>
/* CPCAP_REG_USBC1 register bits */
#define CPCAP_BIT_IDPULSE BIT(15)
#define CPCAP_BIT_ID100KPU BIT(14)
#define CPCAP_BIT_IDPUCNTRL BIT(13)
#define CPCAP_BIT_IDPU BIT(12)
#define CPCAP_BIT_IDPD BIT(11)
#define CPCAP_BIT_VBUSCHRGTMR3 BIT(10)
#define CPCAP_BIT_VBUSCHRGTMR2 BIT(9)
#define CPCAP_BIT_VBUSCHRGTMR1 BIT(8)
#define CPCAP_BIT_VBUSCHRGTMR0 BIT(7)
#define CPCAP_BIT_VBUSPU BIT(6)
#define CPCAP_BIT_VBUSPD BIT(5)
#define CPCAP_BIT_DMPD BIT(4)
#define CPCAP_BIT_DPPD BIT(3)
#define CPCAP_BIT_DM1K5PU BIT(2)
#define CPCAP_BIT_DP1K5PU BIT(1)
#define CPCAP_BIT_DP150KPU BIT(0)
/* CPCAP_REG_USBC2 register bits */
#define CPCAP_BIT_ZHSDRV1 BIT(15)
#define CPCAP_BIT_ZHSDRV0 BIT(14)
#define CPCAP_BIT_DPLLCLKREQ BIT(13)
#define CPCAP_BIT_SE0CONN BIT(12)
#define CPCAP_BIT_UARTTXTRI BIT(11)
#define CPCAP_BIT_UARTSWAP BIT(10)
#define CPCAP_BIT_UARTMUX1 BIT(9)
#define CPCAP_BIT_UARTMUX0 BIT(8)
#define CPCAP_BIT_ULPISTPLOW BIT(7)
#define CPCAP_BIT_TXENPOL BIT(6)
#define CPCAP_BIT_USBXCVREN BIT(5)
#define CPCAP_BIT_USBCNTRL BIT(4)
#define CPCAP_BIT_USBSUSPEND BIT(3)
#define CPCAP_BIT_EMUMODE2 BIT(2)
#define CPCAP_BIT_EMUMODE1 BIT(1)
#define CPCAP_BIT_EMUMODE0 BIT(0)
/* CPCAP_REG_USBC3 register bits */
#define CPCAP_BIT_SPARE_898_15 BIT(15)
#define CPCAP_BIT_IHSTX03 BIT(14)
#define CPCAP_BIT_IHSTX02 BIT(13)
#define CPCAP_BIT_IHSTX01 BIT(12)
#define CPCAP_BIT_IHSTX0 BIT(11)
#define CPCAP_BIT_IDPU_SPI BIT(10)
#define CPCAP_BIT_UNUSED_898_9 BIT(9)
#define CPCAP_BIT_VBUSSTBY_EN BIT(8)
#define CPCAP_BIT_VBUSEN_SPI BIT(7)
#define CPCAP_BIT_VBUSPU_SPI BIT(6)
#define CPCAP_BIT_VBUSPD_SPI BIT(5)
#define CPCAP_BIT_DMPD_SPI BIT(4)
#define CPCAP_BIT_DPPD_SPI BIT(3)
#define CPCAP_BIT_SUSPEND_SPI BIT(2)
#define CPCAP_BIT_PU_SPI BIT(1)
#define CPCAP_BIT_ULPI_SPI_SEL BIT(0)
struct cpcap_usb_ints_state {
bool id_ground;
bool id_float;
bool chrg_det;
bool rvrs_chrg;
bool vbusov;
bool chrg_se1b;
bool se0conn;
bool rvrs_mode;
bool chrgcurr1;
bool vbusvld;
bool sessvld;
bool sessend;
bool se1;
bool battdetb;
bool dm;
bool dp;
};
enum cpcap_gpio_mode {
CPCAP_DM_DP,
CPCAP_MDM_RX_TX,
CPCAP_UNKNOWN_DISABLED, /* Seems to disable USB lines */
CPCAP_OTG_DM_DP,
};
struct cpcap_phy_ddata {
struct regmap *reg;
struct device *dev;
struct usb_phy phy;
struct delayed_work detect_work;
struct pinctrl *pins;
struct pinctrl_state *pins_ulpi;
struct pinctrl_state *pins_utmi;
struct pinctrl_state *pins_uart;
struct gpio_desc *gpio[2];
struct iio_channel *vbus;
struct iio_channel *id;
struct regulator *vusb;
atomic_t active;
unsigned int vbus_provider:1;
unsigned int docked:1;
};
static bool cpcap_usb_vbus_valid(struct cpcap_phy_ddata *ddata)
{
int error, value = 0;
error = iio_read_channel_processed(ddata->vbus, &value);
if (error >= 0)
return value > 3900;
dev_err(ddata->dev, "error reading VBUS: %i\n", error);
return false;
}
static int cpcap_usb_phy_set_host(struct usb_otg *otg, struct usb_bus *host)
{
otg->host = host;
if (!host)
otg->state = OTG_STATE_UNDEFINED;
return 0;
}
static int cpcap_usb_phy_set_peripheral(struct usb_otg *otg,
struct usb_gadget *gadget)
{
otg->gadget = gadget;
if (!gadget)
otg->state = OTG_STATE_UNDEFINED;
return 0;
}
static const struct phy_ops ops = {
.owner = THIS_MODULE,
};
static int cpcap_phy_get_ints_state(struct cpcap_phy_ddata *ddata,
struct cpcap_usb_ints_state *s)
{
int val, error;
error = regmap_read(ddata->reg, CPCAP_REG_INTS1, &val);
if (error)
return error;
s->id_ground = val & BIT(15);
s->id_float = val & BIT(14);
s->vbusov = val & BIT(11);
error = regmap_read(ddata->reg, CPCAP_REG_INTS2, &val);
if (error)
return error;
s->vbusvld = val & BIT(3);
s->sessvld = val & BIT(2);
s->sessend = val & BIT(1);
s->se1 = val & BIT(0);
error = regmap_read(ddata->reg, CPCAP_REG_INTS4, &val);
if (error)
return error;
s->dm = val & BIT(1);
s->dp = val & BIT(0);
return 0;
}
static int cpcap_usb_set_uart_mode(struct cpcap_phy_ddata *ddata);
static int cpcap_usb_set_usb_mode(struct cpcap_phy_ddata *ddata);
static void cpcap_usb_try_musb_mailbox(struct cpcap_phy_ddata *ddata,
enum musb_vbus_id_status status)
{
int error;
error = musb_mailbox(status);
if (!error)
return;
dev_dbg(ddata->dev, "%s: musb_mailbox failed: %i\n",
__func__, error);
}
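/*
 * Cable detection state machine, in the order checked below: keep feeding
 * VBUS while already acting as the USB A-host, keep signalling ID ground
 * while docked, detect a powered dock (VBUS present, ID grounded, VBUS not
 * provided by us), enter A-host mode on plain ID ground, fall back to
 * B-peripheral mode when only VBUS is seen, and otherwise drop back to the
 * debug UART mode.
 */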
static void cpcap_usb_detect(struct work_struct *work)
{
struct cpcap_phy_ddata *ddata;
struct cpcap_usb_ints_state s;
bool vbus = false;
int error;
ddata = container_of(work, struct cpcap_phy_ddata, detect_work.work);
error = cpcap_phy_get_ints_state(ddata, &s);
if (error)
return;
vbus = cpcap_usb_vbus_valid(ddata);
/* We need to kick the VBUS as USB A-host */
if (s.id_ground && ddata->vbus_provider) {
dev_dbg(ddata->dev, "still in USB A-host mode, kicking VBUS\n");
cpcap_usb_try_musb_mailbox(ddata, MUSB_ID_GROUND);
error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC3,
CPCAP_BIT_VBUSSTBY_EN |
CPCAP_BIT_VBUSEN_SPI,
CPCAP_BIT_VBUSEN_SPI);
if (error)
goto out_err;
return;
}
if (vbus && s.id_ground && ddata->docked) {
dev_dbg(ddata->dev, "still docked as A-host, signal ID down\n");
cpcap_usb_try_musb_mailbox(ddata, MUSB_ID_GROUND);
return;
}
/* No VBUS needed with docks */
if (vbus && s.id_ground && !ddata->vbus_provider) {
dev_dbg(ddata->dev, "connected to a dock\n");
ddata->docked = true;
error = cpcap_usb_set_usb_mode(ddata);
if (error)
goto out_err;
cpcap_usb_try_musb_mailbox(ddata, MUSB_ID_GROUND);
/*
* Force check state again after musb has reoriented,
		 * otherwise devices won't enumerate after loading the PHY
		 * driver.
*/
schedule_delayed_work(&ddata->detect_work,
msecs_to_jiffies(1000));
return;
}
if (s.id_ground && !ddata->docked) {
dev_dbg(ddata->dev, "id ground, USB host mode\n");
ddata->vbus_provider = true;
error = cpcap_usb_set_usb_mode(ddata);
if (error)
goto out_err;
cpcap_usb_try_musb_mailbox(ddata, MUSB_ID_GROUND);
error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC3,
CPCAP_BIT_VBUSSTBY_EN |
CPCAP_BIT_VBUSEN_SPI,
CPCAP_BIT_VBUSEN_SPI);
if (error)
goto out_err;
return;
}
error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC3,
CPCAP_BIT_VBUSSTBY_EN |
CPCAP_BIT_VBUSEN_SPI, 0);
if (error)
goto out_err;
vbus = cpcap_usb_vbus_valid(ddata);
/* Otherwise assume we're connected to a USB host */
if (vbus) {
dev_dbg(ddata->dev, "connected to USB host\n");
error = cpcap_usb_set_usb_mode(ddata);
if (error)
goto out_err;
cpcap_usb_try_musb_mailbox(ddata, MUSB_VBUS_VALID);
return;
}
ddata->vbus_provider = false;
ddata->docked = false;
cpcap_usb_try_musb_mailbox(ddata, MUSB_VBUS_OFF);
/* Default to debug UART mode */
error = cpcap_usb_set_uart_mode(ddata);
if (error)
goto out_err;
dev_dbg(ddata->dev, "set UART mode\n");
return;
out_err:
dev_err(ddata->dev, "error setting cable state: %i\n", error);
}
static irqreturn_t cpcap_phy_irq_thread(int irq, void *data)
{
struct cpcap_phy_ddata *ddata = data;
if (!atomic_read(&ddata->active))
return IRQ_NONE;
schedule_delayed_work(&ddata->detect_work, msecs_to_jiffies(1));
return IRQ_HANDLED;
}
static int cpcap_usb_init_irq(struct platform_device *pdev,
struct cpcap_phy_ddata *ddata,
const char *name)
{
int irq, error;
irq = platform_get_irq_byname(pdev, name);
if (irq < 0)
return -ENODEV;
error = devm_request_threaded_irq(ddata->dev, irq, NULL,
cpcap_phy_irq_thread,
IRQF_SHARED |
IRQF_ONESHOT,
name, ddata);
if (error) {
dev_err(ddata->dev, "could not get irq %s: %i\n",
name, error);
return error;
}
return 0;
}
static const char * const cpcap_phy_irqs[] = {
/* REG_INT_0 */
"id_ground", "id_float",
/* REG_INT1 */
"se0conn", "vbusvld", "sessvld", "sessend", "se1",
/* REG_INT_3 */
"dm", "dp",
};
static int cpcap_usb_init_interrupts(struct platform_device *pdev,
struct cpcap_phy_ddata *ddata)
{
int i, error;
for (i = 0; i < ARRAY_SIZE(cpcap_phy_irqs); i++) {
error = cpcap_usb_init_irq(pdev, ddata, cpcap_phy_irqs[i]);
if (error)
return error;
}
return 0;
}
/*
* Optional pins and modes. At least Motorola mapphone devices
* are using two GPIOs and dynamic pinctrl to multiplex PHY pins
* to UART, ULPI or UTMI mode.
*/
static int cpcap_usb_gpio_set_mode(struct cpcap_phy_ddata *ddata,
enum cpcap_gpio_mode mode)
{
if (!ddata->gpio[0] || !ddata->gpio[1])
return 0;
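	/*
	 * The two mode GPIOs carry the 2-bit mux value: e.g. CPCAP_OTG_DM_DP
	 * (3) drives both lines high while CPCAP_DM_DP (0) drives both low.
	 */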
gpiod_set_value(ddata->gpio[0], mode & 1);
gpiod_set_value(ddata->gpio[1], mode >> 1);
return 0;
}
static int cpcap_usb_set_uart_mode(struct cpcap_phy_ddata *ddata)
{
int error;
/* Disable lines to prevent glitches from waking up mdm6600 */
error = cpcap_usb_gpio_set_mode(ddata, CPCAP_UNKNOWN_DISABLED);
if (error)
goto out_err;
if (ddata->pins_uart) {
error = pinctrl_select_state(ddata->pins, ddata->pins_uart);
if (error)
goto out_err;
}
error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC1,
CPCAP_BIT_VBUSPD,
CPCAP_BIT_VBUSPD);
if (error)
goto out_err;
error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC2,
0xffff, CPCAP_BIT_UARTMUX0 |
CPCAP_BIT_EMUMODE0);
if (error)
goto out_err;
error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC3, 0x7fff,
CPCAP_BIT_IDPU_SPI);
if (error)
goto out_err;
/* Enable UART mode */
error = cpcap_usb_gpio_set_mode(ddata, CPCAP_DM_DP);
if (error)
goto out_err;
return 0;
out_err:
dev_err(ddata->dev, "%s failed with %i\n", __func__, error);
return error;
}
static int cpcap_usb_set_usb_mode(struct cpcap_phy_ddata *ddata)
{
int error;
/* Disable lines to prevent glitches from waking up mdm6600 */
error = cpcap_usb_gpio_set_mode(ddata, CPCAP_UNKNOWN_DISABLED);
if (error)
return error;
if (ddata->pins_utmi) {
error = pinctrl_select_state(ddata->pins, ddata->pins_utmi);
if (error) {
dev_err(ddata->dev, "could not set usb mode: %i\n",
error);
return error;
}
}
error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC1,
CPCAP_BIT_VBUSPD, 0);
if (error)
goto out_err;
error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC3,
CPCAP_BIT_PU_SPI |
CPCAP_BIT_DMPD_SPI |
CPCAP_BIT_DPPD_SPI |
CPCAP_BIT_SUSPEND_SPI |
CPCAP_BIT_ULPI_SPI_SEL, 0);
if (error)
goto out_err;
error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC2,
CPCAP_BIT_USBXCVREN,
CPCAP_BIT_USBXCVREN);
if (error)
goto out_err;
/* Enable USB mode */
error = cpcap_usb_gpio_set_mode(ddata, CPCAP_OTG_DM_DP);
if (error)
goto out_err;
return 0;
out_err:
dev_err(ddata->dev, "%s failed with %i\n", __func__, error);
return error;
}
static int cpcap_usb_init_optional_pins(struct cpcap_phy_ddata *ddata)
{
ddata->pins = devm_pinctrl_get(ddata->dev);
if (IS_ERR(ddata->pins)) {
dev_info(ddata->dev, "default pins not configured: %ld\n",
PTR_ERR(ddata->pins));
ddata->pins = NULL;
return 0;
}
ddata->pins_ulpi = pinctrl_lookup_state(ddata->pins, "ulpi");
if (IS_ERR(ddata->pins_ulpi)) {
dev_info(ddata->dev, "ulpi pins not configured\n");
ddata->pins_ulpi = NULL;
}
ddata->pins_utmi = pinctrl_lookup_state(ddata->pins, "utmi");
if (IS_ERR(ddata->pins_utmi)) {
dev_info(ddata->dev, "utmi pins not configured\n");
ddata->pins_utmi = NULL;
}
ddata->pins_uart = pinctrl_lookup_state(ddata->pins, "uart");
if (IS_ERR(ddata->pins_uart)) {
dev_info(ddata->dev, "uart pins not configured\n");
ddata->pins_uart = NULL;
}
if (ddata->pins_uart)
return pinctrl_select_state(ddata->pins, ddata->pins_uart);
return 0;
}
static void cpcap_usb_init_optional_gpios(struct cpcap_phy_ddata *ddata)
{
int i;
for (i = 0; i < 2; i++) {
ddata->gpio[i] = devm_gpiod_get_index(ddata->dev, "mode",
i, GPIOD_OUT_HIGH);
if (IS_ERR(ddata->gpio[i])) {
dev_info(ddata->dev, "no mode change GPIO%i: %li\n",
i, PTR_ERR(ddata->gpio[i]));
ddata->gpio[i] = NULL;
}
}
}
static int cpcap_usb_init_iio(struct cpcap_phy_ddata *ddata)
{
enum iio_chan_type type;
int error;
ddata->vbus = devm_iio_channel_get(ddata->dev, "vbus");
if (IS_ERR(ddata->vbus)) {
error = PTR_ERR(ddata->vbus);
goto out_err;
}
if (!ddata->vbus->indio_dev) {
error = -ENXIO;
goto out_err;
}
error = iio_get_channel_type(ddata->vbus, &type);
if (error < 0)
goto out_err;
if (type != IIO_VOLTAGE) {
error = -EINVAL;
goto out_err;
}
return 0;
out_err:
dev_err(ddata->dev, "could not initialize VBUS or ID IIO: %i\n",
error);
return error;
}
#ifdef CONFIG_OF
static const struct of_device_id cpcap_usb_phy_id_table[] = {
{
.compatible = "motorola,cpcap-usb-phy",
},
{
.compatible = "motorola,mapphone-cpcap-usb-phy",
},
{},
};
MODULE_DEVICE_TABLE(of, cpcap_usb_phy_id_table);
#endif
static int cpcap_usb_phy_probe(struct platform_device *pdev)
{
struct cpcap_phy_ddata *ddata;
struct phy *generic_phy;
struct phy_provider *phy_provider;
struct usb_otg *otg;
const struct of_device_id *of_id;
int error;
of_id = of_match_device(of_match_ptr(cpcap_usb_phy_id_table),
&pdev->dev);
if (!of_id)
return -EINVAL;
ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
if (!ddata)
return -ENOMEM;
ddata->reg = dev_get_regmap(pdev->dev.parent, NULL);
if (!ddata->reg)
return -ENODEV;
otg = devm_kzalloc(&pdev->dev, sizeof(*otg), GFP_KERNEL);
if (!otg)
return -ENOMEM;
ddata->dev = &pdev->dev;
ddata->phy.dev = ddata->dev;
ddata->phy.label = "cpcap_usb_phy";
ddata->phy.otg = otg;
ddata->phy.type = USB_PHY_TYPE_USB2;
otg->set_host = cpcap_usb_phy_set_host;
otg->set_peripheral = cpcap_usb_phy_set_peripheral;
otg->usb_phy = &ddata->phy;
INIT_DELAYED_WORK(&ddata->detect_work, cpcap_usb_detect);
platform_set_drvdata(pdev, ddata);
ddata->vusb = devm_regulator_get(&pdev->dev, "vusb");
if (IS_ERR(ddata->vusb))
return PTR_ERR(ddata->vusb);
error = regulator_enable(ddata->vusb);
if (error)
return error;
generic_phy = devm_phy_create(ddata->dev, NULL, &ops);
if (IS_ERR(generic_phy)) {
error = PTR_ERR(generic_phy);
goto out_reg_disable;
}
phy_set_drvdata(generic_phy, ddata);
phy_provider = devm_of_phy_provider_register(ddata->dev,
of_phy_simple_xlate);
if (IS_ERR(phy_provider)) {
error = PTR_ERR(phy_provider);
goto out_reg_disable;
}
error = cpcap_usb_init_optional_pins(ddata);
if (error)
goto out_reg_disable;
cpcap_usb_init_optional_gpios(ddata);
error = cpcap_usb_init_iio(ddata);
if (error)
goto out_reg_disable;
error = cpcap_usb_init_interrupts(pdev, ddata);
if (error)
goto out_reg_disable;
usb_add_phy_dev(&ddata->phy);
atomic_set(&ddata->active, 1);
schedule_delayed_work(&ddata->detect_work, msecs_to_jiffies(1));
return 0;
out_reg_disable:
regulator_disable(ddata->vusb);
return error;
}
static void cpcap_usb_phy_remove(struct platform_device *pdev)
{
struct cpcap_phy_ddata *ddata = platform_get_drvdata(pdev);
int error;
atomic_set(&ddata->active, 0);
error = cpcap_usb_set_uart_mode(ddata);
if (error)
dev_err(ddata->dev, "could not set UART mode\n");
cpcap_usb_try_musb_mailbox(ddata, MUSB_VBUS_OFF);
usb_remove_phy(&ddata->phy);
cancel_delayed_work_sync(&ddata->detect_work);
regulator_disable(ddata->vusb);
}
static struct platform_driver cpcap_usb_phy_driver = {
.probe = cpcap_usb_phy_probe,
.remove_new = cpcap_usb_phy_remove,
.driver = {
.name = "cpcap-usb-phy",
.of_match_table = of_match_ptr(cpcap_usb_phy_id_table),
},
};
module_platform_driver(cpcap_usb_phy_driver);
MODULE_ALIAS("platform:cpcap_usb");
MODULE_AUTHOR("Tony Lindgren <[email protected]>");
MODULE_DESCRIPTION("CPCAP usb phy driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/phy/motorola/phy-cpcap-usb.c |
// SPDX-License-Identifier: GPL-2.0
/*
* PCIe phy driver for Kirin 970
*
* Copyright (C) 2017 HiSilicon Electronics Co., Ltd.
* https://www.huawei.com
* Copyright (C) 2021 Huawei Technologies Co., Ltd.
* https://www.huawei.com
*
* Authors:
* Mauro Carvalho Chehab <[email protected]>
* Manivannan Sadhasivam <[email protected]>
*
* Based on:
* https://lore.kernel.org/lkml/4c9d6581478aa966698758c0420933f5defab4dd.1612335031.git.mchehab+huawei@kernel.org/
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#define AXI_CLK_FREQ 207500000
#define REF_CLK_FREQ 100000000
/* PCIe CTRL registers */
#define SOC_PCIECTRL_CTRL7_ADDR 0x01c
#define SOC_PCIECTRL_CTRL12_ADDR 0x030
#define SOC_PCIECTRL_CTRL20_ADDR 0x050
#define SOC_PCIECTRL_CTRL21_ADDR 0x054
#define PCIE_OUTPUT_PULL_BITS GENMASK(3, 0)
#define SOC_PCIECTRL_CTRL20_2P_MEM_CTRL 0x02605550
#define SOC_PCIECTRL_CTRL21_DEFAULT 0x20000070
#define PCIE_PULL_UP_SYS_AUX_PWR_DET BIT(10)
#define PCIE_OUTPUT_PULL_DOWN BIT(1)
/* PCIe PHY registers */
#define SOC_PCIEPHY_CTRL0_ADDR 0x000
#define SOC_PCIEPHY_CTRL1_ADDR 0x004
#define SOC_PCIEPHY_CTRL38_ADDR 0x0098
#define SOC_PCIEPHY_STATE0_ADDR 0x400
#define RAWLANEN_DIG_PCS_XF_TX_OVRD_IN_1 0xc004
#define SUP_DIG_LVL_OVRD_IN 0x003c
#define LANEN_DIG_ASIC_TX_OVRD_IN_1 0x4008
#define LANEN_DIG_ASIC_TX_OVRD_IN_2 0x400c
#define PCIEPHY_RESET_BIT BIT(17)
#define PCIEPHY_PIPE_LINE0_RESET_BIT BIT(19)
#define PCIE_TXDETECT_RX_FAIL BIT(2)
#define PCIE_CLK_SOURCE BIT(8)
#define PCIE_IS_CLOCK_STABLE BIT(19)
#define PCIE_PULL_DOWN_PHY_TEST_POWERDOWN BIT(22)
#define PCIE_DEASSERT_CONTROLLER_PERST BIT(2)
#define EYEPARAM_NOCFG 0xffffffff
#define EYE_PARM0_MASK GENMASK(8, 6)
#define EYE_PARM1_MASK GENMASK(11, 8)
#define EYE_PARM2_MASK GENMASK(5, 0)
#define EYE_PARM3_MASK GENMASK(12, 7)
#define EYE_PARM4_MASK GENMASK(14, 9)
#define EYE_PARM0_EN BIT(9)
#define EYE_PARM1_EN BIT(12)
#define EYE_PARM2_EN BIT(6)
#define EYE_PARM3_EN BIT(13)
#define EYE_PARM4_EN BIT(15)
/* hi3670 pciephy register */
#define APB_PHY_START_ADDR 0x40000
#define SOC_PCIEPHY_MMC1PLL_CTRL1 0xc04
#define SOC_PCIEPHY_MMC1PLL_CTRL16 0xC40
#define SOC_PCIEPHY_MMC1PLL_CTRL17 0xC44
#define SOC_PCIEPHY_MMC1PLL_CTRL20 0xC50
#define SOC_PCIEPHY_MMC1PLL_CTRL21 0xC54
#define SOC_PCIEPHY_MMC1PLL_STAT0 0xE00
#define CRGPERIPH_PEREN12 0x470
#define CRGPERIPH_PERDIS12 0x474
#define CRGPERIPH_PCIECTRL0 0x800
#define PCIE_FNPLL_FBDIV_MASK GENMASK(27, 16)
#define PCIE_FNPLL_FRACDIV_MASK GENMASK(23, 0)
#define PCIE_FNPLL_POSTDIV1_MASK GENMASK(10, 8)
#define PCIE_FNPLL_POSTDIV2_MASK GENMASK(14, 12)
#define PCIE_FNPLL_PLL_MODE_MASK BIT(25)
#define PCIE_FNPLL_DLL_EN BIT(27)
#define PCIE_FNPLL_FBDIV 0xd0
#define PCIE_FNPLL_FRACDIV 0x555555
#define PCIE_FNPLL_POSTDIV1 0x5
#define PCIE_FNPLL_POSTDIV2 0x4
#define PCIE_FNPLL_PLL_MODE 0x0
#define PCIE_PHY_MMC1PLL 0x20
#define PCIE_PHY_CHOOSE_FNPLL BIT(27)
#define PCIE_PHY_MMC1PLL_DISABLE BIT(0)
#define PCIE_PHY_PCIEPL_BP BIT(16)
/* define ie,oe cfg */
#define IO_OE_HARD_GT_MODE BIT(1)
#define IO_IE_EN_HARD_BYPASS BIT(27)
#define IO_OE_EN_HARD_BYPASS BIT(11)
#define IO_HARD_CTRL_DEBOUNCE_BYPASS BIT(10)
#define IO_OE_GT_MODE BIT(8)
#define DEBOUNCE_WAITCFG_IN GENMASK(23, 20)
#define DEBOUNCE_WAITCFG_OUT GENMASK(16, 13)
#define IO_HP_DEBOUNCE_GT (BIT(12) | BIT(15))
#define IO_PHYREF_SOFT_GT_MODE BIT(14)
#define IO_REF_SOFT_GT_MODE BIT(13)
#define IO_REF_HARD_GT_MODE BIT(0)
/* noc power domain */
#define NOC_POWER_IDLEREQ_1 0x38c
#define NOC_POWER_IDLE_1 0x394
#define NOC_PW_MASK 0x10000
#define NOC_PW_SET_BIT 0x1
#define NUM_EYEPARAM 5
/* info located in sysctrl */
#define SCTRL_PCIE_CMOS_OFFSET 0x60
#define SCTRL_PCIE_CMOS_BIT 0x10
#define SCTRL_PCIE_ISO_OFFSET 0x44
#define SCTRL_PCIE_ISO_BIT 0x30
#define SCTRL_PCIE_HPCLK_OFFSET 0x190
#define SCTRL_PCIE_HPCLK_BIT 0x184000
#define SCTRL_PCIE_OE_OFFSET 0x14a
#define PCIE_DEBOUNCE_PARAM 0xf0f400
#define PCIE_OE_BYPASS GENMASK(29, 28)
/* peri_crg ctrl */
#define CRGCTRL_PCIE_ASSERT_OFFSET 0x88
#define CRGCTRL_PCIE_ASSERT_BIT 0x8c000000
#define FNPLL_HAS_LOCKED BIT(4)
/* Time for delay */
#define TIME_CMOS_MIN 100
#define TIME_CMOS_MAX 105
#define PIPE_CLK_STABLE_TIME 100
#define PLL_CTRL_WAIT_TIME 200
#define NOC_POWER_TIME 100
struct hi3670_pcie_phy {
struct device *dev;
void __iomem *base;
struct regmap *apb;
struct regmap *crgctrl;
struct regmap *sysctrl;
struct regmap *pmctrl;
struct clk *apb_sys_clk;
struct clk *apb_phy_clk;
struct clk *phy_ref_clk;
struct clk *aclk;
struct clk *aux_clk;
u32 eye_param[NUM_EYEPARAM];
};
/* Registers in PCIePHY */
static inline void hi3670_apb_phy_writel(struct hi3670_pcie_phy *phy, u32 val,
u32 reg)
{
writel(val, phy->base + APB_PHY_START_ADDR + reg);
}
static inline u32 hi3670_apb_phy_readl(struct hi3670_pcie_phy *phy, u32 reg)
{
return readl(phy->base + APB_PHY_START_ADDR + reg);
}
static inline void hi3670_apb_phy_updatel(struct hi3670_pcie_phy *phy,
u32 val, u32 mask, u32 reg)
{
u32 regval;
regval = hi3670_apb_phy_readl(phy, reg);
regval &= ~mask;
regval |= val;
hi3670_apb_phy_writel(phy, regval, reg);
}
static inline void kirin_apb_natural_phy_writel(struct hi3670_pcie_phy *phy,
u32 val, u32 reg)
{
writel(val, phy->base + reg);
}
static inline u32 kirin_apb_natural_phy_readl(struct hi3670_pcie_phy *phy,
u32 reg)
{
return readl(phy->base + reg);
}
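/*
 * Two register windows are used above: the APB PHY block lives at
 * base + APB_PHY_START_ADDR (0x40000), while the raw PHY lane/supply
 * registers accessed through the "natural" helpers start at the base
 * address itself.
 */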
static void hi3670_pcie_phy_oe_enable(struct hi3670_pcie_phy *phy, bool enable)
{
u32 val;
regmap_read(phy->sysctrl, SCTRL_PCIE_OE_OFFSET, &val);
val |= PCIE_DEBOUNCE_PARAM;
if (enable)
val &= ~PCIE_OE_BYPASS;
else
val |= PCIE_OE_BYPASS;
regmap_write(phy->sysctrl, SCTRL_PCIE_OE_OFFSET, val);
}
static void hi3670_pcie_get_eyeparam(struct hi3670_pcie_phy *phy)
{
struct device *dev = phy->dev;
struct device_node *np;
int ret, i;
np = dev->of_node;
ret = of_property_read_u32_array(np, "hisilicon,eye-diagram-param",
phy->eye_param, NUM_EYEPARAM);
if (!ret)
return;
	/* The optional eye-diagram property is not set, fall back to defaults */
for (i = 0; i < NUM_EYEPARAM; i++)
phy->eye_param[i] = EYEPARAM_NOCFG;
}
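/*
 * Illustrative DT fragment (values are placeholders, not real board data):
 *
 *	hisilicon,eye-diagram-param = <0xffffffff 0xffffffff 0xffffffff
 *				       0xffffffff 0xffffffff>;
 *
 * Exactly NUM_EYEPARAM (5) cells are read; a cell of 0xffffffff
 * (EYEPARAM_NOCFG) leaves the corresponding analog override disabled.
 */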
static void hi3670_pcie_set_eyeparam(struct hi3670_pcie_phy *phy)
{
u32 val;
val = kirin_apb_natural_phy_readl(phy, RAWLANEN_DIG_PCS_XF_TX_OVRD_IN_1);
if (phy->eye_param[1] != EYEPARAM_NOCFG) {
val &= ~EYE_PARM1_MASK;
val |= FIELD_PREP(EYE_PARM1_MASK, phy->eye_param[1]);
val |= EYE_PARM1_EN;
}
kirin_apb_natural_phy_writel(phy, val,
RAWLANEN_DIG_PCS_XF_TX_OVRD_IN_1);
val = kirin_apb_natural_phy_readl(phy, LANEN_DIG_ASIC_TX_OVRD_IN_2);
val &= ~(EYE_PARM2_MASK | EYE_PARM3_MASK);
if (phy->eye_param[2] != EYEPARAM_NOCFG) {
val |= FIELD_PREP(EYE_PARM2_MASK, phy->eye_param[2]);
val |= EYE_PARM2_EN;
}
if (phy->eye_param[3] != EYEPARAM_NOCFG) {
val |= FIELD_PREP(EYE_PARM3_MASK, phy->eye_param[3]);
val |= EYE_PARM3_EN;
}
kirin_apb_natural_phy_writel(phy, val, LANEN_DIG_ASIC_TX_OVRD_IN_2);
val = kirin_apb_natural_phy_readl(phy, SUP_DIG_LVL_OVRD_IN);
if (phy->eye_param[0] != EYEPARAM_NOCFG) {
val &= ~EYE_PARM0_MASK;
val |= FIELD_PREP(EYE_PARM0_MASK, phy->eye_param[0]);
val |= EYE_PARM0_EN;
}
kirin_apb_natural_phy_writel(phy, val, SUP_DIG_LVL_OVRD_IN);
val = kirin_apb_natural_phy_readl(phy, LANEN_DIG_ASIC_TX_OVRD_IN_1);
if (phy->eye_param[4] != EYEPARAM_NOCFG) {
val &= ~EYE_PARM4_MASK;
val |= FIELD_PREP(EYE_PARM4_MASK, phy->eye_param[4]);
val |= EYE_PARM4_EN;
}
kirin_apb_natural_phy_writel(phy, val, LANEN_DIG_ASIC_TX_OVRD_IN_1);
}
static void hi3670_pcie_natural_cfg(struct hi3670_pcie_phy *phy)
{
u32 val;
/* change 2p mem_ctrl */
regmap_write(phy->apb, SOC_PCIECTRL_CTRL20_ADDR,
SOC_PCIECTRL_CTRL20_2P_MEM_CTRL);
regmap_read(phy->apb, SOC_PCIECTRL_CTRL7_ADDR, &val);
val |= PCIE_PULL_UP_SYS_AUX_PWR_DET;
regmap_write(phy->apb, SOC_PCIECTRL_CTRL7_ADDR, val);
/* output, pull down */
regmap_read(phy->apb, SOC_PCIECTRL_CTRL12_ADDR, &val);
val &= ~PCIE_OUTPUT_PULL_BITS;
val |= PCIE_OUTPUT_PULL_DOWN;
regmap_write(phy->apb, SOC_PCIECTRL_CTRL12_ADDR, val);
/* Handle phy_reset and lane0_reset to HW */
hi3670_apb_phy_updatel(phy, PCIEPHY_RESET_BIT,
PCIEPHY_PIPE_LINE0_RESET_BIT | PCIEPHY_RESET_BIT,
SOC_PCIEPHY_CTRL1_ADDR);
/* fix chip bug: TxDetectRx fail */
hi3670_apb_phy_updatel(phy, PCIE_TXDETECT_RX_FAIL, PCIE_TXDETECT_RX_FAIL,
SOC_PCIEPHY_CTRL38_ADDR);
}
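/*
 * Program the FNPLL: select it as the PHY PLL and set up its feedback,
 * fractional and post dividers before it is enabled in
 * hi3670_pcie_pll_ctrl().
 */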
static void hi3670_pcie_pll_init(struct hi3670_pcie_phy *phy)
{
hi3670_apb_phy_updatel(phy, PCIE_PHY_CHOOSE_FNPLL, PCIE_PHY_CHOOSE_FNPLL,
SOC_PCIEPHY_MMC1PLL_CTRL1);
hi3670_apb_phy_updatel(phy,
FIELD_PREP(PCIE_FNPLL_FBDIV_MASK, PCIE_FNPLL_FBDIV),
PCIE_FNPLL_FBDIV_MASK,
SOC_PCIEPHY_MMC1PLL_CTRL16);
hi3670_apb_phy_updatel(phy,
FIELD_PREP(PCIE_FNPLL_FRACDIV_MASK, PCIE_FNPLL_FRACDIV),
PCIE_FNPLL_FRACDIV_MASK, SOC_PCIEPHY_MMC1PLL_CTRL17);
hi3670_apb_phy_updatel(phy,
PCIE_FNPLL_DLL_EN |
FIELD_PREP(PCIE_FNPLL_POSTDIV1_MASK, PCIE_FNPLL_POSTDIV1) |
FIELD_PREP(PCIE_FNPLL_POSTDIV2_MASK, PCIE_FNPLL_POSTDIV2) |
FIELD_PREP(PCIE_FNPLL_PLL_MODE_MASK, PCIE_FNPLL_PLL_MODE),
PCIE_FNPLL_POSTDIV1_MASK |
PCIE_FNPLL_POSTDIV2_MASK |
PCIE_FNPLL_PLL_MODE_MASK | PCIE_FNPLL_DLL_EN,
SOC_PCIEPHY_MMC1PLL_CTRL20);
hi3670_apb_phy_writel(phy, PCIE_PHY_MMC1PLL,
SOC_PCIEPHY_MMC1PLL_CTRL21);
}
static int hi3670_pcie_pll_ctrl(struct hi3670_pcie_phy *phy, bool enable)
{
struct device *dev = phy->dev;
u32 val;
int time = PLL_CTRL_WAIT_TIME;
if (enable) {
/* pd = 0 */
hi3670_apb_phy_updatel(phy, 0, PCIE_PHY_MMC1PLL_DISABLE,
SOC_PCIEPHY_MMC1PLL_CTRL16);
/* choose FNPLL */
val = hi3670_apb_phy_readl(phy, SOC_PCIEPHY_MMC1PLL_STAT0);
while (!(val & FNPLL_HAS_LOCKED)) {
if (!time) {
dev_err(dev, "wait for pll_lock timeout\n");
return -EINVAL;
}
time--;
udelay(1);
val = hi3670_apb_phy_readl(phy, SOC_PCIEPHY_MMC1PLL_STAT0);
}
hi3670_apb_phy_updatel(phy, 0, PCIE_PHY_PCIEPL_BP,
SOC_PCIEPHY_MMC1PLL_CTRL20);
} else {
hi3670_apb_phy_updatel(phy,
PCIE_PHY_MMC1PLL_DISABLE,
PCIE_PHY_MMC1PLL_DISABLE,
SOC_PCIEPHY_MMC1PLL_CTRL16);
hi3670_apb_phy_updatel(phy, PCIE_PHY_PCIEPL_BP,
PCIE_PHY_PCIEPL_BP,
SOC_PCIEPHY_MMC1PLL_CTRL20);
}
return 0;
}
static void hi3670_pcie_hp_debounce_gt(struct hi3670_pcie_phy *phy, bool open)
{
if (open)
/* gt_clk_pcie_hp/gt_clk_pcie_debounce open */
regmap_write(phy->crgctrl, CRGPERIPH_PEREN12,
IO_HP_DEBOUNCE_GT);
else
/* gt_clk_pcie_hp/gt_clk_pcie_debounce close */
regmap_write(phy->crgctrl, CRGPERIPH_PERDIS12,
IO_HP_DEBOUNCE_GT);
}
static void hi3670_pcie_phyref_gt(struct hi3670_pcie_phy *phy, bool open)
{
unsigned int val;
regmap_read(phy->crgctrl, CRGPERIPH_PCIECTRL0, &val);
if (open)
val &= ~IO_OE_HARD_GT_MODE; /* enable hard gt mode */
else
val |= IO_OE_HARD_GT_MODE; /* disable hard gt mode */
regmap_write(phy->crgctrl, CRGPERIPH_PCIECTRL0, val);
/* disable soft gt mode */
regmap_write(phy->crgctrl, CRGPERIPH_PERDIS12, IO_PHYREF_SOFT_GT_MODE);
}
static void hi3670_pcie_oe_ctrl(struct hi3670_pcie_phy *phy, bool en_flag)
{
unsigned int val;
regmap_read(phy->crgctrl, CRGPERIPH_PCIECTRL0, &val);
/* set ie cfg */
val |= IO_IE_EN_HARD_BYPASS;
/* set oe cfg */
val &= ~IO_HARD_CTRL_DEBOUNCE_BYPASS;
/* set phy_debounce in&out time */
val |= (DEBOUNCE_WAITCFG_IN | DEBOUNCE_WAITCFG_OUT);
/* select oe_gt_mode */
val |= IO_OE_GT_MODE;
if (en_flag)
val &= ~IO_OE_EN_HARD_BYPASS;
else
val |= IO_OE_EN_HARD_BYPASS;
regmap_write(phy->crgctrl, CRGPERIPH_PCIECTRL0, val);
}
static void hi3670_pcie_ioref_gt(struct hi3670_pcie_phy *phy, bool open)
{
unsigned int val;
if (open) {
regmap_write(phy->apb, SOC_PCIECTRL_CTRL21_ADDR,
SOC_PCIECTRL_CTRL21_DEFAULT);
hi3670_pcie_oe_ctrl(phy, true);
/* en hard gt mode */
regmap_read(phy->crgctrl, CRGPERIPH_PCIECTRL0, &val);
val &= ~IO_REF_HARD_GT_MODE;
regmap_write(phy->crgctrl, CRGPERIPH_PCIECTRL0, val);
/* disable soft gt mode */
regmap_write(phy->crgctrl, CRGPERIPH_PERDIS12,
IO_REF_SOFT_GT_MODE);
} else {
/* disable hard gt mode */
regmap_read(phy->crgctrl, CRGPERIPH_PCIECTRL0, &val);
val |= IO_REF_HARD_GT_MODE;
regmap_write(phy->crgctrl, CRGPERIPH_PCIECTRL0, val);
/* disable soft gt mode */
regmap_write(phy->crgctrl, CRGPERIPH_PERDIS12,
IO_REF_SOFT_GT_MODE);
hi3670_pcie_oe_ctrl(phy, false);
}
}
static int hi3670_pcie_allclk_ctrl(struct hi3670_pcie_phy *phy, bool clk_on)
{
struct device *dev = phy->dev;
int ret = 0;
if (!clk_on)
goto close_clocks;
/* choose 100MHz clk src: Bit[8]==1 pad, Bit[8]==0 pll */
hi3670_apb_phy_updatel(phy, 0, PCIE_CLK_SOURCE,
SOC_PCIEPHY_CTRL1_ADDR);
hi3670_pcie_pll_init(phy);
ret = hi3670_pcie_pll_ctrl(phy, true);
if (ret) {
dev_err(dev, "Failed to enable pll\n");
return -EINVAL;
}
hi3670_pcie_hp_debounce_gt(phy, true);
hi3670_pcie_phyref_gt(phy, true);
hi3670_pcie_ioref_gt(phy, true);
ret = clk_set_rate(phy->aclk, AXI_CLK_FREQ);
if (ret) {
dev_err(dev, "Failed to set rate\n");
goto close_clocks;
}
return 0;
close_clocks:
hi3670_pcie_ioref_gt(phy, false);
hi3670_pcie_phyref_gt(phy, false);
hi3670_pcie_hp_debounce_gt(phy, false);
hi3670_pcie_pll_ctrl(phy, false);
return ret;
}
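/*
 * Poll the PHY state register until the PIPE clock settles; the status bit
 * stays set while the clock is still considered unstable.
 */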
static bool is_pipe_clk_stable(struct hi3670_pcie_phy *phy)
{
struct device *dev = phy->dev;
u32 val;
u32 time = PIPE_CLK_STABLE_TIME;
u32 pipe_clk_stable = PCIE_IS_CLOCK_STABLE;
val = hi3670_apb_phy_readl(phy, SOC_PCIEPHY_STATE0_ADDR);
while (val & pipe_clk_stable) {
mdelay(1);
if (!time) {
dev_err(dev, "PIPE clk is not stable\n");
return false;
}
time--;
val = hi3670_apb_phy_readl(phy, SOC_PCIEPHY_STATE0_ADDR);
}
return true;
}
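/*
 * Update the NoC power idle request for the PCIe domain through pmctrl and
 * poll the idle status until it reflects the requested state.
 */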
static int hi3670_pcie_noc_power(struct hi3670_pcie_phy *phy, bool enable)
{
struct device *dev = phy->dev;
u32 time = NOC_POWER_TIME;
unsigned int val = NOC_PW_MASK;
int rst;
if (enable)
val = NOC_PW_MASK | NOC_PW_SET_BIT;
else
val = NOC_PW_MASK;
rst = enable ? 1 : 0;
regmap_write(phy->pmctrl, NOC_POWER_IDLEREQ_1, val);
time = NOC_POWER_TIME;
regmap_read(phy->pmctrl, NOC_POWER_IDLE_1, &val);
while ((val & NOC_PW_SET_BIT) != rst) {
udelay(10);
if (!time) {
dev_err(dev, "Failed to reverse noc power-status\n");
return -EINVAL;
}
time--;
regmap_read(phy->pmctrl, NOC_POWER_IDLE_1, &val);
}
return 0;
}
static int hi3670_pcie_get_resources_from_pcie(struct hi3670_pcie_phy *phy)
{
struct device_node *pcie_port;
struct device *dev = phy->dev;
struct device *pcie_dev;
pcie_port = of_get_child_by_name(dev->parent->of_node, "pcie");
if (!pcie_port) {
dev_err(dev, "no pcie node found in %s\n",
dev->parent->of_node->full_name);
return -ENODEV;
}
pcie_dev = bus_find_device_by_of_node(&platform_bus_type, pcie_port);
if (!pcie_dev) {
dev_err(dev, "Didn't find pcie device\n");
return -ENODEV;
}
/*
	 * We might just use NULL instead of the APB name, as the
	 * pcie-kirin driver currently registers just one regmap directly
	 * (although the DWC driver registers other regmaps).
	 *
	 * Yet, it sounds safer to ensure that we will be accessing the
	 * right regmap. So, let's use the named version.
*/
phy->apb = dev_get_regmap(pcie_dev, "kirin_pcie_apb");
if (!phy->apb) {
dev_err(dev, "Failed to get APB regmap\n");
return -ENODEV;
}
return 0;
}
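/*
 * Enable or disable the PHY clocks. The same label chain serves both as the
 * error-unwind path while enabling and as the full disable path when called
 * with enable == false.
 */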
static int kirin_pcie_clk_ctrl(struct hi3670_pcie_phy *phy, bool enable)
{
int ret = 0;
if (!enable)
goto close_clk;
ret = clk_set_rate(phy->phy_ref_clk, REF_CLK_FREQ);
if (ret)
return ret;
ret = clk_prepare_enable(phy->phy_ref_clk);
if (ret)
return ret;
ret = clk_prepare_enable(phy->apb_sys_clk);
if (ret)
goto apb_sys_fail;
ret = clk_prepare_enable(phy->apb_phy_clk);
if (ret)
goto apb_phy_fail;
ret = clk_prepare_enable(phy->aclk);
if (ret)
goto aclk_fail;
ret = clk_prepare_enable(phy->aux_clk);
if (ret)
goto aux_clk_fail;
return 0;
close_clk:
clk_disable_unprepare(phy->aux_clk);
aux_clk_fail:
clk_disable_unprepare(phy->aclk);
aclk_fail:
clk_disable_unprepare(phy->apb_phy_clk);
apb_phy_fail:
clk_disable_unprepare(phy->apb_sys_clk);
apb_sys_fail:
clk_disable_unprepare(phy->phy_ref_clk);
return ret;
}
static int hi3670_pcie_phy_init(struct phy *generic_phy)
{
struct hi3670_pcie_phy *phy = phy_get_drvdata(generic_phy);
int ret;
/*
	 * The code under hi3670_pcie_get_resources_from_pcie() needs to
	 * access the reset-gpios and the APB registers, both owned by the
	 * pcie-kirin driver.
	 *
	 * The APB is obtained via the pcie driver's regmap.
	 * Such a resource can only be obtained during the PCIe power_on
	 * sequence, as the code inside pcie-kirin needs to be already
	 * probed, since it is the one registering the APB regmap.
*/
ret = hi3670_pcie_get_resources_from_pcie(phy);
if (ret)
return ret;
return 0;
}
static int hi3670_pcie_phy_power_on(struct phy *generic_phy)
{
struct hi3670_pcie_phy *phy = phy_get_drvdata(generic_phy);
int val, ret;
/* Power supply for Host */
regmap_write(phy->sysctrl, SCTRL_PCIE_CMOS_OFFSET, SCTRL_PCIE_CMOS_BIT);
usleep_range(TIME_CMOS_MIN, TIME_CMOS_MAX);
hi3670_pcie_phy_oe_enable(phy, true);
ret = kirin_pcie_clk_ctrl(phy, true);
if (ret)
return ret;
/* ISO disable, PCIeCtrl, PHY assert and clk gate clear */
regmap_write(phy->sysctrl, SCTRL_PCIE_ISO_OFFSET, SCTRL_PCIE_ISO_BIT);
regmap_write(phy->crgctrl, CRGCTRL_PCIE_ASSERT_OFFSET,
CRGCTRL_PCIE_ASSERT_BIT);
regmap_write(phy->sysctrl, SCTRL_PCIE_HPCLK_OFFSET,
SCTRL_PCIE_HPCLK_BIT);
hi3670_pcie_natural_cfg(phy);
ret = hi3670_pcie_allclk_ctrl(phy, true);
if (ret)
goto disable_clks;
/* pull down phy_test_powerdown signal */
hi3670_apb_phy_updatel(phy, 0, PCIE_PULL_DOWN_PHY_TEST_POWERDOWN,
SOC_PCIEPHY_CTRL0_ADDR);
/* deassert controller perst_n */
regmap_read(phy->apb, SOC_PCIECTRL_CTRL12_ADDR, &val);
val |= PCIE_DEASSERT_CONTROLLER_PERST;
regmap_write(phy->apb, SOC_PCIECTRL_CTRL12_ADDR, val);
udelay(10);
ret = is_pipe_clk_stable(phy);
if (!ret)
goto disable_clks;
hi3670_pcie_set_eyeparam(phy);
ret = hi3670_pcie_noc_power(phy, false);
if (ret)
goto disable_clks;
return 0;
disable_clks:
kirin_pcie_clk_ctrl(phy, false);
return ret;
}
static int hi3670_pcie_phy_power_off(struct phy *generic_phy)
{
struct hi3670_pcie_phy *phy = phy_get_drvdata(generic_phy);
hi3670_pcie_phy_oe_enable(phy, false);
hi3670_pcie_allclk_ctrl(phy, false);
/* Drop power supply for Host */
regmap_write(phy->sysctrl, SCTRL_PCIE_CMOS_OFFSET, 0);
/*
* FIXME: The enabled clocks should be disabled here by calling
* kirin_pcie_clk_ctrl(phy, false);
	 * However, some clocks used on Kirin 970 should be marked as
	 * CLK_IS_CRITICAL in the clk-hi3670 driver, as powering such clocks
	 * off causes an Asynchronous SError interrupt, which produces a panic().
	 * Until clk-hi3670 is fixed, we cannot risk disabling clocks here.
*/
return 0;
}
static const struct phy_ops hi3670_phy_ops = {
.init = hi3670_pcie_phy_init,
.power_on = hi3670_pcie_phy_power_on,
.power_off = hi3670_pcie_phy_power_off,
.owner = THIS_MODULE,
};
static int hi3670_pcie_phy_get_resources(struct hi3670_pcie_phy *phy,
struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
/* syscon */
phy->crgctrl = syscon_regmap_lookup_by_compatible("hisilicon,hi3670-crgctrl");
if (IS_ERR(phy->crgctrl))
return PTR_ERR(phy->crgctrl);
phy->sysctrl = syscon_regmap_lookup_by_compatible("hisilicon,hi3670-sctrl");
if (IS_ERR(phy->sysctrl))
return PTR_ERR(phy->sysctrl);
phy->pmctrl = syscon_regmap_lookup_by_compatible("hisilicon,hi3670-pmctrl");
if (IS_ERR(phy->pmctrl))
return PTR_ERR(phy->pmctrl);
/* clocks */
phy->phy_ref_clk = devm_clk_get(dev, "phy_ref");
if (IS_ERR(phy->phy_ref_clk))
return PTR_ERR(phy->phy_ref_clk);
phy->aux_clk = devm_clk_get(dev, "aux");
if (IS_ERR(phy->aux_clk))
return PTR_ERR(phy->aux_clk);
phy->apb_phy_clk = devm_clk_get(dev, "apb_phy");
if (IS_ERR(phy->apb_phy_clk))
return PTR_ERR(phy->apb_phy_clk);
phy->apb_sys_clk = devm_clk_get(dev, "apb_sys");
if (IS_ERR(phy->apb_sys_clk))
return PTR_ERR(phy->apb_sys_clk);
phy->aclk = devm_clk_get(dev, "aclk");
if (IS_ERR(phy->aclk))
return PTR_ERR(phy->aclk);
/* registers */
phy->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(phy->base))
return PTR_ERR(phy->base);
hi3670_pcie_get_eyeparam(phy);
return 0;
}
static int hi3670_pcie_phy_probe(struct platform_device *pdev)
{
struct phy_provider *phy_provider;
struct device *dev = &pdev->dev;
struct hi3670_pcie_phy *phy;
struct phy *generic_phy;
int ret;
phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
if (!phy)
return -ENOMEM;
phy->dev = dev;
ret = hi3670_pcie_phy_get_resources(phy, pdev);
if (ret)
return ret;
generic_phy = devm_phy_create(dev, dev->of_node, &hi3670_phy_ops);
if (IS_ERR(generic_phy)) {
dev_err(dev, "failed to create PHY\n");
return PTR_ERR(generic_phy);
}
phy_set_drvdata(generic_phy, phy);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
return PTR_ERR_OR_ZERO(phy_provider);
}
static const struct of_device_id hi3670_pcie_phy_match[] = {
{
.compatible = "hisilicon,hi970-pcie-phy",
},
{},
};
static struct platform_driver hi3670_pcie_phy_driver = {
.probe = hi3670_pcie_phy_probe,
.driver = {
.of_match_table = hi3670_pcie_phy_match,
.name = "hi3670_pcie_phy",
.suppress_bind_attrs = true,
}
};
builtin_platform_driver(hi3670_pcie_phy_driver);
MODULE_DEVICE_TABLE(of, hi3670_pcie_phy_match);
MODULE_DESCRIPTION("PCIe phy driver for Kirin 970");
MODULE_AUTHOR("Mauro Carvalho Chehab <[email protected]>");
MODULE_AUTHOR("Manivannan Sadhasivam <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/phy/hisilicon/phy-hi3670-pcie.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* HiSilicon INNO USB2 PHY Driver.
*
* Copyright (c) 2016-2017 HiSilicon Technologies Co., Ltd.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#define INNO_PHY_PORT_NUM 2
#define REF_CLK_STABLE_TIME 100 /* unit:us */
#define UTMI_CLK_STABLE_TIME 200 /* unit:us */
#define TEST_CLK_STABLE_TIME 2 /* unit:ms */
#define PHY_CLK_STABLE_TIME 2 /* unit:ms */
#define UTMI_RST_COMPLETE_TIME 2 /* unit:ms */
#define POR_RST_COMPLETE_TIME 300 /* unit:us */
#define PHY_TYPE_0 0
#define PHY_TYPE_1 1
#define PHY_TEST_DATA GENMASK(7, 0)
#define PHY_TEST_ADDR_OFFSET 8
#define PHY0_TEST_ADDR GENMASK(15, 8)
#define PHY0_TEST_PORT_OFFSET 16
#define PHY0_TEST_PORT GENMASK(18, 16)
#define PHY0_TEST_WREN BIT(21)
#define PHY0_TEST_CLK BIT(22) /* rising edge active */
#define PHY0_TEST_RST BIT(23) /* low active */
#define PHY1_TEST_ADDR GENMASK(11, 8)
#define PHY1_TEST_PORT_OFFSET 12
#define PHY1_TEST_PORT BIT(12)
#define PHY1_TEST_WREN BIT(13)
#define PHY1_TEST_CLK BIT(14) /* rising edge active */
#define PHY1_TEST_RST BIT(15) /* low active */
#define PHY_CLK_ENABLE BIT(2)
struct hisi_inno_phy_port {
struct reset_control *utmi_rst;
struct hisi_inno_phy_priv *priv;
};
struct hisi_inno_phy_priv {
void __iomem *mmio;
struct clk *ref_clk;
struct reset_control *por_rst;
unsigned int type;
struct hisi_inno_phy_port ports[INNO_PHY_PORT_NUM];
};
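/*
 * Write a PHY register through the test interface: data, address and port
 * are set up with the (low-active) test-reset bit deasserted, then the
 * test-clock bit is pulsed high and back low to latch the write on its
 * rising edge.
 */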
static void hisi_inno_phy_write_reg(struct hisi_inno_phy_priv *priv,
u8 port, u32 addr, u32 data)
{
void __iomem *reg = priv->mmio;
u32 val;
u32 value;
if (priv->type == PHY_TYPE_0)
val = (data & PHY_TEST_DATA) |
((addr << PHY_TEST_ADDR_OFFSET) & PHY0_TEST_ADDR) |
((port << PHY0_TEST_PORT_OFFSET) & PHY0_TEST_PORT) |
PHY0_TEST_WREN | PHY0_TEST_RST;
else
val = (data & PHY_TEST_DATA) |
((addr << PHY_TEST_ADDR_OFFSET) & PHY1_TEST_ADDR) |
((port << PHY1_TEST_PORT_OFFSET) & PHY1_TEST_PORT) |
PHY1_TEST_WREN | PHY1_TEST_RST;
writel(val, reg);
value = val;
if (priv->type == PHY_TYPE_0)
value |= PHY0_TEST_CLK;
else
value |= PHY1_TEST_CLK;
writel(value, reg);
writel(val, reg);
}
static void hisi_inno_phy_setup(struct hisi_inno_phy_priv *priv)
{
/* The phy clk is controlled by the port0 register 0x06. */
hisi_inno_phy_write_reg(priv, 0, 0x06, PHY_CLK_ENABLE);
msleep(PHY_CLK_STABLE_TIME);
}
static int hisi_inno_phy_init(struct phy *phy)
{
struct hisi_inno_phy_port *port = phy_get_drvdata(phy);
struct hisi_inno_phy_priv *priv = port->priv;
int ret;
ret = clk_prepare_enable(priv->ref_clk);
if (ret)
return ret;
udelay(REF_CLK_STABLE_TIME);
reset_control_deassert(priv->por_rst);
udelay(POR_RST_COMPLETE_TIME);
/* Set up phy registers */
hisi_inno_phy_setup(priv);
reset_control_deassert(port->utmi_rst);
udelay(UTMI_RST_COMPLETE_TIME);
return 0;
}
static int hisi_inno_phy_exit(struct phy *phy)
{
struct hisi_inno_phy_port *port = phy_get_drvdata(phy);
struct hisi_inno_phy_priv *priv = port->priv;
reset_control_assert(port->utmi_rst);
reset_control_assert(priv->por_rst);
clk_disable_unprepare(priv->ref_clk);
return 0;
}
static const struct phy_ops hisi_inno_phy_ops = {
.init = hisi_inno_phy_init,
.exit = hisi_inno_phy_exit,
.owner = THIS_MODULE,
};
static int hisi_inno_phy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct hisi_inno_phy_priv *priv;
struct phy_provider *provider;
struct device_node *child;
int i = 0;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->mmio)) {
ret = PTR_ERR(priv->mmio);
return ret;
}
priv->ref_clk = devm_clk_get(dev, NULL);
if (IS_ERR(priv->ref_clk))
return PTR_ERR(priv->ref_clk);
priv->por_rst = devm_reset_control_get_exclusive(dev, NULL);
if (IS_ERR(priv->por_rst))
return PTR_ERR(priv->por_rst);
priv->type = (uintptr_t) of_device_get_match_data(dev);
for_each_child_of_node(np, child) {
struct reset_control *rst;
struct phy *phy;
rst = of_reset_control_get_exclusive(child, NULL);
if (IS_ERR(rst)) {
of_node_put(child);
return PTR_ERR(rst);
}
priv->ports[i].utmi_rst = rst;
priv->ports[i].priv = priv;
phy = devm_phy_create(dev, child, &hisi_inno_phy_ops);
if (IS_ERR(phy)) {
of_node_put(child);
return PTR_ERR(phy);
}
phy_set_bus_width(phy, 8);
phy_set_drvdata(phy, &priv->ports[i]);
i++;
if (i >= INNO_PHY_PORT_NUM) {
dev_warn(dev, "Support %d ports in maximum\n", i);
of_node_put(child);
break;
}
}
provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
return PTR_ERR_OR_ZERO(provider);
}
static const struct of_device_id hisi_inno_phy_of_match[] = {
{ .compatible = "hisilicon,inno-usb2-phy",
.data = (void *) PHY_TYPE_0 },
{ .compatible = "hisilicon,hi3798cv200-usb2-phy",
.data = (void *) PHY_TYPE_0 },
{ .compatible = "hisilicon,hi3798mv100-usb2-phy",
.data = (void *) PHY_TYPE_1 },
{ },
};
MODULE_DEVICE_TABLE(of, hisi_inno_phy_of_match);
static struct platform_driver hisi_inno_phy_driver = {
.probe = hisi_inno_phy_probe,
.driver = {
.name = "hisi-inno-phy",
.of_match_table = hisi_inno_phy_of_match,
}
};
module_platform_driver(hisi_inno_phy_driver);
MODULE_DESCRIPTION("HiSilicon INNO USB2 PHY Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/phy/hisilicon/phy-hisi-inno-usb2.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Phy provider for USB 3.0 controller on HiSilicon 3660 platform
*
 * Copyright (C) 2017-2018 HiSilicon Electronics Co., Ltd.
* http://www.huawei.com
*
* Authors: Yu Chen <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#define PERI_CRG_CLK_EN4 0x40
#define PERI_CRG_CLK_DIS4 0x44
#define GT_CLK_USB3OTG_REF BIT(0)
#define GT_ACLK_USB3OTG BIT(1)
#define PERI_CRG_RSTEN4 0x90
#define PERI_CRG_RSTDIS4 0x94
#define IP_RST_USB3OTGPHY_POR BIT(3)
#define IP_RST_USB3OTG BIT(5)
#define PERI_CRG_ISODIS 0x148
#define USB_REFCLK_ISO_EN BIT(25)
#define PCTRL_PERI_CTRL3 0x10
#define PCTRL_PERI_CTRL3_MSK_START 16
#define USB_TCXO_EN BIT(1)
#define PCTRL_PERI_CTRL24 0x64
#define SC_CLK_USB3PHY_3MUX1_SEL BIT(25)
#define USBOTG3_CTRL0 0x00
#define SC_USB3PHY_ABB_GT_EN BIT(15)
#define USBOTG3_CTRL2 0x08
#define USBOTG3CTRL2_POWERDOWN_HSP BIT(0)
#define USBOTG3CTRL2_POWERDOWN_SSP BIT(1)
#define USBOTG3_CTRL3 0x0C
#define USBOTG3_CTRL3_VBUSVLDEXT BIT(6)
#define USBOTG3_CTRL3_VBUSVLDEXTSEL BIT(5)
#define USBOTG3_CTRL4 0x10
#define USBOTG3_CTRL7 0x1c
#define REF_SSP_EN BIT(16)
/* This value configures the default txtune parameter of the USB 2.0 phy */
#define HI3660_USB_DEFAULT_PHY_PARAM 0x1c466e3
struct hi3660_priv {
struct device *dev;
struct regmap *peri_crg;
struct regmap *pctrl;
struct regmap *otg_bc;
u32 eye_diagram_param;
};
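/*
 * Init sequence: drop the refclk isolation, enable the TCXO, assert the
 * PHY/controller resets, enable the reference clocks, leave IDDQ mode,
 * deassert the resets, fake the VBUS valid signal and finally program the
 * eye-diagram parameter.
 */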
static int hi3660_phy_init(struct phy *phy)
{
struct hi3660_priv *priv = phy_get_drvdata(phy);
u32 val, mask;
int ret;
/* usb refclk iso disable */
ret = regmap_write(priv->peri_crg, PERI_CRG_ISODIS, USB_REFCLK_ISO_EN);
if (ret)
goto out;
/* enable usb_tcxo_en */
val = USB_TCXO_EN | (USB_TCXO_EN << PCTRL_PERI_CTRL3_MSK_START);
ret = regmap_write(priv->pctrl, PCTRL_PERI_CTRL3, val);
if (ret)
goto out;
/* assert phy */
val = IP_RST_USB3OTGPHY_POR | IP_RST_USB3OTG;
ret = regmap_write(priv->peri_crg, PERI_CRG_RSTEN4, val);
if (ret)
goto out;
/* enable phy ref clk */
val = SC_USB3PHY_ABB_GT_EN;
mask = val;
ret = regmap_update_bits(priv->otg_bc, USBOTG3_CTRL0, mask, val);
if (ret)
goto out;
val = REF_SSP_EN;
mask = val;
ret = regmap_update_bits(priv->otg_bc, USBOTG3_CTRL7, mask, val);
if (ret)
goto out;
/* exit from IDDQ mode */
mask = USBOTG3CTRL2_POWERDOWN_HSP | USBOTG3CTRL2_POWERDOWN_SSP;
ret = regmap_update_bits(priv->otg_bc, USBOTG3_CTRL2, mask, 0);
if (ret)
goto out;
/* delay for exit from IDDQ mode */
usleep_range(100, 120);
/* deassert phy */
val = IP_RST_USB3OTGPHY_POR | IP_RST_USB3OTG;
ret = regmap_write(priv->peri_crg, PERI_CRG_RSTDIS4, val);
if (ret)
goto out;
/* delay for phy deasserted */
usleep_range(10000, 15000);
/* fake vbus valid signal */
val = USBOTG3_CTRL3_VBUSVLDEXT | USBOTG3_CTRL3_VBUSVLDEXTSEL;
mask = val;
ret = regmap_update_bits(priv->otg_bc, USBOTG3_CTRL3, mask, val);
if (ret)
goto out;
/* delay for vbus valid */
usleep_range(100, 120);
ret = regmap_write(priv->otg_bc, USBOTG3_CTRL4,
priv->eye_diagram_param);
if (ret)
goto out;
return 0;
out:
dev_err(priv->dev, "failed to init phy ret: %d\n", ret);
return ret;
}
static int hi3660_phy_exit(struct phy *phy)
{
struct hi3660_priv *priv = phy_get_drvdata(phy);
u32 val;
int ret;
/* assert phy */
val = IP_RST_USB3OTGPHY_POR;
ret = regmap_write(priv->peri_crg, PERI_CRG_RSTEN4, val);
if (ret)
goto out;
/* disable usb_tcxo_en */
val = USB_TCXO_EN << PCTRL_PERI_CTRL3_MSK_START;
ret = regmap_write(priv->pctrl, PCTRL_PERI_CTRL3, val);
if (ret)
goto out;
return 0;
out:
dev_err(priv->dev, "failed to exit phy ret: %d\n", ret);
return ret;
}
static const struct phy_ops hi3660_phy_ops = {
.init = hi3660_phy_init,
.exit = hi3660_phy_exit,
.owner = THIS_MODULE,
};
static int hi3660_phy_probe(struct platform_device *pdev)
{
struct phy_provider *phy_provider;
struct device *dev = &pdev->dev;
struct phy *phy;
struct hi3660_priv *priv;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->dev = dev;
priv->peri_crg = syscon_regmap_lookup_by_phandle(dev->of_node,
"hisilicon,pericrg-syscon");
if (IS_ERR(priv->peri_crg)) {
dev_err(dev, "no hisilicon,pericrg-syscon\n");
return PTR_ERR(priv->peri_crg);
}
priv->pctrl = syscon_regmap_lookup_by_phandle(dev->of_node,
"hisilicon,pctrl-syscon");
if (IS_ERR(priv->pctrl)) {
dev_err(dev, "no hisilicon,pctrl-syscon\n");
return PTR_ERR(priv->pctrl);
}
/* node of hi3660 phy is a sub-node of usb3_otg_bc */
priv->otg_bc = syscon_node_to_regmap(dev->parent->of_node);
if (IS_ERR(priv->otg_bc)) {
dev_err(dev, "no hisilicon,usb3-otg-bc-syscon\n");
return PTR_ERR(priv->otg_bc);
}
if (of_property_read_u32(dev->of_node, "hisilicon,eye-diagram-param",
&(priv->eye_diagram_param)))
priv->eye_diagram_param = HI3660_USB_DEFAULT_PHY_PARAM;
phy = devm_phy_create(dev, NULL, &hi3660_phy_ops);
if (IS_ERR(phy))
return PTR_ERR(phy);
phy_set_drvdata(phy, priv);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
return PTR_ERR_OR_ZERO(phy_provider);
}
static const struct of_device_id hi3660_phy_of_match[] = {
{.compatible = "hisilicon,hi3660-usb-phy",},
{ }
};
MODULE_DEVICE_TABLE(of, hi3660_phy_of_match);
static struct platform_driver hi3660_phy_driver = {
.probe = hi3660_phy_probe,
.driver = {
.name = "hi3660-usb-phy",
.of_match_table = hi3660_phy_of_match,
}
};
module_platform_driver(hi3660_phy_driver);
MODULE_AUTHOR("Yu Chen <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Hilisicon Hi3660 USB3 PHY Driver");
| linux-master | drivers/phy/hisilicon/phy-hi3660-usb3.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Phy provider for USB 3.1 controller on HiSilicon Kirin970 platform
*
 * Copyright (C) 2017-2020 HiSilicon Electronics Co., Ltd.
* http://www.huawei.com
*
* Authors: Yu Chen <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#define SCTRL_SCDEEPSLEEPED (0x0)
#define USB_CLK_SELECTED BIT(20)
#define PERI_CRG_PEREN0 (0x00)
#define PERI_CRG_PERDIS0 (0x04)
#define PERI_CRG_PEREN4 (0x40)
#define PERI_CRG_PERDIS4 (0x44)
#define PERI_CRG_PERRSTEN4 (0x90)
#define PERI_CRG_PERRSTDIS4 (0x94)
#define PERI_CRG_ISODIS (0x148)
#define PERI_CRG_PEREN6 (0x410)
#define PERI_CRG_PERDIS6 (0x414)
#define USB_REFCLK_ISO_EN BIT(25)
#define GT_CLK_USB2PHY_REF BIT(19)
#define PCTRL_PERI_CTRL3 (0x10)
#define PCTRL_PERI_CTRL3_MSK_START (16)
#define USB_TCXO_EN BIT(1)
#define PCTRL_PERI_CTRL24 (0x64)
#define SC_CLK_USB3PHY_3MUX1_SEL BIT(25)
#define USB3OTG_CTRL0 (0x00)
#define USB3OTG_CTRL3 (0x0c)
#define USB3OTG_CTRL4 (0x10)
#define USB3OTG_CTRL5 (0x14)
#define USB3OTG_CTRL7 (0x1c)
#define USB_MISC_CFG50 (0x50)
#define USB_MISC_CFG54 (0x54)
#define USB_MISC_CFG58 (0x58)
#define USB_MISC_CFG5C (0x5c)
#define USB_MISC_CFGA0 (0xa0)
#define TCA_CLK_RST (0x200)
#define TCA_INTR_EN (0x204)
#define TCA_INTR_STS (0x208)
#define TCA_GCFG (0x210)
#define TCA_TCPC (0x214)
#define TCA_SYSMODE_CFG (0x218)
#define TCA_VBUS_CTRL (0x240)
#define CTRL0_USB3_VBUSVLD BIT(7)
#define CTRL0_USB3_VBUSVLD_SEL BIT(6)
#define CTRL3_USB2_VBUSVLDEXT0 BIT(6)
#define CTRL3_USB2_VBUSVLDEXTSEL0 BIT(5)
#define CTRL5_USB2_SIDDQ BIT(0)
#define CTRL7_USB2_REFCLKSEL_MASK GENMASK(4, 3)
#define CTRL7_USB2_REFCLKSEL_ABB (BIT(4) | BIT(3))
#define CTRL7_USB2_REFCLKSEL_PAD BIT(4)
#define CFG50_USB3_PHY_TEST_POWERDOWN BIT(23)
#define CFG54_USB31PHY_CR_ADDR_MASK GENMASK(31, 16)
#define CFG54_USB3PHY_REF_USE_PAD BIT(12)
#define CFG54_PHY0_PMA_PWR_STABLE BIT(11)
#define CFG54_PHY0_PCS_PWR_STABLE BIT(9)
#define CFG54_USB31PHY_CR_ACK BIT(7)
#define CFG54_USB31PHY_CR_WR_EN BIT(5)
#define CFG54_USB31PHY_CR_SEL BIT(4)
#define CFG54_USB31PHY_CR_RD_EN BIT(3)
#define CFG54_USB31PHY_CR_CLK BIT(2)
#define CFG54_USB3_PHY0_ANA_PWR_EN BIT(1)
#define CFG58_USB31PHY_CR_DATA_MASK GENMASK(31, 16)
#define CFG5C_USB3_PHY0_SS_MPLLA_SSC_EN BIT(1)
#define CFGA0_VAUX_RESET BIT(9)
#define CFGA0_USB31C_RESET BIT(8)
#define CFGA0_USB2PHY_REFCLK_SELECT BIT(4)
#define CFGA0_USB3PHY_RESET BIT(1)
#define CFGA0_USB2PHY_POR BIT(0)
#define INTR_EN_XA_TIMEOUT_EVT_EN BIT(1)
#define INTR_EN_XA_ACK_EVT_EN BIT(0)
#define CLK_RST_TCA_REF_CLK_EN BIT(1)
#define CLK_RST_SUSPEND_CLK_EN BIT(0)
#define GCFG_ROLE_HSTDEV BIT(4)
#define GCFG_OP_MODE GENMASK(1, 0)
#define GCFG_OP_MODE_CTRL_SYNC_MODE BIT(0)
#define TCPC_VALID BIT(4)
#define TCPC_LOW_POWER_EN BIT(3)
#define TCPC_MUX_CONTROL_MASK GENMASK(1, 0)
#define TCPC_MUX_CONTROL_USB31 BIT(0)
#define SYSMODE_CFG_TYPEC_DISABLE BIT(3)
#define VBUS_CTRL_POWERPRESENT_OVERRD GENMASK(3, 2)
#define VBUS_CTRL_VBUSVALID_OVERRD GENMASK(1, 0)
#define KIRIN970_USB_DEFAULT_PHY_PARAM (0xfdfee4)
#define KIRIN970_USB_DEFAULT_PHY_VBOOST (0x5)
#define TX_VBOOST_LVL_REG (0xf)
#define TX_VBOOST_LVL_START (6)
#define TX_VBOOST_LVL_ENABLE BIT(9)
struct hi3670_priv {
struct device *dev;
struct regmap *peri_crg;
struct regmap *pctrl;
struct regmap *sctrl;
struct regmap *usb31misc;
u32 eye_diagram_param;
u32 tx_vboost_lvl;
u32 peri_crg_offset;
u32 pctrl_offset;
u32 usb31misc_offset;
};
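/*
 * The USB31 PHY configuration registers (CR) are reached through a
 * bit-banged interface in USB_MISC_CFG54/CFG58: every operation is clocked
 * by toggling the CR clock bit and completion is signalled through the CR
 * ack bit.
 */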
static int hi3670_phy_cr_clk(struct regmap *usb31misc)
{
int ret;
/* Clock up */
ret = regmap_update_bits(usb31misc, USB_MISC_CFG54,
CFG54_USB31PHY_CR_CLK, CFG54_USB31PHY_CR_CLK);
if (ret)
return ret;
/* Clock down */
return regmap_update_bits(usb31misc, USB_MISC_CFG54,
CFG54_USB31PHY_CR_CLK, 0);
}
static int hi3670_phy_cr_set_sel(struct regmap *usb31misc)
{
return regmap_update_bits(usb31misc, USB_MISC_CFG54,
CFG54_USB31PHY_CR_SEL, CFG54_USB31PHY_CR_SEL);
}
static int hi3670_phy_cr_start(struct regmap *usb31misc, int direction)
{
int ret, reg;
if (direction)
reg = CFG54_USB31PHY_CR_WR_EN;
else
reg = CFG54_USB31PHY_CR_RD_EN;
ret = regmap_update_bits(usb31misc, USB_MISC_CFG54, reg, reg);
if (ret)
return ret;
ret = hi3670_phy_cr_clk(usb31misc);
if (ret)
return ret;
return regmap_update_bits(usb31misc, USB_MISC_CFG54,
CFG54_USB31PHY_CR_RD_EN | CFG54_USB31PHY_CR_WR_EN, 0);
}
static int hi3670_phy_cr_wait_ack(struct regmap *usb31misc)
{
u32 reg;
int retry = 10;
int ret;
while (retry-- > 0) {
		ret = regmap_read(usb31misc, USB_MISC_CFG54, &reg);
if (ret)
return ret;
if ((reg & CFG54_USB31PHY_CR_ACK) == CFG54_USB31PHY_CR_ACK)
return 0;
ret = hi3670_phy_cr_clk(usb31misc);
if (ret)
return ret;
usleep_range(10, 20);
}
return -ETIMEDOUT;
}
static int hi3670_phy_cr_set_addr(struct regmap *usb31misc, u32 addr)
{
u32 reg;
int ret;
	ret = regmap_read(usb31misc, USB_MISC_CFG54, &reg);
if (ret)
return ret;
reg = FIELD_PREP(CFG54_USB31PHY_CR_ADDR_MASK, addr);
return regmap_update_bits(usb31misc, USB_MISC_CFG54,
CFG54_USB31PHY_CR_ADDR_MASK, reg);
}
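/*
 * CR read: pulse the CR clock repeatedly (presumably to settle the
 * interface), latch the address, start the read and, once the PHY acks,
 * fetch the result from USB_MISC_CFG58.
 */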
static int hi3670_phy_cr_read(struct regmap *usb31misc, u32 addr, u32 *val)
{
int reg, i, ret;
for (i = 0; i < 100; i++) {
ret = hi3670_phy_cr_clk(usb31misc);
if (ret)
return ret;
}
ret = hi3670_phy_cr_set_sel(usb31misc);
if (ret)
return ret;
ret = hi3670_phy_cr_set_addr(usb31misc, addr);
if (ret)
return ret;
ret = hi3670_phy_cr_start(usb31misc, 0);
if (ret)
return ret;
ret = hi3670_phy_cr_wait_ack(usb31misc);
if (ret)
return ret;
	ret = regmap_read(usb31misc, USB_MISC_CFG58, &reg);
if (ret)
return ret;
*val = FIELD_GET(CFG58_USB31PHY_CR_DATA_MASK, reg);
return 0;
}
static int hi3670_phy_cr_write(struct regmap *usb31misc, u32 addr, u32 val)
{
int i;
int ret;
for (i = 0; i < 100; i++) {
ret = hi3670_phy_cr_clk(usb31misc);
if (ret)
return ret;
}
ret = hi3670_phy_cr_set_sel(usb31misc);
if (ret)
return ret;
ret = hi3670_phy_cr_set_addr(usb31misc, addr);
if (ret)
return ret;
ret = regmap_write(usb31misc, USB_MISC_CFG58,
FIELD_PREP(CFG58_USB31PHY_CR_DATA_MASK, val));
if (ret)
return ret;
ret = hi3670_phy_cr_start(usb31misc, 1);
if (ret)
return ret;
return hi3670_phy_cr_wait_ack(usb31misc);
}
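/*
 * Program the eye-diagram parameter into USB3OTG_CTRL4 and the TX vboost
 * level through the CR interface, retrying the CR read a few times since
 * the first accesses may time out.
 */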
static int hi3670_phy_set_params(struct hi3670_priv *priv)
{
u32 reg;
int ret;
int retry = 3;
ret = regmap_write(priv->usb31misc, USB3OTG_CTRL4,
priv->eye_diagram_param);
if (ret) {
dev_err(priv->dev, "set USB3OTG_CTRL4 failed\n");
return ret;
}
while (retry-- > 0) {
ret = hi3670_phy_cr_read(priv->usb31misc,
					 TX_VBOOST_LVL_REG, &reg);
if (!ret)
break;
if (ret != -ETIMEDOUT) {
dev_err(priv->dev, "read TX_VBOOST_LVL_REG failed\n");
return ret;
}
}
if (ret)
return ret;
reg |= (TX_VBOOST_LVL_ENABLE | (priv->tx_vboost_lvl << TX_VBOOST_LVL_START));
ret = hi3670_phy_cr_write(priv->usb31misc, TX_VBOOST_LVL_REG, reg);
if (ret)
dev_err(priv->dev, "write TX_VBOOST_LVL_REG failed\n");
return ret;
}
static bool hi3670_is_abbclk_selected(struct hi3670_priv *priv)
{
u32 reg;
if (!priv->sctrl) {
dev_err(priv->dev, "priv->sctrl is null!\n");
return false;
}
	if (regmap_read(priv->sctrl, SCTRL_SCDEEPSLEEPED, &reg)) {
dev_err(priv->dev, "SCTRL_SCDEEPSLEEPED read failed!\n");
return false;
}
if ((reg & USB_CLK_SELECTED) == 0)
return false;
return true;
}
static int hi3670_config_phy_clock(struct hi3670_priv *priv)
{
u32 val, mask;
int ret;
if (!hi3670_is_abbclk_selected(priv)) {
/* usb refclk iso disable */
ret = regmap_write(priv->peri_crg, PERI_CRG_ISODIS,
USB_REFCLK_ISO_EN);
if (ret)
goto out;
/* enable usb_tcxo_en */
ret = regmap_write(priv->pctrl, PCTRL_PERI_CTRL3,
USB_TCXO_EN |
(USB_TCXO_EN << PCTRL_PERI_CTRL3_MSK_START));
/* select usbphy clk from abb */
mask = SC_CLK_USB3PHY_3MUX1_SEL;
ret = regmap_update_bits(priv->pctrl,
PCTRL_PERI_CTRL24, mask, 0);
if (ret)
goto out;
ret = regmap_update_bits(priv->usb31misc, USB_MISC_CFGA0,
CFGA0_USB2PHY_REFCLK_SELECT, 0);
if (ret)
goto out;
ret = regmap_read(priv->usb31misc, USB3OTG_CTRL7, &val);
if (ret)
goto out;
val &= ~CTRL7_USB2_REFCLKSEL_MASK;
val |= CTRL7_USB2_REFCLKSEL_ABB;
ret = regmap_write(priv->usb31misc, USB3OTG_CTRL7, val);
if (ret)
goto out;
return 0;
}
ret = regmap_update_bits(priv->usb31misc, USB_MISC_CFG54,
CFG54_USB3PHY_REF_USE_PAD,
CFG54_USB3PHY_REF_USE_PAD);
if (ret)
goto out;
ret = regmap_update_bits(priv->usb31misc, USB_MISC_CFGA0,
CFGA0_USB2PHY_REFCLK_SELECT,
CFGA0_USB2PHY_REFCLK_SELECT);
if (ret)
goto out;
ret = regmap_read(priv->usb31misc, USB3OTG_CTRL7, &val);
if (ret)
goto out;
val &= ~CTRL7_USB2_REFCLKSEL_MASK;
val |= CTRL7_USB2_REFCLKSEL_PAD;
ret = regmap_write(priv->usb31misc, USB3OTG_CTRL7, val);
if (ret)
goto out;
ret = regmap_write(priv->peri_crg,
PERI_CRG_PEREN6, GT_CLK_USB2PHY_REF);
if (ret)
goto out;
return 0;
out:
dev_err(priv->dev, "failed to config phy clock ret: %d\n", ret);
return ret;
}
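/*
 * Configure the TCA (Type-C assist) block: acknowledge and enable its
 * events, clear its clock/reset control bits, program its role and
 * operating mode, route the mux to USB3.1 and override the VBUS valid /
 * power-present inputs.
 */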
static int hi3670_config_tca(struct hi3670_priv *priv)
{
u32 val, mask;
int ret;
ret = regmap_write(priv->usb31misc, TCA_INTR_STS, 0xffff);
if (ret)
goto out;
ret = regmap_write(priv->usb31misc, TCA_INTR_EN,
INTR_EN_XA_TIMEOUT_EVT_EN | INTR_EN_XA_ACK_EVT_EN);
if (ret)
goto out;
mask = CLK_RST_TCA_REF_CLK_EN | CLK_RST_SUSPEND_CLK_EN;
ret = regmap_update_bits(priv->usb31misc, TCA_CLK_RST, mask, 0);
if (ret)
goto out;
ret = regmap_update_bits(priv->usb31misc, TCA_GCFG,
GCFG_ROLE_HSTDEV | GCFG_OP_MODE,
GCFG_ROLE_HSTDEV | GCFG_OP_MODE_CTRL_SYNC_MODE);
if (ret)
goto out;
ret = regmap_update_bits(priv->usb31misc, TCA_SYSMODE_CFG,
SYSMODE_CFG_TYPEC_DISABLE, 0);
if (ret)
goto out;
ret = regmap_read(priv->usb31misc, TCA_TCPC, &val);
if (ret)
goto out;
val &= ~(TCPC_VALID | TCPC_LOW_POWER_EN | TCPC_MUX_CONTROL_MASK);
val |= (TCPC_VALID | TCPC_MUX_CONTROL_USB31);
ret = regmap_write(priv->usb31misc, TCA_TCPC, val);
if (ret)
goto out;
ret = regmap_write(priv->usb31misc, TCA_VBUS_CTRL,
VBUS_CTRL_POWERPRESENT_OVERRD | VBUS_CTRL_VBUSVALID_OVERRD);
if (ret)
goto out;
return 0;
out:
dev_err(priv->dev, "failed to config phy clock ret: %d\n", ret);
return ret;
}
static int hi3670_phy_init(struct phy *phy)
{
struct hi3670_priv *priv = phy_get_drvdata(phy);
u32 val;
int ret;
/* assert controller */
val = CFGA0_VAUX_RESET | CFGA0_USB31C_RESET |
CFGA0_USB3PHY_RESET | CFGA0_USB2PHY_POR;
ret = regmap_update_bits(priv->usb31misc, USB_MISC_CFGA0, val, 0);
if (ret)
goto out;
ret = hi3670_config_phy_clock(priv);
if (ret)
goto out;
/* Exit from IDDQ mode */
ret = regmap_update_bits(priv->usb31misc, USB3OTG_CTRL5,
CTRL5_USB2_SIDDQ, 0);
if (ret)
goto out;
/* Release USB31 PHY out of TestPowerDown mode */
ret = regmap_update_bits(priv->usb31misc, USB_MISC_CFG50,
CFG50_USB3_PHY_TEST_POWERDOWN, 0);
if (ret)
goto out;
/* Deassert phy */
val = CFGA0_USB3PHY_RESET | CFGA0_USB2PHY_POR;
ret = regmap_update_bits(priv->usb31misc, USB_MISC_CFGA0, val, val);
if (ret)
goto out;
usleep_range(100, 120);
/* Tell the PHY power is stable */
val = CFG54_USB3_PHY0_ANA_PWR_EN | CFG54_PHY0_PCS_PWR_STABLE |
CFG54_PHY0_PMA_PWR_STABLE;
ret = regmap_update_bits(priv->usb31misc, USB_MISC_CFG54,
val, val);
if (ret)
goto out;
ret = hi3670_config_tca(priv);
if (ret)
goto out;
/* Enable SSC */
ret = regmap_update_bits(priv->usb31misc, USB_MISC_CFG5C,
CFG5C_USB3_PHY0_SS_MPLLA_SSC_EN,
CFG5C_USB3_PHY0_SS_MPLLA_SSC_EN);
if (ret)
goto out;
/* Deassert controller */
val = CFGA0_VAUX_RESET | CFGA0_USB31C_RESET;
ret = regmap_update_bits(priv->usb31misc, USB_MISC_CFGA0, val, val);
if (ret)
goto out;
usleep_range(100, 120);
/* Set fake vbus valid signal */
val = CTRL0_USB3_VBUSVLD | CTRL0_USB3_VBUSVLD_SEL;
ret = regmap_update_bits(priv->usb31misc, USB3OTG_CTRL0, val, val);
if (ret)
goto out;
val = CTRL3_USB2_VBUSVLDEXT0 | CTRL3_USB2_VBUSVLDEXTSEL0;
ret = regmap_update_bits(priv->usb31misc, USB3OTG_CTRL3, val, val);
if (ret)
goto out;
usleep_range(100, 120);
ret = hi3670_phy_set_params(priv);
if (ret)
goto out;
return 0;
out:
dev_err(priv->dev, "failed to init phy ret: %d\n", ret);
return ret;
}
static int hi3670_phy_exit(struct phy *phy)
{
struct hi3670_priv *priv = phy_get_drvdata(phy);
u32 mask;
int ret;
/* Assert phy */
mask = CFGA0_USB3PHY_RESET | CFGA0_USB2PHY_POR;
ret = regmap_update_bits(priv->usb31misc, USB_MISC_CFGA0, mask, 0);
if (ret)
goto out;
if (!hi3670_is_abbclk_selected(priv)) {
/* disable usb_tcxo_en */
ret = regmap_write(priv->pctrl, PCTRL_PERI_CTRL3,
USB_TCXO_EN << PCTRL_PERI_CTRL3_MSK_START);
} else {
ret = regmap_write(priv->peri_crg, PERI_CRG_PERDIS6,
GT_CLK_USB2PHY_REF);
if (ret)
goto out;
}
return 0;
out:
dev_err(priv->dev, "failed to exit phy ret: %d\n", ret);
return ret;
}
static const struct phy_ops hi3670_phy_ops = {
.init = hi3670_phy_init,
.exit = hi3670_phy_exit,
.owner = THIS_MODULE,
};
static int hi3670_phy_probe(struct platform_device *pdev)
{
struct phy_provider *phy_provider;
struct device *dev = &pdev->dev;
struct phy *phy;
struct hi3670_priv *priv;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->dev = dev;
priv->peri_crg = syscon_regmap_lookup_by_phandle(dev->of_node,
"hisilicon,pericrg-syscon");
if (IS_ERR(priv->peri_crg)) {
dev_err(dev, "no hisilicon,pericrg-syscon\n");
return PTR_ERR(priv->peri_crg);
}
priv->pctrl = syscon_regmap_lookup_by_phandle(dev->of_node,
"hisilicon,pctrl-syscon");
if (IS_ERR(priv->pctrl)) {
dev_err(dev, "no hisilicon,pctrl-syscon\n");
return PTR_ERR(priv->pctrl);
}
priv->sctrl = syscon_regmap_lookup_by_phandle(dev->of_node,
"hisilicon,sctrl-syscon");
if (IS_ERR(priv->sctrl)) {
dev_err(dev, "no hisilicon,sctrl-syscon\n");
return PTR_ERR(priv->sctrl);
}
/* node of hi3670 phy is a sub-node of usb3_otg_bc */
priv->usb31misc = syscon_node_to_regmap(dev->parent->of_node);
if (IS_ERR(priv->usb31misc)) {
dev_err(dev, "no hisilicon,usb3-otg-bc-syscon\n");
return PTR_ERR(priv->usb31misc);
}
if (of_property_read_u32(dev->of_node, "hisilicon,eye-diagram-param",
&priv->eye_diagram_param))
priv->eye_diagram_param = KIRIN970_USB_DEFAULT_PHY_PARAM;
if (of_property_read_u32(dev->of_node, "hisilicon,tx-vboost-lvl",
&priv->tx_vboost_lvl))
priv->tx_vboost_lvl = KIRIN970_USB_DEFAULT_PHY_VBOOST;
phy = devm_phy_create(dev, NULL, &hi3670_phy_ops);
if (IS_ERR(phy))
return PTR_ERR(phy);
phy_set_drvdata(phy, priv);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
return PTR_ERR_OR_ZERO(phy_provider);
}
static const struct of_device_id hi3670_phy_of_match[] = {
{ .compatible = "hisilicon,hi3670-usb-phy" },
{ },
};
MODULE_DEVICE_TABLE(of, hi3670_phy_of_match);
static struct platform_driver hi3670_phy_driver = {
.probe = hi3670_phy_probe,
.driver = {
.name = "hi3670-usb-phy",
.of_match_table = hi3670_phy_of_match,
}
};
module_platform_driver(hi3670_phy_driver);
MODULE_AUTHOR("Yu Chen <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Hilisicon Kirin970 USB31 PHY Driver");
| linux-master | drivers/phy/hisilicon/phy-hi3670-usb3.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2014 Linaro Ltd.
* Copyright (c) 2014 HiSilicon Limited.
*/
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#define SATA_PHY0_CTLL 0xa0
#define MPLL_MULTIPLIER_SHIFT 1
#define MPLL_MULTIPLIER_MASK 0xfe
#define MPLL_MULTIPLIER_50M 0x3c
#define MPLL_MULTIPLIER_100M 0x1e
#define PHY_RESET BIT(0)
#define REF_SSP_EN BIT(9)
#define SSC_EN BIT(10)
#define REF_USE_PAD BIT(23)
#define SATA_PORT_PHYCTL 0x174
#define SPEED_MODE_MASK 0x6f0000
#define HALF_RATE_SHIFT 16
#define PHY_CONFIG_SHIFT 18
#define GEN2_EN_SHIFT 21
#define SPEED_CTRL BIT(20)
#define SATA_PORT_PHYCTL1 0x148
#define AMPLITUDE_MASK 0x3ffffe
#define AMPLITUDE_GEN3 0x68
#define AMPLITUDE_GEN3_SHIFT 15
#define AMPLITUDE_GEN2 0x56
#define AMPLITUDE_GEN2_SHIFT 8
#define AMPLITUDE_GEN1 0x56
#define AMPLITUDE_GEN1_SHIFT 1
#define SATA_PORT_PHYCTL2 0x14c
#define PREEMPH_MASK 0x3ffff
#define PREEMPH_GEN3 0x20
#define PREEMPH_GEN3_SHIFT 12
#define PREEMPH_GEN2 0x15
#define PREEMPH_GEN2_SHIFT 6
#define PREEMPH_GEN1 0x5
#define PREEMPH_GEN1_SHIFT 0
struct hix5hd2_priv {
void __iomem *base;
struct regmap *peri_ctrl;
};
enum phy_speed_mode {
SPEED_MODE_GEN1 = 0,
SPEED_MODE_GEN2 = 1,
SPEED_MODE_GEN3 = 2,
};
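/*
 * Bring up the SATA PHY: optionally power it on through the peripheral
 * syscon, reset it with the 50 MHz MPLL multiplier setting, program the
 * per-generation TX amplitude and pre-emphasis values, then step the port
 * speed setting through GEN1, GEN3 and finally GEN2.
 */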
static int hix5hd2_sata_phy_init(struct phy *phy)
{
struct hix5hd2_priv *priv = phy_get_drvdata(phy);
u32 val, data[2];
int ret;
if (priv->peri_ctrl) {
ret = of_property_read_u32_array(phy->dev.of_node,
"hisilicon,power-reg",
&data[0], 2);
if (ret) {
dev_err(&phy->dev, "Fail read hisilicon,power-reg\n");
return ret;
}
regmap_update_bits(priv->peri_ctrl, data[0],
BIT(data[1]), BIT(data[1]));
}
/* reset phy */
val = readl_relaxed(priv->base + SATA_PHY0_CTLL);
val &= ~(MPLL_MULTIPLIER_MASK | REF_USE_PAD);
val |= MPLL_MULTIPLIER_50M << MPLL_MULTIPLIER_SHIFT |
REF_SSP_EN | PHY_RESET;
writel_relaxed(val, priv->base + SATA_PHY0_CTLL);
msleep(20);
val &= ~PHY_RESET;
writel_relaxed(val, priv->base + SATA_PHY0_CTLL);
val = readl_relaxed(priv->base + SATA_PORT_PHYCTL1);
val &= ~AMPLITUDE_MASK;
val |= AMPLITUDE_GEN3 << AMPLITUDE_GEN3_SHIFT |
AMPLITUDE_GEN2 << AMPLITUDE_GEN2_SHIFT |
AMPLITUDE_GEN1 << AMPLITUDE_GEN1_SHIFT;
writel_relaxed(val, priv->base + SATA_PORT_PHYCTL1);
val = readl_relaxed(priv->base + SATA_PORT_PHYCTL2);
val &= ~PREEMPH_MASK;
val |= PREEMPH_GEN3 << PREEMPH_GEN3_SHIFT |
PREEMPH_GEN2 << PREEMPH_GEN2_SHIFT |
PREEMPH_GEN1 << PREEMPH_GEN1_SHIFT;
writel_relaxed(val, priv->base + SATA_PORT_PHYCTL2);
/* ensure PHYCTRL setting takes effect */
val = readl_relaxed(priv->base + SATA_PORT_PHYCTL);
val &= ~SPEED_MODE_MASK;
val |= SPEED_MODE_GEN1 << HALF_RATE_SHIFT |
SPEED_MODE_GEN1 << PHY_CONFIG_SHIFT |
SPEED_MODE_GEN1 << GEN2_EN_SHIFT | SPEED_CTRL;
writel_relaxed(val, priv->base + SATA_PORT_PHYCTL);
msleep(20);
val &= ~SPEED_MODE_MASK;
val |= SPEED_MODE_GEN3 << HALF_RATE_SHIFT |
SPEED_MODE_GEN3 << PHY_CONFIG_SHIFT |
SPEED_MODE_GEN3 << GEN2_EN_SHIFT | SPEED_CTRL;
writel_relaxed(val, priv->base + SATA_PORT_PHYCTL);
val &= ~(SPEED_MODE_MASK | SPEED_CTRL);
val |= SPEED_MODE_GEN2 << HALF_RATE_SHIFT |
SPEED_MODE_GEN2 << PHY_CONFIG_SHIFT |
SPEED_MODE_GEN2 << GEN2_EN_SHIFT;
writel_relaxed(val, priv->base + SATA_PORT_PHYCTL);
return 0;
}
static const struct phy_ops hix5hd2_sata_phy_ops = {
.init = hix5hd2_sata_phy_init,
.owner = THIS_MODULE,
};
static int hix5hd2_sata_phy_probe(struct platform_device *pdev)
{
struct phy_provider *phy_provider;
struct device *dev = &pdev->dev;
struct resource *res;
struct phy *phy;
struct hix5hd2_priv *priv;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -EINVAL;
priv->base = devm_ioremap(dev, res->start, resource_size(res));
if (!priv->base)
return -ENOMEM;
priv->peri_ctrl = syscon_regmap_lookup_by_phandle(dev->of_node,
"hisilicon,peripheral-syscon");
if (IS_ERR(priv->peri_ctrl))
priv->peri_ctrl = NULL;
phy = devm_phy_create(dev, NULL, &hix5hd2_sata_phy_ops);
if (IS_ERR(phy)) {
dev_err(dev, "failed to create PHY\n");
return PTR_ERR(phy);
}
phy_set_drvdata(phy, priv);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
return PTR_ERR_OR_ZERO(phy_provider);
}
static const struct of_device_id hix5hd2_sata_phy_of_match[] = {
{.compatible = "hisilicon,hix5hd2-sata-phy",},
{ },
};
MODULE_DEVICE_TABLE(of, hix5hd2_sata_phy_of_match);
static struct platform_driver hix5hd2_sata_phy_driver = {
.probe = hix5hd2_sata_phy_probe,
.driver = {
.name = "hix5hd2-sata-phy",
.of_match_table = hix5hd2_sata_phy_of_match,
}
};
module_platform_driver(hix5hd2_sata_phy_driver);
MODULE_AUTHOR("Jiancheng Xue <[email protected]>");
MODULE_DESCRIPTION("HISILICON HIX5HD2 SATA PHY driver");
MODULE_ALIAS("platform:hix5hd2-sata-phy");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/phy/hisilicon/phy-hix5hd2-sata.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2015 Linaro Ltd.
* Copyright (c) 2015 HiSilicon Limited.
*/
#include <linux/mfd/syscon.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/regmap.h>
#define SC_PERIPH_CTRL4 0x00c
#define CTRL4_PICO_SIDDQ BIT(6)
#define CTRL4_PICO_OGDISABLE BIT(8)
#define CTRL4_PICO_VBUSVLDEXT BIT(10)
#define CTRL4_PICO_VBUSVLDEXTSEL BIT(11)
#define CTRL4_OTG_PHY_SEL BIT(21)
#define SC_PERIPH_CTRL5 0x010
#define CTRL5_USBOTG_RES_SEL BIT(3)
#define CTRL5_PICOPHY_ACAENB BIT(4)
#define CTRL5_PICOPHY_BC_MODE BIT(5)
#define CTRL5_PICOPHY_CHRGSEL BIT(6)
#define CTRL5_PICOPHY_VDATSRCEND BIT(7)
#define CTRL5_PICOPHY_VDATDETENB BIT(8)
#define CTRL5_PICOPHY_DCDENB BIT(9)
#define CTRL5_PICOPHY_IDDIG BIT(10)
#define SC_PERIPH_CTRL8 0x018
#define SC_PERIPH_RSTEN0 0x300
#define SC_PERIPH_RSTDIS0 0x304
#define RST0_USBOTG_BUS BIT(4)
#define RST0_POR_PICOPHY BIT(5)
#define RST0_USBOTG BIT(6)
#define RST0_USBOTG_32K BIT(7)
#define EYE_PATTERN_PARA 0x7053348c
struct hi6220_priv {
struct regmap *reg;
struct device *dev;
};
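/*
 * Pulse the USB OTG related resets: assert them all through RSTEN0 and
 * immediately release them again through RSTDIS0.
 */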
static void hi6220_phy_init(struct hi6220_priv *priv)
{
struct regmap *reg = priv->reg;
u32 val, mask;
val = RST0_USBOTG_BUS | RST0_POR_PICOPHY |
RST0_USBOTG | RST0_USBOTG_32K;
mask = val;
regmap_update_bits(reg, SC_PERIPH_RSTEN0, mask, val);
regmap_update_bits(reg, SC_PERIPH_RSTDIS0, mask, val);
}
static int hi6220_phy_setup(struct hi6220_priv *priv, bool on)
{
struct regmap *reg = priv->reg;
u32 val, mask;
int ret;
if (on) {
val = CTRL5_USBOTG_RES_SEL | CTRL5_PICOPHY_ACAENB;
mask = val | CTRL5_PICOPHY_BC_MODE;
ret = regmap_update_bits(reg, SC_PERIPH_CTRL5, mask, val);
if (ret)
goto out;
val = CTRL4_PICO_VBUSVLDEXT | CTRL4_PICO_VBUSVLDEXTSEL |
CTRL4_OTG_PHY_SEL;
mask = val | CTRL4_PICO_SIDDQ | CTRL4_PICO_OGDISABLE;
ret = regmap_update_bits(reg, SC_PERIPH_CTRL4, mask, val);
if (ret)
goto out;
ret = regmap_write(reg, SC_PERIPH_CTRL8, EYE_PATTERN_PARA);
if (ret)
goto out;
} else {
val = CTRL4_PICO_SIDDQ;
mask = val;
ret = regmap_update_bits(reg, SC_PERIPH_CTRL4, mask, val);
if (ret)
goto out;
}
return 0;
out:
dev_err(priv->dev, "failed to setup phy ret: %d\n", ret);
return ret;
}
static int hi6220_phy_start(struct phy *phy)
{
struct hi6220_priv *priv = phy_get_drvdata(phy);
return hi6220_phy_setup(priv, true);
}
static int hi6220_phy_exit(struct phy *phy)
{
struct hi6220_priv *priv = phy_get_drvdata(phy);
return hi6220_phy_setup(priv, false);
}
static const struct phy_ops hi6220_phy_ops = {
.init = hi6220_phy_start,
.exit = hi6220_phy_exit,
.owner = THIS_MODULE,
};
static int hi6220_phy_probe(struct platform_device *pdev)
{
struct phy_provider *phy_provider;
struct device *dev = &pdev->dev;
struct phy *phy;
struct hi6220_priv *priv;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->dev = dev;
priv->reg = syscon_regmap_lookup_by_phandle(dev->of_node,
"hisilicon,peripheral-syscon");
if (IS_ERR(priv->reg)) {
dev_err(dev, "no hisilicon,peripheral-syscon\n");
return PTR_ERR(priv->reg);
}
hi6220_phy_init(priv);
phy = devm_phy_create(dev, NULL, &hi6220_phy_ops);
if (IS_ERR(phy))
return PTR_ERR(phy);
phy_set_drvdata(phy, priv);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
return PTR_ERR_OR_ZERO(phy_provider);
}
static const struct of_device_id hi6220_phy_of_match[] = {
{.compatible = "hisilicon,hi6220-usb-phy",},
{ },
};
MODULE_DEVICE_TABLE(of, hi6220_phy_of_match);
static struct platform_driver hi6220_phy_driver = {
.probe = hi6220_phy_probe,
.driver = {
.name = "hi6220-usb-phy",
.of_match_table = hi6220_phy_of_match,
}
};
module_platform_driver(hi6220_phy_driver);
MODULE_DESCRIPTION("HISILICON HI6220 USB PHY driver");
MODULE_ALIAS("platform:hi6220-usb-phy");
MODULE_LICENSE("GPL");
| linux-master | drivers/phy/hisilicon/phy-hi6220-usb.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* COMBPHY driver for HiSilicon STB SoCs
*
* Copyright (C) 2016-2017 HiSilicon Co., Ltd. http://www.hisilicon.com
*
* Authors: Jianguo Sun <[email protected]>
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <dt-bindings/phy/phy.h>
#define COMBPHY_MODE_PCIE 0
#define COMBPHY_MODE_USB3 1
#define COMBPHY_MODE_SATA 2
#define COMBPHY_CFG_REG 0x0
#define COMBPHY_BYPASS_CODEC BIT(31)
#define COMBPHY_TEST_WRITE BIT(24)
#define COMBPHY_TEST_DATA_SHIFT 20
#define COMBPHY_TEST_DATA_MASK GENMASK(23, 20)
#define COMBPHY_TEST_ADDR_SHIFT 12
#define COMBPHY_TEST_ADDR_MASK GENMASK(16, 12)
#define COMBPHY_CLKREF_OUT_OEN BIT(0)
struct histb_combphy_mode {
int fixed;
int select;
u32 reg;
u32 shift;
u32 mask;
};
struct histb_combphy_priv {
void __iomem *mmio;
struct regmap *syscon;
struct reset_control *por_rst;
struct clk *ref_clk;
struct phy *phy;
struct histb_combphy_mode mode;
};
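/*
 * The nano PHY registers are not memory mapped directly; they are written
 * through COMBPHY_CFG_REG by setting up the address and data fields and
 * toggling the test write strobe.
 */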
static void nano_register_write(struct histb_combphy_priv *priv,
u32 addr, u32 data)
{
void __iomem *reg = priv->mmio + COMBPHY_CFG_REG;
u32 val;
/* Set up address and data for the write */
val = readl(reg);
val &= ~COMBPHY_TEST_ADDR_MASK;
val |= addr << COMBPHY_TEST_ADDR_SHIFT;
val &= ~COMBPHY_TEST_DATA_MASK;
val |= data << COMBPHY_TEST_DATA_SHIFT;
writel(val, reg);
/* Flip strobe control to trigger the write */
val &= ~COMBPHY_TEST_WRITE;
writel(val, reg);
val |= COMBPHY_TEST_WRITE;
writel(val, reg);
}
static int is_mode_fixed(struct histb_combphy_mode *mode)
{
return (mode->fixed != PHY_NONE) ? true : false;
}
static int histb_combphy_set_mode(struct histb_combphy_priv *priv)
{
struct histb_combphy_mode *mode = &priv->mode;
struct regmap *syscon = priv->syscon;
u32 hw_sel;
if (is_mode_fixed(mode))
return 0;
switch (mode->select) {
case PHY_TYPE_SATA:
hw_sel = COMBPHY_MODE_SATA;
break;
case PHY_TYPE_PCIE:
hw_sel = COMBPHY_MODE_PCIE;
break;
case PHY_TYPE_USB3:
hw_sel = COMBPHY_MODE_USB3;
break;
default:
return -EINVAL;
}
return regmap_update_bits(syscon, mode->reg, mode->mask,
hw_sel << mode->shift);
}
static int histb_combphy_init(struct phy *phy)
{
struct histb_combphy_priv *priv = phy_get_drvdata(phy);
u32 val;
int ret;
ret = histb_combphy_set_mode(priv);
if (ret)
return ret;
/* Clear bypass bit to enable encoding/decoding */
val = readl(priv->mmio + COMBPHY_CFG_REG);
val &= ~COMBPHY_BYPASS_CODEC;
writel(val, priv->mmio + COMBPHY_CFG_REG);
ret = clk_prepare_enable(priv->ref_clk);
if (ret)
return ret;
reset_control_deassert(priv->por_rst);
/* Enable EP clock */
val = readl(priv->mmio + COMBPHY_CFG_REG);
val |= COMBPHY_CLKREF_OUT_OEN;
writel(val, priv->mmio + COMBPHY_CFG_REG);
/* Need to wait for EP clock stable */
mdelay(5);
/* Configure nano phy registers as suggested by vendor */
nano_register_write(priv, 0x1, 0x8);
nano_register_write(priv, 0xc, 0x9);
nano_register_write(priv, 0x1a, 0x4);
return 0;
}
static int histb_combphy_exit(struct phy *phy)
{
struct histb_combphy_priv *priv = phy_get_drvdata(phy);
u32 val;
/* Disable EP clock */
val = readl(priv->mmio + COMBPHY_CFG_REG);
val &= ~COMBPHY_CLKREF_OUT_OEN;
writel(val, priv->mmio + COMBPHY_CFG_REG);
reset_control_assert(priv->por_rst);
clk_disable_unprepare(priv->ref_clk);
return 0;
}
static const struct phy_ops histb_combphy_ops = {
.init = histb_combphy_init,
.exit = histb_combphy_exit,
.owner = THIS_MODULE,
};
static struct phy *histb_combphy_xlate(struct device *dev,
struct of_phandle_args *args)
{
struct histb_combphy_priv *priv = dev_get_drvdata(dev);
struct histb_combphy_mode *mode = &priv->mode;
if (args->args_count < 1) {
dev_err(dev, "invalid number of arguments\n");
return ERR_PTR(-EINVAL);
}
mode->select = args->args[0];
if (mode->select < PHY_TYPE_SATA || mode->select > PHY_TYPE_USB3) {
dev_err(dev, "invalid phy mode select argument\n");
return ERR_PTR(-EINVAL);
}
if (is_mode_fixed(mode) && mode->select != mode->fixed) {
dev_err(dev, "mode select %d mismatch fixed phy mode %d\n",
mode->select, mode->fixed);
return ERR_PTR(-EINVAL);
}
return priv->phy;
}
static int histb_combphy_probe(struct platform_device *pdev)
{
struct phy_provider *phy_provider;
struct device *dev = &pdev->dev;
struct histb_combphy_priv *priv;
struct device_node *np = dev->of_node;
struct histb_combphy_mode *mode;
u32 vals[3];
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->mmio)) {
ret = PTR_ERR(priv->mmio);
return ret;
}
priv->syscon = syscon_node_to_regmap(np->parent);
if (IS_ERR(priv->syscon)) {
dev_err(dev, "failed to find peri_ctrl syscon regmap\n");
return PTR_ERR(priv->syscon);
}
mode = &priv->mode;
mode->fixed = PHY_NONE;
ret = of_property_read_u32(np, "hisilicon,fixed-mode", &mode->fixed);
if (ret == 0)
dev_dbg(dev, "found fixed phy mode %d\n", mode->fixed);
ret = of_property_read_u32_array(np, "hisilicon,mode-select-bits",
vals, ARRAY_SIZE(vals));
if (ret == 0) {
if (is_mode_fixed(mode)) {
dev_err(dev, "found select bits for fixed mode phy\n");
return -EINVAL;
}
mode->reg = vals[0];
mode->shift = vals[1];
mode->mask = vals[2];
dev_dbg(dev, "found mode select bits\n");
} else {
if (!is_mode_fixed(mode)) {
dev_err(dev, "no valid select bits found for non-fixed phy\n");
return -ENODEV;
}
}
priv->ref_clk = devm_clk_get(dev, NULL);
if (IS_ERR(priv->ref_clk)) {
dev_err(dev, "failed to find ref clock\n");
return PTR_ERR(priv->ref_clk);
}
priv->por_rst = devm_reset_control_get(dev, NULL);
if (IS_ERR(priv->por_rst)) {
dev_err(dev, "failed to get poweron reset\n");
return PTR_ERR(priv->por_rst);
}
priv->phy = devm_phy_create(dev, NULL, &histb_combphy_ops);
if (IS_ERR(priv->phy)) {
dev_err(dev, "failed to create combphy\n");
return PTR_ERR(priv->phy);
}
dev_set_drvdata(dev, priv);
phy_set_drvdata(priv->phy, priv);
phy_provider = devm_of_phy_provider_register(dev, histb_combphy_xlate);
return PTR_ERR_OR_ZERO(phy_provider);
}
static const struct of_device_id histb_combphy_of_match[] = {
{ .compatible = "hisilicon,hi3798cv200-combphy" },
{ },
};
MODULE_DEVICE_TABLE(of, histb_combphy_of_match);
static struct platform_driver histb_combphy_driver = {
.probe = histb_combphy_probe,
.driver = {
.name = "combphy",
.of_match_table = histb_combphy_of_match,
},
};
module_platform_driver(histb_combphy_driver);
MODULE_DESCRIPTION("HiSilicon STB COMBPHY driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/phy/hisilicon/phy-histb-combphy.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2021 NXP
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <dt-bindings/phy/phy-imx8-pcie.h>
#define IMX8MM_PCIE_PHY_CMN_REG061 0x184
#define ANA_PLL_CLK_OUT_TO_EXT_IO_EN BIT(0)
#define IMX8MM_PCIE_PHY_CMN_REG062 0x188
#define ANA_PLL_CLK_OUT_TO_EXT_IO_SEL BIT(3)
#define IMX8MM_PCIE_PHY_CMN_REG063 0x18C
#define AUX_PLL_REFCLK_SEL_SYS_PLL GENMASK(7, 6)
#define IMX8MM_PCIE_PHY_CMN_REG064 0x190
#define ANA_AUX_RX_TX_SEL_TX BIT(7)
#define ANA_AUX_RX_TERM_GND_EN BIT(3)
#define ANA_AUX_TX_TERM BIT(2)
#define IMX8MM_PCIE_PHY_CMN_REG065 0x194
#define ANA_AUX_RX_TERM (BIT(7) | BIT(4))
#define ANA_AUX_TX_LVL GENMASK(3, 0)
#define IMX8MM_PCIE_PHY_CMN_REG075 0x1D4
#define ANA_PLL_DONE 0x3
#define PCIE_PHY_TRSV_REG5 0x414
#define PCIE_PHY_TRSV_REG6 0x418
#define IMX8MM_GPR_PCIE_REF_CLK_SEL GENMASK(25, 24)
#define IMX8MM_GPR_PCIE_REF_CLK_PLL FIELD_PREP(IMX8MM_GPR_PCIE_REF_CLK_SEL, 0x3)
#define IMX8MM_GPR_PCIE_REF_CLK_EXT FIELD_PREP(IMX8MM_GPR_PCIE_REF_CLK_SEL, 0x2)
#define IMX8MM_GPR_PCIE_AUX_EN BIT(19)
#define IMX8MM_GPR_PCIE_CMN_RST BIT(18)
#define IMX8MM_GPR_PCIE_POWER_OFF BIT(17)
#define IMX8MM_GPR_PCIE_SSC_EN BIT(16)
#define IMX8MM_GPR_PCIE_AUX_EN_OVERRIDE BIT(9)
enum imx8_pcie_phy_type {
IMX8MM,
IMX8MP,
};
struct imx8_pcie_phy_drvdata {
const char *gpr;
enum imx8_pcie_phy_type variant;
};
struct imx8_pcie_phy {
void __iomem *base;
struct clk *clk;
struct phy *phy;
struct regmap *iomuxc_gpr;
struct reset_control *perst;
struct reset_control *reset;
u32 refclk_pad_mode;
u32 tx_deemph_gen1;
u32 tx_deemph_gen2;
bool clkreq_unused;
const struct imx8_pcie_phy_drvdata *drvdata;
};
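/*
 * Bring the PHY up and configure the reference clock path: depending on
 * fsl,refclk-pad-mode the refclk pad is driven as an output from the
 * internal PLL, used as an input, or left unused.
 */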
static int imx8_pcie_phy_power_on(struct phy *phy)
{
int ret;
u32 val, pad_mode;
struct imx8_pcie_phy *imx8_phy = phy_get_drvdata(phy);
pad_mode = imx8_phy->refclk_pad_mode;
switch (imx8_phy->drvdata->variant) {
case IMX8MM:
reset_control_assert(imx8_phy->reset);
/* Tune PHY de-emphasis setting to pass PCIe compliance. */
if (imx8_phy->tx_deemph_gen1)
writel(imx8_phy->tx_deemph_gen1,
imx8_phy->base + PCIE_PHY_TRSV_REG5);
if (imx8_phy->tx_deemph_gen2)
writel(imx8_phy->tx_deemph_gen2,
imx8_phy->base + PCIE_PHY_TRSV_REG6);
break;
case IMX8MP: /* Do nothing. */
break;
}
if (pad_mode == IMX8_PCIE_REFCLK_PAD_INPUT ||
pad_mode == IMX8_PCIE_REFCLK_PAD_UNUSED) {
/* Configure the pad as input */
val = readl(imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG061);
writel(val & ~ANA_PLL_CLK_OUT_TO_EXT_IO_EN,
imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG061);
} else {
/* Configure the PHY to output the refclock via pad */
writel(ANA_PLL_CLK_OUT_TO_EXT_IO_EN,
imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG061);
}
if (pad_mode == IMX8_PCIE_REFCLK_PAD_OUTPUT ||
pad_mode == IMX8_PCIE_REFCLK_PAD_UNUSED) {
/* Source clock from SoC internal PLL */
writel(ANA_PLL_CLK_OUT_TO_EXT_IO_SEL,
imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG062);
writel(AUX_PLL_REFCLK_SEL_SYS_PLL,
imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG063);
val = ANA_AUX_RX_TX_SEL_TX | ANA_AUX_TX_TERM;
writel(val | ANA_AUX_RX_TERM_GND_EN,
imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG064);
writel(ANA_AUX_RX_TERM | ANA_AUX_TX_LVL,
imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG065);
}
/* Set AUX_EN_OVERRIDE to 1'b0 when the CLKREQ# signal isn't hooked up */
regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
IMX8MM_GPR_PCIE_AUX_EN_OVERRIDE,
imx8_phy->clkreq_unused ?
0 : IMX8MM_GPR_PCIE_AUX_EN_OVERRIDE);
regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
IMX8MM_GPR_PCIE_AUX_EN,
IMX8MM_GPR_PCIE_AUX_EN);
regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
IMX8MM_GPR_PCIE_POWER_OFF, 0);
regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
IMX8MM_GPR_PCIE_SSC_EN, 0);
regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
IMX8MM_GPR_PCIE_REF_CLK_SEL,
pad_mode == IMX8_PCIE_REFCLK_PAD_INPUT ?
IMX8MM_GPR_PCIE_REF_CLK_EXT :
IMX8MM_GPR_PCIE_REF_CLK_PLL);
usleep_range(100, 200);
/* Do the PHY common block reset */
regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
IMX8MM_GPR_PCIE_CMN_RST,
IMX8MM_GPR_PCIE_CMN_RST);
switch (imx8_phy->drvdata->variant) {
case IMX8MP:
reset_control_deassert(imx8_phy->perst);
fallthrough;
case IMX8MM:
reset_control_deassert(imx8_phy->reset);
usleep_range(200, 500);
break;
}
/* Poll until the PHY PLL reports lock (ANA_PLL_DONE). */
ret = readl_poll_timeout(imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG075,
val, val == ANA_PLL_DONE, 10, 20000);
return ret;
}
static int imx8_pcie_phy_init(struct phy *phy)
{
struct imx8_pcie_phy *imx8_phy = phy_get_drvdata(phy);
return clk_prepare_enable(imx8_phy->clk);
}
static int imx8_pcie_phy_exit(struct phy *phy)
{
struct imx8_pcie_phy *imx8_phy = phy_get_drvdata(phy);
clk_disable_unprepare(imx8_phy->clk);
return 0;
}
static const struct phy_ops imx8_pcie_phy_ops = {
.init = imx8_pcie_phy_init,
.exit = imx8_pcie_phy_exit,
.power_on = imx8_pcie_phy_power_on,
.owner = THIS_MODULE,
};
static const struct imx8_pcie_phy_drvdata imx8mm_drvdata = {
.gpr = "fsl,imx8mm-iomuxc-gpr",
.variant = IMX8MM,
};
static const struct imx8_pcie_phy_drvdata imx8mp_drvdata = {
.gpr = "fsl,imx8mp-iomuxc-gpr",
.variant = IMX8MP,
};
static const struct of_device_id imx8_pcie_phy_of_match[] = {
{.compatible = "fsl,imx8mm-pcie-phy", .data = &imx8mm_drvdata, },
{.compatible = "fsl,imx8mp-pcie-phy", .data = &imx8mp_drvdata, },
{ },
};
MODULE_DEVICE_TABLE(of, imx8_pcie_phy_of_match);
static int imx8_pcie_phy_probe(struct platform_device *pdev)
{
struct phy_provider *phy_provider;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct imx8_pcie_phy *imx8_phy;
imx8_phy = devm_kzalloc(dev, sizeof(*imx8_phy), GFP_KERNEL);
if (!imx8_phy)
return -ENOMEM;
imx8_phy->drvdata = of_device_get_match_data(dev);
/* get PHY refclk pad mode */
of_property_read_u32(np, "fsl,refclk-pad-mode",
&imx8_phy->refclk_pad_mode);
if (of_property_read_u32(np, "fsl,tx-deemph-gen1",
&imx8_phy->tx_deemph_gen1))
imx8_phy->tx_deemph_gen1 = 0;
if (of_property_read_u32(np, "fsl,tx-deemph-gen2",
&imx8_phy->tx_deemph_gen2))
imx8_phy->tx_deemph_gen2 = 0;
if (of_property_read_bool(np, "fsl,clkreq-unsupported"))
imx8_phy->clkreq_unused = true;
else
imx8_phy->clkreq_unused = false;
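/*
 * Illustrative device tree usage (a sketch only; the compatible string and
 * property names are the ones parsed in this probe function, the values
 * are placeholders rather than board recommendations):
 *
 * pcie_phy: pcie-phy {
 *     compatible = "fsl,imx8mm-pcie-phy";
 *     fsl,refclk-pad-mode = <IMX8_PCIE_REFCLK_PAD_INPUT>;
 *     fsl,tx-deemph-gen1 = <0x2d>;
 *     fsl,clkreq-unsupported;
 * };
 */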
imx8_phy->clk = devm_clk_get(dev, "ref");
if (IS_ERR(imx8_phy->clk)) {
dev_err(dev, "failed to get imx pcie phy clock\n");
return PTR_ERR(imx8_phy->clk);
}
/* Grab GPR config register range */
imx8_phy->iomuxc_gpr =
syscon_regmap_lookup_by_compatible(imx8_phy->drvdata->gpr);
if (IS_ERR(imx8_phy->iomuxc_gpr)) {
dev_err(dev, "unable to find iomuxc registers\n");
return PTR_ERR(imx8_phy->iomuxc_gpr);
}
imx8_phy->reset = devm_reset_control_get_exclusive(dev, "pciephy");
if (IS_ERR(imx8_phy->reset)) {
dev_err(dev, "Failed to get PCIEPHY reset control\n");
return PTR_ERR(imx8_phy->reset);
}
if (imx8_phy->drvdata->variant == IMX8MP) {
imx8_phy->perst =
devm_reset_control_get_exclusive(dev, "perst");
if (IS_ERR(imx8_phy->perst))
return dev_err_probe(dev, PTR_ERR(imx8_phy->perst),
"Failed to get PCIE PHY PERST control\n");
}
imx8_phy->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(imx8_phy->base))
return PTR_ERR(imx8_phy->base);
imx8_phy->phy = devm_phy_create(dev, NULL, &imx8_pcie_phy_ops);
if (IS_ERR(imx8_phy->phy))
return PTR_ERR(imx8_phy->phy);
phy_set_drvdata(imx8_phy->phy, imx8_phy);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
return PTR_ERR_OR_ZERO(phy_provider);
}
static struct platform_driver imx8_pcie_phy_driver = {
.probe = imx8_pcie_phy_probe,
.driver = {
.name = "imx8-pcie-phy",
.of_match_table = imx8_pcie_phy_of_match,
}
};
module_platform_driver(imx8_pcie_phy_driver);
MODULE_DESCRIPTION("FSL IMX8 PCIE PHY driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/phy/freescale/phy-fsl-imx8m-pcie.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2017,2018 NXP
* Copyright 2019 Purism SPC
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/firmware/imx/ipc.h>
#include <linux/firmware/imx/svc/misc.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <dt-bindings/firmware/imx/rsrc.h>
/* Control and Status Registers(CSR) */
#define PHY_CTRL 0x00
#define CCM_MASK GENMASK(7, 5)
#define CCM(n) FIELD_PREP(CCM_MASK, (n))
#define CCM_1_2V 0x5
#define CA_MASK GENMASK(4, 2)
#define CA_3_51MA 0x4
#define CA(n) FIELD_PREP(CA_MASK, (n))
#define RFB BIT(1)
#define LVDS_EN BIT(0)
/* DPHY registers */
#define DPHY_PD_DPHY 0x00
#define DPHY_M_PRG_HS_PREPARE 0x04
#define DPHY_MC_PRG_HS_PREPARE 0x08
#define DPHY_M_PRG_HS_ZERO 0x0c
#define DPHY_MC_PRG_HS_ZERO 0x10
#define DPHY_M_PRG_HS_TRAIL 0x14
#define DPHY_MC_PRG_HS_TRAIL 0x18
#define DPHY_PD_PLL 0x1c
#define DPHY_TST 0x20
#define DPHY_CN 0x24
#define DPHY_CM 0x28
#define DPHY_CO 0x2c
#define DPHY_LOCK 0x30
#define DPHY_LOCK_BYP 0x34
#define DPHY_REG_BYPASS_PLL 0x4C
#define MBPS(x) ((x) * 1000000)
#define DATA_RATE_MAX_SPEED MBPS(1500)
#define DATA_RATE_MIN_SPEED MBPS(80)
#define PLL_LOCK_SLEEP 10
#define PLL_LOCK_TIMEOUT 1000
#define CN_BUF 0xcb7a89c0
#define CO_BUF 0x63
#define CM(x) ( \
((x) < 32) ? 0xe0 | ((x) - 16) : \
((x) < 64) ? 0xc0 | ((x) - 32) : \
((x) < 128) ? 0x80 | ((x) - 64) : \
((x) - 128))
#define CN(x) (((x) == 1) ? 0x1f : (((CN_BUF) >> ((x) - 1)) & 0x1f))
#define CO(x) ((CO_BUF) >> (8 - (x)) & 0x03)
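/*
 * Notes on the encodings above (derived from the macros themselves, not
 * from PHY documentation): CO() maps the divider values 1/2/4/8 to the
 * register codes 0/1/2/3, because CO_BUF = 0x63 = 0b01100011 and
 * (0x63 >> (8 - x)) & 0x3 yields 0, 1, 2 and 3 for x = 1, 2, 4 and 8.
 * Likewise CM(30) = 0xe0 | (30 - 16) = 0xee.
 */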
/* PHY power on is active low */
#define PWR_ON 0
#define PWR_OFF 1
#define MIN_VCO_FREQ 640000000
#define MAX_VCO_FREQ 1500000000
#define MIN_LVDS_REFCLK_FREQ 24000000
#define MAX_LVDS_REFCLK_FREQ 150000000
enum mixel_dphy_devtype {
MIXEL_IMX8MQ,
MIXEL_IMX8QXP,
};
struct mixel_dphy_devdata {
u8 reg_tx_rcal;
u8 reg_auto_pd_en;
u8 reg_rxlprp;
u8 reg_rxcdrp;
u8 reg_rxhs_settle;
bool is_combo; /* MIPI DPHY and LVDS PHY combo */
};
static const struct mixel_dphy_devdata mixel_dphy_devdata[] = {
[MIXEL_IMX8MQ] = {
.reg_tx_rcal = 0x38,
.reg_auto_pd_en = 0x3c,
.reg_rxlprp = 0x40,
.reg_rxcdrp = 0x44,
.reg_rxhs_settle = 0x48,
.is_combo = false,
},
[MIXEL_IMX8QXP] = {
.is_combo = true,
},
};
struct mixel_dphy_cfg {
/* DPHY PLL parameters */
u32 cm;
u32 cn;
u32 co;
/* DPHY register values */
u8 mc_prg_hs_prepare;
u8 m_prg_hs_prepare;
u8 mc_prg_hs_zero;
u8 m_prg_hs_zero;
u8 mc_prg_hs_trail;
u8 m_prg_hs_trail;
u8 rxhs_settle;
};
struct mixel_dphy_priv {
struct mixel_dphy_cfg cfg;
struct regmap *regmap;
struct regmap *lvds_regmap;
struct clk *phy_ref_clk;
const struct mixel_dphy_devdata *devdata;
struct imx_sc_ipc *ipc_handle;
bool is_slave;
int id;
};
static const struct regmap_config mixel_dphy_regmap_config = {
.reg_bits = 8,
.val_bits = 32,
.reg_stride = 4,
.max_register = DPHY_REG_BYPASS_PLL,
.name = "mipi-dphy",
};
static int phy_write(struct phy *phy, u32 value, unsigned int reg)
{
struct mixel_dphy_priv *priv = phy_get_drvdata(phy);
int ret;
ret = regmap_write(priv->regmap, reg, value);
if (ret < 0)
dev_err(&phy->dev, "Failed to write DPHY reg %d: %d\n", reg,
ret);
return ret;
}
/*
* Find a ratio close to the desired one using continued fraction
* approximation ending either at exact match or maximum allowed
* nominator, denominator.
*/
static void get_best_ratio(u32 *pnum, u32 *pdenom, u32 max_n, u32 max_d)
{
u32 a = *pnum;
u32 b = *pdenom;
u32 c;
u32 n[] = {0, 1};
u32 d[] = {1, 0};
u32 whole;
unsigned int i = 1;
while (b) {
i ^= 1;
whole = a / b;
n[i] += (n[i ^ 1] * whole);
d[i] += (d[i ^ 1] * whole);
if ((n[i] > max_n) || (d[i] > max_d)) {
i ^= 1;
break;
}
c = a - (b * whole);
a = b;
b = c;
}
*pnum = n[i];
*pdenom = d[i];
}
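/*
 * Example (a sketch of the arithmetic, not from the PHY spec): for a
 * 1 GHz HS clock and a 24 MHz reference, get_best_ratio(1000000000,
 * 24000000, 255, 256) terminates with the exact reduction 125/3, since
 * both values fit the limits; only when the exact ratio would exceed
 * max_n or max_d does the loop stop early with an approximation.
 */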
static int mixel_dphy_config_from_opts(struct phy *phy,
struct phy_configure_opts_mipi_dphy *dphy_opts,
struct mixel_dphy_cfg *cfg)
{
struct mixel_dphy_priv *priv = dev_get_drvdata(phy->dev.parent);
unsigned long ref_clk = clk_get_rate(priv->phy_ref_clk);
u32 lp_t, numerator, denominator;
unsigned long long tmp;
u32 n;
int i;
if (dphy_opts->hs_clk_rate > DATA_RATE_MAX_SPEED ||
dphy_opts->hs_clk_rate < DATA_RATE_MIN_SPEED)
return -EINVAL;
numerator = dphy_opts->hs_clk_rate;
denominator = ref_clk;
get_best_ratio(&numerator, &denominator, 255, 256);
if (!numerator || !denominator) {
dev_err(&phy->dev, "Invalid %d/%d for %ld/%ld\n",
numerator, denominator,
dphy_opts->hs_clk_rate, ref_clk);
return -EINVAL;
}
while ((numerator < 16) && (denominator <= 128)) {
numerator <<= 1;
denominator <<= 1;
}
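/*
 * For instance (illustrative only): a reduced ratio of 5/2 is scaled up
 * to 20/8 by the loop above, so the numerator reaches the minimum CM
 * value of 16 used below.
 */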
/*
* CM ranges between 16 and 255
* CN ranges between 1 and 32
* CO is power of 2: 1, 2, 4, 8
*/
i = __ffs(denominator);
if (i > 3)
i = 3;
cfg->cn = denominator >> i;
cfg->co = 1 << i;
cfg->cm = numerator;
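/*
 * Worked example (illustrative): the 125/3 ratio from a 1 GHz HS clock
 * and a 24 MHz reference needs no scaling (125 >= 16), and __ffs(3) = 0,
 * so co = 1, cn = 3 and cm = 125, which passes the range check below.
 */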
if (cfg->cm < 16 || cfg->cm > 255 ||
cfg->cn < 1 || cfg->cn > 32 ||
cfg->co < 1 || cfg->co > 8) {
dev_err(&phy->dev, "Invalid CM/CN/CO values: %u/%u/%u\n",
cfg->cm, cfg->cn, cfg->co);
dev_err(&phy->dev, "for hs_clk/ref_clk=%ld/%ld ~ %d/%d\n",
dphy_opts->hs_clk_rate, ref_clk,
numerator, denominator);
return -EINVAL;
}
dev_dbg(&phy->dev, "hs_clk/ref_clk=%ld/%ld ~ %d/%d\n",
dphy_opts->hs_clk_rate, ref_clk, numerator, denominator);
/* LP clock period */
tmp = 1000000000000LL;
do_div(tmp, dphy_opts->lp_clk_rate); /* ps */
if (tmp > ULONG_MAX)
return -EINVAL;
lp_t = tmp;
dev_dbg(&phy->dev, "LP clock %lu, period: %u ps\n",
dphy_opts->lp_clk_rate, lp_t);
/* hs_prepare: in lp clock periods */
if (2 * dphy_opts->hs_prepare > 5 * lp_t) {
dev_err(&phy->dev,
"hs_prepare (%u) > 2.5 * lp clock period (%u)\n",
dphy_opts->hs_prepare, lp_t);
return -EINVAL;
}
/* 00: lp_t, 01: 1.5 * lp_t, 10: 2 * lp_t, 11: 2.5 * lp_t */
if (dphy_opts->hs_prepare < lp_t) {
n = 0;
} else {
tmp = 2 * (dphy_opts->hs_prepare - lp_t);
do_div(tmp, lp_t);
n = tmp;
}
cfg->m_prg_hs_prepare = n;
/* clk_prepare: in lp clock periods */
if (2 * dphy_opts->clk_prepare > 3 * lp_t) {
dev_err(&phy->dev,
"clk_prepare (%u) > 1.5 * lp clock period (%u)\n",
dphy_opts->clk_prepare, lp_t);
return -EINVAL;
}
/* 00: lp_t, 01: 1.5 * lp_t */
cfg->mc_prg_hs_prepare = dphy_opts->clk_prepare > lp_t ? 1 : 0;
/* hs_zero: formula from NXP BSP */
n = (144 * (dphy_opts->hs_clk_rate / 1000000) - 47500) / 10000;
cfg->m_prg_hs_zero = n < 1 ? 1 : n;
/* clk_zero: formula from NXP BSP */
n = (34 * (dphy_opts->hs_clk_rate / 1000000) - 2500) / 1000;
cfg->mc_prg_hs_zero = n < 1 ? 1 : n;
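/*
 * E.g. at 1000 Mbps (illustrative numbers, using the formulas above):
 * m_prg_hs_zero = (144 * 1000 - 47500) / 10000 = 9 and
 * mc_prg_hs_zero = (34 * 1000 - 2500) / 1000 = 31.
 */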
/* clk_trail, hs_trail: formula from NXP BSP */
n = (103 * (dphy_opts->hs_clk_rate / 1000000) + 10000) / 10000;
if (n > 15)
n = 15;
if (n < 1)
n = 1;
cfg->m_prg_hs_trail = n;
cfg->mc_prg_hs_trail = n;
/* rxhs_settle: formula from NXP BSP */
if (dphy_opts->hs_clk_rate < MBPS(80))
cfg->rxhs_settle = 0x0d;
else if (dphy_opts->hs_clk_rate < MBPS(90))
cfg->rxhs_settle = 0x0c;
else if (dphy_opts->hs_clk_rate < MBPS(125))
cfg->rxhs_settle = 0x0b;
else if (dphy_opts->hs_clk_rate < MBPS(150))
cfg->rxhs_settle = 0x0a;
else if (dphy_opts->hs_clk_rate < MBPS(225))
cfg->rxhs_settle = 0x09;
else if (dphy_opts->hs_clk_rate < MBPS(500))
cfg->rxhs_settle = 0x08;
else
cfg->rxhs_settle = 0x07;
dev_dbg(&phy->dev, "phy_config: %u %u %u %u %u %u %u\n",
cfg->m_prg_hs_prepare, cfg->mc_prg_hs_prepare,
cfg->m_prg_hs_zero, cfg->mc_prg_hs_zero,
cfg->m_prg_hs_trail, cfg->mc_prg_hs_trail,
cfg->rxhs_settle);
return 0;
}
static void mixel_phy_set_hs_timings(struct phy *phy)
{
struct mixel_dphy_priv *priv = phy_get_drvdata(phy);
phy_write(phy, priv->cfg.m_prg_hs_prepare, DPHY_M_PRG_HS_PREPARE);
phy_write(phy, priv->cfg.mc_prg_hs_prepare, DPHY_MC_PRG_HS_PREPARE);
phy_write(phy, priv->cfg.m_prg_hs_zero, DPHY_M_PRG_HS_ZERO);
phy_write(phy, priv->cfg.mc_prg_hs_zero, DPHY_MC_PRG_HS_ZERO);
phy_write(phy, priv->cfg.m_prg_hs_trail, DPHY_M_PRG_HS_TRAIL);
phy_write(phy, priv->cfg.mc_prg_hs_trail, DPHY_MC_PRG_HS_TRAIL);
phy_write(phy, priv->cfg.rxhs_settle, priv->devdata->reg_rxhs_settle);
}
static int mixel_dphy_set_pll_params(struct phy *phy)
{
struct mixel_dphy_priv *priv = dev_get_drvdata(phy->dev.parent);
if (priv->cfg.cm < 16 || priv->cfg.cm > 255 ||
priv->cfg.cn < 1 || priv->cfg.cn > 32 ||
priv->cfg.co < 1 || priv->cfg.co > 8) {
dev_err(&phy->dev, "Invalid CM/CN/CO values! (%u/%u/%u)\n",
priv->cfg.cm, priv->cfg.cn, priv->cfg.co);
return -EINVAL;
}
dev_dbg(&phy->dev, "Using CM:%u CN:%u CO:%u\n",
priv->cfg.cm, priv->cfg.cn, priv->cfg.co);
phy_write(phy, CM(priv->cfg.cm), DPHY_CM);
phy_write(phy, CN(priv->cfg.cn), DPHY_CN);
phy_write(phy, CO(priv->cfg.co), DPHY_CO);
return 0;
}
static int
mixel_dphy_configure_mipi_dphy(struct phy *phy, union phy_configure_opts *opts)
{
struct mixel_dphy_priv *priv = phy_get_drvdata(phy);
struct mixel_dphy_cfg cfg = { 0 };
int ret;
ret = mixel_dphy_config_from_opts(phy, &opts->mipi_dphy, &cfg);
if (ret)
return ret;
/* Update the configuration */
memcpy(&priv->cfg, &cfg, sizeof(struct mixel_dphy_cfg));
phy_write(phy, 0x00, DPHY_LOCK_BYP);
phy_write(phy, 0x01, priv->devdata->reg_tx_rcal);
phy_write(phy, 0x00, priv->devdata->reg_auto_pd_en);
phy_write(phy, 0x02, priv->devdata->reg_rxlprp);
phy_write(phy, 0x02, priv->devdata->reg_rxcdrp);
phy_write(phy, 0x25, DPHY_TST);
mixel_phy_set_hs_timings(phy);
ret = mixel_dphy_set_pll_params(phy);
if (ret < 0)
return ret;
return 0;
}
static int
mixel_dphy_configure_lvds_phy(struct phy *phy, union phy_configure_opts *opts)
{
struct mixel_dphy_priv *priv = phy_get_drvdata(phy);
struct phy_configure_opts_lvds *lvds_opts = &opts->lvds;
unsigned long data_rate;
unsigned long fvco;
u32 rsc;
u32 co;
int ret;
priv->is_slave = lvds_opts->is_slave;
/* LVDS interface pins */
regmap_write(priv->lvds_regmap, PHY_CTRL,
CCM(CCM_1_2V) | CA(CA_3_51MA) | RFB);
/* enable MODE8 only for slave LVDS PHY */
rsc = priv->id ? IMX_SC_R_MIPI_1 : IMX_SC_R_MIPI_0;
ret = imx_sc_misc_set_control(priv->ipc_handle, rsc, IMX_SC_C_DUAL_MODE,
lvds_opts->is_slave);
if (ret) {
dev_err(&phy->dev, "Failed to configure MODE8: %d\n", ret);
return ret;
}
/*
* Choose an appropriate divider ratio to meet the requirement of
* PLL VCO frequency range.
*
* ----- 640MHz ~ 1500MHz ------------ ---------------
* | VCO | ----------------> | CO divider | -> | LVDS data rate|
* ----- FVCO ------------ ---------------
* 1/2/4/8 div 7 * differential_clk_rate
*/
data_rate = 7 * lvds_opts->differential_clk_rate;
for (co = 1; co <= 8; co *= 2) {
fvco = data_rate * co;
if (fvco >= MIN_VCO_FREQ)
break;
}
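/*
 * For instance (plain arithmetic, not a datasheet value): a 74.25 MHz
 * differential clock gives a 519.75 MHz data rate, so the loop above
 * settles on co = 2 with fvco = 1039.5 MHz, inside the 640 MHz - 1.5 GHz
 * VCO window checked below.
 */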
if (fvco < MIN_VCO_FREQ || fvco > MAX_VCO_FREQ) {
dev_err(&phy->dev, "VCO frequency %lu is out of range\n", fvco);
return -ERANGE;
}
/*
* CO is configurable, while CN and CM are not,
* as fixed ratios 1 and 7 are applied respectively.
*/
phy_write(phy, __ffs(co), DPHY_CO);
/* set reference clock rate */
clk_set_rate(priv->phy_ref_clk, lvds_opts->differential_clk_rate);
return ret;
}
static int mixel_dphy_configure(struct phy *phy, union phy_configure_opts *opts)
{
if (!opts) {
dev_err(&phy->dev, "No configuration options\n");
return -EINVAL;
}
if (phy->attrs.mode == PHY_MODE_MIPI_DPHY)
return mixel_dphy_configure_mipi_dphy(phy, opts);
else if (phy->attrs.mode == PHY_MODE_LVDS)
return mixel_dphy_configure_lvds_phy(phy, opts);
dev_err(&phy->dev,
"Failed to configure PHY with invalid PHY mode: %d\n", phy->attrs.mode);
return -EINVAL;
}
static int
mixel_dphy_validate_lvds_phy(struct phy *phy, union phy_configure_opts *opts)
{
struct phy_configure_opts_lvds *lvds_cfg = &opts->lvds;
if (lvds_cfg->bits_per_lane_and_dclk_cycle != 7) {
dev_err(&phy->dev, "Invalid bits per LVDS data lane: %u\n",
lvds_cfg->bits_per_lane_and_dclk_cycle);
return -EINVAL;
}
if (lvds_cfg->lanes != 4) {
dev_err(&phy->dev, "Invalid LVDS data lanes: %u\n", lvds_cfg->lanes);
return -EINVAL;
}
if (lvds_cfg->differential_clk_rate < MIN_LVDS_REFCLK_FREQ ||
lvds_cfg->differential_clk_rate > MAX_LVDS_REFCLK_FREQ) {
dev_err(&phy->dev,
"Invalid LVDS differential clock rate: %lu\n",
lvds_cfg->differential_clk_rate);
return -EINVAL;
}
return 0;
}
static int mixel_dphy_validate(struct phy *phy, enum phy_mode mode, int submode,
union phy_configure_opts *opts)
{
if (mode == PHY_MODE_MIPI_DPHY) {
struct mixel_dphy_cfg mipi_dphy_cfg = { 0 };
return mixel_dphy_config_from_opts(phy, &opts->mipi_dphy,
&mipi_dphy_cfg);
} else if (mode == PHY_MODE_LVDS) {
return mixel_dphy_validate_lvds_phy(phy, opts);
}
dev_err(&phy->dev,
"Failed to validate PHY with invalid PHY mode: %d\n", mode);
return -EINVAL;
}
static int mixel_dphy_init(struct phy *phy)
{
phy_write(phy, PWR_OFF, DPHY_PD_PLL);
phy_write(phy, PWR_OFF, DPHY_PD_DPHY);
return 0;
}
static int mixel_dphy_exit(struct phy *phy)
{
phy_write(phy, 0, DPHY_CM);
phy_write(phy, 0, DPHY_CN);
phy_write(phy, 0, DPHY_CO);
return 0;
}
static int mixel_dphy_power_on_mipi_dphy(struct phy *phy)
{
struct mixel_dphy_priv *priv = phy_get_drvdata(phy);
u32 locked;
int ret;
phy_write(phy, PWR_ON, DPHY_PD_PLL);
ret = regmap_read_poll_timeout(priv->regmap, DPHY_LOCK, locked,
locked, PLL_LOCK_SLEEP,
PLL_LOCK_TIMEOUT);
if (ret < 0) {
dev_err(&phy->dev, "Could not get DPHY lock (%d)!\n", ret);
return ret;
}
phy_write(phy, PWR_ON, DPHY_PD_DPHY);
return 0;
}
static int mixel_dphy_power_on_lvds_phy(struct phy *phy)
{
struct mixel_dphy_priv *priv = phy_get_drvdata(phy);
u32 locked;
int ret;
regmap_update_bits(priv->lvds_regmap, PHY_CTRL, LVDS_EN, LVDS_EN);
phy_write(phy, PWR_ON, DPHY_PD_DPHY);
phy_write(phy, PWR_ON, DPHY_PD_PLL);
/* Do not wait for the slave LVDS PHY to lock. */
if (priv->is_slave)
return 0;
ret = regmap_read_poll_timeout(priv->regmap, DPHY_LOCK, locked,
locked, PLL_LOCK_SLEEP,
PLL_LOCK_TIMEOUT);
if (ret < 0) {
dev_err(&phy->dev, "Could not get LVDS PHY lock (%d)!\n", ret);
return ret;
}
return 0;
}
static int mixel_dphy_power_on(struct phy *phy)
{
struct mixel_dphy_priv *priv = phy_get_drvdata(phy);
int ret;
ret = clk_prepare_enable(priv->phy_ref_clk);
if (ret < 0)
return ret;
if (phy->attrs.mode == PHY_MODE_MIPI_DPHY) {
ret = mixel_dphy_power_on_mipi_dphy(phy);
} else if (phy->attrs.mode == PHY_MODE_LVDS) {
ret = mixel_dphy_power_on_lvds_phy(phy);
} else {
dev_err(&phy->dev,
"Failed to power on PHY with invalid PHY mode: %d\n",
phy->attrs.mode);
ret = -EINVAL;
}
if (ret)
goto clock_disable;
return 0;
clock_disable:
clk_disable_unprepare(priv->phy_ref_clk);
return ret;
}
static int mixel_dphy_power_off(struct phy *phy)
{
struct mixel_dphy_priv *priv = phy_get_drvdata(phy);
phy_write(phy, PWR_OFF, DPHY_PD_PLL);
phy_write(phy, PWR_OFF, DPHY_PD_DPHY);
if (phy->attrs.mode == PHY_MODE_LVDS)
regmap_update_bits(priv->lvds_regmap, PHY_CTRL, LVDS_EN, 0);
clk_disable_unprepare(priv->phy_ref_clk);
return 0;
}
static int mixel_dphy_set_mode(struct phy *phy, enum phy_mode mode, int submode)
{
struct mixel_dphy_priv *priv = phy_get_drvdata(phy);
int ret;
if (priv->devdata->is_combo && mode != PHY_MODE_LVDS) {
dev_err(&phy->dev, "Failed to set PHY mode for combo PHY\n");
return -EINVAL;
}
if (!priv->devdata->is_combo && mode != PHY_MODE_MIPI_DPHY) {
dev_err(&phy->dev, "Failed to set PHY mode to MIPI DPHY\n");
return -EINVAL;
}
if (priv->devdata->is_combo) {
u32 rsc = priv->id ? IMX_SC_R_MIPI_1 : IMX_SC_R_MIPI_0;
ret = imx_sc_misc_set_control(priv->ipc_handle,
rsc, IMX_SC_C_MODE,
mode == PHY_MODE_LVDS);
if (ret) {
dev_err(&phy->dev,
"Failed to set PHY mode via SCU ipc: %d\n", ret);
return ret;
}
}
return 0;
}
static const struct phy_ops mixel_dphy_phy_ops = {
.init = mixel_dphy_init,
.exit = mixel_dphy_exit,
.power_on = mixel_dphy_power_on,
.power_off = mixel_dphy_power_off,
.set_mode = mixel_dphy_set_mode,
.configure = mixel_dphy_configure,
.validate = mixel_dphy_validate,
.owner = THIS_MODULE,
};
static const struct of_device_id mixel_dphy_of_match[] = {
{ .compatible = "fsl,imx8mq-mipi-dphy",
.data = &mixel_dphy_devdata[MIXEL_IMX8MQ] },
{ .compatible = "fsl,imx8qxp-mipi-dphy",
.data = &mixel_dphy_devdata[MIXEL_IMX8QXP] },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, mixel_dphy_of_match);
static int mixel_dphy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct phy_provider *phy_provider;
struct mixel_dphy_priv *priv;
struct phy *phy;
void __iomem *base;
int ret;
if (!np)
return -ENODEV;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->devdata = of_device_get_match_data(&pdev->dev);
if (!priv->devdata)
return -EINVAL;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
priv->regmap = devm_regmap_init_mmio(&pdev->dev, base,
&mixel_dphy_regmap_config);
if (IS_ERR(priv->regmap)) {
dev_err(dev, "Couldn't create the DPHY regmap\n");
return PTR_ERR(priv->regmap);
}
priv->phy_ref_clk = devm_clk_get(&pdev->dev, "phy_ref");
if (IS_ERR(priv->phy_ref_clk)) {
dev_err(dev, "No phy_ref clock found\n");
return PTR_ERR(priv->phy_ref_clk);
}
dev_dbg(dev, "phy_ref clock rate: %lu\n",
clk_get_rate(priv->phy_ref_clk));
if (priv->devdata->is_combo) {
priv->lvds_regmap =
syscon_regmap_lookup_by_phandle(np, "fsl,syscon");
if (IS_ERR(priv->lvds_regmap)) {
ret = PTR_ERR(priv->lvds_regmap);
dev_err_probe(dev, ret, "Failed to get LVDS regmap\n");
return ret;
}
priv->id = of_alias_get_id(np, "mipi_dphy");
if (priv->id < 0) {
dev_err(dev, "Failed to get phy node alias id: %d\n",
priv->id);
return priv->id;
}
ret = imx_scu_get_handle(&priv->ipc_handle);
if (ret) {
dev_err_probe(dev, ret,
"Failed to get SCU ipc handle\n");
return ret;
}
}
dev_set_drvdata(dev, priv);
phy = devm_phy_create(dev, np, &mixel_dphy_phy_ops);
if (IS_ERR(phy)) {
dev_err(dev, "Failed to create phy %ld\n", PTR_ERR(phy));
return PTR_ERR(phy);
}
phy_set_drvdata(phy, priv);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
return PTR_ERR_OR_ZERO(phy_provider);
}
static struct platform_driver mixel_dphy_driver = {
.probe = mixel_dphy_probe,
.driver = {
.name = "mixel-mipi-dphy",
.of_match_table = mixel_dphy_of_match,
}
};
module_platform_driver(mixel_dphy_driver);
MODULE_AUTHOR("NXP Semiconductor");
MODULE_DESCRIPTION("Mixel MIPI-DSI PHY driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c |
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (c) 2021-2022 NXP. */
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#define LYNX_28G_NUM_LANE 8
#define LYNX_28G_NUM_PLL 2
/* General registers per SerDes block */
#define LYNX_28G_PCC8 0x10a0
#define LYNX_28G_PCC8_SGMII 0x1
#define LYNX_28G_PCC8_SGMII_DIS 0x0
#define LYNX_28G_PCCC 0x10b0
#define LYNX_28G_PCCC_10GBASER 0x9
#define LYNX_28G_PCCC_USXGMII 0x1
#define LYNX_28G_PCCC_SXGMII_DIS 0x0
#define LYNX_28G_LNa_PCC_OFFSET(lane) (4 * (LYNX_28G_NUM_LANE - (lane->id) - 1))
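/*
 * Each lane owns a 4-bit protocol-configuration field in PCC8/PCCC, laid
 * out from the top of the register down: per the macro above, lane 0 maps
 * to bit offset 28 and lane 7 to bit offset 0.
 */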
/* Per PLL registers */
#define LYNX_28G_PLLnRSTCTL(pll) (0x400 + (pll) * 0x100 + 0x0)
#define LYNX_28G_PLLnRSTCTL_DIS(rstctl) (((rstctl) & BIT(24)) >> 24)
#define LYNX_28G_PLLnRSTCTL_LOCK(rstctl) (((rstctl) & BIT(23)) >> 23)
#define LYNX_28G_PLLnCR0(pll) (0x400 + (pll) * 0x100 + 0x4)
#define LYNX_28G_PLLnCR0_REFCLK_SEL(cr0) (((cr0) & GENMASK(20, 16)))
#define LYNX_28G_PLLnCR0_REFCLK_SEL_100MHZ 0x0
#define LYNX_28G_PLLnCR0_REFCLK_SEL_125MHZ 0x10000
#define LYNX_28G_PLLnCR0_REFCLK_SEL_156MHZ 0x20000
#define LYNX_28G_PLLnCR0_REFCLK_SEL_150MHZ 0x30000
#define LYNX_28G_PLLnCR0_REFCLK_SEL_161MHZ 0x40000
#define LYNX_28G_PLLnCR1(pll) (0x400 + (pll) * 0x100 + 0x8)
#define LYNX_28G_PLLnCR1_FRATE_SEL(cr1) (((cr1) & GENMASK(28, 24)))
#define LYNX_28G_PLLnCR1_FRATE_5G_10GVCO 0x0
#define LYNX_28G_PLLnCR1_FRATE_5G_25GVCO 0x10000000
#define LYNX_28G_PLLnCR1_FRATE_10G_20GVCO 0x6000000
/* Per SerDes lane registers */
/* Lane a General Control Register */
#define LYNX_28G_LNaGCR0(lane) (0x800 + (lane) * 0x100 + 0x0)
#define LYNX_28G_LNaGCR0_PROTO_SEL_MSK GENMASK(7, 3)
#define LYNX_28G_LNaGCR0_PROTO_SEL_SGMII 0x8
#define LYNX_28G_LNaGCR0_PROTO_SEL_XFI 0x50
#define LYNX_28G_LNaGCR0_IF_WIDTH_MSK GENMASK(2, 0)
#define LYNX_28G_LNaGCR0_IF_WIDTH_10_BIT 0x0
#define LYNX_28G_LNaGCR0_IF_WIDTH_20_BIT 0x2
/* Lane a Tx Reset Control Register */
#define LYNX_28G_LNaTRSTCTL(lane) (0x800 + (lane) * 0x100 + 0x20)
#define LYNX_28G_LNaTRSTCTL_HLT_REQ BIT(27)
#define LYNX_28G_LNaTRSTCTL_RST_DONE BIT(30)
#define LYNX_28G_LNaTRSTCTL_RST_REQ BIT(31)
/* Lane a Tx General Control Register */
#define LYNX_28G_LNaTGCR0(lane) (0x800 + (lane) * 0x100 + 0x24)
#define LYNX_28G_LNaTGCR0_USE_PLLF 0x0
#define LYNX_28G_LNaTGCR0_USE_PLLS BIT(28)
#define LYNX_28G_LNaTGCR0_USE_PLL_MSK BIT(28)
#define LYNX_28G_LNaTGCR0_N_RATE_FULL 0x0
#define LYNX_28G_LNaTGCR0_N_RATE_HALF 0x1000000
#define LYNX_28G_LNaTGCR0_N_RATE_QUARTER 0x2000000
#define LYNX_28G_LNaTGCR0_N_RATE_MSK GENMASK(26, 24)
#define LYNX_28G_LNaTECR0(lane) (0x800 + (lane) * 0x100 + 0x30)
/* Lane a Rx Reset Control Register */
#define LYNX_28G_LNaRRSTCTL(lane) (0x800 + (lane) * 0x100 + 0x40)
#define LYNX_28G_LNaRRSTCTL_HLT_REQ BIT(27)
#define LYNX_28G_LNaRRSTCTL_RST_DONE BIT(30)
#define LYNX_28G_LNaRRSTCTL_RST_REQ BIT(31)
#define LYNX_28G_LNaRRSTCTL_CDR_LOCK BIT(12)
/* Lane a Rx General Control Register */
#define LYNX_28G_LNaRGCR0(lane) (0x800 + (lane) * 0x100 + 0x44)
#define LYNX_28G_LNaRGCR0_USE_PLLF 0x0
#define LYNX_28G_LNaRGCR0_USE_PLLS BIT(28)
#define LYNX_28G_LNaRGCR0_USE_PLL_MSK BIT(28)
#define LYNX_28G_LNaRGCR0_N_RATE_MSK GENMASK(26, 24)
#define LYNX_28G_LNaRGCR0_N_RATE_FULL 0x0
#define LYNX_28G_LNaRGCR0_N_RATE_HALF 0x1000000
#define LYNX_28G_LNaRGCR0_N_RATE_QUARTER 0x2000000
#define LYNX_28G_LNaRGCR1(lane) (0x800 + (lane) * 0x100 + 0x48)
#define LYNX_28G_LNaRECR0(lane) (0x800 + (lane) * 0x100 + 0x50)
#define LYNX_28G_LNaRECR1(lane) (0x800 + (lane) * 0x100 + 0x54)
#define LYNX_28G_LNaRECR2(lane) (0x800 + (lane) * 0x100 + 0x58)
#define LYNX_28G_LNaRSCCR0(lane) (0x800 + (lane) * 0x100 + 0x74)
#define LYNX_28G_LNaPSS(lane) (0x1000 + (lane) * 0x4)
#define LYNX_28G_LNaPSS_TYPE(pss) (((pss) & GENMASK(30, 24)) >> 24)
#define LYNX_28G_LNaPSS_TYPE_SGMII 0x4
#define LYNX_28G_LNaPSS_TYPE_XFI 0x28
#define LYNX_28G_SGMIIaCR1(lane) (0x1804 + (lane) * 0x10)
#define LYNX_28G_SGMIIaCR1_SGPCS_EN BIT(11)
#define LYNX_28G_SGMIIaCR1_SGPCS_DIS 0x0
#define LYNX_28G_SGMIIaCR1_SGPCS_MSK BIT(11)
struct lynx_28g_priv;
struct lynx_28g_pll {
struct lynx_28g_priv *priv;
u32 rstctl, cr0, cr1;
int id;
DECLARE_PHY_INTERFACE_MASK(supported);
};
struct lynx_28g_lane {
struct lynx_28g_priv *priv;
struct phy *phy;
bool powered_up;
bool init;
unsigned int id;
phy_interface_t interface;
};
struct lynx_28g_priv {
void __iomem *base;
struct device *dev;
struct lynx_28g_pll pll[LYNX_28G_NUM_PLL];
struct lynx_28g_lane lane[LYNX_28G_NUM_LANE];
struct delayed_work cdr_check;
};
static void lynx_28g_rmw(struct lynx_28g_priv *priv, unsigned long off,
u32 val, u32 mask)
{
void __iomem *reg = priv->base + off;
u32 orig, tmp;
orig = ioread32(reg);
tmp = orig & ~mask;
tmp |= val;
iowrite32(tmp, reg);
}
#define lynx_28g_lane_rmw(lane, reg, val, mask) \
lynx_28g_rmw((lane)->priv, LYNX_28G_##reg(lane->id), \
LYNX_28G_##reg##_##val, LYNX_28G_##reg##_##mask)
#define lynx_28g_lane_read(lane, reg) \
ioread32((lane)->priv->base + LYNX_28G_##reg((lane)->id))
#define lynx_28g_pll_read(pll, reg) \
ioread32((pll)->priv->base + LYNX_28G_##reg((pll)->id))
static bool lynx_28g_supports_interface(struct lynx_28g_priv *priv, int intf)
{
int i;
for (i = 0; i < LYNX_28G_NUM_PLL; i++) {
if (LYNX_28G_PLLnRSTCTL_DIS(priv->pll[i].rstctl))
continue;
if (test_bit(intf, priv->pll[i].supported))
return true;
}
return false;
}
static struct lynx_28g_pll *lynx_28g_pll_get(struct lynx_28g_priv *priv,
phy_interface_t intf)
{
struct lynx_28g_pll *pll;
int i;
for (i = 0; i < LYNX_28G_NUM_PLL; i++) {
pll = &priv->pll[i];
if (LYNX_28G_PLLnRSTCTL_DIS(pll->rstctl))
continue;
if (test_bit(intf, pll->supported))
return pll;
}
return NULL;
}
static void lynx_28g_lane_set_nrate(struct lynx_28g_lane *lane,
struct lynx_28g_pll *pll,
phy_interface_t intf)
{
switch (LYNX_28G_PLLnCR1_FRATE_SEL(pll->cr1)) {
case LYNX_28G_PLLnCR1_FRATE_5G_10GVCO:
case LYNX_28G_PLLnCR1_FRATE_5G_25GVCO:
switch (intf) {
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_1000BASEX:
lynx_28g_lane_rmw(lane, LNaTGCR0, N_RATE_QUARTER, N_RATE_MSK);
lynx_28g_lane_rmw(lane, LNaRGCR0, N_RATE_QUARTER, N_RATE_MSK);
break;
default:
break;
}
break;
case LYNX_28G_PLLnCR1_FRATE_10G_20GVCO:
switch (intf) {
case PHY_INTERFACE_MODE_10GBASER:
case PHY_INTERFACE_MODE_USXGMII:
lynx_28g_lane_rmw(lane, LNaTGCR0, N_RATE_FULL, N_RATE_MSK);
lynx_28g_lane_rmw(lane, LNaRGCR0, N_RATE_FULL, N_RATE_MSK);
break;
default:
break;
}
break;
default:
break;
}
}
static void lynx_28g_lane_set_pll(struct lynx_28g_lane *lane,
struct lynx_28g_pll *pll)
{
if (pll->id == 0) {
lynx_28g_lane_rmw(lane, LNaTGCR0, USE_PLLF, USE_PLL_MSK);
lynx_28g_lane_rmw(lane, LNaRGCR0, USE_PLLF, USE_PLL_MSK);
} else {
lynx_28g_lane_rmw(lane, LNaTGCR0, USE_PLLS, USE_PLL_MSK);
lynx_28g_lane_rmw(lane, LNaRGCR0, USE_PLLS, USE_PLL_MSK);
}
}
static void lynx_28g_cleanup_lane(struct lynx_28g_lane *lane)
{
u32 lane_offset = LYNX_28G_LNa_PCC_OFFSET(lane);
struct lynx_28g_priv *priv = lane->priv;
/* Cleanup the protocol configuration registers of the current protocol */
switch (lane->interface) {
case PHY_INTERFACE_MODE_10GBASER:
lynx_28g_rmw(priv, LYNX_28G_PCCC,
LYNX_28G_PCCC_SXGMII_DIS << lane_offset,
GENMASK(3, 0) << lane_offset);
break;
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_1000BASEX:
lynx_28g_rmw(priv, LYNX_28G_PCC8,
LYNX_28G_PCC8_SGMII_DIS << lane_offset,
GENMASK(3, 0) << lane_offset);
break;
default:
break;
}
}
static void lynx_28g_lane_set_sgmii(struct lynx_28g_lane *lane)
{
u32 lane_offset = LYNX_28G_LNa_PCC_OFFSET(lane);
struct lynx_28g_priv *priv = lane->priv;
struct lynx_28g_pll *pll;
lynx_28g_cleanup_lane(lane);
/* Setup the lane to run in SGMII */
lynx_28g_rmw(priv, LYNX_28G_PCC8,
LYNX_28G_PCC8_SGMII << lane_offset,
GENMASK(3, 0) << lane_offset);
/* Setup the protocol select and SerDes parallel interface width */
lynx_28g_lane_rmw(lane, LNaGCR0, PROTO_SEL_SGMII, PROTO_SEL_MSK);
lynx_28g_lane_rmw(lane, LNaGCR0, IF_WIDTH_10_BIT, IF_WIDTH_MSK);
/* Switch to the PLL that works with this interface type */
pll = lynx_28g_pll_get(priv, PHY_INTERFACE_MODE_SGMII);
lynx_28g_lane_set_pll(lane, pll);
/* Choose the portion of clock net to be used on this lane */
lynx_28g_lane_set_nrate(lane, pll, PHY_INTERFACE_MODE_SGMII);
/* Enable the SGMII PCS */
lynx_28g_lane_rmw(lane, SGMIIaCR1, SGPCS_EN, SGPCS_MSK);
/* Configure the appropriate equalization parameters for the protocol */
iowrite32(0x00808006, priv->base + LYNX_28G_LNaTECR0(lane->id));
iowrite32(0x04310000, priv->base + LYNX_28G_LNaRGCR1(lane->id));
iowrite32(0x9f800000, priv->base + LYNX_28G_LNaRECR0(lane->id));
iowrite32(0x001f0000, priv->base + LYNX_28G_LNaRECR1(lane->id));
iowrite32(0x00000000, priv->base + LYNX_28G_LNaRECR2(lane->id));
iowrite32(0x00000000, priv->base + LYNX_28G_LNaRSCCR0(lane->id));
}
static void lynx_28g_lane_set_10gbaser(struct lynx_28g_lane *lane)
{
u32 lane_offset = LYNX_28G_LNa_PCC_OFFSET(lane);
struct lynx_28g_priv *priv = lane->priv;
struct lynx_28g_pll *pll;
lynx_28g_cleanup_lane(lane);
/* Enable the SXGMII lane */
lynx_28g_rmw(priv, LYNX_28G_PCCC,
LYNX_28G_PCCC_10GBASER << lane_offset,
GENMASK(3, 0) << lane_offset);
/* Setup the protocol select and SerDes parallel interface width */
lynx_28g_lane_rmw(lane, LNaGCR0, PROTO_SEL_XFI, PROTO_SEL_MSK);
lynx_28g_lane_rmw(lane, LNaGCR0, IF_WIDTH_20_BIT, IF_WIDTH_MSK);
/* Switch to the PLL that works with this interface type */
pll = lynx_28g_pll_get(priv, PHY_INTERFACE_MODE_10GBASER);
lynx_28g_lane_set_pll(lane, pll);
/* Choose the portion of clock net to be used on this lane */
lynx_28g_lane_set_nrate(lane, pll, PHY_INTERFACE_MODE_10GBASER);
/* Disable the SGMII PCS */
lynx_28g_lane_rmw(lane, SGMIIaCR1, SGPCS_DIS, SGPCS_MSK);
/* Configure the appropriate equalization parameters for the protocol */
iowrite32(0x10808307, priv->base + LYNX_28G_LNaTECR0(lane->id));
iowrite32(0x10000000, priv->base + LYNX_28G_LNaRGCR1(lane->id));
iowrite32(0x00000000, priv->base + LYNX_28G_LNaRECR0(lane->id));
iowrite32(0x001f0000, priv->base + LYNX_28G_LNaRECR1(lane->id));
iowrite32(0x81000020, priv->base + LYNX_28G_LNaRECR2(lane->id));
iowrite32(0x00002000, priv->base + LYNX_28G_LNaRSCCR0(lane->id));
}
static int lynx_28g_power_off(struct phy *phy)
{
struct lynx_28g_lane *lane = phy_get_drvdata(phy);
u32 trstctl, rrstctl;
if (!lane->powered_up)
return 0;
/* Issue a halt request */
lynx_28g_lane_rmw(lane, LNaTRSTCTL, HLT_REQ, HLT_REQ);
lynx_28g_lane_rmw(lane, LNaRRSTCTL, HLT_REQ, HLT_REQ);
/* Wait until the halting process is complete */
do {
trstctl = lynx_28g_lane_read(lane, LNaTRSTCTL);
rrstctl = lynx_28g_lane_read(lane, LNaRRSTCTL);
} while ((trstctl & LYNX_28G_LNaTRSTCTL_HLT_REQ) ||
(rrstctl & LYNX_28G_LNaRRSTCTL_HLT_REQ));
lane->powered_up = false;
return 0;
}
static int lynx_28g_power_on(struct phy *phy)
{
struct lynx_28g_lane *lane = phy_get_drvdata(phy);
u32 trstctl, rrstctl;
if (lane->powered_up)
return 0;
/* Issue a reset request on the lane */
lynx_28g_lane_rmw(lane, LNaTRSTCTL, RST_REQ, RST_REQ);
lynx_28g_lane_rmw(lane, LNaRRSTCTL, RST_REQ, RST_REQ);
/* Wait until the reset sequence is completed */
do {
trstctl = lynx_28g_lane_read(lane, LNaTRSTCTL);
rrstctl = lynx_28g_lane_read(lane, LNaRRSTCTL);
} while (!(trstctl & LYNX_28G_LNaTRSTCTL_RST_DONE) ||
!(rrstctl & LYNX_28G_LNaRRSTCTL_RST_DONE));
lane->powered_up = true;
return 0;
}
static int lynx_28g_set_mode(struct phy *phy, enum phy_mode mode, int submode)
{
struct lynx_28g_lane *lane = phy_get_drvdata(phy);
struct lynx_28g_priv *priv = lane->priv;
int powered_up = lane->powered_up;
int err = 0;
if (mode != PHY_MODE_ETHERNET)
return -EOPNOTSUPP;
if (lane->interface == PHY_INTERFACE_MODE_NA)
return -EOPNOTSUPP;
if (!lynx_28g_supports_interface(priv, submode))
return -EOPNOTSUPP;
/* If the lane is powered up, put the lane into the halt state while
* the reconfiguration is being done.
*/
if (powered_up)
lynx_28g_power_off(phy);
switch (submode) {
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_1000BASEX:
lynx_28g_lane_set_sgmii(lane);
break;
case PHY_INTERFACE_MODE_10GBASER:
lynx_28g_lane_set_10gbaser(lane);
break;
default:
err = -EOPNOTSUPP;
goto out;
}
lane->interface = submode;
out:
/* Power up the lane if necessary */
if (powered_up)
lynx_28g_power_on(phy);
return err;
}
static int lynx_28g_validate(struct phy *phy, enum phy_mode mode, int submode,
union phy_configure_opts *opts __always_unused)
{
struct lynx_28g_lane *lane = phy_get_drvdata(phy);
struct lynx_28g_priv *priv = lane->priv;
if (mode != PHY_MODE_ETHERNET)
return -EOPNOTSUPP;
if (!lynx_28g_supports_interface(priv, submode))
return -EOPNOTSUPP;
return 0;
}
static int lynx_28g_init(struct phy *phy)
{
struct lynx_28g_lane *lane = phy_get_drvdata(phy);
/* Mark the lane as initialized. */
lane->init = true;
/* SerDes lanes are powered on at boot time. Any lane that is managed
 * by this driver will get powered down at init time, i.e. at dpaa2-eth
 * probe time.
 */
lane->powered_up = true;
lynx_28g_power_off(phy);
return 0;
}
static const struct phy_ops lynx_28g_ops = {
.init = lynx_28g_init,
.power_on = lynx_28g_power_on,
.power_off = lynx_28g_power_off,
.set_mode = lynx_28g_set_mode,
.validate = lynx_28g_validate,
.owner = THIS_MODULE,
};
static void lynx_28g_pll_read_configuration(struct lynx_28g_priv *priv)
{
struct lynx_28g_pll *pll;
int i;
for (i = 0; i < LYNX_28G_NUM_PLL; i++) {
pll = &priv->pll[i];
pll->priv = priv;
pll->id = i;
pll->rstctl = lynx_28g_pll_read(pll, PLLnRSTCTL);
pll->cr0 = lynx_28g_pll_read(pll, PLLnCR0);
pll->cr1 = lynx_28g_pll_read(pll, PLLnCR1);
if (LYNX_28G_PLLnRSTCTL_DIS(pll->rstctl))
continue;
switch (LYNX_28G_PLLnCR1_FRATE_SEL(pll->cr1)) {
case LYNX_28G_PLLnCR1_FRATE_5G_10GVCO:
case LYNX_28G_PLLnCR1_FRATE_5G_25GVCO:
/* 5GHz clock net */
__set_bit(PHY_INTERFACE_MODE_1000BASEX, pll->supported);
__set_bit(PHY_INTERFACE_MODE_SGMII, pll->supported);
break;
case LYNX_28G_PLLnCR1_FRATE_10G_20GVCO:
/* 10.3125GHz clock net */
__set_bit(PHY_INTERFACE_MODE_10GBASER, pll->supported);
break;
default:
/* 6GHz, 12.890625GHz, 8GHz */
break;
}
}
}
#define work_to_lynx(w) container_of((w), struct lynx_28g_priv, cdr_check.work)
static void lynx_28g_cdr_lock_check(struct work_struct *work)
{
struct lynx_28g_priv *priv = work_to_lynx(work);
struct lynx_28g_lane *lane;
u32 rrstctl;
int i;
for (i = 0; i < LYNX_28G_NUM_LANE; i++) {
lane = &priv->lane[i];
if (!lane->init)
continue;
if (!lane->powered_up)
continue;
rrstctl = lynx_28g_lane_read(lane, LNaRRSTCTL);
if (!(rrstctl & LYNX_28G_LNaRRSTCTL_CDR_LOCK)) {
lynx_28g_lane_rmw(lane, LNaRRSTCTL, RST_REQ, RST_REQ);
do {
rrstctl = lynx_28g_lane_read(lane, LNaRRSTCTL);
} while (!(rrstctl & LYNX_28G_LNaRRSTCTL_RST_DONE));
}
}
queue_delayed_work(system_power_efficient_wq, &priv->cdr_check,
msecs_to_jiffies(1000));
}
static void lynx_28g_lane_read_configuration(struct lynx_28g_lane *lane)
{
u32 pss, protocol;
pss = lynx_28g_lane_read(lane, LNaPSS);
protocol = LYNX_28G_LNaPSS_TYPE(pss);
switch (protocol) {
case LYNX_28G_LNaPSS_TYPE_SGMII:
lane->interface = PHY_INTERFACE_MODE_SGMII;
break;
case LYNX_28G_LNaPSS_TYPE_XFI:
lane->interface = PHY_INTERFACE_MODE_10GBASER;
break;
default:
lane->interface = PHY_INTERFACE_MODE_NA;
}
}
static struct phy *lynx_28g_xlate(struct device *dev,
struct of_phandle_args *args)
{
struct lynx_28g_priv *priv = dev_get_drvdata(dev);
int idx = args->args[0];
if (WARN_ON(idx >= LYNX_28G_NUM_LANE))
return ERR_PTR(-EINVAL);
return priv->lane[idx].phy;
}
static int lynx_28g_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct phy_provider *provider;
struct lynx_28g_priv *priv;
int i;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->dev = &pdev->dev;
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
lynx_28g_pll_read_configuration(priv);
for (i = 0; i < LYNX_28G_NUM_LANE; i++) {
struct lynx_28g_lane *lane = &priv->lane[i];
struct phy *phy;
memset(lane, 0, sizeof(*lane));
phy = devm_phy_create(&pdev->dev, NULL, &lynx_28g_ops);
if (IS_ERR(phy))
return PTR_ERR(phy);
lane->priv = priv;
lane->phy = phy;
lane->id = i;
phy_set_drvdata(phy, lane);
lynx_28g_lane_read_configuration(lane);
}
dev_set_drvdata(dev, priv);
INIT_DELAYED_WORK(&priv->cdr_check, lynx_28g_cdr_lock_check);
queue_delayed_work(system_power_efficient_wq, &priv->cdr_check,
msecs_to_jiffies(1000));
dev_set_drvdata(&pdev->dev, priv);
provider = devm_of_phy_provider_register(&pdev->dev, lynx_28g_xlate);
return PTR_ERR_OR_ZERO(provider);
}
static const struct of_device_id lynx_28g_of_match_table[] = {
{ .compatible = "fsl,lynx-28g" },
{ },
};
MODULE_DEVICE_TABLE(of, lynx_28g_of_match_table);
static struct platform_driver lynx_28g_driver = {
.probe = lynx_28g_probe,
.driver = {
.name = "lynx-28g",
.of_match_table = lynx_28g_of_match_table,
},
};
module_platform_driver(lynx_28g_driver);
MODULE_AUTHOR("Ioana Ciornei <[email protected]>");
MODULE_DESCRIPTION("Lynx 28G SerDes PHY driver for Layerscape SoCs");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/phy/freescale/phy-fsl-lynx-28g.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2017-2020,2022 NXP
*/
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/units.h>
#define REG_SET 0x4
#define REG_CLR 0x8
#define PHY_CTRL 0x0
#define M_MASK GENMASK(18, 17)
#define M(n) FIELD_PREP(M_MASK, (n))
#define CCM_MASK GENMASK(16, 14)
#define CCM(n) FIELD_PREP(CCM_MASK, (n))
#define CA_MASK GENMASK(13, 11)
#define CA(n) FIELD_PREP(CA_MASK, (n))
#define TST_MASK GENMASK(10, 5)
#define TST(n) FIELD_PREP(TST_MASK, (n))
#define CH_EN(id) BIT(3 + (id))
#define NB BIT(2)
#define RFB BIT(1)
#define PD BIT(0)
/* Power On Reset (POR) value */
#define CTRL_RESET_VAL (M(0x0) | CCM(0x4) | CA(0x4) | TST(0x25))
/* PHY initialization value and mask */
#define CTRL_INIT_MASK (M_MASK | CCM_MASK | CA_MASK | TST_MASK | NB | RFB)
#define CTRL_INIT_VAL (M(0x0) | CCM(0x5) | CA(0x4) | TST(0x25) | RFB)
#define PHY_STATUS 0x10
#define LOCK BIT(0)
#define PHY_NUM 2
#define MIN_CLKIN_FREQ (25 * MEGA)
#define MAX_CLKIN_FREQ (165 * MEGA)
#define PLL_LOCK_SLEEP 10
#define PLL_LOCK_TIMEOUT 1000
struct mixel_lvds_phy {
struct phy *phy;
struct phy_configure_opts_lvds cfg;
unsigned int id;
};
struct mixel_lvds_phy_priv {
struct regmap *regmap;
struct mutex lock; /* protect remap access and cfg of our own */
struct clk *phy_ref_clk;
struct mixel_lvds_phy *phys[PHY_NUM];
};
static int mixel_lvds_phy_init(struct phy *phy)
{
struct mixel_lvds_phy_priv *priv = dev_get_drvdata(phy->dev.parent);
mutex_lock(&priv->lock);
regmap_update_bits(priv->regmap,
PHY_CTRL, CTRL_INIT_MASK, CTRL_INIT_VAL);
mutex_unlock(&priv->lock);
return 0;
}
static int mixel_lvds_phy_power_on(struct phy *phy)
{
struct mixel_lvds_phy_priv *priv = dev_get_drvdata(phy->dev.parent);
struct mixel_lvds_phy *lvds_phy = phy_get_drvdata(phy);
struct mixel_lvds_phy *companion = priv->phys[lvds_phy->id ^ 1];
struct phy_configure_opts_lvds *cfg = &lvds_phy->cfg;
u32 val = 0;
u32 locked;
int ret;
/* The master PHY would power on the slave PHY. */
if (cfg->is_slave)
return 0;
ret = clk_prepare_enable(priv->phy_ref_clk);
if (ret < 0) {
dev_err(&phy->dev,
"failed to enable PHY reference clock: %d\n", ret);
return ret;
}
mutex_lock(&priv->lock);
if (cfg->bits_per_lane_and_dclk_cycle == 7) {
if (cfg->differential_clk_rate < 44000000)
val |= M(0x2);
else if (cfg->differential_clk_rate < 90000000)
val |= M(0x1);
else
val |= M(0x0);
} else {
val = NB;
if (cfg->differential_clk_rate < 32000000)
val |= M(0x2);
else if (cfg->differential_clk_rate < 63000000)
val |= M(0x1);
else
val |= M(0x0);
}
regmap_update_bits(priv->regmap, PHY_CTRL, M_MASK | NB, val);
/*
* Enable two channels synchronously,
* if the companion PHY is a slave PHY.
*/
if (companion->cfg.is_slave)
val = CH_EN(0) | CH_EN(1);
else
val = CH_EN(lvds_phy->id);
regmap_write(priv->regmap, PHY_CTRL + REG_SET, val);
ret = regmap_read_poll_timeout(priv->regmap, PHY_STATUS, locked,
locked, PLL_LOCK_SLEEP,
PLL_LOCK_TIMEOUT);
if (ret < 0) {
dev_err(&phy->dev, "failed to get PHY lock: %d\n", ret);
clk_disable_unprepare(priv->phy_ref_clk);
}
mutex_unlock(&priv->lock);
return ret;
}
static int mixel_lvds_phy_power_off(struct phy *phy)
{
struct mixel_lvds_phy_priv *priv = dev_get_drvdata(phy->dev.parent);
struct mixel_lvds_phy *lvds_phy = phy_get_drvdata(phy);
struct mixel_lvds_phy *companion = priv->phys[lvds_phy->id ^ 1];
struct phy_configure_opts_lvds *cfg = &lvds_phy->cfg;
/* The master PHY would power off the slave PHY. */
if (cfg->is_slave)
return 0;
mutex_lock(&priv->lock);
if (companion->cfg.is_slave)
regmap_write(priv->regmap, PHY_CTRL + REG_CLR,
CH_EN(0) | CH_EN(1));
else
regmap_write(priv->regmap, PHY_CTRL + REG_CLR,
CH_EN(lvds_phy->id));
mutex_unlock(&priv->lock);
clk_disable_unprepare(priv->phy_ref_clk);
return 0;
}
static int mixel_lvds_phy_configure(struct phy *phy,
union phy_configure_opts *opts)
{
struct mixel_lvds_phy_priv *priv = dev_get_drvdata(phy->dev.parent);
struct phy_configure_opts_lvds *cfg = &opts->lvds;
int ret;
ret = clk_set_rate(priv->phy_ref_clk, cfg->differential_clk_rate);
if (ret)
dev_err(&phy->dev, "failed to set PHY reference clock rate(%lu): %d\n",
cfg->differential_clk_rate, ret);
return ret;
}
/* Assume the master PHY's configuration set is cached first. */
static int mixel_lvds_phy_check_slave(struct phy *slave_phy)
{
struct device *dev = &slave_phy->dev;
struct mixel_lvds_phy_priv *priv = dev_get_drvdata(dev->parent);
struct mixel_lvds_phy *slv = phy_get_drvdata(slave_phy);
struct mixel_lvds_phy *mst = priv->phys[slv->id ^ 1];
struct phy_configure_opts_lvds *mst_cfg = &mst->cfg;
struct phy_configure_opts_lvds *slv_cfg = &slv->cfg;
if (mst_cfg->bits_per_lane_and_dclk_cycle !=
slv_cfg->bits_per_lane_and_dclk_cycle) {
dev_err(dev, "number bits mismatch(mst: %u vs slv: %u)\n",
mst_cfg->bits_per_lane_and_dclk_cycle,
slv_cfg->bits_per_lane_and_dclk_cycle);
return -EINVAL;
}
if (mst_cfg->differential_clk_rate !=
slv_cfg->differential_clk_rate) {
dev_err(dev, "dclk rate mismatch(mst: %lu vs slv: %lu)\n",
mst_cfg->differential_clk_rate,
slv_cfg->differential_clk_rate);
return -EINVAL;
}
if (mst_cfg->lanes != slv_cfg->lanes) {
dev_err(dev, "lanes mismatch(mst: %u vs slv: %u)\n",
mst_cfg->lanes, slv_cfg->lanes);
return -EINVAL;
}
if (mst_cfg->is_slave == slv_cfg->is_slave) {
dev_err(dev, "master PHY is not found\n");
return -EINVAL;
}
return 0;
}
static int mixel_lvds_phy_validate(struct phy *phy, enum phy_mode mode,
int submode, union phy_configure_opts *opts)
{
struct mixel_lvds_phy_priv *priv = dev_get_drvdata(phy->dev.parent);
struct mixel_lvds_phy *lvds_phy = phy_get_drvdata(phy);
struct phy_configure_opts_lvds *cfg = &opts->lvds;
int ret = 0;
if (mode != PHY_MODE_LVDS) {
dev_err(&phy->dev, "invalid PHY mode(%d)\n", mode);
return -EINVAL;
}
if (cfg->bits_per_lane_and_dclk_cycle != 7 &&
cfg->bits_per_lane_and_dclk_cycle != 10) {
dev_err(&phy->dev, "invalid bits per data lane(%u)\n",
cfg->bits_per_lane_and_dclk_cycle);
return -EINVAL;
}
if (cfg->lanes != 4 && cfg->lanes != 3) {
dev_err(&phy->dev, "invalid data lanes(%u)\n", cfg->lanes);
return -EINVAL;
}
if (cfg->differential_clk_rate < MIN_CLKIN_FREQ ||
cfg->differential_clk_rate > MAX_CLKIN_FREQ) {
dev_err(&phy->dev, "invalid differential clock rate(%lu)\n",
cfg->differential_clk_rate);
return -EINVAL;
}
mutex_lock(&priv->lock);
/* Cache our own configuration set for the cross-checks below. */
memcpy(&lvds_phy->cfg, cfg, sizeof(*cfg));
if (cfg->is_slave) {
ret = mixel_lvds_phy_check_slave(phy);
if (ret)
dev_err(&phy->dev, "failed to check slave PHY: %d\n", ret);
}
mutex_unlock(&priv->lock);
return ret;
}
static const struct phy_ops mixel_lvds_phy_ops = {
.init = mixel_lvds_phy_init,
.power_on = mixel_lvds_phy_power_on,
.power_off = mixel_lvds_phy_power_off,
.configure = mixel_lvds_phy_configure,
.validate = mixel_lvds_phy_validate,
.owner = THIS_MODULE,
};
static int mixel_lvds_phy_reset(struct device *dev)
{
struct mixel_lvds_phy_priv *priv = dev_get_drvdata(dev);
int ret;
ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
dev_err(dev, "failed to get PM runtime: %d\n", ret);
return ret;
}
regmap_write(priv->regmap, PHY_CTRL, CTRL_RESET_VAL);
ret = pm_runtime_put(dev);
if (ret < 0)
dev_err(dev, "failed to put PM runtime: %d\n", ret);
return ret;
}
static struct phy *mixel_lvds_phy_xlate(struct device *dev,
struct of_phandle_args *args)
{
struct mixel_lvds_phy_priv *priv = dev_get_drvdata(dev);
unsigned int phy_id;
if (args->args_count != 1) {
dev_err(dev,
"invalid argument number(%d) for 'phys' property\n",
args->args_count);
return ERR_PTR(-EINVAL);
}
phy_id = args->args[0];
if (phy_id >= PHY_NUM) {
dev_err(dev, "invalid PHY index(%d)\n", phy_id);
return ERR_PTR(-ENODEV);
}
return priv->phys[phy_id]->phy;
}
static int mixel_lvds_phy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct phy_provider *phy_provider;
struct mixel_lvds_phy_priv *priv;
struct mixel_lvds_phy *lvds_phy;
struct phy *phy;
int i;
int ret;
if (!dev->of_node)
return -ENODEV;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->regmap = syscon_node_to_regmap(dev->of_node->parent);
if (IS_ERR(priv->regmap))
return dev_err_probe(dev, PTR_ERR(priv->regmap),
"failed to get regmap\n");
priv->phy_ref_clk = devm_clk_get(dev, NULL);
if (IS_ERR(priv->phy_ref_clk))
return dev_err_probe(dev, PTR_ERR(priv->phy_ref_clk),
"failed to get PHY reference clock\n");
mutex_init(&priv->lock);
dev_set_drvdata(dev, priv);
pm_runtime_enable(dev);
ret = mixel_lvds_phy_reset(dev);
if (ret) {
dev_err(dev, "failed to do POR reset: %d\n", ret);
return ret;
}
for (i = 0; i < PHY_NUM; i++) {
lvds_phy = devm_kzalloc(dev, sizeof(*lvds_phy), GFP_KERNEL);
if (!lvds_phy) {
ret = -ENOMEM;
goto err;
}
phy = devm_phy_create(dev, NULL, &mixel_lvds_phy_ops);
if (IS_ERR(phy)) {
ret = PTR_ERR(phy);
dev_err(dev, "failed to create PHY for channel%d: %d\n",
i, ret);
goto err;
}
lvds_phy->phy = phy;
lvds_phy->id = i;
priv->phys[i] = lvds_phy;
phy_set_drvdata(phy, lvds_phy);
}
phy_provider = devm_of_phy_provider_register(dev, mixel_lvds_phy_xlate);
if (IS_ERR(phy_provider)) {
ret = PTR_ERR(phy_provider);
dev_err(dev, "failed to register PHY provider: %d\n", ret);
goto err;
}
return 0;
err:
pm_runtime_disable(dev);
return ret;
}
static void mixel_lvds_phy_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
}
static int __maybe_unused mixel_lvds_phy_runtime_suspend(struct device *dev)
{
struct mixel_lvds_phy_priv *priv = dev_get_drvdata(dev);
/* power down */
mutex_lock(&priv->lock);
regmap_write(priv->regmap, PHY_CTRL + REG_SET, PD);
mutex_unlock(&priv->lock);
return 0;
}
static int __maybe_unused mixel_lvds_phy_runtime_resume(struct device *dev)
{
struct mixel_lvds_phy_priv *priv = dev_get_drvdata(dev);
/* power up + control initialization */
mutex_lock(&priv->lock);
regmap_update_bits(priv->regmap, PHY_CTRL,
CTRL_INIT_MASK | PD, CTRL_INIT_VAL);
mutex_unlock(&priv->lock);
return 0;
}
static const struct dev_pm_ops mixel_lvds_phy_pm_ops = {
SET_RUNTIME_PM_OPS(mixel_lvds_phy_runtime_suspend,
mixel_lvds_phy_runtime_resume, NULL)
};
static const struct of_device_id mixel_lvds_phy_of_match[] = {
{ .compatible = "fsl,imx8qm-lvds-phy" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mixel_lvds_phy_of_match);
static struct platform_driver mixel_lvds_phy_driver = {
.probe = mixel_lvds_phy_probe,
.remove_new = mixel_lvds_phy_remove,
.driver = {
.pm = &mixel_lvds_phy_pm_ops,
.name = "mixel-lvds-phy",
.of_match_table = mixel_lvds_phy_of_match,
}
};
module_platform_driver(mixel_lvds_phy_driver);
MODULE_DESCRIPTION("Mixel LVDS PHY driver");
MODULE_AUTHOR("Liu Ying <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/phy/freescale/phy-fsl-imx8qm-lvds-phy.c |
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (c) 2017 NXP. */
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#define PHY_CTRL0 0x0
#define PHY_CTRL0_REF_SSP_EN BIT(2)
#define PHY_CTRL0_FSEL_MASK GENMASK(10, 5)
#define PHY_CTRL0_FSEL_24M 0x2a
#define PHY_CTRL1 0x4
#define PHY_CTRL1_RESET BIT(0)
#define PHY_CTRL1_COMMONONN BIT(1)
#define PHY_CTRL1_ATERESET BIT(3)
#define PHY_CTRL1_VDATSRCENB0 BIT(19)
#define PHY_CTRL1_VDATDETENB0 BIT(20)
#define PHY_CTRL2 0x8
#define PHY_CTRL2_TXENABLEN0 BIT(8)
#define PHY_CTRL2_OTG_DISABLE BIT(9)
#define PHY_CTRL3 0xc
#define PHY_CTRL3_COMPDISTUNE_MASK GENMASK(2, 0)
#define PHY_CTRL3_TXPREEMP_TUNE_MASK GENMASK(16, 15)
#define PHY_CTRL3_TXRISE_TUNE_MASK GENMASK(21, 20)
#define PHY_CTRL3_TXVREF_TUNE_MASK GENMASK(25, 22)
#define PHY_CTRL3_TX_VBOOST_LEVEL_MASK GENMASK(31, 29)
#define PHY_CTRL4 0x10
#define PHY_CTRL4_PCS_TX_DEEMPH_3P5DB_MASK GENMASK(20, 15)
#define PHY_CTRL5 0x14
#define PHY_CTRL5_DMPWD_OVERRIDE_SEL BIT(23)
#define PHY_CTRL5_DMPWD_OVERRIDE BIT(22)
#define PHY_CTRL5_DPPWD_OVERRIDE_SEL BIT(21)
#define PHY_CTRL5_DPPWD_OVERRIDE BIT(20)
#define PHY_CTRL5_PCS_TX_SWING_FULL_MASK GENMASK(6, 0)
#define PHY_CTRL6 0x18
#define PHY_CTRL6_ALT_CLK_EN BIT(1)
#define PHY_CTRL6_ALT_CLK_SEL BIT(0)
#define PHY_TUNE_DEFAULT 0xffffffff
struct imx8mq_usb_phy {
struct phy *phy;
struct clk *clk;
void __iomem *base;
struct regulator *vbus;
u32 pcs_tx_swing_full;
u32 pcs_tx_deemph_3p5db;
u32 tx_vref_tune;
u32 tx_rise_tune;
u32 tx_preemp_amp_tune;
u32 tx_vboost_level;
u32 comp_dis_tune;
};
static u32 phy_tx_vref_tune_from_property(u32 percent)
{
percent = clamp(percent, 94U, 124U);
return DIV_ROUND_CLOSEST(percent - 94U, 2);
}
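/*
 * Example mapping (derived from the helper above, not from the reference
 * manual): 94% encodes to 0, 100% to 3, and anything at or above 124%
 * saturates at 15.
 */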
static u32 phy_tx_rise_tune_from_property(u32 percent)
{
switch (percent) {
case 0 ... 98:
return 3;
case 99:
return 2;
case 100 ... 101:
return 1;
default:
return 0;
}
}
static u32 phy_tx_preemp_amp_tune_from_property(u32 microamp)
{
microamp = min(microamp, 1800U);
return microamp / 600;
}
static u32 phy_tx_vboost_level_from_property(u32 microvolt)
{
switch (microvolt) {
case 0 ... 960:
return 0;
case 961 ... 1160:
return 2;
default:
return 3;
}
}
static u32 phy_pcs_tx_deemph_3p5db_from_property(u32 decibel)
{
return min(decibel, 36U);
}
static u32 phy_comp_dis_tune_from_property(u32 percent)
{
switch (percent) {
case 0 ... 92:
return 0;
case 93 ... 95:
return 1;
case 96 ... 97:
return 2;
case 98 ... 102:
return 3;
case 103 ... 105:
return 4;
case 106 ... 109:
return 5;
case 110 ... 113:
return 6;
default:
return 7;
}
}
static u32 phy_pcs_tx_swing_full_from_property(u32 percent)
{
percent = min(percent, 100U);
return (percent * 127) / 100;
}
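/*
 * E.g. (arithmetic of the helper above): 50% maps to (50 * 127) / 100 = 63
 * and 100% to the full-scale code 127.
 */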
static void imx8m_get_phy_tuning_data(struct imx8mq_usb_phy *imx_phy)
{
struct device *dev = imx_phy->phy->dev.parent;
if (device_property_read_u32(dev, "fsl,phy-tx-vref-tune-percent",
&imx_phy->tx_vref_tune))
imx_phy->tx_vref_tune = PHY_TUNE_DEFAULT;
else
imx_phy->tx_vref_tune =
phy_tx_vref_tune_from_property(imx_phy->tx_vref_tune);
if (device_property_read_u32(dev, "fsl,phy-tx-rise-tune-percent",
&imx_phy->tx_rise_tune))
imx_phy->tx_rise_tune = PHY_TUNE_DEFAULT;
else
imx_phy->tx_rise_tune =
phy_tx_rise_tune_from_property(imx_phy->tx_rise_tune);
if (device_property_read_u32(dev, "fsl,phy-tx-preemp-amp-tune-microamp",
&imx_phy->tx_preemp_amp_tune))
imx_phy->tx_preemp_amp_tune = PHY_TUNE_DEFAULT;
else
imx_phy->tx_preemp_amp_tune =
phy_tx_preemp_amp_tune_from_property(imx_phy->tx_preemp_amp_tune);
if (device_property_read_u32(dev, "fsl,phy-tx-vboost-level-microvolt",
&imx_phy->tx_vboost_level))
imx_phy->tx_vboost_level = PHY_TUNE_DEFAULT;
else
imx_phy->tx_vboost_level =
phy_tx_vboost_level_from_property(imx_phy->tx_vboost_level);
if (device_property_read_u32(dev, "fsl,phy-comp-dis-tune-percent",
&imx_phy->comp_dis_tune))
imx_phy->comp_dis_tune = PHY_TUNE_DEFAULT;
else
imx_phy->comp_dis_tune =
phy_comp_dis_tune_from_property(imx_phy->comp_dis_tune);
if (device_property_read_u32(dev, "fsl,pcs-tx-deemph-3p5db-attenuation-db",
&imx_phy->pcs_tx_deemph_3p5db))
imx_phy->pcs_tx_deemph_3p5db = PHY_TUNE_DEFAULT;
else
imx_phy->pcs_tx_deemph_3p5db =
phy_pcs_tx_deemph_3p5db_from_property(imx_phy->pcs_tx_deemph_3p5db);
if (device_property_read_u32(dev, "fsl,phy-pcs-tx-swing-full-percent",
&imx_phy->pcs_tx_swing_full))
imx_phy->pcs_tx_swing_full = PHY_TUNE_DEFAULT;
else
imx_phy->pcs_tx_swing_full =
phy_pcs_tx_swing_full_from_property(imx_phy->pcs_tx_swing_full);
}
static void imx8m_phy_tune(struct imx8mq_usb_phy *imx_phy)
{
u32 value;
/* PHY tuning */
if (imx_phy->pcs_tx_deemph_3p5db != PHY_TUNE_DEFAULT) {
value = readl(imx_phy->base + PHY_CTRL4);
value &= ~PHY_CTRL4_PCS_TX_DEEMPH_3P5DB_MASK;
value |= FIELD_PREP(PHY_CTRL4_PCS_TX_DEEMPH_3P5DB_MASK,
imx_phy->pcs_tx_deemph_3p5db);
writel(value, imx_phy->base + PHY_CTRL4);
}
if (imx_phy->pcs_tx_swing_full != PHY_TUNE_DEFAULT) {
value = readl(imx_phy->base + PHY_CTRL5);
value |= FIELD_PREP(PHY_CTRL5_PCS_TX_SWING_FULL_MASK,
imx_phy->pcs_tx_swing_full);
writel(value, imx_phy->base + PHY_CTRL5);
}
if ((imx_phy->tx_vref_tune & imx_phy->tx_rise_tune &
imx_phy->tx_preemp_amp_tune & imx_phy->comp_dis_tune &
imx_phy->tx_vboost_level) == PHY_TUNE_DEFAULT)
/* If all fields are still at their default value, there is no need to update. */
return;
value = readl(imx_phy->base + PHY_CTRL3);
if (imx_phy->tx_vref_tune != PHY_TUNE_DEFAULT) {
value &= ~PHY_CTRL3_TXVREF_TUNE_MASK;
value |= FIELD_PREP(PHY_CTRL3_TXVREF_TUNE_MASK,
imx_phy->tx_vref_tune);
}
if (imx_phy->tx_rise_tune != PHY_TUNE_DEFAULT) {
value &= ~PHY_CTRL3_TXRISE_TUNE_MASK;
value |= FIELD_PREP(PHY_CTRL3_TXRISE_TUNE_MASK,
imx_phy->tx_rise_tune);
}
if (imx_phy->tx_preemp_amp_tune != PHY_TUNE_DEFAULT) {
value &= ~PHY_CTRL3_TXPREEMP_TUNE_MASK;
value |= FIELD_PREP(PHY_CTRL3_TXPREEMP_TUNE_MASK,
imx_phy->tx_preemp_amp_tune);
}
if (imx_phy->comp_dis_tune != PHY_TUNE_DEFAULT) {
value &= ~PHY_CTRL3_COMPDISTUNE_MASK;
value |= FIELD_PREP(PHY_CTRL3_COMPDISTUNE_MASK,
imx_phy->comp_dis_tune);
}
if (imx_phy->tx_vboost_level != PHY_TUNE_DEFAULT) {
value &= ~PHY_CTRL3_TX_VBOOST_LEVEL_MASK;
value |= FIELD_PREP(PHY_CTRL3_TX_VBOOST_LEVEL_MASK,
imx_phy->tx_vboost_level);
}
writel(value, imx_phy->base + PHY_CTRL3);
}
static int imx8mq_usb_phy_init(struct phy *phy)
{
struct imx8mq_usb_phy *imx_phy = phy_get_drvdata(phy);
u32 value;
value = readl(imx_phy->base + PHY_CTRL1);
value &= ~(PHY_CTRL1_VDATSRCENB0 | PHY_CTRL1_VDATDETENB0 |
PHY_CTRL1_COMMONONN);
value |= PHY_CTRL1_RESET | PHY_CTRL1_ATERESET;
writel(value, imx_phy->base + PHY_CTRL1);
value = readl(imx_phy->base + PHY_CTRL0);
value |= PHY_CTRL0_REF_SSP_EN;
writel(value, imx_phy->base + PHY_CTRL0);
value = readl(imx_phy->base + PHY_CTRL2);
value |= PHY_CTRL2_TXENABLEN0;
writel(value, imx_phy->base + PHY_CTRL2);
value = readl(imx_phy->base + PHY_CTRL1);
value &= ~(PHY_CTRL1_RESET | PHY_CTRL1_ATERESET);
writel(value, imx_phy->base + PHY_CTRL1);
return 0;
}
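/*
 * The i.MX8MP variant additionally selects the 24 MHz reference via the
 * FSEL field, routes the internal MPLL clocks instead of alt_clk, and
 * applies the optional tuning values once the PHY is out of reset.
 */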
static int imx8mp_usb_phy_init(struct phy *phy)
{
struct imx8mq_usb_phy *imx_phy = phy_get_drvdata(phy);
u32 value;
/* USB3.0 PHY signal fsel for 24M ref */
value = readl(imx_phy->base + PHY_CTRL0);
value &= ~PHY_CTRL0_FSEL_MASK;
value |= FIELD_PREP(PHY_CTRL0_FSEL_MASK, PHY_CTRL0_FSEL_24M);
writel(value, imx_phy->base + PHY_CTRL0);
/* Disable alt_clk_en and use internal MPLL clocks */
value = readl(imx_phy->base + PHY_CTRL6);
value &= ~(PHY_CTRL6_ALT_CLK_SEL | PHY_CTRL6_ALT_CLK_EN);
writel(value, imx_phy->base + PHY_CTRL6);
value = readl(imx_phy->base + PHY_CTRL1);
value &= ~(PHY_CTRL1_VDATSRCENB0 | PHY_CTRL1_VDATDETENB0);
value |= PHY_CTRL1_RESET | PHY_CTRL1_ATERESET;
writel(value, imx_phy->base + PHY_CTRL1);
value = readl(imx_phy->base + PHY_CTRL0);
value |= PHY_CTRL0_REF_SSP_EN;
writel(value, imx_phy->base + PHY_CTRL0);
value = readl(imx_phy->base + PHY_CTRL2);
value |= PHY_CTRL2_TXENABLEN0 | PHY_CTRL2_OTG_DISABLE;
writel(value, imx_phy->base + PHY_CTRL2);
udelay(10);
value = readl(imx_phy->base + PHY_CTRL1);
value &= ~(PHY_CTRL1_RESET | PHY_CTRL1_ATERESET);
writel(value, imx_phy->base + PHY_CTRL1);
imx8m_phy_tune(imx_phy);
return 0;
}
static int imx8mq_phy_power_on(struct phy *phy)
{
struct imx8mq_usb_phy *imx_phy = phy_get_drvdata(phy);
int ret;
ret = regulator_enable(imx_phy->vbus);
if (ret)
return ret;
return clk_prepare_enable(imx_phy->clk);
}
static int imx8mq_phy_power_off(struct phy *phy)
{
struct imx8mq_usb_phy *imx_phy = phy_get_drvdata(phy);
clk_disable_unprepare(imx_phy->clk);
regulator_disable(imx_phy->vbus);
return 0;
}
static const struct phy_ops imx8mq_usb_phy_ops = {
.init = imx8mq_usb_phy_init,
.power_on = imx8mq_phy_power_on,
.power_off = imx8mq_phy_power_off,
.owner = THIS_MODULE,
};
static const struct phy_ops imx8mp_usb_phy_ops = {
.init = imx8mp_usb_phy_init,
.power_on = imx8mq_phy_power_on,
.power_off = imx8mq_phy_power_off,
.owner = THIS_MODULE,
};
static const struct of_device_id imx8mq_usb_phy_of_match[] = {
{.compatible = "fsl,imx8mq-usb-phy",
.data = &imx8mq_usb_phy_ops,},
{.compatible = "fsl,imx8mp-usb-phy",
.data = &imx8mp_usb_phy_ops,},
{ }
};
MODULE_DEVICE_TABLE(of, imx8mq_usb_phy_of_match);
static int imx8mq_usb_phy_probe(struct platform_device *pdev)
{
struct phy_provider *phy_provider;
struct device *dev = &pdev->dev;
struct imx8mq_usb_phy *imx_phy;
const struct phy_ops *phy_ops;
imx_phy = devm_kzalloc(dev, sizeof(*imx_phy), GFP_KERNEL);
if (!imx_phy)
return -ENOMEM;
imx_phy->clk = devm_clk_get(dev, "phy");
if (IS_ERR(imx_phy->clk)) {
dev_err(dev, "failed to get imx8mq usb phy clock\n");
return PTR_ERR(imx_phy->clk);
}
imx_phy->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(imx_phy->base))
return PTR_ERR(imx_phy->base);
phy_ops = of_device_get_match_data(dev);
if (!phy_ops)
return -EINVAL;
imx_phy->phy = devm_phy_create(dev, NULL, phy_ops);
if (IS_ERR(imx_phy->phy))
return PTR_ERR(imx_phy->phy);
imx_phy->vbus = devm_regulator_get(dev, "vbus");
if (IS_ERR(imx_phy->vbus))
return dev_err_probe(dev, PTR_ERR(imx_phy->vbus), "failed to get vbus\n");
phy_set_drvdata(imx_phy->phy, imx_phy);
imx8m_get_phy_tuning_data(imx_phy);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
return PTR_ERR_OR_ZERO(phy_provider);
}
static struct platform_driver imx8mq_usb_phy_driver = {
.probe = imx8mq_usb_phy_probe,
.driver = {
.name = "imx8mq-usb-phy",
.of_match_table = imx8mq_usb_phy_of_match,
}
};
module_platform_driver(imx8mq_usb_phy_driver);
MODULE_DESCRIPTION("FSL IMX8MQ USB PHY driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/phy/freescale/phy-fsl-imx8mq-usb.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2017 John Crispin <[email protected]>
*
* Based on code from
* Allwinner Technology Co., Ltd. <www.allwinnertech.com>
*/
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#define RT_SYSC_REG_SYSCFG1 0x014
#define RT_SYSC_REG_CLKCFG1 0x030
#define RT_SYSC_REG_USB_PHY_CFG 0x05c
#define OFS_U2_PHY_AC0 0x800
#define OFS_U2_PHY_AC1 0x804
#define OFS_U2_PHY_AC2 0x808
#define OFS_U2_PHY_ACR0 0x810
#define OFS_U2_PHY_ACR1 0x814
#define OFS_U2_PHY_ACR2 0x818
#define OFS_U2_PHY_ACR3 0x81C
#define OFS_U2_PHY_ACR4 0x820
#define OFS_U2_PHY_AMON0 0x824
#define OFS_U2_PHY_DCR0 0x860
#define OFS_U2_PHY_DCR1 0x864
#define OFS_U2_PHY_DTM0 0x868
#define OFS_U2_PHY_DTM1 0x86C
#define RT_RSTCTRL_UDEV BIT(25)
#define RT_RSTCTRL_UHST BIT(22)
#define RT_SYSCFG1_USB0_HOST_MODE BIT(10)
#define MT7620_CLKCFG1_UPHY0_CLK_EN BIT(25)
#define MT7620_CLKCFG1_UPHY1_CLK_EN BIT(22)
#define RT_CLKCFG1_UPHY1_CLK_EN BIT(20)
#define RT_CLKCFG1_UPHY0_CLK_EN BIT(18)
#define USB_PHY_UTMI_8B60M BIT(1)
#define UDEV_WAKEUP BIT(0)
struct ralink_usb_phy {
struct reset_control *rstdev;
struct reset_control *rsthost;
u32 clk;
struct phy *phy;
void __iomem *base;
struct regmap *sysctl;
};
static void u2_phy_w32(struct ralink_usb_phy *phy, u32 val, u32 reg)
{
writel(val, phy->base + reg);
}
static u32 u2_phy_r32(struct ralink_usb_phy *phy, u32 reg)
{
return readl(phy->base + reg);
}
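/*
 * Analog/digital calibration sequence for the MT7628/MT7688 PHY block.
 * The values are magic numbers inherited from the vendor SDK; the
 * interleaved reads of DCR0 appear to act as settle delays between the
 * writes (assumption, not documented).
 */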
static void ralink_usb_phy_init(struct ralink_usb_phy *phy)
{
u2_phy_r32(phy, OFS_U2_PHY_AC2);
u2_phy_r32(phy, OFS_U2_PHY_ACR0);
u2_phy_r32(phy, OFS_U2_PHY_DCR0);
u2_phy_w32(phy, 0x00ffff02, OFS_U2_PHY_DCR0);
u2_phy_r32(phy, OFS_U2_PHY_DCR0);
u2_phy_w32(phy, 0x00555502, OFS_U2_PHY_DCR0);
u2_phy_r32(phy, OFS_U2_PHY_DCR0);
u2_phy_w32(phy, 0x00aaaa02, OFS_U2_PHY_DCR0);
u2_phy_r32(phy, OFS_U2_PHY_DCR0);
u2_phy_w32(phy, 0x00000402, OFS_U2_PHY_DCR0);
u2_phy_r32(phy, OFS_U2_PHY_DCR0);
u2_phy_w32(phy, 0x0048086a, OFS_U2_PHY_AC0);
u2_phy_w32(phy, 0x4400001c, OFS_U2_PHY_AC1);
u2_phy_w32(phy, 0xc0200000, OFS_U2_PHY_ACR3);
u2_phy_w32(phy, 0x02000000, OFS_U2_PHY_DTM0);
}
static int ralink_usb_phy_power_on(struct phy *_phy)
{
struct ralink_usb_phy *phy = phy_get_drvdata(_phy);
u32 t;
/* enable the phy */
regmap_update_bits(phy->sysctl, RT_SYSC_REG_CLKCFG1,
phy->clk, phy->clk);
/* setup host mode */
regmap_update_bits(phy->sysctl, RT_SYSC_REG_SYSCFG1,
RT_SYSCFG1_USB0_HOST_MODE,
RT_SYSCFG1_USB0_HOST_MODE);
/* deassert the reset lines */
reset_control_deassert(phy->rsthost);
reset_control_deassert(phy->rstdev);
/*
* The SDK kernel used a delay of 100 ms; however, testing on real
* devices showed that 10 ms is enough.
*/
mdelay(10);
if (phy->base)
ralink_usb_phy_init(phy);
/* print some status info */
regmap_read(phy->sysctl, RT_SYSC_REG_USB_PHY_CFG, &t);
dev_info(&phy->phy->dev, "remote usb device wakeup %s\n",
(t & UDEV_WAKEUP) ? ("enabled") : ("disabled"));
if (t & USB_PHY_UTMI_8B60M)
dev_info(&phy->phy->dev, "UTMI 8bit 60MHz\n");
else
dev_info(&phy->phy->dev, "UTMI 16bit 30MHz\n");
return 0;
}
static int ralink_usb_phy_power_off(struct phy *_phy)
{
struct ralink_usb_phy *phy = phy_get_drvdata(_phy);
/* disable the phy */
regmap_update_bits(phy->sysctl, RT_SYSC_REG_CLKCFG1,
phy->clk, 0);
/* assert the reset lines */
reset_control_assert(phy->rstdev);
reset_control_assert(phy->rsthost);
return 0;
}
static const struct phy_ops ralink_usb_phy_ops = {
.power_on = ralink_usb_phy_power_on,
.power_off = ralink_usb_phy_power_off,
.owner = THIS_MODULE,
};
static const struct of_device_id ralink_usb_phy_of_match[] = {
{
.compatible = "ralink,rt3352-usbphy",
.data = (void *)(uintptr_t)(RT_CLKCFG1_UPHY1_CLK_EN |
RT_CLKCFG1_UPHY0_CLK_EN)
},
{
.compatible = "mediatek,mt7620-usbphy",
.data = (void *)(uintptr_t)(MT7620_CLKCFG1_UPHY1_CLK_EN |
MT7620_CLKCFG1_UPHY0_CLK_EN)
},
{
.compatible = "mediatek,mt7628-usbphy",
.data = (void *)(uintptr_t)(MT7620_CLKCFG1_UPHY1_CLK_EN |
MT7620_CLKCFG1_UPHY0_CLK_EN) },
{ },
};
MODULE_DEVICE_TABLE(of, ralink_usb_phy_of_match);
static int ralink_usb_phy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct phy_provider *phy_provider;
const struct of_device_id *match;
struct ralink_usb_phy *phy;
match = of_match_device(ralink_usb_phy_of_match, &pdev->dev);
if (!match)
return -ENODEV;
phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
if (!phy)
return -ENOMEM;
phy->clk = (uintptr_t)match->data;
phy->base = NULL;
phy->sysctl = syscon_regmap_lookup_by_phandle(dev->of_node, "ralink,sysctl");
if (IS_ERR(phy->sysctl)) {
dev_err(dev, "failed to get sysctl registers\n");
return PTR_ERR(phy->sysctl);
}
/* The MT7628 and MT7688 require extra setup of PHY registers. */
if (of_device_is_compatible(dev->of_node, "mediatek,mt7628-usbphy")) {
phy->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(phy->base)) {
dev_err(dev, "failed to remap register memory\n");
return PTR_ERR(phy->base);
}
}
phy->rsthost = devm_reset_control_get(&pdev->dev, "host");
if (IS_ERR(phy->rsthost)) {
dev_err(dev, "host reset is missing\n");
return PTR_ERR(phy->rsthost);
}
phy->rstdev = devm_reset_control_get(&pdev->dev, "device");
if (IS_ERR(phy->rstdev)) {
dev_err(dev, "device reset is missing\n");
return PTR_ERR(phy->rstdev);
}
phy->phy = devm_phy_create(dev, NULL, &ralink_usb_phy_ops);
if (IS_ERR(phy->phy)) {
dev_err(dev, "failed to create PHY\n");
return PTR_ERR(phy->phy);
}
phy_set_drvdata(phy->phy, phy);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
return PTR_ERR_OR_ZERO(phy_provider);
}
static struct platform_driver ralink_usb_phy_driver = {
.probe = ralink_usb_phy_probe,
.driver = {
.of_match_table = ralink_usb_phy_of_match,
.name = "ralink-usb-phy",
}
};
module_platform_driver(ralink_usb_phy_driver);
MODULE_DESCRIPTION("Ralink USB phy driver");
MODULE_AUTHOR("John Crispin <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/phy/ralink/phy-ralink-usb.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Mediatek MT7621 PCI PHY Driver
* Author: Sergio Paracuellos <[email protected]>
*/
#include <dt-bindings/phy/phy.h>
#include <linux/clk.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/sys_soc.h>
#define RG_PE1_PIPE_REG 0x02c
#define RG_PE1_PIPE_RST BIT(12)
#define RG_PE1_PIPE_CMD_FRC BIT(4)
#define RG_P0_TO_P1_WIDTH 0x100
#define RG_PE1_H_LCDDS_REG 0x49c
#define RG_PE1_H_LCDDS_PCW GENMASK(30, 0)
#define RG_PE1_FRC_H_XTAL_REG 0x400
#define RG_PE1_FRC_H_XTAL_TYPE BIT(8)
#define RG_PE1_H_XTAL_TYPE GENMASK(10, 9)
#define RG_PE1_FRC_PHY_REG 0x000
#define RG_PE1_FRC_PHY_EN BIT(4)
#define RG_PE1_PHY_EN BIT(5)
#define RG_PE1_H_PLL_REG 0x490
#define RG_PE1_H_PLL_BC GENMASK(23, 22)
#define RG_PE1_H_PLL_BP GENMASK(21, 18)
#define RG_PE1_H_PLL_IR GENMASK(15, 12)
#define RG_PE1_H_PLL_IC GENMASK(11, 8)
#define RG_PE1_H_PLL_PREDIV GENMASK(7, 6)
#define RG_PE1_PLL_DIVEN GENMASK(3, 1)
#define RG_PE1_H_PLL_FBKSEL_REG 0x4bc
#define RG_PE1_H_PLL_FBKSEL GENMASK(5, 4)
#define RG_PE1_H_LCDDS_SSC_PRD_REG 0x4a4
#define RG_PE1_H_LCDDS_SSC_PRD GENMASK(15, 0)
#define RG_PE1_H_LCDDS_SSC_DELTA_REG 0x4a8
#define RG_PE1_H_LCDDS_SSC_DELTA GENMASK(11, 0)
#define RG_PE1_H_LCDDS_SSC_DELTA1 GENMASK(27, 16)
#define RG_PE1_LCDDS_CLK_PH_INV_REG 0x4a0
#define RG_PE1_LCDDS_CLK_PH_INV BIT(5)
#define RG_PE1_H_PLL_BR_REG 0x4ac
#define RG_PE1_H_PLL_BR GENMASK(18, 16)
#define RG_PE1_MSTCKDIV_REG 0x414
#define RG_PE1_MSTCKDIV GENMASK(7, 6)
#define RG_PE1_FRC_MSTCKDIV BIT(5)
#define MAX_PHYS 2
/**
* struct mt7621_pci_phy - MT7621 PCIe PHY core
* @dev: pointer to device
* @regmap: kernel regmap pointer
* @phy: pointer to the kernel PHY device
* @sys_clk: pointer to the system XTAL clock
* @port_base: base register
* @has_dual_port: if the phy has dual ports.
* @bypass_pipe_rst: mark if 'mt7621_bypass_pipe_rst'
* needs to be executed. Depends on chip revision.
*/
struct mt7621_pci_phy {
struct device *dev;
struct regmap *regmap;
struct phy *phy;
struct clk *sys_clk;
void __iomem *port_base;
bool has_dual_port;
bool bypass_pipe_rst;
};
static inline void mt7621_phy_rmw(struct mt7621_pci_phy *phy,
u32 reg, u32 clr, u32 set)
{
u32 val;
/*
* We cannot use 'regmap_write_bits' here because it masks 'set'
* internally before it is written to the register, which results
* in an unreliable PCI setup. Not masking 'set' before assigning
* it to 'val' completely avoids the problem.
*/
regmap_read(phy->regmap, reg, &val);
val &= ~clr;
val |= set;
regmap_write(phy->regmap, reg, val);
}
static void mt7621_bypass_pipe_rst(struct mt7621_pci_phy *phy)
{
mt7621_phy_rmw(phy, RG_PE1_PIPE_REG, 0, RG_PE1_PIPE_RST);
mt7621_phy_rmw(phy, RG_PE1_PIPE_REG, 0, RG_PE1_PIPE_CMD_FRC);
if (phy->has_dual_port) {
mt7621_phy_rmw(phy, RG_PE1_PIPE_REG + RG_P0_TO_P1_WIDTH,
0, RG_PE1_PIPE_RST);
mt7621_phy_rmw(phy, RG_PE1_PIPE_REG + RG_P0_TO_P1_WIDTH,
0, RG_PE1_PIPE_CMD_FRC);
}
}
static int mt7621_set_phy_for_ssc(struct mt7621_pci_phy *phy)
{
struct device *dev = phy->dev;
unsigned long clk_rate;
clk_rate = clk_get_rate(phy->sys_clk);
if (!clk_rate)
return -EINVAL;
/* Set PCIe Port PHY to disable SSC */
/* Debug Xtal Type */
mt7621_phy_rmw(phy, RG_PE1_FRC_H_XTAL_REG,
RG_PE1_FRC_H_XTAL_TYPE | RG_PE1_H_XTAL_TYPE,
RG_PE1_FRC_H_XTAL_TYPE |
FIELD_PREP(RG_PE1_H_XTAL_TYPE, 0x00));
/* disable port */
mt7621_phy_rmw(phy, RG_PE1_FRC_PHY_REG, RG_PE1_PHY_EN,
RG_PE1_FRC_PHY_EN);
if (phy->has_dual_port) {
mt7621_phy_rmw(phy, RG_PE1_FRC_PHY_REG + RG_P0_TO_P1_WIDTH,
RG_PE1_PHY_EN, RG_PE1_FRC_PHY_EN);
}
if (clk_rate == 40000000) { /* 40MHz Xtal */
/* Set Pre-divider ratio (for host mode) */
mt7621_phy_rmw(phy, RG_PE1_H_PLL_REG, RG_PE1_H_PLL_PREDIV,
FIELD_PREP(RG_PE1_H_PLL_PREDIV, 0x01));
dev_dbg(dev, "Xtal is 40MHz\n");
} else if (clk_rate == 25000000) { /* 25MHz Xtal */
mt7621_phy_rmw(phy, RG_PE1_H_PLL_REG, RG_PE1_H_PLL_PREDIV,
FIELD_PREP(RG_PE1_H_PLL_PREDIV, 0x00));
/* Select feedback clock */
mt7621_phy_rmw(phy, RG_PE1_H_PLL_FBKSEL_REG,
RG_PE1_H_PLL_FBKSEL,
FIELD_PREP(RG_PE1_H_PLL_FBKSEL, 0x01));
/* DDS NCPO PCW (for host mode) */
mt7621_phy_rmw(phy, RG_PE1_H_LCDDS_SSC_PRD_REG,
RG_PE1_H_LCDDS_SSC_PRD,
FIELD_PREP(RG_PE1_H_LCDDS_SSC_PRD, 0x00));
/* DDS SSC dither period control */
mt7621_phy_rmw(phy, RG_PE1_H_LCDDS_SSC_PRD_REG,
RG_PE1_H_LCDDS_SSC_PRD,
FIELD_PREP(RG_PE1_H_LCDDS_SSC_PRD, 0x18d));
/* DDS SSC dither amplitude control */
mt7621_phy_rmw(phy, RG_PE1_H_LCDDS_SSC_DELTA_REG,
RG_PE1_H_LCDDS_SSC_DELTA |
RG_PE1_H_LCDDS_SSC_DELTA1,
FIELD_PREP(RG_PE1_H_LCDDS_SSC_DELTA, 0x4a) |
FIELD_PREP(RG_PE1_H_LCDDS_SSC_DELTA1, 0x4a));
dev_dbg(dev, "Xtal is 25MHz\n");
} else { /* 20MHz Xtal */
mt7621_phy_rmw(phy, RG_PE1_H_PLL_REG, RG_PE1_H_PLL_PREDIV,
FIELD_PREP(RG_PE1_H_PLL_PREDIV, 0x00));
dev_dbg(dev, "Xtal is 20MHz\n");
}
/* DDS clock inversion */
mt7621_phy_rmw(phy, RG_PE1_LCDDS_CLK_PH_INV_REG,
RG_PE1_LCDDS_CLK_PH_INV, RG_PE1_LCDDS_CLK_PH_INV);
/* Set PLL bits */
mt7621_phy_rmw(phy, RG_PE1_H_PLL_REG,
RG_PE1_H_PLL_BC | RG_PE1_H_PLL_BP | RG_PE1_H_PLL_IR |
RG_PE1_H_PLL_IC | RG_PE1_PLL_DIVEN,
FIELD_PREP(RG_PE1_H_PLL_BC, 0x02) |
FIELD_PREP(RG_PE1_H_PLL_BP, 0x06) |
FIELD_PREP(RG_PE1_H_PLL_IR, 0x02) |
FIELD_PREP(RG_PE1_H_PLL_IC, 0x01) |
FIELD_PREP(RG_PE1_PLL_DIVEN, 0x02));
mt7621_phy_rmw(phy, RG_PE1_H_PLL_BR_REG, RG_PE1_H_PLL_BR,
FIELD_PREP(RG_PE1_H_PLL_BR, 0x00));
if (clk_rate == 40000000) { /* 40MHz Xtal */
/* set force mode enable of da_pe1_mstckdiv */
mt7621_phy_rmw(phy, RG_PE1_MSTCKDIV_REG,
RG_PE1_MSTCKDIV | RG_PE1_FRC_MSTCKDIV,
FIELD_PREP(RG_PE1_MSTCKDIV, 0x01) |
RG_PE1_FRC_MSTCKDIV);
}
return 0;
}
static int mt7621_pci_phy_init(struct phy *phy)
{
struct mt7621_pci_phy *mphy = phy_get_drvdata(phy);
if (mphy->bypass_pipe_rst)
mt7621_bypass_pipe_rst(mphy);
return mt7621_set_phy_for_ssc(mphy);
}
static int mt7621_pci_phy_power_on(struct phy *phy)
{
struct mt7621_pci_phy *mphy = phy_get_drvdata(phy);
/* Enable PHY and disable force mode */
mt7621_phy_rmw(mphy, RG_PE1_FRC_PHY_REG,
RG_PE1_FRC_PHY_EN, RG_PE1_PHY_EN);
if (mphy->has_dual_port) {
mt7621_phy_rmw(mphy, RG_PE1_FRC_PHY_REG + RG_P0_TO_P1_WIDTH,
RG_PE1_FRC_PHY_EN, RG_PE1_PHY_EN);
}
return 0;
}
static int mt7621_pci_phy_power_off(struct phy *phy)
{
struct mt7621_pci_phy *mphy = phy_get_drvdata(phy);
/* Disable PHY */
mt7621_phy_rmw(mphy, RG_PE1_FRC_PHY_REG,
RG_PE1_PHY_EN, RG_PE1_FRC_PHY_EN);
if (mphy->has_dual_port) {
mt7621_phy_rmw(mphy, RG_PE1_FRC_PHY_REG + RG_P0_TO_P1_WIDTH,
RG_PE1_PHY_EN, RG_PE1_FRC_PHY_EN);
}
return 0;
}
static int mt7621_pci_phy_exit(struct phy *phy)
{
return 0;
}
static const struct phy_ops mt7621_pci_phy_ops = {
.init = mt7621_pci_phy_init,
.exit = mt7621_pci_phy_exit,
.power_on = mt7621_pci_phy_power_on,
.power_off = mt7621_pci_phy_power_off,
.owner = THIS_MODULE,
};
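/*
 * The PHY cell value is reused as the dual-port flag: a consumer passing
 * 1 marks this instance as serving two PCIe ports, so the init and power
 * callbacks also program the second port register bank at the
 * RG_P0_TO_P1_WIDTH offset.
 */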
static struct phy *mt7621_pcie_phy_of_xlate(struct device *dev,
struct of_phandle_args *args)
{
struct mt7621_pci_phy *mt7621_phy = dev_get_drvdata(dev);
if (WARN_ON(args->args[0] >= MAX_PHYS))
return ERR_PTR(-ENODEV);
mt7621_phy->has_dual_port = args->args[0];
dev_dbg(dev, "PHY for 0x%px (dual port = %d)\n",
mt7621_phy->port_base, mt7621_phy->has_dual_port);
return mt7621_phy->phy;
}
static const struct soc_device_attribute mt7621_pci_quirks_match[] = {
{ .soc_id = "mt7621", .revision = "E2" },
{ /* sentinel */ }
};
static const struct regmap_config mt7621_pci_phy_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.max_register = 0x700,
};
static int mt7621_pci_phy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct soc_device_attribute *attr;
struct phy_provider *provider;
struct mt7621_pci_phy *phy;
phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
if (!phy)
return -ENOMEM;
attr = soc_device_match(mt7621_pci_quirks_match);
if (attr)
phy->bypass_pipe_rst = true;
phy->dev = dev;
platform_set_drvdata(pdev, phy);
phy->port_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(phy->port_base)) {
dev_err(dev, "failed to remap phy regs\n");
return PTR_ERR(phy->port_base);
}
phy->regmap = devm_regmap_init_mmio(phy->dev, phy->port_base,
&mt7621_pci_phy_regmap_config);
if (IS_ERR(phy->regmap))
return PTR_ERR(phy->regmap);
phy->phy = devm_phy_create(dev, dev->of_node, &mt7621_pci_phy_ops);
if (IS_ERR(phy->phy)) {
dev_err(dev, "failed to create phy\n");
return PTR_ERR(phy->phy);
}
phy->sys_clk = devm_clk_get(dev, NULL);
if (IS_ERR(phy->sys_clk)) {
dev_err(dev, "failed to get phy clock\n");
return PTR_ERR(phy->sys_clk);
}
phy_set_drvdata(phy->phy, phy);
provider = devm_of_phy_provider_register(dev, mt7621_pcie_phy_of_xlate);
return PTR_ERR_OR_ZERO(provider);
}
static const struct of_device_id mt7621_pci_phy_ids[] = {
{ .compatible = "mediatek,mt7621-pci-phy" },
{},
};
MODULE_DEVICE_TABLE(of, mt7621_pci_phy_ids);
static struct platform_driver mt7621_pci_phy_driver = {
.probe = mt7621_pci_phy_probe,
.driver = {
.name = "mt7621-pci-phy",
.of_match_table = mt7621_pci_phy_ids,
},
};
builtin_platform_driver(mt7621_pci_phy_driver);
MODULE_AUTHOR("Sergio Paracuellos <[email protected]>");
MODULE_DESCRIPTION("MediaTek MT7621 PCIe PHY driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/phy/ralink/phy-mt7621-pci.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Ingenic SoCs USB PHY driver
* Copyright (c) Paul Cercueil <[email protected]>
* Copyright (c) 漆鹏振 (Qi Pengzhen) <[email protected]>
* Copyright (c) 周琰杰 (Zhou Yanjie) <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
/* OTGPHY register offsets */
#define REG_USBPCR_OFFSET 0x00
#define REG_USBRDT_OFFSET 0x04
#define REG_USBVBFIL_OFFSET 0x08
#define REG_USBPCR1_OFFSET 0x0c
/* bits within the USBPCR register */
#define USBPCR_USB_MODE BIT(31)
#define USBPCR_AVLD_REG BIT(30)
#define USBPCR_COMMONONN BIT(25)
#define USBPCR_VBUSVLDEXT BIT(24)
#define USBPCR_VBUSVLDEXTSEL BIT(23)
#define USBPCR_POR BIT(22)
#define USBPCR_SIDDQ BIT(21)
#define USBPCR_OTG_DISABLE BIT(20)
#define USBPCR_TXPREEMPHTUNE BIT(6)
#define USBPCR_IDPULLUP_MASK GENMASK(29, 28)
#define USBPCR_IDPULLUP_ALWAYS 0x2
#define USBPCR_IDPULLUP_SUSPEND 0x1
#define USBPCR_IDPULLUP_OTG 0x0
#define USBPCR_COMPDISTUNE_MASK GENMASK(19, 17)
#define USBPCR_COMPDISTUNE_DFT 0x4
#define USBPCR_OTGTUNE_MASK GENMASK(16, 14)
#define USBPCR_OTGTUNE_DFT 0x4
#define USBPCR_SQRXTUNE_MASK GENMASK(13, 11)
#define USBPCR_SQRXTUNE_DCR_20PCT 0x7
#define USBPCR_SQRXTUNE_DFT 0x3
#define USBPCR_TXFSLSTUNE_MASK GENMASK(10, 7)
#define USBPCR_TXFSLSTUNE_DCR_50PPT 0xf
#define USBPCR_TXFSLSTUNE_DCR_25PPT 0x7
#define USBPCR_TXFSLSTUNE_DFT 0x3
#define USBPCR_TXFSLSTUNE_INC_25PPT 0x1
#define USBPCR_TXFSLSTUNE_INC_50PPT 0x0
#define USBPCR_TXHSXVTUNE_MASK GENMASK(5, 4)
#define USBPCR_TXHSXVTUNE_DFT 0x3
#define USBPCR_TXHSXVTUNE_DCR_15MV 0x1
#define USBPCR_TXRISETUNE_MASK GENMASK(5, 4)
#define USBPCR_TXRISETUNE_DFT 0x3
#define USBPCR_TXVREFTUNE_MASK GENMASK(3, 0)
#define USBPCR_TXVREFTUNE_INC_75PPT 0xb
#define USBPCR_TXVREFTUNE_INC_25PPT 0x7
#define USBPCR_TXVREFTUNE_DFT 0x5
/* bits within the USBRDTR register */
#define USBRDT_UTMI_RST BIT(27)
#define USBRDT_HB_MASK BIT(26)
#define USBRDT_VBFIL_LD_EN BIT(25)
#define USBRDT_IDDIG_EN BIT(24)
#define USBRDT_IDDIG_REG BIT(23)
#define USBRDT_VBFIL_EN BIT(2)
/* bits within the USBPCR1 register */
#define USBPCR1_BVLD_REG BIT(31)
#define USBPCR1_DPPD BIT(29)
#define USBPCR1_DMPD BIT(28)
#define USBPCR1_USB_SEL BIT(28)
#define USBPCR1_PORT_RST BIT(21)
#define USBPCR1_WORD_IF_16BIT BIT(19)
struct ingenic_soc_info {
void (*usb_phy_init)(struct phy *phy);
};
struct ingenic_usb_phy {
const struct ingenic_soc_info *soc_info;
struct phy *phy;
void __iomem *base;
struct clk *clk;
struct regulator *vcc_supply;
};
static int ingenic_usb_phy_init(struct phy *phy)
{
struct ingenic_usb_phy *priv = phy_get_drvdata(phy);
int err;
u32 reg;
err = clk_prepare_enable(priv->clk);
if (err) {
dev_err(&phy->dev, "Unable to start clock: %d\n", err);
return err;
}
priv->soc_info->usb_phy_init(phy);
/* Wait for PHY to reset */
usleep_range(30, 300);
reg = readl(priv->base + REG_USBPCR_OFFSET);
writel(reg & ~USBPCR_POR, priv->base + REG_USBPCR_OFFSET);
usleep_range(300, 1000);
return 0;
}
static int ingenic_usb_phy_exit(struct phy *phy)
{
struct ingenic_usb_phy *priv = phy_get_drvdata(phy);
clk_disable_unprepare(priv->clk);
regulator_disable(priv->vcc_supply);
return 0;
}
static int ingenic_usb_phy_power_on(struct phy *phy)
{
struct ingenic_usb_phy *priv = phy_get_drvdata(phy);
int err;
err = regulator_enable(priv->vcc_supply);
if (err) {
dev_err(&phy->dev, "Unable to enable VCC: %d\n", err);
return err;
}
return 0;
}
static int ingenic_usb_phy_power_off(struct phy *phy)
{
struct ingenic_usb_phy *priv = phy_get_drvdata(phy);
regulator_disable(priv->vcc_supply);
return 0;
}
static int ingenic_usb_phy_set_mode(struct phy *phy,
enum phy_mode mode, int submode)
{
struct ingenic_usb_phy *priv = phy_get_drvdata(phy);
u32 reg;
switch (mode) {
case PHY_MODE_USB_HOST:
reg = readl(priv->base + REG_USBPCR_OFFSET);
u32p_replace_bits(®, 1, USBPCR_USB_MODE);
u32p_replace_bits(®, 0, USBPCR_VBUSVLDEXT);
u32p_replace_bits(®, 0, USBPCR_VBUSVLDEXTSEL);
u32p_replace_bits(®, 0, USBPCR_OTG_DISABLE);
writel(reg, priv->base + REG_USBPCR_OFFSET);
break;
case PHY_MODE_USB_DEVICE:
reg = readl(priv->base + REG_USBPCR_OFFSET);
u32p_replace_bits(®, 0, USBPCR_USB_MODE);
u32p_replace_bits(®, 1, USBPCR_VBUSVLDEXT);
u32p_replace_bits(®, 1, USBPCR_VBUSVLDEXTSEL);
u32p_replace_bits(®, 1, USBPCR_OTG_DISABLE);
writel(reg, priv->base + REG_USBPCR_OFFSET);
break;
case PHY_MODE_USB_OTG:
reg = readl(priv->base + REG_USBPCR_OFFSET);
u32p_replace_bits(®, 1, USBPCR_USB_MODE);
u32p_replace_bits(®, 1, USBPCR_VBUSVLDEXT);
u32p_replace_bits(®, 1, USBPCR_VBUSVLDEXTSEL);
u32p_replace_bits(®, 0, USBPCR_OTG_DISABLE);
writel(reg, priv->base + REG_USBPCR_OFFSET);
break;
default:
return -EINVAL;
}
return 0;
}
static const struct phy_ops ingenic_usb_phy_ops = {
.init = ingenic_usb_phy_init,
.exit = ingenic_usb_phy_exit,
.power_on = ingenic_usb_phy_power_on,
.power_off = ingenic_usb_phy_power_off,
.set_mode = ingenic_usb_phy_set_mode,
.owner = THIS_MODULE,
};
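/*
 * Per-SoC initial USBPCR/USBPCR1 programming. Every variant leaves
 * USBPCR_POR asserted here; the common ingenic_usb_phy_init() releases
 * it after the settle delay.
 */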
static void jz4770_usb_phy_init(struct phy *phy)
{
struct ingenic_usb_phy *priv = phy_get_drvdata(phy);
u32 reg;
reg = USBPCR_AVLD_REG | USBPCR_COMMONONN | USBPCR_POR |
FIELD_PREP(USBPCR_IDPULLUP_MASK, USBPCR_IDPULLUP_ALWAYS) |
FIELD_PREP(USBPCR_COMPDISTUNE_MASK, USBPCR_COMPDISTUNE_DFT) |
FIELD_PREP(USBPCR_OTGTUNE_MASK, USBPCR_OTGTUNE_DFT) |
FIELD_PREP(USBPCR_SQRXTUNE_MASK, USBPCR_SQRXTUNE_DFT) |
FIELD_PREP(USBPCR_TXFSLSTUNE_MASK, USBPCR_TXFSLSTUNE_DFT) |
FIELD_PREP(USBPCR_TXRISETUNE_MASK, USBPCR_TXRISETUNE_DFT) |
FIELD_PREP(USBPCR_TXVREFTUNE_MASK, USBPCR_TXVREFTUNE_DFT);
writel(reg, priv->base + REG_USBPCR_OFFSET);
}
static void jz4775_usb_phy_init(struct phy *phy)
{
struct ingenic_usb_phy *priv = phy_get_drvdata(phy);
u32 reg;
reg = readl(priv->base + REG_USBPCR1_OFFSET) | USBPCR1_USB_SEL |
USBPCR1_WORD_IF_16BIT;
writel(reg, priv->base + REG_USBPCR1_OFFSET);
reg = USBPCR_COMMONONN | USBPCR_POR |
FIELD_PREP(USBPCR_TXVREFTUNE_MASK, USBPCR_TXVREFTUNE_INC_75PPT);
writel(reg, priv->base + REG_USBPCR_OFFSET);
}
static void jz4780_usb_phy_init(struct phy *phy)
{
struct ingenic_usb_phy *priv = phy_get_drvdata(phy);
u32 reg;
reg = readl(priv->base + REG_USBPCR1_OFFSET) | USBPCR1_USB_SEL |
USBPCR1_WORD_IF_16BIT;
writel(reg, priv->base + REG_USBPCR1_OFFSET);
reg = USBPCR_TXPREEMPHTUNE | USBPCR_COMMONONN | USBPCR_POR;
writel(reg, priv->base + REG_USBPCR_OFFSET);
}
static void x1000_usb_phy_init(struct phy *phy)
{
struct ingenic_usb_phy *priv = phy_get_drvdata(phy);
u32 reg;
reg = readl(priv->base + REG_USBPCR1_OFFSET) | USBPCR1_WORD_IF_16BIT;
writel(reg, priv->base + REG_USBPCR1_OFFSET);
reg = USBPCR_TXPREEMPHTUNE | USBPCR_COMMONONN | USBPCR_POR |
FIELD_PREP(USBPCR_SQRXTUNE_MASK, USBPCR_SQRXTUNE_DCR_20PCT) |
FIELD_PREP(USBPCR_TXHSXVTUNE_MASK, USBPCR_TXHSXVTUNE_DCR_15MV) |
FIELD_PREP(USBPCR_TXVREFTUNE_MASK, USBPCR_TXVREFTUNE_INC_25PPT);
writel(reg, priv->base + REG_USBPCR_OFFSET);
}
static void x1830_usb_phy_init(struct phy *phy)
{
struct ingenic_usb_phy *priv = phy_get_drvdata(phy);
u32 reg;
/* rdt */
writel(USBRDT_VBFIL_EN | USBRDT_UTMI_RST, priv->base + REG_USBRDT_OFFSET);
reg = readl(priv->base + REG_USBPCR1_OFFSET) | USBPCR1_WORD_IF_16BIT |
USBPCR1_DMPD | USBPCR1_DPPD;
writel(reg, priv->base + REG_USBPCR1_OFFSET);
reg = USBPCR_VBUSVLDEXT | USBPCR_TXPREEMPHTUNE | USBPCR_COMMONONN | USBPCR_POR |
FIELD_PREP(USBPCR_IDPULLUP_MASK, USBPCR_IDPULLUP_OTG);
writel(reg, priv->base + REG_USBPCR_OFFSET);
}
static void x2000_usb_phy_init(struct phy *phy)
{
struct ingenic_usb_phy *priv = phy_get_drvdata(phy);
u32 reg;
reg = readl(priv->base + REG_USBPCR1_OFFSET) | USBPCR1_DPPD | USBPCR1_DMPD;
writel(reg & ~USBPCR1_PORT_RST, priv->base + REG_USBPCR1_OFFSET);
reg = USBPCR_POR | FIELD_PREP(USBPCR_IDPULLUP_MASK, USBPCR_IDPULLUP_OTG);
writel(reg, priv->base + REG_USBPCR_OFFSET);
}
static const struct ingenic_soc_info jz4770_soc_info = {
.usb_phy_init = jz4770_usb_phy_init,
};
static const struct ingenic_soc_info jz4775_soc_info = {
.usb_phy_init = jz4775_usb_phy_init,
};
static const struct ingenic_soc_info jz4780_soc_info = {
.usb_phy_init = jz4780_usb_phy_init,
};
static const struct ingenic_soc_info x1000_soc_info = {
.usb_phy_init = x1000_usb_phy_init,
};
static const struct ingenic_soc_info x1830_soc_info = {
.usb_phy_init = x1830_usb_phy_init,
};
static const struct ingenic_soc_info x2000_soc_info = {
.usb_phy_init = x2000_usb_phy_init,
};
static int ingenic_usb_phy_probe(struct platform_device *pdev)
{
struct ingenic_usb_phy *priv;
struct phy_provider *provider;
struct device *dev = &pdev->dev;
int err;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->soc_info = device_get_match_data(dev);
if (!priv->soc_info) {
dev_err(dev, "Error: No device match found\n");
return -ENODEV;
}
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
dev_err(dev, "Failed to map registers\n");
return PTR_ERR(priv->base);
}
priv->clk = devm_clk_get(dev, NULL);
if (IS_ERR(priv->clk)) {
err = PTR_ERR(priv->clk);
if (err != -EPROBE_DEFER)
dev_err(dev, "Failed to get clock\n");
return err;
}
priv->vcc_supply = devm_regulator_get(dev, "vcc");
if (IS_ERR(priv->vcc_supply)) {
err = PTR_ERR(priv->vcc_supply);
if (err != -EPROBE_DEFER)
dev_err(dev, "Failed to get regulator\n");
return err;
}
priv->phy = devm_phy_create(dev, NULL, &ingenic_usb_phy_ops);
if (IS_ERR(priv->phy))
return PTR_ERR(priv->phy);
phy_set_drvdata(priv->phy, priv);
provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
return PTR_ERR_OR_ZERO(provider);
}
static const struct of_device_id ingenic_usb_phy_of_matches[] = {
{ .compatible = "ingenic,jz4770-phy", .data = &jz4770_soc_info },
{ .compatible = "ingenic,jz4775-phy", .data = &jz4775_soc_info },
{ .compatible = "ingenic,jz4780-phy", .data = &jz4780_soc_info },
{ .compatible = "ingenic,x1000-phy", .data = &x1000_soc_info },
{ .compatible = "ingenic,x1830-phy", .data = &x1830_soc_info },
{ .compatible = "ingenic,x2000-phy", .data = &x2000_soc_info },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ingenic_usb_phy_of_matches);
static struct platform_driver ingenic_usb_phy_driver = {
.probe = ingenic_usb_phy_probe,
.driver = {
.name = "ingenic-usb-phy",
.of_match_table = ingenic_usb_phy_of_matches,
},
};
module_platform_driver(ingenic_usb_phy_driver);
MODULE_AUTHOR("周琰杰 (Zhou Yanjie) <[email protected]>");
MODULE_AUTHOR("漆鹏振 (Qi Pengzhen) <[email protected]>");
MODULE_AUTHOR("Paul Cercueil <[email protected]>");
MODULE_DESCRIPTION("Ingenic SoCs USB PHY driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/phy/ingenic/phy-ingenic-usb.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Cadence Torrent SD0801 PHY driver.
*
* Copyright 2018 Cadence Design Systems, Inc.
*
*/
#include <dt-bindings/phy/phy.h>
#include <dt-bindings/phy/phy-cadence.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/regmap.h>
#define REF_CLK_19_2MHZ 19200000
#define REF_CLK_25MHZ 25000000
#define REF_CLK_100MHZ 100000000
#define REF_CLK_156_25MHZ 156250000
#define MAX_NUM_LANES 4
#define DEFAULT_MAX_BIT_RATE 8100 /* in Mbps */
#define POLL_TIMEOUT_US 5000
#define PLL_LOCK_TIMEOUT 100000
#define DP_PLL0 BIT(0)
#define DP_PLL1 BIT(1)
#define TORRENT_COMMON_CDB_OFFSET 0x0
#define TORRENT_TX_LANE_CDB_OFFSET(ln, block_offset, reg_offset) \
((0x4000 << (block_offset)) + \
(((ln) << 9) << (reg_offset)))
#define TORRENT_RX_LANE_CDB_OFFSET(ln, block_offset, reg_offset) \
((0x8000 << (block_offset)) + \
(((ln) << 9) << (reg_offset)))
#define TORRENT_PHY_PCS_COMMON_OFFSET(block_offset) \
(0xC000 << (block_offset))
#define TORRENT_PHY_PCS_LANE_CDB_OFFSET(ln, block_offset, reg_offset) \
((0xD000 << (block_offset)) + \
(((ln) << 8) << (reg_offset)))
#define TORRENT_PHY_PMA_COMMON_OFFSET(block_offset) \
(0xE000 << (block_offset))
#define TORRENT_DPTX_PHY_OFFSET 0x0
/*
* register offsets from DPTX PHY register block base (i.e. MHDP
* register base + 0x30a00)
*/
#define PHY_AUX_CTRL 0x04
#define PHY_RESET 0x20
#define PMA_TX_ELEC_IDLE_SHIFT 4
#define PHY_PMA_XCVR_PLLCLK_EN 0x24
#define PHY_PMA_XCVR_PLLCLK_EN_ACK 0x28
#define PHY_PMA_XCVR_POWER_STATE_REQ 0x2c
#define PHY_POWER_STATE_LN(ln) ((ln) * 8)
#define PMA_XCVR_POWER_STATE_REQ_LN_MASK 0x3FU
#define PHY_PMA_XCVR_POWER_STATE_ACK 0x30
#define PHY_PMA_CMN_READY 0x34
/*
* register offsets from SD0801 PHY register block base (i.e. MHDP
* register base + 0x500000)
*/
#define CMN_SSM_BANDGAP_TMR 0x0021U
#define CMN_SSM_BIAS_TMR 0x0022U
#define CMN_PLLSM0_PLLPRE_TMR 0x002AU
#define CMN_PLLSM0_PLLLOCK_TMR 0x002CU
#define CMN_PLLSM1_PLLPRE_TMR 0x0032U
#define CMN_PLLSM1_PLLLOCK_TMR 0x0034U
#define CMN_CDIAG_CDB_PWRI_OVRD 0x0041U
#define CMN_CDIAG_XCVRC_PWRI_OVRD 0x0047U
#define CMN_CDIAG_REFCLK_OVRD 0x004CU
#define CMN_CDIAG_REFCLK_DRV0_CTRL 0x0050U
#define CMN_BGCAL_INIT_TMR 0x0064U
#define CMN_BGCAL_ITER_TMR 0x0065U
#define CMN_IBCAL_INIT_TMR 0x0074U
#define CMN_PLL0_VCOCAL_TCTRL 0x0082U
#define CMN_PLL0_VCOCAL_INIT_TMR 0x0084U
#define CMN_PLL0_VCOCAL_ITER_TMR 0x0085U
#define CMN_PLL0_VCOCAL_REFTIM_START 0x0086U
#define CMN_PLL0_VCOCAL_PLLCNT_START 0x0088U
#define CMN_PLL0_INTDIV_M0 0x0090U
#define CMN_PLL0_FRACDIVL_M0 0x0091U
#define CMN_PLL0_FRACDIVH_M0 0x0092U
#define CMN_PLL0_HIGH_THR_M0 0x0093U
#define CMN_PLL0_DSM_DIAG_M0 0x0094U
#define CMN_PLL0_DSM_FBH_OVRD_M0 0x0095U
#define CMN_PLL0_DSM_FBL_OVRD_M0 0x0096U
#define CMN_PLL0_SS_CTRL1_M0 0x0098U
#define CMN_PLL0_SS_CTRL2_M0 0x0099U
#define CMN_PLL0_SS_CTRL3_M0 0x009AU
#define CMN_PLL0_SS_CTRL4_M0 0x009BU
#define CMN_PLL0_LOCK_REFCNT_START 0x009CU
#define CMN_PLL0_LOCK_PLLCNT_START 0x009EU
#define CMN_PLL0_LOCK_PLLCNT_THR 0x009FU
#define CMN_PLL0_INTDIV_M1 0x00A0U
#define CMN_PLL0_FRACDIVH_M1 0x00A2U
#define CMN_PLL0_HIGH_THR_M1 0x00A3U
#define CMN_PLL0_DSM_DIAG_M1 0x00A4U
#define CMN_PLL0_SS_CTRL1_M1 0x00A8U
#define CMN_PLL0_SS_CTRL2_M1 0x00A9U
#define CMN_PLL0_SS_CTRL3_M1 0x00AAU
#define CMN_PLL0_SS_CTRL4_M1 0x00ABU
#define CMN_PLL1_VCOCAL_TCTRL 0x00C2U
#define CMN_PLL1_VCOCAL_INIT_TMR 0x00C4U
#define CMN_PLL1_VCOCAL_ITER_TMR 0x00C5U
#define CMN_PLL1_VCOCAL_REFTIM_START 0x00C6U
#define CMN_PLL1_VCOCAL_PLLCNT_START 0x00C8U
#define CMN_PLL1_INTDIV_M0 0x00D0U
#define CMN_PLL1_FRACDIVL_M0 0x00D1U
#define CMN_PLL1_FRACDIVH_M0 0x00D2U
#define CMN_PLL1_HIGH_THR_M0 0x00D3U
#define CMN_PLL1_DSM_DIAG_M0 0x00D4U
#define CMN_PLL1_DSM_FBH_OVRD_M0 0x00D5U
#define CMN_PLL1_DSM_FBL_OVRD_M0 0x00D6U
#define CMN_PLL1_SS_CTRL1_M0 0x00D8U
#define CMN_PLL1_SS_CTRL2_M0 0x00D9U
#define CMN_PLL1_SS_CTRL3_M0 0x00DAU
#define CMN_PLL1_SS_CTRL4_M0 0x00DBU
#define CMN_PLL1_LOCK_REFCNT_START 0x00DCU
#define CMN_PLL1_LOCK_PLLCNT_START 0x00DEU
#define CMN_PLL1_LOCK_PLLCNT_THR 0x00DFU
#define CMN_TXPUCAL_TUNE 0x0103U
#define CMN_TXPUCAL_INIT_TMR 0x0104U
#define CMN_TXPUCAL_ITER_TMR 0x0105U
#define CMN_TXPDCAL_TUNE 0x010BU
#define CMN_TXPDCAL_INIT_TMR 0x010CU
#define CMN_TXPDCAL_ITER_TMR 0x010DU
#define CMN_RXCAL_INIT_TMR 0x0114U
#define CMN_RXCAL_ITER_TMR 0x0115U
#define CMN_SD_CAL_INIT_TMR 0x0124U
#define CMN_SD_CAL_ITER_TMR 0x0125U
#define CMN_SD_CAL_REFTIM_START 0x0126U
#define CMN_SD_CAL_PLLCNT_START 0x0128U
#define CMN_PDIAG_PLL0_CTRL_M0 0x01A0U
#define CMN_PDIAG_PLL0_CLK_SEL_M0 0x01A1U
#define CMN_PDIAG_PLL0_CP_PADJ_M0 0x01A4U
#define CMN_PDIAG_PLL0_CP_IADJ_M0 0x01A5U
#define CMN_PDIAG_PLL0_FILT_PADJ_M0 0x01A6U
#define CMN_PDIAG_PLL0_CTRL_M1 0x01B0U
#define CMN_PDIAG_PLL0_CLK_SEL_M1 0x01B1U
#define CMN_PDIAG_PLL0_CP_PADJ_M1 0x01B4U
#define CMN_PDIAG_PLL0_CP_IADJ_M1 0x01B5U
#define CMN_PDIAG_PLL0_FILT_PADJ_M1 0x01B6U
#define CMN_PDIAG_PLL1_CTRL_M0 0x01C0U
#define CMN_PDIAG_PLL1_CLK_SEL_M0 0x01C1U
#define CMN_PDIAG_PLL1_CP_PADJ_M0 0x01C4U
#define CMN_PDIAG_PLL1_CP_IADJ_M0 0x01C5U
#define CMN_PDIAG_PLL1_FILT_PADJ_M0 0x01C6U
#define CMN_DIAG_BIAS_OVRD1 0x01E1U
/* PMA TX Lane registers */
#define TX_TXCC_CTRL 0x0040U
#define TX_TXCC_CPOST_MULT_00 0x004CU
#define TX_TXCC_CPOST_MULT_01 0x004DU
#define TX_TXCC_MGNFS_MULT_000 0x0050U
#define TX_TXCC_MGNFS_MULT_100 0x0054U
#define DRV_DIAG_TX_DRV 0x00C6U
#define XCVR_DIAG_PLLDRC_CTRL 0x00E5U
#define XCVR_DIAG_HSCLK_SEL 0x00E6U
#define XCVR_DIAG_HSCLK_DIV 0x00E7U
#define XCVR_DIAG_RXCLK_CTRL 0x00E9U
#define XCVR_DIAG_BIDI_CTRL 0x00EAU
#define XCVR_DIAG_PSC_OVRD 0x00EBU
#define TX_PSC_A0 0x0100U
#define TX_PSC_A1 0x0101U
#define TX_PSC_A2 0x0102U
#define TX_PSC_A3 0x0103U
#define TX_RCVDET_ST_TMR 0x0123U
#define TX_DIAG_ACYA 0x01E7U
#define TX_DIAG_ACYA_HBDC_MASK 0x0001U
/* PMA RX Lane registers */
#define RX_PSC_A0 0x0000U
#define RX_PSC_A1 0x0001U
#define RX_PSC_A2 0x0002U
#define RX_PSC_A3 0x0003U
#define RX_PSC_CAL 0x0006U
#define RX_SDCAL0_INIT_TMR 0x0044U
#define RX_SDCAL0_ITER_TMR 0x0045U
#define RX_SDCAL1_INIT_TMR 0x004CU
#define RX_SDCAL1_ITER_TMR 0x004DU
#define RX_CDRLF_CNFG 0x0080U
#define RX_CDRLF_CNFG3 0x0082U
#define RX_SIGDET_HL_FILT_TMR 0x0090U
#define RX_REE_GCSM1_CTRL 0x0108U
#define RX_REE_GCSM1_EQENM_PH1 0x0109U
#define RX_REE_GCSM1_EQENM_PH2 0x010AU
#define RX_REE_GCSM2_CTRL 0x0110U
#define RX_REE_PERGCSM_CTRL 0x0118U
#define RX_REE_ATTEN_THR 0x0149U
#define RX_REE_TAP1_CLIP 0x0171U
#define RX_REE_TAP2TON_CLIP 0x0172U
#define RX_REE_SMGM_CTRL1 0x0177U
#define RX_REE_SMGM_CTRL2 0x0178U
#define RX_DIAG_DFE_CTRL 0x01E0U
#define RX_DIAG_DFE_AMP_TUNE_2 0x01E2U
#define RX_DIAG_DFE_AMP_TUNE_3 0x01E3U
#define RX_DIAG_NQST_CTRL 0x01E5U
#define RX_DIAG_SIGDET_TUNE 0x01E8U
#define RX_DIAG_PI_RATE 0x01F4U
#define RX_DIAG_PI_CAP 0x01F5U
#define RX_DIAG_ACYA 0x01FFU
/* PHY PCS common registers */
#define PHY_PIPE_CMN_CTRL1 0x0000U
#define PHY_PLL_CFG 0x000EU
#define PHY_PIPE_USB3_GEN2_PRE_CFG0 0x0020U
#define PHY_PIPE_USB3_GEN2_POST_CFG0 0x0022U
#define PHY_PIPE_USB3_GEN2_POST_CFG1 0x0023U
/* PHY PCS lane registers */
#define PHY_PCS_ISO_LINK_CTRL 0x000BU
/* PHY PMA common registers */
#define PHY_PMA_CMN_CTRL1 0x0000U
#define PHY_PMA_CMN_CTRL2 0x0001U
#define PHY_PMA_PLL_RAW_CTRL 0x0003U
#define CDNS_TORRENT_OUTPUT_CLOCKS 3
static const char * const clk_names[] = {
[CDNS_TORRENT_REFCLK_DRIVER] = "refclk-driver",
[CDNS_TORRENT_DERIVED_REFCLK] = "refclk-der",
[CDNS_TORRENT_RECEIVED_REFCLK] = "refclk-rec",
};
static const struct reg_field phy_pll_cfg =
REG_FIELD(PHY_PLL_CFG, 0, 1);
static const struct reg_field phy_pma_cmn_ctrl_1 =
REG_FIELD(PHY_PMA_CMN_CTRL1, 0, 0);
static const struct reg_field phy_pma_cmn_ctrl_2 =
REG_FIELD(PHY_PMA_CMN_CTRL2, 0, 7);
static const struct reg_field phy_pma_pll_raw_ctrl =
REG_FIELD(PHY_PMA_PLL_RAW_CTRL, 0, 1);
static const struct reg_field phy_reset_ctrl =
REG_FIELD(PHY_RESET, 8, 8);
static const struct reg_field phy_pcs_iso_link_ctrl_1 =
REG_FIELD(PHY_PCS_ISO_LINK_CTRL, 1, 1);
static const struct reg_field phy_pipe_cmn_ctrl1_0 = REG_FIELD(PHY_PIPE_CMN_CTRL1, 0, 0);
static const struct reg_field cmn_cdiag_refclk_ovrd_4 =
REG_FIELD(CMN_CDIAG_REFCLK_OVRD, 4, 4);
#define REFCLK_OUT_NUM_CMN_CONFIG 4
enum cdns_torrent_refclk_out_cmn {
CMN_CDIAG_REFCLK_DRV0_CTRL_1,
CMN_CDIAG_REFCLK_DRV0_CTRL_4,
CMN_CDIAG_REFCLK_DRV0_CTRL_5,
CMN_CDIAG_REFCLK_DRV0_CTRL_6,
};
static const struct reg_field refclk_out_cmn_cfg[] = {
[CMN_CDIAG_REFCLK_DRV0_CTRL_1] = REG_FIELD(CMN_CDIAG_REFCLK_DRV0_CTRL, 1, 1),
[CMN_CDIAG_REFCLK_DRV0_CTRL_4] = REG_FIELD(CMN_CDIAG_REFCLK_DRV0_CTRL, 4, 4),
[CMN_CDIAG_REFCLK_DRV0_CTRL_5] = REG_FIELD(CMN_CDIAG_REFCLK_DRV0_CTRL, 5, 5),
[CMN_CDIAG_REFCLK_DRV0_CTRL_6] = REG_FIELD(CMN_CDIAG_REFCLK_DRV0_CTRL, 6, 6),
};
static const int refclk_driver_parent_index[] = {
CDNS_TORRENT_DERIVED_REFCLK,
CDNS_TORRENT_RECEIVED_REFCLK
};
static u32 cdns_torrent_refclk_driver_mux_table[] = { 1, 0 };
enum cdns_torrent_phy_type {
TYPE_NONE,
TYPE_DP,
TYPE_PCIE,
TYPE_SGMII,
TYPE_QSGMII,
TYPE_USB,
TYPE_USXGMII,
};
enum cdns_torrent_ref_clk {
CLK_19_2_MHZ,
CLK_25_MHZ,
CLK_100_MHZ,
CLK_156_25_MHZ,
CLK_ANY,
};
enum cdns_torrent_ssc_mode {
NO_SSC,
EXTERNAL_SSC,
INTERNAL_SSC,
ANY_SSC,
};
/* Unique key id for vals table entry
* REFCLK0_RATE | REFCLK1_RATE | LINK0_TYPE | LINK1_TYPE | SSC_TYPE
*/
#define REFCLK0_SHIFT 12
#define REFCLK0_MASK GENMASK(14, 12)
#define REFCLK1_SHIFT 9
#define REFCLK1_MASK GENMASK(11, 9)
#define LINK0_SHIFT 6
#define LINK0_MASK GENMASK(8, 6)
#define LINK1_SHIFT 3
#define LINK1_MASK GENMASK(5, 3)
#define SSC_SHIFT 0
#define SSC_MASK GENMASK(2, 0)
#define CDNS_TORRENT_KEY(refclk0, refclk1, link0, link1, ssc) \
((((refclk0) << REFCLK0_SHIFT) & REFCLK0_MASK) | \
(((refclk1) << REFCLK1_SHIFT) & REFCLK1_MASK) | \
(((link0) << LINK0_SHIFT) & LINK0_MASK) | \
(((link1) << LINK1_SHIFT) & LINK1_MASK) | \
(((ssc) << SSC_SHIFT) & SSC_MASK))
#define CDNS_TORRENT_KEY_ANYCLK(link0, link1) \
CDNS_TORRENT_KEY(CLK_ANY, CLK_ANY, \
(link0), (link1), ANY_SSC)
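/*
 * Illustrative example (values assumed, not taken from a real board): a
 * lookup for a single PCIe link on 100 MHz reference clocks without SSC
 * would use
 *
 *	CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, NO_SSC)
 *
 * i.e. each enum value packed into its own 3-bit field of the key.
 */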
struct cdns_torrent_inst {
struct phy *phy;
u32 mlane;
enum cdns_torrent_phy_type phy_type;
u32 num_lanes;
struct reset_control *lnk_rst;
enum cdns_torrent_ssc_mode ssc_mode;
};
struct cdns_torrent_phy {
void __iomem *base; /* DPTX registers base */
void __iomem *sd_base; /* SD0801 registers base */
u32 max_bit_rate; /* Maximum link bit rate to use (in Mbps) */
u32 dp_pll;
struct reset_control *phy_rst;
struct reset_control *apb_rst;
struct device *dev;
struct clk *clk;
enum cdns_torrent_ref_clk ref_clk_rate;
struct cdns_torrent_inst phys[MAX_NUM_LANES];
int nsubnodes;
const struct cdns_torrent_data *init_data;
struct regmap *regmap_common_cdb;
struct regmap *regmap_phy_pcs_common_cdb;
struct regmap *regmap_phy_pma_common_cdb;
struct regmap *regmap_tx_lane_cdb[MAX_NUM_LANES];
struct regmap *regmap_rx_lane_cdb[MAX_NUM_LANES];
struct regmap *regmap_phy_pcs_lane_cdb[MAX_NUM_LANES];
struct regmap *regmap_dptx_phy_reg;
struct regmap_field *phy_pll_cfg;
struct regmap_field *phy_pipe_cmn_ctrl1_0;
struct regmap_field *cmn_cdiag_refclk_ovrd_4;
struct regmap_field *phy_pma_cmn_ctrl_1;
struct regmap_field *phy_pma_cmn_ctrl_2;
struct regmap_field *phy_pma_pll_raw_ctrl;
struct regmap_field *phy_reset_ctrl;
struct regmap_field *phy_pcs_iso_link_ctrl_1[MAX_NUM_LANES];
struct clk_hw_onecell_data *clk_hw_data;
};
enum phy_powerstate {
POWERSTATE_A0 = 0,
/* Powerstate A1 is unused */
POWERSTATE_A2 = 2,
POWERSTATE_A3 = 3,
};
struct cdns_torrent_refclk_driver {
struct clk_hw hw;
struct regmap_field *cmn_fields[REFCLK_OUT_NUM_CMN_CONFIG];
struct clk_init_data clk_data;
};
#define to_cdns_torrent_refclk_driver(_hw) \
container_of(_hw, struct cdns_torrent_refclk_driver, hw)
struct cdns_torrent_derived_refclk {
struct clk_hw hw;
struct regmap_field *phy_pipe_cmn_ctrl1_0;
struct regmap_field *cmn_cdiag_refclk_ovrd_4;
struct clk_init_data clk_data;
};
#define to_cdns_torrent_derived_refclk(_hw) \
container_of(_hw, struct cdns_torrent_derived_refclk, hw)
struct cdns_torrent_received_refclk {
struct clk_hw hw;
struct regmap_field *phy_pipe_cmn_ctrl1_0;
struct regmap_field *cmn_cdiag_refclk_ovrd_4;
struct clk_init_data clk_data;
};
#define to_cdns_torrent_received_refclk(_hw) \
container_of(_hw, struct cdns_torrent_received_refclk, hw)
struct cdns_reg_pairs {
u32 val;
u32 off;
};
struct cdns_torrent_vals {
struct cdns_reg_pairs *reg_pairs;
u32 num_regs;
};
struct cdns_torrent_vals_entry {
u32 key;
struct cdns_torrent_vals *vals;
};
struct cdns_torrent_vals_table {
struct cdns_torrent_vals_entry *entries;
u32 num_entries;
};
struct cdns_torrent_data {
u8 block_offset_shift;
u8 reg_offset_shift;
struct cdns_torrent_vals_table link_cmn_vals_tbl;
struct cdns_torrent_vals_table xcvr_diag_vals_tbl;
struct cdns_torrent_vals_table pcs_cmn_vals_tbl;
struct cdns_torrent_vals_table phy_pma_cmn_vals_tbl;
struct cdns_torrent_vals_table cmn_vals_tbl;
struct cdns_torrent_vals_table tx_ln_vals_tbl;
struct cdns_torrent_vals_table rx_ln_vals_tbl;
};
struct cdns_regmap_cdb_context {
struct device *dev;
void __iomem *base;
u8 reg_offset_shift;
};
static struct cdns_torrent_vals *cdns_torrent_get_tbl_vals(const struct cdns_torrent_vals_table *tbl,
enum cdns_torrent_ref_clk refclk0,
enum cdns_torrent_ref_clk refclk1,
enum cdns_torrent_phy_type link0,
enum cdns_torrent_phy_type link1,
enum cdns_torrent_ssc_mode ssc)
{
int i;
u32 key = CDNS_TORRENT_KEY(refclk0, refclk1, link0, link1, ssc);
for (i = 0; i < tbl->num_entries; i++) {
if (tbl->entries[i].key == key)
return tbl->entries[i].vals;
}
return NULL;
}
static int cdns_regmap_write(void *context, unsigned int reg, unsigned int val)
{
struct cdns_regmap_cdb_context *ctx = context;
u32 offset = reg << ctx->reg_offset_shift;
writew(val, ctx->base + offset);
return 0;
}
static int cdns_regmap_read(void *context, unsigned int reg, unsigned int *val)
{
struct cdns_regmap_cdb_context *ctx = context;
u32 offset = reg << ctx->reg_offset_shift;
*val = readw(ctx->base + offset);
return 0;
}
static int cdns_regmap_dptx_write(void *context, unsigned int reg,
unsigned int val)
{
struct cdns_regmap_cdb_context *ctx = context;
u32 offset = reg;
writel(val, ctx->base + offset);
return 0;
}
static int cdns_regmap_dptx_read(void *context, unsigned int reg,
unsigned int *val)
{
struct cdns_regmap_cdb_context *ctx = context;
u32 offset = reg;
*val = readl(ctx->base + offset);
return 0;
}
#define TORRENT_TX_LANE_CDB_REGMAP_CONF(n) \
{ \
.name = "torrent_tx_lane" n "_cdb", \
.reg_stride = 1, \
.fast_io = true, \
.reg_write = cdns_regmap_write, \
.reg_read = cdns_regmap_read, \
}
#define TORRENT_RX_LANE_CDB_REGMAP_CONF(n) \
{ \
.name = "torrent_rx_lane" n "_cdb", \
.reg_stride = 1, \
.fast_io = true, \
.reg_write = cdns_regmap_write, \
.reg_read = cdns_regmap_read, \
}
static const struct regmap_config cdns_torrent_tx_lane_cdb_config[] = {
TORRENT_TX_LANE_CDB_REGMAP_CONF("0"),
TORRENT_TX_LANE_CDB_REGMAP_CONF("1"),
TORRENT_TX_LANE_CDB_REGMAP_CONF("2"),
TORRENT_TX_LANE_CDB_REGMAP_CONF("3"),
};
static const struct regmap_config cdns_torrent_rx_lane_cdb_config[] = {
TORRENT_RX_LANE_CDB_REGMAP_CONF("0"),
TORRENT_RX_LANE_CDB_REGMAP_CONF("1"),
TORRENT_RX_LANE_CDB_REGMAP_CONF("2"),
TORRENT_RX_LANE_CDB_REGMAP_CONF("3"),
};
static const struct regmap_config cdns_torrent_common_cdb_config = {
.name = "torrent_common_cdb",
.reg_stride = 1,
.fast_io = true,
.reg_write = cdns_regmap_write,
.reg_read = cdns_regmap_read,
};
#define TORRENT_PHY_PCS_LANE_CDB_REGMAP_CONF(n) \
{ \
.name = "torrent_phy_pcs_lane" n "_cdb", \
.reg_stride = 1, \
.fast_io = true, \
.reg_write = cdns_regmap_write, \
.reg_read = cdns_regmap_read, \
}
static const struct regmap_config cdns_torrent_phy_pcs_lane_cdb_config[] = {
TORRENT_PHY_PCS_LANE_CDB_REGMAP_CONF("0"),
TORRENT_PHY_PCS_LANE_CDB_REGMAP_CONF("1"),
TORRENT_PHY_PCS_LANE_CDB_REGMAP_CONF("2"),
TORRENT_PHY_PCS_LANE_CDB_REGMAP_CONF("3"),
};
static const struct regmap_config cdns_torrent_phy_pcs_cmn_cdb_config = {
.name = "torrent_phy_pcs_cmn_cdb",
.reg_stride = 1,
.fast_io = true,
.reg_write = cdns_regmap_write,
.reg_read = cdns_regmap_read,
};
static const struct regmap_config cdns_torrent_phy_pma_cmn_cdb_config = {
.name = "torrent_phy_pma_cmn_cdb",
.reg_stride = 1,
.fast_io = true,
.reg_write = cdns_regmap_write,
.reg_read = cdns_regmap_read,
};
static const struct regmap_config cdns_torrent_dptx_phy_config = {
.name = "torrent_dptx_phy",
.reg_stride = 1,
.fast_io = true,
.reg_write = cdns_regmap_dptx_write,
.reg_read = cdns_regmap_dptx_read,
};
/* PHY mmr access functions */
static void cdns_torrent_phy_write(struct regmap *regmap, u32 offset, u32 val)
{
regmap_write(regmap, offset, val);
}
static u32 cdns_torrent_phy_read(struct regmap *regmap, u32 offset)
{
unsigned int val;
regmap_read(regmap, offset, &val);
return val;
}
/* DPTX mmr access functions */
static void cdns_torrent_dp_write(struct regmap *regmap, u32 offset, u32 val)
{
regmap_write(regmap, offset, val);
}
static u32 cdns_torrent_dp_read(struct regmap *regmap, u32 offset)
{
u32 val;
regmap_read(regmap, offset, &val);
return val;
}
/*
* Structure used to store values of PHY registers for voltage-related
* coefficients, for particular voltage swing and pre-emphasis level. Values
* are shared across all physical lanes.
*/
struct coefficients {
/* Value of DRV_DIAG_TX_DRV register to use */
u16 diag_tx_drv;
/* Value of TX_TXCC_MGNFS_MULT_000 register to use */
u16 mgnfs_mult;
/* Value of TX_TXCC_CPOST_MULT_00 register to use */
u16 cpost_mult;
};
/*
* This array holds the values of the voltage-related registers for the SD0801
* PHY. A value of 0xFFFF is a placeholder for an invalid combination and will
* never be used.
*/
static const struct coefficients vltg_coeff[4][4] = {
/* voltage swing 0, pre-emphasis 0->3 */
{ {.diag_tx_drv = 0x0003, .mgnfs_mult = 0x002A,
.cpost_mult = 0x0000},
{.diag_tx_drv = 0x0003, .mgnfs_mult = 0x001F,
.cpost_mult = 0x0014},
{.diag_tx_drv = 0x0003, .mgnfs_mult = 0x0012,
.cpost_mult = 0x0020},
{.diag_tx_drv = 0x0003, .mgnfs_mult = 0x0000,
.cpost_mult = 0x002A}
},
/* voltage swing 1, pre-emphasis 0->3 */
{ {.diag_tx_drv = 0x0003, .mgnfs_mult = 0x001F,
.cpost_mult = 0x0000},
{.diag_tx_drv = 0x0003, .mgnfs_mult = 0x0013,
.cpost_mult = 0x0012},
{.diag_tx_drv = 0x0003, .mgnfs_mult = 0x0000,
.cpost_mult = 0x001F},
{.diag_tx_drv = 0xFFFF, .mgnfs_mult = 0xFFFF,
.cpost_mult = 0xFFFF}
},
/* voltage swing 2, pre-emphasis 0->3 */
{ {.diag_tx_drv = 0x0003, .mgnfs_mult = 0x0013,
.cpost_mult = 0x0000},
{.diag_tx_drv = 0x0003, .mgnfs_mult = 0x0000,
.cpost_mult = 0x0013},
{.diag_tx_drv = 0xFFFF, .mgnfs_mult = 0xFFFF,
.cpost_mult = 0xFFFF},
{.diag_tx_drv = 0xFFFF, .mgnfs_mult = 0xFFFF,
.cpost_mult = 0xFFFF}
},
/* voltage swing 3, pre-emphasis 0->3 */
{ {.diag_tx_drv = 0x0003, .mgnfs_mult = 0x0000,
.cpost_mult = 0x0000},
{.diag_tx_drv = 0xFFFF, .mgnfs_mult = 0xFFFF,
.cpost_mult = 0xFFFF},
{.diag_tx_drv = 0xFFFF, .mgnfs_mult = 0xFFFF,
.cpost_mult = 0xFFFF},
{.diag_tx_drv = 0xFFFF, .mgnfs_mult = 0xFFFF,
.cpost_mult = 0xFFFF}
}
};
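/*
 * The table is indexed as vltg_coeff[voltage_swing][pre_emphasis]; for
 * example, swing level 1 with pre-emphasis level 2 selects
 * mgnfs_mult = 0x0000 and cpost_mult = 0x001F.
 */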
static const char *cdns_torrent_get_phy_type(enum cdns_torrent_phy_type phy_type)
{
switch (phy_type) {
case TYPE_DP:
return "DisplayPort";
case TYPE_PCIE:
return "PCIe";
case TYPE_SGMII:
return "SGMII";
case TYPE_QSGMII:
return "QSGMII";
case TYPE_USB:
return "USB";
case TYPE_USXGMII:
return "USXGMII";
default:
return "None";
}
}
/*
* Set registers responsible for enabling and configuring SSC, with second and
* third register values provided by parameters.
*/
static
void cdns_torrent_dp_enable_ssc_19_2mhz(struct cdns_torrent_phy *cdns_phy,
u32 ctrl2_val, u32 ctrl3_val)
{
struct regmap *regmap = cdns_phy->regmap_common_cdb;
cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, 0x0001);
cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL2_M0, ctrl2_val);
cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL3_M0, ctrl3_val);
cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL4_M0, 0x0003);
cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, 0x0001);
cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL2_M0, ctrl2_val);
cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL3_M0, ctrl3_val);
cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL4_M0, 0x0003);
}
static
void cdns_torrent_dp_pma_cmn_vco_cfg_19_2mhz(struct cdns_torrent_phy *cdns_phy,
u32 rate, bool ssc)
{
struct regmap *regmap = cdns_phy->regmap_common_cdb;
/* Assumes 19.2 MHz refclock */
switch (rate) {
/* Setting VCO for 10.8GHz */
case 2700:
case 5400:
cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0119);
cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x4000);
cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x00BC);
cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0012);
cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0119);
cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x4000);
cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x00BC);
cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0012);
if (ssc)
cdns_torrent_dp_enable_ssc_19_2mhz(cdns_phy, 0x033A, 0x006A);
break;
/* Setting VCO for 9.72GHz */
case 1620:
case 2430:
case 3240:
cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x01FA);
cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x4000);
cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x0152);
cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0002);
cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x01FA);
cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x4000);
cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x0152);
cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0002);
if (ssc)
cdns_torrent_dp_enable_ssc_19_2mhz(cdns_phy, 0x05DD, 0x0069);
break;
/* Setting VCO for 8.64GHz */
case 2160:
case 4320:
cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x01C2);
cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x0000);
cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x012C);
cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0002);
cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x01C2);
cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x0000);
cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x012C);
cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0002);
if (ssc)
cdns_torrent_dp_enable_ssc_19_2mhz(cdns_phy, 0x0536, 0x0069);
break;
/* Setting VCO for 8.1GHz */
case 8100:
cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x01A5);
cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0xE000);
cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x011A);
cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0002);
cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x01A5);
cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0xE000);
cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x011A);
cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0002);
if (ssc)
cdns_torrent_dp_enable_ssc_19_2mhz(cdns_phy, 0x04D7, 0x006A);
break;
}
if (ssc) {
cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_PLLCNT_START, 0x025E);
cdns_torrent_phy_write(regmap, CMN_PLL0_LOCK_PLLCNT_THR, 0x0005);
cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_PLLCNT_START, 0x025E);
cdns_torrent_phy_write(regmap, CMN_PLL1_LOCK_PLLCNT_THR, 0x0005);
} else {
cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_PLLCNT_START, 0x0260);
cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_PLLCNT_START, 0x0260);
/* Set reset register values to disable SSC */
cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, 0x0002);
cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL2_M0, 0x0000);
cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL3_M0, 0x0000);
cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL4_M0, 0x0000);
cdns_torrent_phy_write(regmap, CMN_PLL0_LOCK_PLLCNT_THR, 0x0003);
cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, 0x0002);
cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL2_M0, 0x0000);
cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL3_M0, 0x0000);
cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL4_M0, 0x0000);
cdns_torrent_phy_write(regmap, CMN_PLL1_LOCK_PLLCNT_THR, 0x0003);
}
cdns_torrent_phy_write(regmap, CMN_PLL0_LOCK_REFCNT_START, 0x0099);
cdns_torrent_phy_write(regmap, CMN_PLL0_LOCK_PLLCNT_START, 0x0099);
cdns_torrent_phy_write(regmap, CMN_PLL1_LOCK_REFCNT_START, 0x0099);
cdns_torrent_phy_write(regmap, CMN_PLL1_LOCK_PLLCNT_START, 0x0099);
}
/*
* Set registers responsible for enabling and configuring SSC, with second
* register value provided by a parameter.
*/
static void cdns_torrent_dp_enable_ssc_25mhz(struct cdns_torrent_phy *cdns_phy,
u32 ctrl2_val)
{
struct regmap *regmap = cdns_phy->regmap_common_cdb;
cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, 0x0001);
cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL2_M0, ctrl2_val);
cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL3_M0, 0x007F);
cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL4_M0, 0x0003);
cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, 0x0001);
cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, ctrl2_val);
cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, 0x007F);
cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL4_M0, 0x0003);
}
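/*
 * Configure PLL0 and PLL1 VCO dividers and lock thresholds for the requested
 * DP link rate, assuming a 25 MHz reference clock, and optionally enable SSC.
 */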
static
void cdns_torrent_dp_pma_cmn_vco_cfg_25mhz(struct cdns_torrent_phy *cdns_phy,
u32 rate, bool ssc)
{
struct regmap *regmap = cdns_phy->regmap_common_cdb;
/* Assumes 25 MHz refclock */
switch (rate) {
/* Setting VCO for 10.8GHz */
case 2700:
case 5400:
cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x01B0);
cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x0000);
cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x0120);
cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x01B0);
cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x0000);
cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x0120);
if (ssc)
cdns_torrent_dp_enable_ssc_25mhz(cdns_phy, 0x0423);
break;
/* Setting VCO for 9.72GHz */
case 1620:
case 2430:
case 3240:
cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0184);
cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0xCCCD);
cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x0104);
cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0184);
cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0xCCCD);
cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x0104);
if (ssc)
cdns_torrent_dp_enable_ssc_25mhz(cdns_phy, 0x03B9);
break;
/* Setting VCO for 8.64GHz */
case 2160:
case 4320:
cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0159);
cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x999A);
cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x00E7);
cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0159);
cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x999A);
cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x00E7);
if (ssc)
cdns_torrent_dp_enable_ssc_25mhz(cdns_phy, 0x034F);
break;
/* Setting VCO for 8.1GHz */
case 8100:
cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0144);
cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x0000);
cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x00D8);
cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0144);
cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x0000);
cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x00D8);
if (ssc)
cdns_torrent_dp_enable_ssc_25mhz(cdns_phy, 0x031A);
break;
}
cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0002);
cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0002);
if (ssc) {
cdns_torrent_phy_write(regmap,
CMN_PLL0_VCOCAL_PLLCNT_START, 0x0315);
cdns_torrent_phy_write(regmap,
CMN_PLL0_LOCK_PLLCNT_THR, 0x0005);
cdns_torrent_phy_write(regmap,
CMN_PLL1_VCOCAL_PLLCNT_START, 0x0315);
cdns_torrent_phy_write(regmap,
CMN_PLL1_LOCK_PLLCNT_THR, 0x0005);
} else {
cdns_torrent_phy_write(regmap,
CMN_PLL0_VCOCAL_PLLCNT_START, 0x0317);
cdns_torrent_phy_write(regmap,
CMN_PLL1_VCOCAL_PLLCNT_START, 0x0317);
/* Set reset register values to disable SSC */
cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, 0x0002);
cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL2_M0, 0x0000);
cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL3_M0, 0x0000);
cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL4_M0, 0x0000);
cdns_torrent_phy_write(regmap,
CMN_PLL0_LOCK_PLLCNT_THR, 0x0003);
cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, 0x0002);
cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL2_M0, 0x0000);
cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL3_M0, 0x0000);
cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL4_M0, 0x0000);
cdns_torrent_phy_write(regmap,
CMN_PLL1_LOCK_PLLCNT_THR, 0x0003);
}
cdns_torrent_phy_write(regmap, CMN_PLL0_LOCK_REFCNT_START, 0x00C7);
cdns_torrent_phy_write(regmap, CMN_PLL0_LOCK_PLLCNT_START, 0x00C7);
cdns_torrent_phy_write(regmap, CMN_PLL1_LOCK_REFCNT_START, 0x00C7);
cdns_torrent_phy_write(regmap, CMN_PLL1_LOCK_PLLCNT_START, 0x00C7);
}
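/*
 * Configure the VCO settings for the requested DP link rate, assuming a
 * 100 MHz reference clock. Only the PLL(s) selected in cdns_phy->dp_pll are
 * programmed.
 */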
static
void cdns_torrent_dp_pma_cmn_vco_cfg_100mhz(struct cdns_torrent_phy *cdns_phy,
u32 rate, bool ssc)
{
struct regmap *regmap = cdns_phy->regmap_common_cdb;
/* Assumes 100 MHz refclock */
switch (rate) {
/* Setting VCO for 10.8GHz */
case 2700:
case 5400:
if (cdns_phy->dp_pll & DP_PLL0)
cdns_torrent_phy_write(regmap, CMN_PLL0_DSM_FBH_OVRD_M0, 0x0022);
if (cdns_phy->dp_pll & DP_PLL1) {
cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_PADJ_M0, 0x0028);
cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_FBH_OVRD_M0, 0x0022);
cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_FBL_OVRD_M0, 0x000C);
}
break;
/* Setting VCO for 9.72GHz */
case 1620:
case 2430:
case 3240:
if (cdns_phy->dp_pll & DP_PLL0) {
cdns_torrent_phy_write(regmap, CMN_PLL0_DSM_DIAG_M0, 0x0004);
cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_PADJ_M0, 0x0509);
cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_IADJ_M0, 0x0F00);
cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_FILT_PADJ_M0, 0x0F08);
cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0061);
cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x3333);
cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x0042);
cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0002);
}
if (cdns_phy->dp_pll & DP_PLL1) {
cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_DIAG_M0, 0x0004);
cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_PADJ_M0, 0x0509);
cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_IADJ_M0, 0x0F00);
cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_FILT_PADJ_M0, 0x0F08);
cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0061);
cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x3333);
cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x0042);
cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0002);
}
break;
/* Setting VCO for 8.64GHz */
case 2160:
case 4320:
if (cdns_phy->dp_pll & DP_PLL0) {
cdns_torrent_phy_write(regmap, CMN_PLL0_DSM_DIAG_M0, 0x0004);
cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_PADJ_M0, 0x0509);
cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_IADJ_M0, 0x0F00);
cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_FILT_PADJ_M0, 0x0F08);
cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0056);
cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x6666);
cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x003A);
cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0002);
}
if (cdns_phy->dp_pll & DP_PLL1) {
cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_DIAG_M0, 0x0004);
cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_PADJ_M0, 0x0509);
cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_IADJ_M0, 0x0F00);
cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_FILT_PADJ_M0, 0x0F08);
cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0056);
cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x6666);
cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x003A);
cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0002);
}
break;
/* Setting VCO for 8.1GHz */
case 8100:
if (cdns_phy->dp_pll & DP_PLL0) {
cdns_torrent_phy_write(regmap, CMN_PLL0_DSM_DIAG_M0, 0x0004);
cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_PADJ_M0, 0x0509);
cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_IADJ_M0, 0x0F00);
cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_FILT_PADJ_M0, 0x0F08);
cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0051);
cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x0036);
cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0002);
}
if (cdns_phy->dp_pll & DP_PLL1) {
cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_DIAG_M0, 0x0004);
cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_PADJ_M0, 0x0509);
cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_IADJ_M0, 0x0F00);
cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_FILT_PADJ_M0, 0x0F08);
cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0051);
cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x0036);
cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0002);
}
break;
}
}
/* Set PLL used for DP configuration */
static int cdns_torrent_dp_get_pll(struct cdns_torrent_phy *cdns_phy,
enum cdns_torrent_phy_type phy_t2)
{
switch (phy_t2) {
case TYPE_PCIE:
case TYPE_USB:
cdns_phy->dp_pll = DP_PLL1;
break;
case TYPE_SGMII:
case TYPE_QSGMII:
cdns_phy->dp_pll = DP_PLL0;
break;
case TYPE_NONE:
cdns_phy->dp_pll = DP_PLL0 | DP_PLL1;
break;
default:
dev_err(cdns_phy->dev, "Unsupported PHY configuration\n");
return -EINVAL;
}
return 0;
}
/*
* Enable or disable PLL for selected lanes.
*/
static int cdns_torrent_dp_set_pll_en(struct cdns_torrent_phy *cdns_phy,
struct cdns_torrent_inst *inst,
struct phy_configure_opts_dp *dp,
bool enable)
{
struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg;
u32 rd_val, pll_ack_val;
int ret;
	/*
	 * Used to determine which bits to check for or enable in the
	 * PHY_PMA_XCVR_PLLCLK_EN register.
	 */
u32 pll_bits;
/* Used to enable or disable lanes. */
u32 pll_val;
	/* Select register values and mask depending on the enabled lane count. */
pll_val = cdns_torrent_dp_read(regmap, PHY_PMA_XCVR_PLLCLK_EN);
if (enable) {
pll_bits = ((1 << dp->lanes) - 1);
pll_val |= pll_bits;
pll_ack_val = pll_bits;
} else {
pll_bits = ((1 << inst->num_lanes) - 1);
pll_val &= (~pll_bits);
pll_ack_val = 0;
}
cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_PLLCLK_EN, pll_val);
/* Wait for acknowledgment from PHY. */
ret = regmap_read_poll_timeout(regmap,
PHY_PMA_XCVR_PLLCLK_EN_ACK,
rd_val,
(rd_val & pll_bits) == pll_ack_val,
0, POLL_TIMEOUT_US);
ndelay(100);
return ret;
}
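/*
 * Request the given power state on all enabled lanes and wait for the PHY to
 * acknowledge it before clearing the request.
 */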
static int cdns_torrent_dp_set_power_state(struct cdns_torrent_phy *cdns_phy,
struct cdns_torrent_inst *inst,
u32 num_lanes,
enum phy_powerstate powerstate)
{
	/* Power state request value for a single lane. */
u32 value_part, i;
u32 value = 0;
u32 mask = 0;
u32 read_val;
int ret;
struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg;
switch (powerstate) {
case (POWERSTATE_A0):
value_part = 0x01U;
break;
case (POWERSTATE_A2):
value_part = 0x04U;
break;
default:
/* Powerstate A3 */
value_part = 0x08U;
break;
}
	/* Select register values and mask depending on the enabled lane count. */
for (i = 0; i < num_lanes; i++) {
value |= (value_part << PHY_POWER_STATE_LN(i));
mask |= (PMA_XCVR_POWER_STATE_REQ_LN_MASK << PHY_POWER_STATE_LN(i));
}
/* Set power state A<n>. */
cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_POWER_STATE_REQ, value);
	/* Wait until the PHY acknowledges power state completion. */
ret = regmap_read_poll_timeout(regmap, PHY_PMA_XCVR_POWER_STATE_ACK,
read_val, (read_val & mask) == value, 0,
POLL_TIMEOUT_US);
cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_POWER_STATE_REQ, 0x00000000);
ndelay(100);
return ret;
}
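/*
 * Wait for the master lane PLL clock enable acknowledgment, then cycle the
 * lanes through power states A2 and A0.
 */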
static int cdns_torrent_dp_run(struct cdns_torrent_phy *cdns_phy,
struct cdns_torrent_inst *inst, u32 num_lanes)
{
unsigned int read_val;
int ret;
struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg;
/*
* waiting for ACK of pma_xcvr_pllclk_en_ln_*, only for the
* master lane
*/
ret = regmap_read_poll_timeout(regmap, PHY_PMA_XCVR_PLLCLK_EN_ACK,
read_val, read_val & 1,
0, POLL_TIMEOUT_US);
if (ret == -ETIMEDOUT) {
dev_err(cdns_phy->dev,
"timeout waiting for link PLL clock enable ack\n");
return ret;
}
ndelay(100);
ret = cdns_torrent_dp_set_power_state(cdns_phy, inst, num_lanes,
POWERSTATE_A2);
if (ret)
return ret;
ret = cdns_torrent_dp_set_power_state(cdns_phy, inst, num_lanes,
POWERSTATE_A0);
return ret;
}
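/* Poll PHY_PMA_CMN_READY until the PMA common block reports ready. */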
static int cdns_torrent_dp_wait_pma_cmn_ready(struct cdns_torrent_phy *cdns_phy)
{
unsigned int reg;
int ret;
struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg;
ret = regmap_read_poll_timeout(regmap, PHY_PMA_CMN_READY, reg,
reg & 1, 0, POLL_TIMEOUT_US);
if (ret == -ETIMEDOUT) {
dev_err(cdns_phy->dev,
"timeout waiting for PMA common ready\n");
return -ETIMEDOUT;
}
return 0;
}
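/*
 * Select the PLL clock selection and per-lane high speed clock divider
 * values for the requested DP link rate.
 */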
static void cdns_torrent_dp_pma_cmn_rate(struct cdns_torrent_phy *cdns_phy,
struct cdns_torrent_inst *inst,
u32 rate, u32 num_lanes)
{
unsigned int clk_sel_val = 0;
unsigned int hsclk_div_val = 0;
unsigned int i;
switch (rate) {
case 1620:
clk_sel_val = 0x0f01;
hsclk_div_val = 2;
break;
case 2160:
case 2430:
case 2700:
clk_sel_val = 0x0701;
hsclk_div_val = 1;
break;
case 3240:
clk_sel_val = 0x0b00;
hsclk_div_val = 2;
break;
case 4320:
case 5400:
clk_sel_val = 0x0301;
hsclk_div_val = 0;
break;
case 8100:
clk_sel_val = 0x0200;
hsclk_div_val = 0;
break;
}
if (cdns_phy->dp_pll & DP_PLL0)
cdns_torrent_phy_write(cdns_phy->regmap_common_cdb,
CMN_PDIAG_PLL0_CLK_SEL_M0, clk_sel_val);
if (cdns_phy->dp_pll & DP_PLL1)
cdns_torrent_phy_write(cdns_phy->regmap_common_cdb,
CMN_PDIAG_PLL1_CLK_SEL_M0, clk_sel_val);
/* PMA lane configuration to deal with multi-link operation */
for (i = 0; i < num_lanes; i++)
cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[inst->mlane + i],
XCVR_DIAG_HSCLK_DIV, hsclk_div_val);
}
/*
 * Perform the register operations needed to set the link rate, once the
 * power state is set and the PLL disable request has been processed.
 */
static int cdns_torrent_dp_configure_rate(struct cdns_torrent_phy *cdns_phy,
struct cdns_torrent_inst *inst,
struct phy_configure_opts_dp *dp)
{
u32 read_val, field_val;
int ret;
/*
* Disable the associated PLL (cmn_pll0_en or cmn_pll1_en) before
* re-programming the new data rate.
*/
ret = regmap_field_read(cdns_phy->phy_pma_pll_raw_ctrl, &field_val);
if (ret)
return ret;
field_val &= ~(cdns_phy->dp_pll);
regmap_field_write(cdns_phy->phy_pma_pll_raw_ctrl, field_val);
/*
* Wait for PLL ready de-assertion.
* For PLL0 - PHY_PMA_CMN_CTRL2[2] == 1
* For PLL1 - PHY_PMA_CMN_CTRL2[3] == 1
*/
if (cdns_phy->dp_pll & DP_PLL0) {
ret = regmap_field_read_poll_timeout(cdns_phy->phy_pma_cmn_ctrl_2,
read_val,
((read_val >> 2) & 0x01) != 0,
0, POLL_TIMEOUT_US);
if (ret)
return ret;
}
if ((cdns_phy->dp_pll & DP_PLL1) && cdns_phy->nsubnodes != 1) {
ret = regmap_field_read_poll_timeout(cdns_phy->phy_pma_cmn_ctrl_2,
read_val,
((read_val >> 3) & 0x01) != 0,
0, POLL_TIMEOUT_US);
if (ret)
return ret;
}
ndelay(200);
/* DP Rate Change - VCO Output settings. */
if (cdns_phy->ref_clk_rate == CLK_19_2_MHZ)
/* PMA common configuration 19.2MHz */
cdns_torrent_dp_pma_cmn_vco_cfg_19_2mhz(cdns_phy, dp->link_rate, dp->ssc);
else if (cdns_phy->ref_clk_rate == CLK_25_MHZ)
/* PMA common configuration 25MHz */
cdns_torrent_dp_pma_cmn_vco_cfg_25mhz(cdns_phy, dp->link_rate, dp->ssc);
else if (cdns_phy->ref_clk_rate == CLK_100_MHZ)
/* PMA common configuration 100MHz */
cdns_torrent_dp_pma_cmn_vco_cfg_100mhz(cdns_phy, dp->link_rate, dp->ssc);
cdns_torrent_dp_pma_cmn_rate(cdns_phy, inst, dp->link_rate, dp->lanes);
/* Enable the associated PLL (cmn_pll0_en or cmn_pll1_en) */
ret = regmap_field_read(cdns_phy->phy_pma_pll_raw_ctrl, &field_val);
if (ret)
return ret;
field_val |= cdns_phy->dp_pll;
regmap_field_write(cdns_phy->phy_pma_pll_raw_ctrl, field_val);
/*
* Wait for PLL ready assertion.
* For PLL0 - PHY_PMA_CMN_CTRL2[0] == 1
* For PLL1 - PHY_PMA_CMN_CTRL2[1] == 1
*/
if (cdns_phy->dp_pll & DP_PLL0) {
ret = regmap_field_read_poll_timeout(cdns_phy->phy_pma_cmn_ctrl_2,
read_val,
(read_val & 0x01) != 0,
0, POLL_TIMEOUT_US);
if (ret)
return ret;
}
if ((cdns_phy->dp_pll & DP_PLL1) && cdns_phy->nsubnodes != 1)
ret = regmap_field_read_poll_timeout(cdns_phy->phy_pma_cmn_ctrl_2,
read_val,
((read_val >> 1) & 0x01) != 0,
0, POLL_TIMEOUT_US);
return ret;
}
/*
 * Verify that the parameters used to configure the PHY are correct.
 */
static int cdns_torrent_dp_verify_config(struct cdns_torrent_inst *inst,
struct phy_configure_opts_dp *dp)
{
u8 i;
/* If changing link rate was required, verify it's supported. */
if (dp->set_rate) {
switch (dp->link_rate) {
case 1620:
case 2160:
case 2430:
case 2700:
case 3240:
case 4320:
case 5400:
case 8100:
/* valid bit rate */
break;
default:
return -EINVAL;
}
}
/* Verify lane count. */
switch (dp->lanes) {
case 1:
case 2:
case 4:
/* valid lane count. */
break;
default:
return -EINVAL;
}
/* Check against actual number of PHY's lanes. */
if (dp->lanes > inst->num_lanes)
return -EINVAL;
/*
* If changing voltages is required, check swing and pre-emphasis
* levels, per-lane.
*/
if (dp->set_voltages) {
/* Lane count verified previously. */
for (i = 0; i < dp->lanes; i++) {
if (dp->voltage[i] > 3 || dp->pre[i] > 3)
return -EINVAL;
			/*
			 * Sum of voltage swing and pre-emphasis levels cannot
			 * exceed 3.
			 */
if (dp->voltage[i] + dp->pre[i] > 3)
return -EINVAL;
}
}
return 0;
}
/* Set power state A0 and PLL clock enable to 0 on enabled lanes. */
static void cdns_torrent_dp_set_a0_pll(struct cdns_torrent_phy *cdns_phy,
struct cdns_torrent_inst *inst,
u32 num_lanes)
{
struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg;
u32 pwr_state = cdns_torrent_dp_read(regmap,
PHY_PMA_XCVR_POWER_STATE_REQ);
u32 pll_clk_en = cdns_torrent_dp_read(regmap,
PHY_PMA_XCVR_PLLCLK_EN);
u32 i;
for (i = 0; i < num_lanes; i++) {
pwr_state &= ~(PMA_XCVR_POWER_STATE_REQ_LN_MASK
<< PHY_POWER_STATE_LN(inst->mlane + i));
pll_clk_en &= ~(0x01U << (inst->mlane + i));
}
cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_POWER_STATE_REQ, pwr_state);
cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_PLLCLK_EN, pll_clk_en);
}
/* Configure lane count as required. */
static int cdns_torrent_dp_set_lanes(struct cdns_torrent_phy *cdns_phy,
struct cdns_torrent_inst *inst,
struct phy_configure_opts_dp *dp)
{
u32 value, i;
int ret;
struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg;
u8 lane_mask = (1 << dp->lanes) - 1;
u8 pma_tx_elec_idle_mask = 0;
u32 clane = inst->mlane;
lane_mask <<= clane;
value = cdns_torrent_dp_read(regmap, PHY_RESET);
/* clear pma_tx_elec_idle_ln_* bits. */
pma_tx_elec_idle_mask = ((1 << inst->num_lanes) - 1) << clane;
pma_tx_elec_idle_mask <<= PMA_TX_ELEC_IDLE_SHIFT;
value &= ~pma_tx_elec_idle_mask;
/* Assert pma_tx_elec_idle_ln_* for disabled lanes. */
value |= ((~lane_mask) << PMA_TX_ELEC_IDLE_SHIFT) &
pma_tx_elec_idle_mask;
cdns_torrent_dp_write(regmap, PHY_RESET, value);
/* reset the link by asserting master lane phy_l0*_reset_n low */
cdns_torrent_dp_write(regmap, PHY_RESET,
value & (~(1 << clane)));
/*
* Assert lane reset on unused lanes and master lane so they remain in reset
* and powered down when re-enabling the link
*/
for (i = 0; i < inst->num_lanes; i++)
value &= (~(1 << (clane + i)));
for (i = 1; i < inst->num_lanes; i++)
value |= ((1 << (clane + i)) & lane_mask);
cdns_torrent_dp_write(regmap, PHY_RESET, value);
cdns_torrent_dp_set_a0_pll(cdns_phy, inst, dp->lanes);
	/* release phy_l0*_reset_n based on the used lane count */
for (i = 0; i < inst->num_lanes; i++)
value &= (~(1 << (clane + i)));
for (i = 0; i < inst->num_lanes; i++)
value |= ((1 << (clane + i)) & lane_mask);
cdns_torrent_dp_write(regmap, PHY_RESET, value);
	/* Wait until the PHY is ready after releasing the PHY reset signal. */
ret = cdns_torrent_dp_wait_pma_cmn_ready(cdns_phy);
if (ret)
return ret;
ndelay(100);
/* release pma_xcvr_pllclk_en_ln_*, only for the master lane */
value = cdns_torrent_dp_read(regmap, PHY_PMA_XCVR_PLLCLK_EN);
value |= (1 << clane);
cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_PLLCLK_EN, value);
ret = cdns_torrent_dp_run(cdns_phy, inst, dp->lanes);
return ret;
}
/* Configure link rate as required. */
static int cdns_torrent_dp_set_rate(struct cdns_torrent_phy *cdns_phy,
struct cdns_torrent_inst *inst,
struct phy_configure_opts_dp *dp)
{
int ret;
ret = cdns_torrent_dp_set_power_state(cdns_phy, inst, dp->lanes,
POWERSTATE_A3);
if (ret)
return ret;
ret = cdns_torrent_dp_set_pll_en(cdns_phy, inst, dp, false);
if (ret)
return ret;
ndelay(200);
ret = cdns_torrent_dp_configure_rate(cdns_phy, inst, dp);
if (ret)
return ret;
ndelay(200);
ret = cdns_torrent_dp_set_pll_en(cdns_phy, inst, dp, true);
if (ret)
return ret;
ret = cdns_torrent_dp_set_power_state(cdns_phy, inst, dp->lanes,
POWERSTATE_A2);
if (ret)
return ret;
ret = cdns_torrent_dp_set_power_state(cdns_phy, inst, dp->lanes,
POWERSTATE_A0);
if (ret)
return ret;
ndelay(900);
return ret;
}
/* Configure voltage swing and pre-emphasis for all enabled lanes. */
static void cdns_torrent_dp_set_voltages(struct cdns_torrent_phy *cdns_phy,
struct cdns_torrent_inst *inst,
struct phy_configure_opts_dp *dp)
{
u8 lane;
u16 val;
for (lane = 0; lane < dp->lanes; lane++) {
val = cdns_torrent_phy_read(cdns_phy->regmap_tx_lane_cdb[inst->mlane + lane],
TX_DIAG_ACYA);
/*
* Write 1 to register bit TX_DIAG_ACYA[0] to freeze the
* current state of the analog TX driver.
*/
val |= TX_DIAG_ACYA_HBDC_MASK;
cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[inst->mlane + lane],
TX_DIAG_ACYA, val);
cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[inst->mlane + lane],
TX_TXCC_CTRL, 0x08A4);
val = vltg_coeff[dp->voltage[lane]][dp->pre[lane]].diag_tx_drv;
cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[inst->mlane + lane],
DRV_DIAG_TX_DRV, val);
val = vltg_coeff[dp->voltage[lane]][dp->pre[lane]].mgnfs_mult;
cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[inst->mlane + lane],
TX_TXCC_MGNFS_MULT_000,
val);
val = vltg_coeff[dp->voltage[lane]][dp->pre[lane]].cpost_mult;
cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[inst->mlane + lane],
TX_TXCC_CPOST_MULT_00,
val);
val = cdns_torrent_phy_read(cdns_phy->regmap_tx_lane_cdb[inst->mlane + lane],
TX_DIAG_ACYA);
/*
* Write 0 to register bit TX_DIAG_ACYA[0] to allow the state of
* analog TX driver to reflect the new programmed one.
*/
val &= ~TX_DIAG_ACYA_HBDC_MASK;
cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[inst->mlane + lane],
TX_DIAG_ACYA, val);
}
}
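/*
 * The phy_ops .configure callback for DP: validate the requested settings,
 * then apply lane count, link rate and voltage swing / pre-emphasis changes
 * as requested.
 */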
static int cdns_torrent_dp_configure(struct phy *phy,
union phy_configure_opts *opts)
{
struct cdns_torrent_inst *inst = phy_get_drvdata(phy);
struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent);
int ret;
ret = cdns_torrent_dp_verify_config(inst, &opts->dp);
if (ret) {
dev_err(&phy->dev, "invalid params for phy configure\n");
return ret;
}
if (opts->dp.set_lanes) {
ret = cdns_torrent_dp_set_lanes(cdns_phy, inst, &opts->dp);
if (ret) {
dev_err(&phy->dev, "cdns_torrent_dp_set_lanes failed\n");
return ret;
}
}
if (opts->dp.set_rate) {
ret = cdns_torrent_dp_set_rate(cdns_phy, inst, &opts->dp);
if (ret) {
dev_err(&phy->dev, "cdns_torrent_dp_set_rate failed\n");
return ret;
}
}
if (opts->dp.set_voltages)
cdns_torrent_dp_set_voltages(cdns_phy, inst, &opts->dp);
return ret;
}
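/*
 * Power on the PHY: for single-link configurations deassert the lane group
 * and PHY resets, then wait for the PMA cmn_ready indication and, for PCIe
 * and USB, for the PCS isolation link control bit to clear.
 */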
static int cdns_torrent_phy_on(struct phy *phy)
{
struct cdns_torrent_inst *inst = phy_get_drvdata(phy);
struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent);
u32 read_val;
int ret;
if (cdns_phy->nsubnodes == 1) {
/* Take the PHY lane group out of reset */
reset_control_deassert(inst->lnk_rst);
/* Take the PHY out of reset */
ret = reset_control_deassert(cdns_phy->phy_rst);
if (ret)
return ret;
}
/*
* Wait for cmn_ready assertion
* PHY_PMA_CMN_CTRL1[0] == 1
*/
ret = regmap_field_read_poll_timeout(cdns_phy->phy_pma_cmn_ctrl_1,
read_val, read_val, 1000,
PLL_LOCK_TIMEOUT);
if (ret) {
dev_err(cdns_phy->dev, "Timeout waiting for CMN ready\n");
return ret;
}
if (inst->phy_type == TYPE_PCIE || inst->phy_type == TYPE_USB) {
ret = regmap_field_read_poll_timeout(cdns_phy->phy_pcs_iso_link_ctrl_1[inst->mlane],
read_val, !read_val, 1000,
PLL_LOCK_TIMEOUT);
if (ret == -ETIMEDOUT) {
dev_err(cdns_phy->dev, "Timeout waiting for PHY status ready\n");
return ret;
}
}
return 0;
}
static int cdns_torrent_phy_off(struct phy *phy)
{
struct cdns_torrent_inst *inst = phy_get_drvdata(phy);
struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent);
int ret;
if (cdns_phy->nsubnodes != 1)
return 0;
ret = reset_control_assert(cdns_phy->phy_rst);
if (ret)
return ret;
return reset_control_assert(inst->lnk_rst);
}
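/*
 * Common DP initialization: enable the AUX channel, release the lane resets,
 * program the PMA for the maximum supported bit rate with SSC disabled and
 * take the PHY out of reset.
 */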
static void cdns_torrent_dp_common_init(struct cdns_torrent_phy *cdns_phy,
struct cdns_torrent_inst *inst)
{
struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg;
unsigned char lane_bits;
u32 val;
cdns_torrent_dp_write(regmap, PHY_AUX_CTRL, 0x0003); /* enable AUX */
/*
	 * Set lane power state to A0
	 * Set lane PLL clock enable to 0
*/
cdns_torrent_dp_set_a0_pll(cdns_phy, inst, inst->num_lanes);
/*
* release phy_l0*_reset_n and pma_tx_elec_idle_ln_* based on
* used lanes
*/
lane_bits = (1 << inst->num_lanes) - 1;
val = cdns_torrent_dp_read(regmap, PHY_RESET);
val |= (0xF & lane_bits);
val &= ~(lane_bits << 4);
cdns_torrent_dp_write(regmap, PHY_RESET, val);
/* release pma_xcvr_pllclk_en_ln_*, only for the master lane */
val = cdns_torrent_dp_read(regmap, PHY_PMA_XCVR_PLLCLK_EN);
val |= 1;
cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_PLLCLK_EN, val);
/*
* PHY PMA registers configuration functions
* Initialize PHY with max supported link rate, without SSC.
*/
if (cdns_phy->ref_clk_rate == CLK_19_2_MHZ)
cdns_torrent_dp_pma_cmn_vco_cfg_19_2mhz(cdns_phy,
cdns_phy->max_bit_rate,
false);
else if (cdns_phy->ref_clk_rate == CLK_25_MHZ)
cdns_torrent_dp_pma_cmn_vco_cfg_25mhz(cdns_phy,
cdns_phy->max_bit_rate,
false);
else if (cdns_phy->ref_clk_rate == CLK_100_MHZ)
cdns_torrent_dp_pma_cmn_vco_cfg_100mhz(cdns_phy,
cdns_phy->max_bit_rate,
false);
cdns_torrent_dp_pma_cmn_rate(cdns_phy, inst, cdns_phy->max_bit_rate,
inst->num_lanes);
/* take out of reset */
regmap_field_write(cdns_phy->phy_reset_ctrl, 0x1);
}
static int cdns_torrent_dp_start(struct cdns_torrent_phy *cdns_phy,
struct cdns_torrent_inst *inst,
struct phy *phy)
{
int ret;
ret = cdns_torrent_phy_on(phy);
if (ret)
return ret;
ret = cdns_torrent_dp_wait_pma_cmn_ready(cdns_phy);
if (ret)
return ret;
ret = cdns_torrent_dp_run(cdns_phy, inst, inst->num_lanes);
return ret;
}
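/*
 * Initialize a single DP link: validate the reference clock rate, select the
 * PLL(s) to use, then run the common init sequence and start the PHY.
 */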
static int cdns_torrent_dp_init(struct phy *phy)
{
struct cdns_torrent_inst *inst = phy_get_drvdata(phy);
struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent);
int ret;
switch (cdns_phy->ref_clk_rate) {
case CLK_19_2_MHZ:
case CLK_25_MHZ:
case CLK_100_MHZ:
/* Valid Ref Clock Rate */
break;
default:
dev_err(cdns_phy->dev, "Unsupported Ref Clock Rate\n");
return -EINVAL;
}
ret = cdns_torrent_dp_get_pll(cdns_phy, TYPE_NONE);
if (ret)
return ret;
cdns_torrent_dp_common_init(cdns_phy, inst);
return cdns_torrent_dp_start(cdns_phy, inst, phy);
}
static int cdns_torrent_dp_multilink_init(struct cdns_torrent_phy *cdns_phy,
struct cdns_torrent_inst *inst,
struct phy *phy)
{
if (cdns_phy->ref_clk_rate != CLK_100_MHZ) {
dev_err(cdns_phy->dev, "Unsupported Ref Clock Rate\n");
return -EINVAL;
}
cdns_torrent_dp_common_init(cdns_phy, inst);
return cdns_torrent_dp_start(cdns_phy, inst, phy);
}
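/*
 * The derived reference clock output is gated via the
 * cmn_cdiag_refclk_ovrd_4 and phy_pipe_cmn_ctrl1_0 register fields.
 */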
static int cdns_torrent_derived_refclk_enable(struct clk_hw *hw)
{
struct cdns_torrent_derived_refclk *derived_refclk = to_cdns_torrent_derived_refclk(hw);
regmap_field_write(derived_refclk->cmn_cdiag_refclk_ovrd_4, 1);
regmap_field_write(derived_refclk->phy_pipe_cmn_ctrl1_0, 1);
return 0;
}
static void cdns_torrent_derived_refclk_disable(struct clk_hw *hw)
{
struct cdns_torrent_derived_refclk *derived_refclk = to_cdns_torrent_derived_refclk(hw);
regmap_field_write(derived_refclk->phy_pipe_cmn_ctrl1_0, 0);
regmap_field_write(derived_refclk->cmn_cdiag_refclk_ovrd_4, 0);
}
static int cdns_torrent_derived_refclk_is_enabled(struct clk_hw *hw)
{
struct cdns_torrent_derived_refclk *derived_refclk = to_cdns_torrent_derived_refclk(hw);
int val;
regmap_field_read(derived_refclk->cmn_cdiag_refclk_ovrd_4, &val);
return !!val;
}
static const struct clk_ops cdns_torrent_derived_refclk_ops = {
.enable = cdns_torrent_derived_refclk_enable,
.disable = cdns_torrent_derived_refclk_disable,
.is_enabled = cdns_torrent_derived_refclk_is_enabled,
};
static int cdns_torrent_derived_refclk_register(struct cdns_torrent_phy *cdns_phy)
{
struct cdns_torrent_derived_refclk *derived_refclk;
struct device *dev = cdns_phy->dev;
struct clk_init_data *init;
const char *parent_name;
char clk_name[100];
struct clk_hw *hw;
struct clk *clk;
int ret;
derived_refclk = devm_kzalloc(dev, sizeof(*derived_refclk), GFP_KERNEL);
if (!derived_refclk)
return -ENOMEM;
snprintf(clk_name, sizeof(clk_name), "%s_%s", dev_name(dev),
clk_names[CDNS_TORRENT_DERIVED_REFCLK]);
clk = devm_clk_get_optional(dev, "phy_en_refclk");
if (IS_ERR(clk)) {
dev_err(dev, "No parent clock for derived_refclk\n");
return PTR_ERR(clk);
}
init = &derived_refclk->clk_data;
if (clk) {
parent_name = __clk_get_name(clk);
init->parent_names = &parent_name;
init->num_parents = 1;
}
init->ops = &cdns_torrent_derived_refclk_ops;
init->flags = 0;
init->name = clk_name;
derived_refclk->phy_pipe_cmn_ctrl1_0 = cdns_phy->phy_pipe_cmn_ctrl1_0;
derived_refclk->cmn_cdiag_refclk_ovrd_4 = cdns_phy->cmn_cdiag_refclk_ovrd_4;
derived_refclk->hw.init = init;
hw = &derived_refclk->hw;
ret = devm_clk_hw_register(dev, hw);
if (ret)
return ret;
cdns_phy->clk_hw_data->hws[CDNS_TORRENT_DERIVED_REFCLK] = hw;
return 0;
}
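/*
 * The received reference clock output is gated via phy_pipe_cmn_ctrl1_0 and
 * is reported as enabled only while the derived clock override
 * (cmn_cdiag_refclk_ovrd_4) is cleared.
 */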
static int cdns_torrent_received_refclk_enable(struct clk_hw *hw)
{
struct cdns_torrent_received_refclk *received_refclk = to_cdns_torrent_received_refclk(hw);
regmap_field_write(received_refclk->phy_pipe_cmn_ctrl1_0, 1);
return 0;
}
static void cdns_torrent_received_refclk_disable(struct clk_hw *hw)
{
struct cdns_torrent_received_refclk *received_refclk = to_cdns_torrent_received_refclk(hw);
regmap_field_write(received_refclk->phy_pipe_cmn_ctrl1_0, 0);
}
static int cdns_torrent_received_refclk_is_enabled(struct clk_hw *hw)
{
struct cdns_torrent_received_refclk *received_refclk = to_cdns_torrent_received_refclk(hw);
int val, cmn_val;
regmap_field_read(received_refclk->phy_pipe_cmn_ctrl1_0, &val);
regmap_field_read(received_refclk->cmn_cdiag_refclk_ovrd_4, &cmn_val);
return val && !cmn_val;
}
static const struct clk_ops cdns_torrent_received_refclk_ops = {
.enable = cdns_torrent_received_refclk_enable,
.disable = cdns_torrent_received_refclk_disable,
.is_enabled = cdns_torrent_received_refclk_is_enabled,
};
static int cdns_torrent_received_refclk_register(struct cdns_torrent_phy *cdns_phy)
{
struct cdns_torrent_received_refclk *received_refclk;
struct device *dev = cdns_phy->dev;
struct clk_init_data *init;
const char *parent_name;
char clk_name[100];
struct clk_hw *hw;
struct clk *clk;
int ret;
received_refclk = devm_kzalloc(dev, sizeof(*received_refclk), GFP_KERNEL);
if (!received_refclk)
return -ENOMEM;
snprintf(clk_name, sizeof(clk_name), "%s_%s", dev_name(dev),
clk_names[CDNS_TORRENT_RECEIVED_REFCLK]);
clk = devm_clk_get_optional(dev, "phy_en_refclk");
if (IS_ERR(clk)) {
dev_err(dev, "No parent clock for received_refclk\n");
return PTR_ERR(clk);
}
init = &received_refclk->clk_data;
if (clk) {
parent_name = __clk_get_name(clk);
init->parent_names = &parent_name;
init->num_parents = 1;
}
init->ops = &cdns_torrent_received_refclk_ops;
init->flags = 0;
init->name = clk_name;
received_refclk->phy_pipe_cmn_ctrl1_0 = cdns_phy->phy_pipe_cmn_ctrl1_0;
received_refclk->cmn_cdiag_refclk_ovrd_4 = cdns_phy->cmn_cdiag_refclk_ovrd_4;
received_refclk->hw.init = init;
hw = &received_refclk->hw;
ret = devm_clk_hw_register(dev, hw);
if (ret)
return ret;
cdns_phy->clk_hw_data->hws[CDNS_TORRENT_RECEIVED_REFCLK] = hw;
return 0;
}
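/*
 * The reference clock driver output is controlled through the
 * CMN_CDIAG_REFCLK_DRV0_CTRL_* fields: CTRL_1 gates the output (0 = enabled)
 * and CTRL_4 selects the parent reference clock.
 */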
static int cdns_torrent_refclk_driver_enable(struct clk_hw *hw)
{
struct cdns_torrent_refclk_driver *refclk_driver = to_cdns_torrent_refclk_driver(hw);
regmap_field_write(refclk_driver->cmn_fields[CMN_CDIAG_REFCLK_DRV0_CTRL_6], 0);
regmap_field_write(refclk_driver->cmn_fields[CMN_CDIAG_REFCLK_DRV0_CTRL_5], 1);
regmap_field_write(refclk_driver->cmn_fields[CMN_CDIAG_REFCLK_DRV0_CTRL_1], 0);
return 0;
}
static void cdns_torrent_refclk_driver_disable(struct clk_hw *hw)
{
struct cdns_torrent_refclk_driver *refclk_driver = to_cdns_torrent_refclk_driver(hw);
regmap_field_write(refclk_driver->cmn_fields[CMN_CDIAG_REFCLK_DRV0_CTRL_1], 1);
}
static int cdns_torrent_refclk_driver_is_enabled(struct clk_hw *hw)
{
struct cdns_torrent_refclk_driver *refclk_driver = to_cdns_torrent_refclk_driver(hw);
int val;
regmap_field_read(refclk_driver->cmn_fields[CMN_CDIAG_REFCLK_DRV0_CTRL_1], &val);
return !val;
}
static u8 cdns_torrent_refclk_driver_get_parent(struct clk_hw *hw)
{
struct cdns_torrent_refclk_driver *refclk_driver = to_cdns_torrent_refclk_driver(hw);
unsigned int val;
regmap_field_read(refclk_driver->cmn_fields[CMN_CDIAG_REFCLK_DRV0_CTRL_4], &val);
return clk_mux_val_to_index(hw, cdns_torrent_refclk_driver_mux_table, 0, val);
}
static int cdns_torrent_refclk_driver_set_parent(struct clk_hw *hw, u8 index)
{
struct cdns_torrent_refclk_driver *refclk_driver = to_cdns_torrent_refclk_driver(hw);
unsigned int val;
val = cdns_torrent_refclk_driver_mux_table[index];
return regmap_field_write(refclk_driver->cmn_fields[CMN_CDIAG_REFCLK_DRV0_CTRL_4], val);
}
static const struct clk_ops cdns_torrent_refclk_driver_ops = {
.enable = cdns_torrent_refclk_driver_enable,
.disable = cdns_torrent_refclk_driver_disable,
.is_enabled = cdns_torrent_refclk_driver_is_enabled,
.determine_rate = __clk_mux_determine_rate,
.set_parent = cdns_torrent_refclk_driver_set_parent,
.get_parent = cdns_torrent_refclk_driver_get_parent,
};
static int cdns_torrent_refclk_driver_register(struct cdns_torrent_phy *cdns_phy)
{
struct cdns_torrent_refclk_driver *refclk_driver;
struct device *dev = cdns_phy->dev;
struct regmap_field *field;
struct clk_init_data *init;
const char **parent_names;
unsigned int num_parents;
struct regmap *regmap;
char clk_name[100];
struct clk_hw *hw;
int i, ret;
refclk_driver = devm_kzalloc(dev, sizeof(*refclk_driver), GFP_KERNEL);
if (!refclk_driver)
return -ENOMEM;
num_parents = ARRAY_SIZE(refclk_driver_parent_index);
parent_names = devm_kzalloc(dev, (sizeof(char *) * num_parents), GFP_KERNEL);
if (!parent_names)
return -ENOMEM;
for (i = 0; i < num_parents; i++) {
hw = cdns_phy->clk_hw_data->hws[refclk_driver_parent_index[i]];
if (IS_ERR_OR_NULL(hw)) {
dev_err(dev, "No parent clock for refclk driver clock\n");
return IS_ERR(hw) ? PTR_ERR(hw) : -ENOENT;
}
parent_names[i] = clk_hw_get_name(hw);
}
snprintf(clk_name, sizeof(clk_name), "%s_%s", dev_name(dev),
clk_names[CDNS_TORRENT_REFCLK_DRIVER]);
init = &refclk_driver->clk_data;
init->ops = &cdns_torrent_refclk_driver_ops;
init->flags = CLK_SET_RATE_NO_REPARENT;
init->parent_names = parent_names;
init->num_parents = num_parents;
init->name = clk_name;
regmap = cdns_phy->regmap_common_cdb;
for (i = 0; i < REFCLK_OUT_NUM_CMN_CONFIG; i++) {
field = devm_regmap_field_alloc(dev, regmap, refclk_out_cmn_cfg[i]);
if (IS_ERR(field)) {
dev_err(dev, "Refclk driver CMN reg field init failed\n");
return PTR_ERR(field);
}
refclk_driver->cmn_fields[i] = field;
}
/* Enable Derived reference clock as default */
regmap_field_write(refclk_driver->cmn_fields[CMN_CDIAG_REFCLK_DRV0_CTRL_4], 1);
refclk_driver->hw.init = init;
hw = &refclk_driver->hw;
ret = devm_clk_hw_register(dev, hw);
if (ret)
return ret;
cdns_phy->clk_hw_data->hws[CDNS_TORRENT_REFCLK_DRIVER] = hw;
return 0;
}
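/*
 * Create a regmap for a register block located at block_offset from the
 * given base address, with register offsets shifted by reg_offset_shift.
 */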
static struct regmap *cdns_regmap_init(struct device *dev, void __iomem *base,
u32 block_offset,
u8 reg_offset_shift,
const struct regmap_config *config)
{
struct cdns_regmap_cdb_context *ctx;
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return ERR_PTR(-ENOMEM);
ctx->dev = dev;
ctx->base = base + block_offset;
ctx->reg_offset_shift = reg_offset_shift;
return devm_regmap_init(dev, NULL, ctx, config);
}
static int cdns_torrent_dp_regfield_init(struct cdns_torrent_phy *cdns_phy)
{
struct device *dev = cdns_phy->dev;
struct regmap_field *field;
struct regmap *regmap;
regmap = cdns_phy->regmap_dptx_phy_reg;
field = devm_regmap_field_alloc(dev, regmap, phy_reset_ctrl);
if (IS_ERR(field)) {
dev_err(dev, "PHY_RESET reg field init failed\n");
return PTR_ERR(field);
}
cdns_phy->phy_reset_ctrl = field;
return 0;
}
static int cdns_torrent_regfield_init(struct cdns_torrent_phy *cdns_phy)
{
struct device *dev = cdns_phy->dev;
struct regmap_field *field;
struct regmap *regmap;
int i;
regmap = cdns_phy->regmap_phy_pcs_common_cdb;
field = devm_regmap_field_alloc(dev, regmap, phy_pll_cfg);
if (IS_ERR(field)) {
dev_err(dev, "PHY_PLL_CFG reg field init failed\n");
return PTR_ERR(field);
}
cdns_phy->phy_pll_cfg = field;
regmap = cdns_phy->regmap_phy_pcs_common_cdb;
field = devm_regmap_field_alloc(dev, regmap, phy_pipe_cmn_ctrl1_0);
if (IS_ERR(field)) {
dev_err(dev, "phy_pipe_cmn_ctrl1_0 reg field init failed\n");
return PTR_ERR(field);
}
cdns_phy->phy_pipe_cmn_ctrl1_0 = field;
regmap = cdns_phy->regmap_common_cdb;
field = devm_regmap_field_alloc(dev, regmap, cmn_cdiag_refclk_ovrd_4);
if (IS_ERR(field)) {
dev_err(dev, "cmn_cdiag_refclk_ovrd_4 reg field init failed\n");
return PTR_ERR(field);
}
cdns_phy->cmn_cdiag_refclk_ovrd_4 = field;
regmap = cdns_phy->regmap_phy_pma_common_cdb;
field = devm_regmap_field_alloc(dev, regmap, phy_pma_cmn_ctrl_1);
if (IS_ERR(field)) {
dev_err(dev, "PHY_PMA_CMN_CTRL1 reg field init failed\n");
return PTR_ERR(field);
}
cdns_phy->phy_pma_cmn_ctrl_1 = field;
regmap = cdns_phy->regmap_phy_pma_common_cdb;
field = devm_regmap_field_alloc(dev, regmap, phy_pma_cmn_ctrl_2);
if (IS_ERR(field)) {
dev_err(dev, "PHY_PMA_CMN_CTRL2 reg field init failed\n");
return PTR_ERR(field);
}
cdns_phy->phy_pma_cmn_ctrl_2 = field;
regmap = cdns_phy->regmap_phy_pma_common_cdb;
field = devm_regmap_field_alloc(dev, regmap, phy_pma_pll_raw_ctrl);
if (IS_ERR(field)) {
dev_err(dev, "PHY_PMA_PLL_RAW_CTRL reg field init failed\n");
return PTR_ERR(field);
}
cdns_phy->phy_pma_pll_raw_ctrl = field;
for (i = 0; i < MAX_NUM_LANES; i++) {
regmap = cdns_phy->regmap_phy_pcs_lane_cdb[i];
field = devm_regmap_field_alloc(dev, regmap, phy_pcs_iso_link_ctrl_1);
if (IS_ERR(field)) {
dev_err(dev, "PHY_PCS_ISO_LINK_CTRL reg field init for ln %d failed\n", i);
return PTR_ERR(field);
}
cdns_phy->phy_pcs_iso_link_ctrl_1[i] = field;
}
return 0;
}
static int cdns_torrent_dp_regmap_init(struct cdns_torrent_phy *cdns_phy)
{
void __iomem *base = cdns_phy->base;
struct device *dev = cdns_phy->dev;
struct regmap *regmap;
u8 reg_offset_shift;
u32 block_offset;
reg_offset_shift = cdns_phy->init_data->reg_offset_shift;
block_offset = TORRENT_DPTX_PHY_OFFSET;
regmap = cdns_regmap_init(dev, base, block_offset,
reg_offset_shift,
&cdns_torrent_dptx_phy_config);
if (IS_ERR(regmap)) {
dev_err(dev, "Failed to init DPTX PHY regmap\n");
return PTR_ERR(regmap);
}
cdns_phy->regmap_dptx_phy_reg = regmap;
return 0;
}
static int cdns_torrent_regmap_init(struct cdns_torrent_phy *cdns_phy)
{
void __iomem *sd_base = cdns_phy->sd_base;
u8 block_offset_shift, reg_offset_shift;
struct device *dev = cdns_phy->dev;
struct regmap *regmap;
u32 block_offset;
int i;
block_offset_shift = cdns_phy->init_data->block_offset_shift;
reg_offset_shift = cdns_phy->init_data->reg_offset_shift;
for (i = 0; i < MAX_NUM_LANES; i++) {
block_offset = TORRENT_TX_LANE_CDB_OFFSET(i, block_offset_shift,
reg_offset_shift);
regmap = cdns_regmap_init(dev, sd_base, block_offset,
reg_offset_shift,
&cdns_torrent_tx_lane_cdb_config[i]);
if (IS_ERR(regmap)) {
dev_err(dev, "Failed to init tx lane CDB regmap\n");
return PTR_ERR(regmap);
}
cdns_phy->regmap_tx_lane_cdb[i] = regmap;
block_offset = TORRENT_RX_LANE_CDB_OFFSET(i, block_offset_shift,
reg_offset_shift);
regmap = cdns_regmap_init(dev, sd_base, block_offset,
reg_offset_shift,
&cdns_torrent_rx_lane_cdb_config[i]);
if (IS_ERR(regmap)) {
dev_err(dev, "Failed to init rx lane CDB regmap\n");
return PTR_ERR(regmap);
}
cdns_phy->regmap_rx_lane_cdb[i] = regmap;
block_offset = TORRENT_PHY_PCS_LANE_CDB_OFFSET(i, block_offset_shift,
reg_offset_shift);
regmap = cdns_regmap_init(dev, sd_base, block_offset,
reg_offset_shift,
&cdns_torrent_phy_pcs_lane_cdb_config[i]);
if (IS_ERR(regmap)) {
dev_err(dev, "Failed to init PHY PCS lane CDB regmap\n");
return PTR_ERR(regmap);
}
cdns_phy->regmap_phy_pcs_lane_cdb[i] = regmap;
}
block_offset = TORRENT_COMMON_CDB_OFFSET;
regmap = cdns_regmap_init(dev, sd_base, block_offset,
reg_offset_shift,
&cdns_torrent_common_cdb_config);
if (IS_ERR(regmap)) {
dev_err(dev, "Failed to init common CDB regmap\n");
return PTR_ERR(regmap);
}
cdns_phy->regmap_common_cdb = regmap;
block_offset = TORRENT_PHY_PCS_COMMON_OFFSET(block_offset_shift);
regmap = cdns_regmap_init(dev, sd_base, block_offset,
reg_offset_shift,
&cdns_torrent_phy_pcs_cmn_cdb_config);
if (IS_ERR(regmap)) {
dev_err(dev, "Failed to init PHY PCS common CDB regmap\n");
return PTR_ERR(regmap);
}
cdns_phy->regmap_phy_pcs_common_cdb = regmap;
block_offset = TORRENT_PHY_PMA_COMMON_OFFSET(block_offset_shift);
regmap = cdns_regmap_init(dev, sd_base, block_offset,
reg_offset_shift,
&cdns_torrent_phy_pma_cmn_cdb_config);
if (IS_ERR(regmap)) {
dev_err(dev, "Failed to init PHY PMA common CDB regmap\n");
return PTR_ERR(regmap);
}
cdns_phy->regmap_phy_pma_common_cdb = regmap;
return 0;
}
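/*
 * Single-link PHY initialization: apply the table-driven register
 * configurations matching the PHY type, reference clock rate and SSC mode.
 * Multi-link configurations are programmed at probe time, so only the
 * DP-specific init is run for them here.
 */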
static int cdns_torrent_phy_init(struct phy *phy)
{
struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent);
const struct cdns_torrent_data *init_data = cdns_phy->init_data;
struct cdns_torrent_vals *cmn_vals, *tx_ln_vals, *rx_ln_vals;
enum cdns_torrent_ref_clk ref_clk = cdns_phy->ref_clk_rate;
struct cdns_torrent_vals *link_cmn_vals, *xcvr_diag_vals;
struct cdns_torrent_inst *inst = phy_get_drvdata(phy);
enum cdns_torrent_phy_type phy_type = inst->phy_type;
enum cdns_torrent_ssc_mode ssc = inst->ssc_mode;
struct cdns_torrent_vals *phy_pma_cmn_vals;
struct cdns_torrent_vals *pcs_cmn_vals;
struct cdns_reg_pairs *reg_pairs;
struct regmap *regmap;
u32 num_regs;
int i, j;
if (cdns_phy->nsubnodes > 1) {
if (phy_type == TYPE_DP)
return cdns_torrent_dp_multilink_init(cdns_phy, inst, phy);
return 0;
}
	/*
	 * Spread spectrum generation is not required or supported
	 * for SGMII/QSGMII/USXGMII.
	 */
if (phy_type == TYPE_SGMII || phy_type == TYPE_QSGMII || phy_type == TYPE_USXGMII)
ssc = NO_SSC;
/* PHY configuration specific registers for single link */
link_cmn_vals = cdns_torrent_get_tbl_vals(&init_data->link_cmn_vals_tbl,
CLK_ANY, CLK_ANY,
phy_type, TYPE_NONE,
ANY_SSC);
if (link_cmn_vals) {
reg_pairs = link_cmn_vals->reg_pairs;
num_regs = link_cmn_vals->num_regs;
regmap = cdns_phy->regmap_common_cdb;
		/*
		 * The first array value in link_cmn_vals must be for the
		 * PHY_PLL_CFG register.
		 */
regmap_field_write(cdns_phy->phy_pll_cfg, reg_pairs[0].val);
for (i = 1; i < num_regs; i++)
regmap_write(regmap, reg_pairs[i].off,
reg_pairs[i].val);
}
xcvr_diag_vals = cdns_torrent_get_tbl_vals(&init_data->xcvr_diag_vals_tbl,
CLK_ANY, CLK_ANY,
phy_type, TYPE_NONE,
ANY_SSC);
if (xcvr_diag_vals) {
reg_pairs = xcvr_diag_vals->reg_pairs;
num_regs = xcvr_diag_vals->num_regs;
for (i = 0; i < inst->num_lanes; i++) {
regmap = cdns_phy->regmap_tx_lane_cdb[i + inst->mlane];
for (j = 0; j < num_regs; j++)
regmap_write(regmap, reg_pairs[j].off,
reg_pairs[j].val);
}
}
/* PHY PCS common registers configurations */
pcs_cmn_vals = cdns_torrent_get_tbl_vals(&init_data->pcs_cmn_vals_tbl,
CLK_ANY, CLK_ANY,
phy_type, TYPE_NONE,
ANY_SSC);
if (pcs_cmn_vals) {
reg_pairs = pcs_cmn_vals->reg_pairs;
num_regs = pcs_cmn_vals->num_regs;
regmap = cdns_phy->regmap_phy_pcs_common_cdb;
for (i = 0; i < num_regs; i++)
regmap_write(regmap, reg_pairs[i].off,
reg_pairs[i].val);
}
/* PHY PMA common registers configurations */
phy_pma_cmn_vals = cdns_torrent_get_tbl_vals(&init_data->phy_pma_cmn_vals_tbl,
CLK_ANY, CLK_ANY,
phy_type, TYPE_NONE,
ANY_SSC);
if (phy_pma_cmn_vals) {
reg_pairs = phy_pma_cmn_vals->reg_pairs;
num_regs = phy_pma_cmn_vals->num_regs;
regmap = cdns_phy->regmap_phy_pma_common_cdb;
for (i = 0; i < num_regs; i++)
regmap_write(regmap, reg_pairs[i].off,
reg_pairs[i].val);
}
/* PMA common registers configurations */
cmn_vals = cdns_torrent_get_tbl_vals(&init_data->cmn_vals_tbl,
ref_clk, ref_clk,
phy_type, TYPE_NONE,
ssc);
if (cmn_vals) {
reg_pairs = cmn_vals->reg_pairs;
num_regs = cmn_vals->num_regs;
regmap = cdns_phy->regmap_common_cdb;
for (i = 0; i < num_regs; i++)
regmap_write(regmap, reg_pairs[i].off,
reg_pairs[i].val);
}
/* PMA TX lane registers configurations */
tx_ln_vals = cdns_torrent_get_tbl_vals(&init_data->tx_ln_vals_tbl,
ref_clk, ref_clk,
phy_type, TYPE_NONE,
ssc);
if (tx_ln_vals) {
reg_pairs = tx_ln_vals->reg_pairs;
num_regs = tx_ln_vals->num_regs;
for (i = 0; i < inst->num_lanes; i++) {
regmap = cdns_phy->regmap_tx_lane_cdb[i + inst->mlane];
for (j = 0; j < num_regs; j++)
regmap_write(regmap, reg_pairs[j].off,
reg_pairs[j].val);
}
}
/* PMA RX lane registers configurations */
rx_ln_vals = cdns_torrent_get_tbl_vals(&init_data->rx_ln_vals_tbl,
ref_clk, ref_clk,
phy_type, TYPE_NONE,
ssc);
if (rx_ln_vals) {
reg_pairs = rx_ln_vals->reg_pairs;
num_regs = rx_ln_vals->num_regs;
for (i = 0; i < inst->num_lanes; i++) {
regmap = cdns_phy->regmap_rx_lane_cdb[i + inst->mlane];
for (j = 0; j < num_regs; j++)
regmap_write(regmap, reg_pairs[j].off,
reg_pairs[j].val);
}
}
if (phy_type == TYPE_DP)
return cdns_torrent_dp_init(phy);
return 0;
}
static const struct phy_ops cdns_torrent_phy_ops = {
.init = cdns_torrent_phy_init,
.configure = cdns_torrent_dp_configure,
.power_on = cdns_torrent_phy_on,
.power_off = cdns_torrent_phy_off,
.owner = THIS_MODULE,
};
static int cdns_torrent_noop_phy_on(struct phy *phy)
{
/* Give 5ms to 10ms delay for the PIPE clock to be stable */
usleep_range(5000, 10000);
return 0;
}
static const struct phy_ops noop_ops = {
.power_on = cdns_torrent_noop_phy_on,
.owner = THIS_MODULE,
};
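/*
 * Configure a two-link (multi-protocol) PHY: apply the register tables for
 * each subnode, looking up values as [phy_t1][phy_t2] for the first link and
 * the swapped combination for the second, then release the link and PHY
 * resets.
 */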
static
int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy)
{
const struct cdns_torrent_data *init_data = cdns_phy->init_data;
struct cdns_torrent_vals *cmn_vals, *tx_ln_vals, *rx_ln_vals;
enum cdns_torrent_ref_clk ref_clk = cdns_phy->ref_clk_rate;
struct cdns_torrent_vals *link_cmn_vals, *xcvr_diag_vals;
enum cdns_torrent_phy_type phy_t1, phy_t2;
struct cdns_torrent_vals *pcs_cmn_vals;
int i, j, node, mlane, num_lanes, ret;
struct cdns_reg_pairs *reg_pairs;
enum cdns_torrent_ssc_mode ssc;
struct regmap *regmap;
u32 num_regs;
/* Maximum 2 links (subnodes) are supported */
if (cdns_phy->nsubnodes != 2)
return -EINVAL;
phy_t1 = cdns_phy->phys[0].phy_type;
phy_t2 = cdns_phy->phys[1].phy_type;
	/*
	 * First configure the PHY for the first link with phy_t1. Get the
	 * array values as [phy_t1][phy_t2][ssc].
	 */
for (node = 0; node < cdns_phy->nsubnodes; node++) {
if (node == 1) {
			/*
			 * Once the first link with phy_t1 is configured,
			 * configure the PHY for the second link with phy_t2.
			 * Get the array values as [phy_t2][phy_t1][ssc].
			 */
swap(phy_t1, phy_t2);
}
mlane = cdns_phy->phys[node].mlane;
ssc = cdns_phy->phys[node].ssc_mode;
num_lanes = cdns_phy->phys[node].num_lanes;
		/*
		 * PHY configuration specific registers:
		 * link_cmn_vals depend on the combination of PHY types being
		 * configured and are common to both PHY types, so the array
		 * values should be the same for [phy_t1][phy_t2][ssc] and
		 * [phy_t2][phy_t1][ssc].
		 * xcvr_diag_vals also depend on the combination of PHY types
		 * being configured, but these can differ for each particular
		 * PHY type and are applied per lane.
		 */
link_cmn_vals = cdns_torrent_get_tbl_vals(&init_data->link_cmn_vals_tbl,
CLK_ANY, CLK_ANY,
phy_t1, phy_t2, ANY_SSC);
if (link_cmn_vals) {
reg_pairs = link_cmn_vals->reg_pairs;
num_regs = link_cmn_vals->num_regs;
regmap = cdns_phy->regmap_common_cdb;
			/*
			 * The first array value in link_cmn_vals must be for
			 * the PHY_PLL_CFG register.
			 */
regmap_field_write(cdns_phy->phy_pll_cfg,
reg_pairs[0].val);
for (i = 1; i < num_regs; i++)
regmap_write(regmap, reg_pairs[i].off,
reg_pairs[i].val);
}
xcvr_diag_vals = cdns_torrent_get_tbl_vals(&init_data->xcvr_diag_vals_tbl,
CLK_ANY, CLK_ANY,
phy_t1, phy_t2, ANY_SSC);
if (xcvr_diag_vals) {
reg_pairs = xcvr_diag_vals->reg_pairs;
num_regs = xcvr_diag_vals->num_regs;
for (i = 0; i < num_lanes; i++) {
regmap = cdns_phy->regmap_tx_lane_cdb[i + mlane];
for (j = 0; j < num_regs; j++)
regmap_write(regmap, reg_pairs[j].off,
reg_pairs[j].val);
}
}
/* PHY PCS common registers configurations */
pcs_cmn_vals = cdns_torrent_get_tbl_vals(&init_data->pcs_cmn_vals_tbl,
CLK_ANY, CLK_ANY,
phy_t1, phy_t2, ANY_SSC);
if (pcs_cmn_vals) {
reg_pairs = pcs_cmn_vals->reg_pairs;
num_regs = pcs_cmn_vals->num_regs;
regmap = cdns_phy->regmap_phy_pcs_common_cdb;
for (i = 0; i < num_regs; i++)
regmap_write(regmap, reg_pairs[i].off,
reg_pairs[i].val);
}
/* PMA common registers configurations */
cmn_vals = cdns_torrent_get_tbl_vals(&init_data->cmn_vals_tbl,
ref_clk, ref_clk,
phy_t1, phy_t2, ssc);
if (cmn_vals) {
reg_pairs = cmn_vals->reg_pairs;
num_regs = cmn_vals->num_regs;
regmap = cdns_phy->regmap_common_cdb;
for (i = 0; i < num_regs; i++)
regmap_write(regmap, reg_pairs[i].off,
reg_pairs[i].val);
}
/* PMA TX lane registers configurations */
tx_ln_vals = cdns_torrent_get_tbl_vals(&init_data->tx_ln_vals_tbl,
ref_clk, ref_clk,
phy_t1, phy_t2, ssc);
if (tx_ln_vals) {
reg_pairs = tx_ln_vals->reg_pairs;
num_regs = tx_ln_vals->num_regs;
for (i = 0; i < num_lanes; i++) {
regmap = cdns_phy->regmap_tx_lane_cdb[i + mlane];
for (j = 0; j < num_regs; j++)
regmap_write(regmap, reg_pairs[j].off,
reg_pairs[j].val);
}
}
/* PMA RX lane registers configurations */
rx_ln_vals = cdns_torrent_get_tbl_vals(&init_data->rx_ln_vals_tbl,
ref_clk, ref_clk,
phy_t1, phy_t2, ssc);
if (rx_ln_vals) {
reg_pairs = rx_ln_vals->reg_pairs;
num_regs = rx_ln_vals->num_regs;
for (i = 0; i < num_lanes; i++) {
regmap = cdns_phy->regmap_rx_lane_cdb[i + mlane];
for (j = 0; j < num_regs; j++)
regmap_write(regmap, reg_pairs[j].off,
reg_pairs[j].val);
}
}
if (phy_t1 == TYPE_DP) {
ret = cdns_torrent_dp_get_pll(cdns_phy, phy_t2);
if (ret)
return ret;
}
reset_control_deassert(cdns_phy->phys[node].lnk_rst);
}
/* Take the PHY out of reset */
ret = reset_control_deassert(cdns_phy->phy_rst);
if (ret)
return ret;
return 0;
}
static void cdns_torrent_clk_cleanup(struct cdns_torrent_phy *cdns_phy)
{
struct device *dev = cdns_phy->dev;
of_clk_del_provider(dev->of_node);
}
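/*
 * Register the PHY output clocks (derived refclk, received refclk and the
 * refclk driver) and expose them through an of_clk provider.
 */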
static int cdns_torrent_clk_register(struct cdns_torrent_phy *cdns_phy)
{
struct device *dev = cdns_phy->dev;
struct device_node *node = dev->of_node;
struct clk_hw_onecell_data *data;
int ret;
data = devm_kzalloc(dev, struct_size(data, hws, CDNS_TORRENT_OUTPUT_CLOCKS), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->num = CDNS_TORRENT_OUTPUT_CLOCKS;
cdns_phy->clk_hw_data = data;
ret = cdns_torrent_derived_refclk_register(cdns_phy);
if (ret) {
dev_err(dev, "failed to register derived refclk\n");
return ret;
}
ret = cdns_torrent_received_refclk_register(cdns_phy);
if (ret) {
dev_err(dev, "failed to register received refclk\n");
return ret;
}
ret = cdns_torrent_refclk_driver_register(cdns_phy);
if (ret) {
dev_err(dev, "failed to register refclk driver\n");
return ret;
}
ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, data);
if (ret) {
dev_err(dev, "Failed to add clock provider: %s\n", node->name);
return ret;
}
return 0;
}
static int cdns_torrent_reset(struct cdns_torrent_phy *cdns_phy)
{
struct device *dev = cdns_phy->dev;
cdns_phy->phy_rst = devm_reset_control_get_exclusive_by_index(dev, 0);
if (IS_ERR(cdns_phy->phy_rst)) {
dev_err(dev, "%s: failed to get reset\n",
dev->of_node->full_name);
return PTR_ERR(cdns_phy->phy_rst);
}
cdns_phy->apb_rst = devm_reset_control_get_optional_exclusive(dev, "torrent_apb");
if (IS_ERR(cdns_phy->apb_rst)) {
dev_err(dev, "%s: failed to get apb reset\n",
dev->of_node->full_name);
return PTR_ERR(cdns_phy->apb_rst);
}
return 0;
}
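/*
 * Acquire and enable the "refclk" input clock and map its rate to the
 * driver's reference clock rate enumeration.
 */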
static int cdns_torrent_clk(struct cdns_torrent_phy *cdns_phy)
{
struct device *dev = cdns_phy->dev;
unsigned long ref_clk_rate;
int ret;
cdns_phy->clk = devm_clk_get(dev, "refclk");
if (IS_ERR(cdns_phy->clk)) {
dev_err(dev, "phy ref clock not found\n");
return PTR_ERR(cdns_phy->clk);
}
ret = clk_prepare_enable(cdns_phy->clk);
if (ret) {
dev_err(cdns_phy->dev, "Failed to prepare ref clock\n");
return ret;
}
ref_clk_rate = clk_get_rate(cdns_phy->clk);
if (!ref_clk_rate) {
dev_err(cdns_phy->dev, "Failed to get ref clock rate\n");
clk_disable_unprepare(cdns_phy->clk);
return -EINVAL;
}
switch (ref_clk_rate) {
case REF_CLK_19_2MHZ:
cdns_phy->ref_clk_rate = CLK_19_2_MHZ;
break;
case REF_CLK_25MHZ:
cdns_phy->ref_clk_rate = CLK_25_MHZ;
break;
case REF_CLK_100MHZ:
cdns_phy->ref_clk_rate = CLK_100_MHZ;
break;
case REF_CLK_156_25MHZ:
cdns_phy->ref_clk_rate = CLK_156_25_MHZ;
break;
default:
dev_err(cdns_phy->dev, "Invalid Ref Clock Rate\n");
clk_disable_unprepare(cdns_phy->clk);
return -EINVAL;
}
return 0;
}
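/*
 * Probe: map the SerDes registers, set up regmaps and register fields,
 * register the output clocks, then create a PHY for each available "phy"
 * subnode. If the PHY has already been configured (PHY_PMA_CMN_CTRL1 reads
 * non-zero), reset and reference clock setup are skipped and no-op PHY ops
 * are used.
 */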
static int cdns_torrent_phy_probe(struct platform_device *pdev)
{
struct cdns_torrent_phy *cdns_phy;
struct device *dev = &pdev->dev;
struct phy_provider *phy_provider;
const struct cdns_torrent_data *data;
struct device_node *child;
int ret, subnodes, node = 0, i;
u32 total_num_lanes = 0;
int already_configured;
u8 init_dp_regmap = 0;
u32 phy_type;
/* Get init data for this PHY */
data = of_device_get_match_data(dev);
if (!data)
return -EINVAL;
cdns_phy = devm_kzalloc(dev, sizeof(*cdns_phy), GFP_KERNEL);
if (!cdns_phy)
return -ENOMEM;
dev_set_drvdata(dev, cdns_phy);
cdns_phy->dev = dev;
cdns_phy->init_data = data;
cdns_phy->sd_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(cdns_phy->sd_base))
return PTR_ERR(cdns_phy->sd_base);
subnodes = of_get_available_child_count(dev->of_node);
if (subnodes == 0) {
dev_err(dev, "No available link subnodes found\n");
return -EINVAL;
}
ret = cdns_torrent_regmap_init(cdns_phy);
if (ret)
return ret;
ret = cdns_torrent_regfield_init(cdns_phy);
if (ret)
return ret;
ret = cdns_torrent_clk_register(cdns_phy);
if (ret)
return ret;
regmap_field_read(cdns_phy->phy_pma_cmn_ctrl_1, &already_configured);
if (!already_configured) {
ret = cdns_torrent_reset(cdns_phy);
if (ret)
goto clk_cleanup;
ret = cdns_torrent_clk(cdns_phy);
if (ret)
goto clk_cleanup;
/* Enable APB */
reset_control_deassert(cdns_phy->apb_rst);
}
for_each_available_child_of_node(dev->of_node, child) {
struct phy *gphy;
/* PHY subnode name must be 'phy'. */
if (!(of_node_name_eq(child, "phy")))
continue;
cdns_phy->phys[node].lnk_rst =
of_reset_control_array_get_exclusive(child);
if (IS_ERR(cdns_phy->phys[node].lnk_rst)) {
dev_err(dev, "%s: failed to get reset\n",
child->full_name);
ret = PTR_ERR(cdns_phy->phys[node].lnk_rst);
goto put_lnk_rst;
}
if (of_property_read_u32(child, "reg",
&cdns_phy->phys[node].mlane)) {
dev_err(dev, "%s: No \"reg\"-property.\n",
child->full_name);
ret = -EINVAL;
goto put_child;
}
if (of_property_read_u32(child, "cdns,phy-type", &phy_type)) {
dev_err(dev, "%s: No \"cdns,phy-type\"-property.\n",
child->full_name);
ret = -EINVAL;
goto put_child;
}
switch (phy_type) {
case PHY_TYPE_PCIE:
cdns_phy->phys[node].phy_type = TYPE_PCIE;
break;
case PHY_TYPE_DP:
cdns_phy->phys[node].phy_type = TYPE_DP;
break;
case PHY_TYPE_SGMII:
cdns_phy->phys[node].phy_type = TYPE_SGMII;
break;
case PHY_TYPE_QSGMII:
cdns_phy->phys[node].phy_type = TYPE_QSGMII;
break;
case PHY_TYPE_USB3:
cdns_phy->phys[node].phy_type = TYPE_USB;
break;
case PHY_TYPE_USXGMII:
cdns_phy->phys[node].phy_type = TYPE_USXGMII;
break;
default:
dev_err(dev, "Unsupported protocol\n");
ret = -EINVAL;
goto put_child;
}
if (of_property_read_u32(child, "cdns,num-lanes",
&cdns_phy->phys[node].num_lanes)) {
dev_err(dev, "%s: No \"cdns,num-lanes\"-property.\n",
child->full_name);
ret = -EINVAL;
goto put_child;
}
total_num_lanes += cdns_phy->phys[node].num_lanes;
/* Get SSC mode */
cdns_phy->phys[node].ssc_mode = NO_SSC;
of_property_read_u32(child, "cdns,ssc-mode",
&cdns_phy->phys[node].ssc_mode);
if (!already_configured)
gphy = devm_phy_create(dev, child, &cdns_torrent_phy_ops);
else
gphy = devm_phy_create(dev, child, &noop_ops);
if (IS_ERR(gphy)) {
ret = PTR_ERR(gphy);
goto put_child;
}
if (cdns_phy->phys[node].phy_type == TYPE_DP) {
switch (cdns_phy->phys[node].num_lanes) {
case 1:
case 2:
case 4:
/* valid number of lanes */
break;
default:
dev_err(dev, "unsupported number of lanes: %d\n",
cdns_phy->phys[node].num_lanes);
ret = -EINVAL;
goto put_child;
}
cdns_phy->max_bit_rate = DEFAULT_MAX_BIT_RATE;
of_property_read_u32(child, "cdns,max-bit-rate",
&cdns_phy->max_bit_rate);
switch (cdns_phy->max_bit_rate) {
case 1620:
case 2160:
case 2430:
case 2700:
case 3240:
case 4320:
case 5400:
case 8100:
/* valid bit rate */
break;
default:
dev_err(dev, "unsupported max bit rate: %dMbps\n",
cdns_phy->max_bit_rate);
ret = -EINVAL;
goto put_child;
}
/* DPTX registers */
cdns_phy->base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(cdns_phy->base)) {
ret = PTR_ERR(cdns_phy->base);
goto put_child;
}
if (!init_dp_regmap) {
ret = cdns_torrent_dp_regmap_init(cdns_phy);
if (ret)
goto put_child;
ret = cdns_torrent_dp_regfield_init(cdns_phy);
if (ret)
goto put_child;
init_dp_regmap++;
}
dev_dbg(dev, "DP max bit rate %d.%03d Gbps\n",
cdns_phy->max_bit_rate / 1000,
cdns_phy->max_bit_rate % 1000);
gphy->attrs.bus_width = cdns_phy->phys[node].num_lanes;
gphy->attrs.max_link_rate = cdns_phy->max_bit_rate;
gphy->attrs.mode = PHY_MODE_DP;
}
cdns_phy->phys[node].phy = gphy;
phy_set_drvdata(gphy, &cdns_phy->phys[node]);
node++;
}
cdns_phy->nsubnodes = node;
if (total_num_lanes > MAX_NUM_LANES) {
dev_err(dev, "Invalid lane configuration\n");
ret = -EINVAL;
goto put_lnk_rst;
}
if (cdns_phy->nsubnodes > 1 && !already_configured) {
ret = cdns_torrent_phy_configure_multilink(cdns_phy);
if (ret)
goto put_lnk_rst;
}
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
if (IS_ERR(phy_provider)) {
ret = PTR_ERR(phy_provider);
goto put_lnk_rst;
}
if (cdns_phy->nsubnodes > 1)
dev_dbg(dev, "Multi-link: %s (%d lanes) & %s (%d lanes)",
cdns_torrent_get_phy_type(cdns_phy->phys[0].phy_type),
cdns_phy->phys[0].num_lanes,
cdns_torrent_get_phy_type(cdns_phy->phys[1].phy_type),
cdns_phy->phys[1].num_lanes);
else
dev_dbg(dev, "Single link: %s (%d lanes)",
cdns_torrent_get_phy_type(cdns_phy->phys[0].phy_type),
cdns_phy->phys[0].num_lanes);
return 0;
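/*
 * Error unwinding: 'put_child' bumps node so the release loop below also
 * drops the current child's link reset, 'put_lnk_rst' releases all link
 * resets acquired so far, and 'clk_cleanup' tears down the clocks
 * registered earlier.
 */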
put_child:
node++;
put_lnk_rst:
for (i = 0; i < node; i++)
reset_control_put(cdns_phy->phys[i].lnk_rst);
of_node_put(child);
reset_control_assert(cdns_phy->apb_rst);
clk_disable_unprepare(cdns_phy->clk);
clk_cleanup:
cdns_torrent_clk_cleanup(cdns_phy);
return ret;
}
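
/* Undo probe: assert resets, drop link resets and disable clocks. */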
static void cdns_torrent_phy_remove(struct platform_device *pdev)
{
struct cdns_torrent_phy *cdns_phy = platform_get_drvdata(pdev);
int i;
reset_control_assert(cdns_phy->phy_rst);
reset_control_assert(cdns_phy->apb_rst);
for (i = 0; i < cdns_phy->nsubnodes; i++) {
reset_control_assert(cdns_phy->phys[i].lnk_rst);
reset_control_put(cdns_phy->phys[i].lnk_rst);
}
clk_disable_unprepare(cdns_phy->clk);
cdns_torrent_clk_cleanup(cdns_phy);
}
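
/*
 * The tables below hold register/value sequences for the Torrent PHY
 * common (CMN), PCS and per-lane TX/RX blocks, grouped by link
 * combination, reference-clock frequency and SSC mode. They are applied
 * when the driver initializes and configures the PHY.
 */
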
/* USB and DP link configuration */
static struct cdns_reg_pairs usb_dp_link_cmn_regs[] = {
{0x0002, PHY_PLL_CFG},
{0x8600, CMN_PDIAG_PLL0_CLK_SEL_M0}
};
static struct cdns_reg_pairs usb_dp_xcvr_diag_ln_regs[] = {
{0x0000, XCVR_DIAG_HSCLK_SEL},
{0x0001, XCVR_DIAG_HSCLK_DIV},
{0x0041, XCVR_DIAG_PLLDRC_CTRL}
};
static struct cdns_reg_pairs dp_usb_xcvr_diag_ln_regs[] = {
{0x0001, XCVR_DIAG_HSCLK_SEL},
{0x0009, XCVR_DIAG_PLLDRC_CTRL}
};
static struct cdns_torrent_vals usb_dp_link_cmn_vals = {
.reg_pairs = usb_dp_link_cmn_regs,
.num_regs = ARRAY_SIZE(usb_dp_link_cmn_regs),
};
static struct cdns_torrent_vals usb_dp_xcvr_diag_ln_vals = {
.reg_pairs = usb_dp_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(usb_dp_xcvr_diag_ln_regs),
};
static struct cdns_torrent_vals dp_usb_xcvr_diag_ln_vals = {
.reg_pairs = dp_usb_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(dp_usb_xcvr_diag_ln_regs),
};
/* TI USXGMII configuration: Enable cmn_refclk_rcv_out_en */
static struct cdns_reg_pairs ti_usxgmii_phy_pma_cmn_regs[] = {
{0x0040, PHY_PMA_CMN_CTRL1},
};
static struct cdns_torrent_vals ti_usxgmii_phy_pma_cmn_vals = {
.reg_pairs = ti_usxgmii_phy_pma_cmn_regs,
.num_regs = ARRAY_SIZE(ti_usxgmii_phy_pma_cmn_regs),
};
/* Single USXGMII link configuration */
static struct cdns_reg_pairs sl_usxgmii_link_cmn_regs[] = {
{0x0000, PHY_PLL_CFG},
{0x0400, CMN_PDIAG_PLL0_CLK_SEL_M0}
};
static struct cdns_reg_pairs sl_usxgmii_xcvr_diag_ln_regs[] = {
{0x0000, XCVR_DIAG_HSCLK_SEL},
{0x0001, XCVR_DIAG_HSCLK_DIV},
{0x0001, XCVR_DIAG_PLLDRC_CTRL}
};
static struct cdns_torrent_vals sl_usxgmii_link_cmn_vals = {
.reg_pairs = sl_usxgmii_link_cmn_regs,
.num_regs = ARRAY_SIZE(sl_usxgmii_link_cmn_regs),
};
static struct cdns_torrent_vals sl_usxgmii_xcvr_diag_ln_vals = {
.reg_pairs = sl_usxgmii_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(sl_usxgmii_xcvr_diag_ln_regs),
};
/* Single link USXGMII, 156.25 MHz Ref clk, no SSC */
static struct cdns_reg_pairs sl_usxgmii_156_25_no_ssc_cmn_regs[] = {
{0x0014, CMN_SSM_BIAS_TMR},
{0x0028, CMN_PLLSM0_PLLPRE_TMR},
{0x00A4, CMN_PLLSM0_PLLLOCK_TMR},
{0x0028, CMN_PLLSM1_PLLPRE_TMR},
{0x00A4, CMN_PLLSM1_PLLLOCK_TMR},
{0x0062, CMN_BGCAL_INIT_TMR},
{0x0062, CMN_BGCAL_ITER_TMR},
{0x0014, CMN_IBCAL_INIT_TMR},
{0x0018, CMN_TXPUCAL_INIT_TMR},
{0x0005, CMN_TXPUCAL_ITER_TMR},
{0x0018, CMN_TXPDCAL_INIT_TMR},
{0x0005, CMN_TXPDCAL_ITER_TMR},
{0x024A, CMN_RXCAL_INIT_TMR},
{0x0005, CMN_RXCAL_ITER_TMR},
{0x000B, CMN_SD_CAL_REFTIM_START},
{0x0132, CMN_SD_CAL_PLLCNT_START},
{0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0},
{0x0014, CMN_PLL0_DSM_FBH_OVRD_M0},
{0x0014, CMN_PLL1_DSM_FBH_OVRD_M0},
{0x0005, CMN_PLL0_DSM_FBL_OVRD_M0},
{0x0005, CMN_PLL1_DSM_FBL_OVRD_M0},
{0x061B, CMN_PLL0_VCOCAL_INIT_TMR},
{0x061B, CMN_PLL1_VCOCAL_INIT_TMR},
{0x0019, CMN_PLL0_VCOCAL_ITER_TMR},
{0x0019, CMN_PLL1_VCOCAL_ITER_TMR},
{0x1354, CMN_PLL0_VCOCAL_REFTIM_START},
{0x1354, CMN_PLL1_VCOCAL_REFTIM_START},
{0x1354, CMN_PLL0_VCOCAL_PLLCNT_START},
{0x1354, CMN_PLL1_VCOCAL_PLLCNT_START},
{0x0003, CMN_PLL0_VCOCAL_TCTRL},
{0x0003, CMN_PLL1_VCOCAL_TCTRL},
{0x0138, CMN_PLL0_LOCK_REFCNT_START},
{0x0138, CMN_PLL1_LOCK_REFCNT_START},
{0x0138, CMN_PLL0_LOCK_PLLCNT_START},
{0x0138, CMN_PLL1_LOCK_PLLCNT_START}
};
static struct cdns_reg_pairs usxgmii_156_25_no_ssc_tx_ln_regs[] = {
{0x07A2, TX_RCVDET_ST_TMR},
{0x00F3, TX_PSC_A0},
{0x04A2, TX_PSC_A2},
{0x04A2, TX_PSC_A3},
{0x0000, TX_TXCC_CPOST_MULT_00},
{0x0000, XCVR_DIAG_PSC_OVRD}
};
static struct cdns_reg_pairs usxgmii_156_25_no_ssc_rx_ln_regs[] = {
{0x0014, RX_SDCAL0_INIT_TMR},
{0x0062, RX_SDCAL0_ITER_TMR},
{0x0014, RX_SDCAL1_INIT_TMR},
{0x0062, RX_SDCAL1_ITER_TMR},
{0x091D, RX_PSC_A0},
{0x0900, RX_PSC_A2},
{0x0100, RX_PSC_A3},
{0x0030, RX_REE_SMGM_CTRL1},
{0x03C7, RX_REE_GCSM1_EQENM_PH1},
{0x01C7, RX_REE_GCSM1_EQENM_PH2},
{0x0000, RX_DIAG_DFE_CTRL},
{0x0019, RX_REE_TAP1_CLIP},
{0x0019, RX_REE_TAP2TON_CLIP},
{0x00B9, RX_DIAG_NQST_CTRL},
{0x0C21, RX_DIAG_DFE_AMP_TUNE_2},
{0x0002, RX_DIAG_DFE_AMP_TUNE_3},
{0x0033, RX_DIAG_PI_RATE},
{0x0001, RX_DIAG_ACYA},
{0x018C, RX_CDRLF_CNFG}
};
static struct cdns_torrent_vals sl_usxgmii_156_25_no_ssc_cmn_vals = {
.reg_pairs = sl_usxgmii_156_25_no_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(sl_usxgmii_156_25_no_ssc_cmn_regs),
};
static struct cdns_torrent_vals usxgmii_156_25_no_ssc_tx_ln_vals = {
.reg_pairs = usxgmii_156_25_no_ssc_tx_ln_regs,
.num_regs = ARRAY_SIZE(usxgmii_156_25_no_ssc_tx_ln_regs),
};
static struct cdns_torrent_vals usxgmii_156_25_no_ssc_rx_ln_vals = {
.reg_pairs = usxgmii_156_25_no_ssc_rx_ln_regs,
.num_regs = ARRAY_SIZE(usxgmii_156_25_no_ssc_rx_ln_regs),
};
/* PCIe and DP link configuration */
static struct cdns_reg_pairs pcie_dp_link_cmn_regs[] = {
{0x0003, PHY_PLL_CFG},
{0x0601, CMN_PDIAG_PLL0_CLK_SEL_M0},
{0x0400, CMN_PDIAG_PLL0_CLK_SEL_M1}
};
static struct cdns_reg_pairs pcie_dp_xcvr_diag_ln_regs[] = {
{0x0000, XCVR_DIAG_HSCLK_SEL},
{0x0001, XCVR_DIAG_HSCLK_DIV},
{0x0012, XCVR_DIAG_PLLDRC_CTRL}
};
static struct cdns_reg_pairs dp_pcie_xcvr_diag_ln_regs[] = {
{0x0001, XCVR_DIAG_HSCLK_SEL},
{0x0009, XCVR_DIAG_PLLDRC_CTRL}
};
static struct cdns_torrent_vals pcie_dp_link_cmn_vals = {
.reg_pairs = pcie_dp_link_cmn_regs,
.num_regs = ARRAY_SIZE(pcie_dp_link_cmn_regs),
};
static struct cdns_torrent_vals pcie_dp_xcvr_diag_ln_vals = {
.reg_pairs = pcie_dp_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(pcie_dp_xcvr_diag_ln_regs),
};
static struct cdns_torrent_vals dp_pcie_xcvr_diag_ln_vals = {
.reg_pairs = dp_pcie_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(dp_pcie_xcvr_diag_ln_regs),
};
/* DP Multilink, 100 MHz Ref clk, no SSC */
static struct cdns_reg_pairs dp_100_no_ssc_cmn_regs[] = {
{0x007F, CMN_TXPUCAL_TUNE},
{0x007F, CMN_TXPDCAL_TUNE}
};
static struct cdns_reg_pairs dp_100_no_ssc_tx_ln_regs[] = {
{0x00FB, TX_PSC_A0},
{0x04AA, TX_PSC_A2},
{0x04AA, TX_PSC_A3},
{0x000F, XCVR_DIAG_BIDI_CTRL}
};
static struct cdns_reg_pairs dp_100_no_ssc_rx_ln_regs[] = {
{0x0000, RX_PSC_A0},
{0x0000, RX_PSC_A2},
{0x0000, RX_PSC_A3},
{0x0000, RX_PSC_CAL},
{0x0000, RX_REE_GCSM1_CTRL},
{0x0000, RX_REE_GCSM2_CTRL},
{0x0000, RX_REE_PERGCSM_CTRL}
};
static struct cdns_torrent_vals dp_100_no_ssc_cmn_vals = {
.reg_pairs = dp_100_no_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(dp_100_no_ssc_cmn_regs),
};
static struct cdns_torrent_vals dp_100_no_ssc_tx_ln_vals = {
.reg_pairs = dp_100_no_ssc_tx_ln_regs,
.num_regs = ARRAY_SIZE(dp_100_no_ssc_tx_ln_regs),
};
static struct cdns_torrent_vals dp_100_no_ssc_rx_ln_vals = {
.reg_pairs = dp_100_no_ssc_rx_ln_regs,
.num_regs = ARRAY_SIZE(dp_100_no_ssc_rx_ln_regs),
};
/* Single DisplayPort (DP) link configuration */
static struct cdns_reg_pairs sl_dp_link_cmn_regs[] = {
{0x0000, PHY_PLL_CFG},
};
static struct cdns_reg_pairs sl_dp_xcvr_diag_ln_regs[] = {
{0x0000, XCVR_DIAG_HSCLK_SEL},
{0x0001, XCVR_DIAG_PLLDRC_CTRL}
};
static struct cdns_torrent_vals sl_dp_link_cmn_vals = {
.reg_pairs = sl_dp_link_cmn_regs,
.num_regs = ARRAY_SIZE(sl_dp_link_cmn_regs),
};
static struct cdns_torrent_vals sl_dp_xcvr_diag_ln_vals = {
.reg_pairs = sl_dp_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(sl_dp_xcvr_diag_ln_regs),
};
/* Single DP, 19.2 MHz Ref clk, no SSC */
static struct cdns_reg_pairs sl_dp_19_2_no_ssc_cmn_regs[] = {
{0x0014, CMN_SSM_BIAS_TMR},
{0x0027, CMN_PLLSM0_PLLPRE_TMR},
{0x00A1, CMN_PLLSM0_PLLLOCK_TMR},
{0x0027, CMN_PLLSM1_PLLPRE_TMR},
{0x00A1, CMN_PLLSM1_PLLLOCK_TMR},
{0x0060, CMN_BGCAL_INIT_TMR},
{0x0060, CMN_BGCAL_ITER_TMR},
{0x0014, CMN_IBCAL_INIT_TMR},
{0x0018, CMN_TXPUCAL_INIT_TMR},
{0x0005, CMN_TXPUCAL_ITER_TMR},
{0x0018, CMN_TXPDCAL_INIT_TMR},
{0x0005, CMN_TXPDCAL_ITER_TMR},
{0x0240, CMN_RXCAL_INIT_TMR},
{0x0005, CMN_RXCAL_ITER_TMR},
{0x0002, CMN_SD_CAL_INIT_TMR},
{0x0002, CMN_SD_CAL_ITER_TMR},
{0x000B, CMN_SD_CAL_REFTIM_START},
{0x0137, CMN_SD_CAL_PLLCNT_START},
{0x0509, CMN_PDIAG_PLL0_CP_PADJ_M0},
{0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M0},
{0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M0},
{0x0004, CMN_PLL0_DSM_DIAG_M0},
{0x0509, CMN_PDIAG_PLL1_CP_PADJ_M0},
{0x0F00, CMN_PDIAG_PLL1_CP_IADJ_M0},
{0x0F08, CMN_PDIAG_PLL1_FILT_PADJ_M0},
{0x0004, CMN_PLL1_DSM_DIAG_M0},
{0x00C0, CMN_PLL0_VCOCAL_INIT_TMR},
{0x0004, CMN_PLL0_VCOCAL_ITER_TMR},
{0x00C0, CMN_PLL1_VCOCAL_INIT_TMR},
{0x0004, CMN_PLL1_VCOCAL_ITER_TMR},
{0x0260, CMN_PLL0_VCOCAL_REFTIM_START},
{0x0003, CMN_PLL0_VCOCAL_TCTRL},
{0x0260, CMN_PLL1_VCOCAL_REFTIM_START},
{0x0003, CMN_PLL1_VCOCAL_TCTRL}
};
static struct cdns_reg_pairs sl_dp_19_2_no_ssc_tx_ln_regs[] = {
{0x0780, TX_RCVDET_ST_TMR},
{0x00FB, TX_PSC_A0},
{0x04AA, TX_PSC_A2},
{0x04AA, TX_PSC_A3},
{0x000F, XCVR_DIAG_BIDI_CTRL}
};
static struct cdns_reg_pairs sl_dp_19_2_no_ssc_rx_ln_regs[] = {
{0x0000, RX_PSC_A0},
{0x0000, RX_PSC_A2},
{0x0000, RX_PSC_A3},
{0x0000, RX_PSC_CAL},
{0x0000, RX_REE_GCSM1_CTRL},
{0x0000, RX_REE_GCSM2_CTRL},
{0x0000, RX_REE_PERGCSM_CTRL}
};
static struct cdns_torrent_vals sl_dp_19_2_no_ssc_cmn_vals = {
.reg_pairs = sl_dp_19_2_no_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(sl_dp_19_2_no_ssc_cmn_regs),
};
static struct cdns_torrent_vals sl_dp_19_2_no_ssc_tx_ln_vals = {
.reg_pairs = sl_dp_19_2_no_ssc_tx_ln_regs,
.num_regs = ARRAY_SIZE(sl_dp_19_2_no_ssc_tx_ln_regs),
};
static struct cdns_torrent_vals sl_dp_19_2_no_ssc_rx_ln_vals = {
.reg_pairs = sl_dp_19_2_no_ssc_rx_ln_regs,
.num_regs = ARRAY_SIZE(sl_dp_19_2_no_ssc_rx_ln_regs),
};
/* Single DP, 25 MHz Ref clk, no SSC */
static struct cdns_reg_pairs sl_dp_25_no_ssc_cmn_regs[] = {
{0x0019, CMN_SSM_BIAS_TMR},
{0x0032, CMN_PLLSM0_PLLPRE_TMR},
{0x00D1, CMN_PLLSM0_PLLLOCK_TMR},
{0x0032, CMN_PLLSM1_PLLPRE_TMR},
{0x00D1, CMN_PLLSM1_PLLLOCK_TMR},
{0x007D, CMN_BGCAL_INIT_TMR},
{0x007D, CMN_BGCAL_ITER_TMR},
{0x0019, CMN_IBCAL_INIT_TMR},
{0x001E, CMN_TXPUCAL_INIT_TMR},
{0x0006, CMN_TXPUCAL_ITER_TMR},
{0x001E, CMN_TXPDCAL_INIT_TMR},
{0x0006, CMN_TXPDCAL_ITER_TMR},
{0x02EE, CMN_RXCAL_INIT_TMR},
{0x0006, CMN_RXCAL_ITER_TMR},
{0x0002, CMN_SD_CAL_INIT_TMR},
{0x0002, CMN_SD_CAL_ITER_TMR},
{0x000E, CMN_SD_CAL_REFTIM_START},
{0x012B, CMN_SD_CAL_PLLCNT_START},
{0x0509, CMN_PDIAG_PLL0_CP_PADJ_M0},
{0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M0},
{0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M0},
{0x0004, CMN_PLL0_DSM_DIAG_M0},
{0x0509, CMN_PDIAG_PLL1_CP_PADJ_M0},
{0x0F00, CMN_PDIAG_PLL1_CP_IADJ_M0},
{0x0F08, CMN_PDIAG_PLL1_FILT_PADJ_M0},
{0x0004, CMN_PLL1_DSM_DIAG_M0},
{0x00FA, CMN_PLL0_VCOCAL_INIT_TMR},
{0x0004, CMN_PLL0_VCOCAL_ITER_TMR},
{0x00FA, CMN_PLL1_VCOCAL_INIT_TMR},
{0x0004, CMN_PLL1_VCOCAL_ITER_TMR},
{0x0317, CMN_PLL0_VCOCAL_REFTIM_START},
{0x0003, CMN_PLL0_VCOCAL_TCTRL},
{0x0317, CMN_PLL1_VCOCAL_REFTIM_START},
{0x0003, CMN_PLL1_VCOCAL_TCTRL}
};
static struct cdns_reg_pairs sl_dp_25_no_ssc_tx_ln_regs[] = {
{0x09C4, TX_RCVDET_ST_TMR},
{0x00FB, TX_PSC_A0},
{0x04AA, TX_PSC_A2},
{0x04AA, TX_PSC_A3},
{0x000F, XCVR_DIAG_BIDI_CTRL}
};
static struct cdns_reg_pairs sl_dp_25_no_ssc_rx_ln_regs[] = {
{0x0000, RX_PSC_A0},
{0x0000, RX_PSC_A2},
{0x0000, RX_PSC_A3},
{0x0000, RX_PSC_CAL},
{0x0000, RX_REE_GCSM1_CTRL},
{0x0000, RX_REE_GCSM2_CTRL},
{0x0000, RX_REE_PERGCSM_CTRL}
};
static struct cdns_torrent_vals sl_dp_25_no_ssc_cmn_vals = {
.reg_pairs = sl_dp_25_no_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(sl_dp_25_no_ssc_cmn_regs),
};
static struct cdns_torrent_vals sl_dp_25_no_ssc_tx_ln_vals = {
.reg_pairs = sl_dp_25_no_ssc_tx_ln_regs,
.num_regs = ARRAY_SIZE(sl_dp_25_no_ssc_tx_ln_regs),
};
static struct cdns_torrent_vals sl_dp_25_no_ssc_rx_ln_vals = {
.reg_pairs = sl_dp_25_no_ssc_rx_ln_regs,
.num_regs = ARRAY_SIZE(sl_dp_25_no_ssc_rx_ln_regs),
};
/* Single DP, 100 MHz Ref clk, no SSC */
static struct cdns_reg_pairs sl_dp_100_no_ssc_cmn_regs[] = {
{0x0003, CMN_PLL0_VCOCAL_TCTRL},
{0x0003, CMN_PLL1_VCOCAL_TCTRL}
};
static struct cdns_reg_pairs sl_dp_100_no_ssc_tx_ln_regs[] = {
{0x00FB, TX_PSC_A0},
{0x04AA, TX_PSC_A2},
{0x04AA, TX_PSC_A3},
{0x000F, XCVR_DIAG_BIDI_CTRL}
};
static struct cdns_reg_pairs sl_dp_100_no_ssc_rx_ln_regs[] = {
{0x0000, RX_PSC_A0},
{0x0000, RX_PSC_A2},
{0x0000, RX_PSC_A3},
{0x0000, RX_PSC_CAL},
{0x0000, RX_REE_GCSM1_CTRL},
{0x0000, RX_REE_GCSM2_CTRL},
{0x0000, RX_REE_PERGCSM_CTRL}
};
static struct cdns_torrent_vals sl_dp_100_no_ssc_cmn_vals = {
.reg_pairs = sl_dp_100_no_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(sl_dp_100_no_ssc_cmn_regs),
};
static struct cdns_torrent_vals sl_dp_100_no_ssc_tx_ln_vals = {
.reg_pairs = sl_dp_100_no_ssc_tx_ln_regs,
.num_regs = ARRAY_SIZE(sl_dp_100_no_ssc_tx_ln_regs),
};
static struct cdns_torrent_vals sl_dp_100_no_ssc_rx_ln_vals = {
.reg_pairs = sl_dp_100_no_ssc_rx_ln_regs,
.num_regs = ARRAY_SIZE(sl_dp_100_no_ssc_rx_ln_regs),
};
/* USB and SGMII/QSGMII link configuration */
static struct cdns_reg_pairs usb_sgmii_link_cmn_regs[] = {
{0x0002, PHY_PLL_CFG},
{0x8600, CMN_PDIAG_PLL0_CLK_SEL_M0},
{0x0601, CMN_PDIAG_PLL1_CLK_SEL_M0}
};
static struct cdns_reg_pairs usb_sgmii_xcvr_diag_ln_regs[] = {
{0x0000, XCVR_DIAG_HSCLK_SEL},
{0x0001, XCVR_DIAG_HSCLK_DIV},
{0x0041, XCVR_DIAG_PLLDRC_CTRL}
};
static struct cdns_reg_pairs sgmii_usb_xcvr_diag_ln_regs[] = {
{0x0011, XCVR_DIAG_HSCLK_SEL},
{0x0003, XCVR_DIAG_HSCLK_DIV},
{0x009B, XCVR_DIAG_PLLDRC_CTRL}
};
static struct cdns_torrent_vals usb_sgmii_link_cmn_vals = {
.reg_pairs = usb_sgmii_link_cmn_regs,
.num_regs = ARRAY_SIZE(usb_sgmii_link_cmn_regs),
};
static struct cdns_torrent_vals usb_sgmii_xcvr_diag_ln_vals = {
.reg_pairs = usb_sgmii_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(usb_sgmii_xcvr_diag_ln_regs),
};
static struct cdns_torrent_vals sgmii_usb_xcvr_diag_ln_vals = {
.reg_pairs = sgmii_usb_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(sgmii_usb_xcvr_diag_ln_regs),
};
/* PCIe and USB Unique SSC link configuration */
static struct cdns_reg_pairs pcie_usb_link_cmn_regs[] = {
{0x0003, PHY_PLL_CFG},
{0x0601, CMN_PDIAG_PLL0_CLK_SEL_M0},
{0x0400, CMN_PDIAG_PLL0_CLK_SEL_M1},
{0x8600, CMN_PDIAG_PLL1_CLK_SEL_M0}
};
static struct cdns_reg_pairs pcie_usb_xcvr_diag_ln_regs[] = {
{0x0000, XCVR_DIAG_HSCLK_SEL},
{0x0001, XCVR_DIAG_HSCLK_DIV},
{0x0012, XCVR_DIAG_PLLDRC_CTRL}
};
static struct cdns_reg_pairs usb_pcie_xcvr_diag_ln_regs[] = {
{0x0011, XCVR_DIAG_HSCLK_SEL},
{0x0001, XCVR_DIAG_HSCLK_DIV},
{0x00C9, XCVR_DIAG_PLLDRC_CTRL}
};
static struct cdns_torrent_vals pcie_usb_link_cmn_vals = {
.reg_pairs = pcie_usb_link_cmn_regs,
.num_regs = ARRAY_SIZE(pcie_usb_link_cmn_regs),
};
static struct cdns_torrent_vals pcie_usb_xcvr_diag_ln_vals = {
.reg_pairs = pcie_usb_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(pcie_usb_xcvr_diag_ln_regs),
};
static struct cdns_torrent_vals usb_pcie_xcvr_diag_ln_vals = {
.reg_pairs = usb_pcie_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(usb_pcie_xcvr_diag_ln_regs),
};
/* USB 100 MHz Ref clk, internal SSC */
static struct cdns_reg_pairs usb_100_int_ssc_cmn_regs[] = {
{0x0004, CMN_PLL0_DSM_DIAG_M0},
{0x0004, CMN_PLL0_DSM_DIAG_M1},
{0x0004, CMN_PLL1_DSM_DIAG_M0},
{0x0509, CMN_PDIAG_PLL0_CP_PADJ_M0},
{0x0509, CMN_PDIAG_PLL0_CP_PADJ_M1},
{0x0509, CMN_PDIAG_PLL1_CP_PADJ_M0},
{0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M0},
{0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M1},
{0x0F00, CMN_PDIAG_PLL1_CP_IADJ_M0},
{0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M0},
{0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M1},
{0x0F08, CMN_PDIAG_PLL1_FILT_PADJ_M0},
{0x0064, CMN_PLL0_INTDIV_M0},
{0x0050, CMN_PLL0_INTDIV_M1},
{0x0064, CMN_PLL1_INTDIV_M0},
{0x0002, CMN_PLL0_FRACDIVH_M0},
{0x0002, CMN_PLL0_FRACDIVH_M1},
{0x0002, CMN_PLL1_FRACDIVH_M0},
{0x0044, CMN_PLL0_HIGH_THR_M0},
{0x0036, CMN_PLL0_HIGH_THR_M1},
{0x0044, CMN_PLL1_HIGH_THR_M0},
{0x0002, CMN_PDIAG_PLL0_CTRL_M0},
{0x0002, CMN_PDIAG_PLL0_CTRL_M1},
{0x0002, CMN_PDIAG_PLL1_CTRL_M0},
{0x0001, CMN_PLL0_SS_CTRL1_M0},
{0x0001, CMN_PLL0_SS_CTRL1_M1},
{0x0001, CMN_PLL1_SS_CTRL1_M0},
{0x011B, CMN_PLL0_SS_CTRL2_M0},
{0x011B, CMN_PLL0_SS_CTRL2_M1},
{0x011B, CMN_PLL1_SS_CTRL2_M0},
{0x006E, CMN_PLL0_SS_CTRL3_M0},
{0x0058, CMN_PLL0_SS_CTRL3_M1},
{0x006E, CMN_PLL1_SS_CTRL3_M0},
{0x000E, CMN_PLL0_SS_CTRL4_M0},
{0x0012, CMN_PLL0_SS_CTRL4_M1},
{0x000E, CMN_PLL1_SS_CTRL4_M0},
{0x0C5E, CMN_PLL0_VCOCAL_REFTIM_START},
{0x0C5E, CMN_PLL1_VCOCAL_REFTIM_START},
{0x0C56, CMN_PLL0_VCOCAL_PLLCNT_START},
{0x0C56, CMN_PLL1_VCOCAL_PLLCNT_START},
{0x00C7, CMN_PLL0_LOCK_REFCNT_START},
{0x00C7, CMN_PLL1_LOCK_REFCNT_START},
{0x00C7, CMN_PLL0_LOCK_PLLCNT_START},
{0x00C7, CMN_PLL1_LOCK_PLLCNT_START},
{0x0005, CMN_PLL0_LOCK_PLLCNT_THR},
{0x0005, CMN_PLL1_LOCK_PLLCNT_THR},
{0x8200, CMN_CDIAG_CDB_PWRI_OVRD},
{0x8200, CMN_CDIAG_XCVRC_PWRI_OVRD},
{0x007F, CMN_TXPUCAL_TUNE},
{0x007F, CMN_TXPDCAL_TUNE}
};
static struct cdns_torrent_vals usb_100_int_ssc_cmn_vals = {
.reg_pairs = usb_100_int_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(usb_100_int_ssc_cmn_regs),
};
/* Single USB link configuration */
static struct cdns_reg_pairs sl_usb_link_cmn_regs[] = {
{0x0000, PHY_PLL_CFG},
{0x8600, CMN_PDIAG_PLL0_CLK_SEL_M0}
};
static struct cdns_reg_pairs sl_usb_xcvr_diag_ln_regs[] = {
{0x0000, XCVR_DIAG_HSCLK_SEL},
{0x0001, XCVR_DIAG_HSCLK_DIV},
{0x0041, XCVR_DIAG_PLLDRC_CTRL}
};
static struct cdns_torrent_vals sl_usb_link_cmn_vals = {
.reg_pairs = sl_usb_link_cmn_regs,
.num_regs = ARRAY_SIZE(sl_usb_link_cmn_regs),
};
static struct cdns_torrent_vals sl_usb_xcvr_diag_ln_vals = {
.reg_pairs = sl_usb_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(sl_usb_xcvr_diag_ln_regs),
};
/* USB PHY PCS common configuration */
static struct cdns_reg_pairs usb_phy_pcs_cmn_regs[] = {
{0x0A0A, PHY_PIPE_USB3_GEN2_PRE_CFG0},
{0x1000, PHY_PIPE_USB3_GEN2_POST_CFG0},
{0x0010, PHY_PIPE_USB3_GEN2_POST_CFG1}
};
static struct cdns_torrent_vals usb_phy_pcs_cmn_vals = {
.reg_pairs = usb_phy_pcs_cmn_regs,
.num_regs = ARRAY_SIZE(usb_phy_pcs_cmn_regs),
};
/* USB 100 MHz Ref clk, no SSC */
static struct cdns_reg_pairs sl_usb_100_no_ssc_cmn_regs[] = {
{0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0},
{0x001E, CMN_PLL1_DSM_FBH_OVRD_M0},
{0x000C, CMN_PLL1_DSM_FBL_OVRD_M0},
{0x0003, CMN_PLL0_VCOCAL_TCTRL},
{0x0003, CMN_PLL1_VCOCAL_TCTRL},
{0x8200, CMN_CDIAG_CDB_PWRI_OVRD},
{0x8200, CMN_CDIAG_XCVRC_PWRI_OVRD}
};
static struct cdns_torrent_vals sl_usb_100_no_ssc_cmn_vals = {
.reg_pairs = sl_usb_100_no_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(sl_usb_100_no_ssc_cmn_regs),
};
static struct cdns_reg_pairs usb_100_no_ssc_cmn_regs[] = {
{0x8200, CMN_CDIAG_CDB_PWRI_OVRD},
{0x8200, CMN_CDIAG_XCVRC_PWRI_OVRD},
{0x007F, CMN_TXPUCAL_TUNE},
{0x007F, CMN_TXPDCAL_TUNE}
};
static struct cdns_reg_pairs usb_100_no_ssc_tx_ln_regs[] = {
{0x02FF, TX_PSC_A0},
{0x06AF, TX_PSC_A1},
{0x06AE, TX_PSC_A2},
{0x06AE, TX_PSC_A3},
{0x2A82, TX_TXCC_CTRL},
{0x0014, TX_TXCC_CPOST_MULT_01},
{0x0003, XCVR_DIAG_PSC_OVRD}
};
static struct cdns_reg_pairs usb_100_no_ssc_rx_ln_regs[] = {
{0x0D1D, RX_PSC_A0},
{0x0D1D, RX_PSC_A1},
{0x0D00, RX_PSC_A2},
{0x0500, RX_PSC_A3},
{0x0013, RX_SIGDET_HL_FILT_TMR},
{0x0000, RX_REE_GCSM1_CTRL},
{0x0C02, RX_REE_ATTEN_THR},
{0x0330, RX_REE_SMGM_CTRL1},
{0x0300, RX_REE_SMGM_CTRL2},
{0x0019, RX_REE_TAP1_CLIP},
{0x0019, RX_REE_TAP2TON_CLIP},
{0x1004, RX_DIAG_SIGDET_TUNE},
{0x00F9, RX_DIAG_NQST_CTRL},
{0x0C01, RX_DIAG_DFE_AMP_TUNE_2},
{0x0002, RX_DIAG_DFE_AMP_TUNE_3},
{0x0000, RX_DIAG_PI_CAP},
{0x0031, RX_DIAG_PI_RATE},
{0x0001, RX_DIAG_ACYA},
{0x018C, RX_CDRLF_CNFG},
{0x0003, RX_CDRLF_CNFG3}
};
static struct cdns_torrent_vals usb_100_no_ssc_cmn_vals = {
.reg_pairs = usb_100_no_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(usb_100_no_ssc_cmn_regs),
};
static struct cdns_torrent_vals usb_100_no_ssc_tx_ln_vals = {
.reg_pairs = usb_100_no_ssc_tx_ln_regs,
.num_regs = ARRAY_SIZE(usb_100_no_ssc_tx_ln_regs),
};
static struct cdns_torrent_vals usb_100_no_ssc_rx_ln_vals = {
.reg_pairs = usb_100_no_ssc_rx_ln_regs,
.num_regs = ARRAY_SIZE(usb_100_no_ssc_rx_ln_regs),
};
/* Single link USB, 100 MHz Ref clk, internal SSC */
static struct cdns_reg_pairs sl_usb_100_int_ssc_cmn_regs[] = {
{0x0004, CMN_PLL0_DSM_DIAG_M0},
{0x0004, CMN_PLL1_DSM_DIAG_M0},
{0x0509, CMN_PDIAG_PLL0_CP_PADJ_M0},
{0x0509, CMN_PDIAG_PLL1_CP_PADJ_M0},
{0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M0},
{0x0F00, CMN_PDIAG_PLL1_CP_IADJ_M0},
{0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M0},
{0x0F08, CMN_PDIAG_PLL1_FILT_PADJ_M0},
{0x0064, CMN_PLL0_INTDIV_M0},
{0x0064, CMN_PLL1_INTDIV_M0},
{0x0002, CMN_PLL0_FRACDIVH_M0},
{0x0002, CMN_PLL1_FRACDIVH_M0},
{0x0044, CMN_PLL0_HIGH_THR_M0},
{0x0044, CMN_PLL1_HIGH_THR_M0},
{0x0002, CMN_PDIAG_PLL0_CTRL_M0},
{0x0002, CMN_PDIAG_PLL1_CTRL_M0},
{0x0001, CMN_PLL0_SS_CTRL1_M0},
{0x0001, CMN_PLL1_SS_CTRL1_M0},
{0x011B, CMN_PLL0_SS_CTRL2_M0},
{0x011B, CMN_PLL1_SS_CTRL2_M0},
{0x006E, CMN_PLL0_SS_CTRL3_M0},
{0x006E, CMN_PLL1_SS_CTRL3_M0},
{0x000E, CMN_PLL0_SS_CTRL4_M0},
{0x000E, CMN_PLL1_SS_CTRL4_M0},
{0x0C5E, CMN_PLL0_VCOCAL_REFTIM_START},
{0x0C5E, CMN_PLL1_VCOCAL_REFTIM_START},
{0x0C56, CMN_PLL0_VCOCAL_PLLCNT_START},
{0x0C56, CMN_PLL1_VCOCAL_PLLCNT_START},
{0x0003, CMN_PLL0_VCOCAL_TCTRL},
{0x0003, CMN_PLL1_VCOCAL_TCTRL},
{0x00C7, CMN_PLL0_LOCK_REFCNT_START},
{0x00C7, CMN_PLL1_LOCK_REFCNT_START},
{0x00C7, CMN_PLL0_LOCK_PLLCNT_START},
{0x00C7, CMN_PLL1_LOCK_PLLCNT_START},
{0x0005, CMN_PLL0_LOCK_PLLCNT_THR},
{0x0005, CMN_PLL1_LOCK_PLLCNT_THR},
{0x8200, CMN_CDIAG_CDB_PWRI_OVRD},
{0x8200, CMN_CDIAG_XCVRC_PWRI_OVRD}
};
static struct cdns_torrent_vals sl_usb_100_int_ssc_cmn_vals = {
.reg_pairs = sl_usb_100_int_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(sl_usb_100_int_ssc_cmn_regs),
};
/* PCIe and SGMII/QSGMII Unique SSC link configuration */
static struct cdns_reg_pairs pcie_sgmii_link_cmn_regs[] = {
{0x0003, PHY_PLL_CFG},
{0x0601, CMN_PDIAG_PLL0_CLK_SEL_M0},
{0x0400, CMN_PDIAG_PLL0_CLK_SEL_M1},
{0x0601, CMN_PDIAG_PLL1_CLK_SEL_M0}
};
static struct cdns_reg_pairs pcie_sgmii_xcvr_diag_ln_regs[] = {
{0x0000, XCVR_DIAG_HSCLK_SEL},
{0x0001, XCVR_DIAG_HSCLK_DIV},
{0x0012, XCVR_DIAG_PLLDRC_CTRL}
};
static struct cdns_reg_pairs sgmii_pcie_xcvr_diag_ln_regs[] = {
{0x0011, XCVR_DIAG_HSCLK_SEL},
{0x0003, XCVR_DIAG_HSCLK_DIV},
{0x009B, XCVR_DIAG_PLLDRC_CTRL}
};
static struct cdns_torrent_vals pcie_sgmii_link_cmn_vals = {
.reg_pairs = pcie_sgmii_link_cmn_regs,
.num_regs = ARRAY_SIZE(pcie_sgmii_link_cmn_regs),
};
static struct cdns_torrent_vals pcie_sgmii_xcvr_diag_ln_vals = {
.reg_pairs = pcie_sgmii_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(pcie_sgmii_xcvr_diag_ln_regs),
};
static struct cdns_torrent_vals sgmii_pcie_xcvr_diag_ln_vals = {
.reg_pairs = sgmii_pcie_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(sgmii_pcie_xcvr_diag_ln_regs),
};
/* SGMII 100 MHz Ref clk, no SSC */
static struct cdns_reg_pairs sl_sgmii_100_no_ssc_cmn_regs[] = {
{0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0},
{0x001E, CMN_PLL1_DSM_FBH_OVRD_M0},
{0x000C, CMN_PLL1_DSM_FBL_OVRD_M0},
{0x0003, CMN_PLL0_VCOCAL_TCTRL},
{0x0003, CMN_PLL1_VCOCAL_TCTRL}
};
static struct cdns_torrent_vals sl_sgmii_100_no_ssc_cmn_vals = {
.reg_pairs = sl_sgmii_100_no_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(sl_sgmii_100_no_ssc_cmn_regs),
};
static struct cdns_reg_pairs sgmii_100_no_ssc_cmn_regs[] = {
{0x007F, CMN_TXPUCAL_TUNE},
{0x007F, CMN_TXPDCAL_TUNE}
};
static struct cdns_reg_pairs sgmii_100_no_ssc_tx_ln_regs[] = {
{0x00F3, TX_PSC_A0},
{0x04A2, TX_PSC_A2},
{0x04A2, TX_PSC_A3},
{0x0000, TX_TXCC_CPOST_MULT_00},
{0x00B3, DRV_DIAG_TX_DRV}
};
static struct cdns_reg_pairs ti_sgmii_100_no_ssc_tx_ln_regs[] = {
{0x00F3, TX_PSC_A0},
{0x04A2, TX_PSC_A2},
{0x04A2, TX_PSC_A3},
{0x0000, TX_TXCC_CPOST_MULT_00},
{0x00B3, DRV_DIAG_TX_DRV},
{0x4000, XCVR_DIAG_RXCLK_CTRL},
};
static struct cdns_reg_pairs sgmii_100_no_ssc_rx_ln_regs[] = {
{0x091D, RX_PSC_A0},
{0x0900, RX_PSC_A2},
{0x0100, RX_PSC_A3},
{0x03C7, RX_REE_GCSM1_EQENM_PH1},
{0x01C7, RX_REE_GCSM1_EQENM_PH2},
{0x0000, RX_DIAG_DFE_CTRL},
{0x0019, RX_REE_TAP1_CLIP},
{0x0019, RX_REE_TAP2TON_CLIP},
{0x0098, RX_DIAG_NQST_CTRL},
{0x0C01, RX_DIAG_DFE_AMP_TUNE_2},
{0x0000, RX_DIAG_DFE_AMP_TUNE_3},
{0x0000, RX_DIAG_PI_CAP},
{0x0010, RX_DIAG_PI_RATE},
{0x0001, RX_DIAG_ACYA},
{0x018C, RX_CDRLF_CNFG},
};
static struct cdns_torrent_vals sgmii_100_no_ssc_cmn_vals = {
.reg_pairs = sgmii_100_no_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(sgmii_100_no_ssc_cmn_regs),
};
static struct cdns_torrent_vals sgmii_100_no_ssc_tx_ln_vals = {
.reg_pairs = sgmii_100_no_ssc_tx_ln_regs,
.num_regs = ARRAY_SIZE(sgmii_100_no_ssc_tx_ln_regs),
};
static struct cdns_torrent_vals ti_sgmii_100_no_ssc_tx_ln_vals = {
.reg_pairs = ti_sgmii_100_no_ssc_tx_ln_regs,
.num_regs = ARRAY_SIZE(ti_sgmii_100_no_ssc_tx_ln_regs),
};
static struct cdns_torrent_vals sgmii_100_no_ssc_rx_ln_vals = {
.reg_pairs = sgmii_100_no_ssc_rx_ln_regs,
.num_regs = ARRAY_SIZE(sgmii_100_no_ssc_rx_ln_regs),
};
/* SGMII 100 MHz Ref clk, internal SSC */
static struct cdns_reg_pairs sgmii_100_int_ssc_cmn_regs[] = {
{0x0004, CMN_PLL0_DSM_DIAG_M0},
{0x0004, CMN_PLL0_DSM_DIAG_M1},
{0x0004, CMN_PLL1_DSM_DIAG_M0},
{0x0509, CMN_PDIAG_PLL0_CP_PADJ_M0},
{0x0509, CMN_PDIAG_PLL0_CP_PADJ_M1},
{0x0509, CMN_PDIAG_PLL1_CP_PADJ_M0},
{0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M0},
{0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M1},
{0x0F00, CMN_PDIAG_PLL1_CP_IADJ_M0},
{0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M0},
{0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M1},
{0x0F08, CMN_PDIAG_PLL1_FILT_PADJ_M0},
{0x0064, CMN_PLL0_INTDIV_M0},
{0x0050, CMN_PLL0_INTDIV_M1},
{0x0064, CMN_PLL1_INTDIV_M0},
{0x0002, CMN_PLL0_FRACDIVH_M0},
{0x0002, CMN_PLL0_FRACDIVH_M1},
{0x0002, CMN_PLL1_FRACDIVH_M0},
{0x0044, CMN_PLL0_HIGH_THR_M0},
{0x0036, CMN_PLL0_HIGH_THR_M1},
{0x0044, CMN_PLL1_HIGH_THR_M0},
{0x0002, CMN_PDIAG_PLL0_CTRL_M0},
{0x0002, CMN_PDIAG_PLL0_CTRL_M1},
{0x0002, CMN_PDIAG_PLL1_CTRL_M0},
{0x0001, CMN_PLL0_SS_CTRL1_M0},
{0x0001, CMN_PLL0_SS_CTRL1_M1},
{0x0001, CMN_PLL1_SS_CTRL1_M0},
{0x011B, CMN_PLL0_SS_CTRL2_M0},
{0x011B, CMN_PLL0_SS_CTRL2_M1},
{0x011B, CMN_PLL1_SS_CTRL2_M0},
{0x006E, CMN_PLL0_SS_CTRL3_M0},
{0x0058, CMN_PLL0_SS_CTRL3_M1},
{0x006E, CMN_PLL1_SS_CTRL3_M0},
{0x000E, CMN_PLL0_SS_CTRL4_M0},
{0x0012, CMN_PLL0_SS_CTRL4_M1},
{0x000E, CMN_PLL1_SS_CTRL4_M0},
{0x0C5E, CMN_PLL0_VCOCAL_REFTIM_START},
{0x0C5E, CMN_PLL1_VCOCAL_REFTIM_START},
{0x0C56, CMN_PLL0_VCOCAL_PLLCNT_START},
{0x0C56, CMN_PLL1_VCOCAL_PLLCNT_START},
{0x00C7, CMN_PLL0_LOCK_REFCNT_START},
{0x00C7, CMN_PLL1_LOCK_REFCNT_START},
{0x00C7, CMN_PLL0_LOCK_PLLCNT_START},
{0x00C7, CMN_PLL1_LOCK_PLLCNT_START},
{0x0005, CMN_PLL0_LOCK_PLLCNT_THR},
{0x0005, CMN_PLL1_LOCK_PLLCNT_THR},
{0x007F, CMN_TXPUCAL_TUNE},
{0x007F, CMN_TXPDCAL_TUNE}
};
static struct cdns_torrent_vals sgmii_100_int_ssc_cmn_vals = {
.reg_pairs = sgmii_100_int_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(sgmii_100_int_ssc_cmn_regs),
};
/* QSGMII 100 MHz Ref clk, no SSC */
static struct cdns_reg_pairs sl_qsgmii_100_no_ssc_cmn_regs[] = {
{0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0},
{0x001E, CMN_PLL1_DSM_FBH_OVRD_M0},
{0x000C, CMN_PLL1_DSM_FBL_OVRD_M0},
{0x0003, CMN_PLL0_VCOCAL_TCTRL},
{0x0003, CMN_PLL1_VCOCAL_TCTRL}
};
static struct cdns_torrent_vals sl_qsgmii_100_no_ssc_cmn_vals = {
.reg_pairs = sl_qsgmii_100_no_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(sl_qsgmii_100_no_ssc_cmn_regs),
};
static struct cdns_reg_pairs qsgmii_100_no_ssc_cmn_regs[] = {
{0x007F, CMN_TXPUCAL_TUNE},
{0x007F, CMN_TXPDCAL_TUNE}
};
static struct cdns_reg_pairs qsgmii_100_no_ssc_tx_ln_regs[] = {
{0x00F3, TX_PSC_A0},
{0x04A2, TX_PSC_A2},
{0x04A2, TX_PSC_A3},
{0x0000, TX_TXCC_CPOST_MULT_00},
{0x0011, TX_TXCC_MGNFS_MULT_100},
{0x0003, DRV_DIAG_TX_DRV}
};
static struct cdns_reg_pairs ti_qsgmii_100_no_ssc_tx_ln_regs[] = {
{0x00F3, TX_PSC_A0},
{0x04A2, TX_PSC_A2},
{0x04A2, TX_PSC_A3},
{0x0000, TX_TXCC_CPOST_MULT_00},
{0x0011, TX_TXCC_MGNFS_MULT_100},
{0x0003, DRV_DIAG_TX_DRV},
{0x4000, XCVR_DIAG_RXCLK_CTRL},
};
static struct cdns_reg_pairs qsgmii_100_no_ssc_rx_ln_regs[] = {
{0x091D, RX_PSC_A0},
{0x0900, RX_PSC_A2},
{0x0100, RX_PSC_A3},
{0x03C7, RX_REE_GCSM1_EQENM_PH1},
{0x01C7, RX_REE_GCSM1_EQENM_PH2},
{0x0000, RX_DIAG_DFE_CTRL},
{0x0019, RX_REE_TAP1_CLIP},
{0x0019, RX_REE_TAP2TON_CLIP},
{0x0098, RX_DIAG_NQST_CTRL},
{0x0C01, RX_DIAG_DFE_AMP_TUNE_2},
{0x0000, RX_DIAG_DFE_AMP_TUNE_3},
{0x0000, RX_DIAG_PI_CAP},
{0x0010, RX_DIAG_PI_RATE},
{0x0001, RX_DIAG_ACYA},
{0x018C, RX_CDRLF_CNFG},
};
static struct cdns_torrent_vals qsgmii_100_no_ssc_cmn_vals = {
.reg_pairs = qsgmii_100_no_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_cmn_regs),
};
static struct cdns_torrent_vals qsgmii_100_no_ssc_tx_ln_vals = {
.reg_pairs = qsgmii_100_no_ssc_tx_ln_regs,
.num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_tx_ln_regs),
};
static struct cdns_torrent_vals ti_qsgmii_100_no_ssc_tx_ln_vals = {
.reg_pairs = ti_qsgmii_100_no_ssc_tx_ln_regs,
.num_regs = ARRAY_SIZE(ti_qsgmii_100_no_ssc_tx_ln_regs),
};
static struct cdns_torrent_vals qsgmii_100_no_ssc_rx_ln_vals = {
.reg_pairs = qsgmii_100_no_ssc_rx_ln_regs,
.num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_rx_ln_regs),
};
/* QSGMII 100 MHz Ref clk, internal SSC */
static struct cdns_reg_pairs qsgmii_100_int_ssc_cmn_regs[] = {
{0x0004, CMN_PLL0_DSM_DIAG_M0},
{0x0004, CMN_PLL0_DSM_DIAG_M1},
{0x0004, CMN_PLL1_DSM_DIAG_M0},
{0x0509, CMN_PDIAG_PLL0_CP_PADJ_M0},
{0x0509, CMN_PDIAG_PLL0_CP_PADJ_M1},
{0x0509, CMN_PDIAG_PLL1_CP_PADJ_M0},
{0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M0},
{0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M1},
{0x0F00, CMN_PDIAG_PLL1_CP_IADJ_M0},
{0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M0},
{0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M1},
{0x0F08, CMN_PDIAG_PLL1_FILT_PADJ_M0},
{0x0064, CMN_PLL0_INTDIV_M0},
{0x0050, CMN_PLL0_INTDIV_M1},
{0x0064, CMN_PLL1_INTDIV_M0},
{0x0002, CMN_PLL0_FRACDIVH_M0},
{0x0002, CMN_PLL0_FRACDIVH_M1},
{0x0002, CMN_PLL1_FRACDIVH_M0},
{0x0044, CMN_PLL0_HIGH_THR_M0},
{0x0036, CMN_PLL0_HIGH_THR_M1},
{0x0044, CMN_PLL1_HIGH_THR_M0},
{0x0002, CMN_PDIAG_PLL0_CTRL_M0},
{0x0002, CMN_PDIAG_PLL0_CTRL_M1},
{0x0002, CMN_PDIAG_PLL1_CTRL_M0},
{0x0001, CMN_PLL0_SS_CTRL1_M0},
{0x0001, CMN_PLL0_SS_CTRL1_M1},
{0x0001, CMN_PLL1_SS_CTRL1_M0},
{0x011B, CMN_PLL0_SS_CTRL2_M0},
{0x011B, CMN_PLL0_SS_CTRL2_M1},
{0x011B, CMN_PLL1_SS_CTRL2_M0},
{0x006E, CMN_PLL0_SS_CTRL3_M0},
{0x0058, CMN_PLL0_SS_CTRL3_M1},
{0x006E, CMN_PLL1_SS_CTRL3_M0},
{0x000E, CMN_PLL0_SS_CTRL4_M0},
{0x0012, CMN_PLL0_SS_CTRL4_M1},
{0x000E, CMN_PLL1_SS_CTRL4_M0},
{0x0C5E, CMN_PLL0_VCOCAL_REFTIM_START},
{0x0C5E, CMN_PLL1_VCOCAL_REFTIM_START},
{0x0C56, CMN_PLL0_VCOCAL_PLLCNT_START},
{0x0C56, CMN_PLL1_VCOCAL_PLLCNT_START},
{0x00C7, CMN_PLL0_LOCK_REFCNT_START},
{0x00C7, CMN_PLL1_LOCK_REFCNT_START},
{0x00C7, CMN_PLL0_LOCK_PLLCNT_START},
{0x00C7, CMN_PLL1_LOCK_PLLCNT_START},
{0x0005, CMN_PLL0_LOCK_PLLCNT_THR},
{0x0005, CMN_PLL1_LOCK_PLLCNT_THR},
{0x007F, CMN_TXPUCAL_TUNE},
{0x007F, CMN_TXPDCAL_TUNE}
};
static struct cdns_torrent_vals qsgmii_100_int_ssc_cmn_vals = {
.reg_pairs = qsgmii_100_int_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(qsgmii_100_int_ssc_cmn_regs),
};
/* Single SGMII/QSGMII link configuration */
static struct cdns_reg_pairs sl_sgmii_link_cmn_regs[] = {
{0x0000, PHY_PLL_CFG},
{0x0601, CMN_PDIAG_PLL0_CLK_SEL_M0}
};
static struct cdns_reg_pairs sl_sgmii_xcvr_diag_ln_regs[] = {
{0x0000, XCVR_DIAG_HSCLK_SEL},
{0x0003, XCVR_DIAG_HSCLK_DIV},
{0x0013, XCVR_DIAG_PLLDRC_CTRL}
};
static struct cdns_torrent_vals sl_sgmii_link_cmn_vals = {
.reg_pairs = sl_sgmii_link_cmn_regs,
.num_regs = ARRAY_SIZE(sl_sgmii_link_cmn_regs),
};
static struct cdns_torrent_vals sl_sgmii_xcvr_diag_ln_vals = {
.reg_pairs = sl_sgmii_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(sl_sgmii_xcvr_diag_ln_regs),
};
/* Multi link PCIe, 100 MHz Ref clk, internal SSC */
static struct cdns_reg_pairs pcie_100_int_ssc_cmn_regs[] = {
{0x0004, CMN_PLL0_DSM_DIAG_M0},
{0x0004, CMN_PLL0_DSM_DIAG_M1},
{0x0004, CMN_PLL1_DSM_DIAG_M0},
{0x0509, CMN_PDIAG_PLL0_CP_PADJ_M0},
{0x0509, CMN_PDIAG_PLL0_CP_PADJ_M1},
{0x0509, CMN_PDIAG_PLL1_CP_PADJ_M0},
{0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M0},
{0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M1},
{0x0F00, CMN_PDIAG_PLL1_CP_IADJ_M0},
{0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M0},
{0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M1},
{0x0F08, CMN_PDIAG_PLL1_FILT_PADJ_M0},
{0x0064, CMN_PLL0_INTDIV_M0},
{0x0050, CMN_PLL0_INTDIV_M1},
{0x0064, CMN_PLL1_INTDIV_M0},
{0x0002, CMN_PLL0_FRACDIVH_M0},
{0x0002, CMN_PLL0_FRACDIVH_M1},
{0x0002, CMN_PLL1_FRACDIVH_M0},
{0x0044, CMN_PLL0_HIGH_THR_M0},
{0x0036, CMN_PLL0_HIGH_THR_M1},
{0x0044, CMN_PLL1_HIGH_THR_M0},
{0x0002, CMN_PDIAG_PLL0_CTRL_M0},
{0x0002, CMN_PDIAG_PLL0_CTRL_M1},
{0x0002, CMN_PDIAG_PLL1_CTRL_M0},
{0x0001, CMN_PLL0_SS_CTRL1_M0},
{0x0001, CMN_PLL0_SS_CTRL1_M1},
{0x0001, CMN_PLL1_SS_CTRL1_M0},
{0x011B, CMN_PLL0_SS_CTRL2_M0},
{0x011B, CMN_PLL0_SS_CTRL2_M1},
{0x011B, CMN_PLL1_SS_CTRL2_M0},
{0x006E, CMN_PLL0_SS_CTRL3_M0},
{0x0058, CMN_PLL0_SS_CTRL3_M1},
{0x006E, CMN_PLL1_SS_CTRL3_M0},
{0x000E, CMN_PLL0_SS_CTRL4_M0},
{0x0012, CMN_PLL0_SS_CTRL4_M1},
{0x000E, CMN_PLL1_SS_CTRL4_M0},
{0x0C5E, CMN_PLL0_VCOCAL_REFTIM_START},
{0x0C5E, CMN_PLL1_VCOCAL_REFTIM_START},
{0x0C56, CMN_PLL0_VCOCAL_PLLCNT_START},
{0x0C56, CMN_PLL1_VCOCAL_PLLCNT_START},
{0x00C7, CMN_PLL0_LOCK_REFCNT_START},
{0x00C7, CMN_PLL1_LOCK_REFCNT_START},
{0x00C7, CMN_PLL0_LOCK_PLLCNT_START},
{0x00C7, CMN_PLL1_LOCK_PLLCNT_START},
{0x0005, CMN_PLL0_LOCK_PLLCNT_THR},
{0x0005, CMN_PLL1_LOCK_PLLCNT_THR}
};
static struct cdns_torrent_vals pcie_100_int_ssc_cmn_vals = {
.reg_pairs = pcie_100_int_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(pcie_100_int_ssc_cmn_regs),
};
/* Single link PCIe, 100 MHz Ref clk, internal SSC */
static struct cdns_reg_pairs sl_pcie_100_int_ssc_cmn_regs[] = {
{0x0004, CMN_PLL0_DSM_DIAG_M0},
{0x0004, CMN_PLL0_DSM_DIAG_M1},
{0x0004, CMN_PLL1_DSM_DIAG_M0},
{0x0509, CMN_PDIAG_PLL0_CP_PADJ_M0},
{0x0509, CMN_PDIAG_PLL0_CP_PADJ_M1},
{0x0509, CMN_PDIAG_PLL1_CP_PADJ_M0},
{0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M0},
{0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M1},
{0x0F00, CMN_PDIAG_PLL1_CP_IADJ_M0},
{0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M0},
{0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M1},
{0x0F08, CMN_PDIAG_PLL1_FILT_PADJ_M0},
{0x0064, CMN_PLL0_INTDIV_M0},
{0x0050, CMN_PLL0_INTDIV_M1},
{0x0050, CMN_PLL1_INTDIV_M0},
{0x0002, CMN_PLL0_FRACDIVH_M0},
{0x0002, CMN_PLL0_FRACDIVH_M1},
{0x0002, CMN_PLL1_FRACDIVH_M0},
{0x0044, CMN_PLL0_HIGH_THR_M0},
{0x0036, CMN_PLL0_HIGH_THR_M1},
{0x0036, CMN_PLL1_HIGH_THR_M0},
{0x0002, CMN_PDIAG_PLL0_CTRL_M0},
{0x0002, CMN_PDIAG_PLL0_CTRL_M1},
{0x0002, CMN_PDIAG_PLL1_CTRL_M0},
{0x0001, CMN_PLL0_SS_CTRL1_M0},
{0x0001, CMN_PLL0_SS_CTRL1_M1},
{0x0001, CMN_PLL1_SS_CTRL1_M0},
{0x011B, CMN_PLL0_SS_CTRL2_M0},
{0x011B, CMN_PLL0_SS_CTRL2_M1},
{0x011B, CMN_PLL1_SS_CTRL2_M0},
{0x006E, CMN_PLL0_SS_CTRL3_M0},
{0x0058, CMN_PLL0_SS_CTRL3_M1},
{0x0058, CMN_PLL1_SS_CTRL3_M0},
{0x000E, CMN_PLL0_SS_CTRL4_M0},
{0x0012, CMN_PLL0_SS_CTRL4_M1},
{0x0012, CMN_PLL1_SS_CTRL4_M0},
{0x0C5E, CMN_PLL0_VCOCAL_REFTIM_START},
{0x0C5E, CMN_PLL1_VCOCAL_REFTIM_START},
{0x0C56, CMN_PLL0_VCOCAL_PLLCNT_START},
{0x0C56, CMN_PLL1_VCOCAL_PLLCNT_START},
{0x00C7, CMN_PLL0_LOCK_REFCNT_START},
{0x00C7, CMN_PLL1_LOCK_REFCNT_START},
{0x00C7, CMN_PLL0_LOCK_PLLCNT_START},
{0x00C7, CMN_PLL1_LOCK_PLLCNT_START},
{0x0005, CMN_PLL0_LOCK_PLLCNT_THR},
{0x0005, CMN_PLL1_LOCK_PLLCNT_THR}
};
static struct cdns_torrent_vals sl_pcie_100_int_ssc_cmn_vals = {
.reg_pairs = sl_pcie_100_int_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(sl_pcie_100_int_ssc_cmn_regs),
};
/* PCIe, 100 MHz Ref clk, no SSC & external SSC */
static struct cdns_reg_pairs pcie_100_ext_no_ssc_cmn_regs[] = {
{0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0},
{0x001E, CMN_PLL1_DSM_FBH_OVRD_M0},
{0x000C, CMN_PLL1_DSM_FBL_OVRD_M0}
};
static struct cdns_reg_pairs pcie_100_ext_no_ssc_rx_ln_regs[] = {
{0x0019, RX_REE_TAP1_CLIP},
{0x0019, RX_REE_TAP2TON_CLIP},
{0x0001, RX_DIAG_ACYA}
};
static struct cdns_torrent_vals pcie_100_no_ssc_cmn_vals = {
.reg_pairs = pcie_100_ext_no_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(pcie_100_ext_no_ssc_cmn_regs),
};
static struct cdns_torrent_vals pcie_100_no_ssc_rx_ln_vals = {
.reg_pairs = pcie_100_ext_no_ssc_rx_ln_regs,
.num_regs = ARRAY_SIZE(pcie_100_ext_no_ssc_rx_ln_regs),
};
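
/*
 * Lookup tables keyed on (refclk0, refclk1, PHY type, coupled PHY type,
 * SSC mode); each entry points at one of the value sets defined above.
 * A NULL value means no extra register writes are needed for that key.
 */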
static struct cdns_torrent_vals_entry link_cmn_vals_entries[] = {
{CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_NONE), &sl_dp_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_PCIE), &pcie_dp_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_USB), &usb_dp_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_NONE), NULL},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_SGMII), &pcie_sgmii_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_QSGMII), &pcie_sgmii_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_USB), &pcie_usb_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_DP), &pcie_dp_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_NONE), &sl_sgmii_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_PCIE), &pcie_sgmii_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_USB), &usb_sgmii_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_NONE), &sl_sgmii_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_PCIE), &pcie_sgmii_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_USB), &usb_sgmii_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_NONE), &sl_usb_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_PCIE), &pcie_usb_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_SGMII), &usb_sgmii_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_QSGMII), &usb_sgmii_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_DP), &usb_dp_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_NONE), &sl_usxgmii_link_cmn_vals},
};
static struct cdns_torrent_vals_entry xcvr_diag_vals_entries[] = {
{CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_NONE), &sl_dp_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_PCIE), &dp_pcie_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_USB), &dp_usb_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_NONE), NULL},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_SGMII), &pcie_sgmii_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_QSGMII), &pcie_sgmii_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_USB), &pcie_usb_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_DP), &pcie_dp_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_NONE), &sl_sgmii_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_PCIE), &sgmii_pcie_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_USB), &sgmii_usb_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_NONE), &sl_sgmii_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_PCIE), &sgmii_pcie_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_USB), &sgmii_usb_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_NONE), &sl_usb_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_PCIE), &usb_pcie_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_SGMII), &usb_sgmii_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_QSGMII), &usb_sgmii_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_DP), &usb_dp_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_NONE), &sl_usxgmii_xcvr_diag_ln_vals},
};
static struct cdns_torrent_vals_entry pcs_cmn_vals_entries[] = {
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_NONE), &usb_phy_pcs_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_PCIE), &usb_phy_pcs_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_SGMII), &usb_phy_pcs_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_QSGMII), &usb_phy_pcs_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_DP), &usb_phy_pcs_cmn_vals},
};
static struct cdns_torrent_vals_entry cmn_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_PCIE, NO_SSC), &dp_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_USB, NO_SSC), &sl_dp_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, NO_SSC), NULL},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, EXTERNAL_SSC), NULL},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, INTERNAL_SSC), &sl_pcie_100_int_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, NO_SSC), &pcie_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, EXTERNAL_SSC), &pcie_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, INTERNAL_SSC), &pcie_100_int_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, NO_SSC), &pcie_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, EXTERNAL_SSC), &pcie_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, INTERNAL_SSC), &pcie_100_int_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, NO_SSC), &pcie_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, EXTERNAL_SSC), &pcie_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, INTERNAL_SSC), &pcie_100_int_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_DP, NO_SSC), NULL},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_NONE, NO_SSC), &sl_sgmii_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, NO_SSC), &sgmii_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, EXTERNAL_SSC), &sgmii_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, INTERNAL_SSC), &sgmii_100_int_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, NO_SSC), &sgmii_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, EXTERNAL_SSC), &sgmii_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, INTERNAL_SSC), &sgmii_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_NONE, NO_SSC), &sl_qsgmii_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, NO_SSC), &qsgmii_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, EXTERNAL_SSC), &qsgmii_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, INTERNAL_SSC), &qsgmii_100_int_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, NO_SSC), &qsgmii_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, EXTERNAL_SSC), &qsgmii_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, INTERNAL_SSC), &qsgmii_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, NO_SSC), &sl_usb_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, EXTERNAL_SSC), &sl_usb_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, INTERNAL_SSC), &sl_usb_100_int_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, NO_SSC), &usb_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, EXTERNAL_SSC), &usb_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, INTERNAL_SSC), &usb_100_int_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, NO_SSC), &sl_usb_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, EXTERNAL_SSC), &sl_usb_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, INTERNAL_SSC), &sl_usb_100_int_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, NO_SSC), &sl_usb_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, EXTERNAL_SSC), &sl_usb_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, INTERNAL_SSC), &sl_usb_100_int_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_DP, NO_SSC), &usb_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_156_25_MHZ, TYPE_USXGMII, TYPE_NONE, NO_SSC), &sl_usxgmii_156_25_no_ssc_cmn_vals},
};
static struct cdns_torrent_vals_entry cdns_tx_ln_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_PCIE, NO_SSC), &dp_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_USB, NO_SSC), &dp_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, NO_SSC), NULL},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, EXTERNAL_SSC), NULL},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, INTERNAL_SSC), NULL},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, NO_SSC), NULL},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, EXTERNAL_SSC), NULL},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, INTERNAL_SSC), NULL},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, NO_SSC), NULL},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, EXTERNAL_SSC), NULL},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, INTERNAL_SSC), NULL},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, NO_SSC), NULL},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, EXTERNAL_SSC), NULL},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, INTERNAL_SSC), NULL},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_DP, NO_SSC), NULL},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_NONE, NO_SSC), &sgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, NO_SSC), &sgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, EXTERNAL_SSC), &sgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, INTERNAL_SSC), &sgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, NO_SSC), &sgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, EXTERNAL_SSC), &sgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, INTERNAL_SSC), &sgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_NONE, NO_SSC), &qsgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, NO_SSC), &qsgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, EXTERNAL_SSC), &qsgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, INTERNAL_SSC), &qsgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, NO_SSC), &qsgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, EXTERNAL_SSC), &qsgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, INTERNAL_SSC), &qsgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_DP, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_156_25_MHZ, TYPE_USXGMII, TYPE_NONE, NO_SSC), &usxgmii_156_25_no_ssc_tx_ln_vals},
};
static struct cdns_torrent_vals_entry cdns_rx_ln_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_PCIE, NO_SSC), &dp_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_USB, NO_SSC), &dp_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, NO_SSC), &pcie_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, EXTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, INTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, NO_SSC), &pcie_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, EXTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, INTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, NO_SSC), &pcie_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, EXTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, INTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, NO_SSC), &pcie_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, EXTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, INTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_DP, NO_SSC), &pcie_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_NONE, NO_SSC), &sgmii_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, NO_SSC), &sgmii_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, EXTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, INTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, NO_SSC), &sgmii_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, EXTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, INTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_NONE, NO_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, NO_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, EXTERNAL_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, INTERNAL_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, NO_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, EXTERNAL_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, INTERNAL_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, NO_SSC), &usb_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, EXTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, INTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, NO_SSC), &usb_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, EXTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, INTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, NO_SSC), &usb_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, EXTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, INTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, NO_SSC), &usb_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, EXTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, INTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_DP, NO_SSC), &usb_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_156_25_MHZ, TYPE_USXGMII, TYPE_NONE, NO_SSC), &usxgmii_156_25_no_ssc_rx_ln_vals},
};
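
/* Default Cadence Torrent platform data: register offset shifts and value tables. */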
static const struct cdns_torrent_data cdns_map_torrent = {
.block_offset_shift = 0x2,
.reg_offset_shift = 0x2,
.link_cmn_vals_tbl = {
.entries = link_cmn_vals_entries,
.num_entries = ARRAY_SIZE(link_cmn_vals_entries),
},
.xcvr_diag_vals_tbl = {
.entries = xcvr_diag_vals_entries,
.num_entries = ARRAY_SIZE(xcvr_diag_vals_entries),
},
.pcs_cmn_vals_tbl = {
.entries = pcs_cmn_vals_entries,
.num_entries = ARRAY_SIZE(pcs_cmn_vals_entries),
},
.cmn_vals_tbl = {
.entries = cmn_vals_entries,
.num_entries = ARRAY_SIZE(cmn_vals_entries),
},
.tx_ln_vals_tbl = {
.entries = cdns_tx_ln_vals_entries,
.num_entries = ARRAY_SIZE(cdns_tx_ln_vals_entries),
},
.rx_ln_vals_tbl = {
.entries = cdns_rx_ln_vals_entries,
.num_entries = ARRAY_SIZE(cdns_rx_ln_vals_entries),
},
};
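
/* TI J721E overrides: USXGMII PMA common setting and TI-specific TX lane tables. */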
static struct cdns_torrent_vals_entry j721e_phy_pma_cmn_vals_entries[] = {
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_NONE), &ti_usxgmii_phy_pma_cmn_vals},
};
static struct cdns_torrent_vals_entry ti_tx_ln_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_PCIE, NO_SSC), &dp_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_USB, NO_SSC), &dp_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, NO_SSC), NULL},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, EXTERNAL_SSC), NULL},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, INTERNAL_SSC), NULL},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, NO_SSC), NULL},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, EXTERNAL_SSC), NULL},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, INTERNAL_SSC), NULL},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, NO_SSC), NULL},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, EXTERNAL_SSC), NULL},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, INTERNAL_SSC), NULL},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, NO_SSC), NULL},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, EXTERNAL_SSC), NULL},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, INTERNAL_SSC), NULL},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_DP, NO_SSC), NULL},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_NONE, NO_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, NO_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, EXTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, INTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, NO_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, EXTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, INTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_NONE, NO_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, NO_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, EXTERNAL_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, INTERNAL_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, NO_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, EXTERNAL_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, INTERNAL_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_DP, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_156_25_MHZ, TYPE_USXGMII, TYPE_NONE, NO_SSC), &usxgmii_156_25_no_ssc_tx_ln_vals},
};
static const struct cdns_torrent_data ti_j721e_map_torrent = {
.block_offset_shift = 0x0,
.reg_offset_shift = 0x1,
.link_cmn_vals_tbl = {
.entries = link_cmn_vals_entries,
.num_entries = ARRAY_SIZE(link_cmn_vals_entries),
},
.xcvr_diag_vals_tbl = {
.entries = xcvr_diag_vals_entries,
.num_entries = ARRAY_SIZE(xcvr_diag_vals_entries),
},
.pcs_cmn_vals_tbl = {
.entries = pcs_cmn_vals_entries,
.num_entries = ARRAY_SIZE(pcs_cmn_vals_entries),
},
.phy_pma_cmn_vals_tbl = {
.entries = j721e_phy_pma_cmn_vals_entries,
.num_entries = ARRAY_SIZE(j721e_phy_pma_cmn_vals_entries),
},
.cmn_vals_tbl = {
.entries = cmn_vals_entries,
.num_entries = ARRAY_SIZE(cmn_vals_entries),
},
.tx_ln_vals_tbl = {
.entries = ti_tx_ln_vals_entries,
.num_entries = ARRAY_SIZE(ti_tx_ln_vals_entries),
},
.rx_ln_vals_tbl = {
.entries = cdns_rx_ln_vals_entries,
.num_entries = ARRAY_SIZE(cdns_rx_ln_vals_entries),
},
};
static const struct of_device_id cdns_torrent_phy_of_match[] = {
{
.compatible = "cdns,torrent-phy",
.data = &cdns_map_torrent,
},
{
.compatible = "ti,j721e-serdes-10g",
.data = &ti_j721e_map_torrent,
},
{}
};
MODULE_DEVICE_TABLE(of, cdns_torrent_phy_of_match);
static struct platform_driver cdns_torrent_phy_driver = {
.probe = cdns_torrent_phy_probe,
.remove_new = cdns_torrent_phy_remove,
.driver = {
.name = "cdns-torrent-phy",
.of_match_table = cdns_torrent_phy_of_match,
}
};
module_platform_driver(cdns_torrent_phy_driver);
MODULE_AUTHOR("Cadence Design Systems, Inc.");
MODULE_DESCRIPTION("Cadence Torrent PHY driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/phy/cadence/phy-cadence-torrent.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright: 2017-2018 Cadence Design Systems, Inc.
*/
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/phy/phy.h>
#include <linux/phy/phy-mipi-dphy.h>
#define REG_WAKEUP_TIME_NS 800
#define DPHY_PLL_RATE_HZ 108000000
#define POLL_TIMEOUT_US 1000
/* DPHY registers */
#define DPHY_PMA_CMN(reg) (reg)
#define DPHY_PMA_LCLK(reg) (0x100 + (reg))
#define DPHY_PMA_LDATA(lane, reg) (0x200 + ((lane) * 0x100) + (reg))
#define DPHY_PMA_RCLK(reg) (0x600 + (reg))
#define DPHY_PMA_RDATA(lane, reg) (0x700 + ((lane) * 0x100) + (reg))
#define DPHY_PCS(reg) (0xb00 + (reg))
#define DPHY_CMN_SSM DPHY_PMA_CMN(0x20)
#define DPHY_CMN_SSM_EN BIT(0)
#define DPHY_CMN_TX_MODE_EN BIT(9)
#define DPHY_CMN_PWM DPHY_PMA_CMN(0x40)
#define DPHY_CMN_PWM_DIV(x) ((x) << 20)
#define DPHY_CMN_PWM_LOW(x) ((x) << 10)
#define DPHY_CMN_PWM_HIGH(x) (x)
#define DPHY_CMN_FBDIV DPHY_PMA_CMN(0x4c)
#define DPHY_CMN_FBDIV_VAL(low, high) (((high) << 11) | ((low) << 22))
#define DPHY_CMN_FBDIV_FROM_REG (BIT(10) | BIT(21))
#define DPHY_CMN_OPIPDIV DPHY_PMA_CMN(0x50)
#define DPHY_CMN_IPDIV_FROM_REG BIT(0)
#define DPHY_CMN_IPDIV(x) ((x) << 1)
#define DPHY_CMN_OPDIV_FROM_REG BIT(6)
#define DPHY_CMN_OPDIV(x) ((x) << 7)
#define DPHY_BAND_CFG DPHY_PCS(0x0)
#define DPHY_BAND_CFG_LEFT_BAND GENMASK(4, 0)
#define DPHY_BAND_CFG_RIGHT_BAND GENMASK(9, 5)
#define DPHY_PSM_CFG DPHY_PCS(0x4)
#define DPHY_PSM_CFG_FROM_REG BIT(0)
#define DPHY_PSM_CLK_DIV(x) ((x) << 1)
#define DSI_HBP_FRAME_OVERHEAD 12
#define DSI_HSA_FRAME_OVERHEAD 14
#define DSI_HFP_FRAME_OVERHEAD 6
#define DSI_HSS_VSS_VSE_FRAME_OVERHEAD 4
#define DSI_BLANKING_FRAME_OVERHEAD 6
#define DSI_NULL_FRAME_OVERHEAD 6
#define DSI_EOT_PKT_SIZE 4
#define DPHY_TX_J721E_WIZ_PLL_CTRL 0xF04
#define DPHY_TX_J721E_WIZ_STATUS 0xF08
#define DPHY_TX_J721E_WIZ_RST_CTRL 0xF0C
#define DPHY_TX_J721E_WIZ_PSM_FREQ 0xF10
#define DPHY_TX_J721E_WIZ_IPDIV GENMASK(4, 0)
#define DPHY_TX_J721E_WIZ_OPDIV GENMASK(13, 8)
#define DPHY_TX_J721E_WIZ_FBDIV GENMASK(25, 16)
#define DPHY_TX_J721E_WIZ_LANE_RSTB BIT(31)
#define DPHY_TX_WIZ_PLL_LOCK BIT(31)
#define DPHY_TX_WIZ_O_CMN_READY BIT(31)
struct cdns_dphy_cfg {
u8 pll_ipdiv;
u8 pll_opdiv;
u16 pll_fbdiv;
unsigned int nlanes;
};
enum cdns_dphy_clk_lane_cfg {
DPHY_CLK_CFG_LEFT_DRIVES_ALL = 0,
DPHY_CLK_CFG_LEFT_DRIVES_RIGHT = 1,
DPHY_CLK_CFG_LEFT_DRIVES_LEFT = 2,
DPHY_CLK_CFG_RIGHT_DRIVES_ALL = 3,
};
struct cdns_dphy;
struct cdns_dphy_ops {
int (*probe)(struct cdns_dphy *dphy);
void (*remove)(struct cdns_dphy *dphy);
void (*set_psm_div)(struct cdns_dphy *dphy, u8 div);
void (*set_clk_lane_cfg)(struct cdns_dphy *dphy,
enum cdns_dphy_clk_lane_cfg cfg);
void (*set_pll_cfg)(struct cdns_dphy *dphy,
const struct cdns_dphy_cfg *cfg);
unsigned long (*get_wakeup_time_ns)(struct cdns_dphy *dphy);
};
struct cdns_dphy {
struct cdns_dphy_cfg cfg;
void __iomem *regs;
struct clk *psm_clk;
struct clk *pll_ref_clk;
const struct cdns_dphy_ops *ops;
struct phy *phy;
};
/* Order of bands is important since the index is the band number. */
static const unsigned int tx_bands[] = {
80, 100, 120, 160, 200, 240, 320, 390, 450, 510, 560, 640, 690, 770,
870, 950, 1000, 1200, 1400, 1600, 1800, 2000, 2200, 2500
};
static int cdns_dsi_get_dphy_pll_cfg(struct cdns_dphy *dphy,
struct cdns_dphy_cfg *cfg,
struct phy_configure_opts_mipi_dphy *opts,
unsigned int *dsi_hfp_ext)
{
unsigned long pll_ref_hz = clk_get_rate(dphy->pll_ref_clk);
u64 dlane_bps;
memset(cfg, 0, sizeof(*cfg));
if (pll_ref_hz < 9600000 || pll_ref_hz >= 150000000)
return -EINVAL;
else if (pll_ref_hz < 19200000)
cfg->pll_ipdiv = 1;
else if (pll_ref_hz < 38400000)
cfg->pll_ipdiv = 2;
else if (pll_ref_hz < 76800000)
cfg->pll_ipdiv = 4;
else
cfg->pll_ipdiv = 8;
dlane_bps = opts->hs_clk_rate;
if (dlane_bps > 2500000000UL || dlane_bps < 160000000UL)
return -EINVAL;
else if (dlane_bps >= 1250000000)
cfg->pll_opdiv = 1;
else if (dlane_bps >= 630000000)
cfg->pll_opdiv = 2;
else if (dlane_bps >= 320000000)
cfg->pll_opdiv = 4;
else if (dlane_bps >= 160000000)
cfg->pll_opdiv = 8;
cfg->pll_fbdiv = DIV_ROUND_UP_ULL(dlane_bps * 2 * cfg->pll_opdiv *
cfg->pll_ipdiv,
pll_ref_hz);
return 0;
}
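/*
 * Illustrative worked example (added note, not part of the original driver):
 * with a 25 MHz PLL reference clock and a requested hs_clk_rate of 800 Mbps
 * per data lane, the helper above picks pll_ipdiv = 2
 * (19.2 MHz <= ref < 38.4 MHz) and pll_opdiv = 2 (630 Mbps <= rate < 1.25 Gbps),
 * so pll_fbdiv = DIV_ROUND_UP(800e6 * 2 * 2 * 2, 25e6) = 256.
 */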
static int cdns_dphy_setup_psm(struct cdns_dphy *dphy)
{
unsigned long psm_clk_hz = clk_get_rate(dphy->psm_clk);
unsigned long psm_div;
if (!psm_clk_hz || psm_clk_hz > 100000000)
return -EINVAL;
psm_div = DIV_ROUND_CLOSEST(psm_clk_hz, 1000000);
if (dphy->ops->set_psm_div)
dphy->ops->set_psm_div(dphy, psm_div);
return 0;
}
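/*
 * Illustrative note (added, not part of the original driver): with a 96 MHz
 * PSM clock, psm_div = DIV_ROUND_CLOSEST(96000000, 1000000) = 96, giving the
 * PHY state machine a clock of roughly 1 MHz as intended.
 */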
static void cdns_dphy_set_clk_lane_cfg(struct cdns_dphy *dphy,
enum cdns_dphy_clk_lane_cfg cfg)
{
if (dphy->ops->set_clk_lane_cfg)
dphy->ops->set_clk_lane_cfg(dphy, cfg);
}
static void cdns_dphy_set_pll_cfg(struct cdns_dphy *dphy,
const struct cdns_dphy_cfg *cfg)
{
if (dphy->ops->set_pll_cfg)
dphy->ops->set_pll_cfg(dphy, cfg);
}
static unsigned long cdns_dphy_get_wakeup_time_ns(struct cdns_dphy *dphy)
{
return dphy->ops->get_wakeup_time_ns(dphy);
}
static unsigned long cdns_dphy_ref_get_wakeup_time_ns(struct cdns_dphy *dphy)
{
/* Default wakeup time is 800 ns (in a simulated environment). */
return 800;
}
static void cdns_dphy_ref_set_pll_cfg(struct cdns_dphy *dphy,
const struct cdns_dphy_cfg *cfg)
{
u32 fbdiv_low, fbdiv_high;
fbdiv_low = (cfg->pll_fbdiv / 4) - 2;
fbdiv_high = cfg->pll_fbdiv - fbdiv_low - 2;
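/*
 * Added note (not part of the original driver): the feedback divider is split
 * into two fields packed by DPHY_CMN_FBDIV_VAL(). For example, with
 * pll_fbdiv = 256 this yields fbdiv_low = 256 / 4 - 2 = 62 and
 * fbdiv_high = 256 - 62 - 2 = 192.
 */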
writel(DPHY_CMN_IPDIV_FROM_REG | DPHY_CMN_OPDIV_FROM_REG |
DPHY_CMN_IPDIV(cfg->pll_ipdiv) |
DPHY_CMN_OPDIV(cfg->pll_opdiv),
dphy->regs + DPHY_CMN_OPIPDIV);
writel(DPHY_CMN_FBDIV_FROM_REG |
DPHY_CMN_FBDIV_VAL(fbdiv_low, fbdiv_high),
dphy->regs + DPHY_CMN_FBDIV);
writel(DPHY_CMN_PWM_HIGH(6) | DPHY_CMN_PWM_LOW(0x101) |
DPHY_CMN_PWM_DIV(0x8),
dphy->regs + DPHY_CMN_PWM);
}
static void cdns_dphy_ref_set_psm_div(struct cdns_dphy *dphy, u8 div)
{
writel(DPHY_PSM_CFG_FROM_REG | DPHY_PSM_CLK_DIV(div),
dphy->regs + DPHY_PSM_CFG);
}
static unsigned long cdns_dphy_j721e_get_wakeup_time_ns(struct cdns_dphy *dphy)
{
/* Minimum wakeup time as per MIPI D-PHY spec v1.2 */
return 1000000;
}
static void cdns_dphy_j721e_set_pll_cfg(struct cdns_dphy *dphy,
const struct cdns_dphy_cfg *cfg)
{
u32 status;
/*
* Set the PWM and PLL byte-clock divider settings to the recommended
* values, the same as those used in the reference ops.
*/
writel(DPHY_CMN_PWM_HIGH(6) | DPHY_CMN_PWM_LOW(0x101) |
DPHY_CMN_PWM_DIV(0x8),
dphy->regs + DPHY_CMN_PWM);
writel((FIELD_PREP(DPHY_TX_J721E_WIZ_IPDIV, cfg->pll_ipdiv) |
FIELD_PREP(DPHY_TX_J721E_WIZ_OPDIV, cfg->pll_opdiv) |
FIELD_PREP(DPHY_TX_J721E_WIZ_FBDIV, cfg->pll_fbdiv)),
dphy->regs + DPHY_TX_J721E_WIZ_PLL_CTRL);
writel(DPHY_TX_J721E_WIZ_LANE_RSTB,
dphy->regs + DPHY_TX_J721E_WIZ_RST_CTRL);
readl_poll_timeout(dphy->regs + DPHY_TX_J721E_WIZ_PLL_CTRL, status,
(status & DPHY_TX_WIZ_PLL_LOCK), 0, POLL_TIMEOUT_US);
readl_poll_timeout(dphy->regs + DPHY_TX_J721E_WIZ_STATUS, status,
(status & DPHY_TX_WIZ_O_CMN_READY), 0,
POLL_TIMEOUT_US);
}
static void cdns_dphy_j721e_set_psm_div(struct cdns_dphy *dphy, u8 div)
{
writel(div, dphy->regs + DPHY_TX_J721E_WIZ_PSM_FREQ);
}
/*
* This is the reference implementation of the DPHY hooks. A specific
* integration of this IP may have to re-implement some of them, depending
* on how things are wired up in the SoC.
*/
static const struct cdns_dphy_ops ref_dphy_ops = {
.get_wakeup_time_ns = cdns_dphy_ref_get_wakeup_time_ns,
.set_pll_cfg = cdns_dphy_ref_set_pll_cfg,
.set_psm_div = cdns_dphy_ref_set_psm_div,
};
static const struct cdns_dphy_ops j721e_dphy_ops = {
.get_wakeup_time_ns = cdns_dphy_j721e_get_wakeup_time_ns,
.set_pll_cfg = cdns_dphy_j721e_set_pll_cfg,
.set_psm_div = cdns_dphy_j721e_set_psm_div,
};
static int cdns_dphy_config_from_opts(struct phy *phy,
struct phy_configure_opts_mipi_dphy *opts,
struct cdns_dphy_cfg *cfg)
{
struct cdns_dphy *dphy = phy_get_drvdata(phy);
unsigned int dsi_hfp_ext = 0;
int ret;
ret = phy_mipi_dphy_config_validate(opts);
if (ret)
return ret;
ret = cdns_dsi_get_dphy_pll_cfg(dphy, cfg,
opts, &dsi_hfp_ext);
if (ret)
return ret;
opts->wakeup = cdns_dphy_get_wakeup_time_ns(dphy) / 1000;
return 0;
}
static int cdns_dphy_tx_get_band_ctrl(unsigned long hs_clk_rate)
{
unsigned int rate;
int i;
rate = hs_clk_rate / 1000000UL;
if (rate < tx_bands[0])
return -EOPNOTSUPP;
for (i = 0; i < ARRAY_SIZE(tx_bands) - 1; i++) {
if (rate >= tx_bands[i] && rate < tx_bands[i + 1])
return i;
}
return -EOPNOTSUPP;
}
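/*
 * Illustrative example (added note, not part of the original driver): for
 * hs_clk_rate = 800 MHz, rate = 800 falls in the [770, 870) interval of
 * tx_bands[], so the function returns band index 13; rates below 80 MHz or
 * at/above 2500 MHz return -EOPNOTSUPP.
 */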
static int cdns_dphy_validate(struct phy *phy, enum phy_mode mode, int submode,
union phy_configure_opts *opts)
{
struct cdns_dphy_cfg cfg = { 0 };
if (mode != PHY_MODE_MIPI_DPHY)
return -EINVAL;
return cdns_dphy_config_from_opts(phy, &opts->mipi_dphy, &cfg);
}
static int cdns_dphy_configure(struct phy *phy, union phy_configure_opts *opts)
{
struct cdns_dphy *dphy = phy_get_drvdata(phy);
struct cdns_dphy_cfg cfg = { 0 };
int ret, band_ctrl;
unsigned int reg;
ret = cdns_dphy_config_from_opts(phy, &opts->mipi_dphy, &cfg);
if (ret)
return ret;
/*
* Configure the internal PSM clk divider so that the DPHY has a
* 1MHz clk (or something close).
*/
ret = cdns_dphy_setup_psm(dphy);
if (ret)
return ret;
/*
* Configure how the clock lanes are attached to the data lanes: the DPHY has
* 2 clock lanes and 8 data lanes, and each clock lane can be attached to a
* different set of data lanes. The 2 groups are named 'left' and 'right', so
* here we just say that we want the 'left' clock lane to drive the 'left'
* data lanes.
*/
cdns_dphy_set_clk_lane_cfg(dphy, DPHY_CLK_CFG_LEFT_DRIVES_LEFT);
/*
* Configure the DPHY PLL that will be used to generate the TX byte
* clk.
*/
cdns_dphy_set_pll_cfg(dphy, &cfg);
band_ctrl = cdns_dphy_tx_get_band_ctrl(opts->mipi_dphy.hs_clk_rate);
if (band_ctrl < 0)
return band_ctrl;
reg = FIELD_PREP(DPHY_BAND_CFG_LEFT_BAND, band_ctrl) |
FIELD_PREP(DPHY_BAND_CFG_RIGHT_BAND, band_ctrl);
writel(reg, dphy->regs + DPHY_BAND_CFG);
return 0;
}
static int cdns_dphy_power_on(struct phy *phy)
{
struct cdns_dphy *dphy = phy_get_drvdata(phy);
clk_prepare_enable(dphy->psm_clk);
clk_prepare_enable(dphy->pll_ref_clk);
/* Start TX state machine. */
writel(DPHY_CMN_SSM_EN | DPHY_CMN_TX_MODE_EN,
dphy->regs + DPHY_CMN_SSM);
return 0;
}
static int cdns_dphy_power_off(struct phy *phy)
{
struct cdns_dphy *dphy = phy_get_drvdata(phy);
clk_disable_unprepare(dphy->pll_ref_clk);
clk_disable_unprepare(dphy->psm_clk);
return 0;
}
static const struct phy_ops cdns_dphy_ops = {
.configure = cdns_dphy_configure,
.validate = cdns_dphy_validate,
.power_on = cdns_dphy_power_on,
.power_off = cdns_dphy_power_off,
};
static int cdns_dphy_probe(struct platform_device *pdev)
{
struct phy_provider *phy_provider;
struct cdns_dphy *dphy;
int ret;
dphy = devm_kzalloc(&pdev->dev, sizeof(*dphy), GFP_KERNEL);
if (!dphy)
return -ENOMEM;
dev_set_drvdata(&pdev->dev, dphy);
dphy->ops = of_device_get_match_data(&pdev->dev);
if (!dphy->ops)
return -EINVAL;
dphy->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dphy->regs))
return PTR_ERR(dphy->regs);
dphy->psm_clk = devm_clk_get(&pdev->dev, "psm");
if (IS_ERR(dphy->psm_clk))
return PTR_ERR(dphy->psm_clk);
dphy->pll_ref_clk = devm_clk_get(&pdev->dev, "pll_ref");
if (IS_ERR(dphy->pll_ref_clk))
return PTR_ERR(dphy->pll_ref_clk);
if (dphy->ops->probe) {
ret = dphy->ops->probe(dphy);
if (ret)
return ret;
}
dphy->phy = devm_phy_create(&pdev->dev, NULL, &cdns_dphy_ops);
if (IS_ERR(dphy->phy)) {
dev_err(&pdev->dev, "failed to create PHY\n");
if (dphy->ops->remove)
dphy->ops->remove(dphy);
return PTR_ERR(dphy->phy);
}
phy_set_drvdata(dphy->phy, dphy);
phy_provider = devm_of_phy_provider_register(&pdev->dev,
of_phy_simple_xlate);
return PTR_ERR_OR_ZERO(phy_provider);
}
static void cdns_dphy_remove(struct platform_device *pdev)
{
struct cdns_dphy *dphy = dev_get_drvdata(&pdev->dev);
if (dphy->ops->remove)
dphy->ops->remove(dphy);
}
static const struct of_device_id cdns_dphy_of_match[] = {
{ .compatible = "cdns,dphy", .data = &ref_dphy_ops },
{ .compatible = "ti,j721e-dphy", .data = &j721e_dphy_ops },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, cdns_dphy_of_match);
static struct platform_driver cdns_dphy_platform_driver = {
.probe = cdns_dphy_probe,
.remove_new = cdns_dphy_remove,
.driver = {
.name = "cdns-mipi-dphy",
.of_match_table = cdns_dphy_of_match,
},
};
module_platform_driver(cdns_dphy_platform_driver);
MODULE_AUTHOR("Maxime Ripard <[email protected]>");
MODULE_DESCRIPTION("Cadence MIPI D-PHY Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/phy/cadence/cdns-dphy.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Cadence Sierra PHY Driver
*
* Copyright (c) 2018 Cadence Design Systems
* Author: Alan Douglas <[email protected]>
*
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <dt-bindings/phy/phy.h>
#include <dt-bindings/phy/phy-cadence.h>
#define NUM_SSC_MODE 3
#define NUM_PHY_TYPE 5
/* PHY register offsets */
#define SIERRA_COMMON_CDB_OFFSET 0x0
#define SIERRA_MACRO_ID_REG 0x0
#define SIERRA_CMN_PLLLC_GEN_PREG 0x42
#define SIERRA_CMN_PLLLC_FBDIV_INT_MODE0_PREG 0x43
#define SIERRA_CMN_PLLLC_DCOCAL_CTRL_PREG 0x45
#define SIERRA_CMN_PLLLC_INIT_PREG 0x46
#define SIERRA_CMN_PLLLC_ITERTMR_PREG 0x47
#define SIERRA_CMN_PLLLC_MODE_PREG 0x48
#define SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG 0x49
#define SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG 0x4A
#define SIERRA_CMN_PLLLC_LOCK_CNTSTART_PREG 0x4B
#define SIERRA_CMN_PLLLC_LOCKSEARCH_PREG 0x4C
#define SIERRA_CMN_PLLLC_CLK1_PREG 0x4D
#define SIERRA_CMN_PLLLC_CLK0_PREG 0x4E
#define SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG 0x4F
#define SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG 0x50
#define SIERRA_CMN_PLLLC_DSMCORR_PREG 0x51
#define SIERRA_CMN_PLLLC_SS_PREG 0x52
#define SIERRA_CMN_PLLLC_SS_AMP_STEP_SIZE_PREG 0x53
#define SIERRA_CMN_PLLLC_SSTWOPT_PREG 0x54
#define SIERRA_CMN_PLLCSM_PLLEN_TMR_PREG 0x5D
#define SIERRA_CMN_PLLCSM_PLLPRE_TMR_PREG 0x5E
#define SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG 0x62
#define SIERRA_CMN_PLLLC_LOCK_DELAY_CTRL_PREG 0x63
#define SIERRA_SDOSCCAL_CLK_CNT_PREG 0x6E
#define SIERRA_CMN_REFRCV_PREG 0x98
#define SIERRA_CMN_RESCAL_CTRLA_PREG 0xA0
#define SIERRA_CMN_REFRCV1_PREG 0xB8
#define SIERRA_CMN_PLLLC1_GEN_PREG 0xC2
#define SIERRA_CMN_PLLLC1_FBDIV_INT_PREG 0xC3
#define SIERRA_CMN_PLLLC1_DCOCAL_CTRL_PREG 0xC5
#define SIERRA_CMN_PLLLC1_LF_COEFF_MODE0_PREG 0xCA
#define SIERRA_CMN_PLLLC1_CLK0_PREG 0xCE
#define SIERRA_CMN_PLLLC1_BWCAL_MODE0_PREG 0xD0
#define SIERRA_CMN_PLLLC1_SS_TIME_STEPSIZE_MODE_PREG 0xE2
#define SIERRA_LANE_CDB_OFFSET(ln, block_offset, reg_offset) \
((0x4000 << (block_offset)) + \
(((ln) << 9) << (reg_offset)))
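/*
 * Added note (not part of the original driver), using illustrative shift
 * values: the per-lane CDB base is (0x4000 << block_offset) plus the lane
 * number shifted by 9 and then by reg_offset. For example, assuming
 * block_offset = 1 and reg_offset = 1, lane 2 would map to
 * (0x4000 << 1) + ((2 << 9) << 1) = 0x8000 + 0x800 = 0x8800.
 */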
#define SIERRA_DET_STANDEC_A_PREG 0x000
#define SIERRA_DET_STANDEC_B_PREG 0x001
#define SIERRA_DET_STANDEC_C_PREG 0x002
#define SIERRA_DET_STANDEC_D_PREG 0x003
#define SIERRA_DET_STANDEC_E_PREG 0x004
#define SIERRA_PSM_LANECAL_DLY_A1_RESETS_PREG 0x008
#define SIERRA_PSM_A0IN_TMR_PREG 0x009
#define SIERRA_PSM_A3IN_TMR_PREG 0x00C
#define SIERRA_PSM_DIAG_PREG 0x015
#define SIERRA_PSC_LN_A3_PREG 0x023
#define SIERRA_PSC_LN_A4_PREG 0x024
#define SIERRA_PSC_LN_IDLE_PREG 0x026
#define SIERRA_PSC_TX_A0_PREG 0x028
#define SIERRA_PSC_TX_A1_PREG 0x029
#define SIERRA_PSC_TX_A2_PREG 0x02A
#define SIERRA_PSC_TX_A3_PREG 0x02B
#define SIERRA_PSC_RX_A0_PREG 0x030
#define SIERRA_PSC_RX_A1_PREG 0x031
#define SIERRA_PSC_RX_A2_PREG 0x032
#define SIERRA_PSC_RX_A3_PREG 0x033
#define SIERRA_PLLCTRL_FBDIV_MODE01_PREG 0x039
#define SIERRA_PLLCTRL_SUBRATE_PREG 0x03A
#define SIERRA_PLLCTRL_GEN_A_PREG 0x03B
#define SIERRA_PLLCTRL_GEN_D_PREG 0x03E
#define SIERRA_PLLCTRL_CPGAIN_MODE_PREG 0x03F
#define SIERRA_PLLCTRL_STATUS_PREG 0x044
#define SIERRA_CLKPATH_BIASTRIM_PREG 0x04B
#define SIERRA_DFE_BIASTRIM_PREG 0x04C
#define SIERRA_DRVCTRL_ATTEN_PREG 0x06A
#define SIERRA_DRVCTRL_BOOST_PREG 0x06F
#define SIERRA_LANE_TX_RECEIVER_DETECT_PREG 0x071
#define SIERRA_TX_RCVDET_OVRD_PREG 0x072
#define SIERRA_CLKPATHCTRL_TMR_PREG 0x081
#define SIERRA_RX_CREQ_FLTR_A_MODE3_PREG 0x085
#define SIERRA_RX_CREQ_FLTR_A_MODE2_PREG 0x086
#define SIERRA_RX_CREQ_FLTR_A_MODE1_PREG 0x087
#define SIERRA_RX_CREQ_FLTR_A_MODE0_PREG 0x088
#define SIERRA_CREQ_DCBIASATTEN_OVR_PREG 0x08C
#define SIERRA_CREQ_CCLKDET_MODE01_PREG 0x08E
#define SIERRA_RX_CTLE_CAL_PREG 0x08F
#define SIERRA_RX_CTLE_MAINTENANCE_PREG 0x091
#define SIERRA_CREQ_FSMCLK_SEL_PREG 0x092
#define SIERRA_CREQ_EQ_CTRL_PREG 0x093
#define SIERRA_CREQ_SPARE_PREG 0x096
#define SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG 0x097
#define SIERRA_CTLELUT_CTRL_PREG 0x098
#define SIERRA_DEQ_BLK_TAU_CTRL1_PREG 0x0AC
#define SIERRA_DEQ_BLK_TAU_CTRL4_PREG 0x0AF
#define SIERRA_DFE_ECMP_RATESEL_PREG 0x0C0
#define SIERRA_DFE_SMP_RATESEL_PREG 0x0C1
#define SIERRA_DEQ_PHALIGN_CTRL 0x0C4
#define SIERRA_DEQ_CONCUR_CTRL1_PREG 0x0C8
#define SIERRA_DEQ_CONCUR_CTRL2_PREG 0x0C9
#define SIERRA_DEQ_EPIPWR_CTRL2_PREG 0x0CD
#define SIERRA_DEQ_FAST_MAINT_CYCLES_PREG 0x0CE
#define SIERRA_DEQ_ERRCMP_CTRL_PREG 0x0D0
#define SIERRA_DEQ_OFFSET_CTRL_PREG 0x0D8
#define SIERRA_DEQ_GAIN_CTRL_PREG 0x0E0
#define SIERRA_DEQ_VGATUNE_CTRL_PREG 0x0E1
#define SIERRA_DEQ_GLUT0 0x0E8
#define SIERRA_DEQ_GLUT1 0x0E9
#define SIERRA_DEQ_GLUT2 0x0EA
#define SIERRA_DEQ_GLUT3 0x0EB
#define SIERRA_DEQ_GLUT4 0x0EC
#define SIERRA_DEQ_GLUT5 0x0ED
#define SIERRA_DEQ_GLUT6 0x0EE
#define SIERRA_DEQ_GLUT7 0x0EF
#define SIERRA_DEQ_GLUT8 0x0F0
#define SIERRA_DEQ_GLUT9 0x0F1
#define SIERRA_DEQ_GLUT10 0x0F2
#define SIERRA_DEQ_GLUT11 0x0F3
#define SIERRA_DEQ_GLUT12 0x0F4
#define SIERRA_DEQ_GLUT13 0x0F5
#define SIERRA_DEQ_GLUT14 0x0F6
#define SIERRA_DEQ_GLUT15 0x0F7
#define SIERRA_DEQ_GLUT16 0x0F8
#define SIERRA_POSTPRECUR_EN_CEPH_CTRL_PREG 0x0F9
#define SIERRA_TAU_EN_CEPH2TO0_PREG 0x0FB
#define SIERRA_TAU_EN_CEPH5TO3_PREG 0x0FC
#define SIERRA_DEQ_ALUT0 0x108
#define SIERRA_DEQ_ALUT1 0x109
#define SIERRA_DEQ_ALUT2 0x10A
#define SIERRA_DEQ_ALUT3 0x10B
#define SIERRA_DEQ_ALUT4 0x10C
#define SIERRA_DEQ_ALUT5 0x10D
#define SIERRA_DEQ_ALUT6 0x10E
#define SIERRA_DEQ_ALUT7 0x10F
#define SIERRA_DEQ_ALUT8 0x110
#define SIERRA_DEQ_ALUT9 0x111
#define SIERRA_DEQ_ALUT10 0x112
#define SIERRA_DEQ_ALUT11 0x113
#define SIERRA_DEQ_ALUT12 0x114
#define SIERRA_DEQ_ALUT13 0x115
#define SIERRA_OEPH_EN_CTRL_PREG 0x124
#define SIERRA_DEQ_DFETAP_CTRL_PREG 0x128
#define SIERRA_DEQ_DFETAP0 0x129
#define SIERRA_DEQ_DFETAP1 0x12B
#define SIERRA_DEQ_DFETAP2 0x12D
#define SIERRA_DEQ_DFETAP3 0x12F
#define SIERRA_DEQ_DFETAP4 0x131
#define SIERRA_DFE_EN_1010_IGNORE_PREG 0x134
#define SIERRA_DEQ_PRECUR_PREG 0x138
#define SIERRA_DEQ_POSTCUR_PREG 0x140
#define SIERRA_DEQ_POSTCUR_DECR_PREG 0x142
#define SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG 0x150
#define SIERRA_DEQ_TAU_CTRL2_PREG 0x151
#define SIERRA_DEQ_TAU_CTRL3_PREG 0x152
#define SIERRA_DEQ_OPENEYE_CTRL_PREG 0x158
#define SIERRA_DEQ_CONCUR_EPIOFFSET_MODE_PREG 0x159
#define SIERRA_DEQ_PICTRL_PREG 0x161
#define SIERRA_CPICAL_TMRVAL_MODE1_PREG 0x170
#define SIERRA_CPICAL_TMRVAL_MODE0_PREG 0x171
#define SIERRA_CPICAL_PICNT_MODE1_PREG 0x174
#define SIERRA_CPI_OUTBUF_RATESEL_PREG 0x17C
#define SIERRA_CPI_RESBIAS_BIN_PREG 0x17E
#define SIERRA_CPI_TRIM_PREG 0x17F
#define SIERRA_CPICAL_RES_STARTCODE_MODE23_PREG 0x183
#define SIERRA_CPICAL_RES_STARTCODE_MODE01_PREG 0x184
#define SIERRA_EPI_CTRL_PREG 0x187
#define SIERRA_LFPSDET_SUPPORT_PREG 0x188
#define SIERRA_LFPSFILT_NS_PREG 0x18A
#define SIERRA_LFPSFILT_RD_PREG 0x18B
#define SIERRA_LFPSFILT_MP_PREG 0x18C
#define SIERRA_SIGDET_SUPPORT_PREG 0x190
#define SIERRA_SDFILT_H2L_A_PREG 0x191
#define SIERRA_SDFILT_L2H_PREG 0x193
#define SIERRA_RXBUFFER_CTLECTRL_PREG 0x19E
#define SIERRA_RXBUFFER_RCDFECTRL_PREG 0x19F
#define SIERRA_RXBUFFER_DFECTRL_PREG 0x1A0
#define SIERRA_LN_SPARE_REG_PREG 0x1B0
#define SIERRA_DEQ_TAU_CTRL1_FAST_MAINT_PREG 0x14F
#define SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG 0x150
/* PHY PCS common registers */
#define SIERRA_PHY_PCS_COMMON_OFFSET(block_offset) \
(0xc000 << (block_offset))
#define SIERRA_PHY_PIPE_CMN_CTRL1 0x0
#define SIERRA_PHY_PLL_CFG 0xe
/* PHY PCS lane registers */
#define SIERRA_PHY_PCS_LANE_CDB_OFFSET(ln, block_offset, reg_offset) \
((0xD000 << (block_offset)) + \
(((ln) << 8) << (reg_offset)))
#define SIERRA_PHY_ISO_LINK_CTRL 0xB
/* PHY PMA common registers */
#define SIERRA_PHY_PMA_COMMON_OFFSET(block_offset) \
(0xE000 << (block_offset))
#define SIERRA_PHY_PMA_CMN_CTRL 0x000
/* PHY PMA lane registers */
#define SIERRA_PHY_PMA_LANE_CDB_OFFSET(ln, block_offset, reg_offset) \
((0xF000 << (block_offset)) + \
(((ln) << 8) << (reg_offset)))
#define SIERRA_PHY_PMA_XCVR_CTRL 0x000
#define SIERRA_MACRO_ID 0x00007364
#define SIERRA_MAX_LANES 16
#define PLL_LOCK_TIME 100000
#define CDNS_SIERRA_OUTPUT_CLOCKS 3
#define CDNS_SIERRA_INPUT_CLOCKS 3
enum cdns_sierra_clock_input {
PHY_CLK,
CMN_REFCLK_DIG_DIV,
CMN_REFCLK1_DIG_DIV,
};
#define SIERRA_NUM_CMN_PLLC 2
#define SIERRA_NUM_CMN_PLLC_PARENTS 2
static const struct reg_field macro_id_type =
REG_FIELD(SIERRA_MACRO_ID_REG, 0, 15);
static const struct reg_field phy_pll_cfg_1 =
REG_FIELD(SIERRA_PHY_PLL_CFG, 1, 1);
static const struct reg_field pma_cmn_ready =
REG_FIELD(SIERRA_PHY_PMA_CMN_CTRL, 0, 0);
static const struct reg_field pllctrl_lock =
REG_FIELD(SIERRA_PLLCTRL_STATUS_PREG, 0, 0);
static const struct reg_field phy_iso_link_ctrl_1 =
REG_FIELD(SIERRA_PHY_ISO_LINK_CTRL, 1, 1);
static const struct reg_field cmn_plllc_clk1outdiv_preg =
REG_FIELD(SIERRA_CMN_PLLLC_CLK1_PREG, 0, 6);
static const struct reg_field cmn_plllc_clk1_en_preg =
REG_FIELD(SIERRA_CMN_PLLLC_CLK1_PREG, 12, 12);
static const char * const clk_names[] = {
[CDNS_SIERRA_PLL_CMNLC] = "pll_cmnlc",
[CDNS_SIERRA_PLL_CMNLC1] = "pll_cmnlc1",
[CDNS_SIERRA_DERIVED_REFCLK] = "refclk_der",
};
enum cdns_sierra_cmn_plllc {
CMN_PLLLC,
CMN_PLLLC1,
};
struct cdns_sierra_pll_mux_reg_fields {
struct reg_field pfdclk_sel_preg;
struct reg_field plllc1en_field;
struct reg_field termen_field;
};
static const struct cdns_sierra_pll_mux_reg_fields cmn_plllc_pfdclk1_sel_preg[] = {
[CMN_PLLLC] = {
.pfdclk_sel_preg = REG_FIELD(SIERRA_CMN_PLLLC_GEN_PREG, 1, 1),
.plllc1en_field = REG_FIELD(SIERRA_CMN_REFRCV1_PREG, 8, 8),
.termen_field = REG_FIELD(SIERRA_CMN_REFRCV1_PREG, 0, 0),
},
[CMN_PLLLC1] = {
.pfdclk_sel_preg = REG_FIELD(SIERRA_CMN_PLLLC1_GEN_PREG, 1, 1),
.plllc1en_field = REG_FIELD(SIERRA_CMN_REFRCV_PREG, 8, 8),
.termen_field = REG_FIELD(SIERRA_CMN_REFRCV_PREG, 0, 0),
},
};
struct cdns_sierra_pll_mux {
struct clk_hw hw;
struct regmap_field *pfdclk_sel_preg;
struct regmap_field *plllc1en_field;
struct regmap_field *termen_field;
struct clk_init_data clk_data;
};
#define to_cdns_sierra_pll_mux(_hw) \
container_of(_hw, struct cdns_sierra_pll_mux, hw)
#define PLL0_REFCLK_NAME "pll0_refclk"
#define PLL1_REFCLK_NAME "pll1_refclk"
static const struct clk_parent_data pll_mux_parent_data[][SIERRA_NUM_CMN_PLLC_PARENTS] = {
[CMN_PLLLC] = {
{ .fw_name = PLL0_REFCLK_NAME },
{ .fw_name = PLL1_REFCLK_NAME }
},
[CMN_PLLLC1] = {
{ .fw_name = PLL1_REFCLK_NAME },
{ .fw_name = PLL0_REFCLK_NAME }
},
};
static u32 cdns_sierra_pll_mux_table[][SIERRA_NUM_CMN_PLLC_PARENTS] = {
[CMN_PLLLC] = { 0, 1 },
[CMN_PLLLC1] = { 1, 0 },
};
struct cdns_sierra_derived_refclk {
struct clk_hw hw;
struct regmap_field *cmn_plllc_clk1outdiv_preg;
struct regmap_field *cmn_plllc_clk1_en_preg;
struct clk_init_data clk_data;
};
#define to_cdns_sierra_derived_refclk(_hw) \
container_of(_hw, struct cdns_sierra_derived_refclk, hw)
enum cdns_sierra_phy_type {
TYPE_NONE,
TYPE_PCIE,
TYPE_USB,
TYPE_SGMII,
TYPE_QSGMII
};
enum cdns_sierra_ssc_mode {
NO_SSC,
EXTERNAL_SSC,
INTERNAL_SSC
};
struct cdns_sierra_inst {
struct phy *phy;
enum cdns_sierra_phy_type phy_type;
u32 num_lanes;
u32 mlane;
struct reset_control *lnk_rst;
enum cdns_sierra_ssc_mode ssc_mode;
};
struct cdns_reg_pairs {
u16 val;
u32 off;
};
struct cdns_sierra_vals {
const struct cdns_reg_pairs *reg_pairs;
u32 num_regs;
};
struct cdns_sierra_data {
u32 id_value;
u8 block_offset_shift;
u8 reg_offset_shift;
struct cdns_sierra_vals *pcs_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
[NUM_SSC_MODE];
struct cdns_sierra_vals *phy_pma_ln_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
[NUM_SSC_MODE];
struct cdns_sierra_vals *pma_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
[NUM_SSC_MODE];
struct cdns_sierra_vals *pma_ln_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
[NUM_SSC_MODE];
};
struct cdns_regmap_cdb_context {
struct device *dev;
void __iomem *base;
u8 reg_offset_shift;
};
struct cdns_sierra_phy {
struct device *dev;
const struct cdns_sierra_data *init_data;
struct cdns_sierra_inst phys[SIERRA_MAX_LANES];
struct reset_control *phy_rst;
struct reset_control *apb_rst;
struct regmap *regmap_lane_cdb[SIERRA_MAX_LANES];
struct regmap *regmap_phy_pcs_common_cdb;
struct regmap *regmap_phy_pcs_lane_cdb[SIERRA_MAX_LANES];
struct regmap *regmap_phy_pma_common_cdb;
struct regmap *regmap_phy_pma_lane_cdb[SIERRA_MAX_LANES];
struct regmap *regmap_common_cdb;
struct regmap_field *macro_id_type;
struct regmap_field *phy_pll_cfg_1;
struct regmap_field *pma_cmn_ready;
struct regmap_field *pllctrl_lock[SIERRA_MAX_LANES];
struct regmap_field *phy_iso_link_ctrl_1[SIERRA_MAX_LANES];
struct regmap_field *cmn_refrcv_refclk_plllc1en_preg[SIERRA_NUM_CMN_PLLC];
struct regmap_field *cmn_refrcv_refclk_termen_preg[SIERRA_NUM_CMN_PLLC];
struct regmap_field *cmn_plllc_pfdclk1_sel_preg[SIERRA_NUM_CMN_PLLC];
struct clk *input_clks[CDNS_SIERRA_INPUT_CLOCKS];
int nsubnodes;
u32 num_lanes;
bool autoconf;
int already_configured;
struct clk *pll_clks[SIERRA_NUM_CMN_PLLC];
struct clk_hw_onecell_data clk_data;
};
static int cdns_regmap_write(void *context, unsigned int reg, unsigned int val)
{
struct cdns_regmap_cdb_context *ctx = context;
u32 offset = reg << ctx->reg_offset_shift;
writew(val, ctx->base + offset);
return 0;
}
static int cdns_regmap_read(void *context, unsigned int reg, unsigned int *val)
{
struct cdns_regmap_cdb_context *ctx = context;
u32 offset = reg << ctx->reg_offset_shift;
*val = readw(ctx->base + offset);
return 0;
}
#define SIERRA_LANE_CDB_REGMAP_CONF(n) \
{ \
.name = "sierra_lane" n "_cdb", \
.reg_stride = 1, \
.fast_io = true, \
.reg_write = cdns_regmap_write, \
.reg_read = cdns_regmap_read, \
}
static const struct regmap_config cdns_sierra_lane_cdb_config[] = {
SIERRA_LANE_CDB_REGMAP_CONF("0"),
SIERRA_LANE_CDB_REGMAP_CONF("1"),
SIERRA_LANE_CDB_REGMAP_CONF("2"),
SIERRA_LANE_CDB_REGMAP_CONF("3"),
SIERRA_LANE_CDB_REGMAP_CONF("4"),
SIERRA_LANE_CDB_REGMAP_CONF("5"),
SIERRA_LANE_CDB_REGMAP_CONF("6"),
SIERRA_LANE_CDB_REGMAP_CONF("7"),
SIERRA_LANE_CDB_REGMAP_CONF("8"),
SIERRA_LANE_CDB_REGMAP_CONF("9"),
SIERRA_LANE_CDB_REGMAP_CONF("10"),
SIERRA_LANE_CDB_REGMAP_CONF("11"),
SIERRA_LANE_CDB_REGMAP_CONF("12"),
SIERRA_LANE_CDB_REGMAP_CONF("13"),
SIERRA_LANE_CDB_REGMAP_CONF("14"),
SIERRA_LANE_CDB_REGMAP_CONF("15"),
};
static const struct regmap_config cdns_sierra_common_cdb_config = {
.name = "sierra_common_cdb",
.reg_stride = 1,
.fast_io = true,
.reg_write = cdns_regmap_write,
.reg_read = cdns_regmap_read,
};
static const struct regmap_config cdns_sierra_phy_pcs_cmn_cdb_config = {
.name = "sierra_phy_pcs_cmn_cdb",
.reg_stride = 1,
.fast_io = true,
.reg_write = cdns_regmap_write,
.reg_read = cdns_regmap_read,
};
#define SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF(n) \
{ \
.name = "sierra_phy_pcs_lane" n "_cdb", \
.reg_stride = 1, \
.fast_io = true, \
.reg_write = cdns_regmap_write, \
.reg_read = cdns_regmap_read, \
}
static const struct regmap_config cdns_sierra_phy_pcs_lane_cdb_config[] = {
SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("0"),
SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("1"),
SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("2"),
SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("3"),
SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("4"),
SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("5"),
SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("6"),
SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("7"),
SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("8"),
SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("9"),
SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("10"),
SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("11"),
SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("12"),
SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("13"),
SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("14"),
SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("15"),
};
static const struct regmap_config cdns_sierra_phy_pma_cmn_cdb_config = {
.name = "sierra_phy_pma_cmn_cdb",
.reg_stride = 1,
.fast_io = true,
.reg_write = cdns_regmap_write,
.reg_read = cdns_regmap_read,
};
#define SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF(n) \
{ \
.name = "sierra_phy_pma_lane" n "_cdb", \
.reg_stride = 1, \
.fast_io = true, \
.reg_write = cdns_regmap_write, \
.reg_read = cdns_regmap_read, \
}
static const struct regmap_config cdns_sierra_phy_pma_lane_cdb_config[] = {
SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("0"),
SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("1"),
SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("2"),
SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("3"),
SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("4"),
SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("5"),
SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("6"),
SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("7"),
SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("8"),
SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("9"),
SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("10"),
SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("11"),
SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("12"),
SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("13"),
SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("14"),
SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("15"),
};
static int cdns_sierra_phy_init(struct phy *gphy)
{
struct cdns_sierra_inst *ins = phy_get_drvdata(gphy);
struct cdns_sierra_phy *phy = dev_get_drvdata(gphy->dev.parent);
const struct cdns_sierra_data *init_data = phy->init_data;
struct cdns_sierra_vals *pma_cmn_vals, *pma_ln_vals;
enum cdns_sierra_phy_type phy_type = ins->phy_type;
enum cdns_sierra_ssc_mode ssc = ins->ssc_mode;
struct cdns_sierra_vals *phy_pma_ln_vals;
const struct cdns_reg_pairs *reg_pairs;
struct cdns_sierra_vals *pcs_cmn_vals;
struct regmap *regmap;
u32 num_regs;
int i, j;
/* Initialise the PHY registers, unless auto configured */
if (phy->autoconf || phy->already_configured || phy->nsubnodes > 1)
return 0;
clk_set_rate(phy->input_clks[CMN_REFCLK_DIG_DIV], 25000000);
clk_set_rate(phy->input_clks[CMN_REFCLK1_DIG_DIV], 25000000);
/* PHY PCS common registers configurations */
pcs_cmn_vals = init_data->pcs_cmn_vals[phy_type][TYPE_NONE][ssc];
if (pcs_cmn_vals) {
reg_pairs = pcs_cmn_vals->reg_pairs;
num_regs = pcs_cmn_vals->num_regs;
regmap = phy->regmap_phy_pcs_common_cdb;
for (i = 0; i < num_regs; i++)
regmap_write(regmap, reg_pairs[i].off, reg_pairs[i].val);
}
/* PHY PMA lane registers configurations */
phy_pma_ln_vals = init_data->phy_pma_ln_vals[phy_type][TYPE_NONE][ssc];
if (phy_pma_ln_vals) {
reg_pairs = phy_pma_ln_vals->reg_pairs;
num_regs = phy_pma_ln_vals->num_regs;
for (i = 0; i < ins->num_lanes; i++) {
regmap = phy->regmap_phy_pma_lane_cdb[i + ins->mlane];
for (j = 0; j < num_regs; j++)
regmap_write(regmap, reg_pairs[j].off, reg_pairs[j].val);
}
}
/* PMA common registers configurations */
pma_cmn_vals = init_data->pma_cmn_vals[phy_type][TYPE_NONE][ssc];
if (pma_cmn_vals) {
reg_pairs = pma_cmn_vals->reg_pairs;
num_regs = pma_cmn_vals->num_regs;
regmap = phy->regmap_common_cdb;
for (i = 0; i < num_regs; i++)
regmap_write(regmap, reg_pairs[i].off, reg_pairs[i].val);
}
/* PMA lane registers configurations */
pma_ln_vals = init_data->pma_ln_vals[phy_type][TYPE_NONE][ssc];
if (pma_ln_vals) {
reg_pairs = pma_ln_vals->reg_pairs;
num_regs = pma_ln_vals->num_regs;
for (i = 0; i < ins->num_lanes; i++) {
regmap = phy->regmap_lane_cdb[i + ins->mlane];
for (j = 0; j < num_regs; j++)
regmap_write(regmap, reg_pairs[j].off, reg_pairs[j].val);
}
}
return 0;
}
static int cdns_sierra_phy_on(struct phy *gphy)
{
struct cdns_sierra_phy *sp = dev_get_drvdata(gphy->dev.parent);
struct cdns_sierra_inst *ins = phy_get_drvdata(gphy);
struct device *dev = sp->dev;
u32 val;
int ret;
if (sp->nsubnodes == 1) {
/* Take the PHY out of reset */
ret = reset_control_deassert(sp->phy_rst);
if (ret) {
dev_err(dev, "Failed to take the PHY out of reset\n");
return ret;
}
}
/* Take the PHY lane group out of reset */
ret = reset_control_deassert(ins->lnk_rst);
if (ret) {
dev_err(dev, "Failed to take the PHY lane out of reset\n");
return ret;
}
if (ins->phy_type == TYPE_PCIE || ins->phy_type == TYPE_USB) {
ret = regmap_field_read_poll_timeout(sp->phy_iso_link_ctrl_1[ins->mlane],
val, !val, 1000, PLL_LOCK_TIME);
if (ret) {
dev_err(dev, "Timeout waiting for PHY status ready\n");
return ret;
}
}
/*
* Wait for cmn_ready assertion
* PHY_PMA_CMN_CTRL[0] == 1
*/
ret = regmap_field_read_poll_timeout(sp->pma_cmn_ready, val, val,
1000, PLL_LOCK_TIME);
if (ret) {
dev_err(dev, "Timeout waiting for CMN ready\n");
return ret;
}
ret = regmap_field_read_poll_timeout(sp->pllctrl_lock[ins->mlane],
val, val, 1000, PLL_LOCK_TIME);
if (ret < 0)
dev_err(dev, "PLL lock of lane failed\n");
return ret;
}
static int cdns_sierra_phy_off(struct phy *gphy)
{
struct cdns_sierra_inst *ins = phy_get_drvdata(gphy);
return reset_control_assert(ins->lnk_rst);
}
static int cdns_sierra_phy_reset(struct phy *gphy)
{
struct cdns_sierra_phy *sp = dev_get_drvdata(gphy->dev.parent);
reset_control_assert(sp->phy_rst);
reset_control_deassert(sp->phy_rst);
return 0;
};
static const struct phy_ops ops = {
.init = cdns_sierra_phy_init,
.power_on = cdns_sierra_phy_on,
.power_off = cdns_sierra_phy_off,
.reset = cdns_sierra_phy_reset,
.owner = THIS_MODULE,
};
static int cdns_sierra_noop_phy_on(struct phy *gphy)
{
usleep_range(5000, 10000);
return 0;
}
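/*
 * Added note (not part of the original driver): the no-op ops below are used
 * when the PHY is found already configured (e.g. by a bootloader); power_on
 * then only waits briefly instead of reprogramming and resetting the PHY.
 */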
static const struct phy_ops noop_ops = {
.power_on = cdns_sierra_noop_phy_on,
.owner = THIS_MODULE,
};
static u8 cdns_sierra_pll_mux_get_parent(struct clk_hw *hw)
{
struct cdns_sierra_pll_mux *mux = to_cdns_sierra_pll_mux(hw);
struct regmap_field *plllc1en_field = mux->plllc1en_field;
struct regmap_field *termen_field = mux->termen_field;
struct regmap_field *field = mux->pfdclk_sel_preg;
unsigned int val;
int index;
regmap_field_read(field, &val);
if (strstr(clk_hw_get_name(hw), clk_names[CDNS_SIERRA_PLL_CMNLC1])) {
index = clk_mux_val_to_index(hw, cdns_sierra_pll_mux_table[CMN_PLLLC1], 0, val);
if (index == 1) {
regmap_field_write(plllc1en_field, 1);
regmap_field_write(termen_field, 1);
}
} else {
index = clk_mux_val_to_index(hw, cdns_sierra_pll_mux_table[CMN_PLLLC], 0, val);
}
return index;
}
static int cdns_sierra_pll_mux_set_parent(struct clk_hw *hw, u8 index)
{
struct cdns_sierra_pll_mux *mux = to_cdns_sierra_pll_mux(hw);
struct regmap_field *plllc1en_field = mux->plllc1en_field;
struct regmap_field *termen_field = mux->termen_field;
struct regmap_field *field = mux->pfdclk_sel_preg;
int val, ret;
ret = regmap_field_write(plllc1en_field, 0);
ret |= regmap_field_write(termen_field, 0);
if (index == 1) {
ret |= regmap_field_write(plllc1en_field, 1);
ret |= regmap_field_write(termen_field, 1);
}
if (strstr(clk_hw_get_name(hw), clk_names[CDNS_SIERRA_PLL_CMNLC1]))
val = cdns_sierra_pll_mux_table[CMN_PLLLC1][index];
else
val = cdns_sierra_pll_mux_table[CMN_PLLLC][index];
ret |= regmap_field_write(field, val);
return ret;
}
static const struct clk_ops cdns_sierra_pll_mux_ops = {
.determine_rate = __clk_mux_determine_rate,
.set_parent = cdns_sierra_pll_mux_set_parent,
.get_parent = cdns_sierra_pll_mux_get_parent,
};
static int cdns_sierra_pll_mux_register(struct cdns_sierra_phy *sp,
struct regmap_field *pfdclk1_sel_field,
struct regmap_field *plllc1en_field,
struct regmap_field *termen_field,
int clk_index)
{
struct cdns_sierra_pll_mux *mux;
struct device *dev = sp->dev;
struct clk_init_data *init;
char clk_name[100];
int ret;
mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
if (!mux)
return -ENOMEM;
snprintf(clk_name, sizeof(clk_name), "%s_%s", dev_name(dev), clk_names[clk_index]);
init = &mux->clk_data;
init->ops = &cdns_sierra_pll_mux_ops;
init->flags = CLK_SET_RATE_NO_REPARENT;
init->parent_data = pll_mux_parent_data[clk_index];
init->num_parents = SIERRA_NUM_CMN_PLLC_PARENTS;
init->name = clk_name;
mux->pfdclk_sel_preg = pfdclk1_sel_field;
mux->plllc1en_field = plllc1en_field;
mux->termen_field = termen_field;
mux->hw.init = init;
ret = devm_clk_hw_register(dev, &mux->hw);
if (ret)
return ret;
sp->clk_data.hws[clk_index] = &mux->hw;
sp->pll_clks[clk_index] = devm_clk_hw_get_clk(dev, &mux->hw,
clk_names[clk_index]);
return 0;
}
static int cdns_sierra_phy_register_pll_mux(struct cdns_sierra_phy *sp)
{
struct regmap_field *pfdclk1_sel_field;
struct regmap_field *plllc1en_field;
struct regmap_field *termen_field;
struct device *dev = sp->dev;
int ret = 0, i, clk_index;
clk_index = CDNS_SIERRA_PLL_CMNLC;
for (i = 0; i < SIERRA_NUM_CMN_PLLC; i++, clk_index++) {
pfdclk1_sel_field = sp->cmn_plllc_pfdclk1_sel_preg[i];
plllc1en_field = sp->cmn_refrcv_refclk_plllc1en_preg[i];
termen_field = sp->cmn_refrcv_refclk_termen_preg[i];
ret = cdns_sierra_pll_mux_register(sp, pfdclk1_sel_field, plllc1en_field,
termen_field, clk_index);
if (ret) {
dev_err(dev, "Fail to register cmn plllc mux\n");
return ret;
}
}
return 0;
}
static int cdns_sierra_derived_refclk_enable(struct clk_hw *hw)
{
struct cdns_sierra_derived_refclk *derived_refclk = to_cdns_sierra_derived_refclk(hw);
regmap_field_write(derived_refclk->cmn_plllc_clk1_en_preg, 0x1);
/* Program the divider to get a 100 MHz clock output on ref_der_clk_out: 5 GHz VCO / 50 = 100 MHz */
regmap_field_write(derived_refclk->cmn_plllc_clk1outdiv_preg, 0x2E);
return 0;
}
static void cdns_sierra_derived_refclk_disable(struct clk_hw *hw)
{
struct cdns_sierra_derived_refclk *derived_refclk = to_cdns_sierra_derived_refclk(hw);
regmap_field_write(derived_refclk->cmn_plllc_clk1_en_preg, 0);
}
static int cdns_sierra_derived_refclk_is_enabled(struct clk_hw *hw)
{
struct cdns_sierra_derived_refclk *derived_refclk = to_cdns_sierra_derived_refclk(hw);
int val;
regmap_field_read(derived_refclk->cmn_plllc_clk1_en_preg, &val);
return !!val;
}
static const struct clk_ops cdns_sierra_derived_refclk_ops = {
.enable = cdns_sierra_derived_refclk_enable,
.disable = cdns_sierra_derived_refclk_disable,
.is_enabled = cdns_sierra_derived_refclk_is_enabled,
};
static int cdns_sierra_derived_refclk_register(struct cdns_sierra_phy *sp)
{
struct cdns_sierra_derived_refclk *derived_refclk;
struct device *dev = sp->dev;
struct regmap_field *field;
struct clk_init_data *init;
struct regmap *regmap;
char clk_name[100];
int ret;
derived_refclk = devm_kzalloc(dev, sizeof(*derived_refclk), GFP_KERNEL);
if (!derived_refclk)
return -ENOMEM;
snprintf(clk_name, sizeof(clk_name), "%s_%s", dev_name(dev),
clk_names[CDNS_SIERRA_DERIVED_REFCLK]);
init = &derived_refclk->clk_data;
init->ops = &cdns_sierra_derived_refclk_ops;
init->flags = 0;
init->name = clk_name;
regmap = sp->regmap_common_cdb;
field = devm_regmap_field_alloc(dev, regmap, cmn_plllc_clk1outdiv_preg);
if (IS_ERR(field)) {
dev_err(dev, "cmn_plllc_clk1outdiv_preg reg field init failed\n");
return PTR_ERR(field);
}
derived_refclk->cmn_plllc_clk1outdiv_preg = field;
field = devm_regmap_field_alloc(dev, regmap, cmn_plllc_clk1_en_preg);
if (IS_ERR(field)) {
dev_err(dev, "cmn_plllc_clk1_en_preg reg field init failed\n");
return PTR_ERR(field);
}
derived_refclk->cmn_plllc_clk1_en_preg = field;
derived_refclk->hw.init = init;
ret = devm_clk_hw_register(dev, &derived_refclk->hw);
if (ret)
return ret;
sp->clk_data.hws[CDNS_SIERRA_DERIVED_REFCLK] = &derived_refclk->hw;
return 0;
}
static void cdns_sierra_clk_unregister(struct cdns_sierra_phy *sp)
{
struct device *dev = sp->dev;
struct device_node *node = dev->of_node;
of_clk_del_provider(node);
}
static int cdns_sierra_clk_register(struct cdns_sierra_phy *sp)
{
struct device *dev = sp->dev;
struct device_node *node = dev->of_node;
int ret;
ret = cdns_sierra_phy_register_pll_mux(sp);
if (ret) {
dev_err(dev, "Failed to pll mux clocks\n");
return ret;
}
ret = cdns_sierra_derived_refclk_register(sp);
if (ret) {
dev_err(dev, "Failed to register derived refclk\n");
return ret;
}
sp->clk_data.num = CDNS_SIERRA_OUTPUT_CLOCKS;
ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
&sp->clk_data);
if (ret)
dev_err(dev, "Failed to add clock provider: %s\n", node->name);
return ret;
}
static int cdns_sierra_get_optional(struct cdns_sierra_inst *inst,
struct device_node *child)
{
u32 phy_type;
if (of_property_read_u32(child, "reg", &inst->mlane))
return -EINVAL;
if (of_property_read_u32(child, "cdns,num-lanes", &inst->num_lanes))
return -EINVAL;
if (of_property_read_u32(child, "cdns,phy-type", &phy_type))
return -EINVAL;
switch (phy_type) {
case PHY_TYPE_PCIE:
inst->phy_type = TYPE_PCIE;
break;
case PHY_TYPE_USB3:
inst->phy_type = TYPE_USB;
break;
case PHY_TYPE_SGMII:
inst->phy_type = TYPE_SGMII;
break;
case PHY_TYPE_QSGMII:
inst->phy_type = TYPE_QSGMII;
break;
default:
return -EINVAL;
}
inst->ssc_mode = EXTERNAL_SSC;
of_property_read_u32(child, "cdns,ssc-mode", &inst->ssc_mode);
return 0;
}
static struct regmap *cdns_regmap_init(struct device *dev, void __iomem *base,
u32 block_offset, u8 reg_offset_shift,
const struct regmap_config *config)
{
struct cdns_regmap_cdb_context *ctx;
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return ERR_PTR(-ENOMEM);
ctx->dev = dev;
ctx->base = base + block_offset;
ctx->reg_offset_shift = reg_offset_shift;
return devm_regmap_init(dev, NULL, ctx, config);
}
static int cdns_regfield_init(struct cdns_sierra_phy *sp)
{
struct device *dev = sp->dev;
struct regmap_field *field;
struct reg_field reg_field;
struct regmap *regmap;
int i;
regmap = sp->regmap_common_cdb;
field = devm_regmap_field_alloc(dev, regmap, macro_id_type);
if (IS_ERR(field)) {
dev_err(dev, "MACRO_ID_TYPE reg field init failed\n");
return PTR_ERR(field);
}
sp->macro_id_type = field;
for (i = 0; i < SIERRA_NUM_CMN_PLLC; i++) {
reg_field = cmn_plllc_pfdclk1_sel_preg[i].pfdclk_sel_preg;
field = devm_regmap_field_alloc(dev, regmap, reg_field);
if (IS_ERR(field)) {
dev_err(dev, "PLLLC%d_PFDCLK1_SEL failed\n", i);
return PTR_ERR(field);
}
sp->cmn_plllc_pfdclk1_sel_preg[i] = field;
reg_field = cmn_plllc_pfdclk1_sel_preg[i].plllc1en_field;
field = devm_regmap_field_alloc(dev, regmap, reg_field);
if (IS_ERR(field)) {
dev_err(dev, "REFRCV%d_REFCLK_PLLLC1EN failed\n", i);
return PTR_ERR(field);
}
sp->cmn_refrcv_refclk_plllc1en_preg[i] = field;
reg_field = cmn_plllc_pfdclk1_sel_preg[i].termen_field;
field = devm_regmap_field_alloc(dev, regmap, reg_field);
if (IS_ERR(field)) {
dev_err(dev, "REFRCV%d_REFCLK_TERMEN failed\n", i);
return PTR_ERR(field);
}
sp->cmn_refrcv_refclk_termen_preg[i] = field;
}
regmap = sp->regmap_phy_pcs_common_cdb;
field = devm_regmap_field_alloc(dev, regmap, phy_pll_cfg_1);
if (IS_ERR(field)) {
dev_err(dev, "PHY_PLL_CFG_1 reg field init failed\n");
return PTR_ERR(field);
}
sp->phy_pll_cfg_1 = field;
regmap = sp->regmap_phy_pma_common_cdb;
field = devm_regmap_field_alloc(dev, regmap, pma_cmn_ready);
if (IS_ERR(field)) {
dev_err(dev, "PHY_PMA_CMN_CTRL reg field init failed\n");
return PTR_ERR(field);
}
sp->pma_cmn_ready = field;
for (i = 0; i < SIERRA_MAX_LANES; i++) {
regmap = sp->regmap_lane_cdb[i];
field = devm_regmap_field_alloc(dev, regmap, pllctrl_lock);
if (IS_ERR(field)) {
dev_err(dev, "P%d_ENABLE reg field init failed\n", i);
return PTR_ERR(field);
}
sp->pllctrl_lock[i] = field;
}
for (i = 0; i < SIERRA_MAX_LANES; i++) {
regmap = sp->regmap_phy_pcs_lane_cdb[i];
field = devm_regmap_field_alloc(dev, regmap, phy_iso_link_ctrl_1);
if (IS_ERR(field)) {
dev_err(dev, "PHY_ISO_LINK_CTRL reg field init for lane %d failed\n", i);
return PTR_ERR(field);
}
sp->phy_iso_link_ctrl_1[i] = field;
}
return 0;
}
static int cdns_regmap_init_blocks(struct cdns_sierra_phy *sp,
void __iomem *base, u8 block_offset_shift,
u8 reg_offset_shift)
{
struct device *dev = sp->dev;
struct regmap *regmap;
u32 block_offset;
int i;
for (i = 0; i < SIERRA_MAX_LANES; i++) {
block_offset = SIERRA_LANE_CDB_OFFSET(i, block_offset_shift,
reg_offset_shift);
regmap = cdns_regmap_init(dev, base, block_offset,
reg_offset_shift,
&cdns_sierra_lane_cdb_config[i]);
if (IS_ERR(regmap)) {
dev_err(dev, "Failed to init lane CDB regmap\n");
return PTR_ERR(regmap);
}
sp->regmap_lane_cdb[i] = regmap;
}
regmap = cdns_regmap_init(dev, base, SIERRA_COMMON_CDB_OFFSET,
reg_offset_shift,
&cdns_sierra_common_cdb_config);
if (IS_ERR(regmap)) {
dev_err(dev, "Failed to init common CDB regmap\n");
return PTR_ERR(regmap);
}
sp->regmap_common_cdb = regmap;
block_offset = SIERRA_PHY_PCS_COMMON_OFFSET(block_offset_shift);
regmap = cdns_regmap_init(dev, base, block_offset, reg_offset_shift,
&cdns_sierra_phy_pcs_cmn_cdb_config);
if (IS_ERR(regmap)) {
dev_err(dev, "Failed to init PHY PCS common CDB regmap\n");
return PTR_ERR(regmap);
}
sp->regmap_phy_pcs_common_cdb = regmap;
for (i = 0; i < SIERRA_MAX_LANES; i++) {
block_offset = SIERRA_PHY_PCS_LANE_CDB_OFFSET(i, block_offset_shift,
reg_offset_shift);
regmap = cdns_regmap_init(dev, base, block_offset,
reg_offset_shift,
&cdns_sierra_phy_pcs_lane_cdb_config[i]);
if (IS_ERR(regmap)) {
dev_err(dev, "Failed to init PHY PCS lane CDB regmap\n");
return PTR_ERR(regmap);
}
sp->regmap_phy_pcs_lane_cdb[i] = regmap;
}
block_offset = SIERRA_PHY_PMA_COMMON_OFFSET(block_offset_shift);
regmap = cdns_regmap_init(dev, base, block_offset, reg_offset_shift,
&cdns_sierra_phy_pma_cmn_cdb_config);
if (IS_ERR(regmap)) {
dev_err(dev, "Failed to init PHY PMA common CDB regmap\n");
return PTR_ERR(regmap);
}
sp->regmap_phy_pma_common_cdb = regmap;
for (i = 0; i < SIERRA_MAX_LANES; i++) {
block_offset = SIERRA_PHY_PMA_LANE_CDB_OFFSET(i, block_offset_shift,
reg_offset_shift);
regmap = cdns_regmap_init(dev, base, block_offset,
reg_offset_shift,
&cdns_sierra_phy_pma_lane_cdb_config[i]);
if (IS_ERR(regmap)) {
dev_err(dev, "Failed to init PHY PMA lane CDB regmap\n");
return PTR_ERR(regmap);
}
sp->regmap_phy_pma_lane_cdb[i] = regmap;
}
return 0;
}
static int cdns_sierra_phy_get_clocks(struct cdns_sierra_phy *sp,
struct device *dev)
{
struct clk *clk;
int ret;
clk = devm_clk_get_optional(dev, "cmn_refclk_dig_div");
if (IS_ERR(clk)) {
dev_err(dev, "cmn_refclk_dig_div clock not found\n");
ret = PTR_ERR(clk);
return ret;
}
sp->input_clks[CMN_REFCLK_DIG_DIV] = clk;
clk = devm_clk_get_optional(dev, "cmn_refclk1_dig_div");
if (IS_ERR(clk)) {
dev_err(dev, "cmn_refclk1_dig_div clock not found\n");
ret = PTR_ERR(clk);
return ret;
}
sp->input_clks[CMN_REFCLK1_DIG_DIV] = clk;
return 0;
}
static int cdns_sierra_phy_clk(struct cdns_sierra_phy *sp)
{
struct device *dev = sp->dev;
struct clk *clk;
int ret;
clk = devm_clk_get_optional(dev, "phy_clk");
if (IS_ERR(clk)) {
dev_err(dev, "failed to get clock phy_clk\n");
return PTR_ERR(clk);
}
sp->input_clks[PHY_CLK] = clk;
ret = clk_prepare_enable(sp->input_clks[PHY_CLK]);
if (ret)
return ret;
return 0;
}
static int cdns_sierra_phy_enable_clocks(struct cdns_sierra_phy *sp)
{
int ret;
ret = clk_prepare_enable(sp->pll_clks[CDNS_SIERRA_PLL_CMNLC]);
if (ret)
return ret;
ret = clk_prepare_enable(sp->pll_clks[CDNS_SIERRA_PLL_CMNLC1]);
if (ret)
goto err_pll_cmnlc1;
return 0;
err_pll_cmnlc1:
clk_disable_unprepare(sp->pll_clks[CDNS_SIERRA_PLL_CMNLC]);
return ret;
}
static void cdns_sierra_phy_disable_clocks(struct cdns_sierra_phy *sp)
{
clk_disable_unprepare(sp->pll_clks[CDNS_SIERRA_PLL_CMNLC1]);
clk_disable_unprepare(sp->pll_clks[CDNS_SIERRA_PLL_CMNLC]);
if (!sp->already_configured)
clk_disable_unprepare(sp->input_clks[PHY_CLK]);
}
static int cdns_sierra_phy_get_resets(struct cdns_sierra_phy *sp,
struct device *dev)
{
struct reset_control *rst;
rst = devm_reset_control_get_exclusive(dev, "sierra_reset");
if (IS_ERR(rst)) {
dev_err(dev, "failed to get reset\n");
return PTR_ERR(rst);
}
sp->phy_rst = rst;
rst = devm_reset_control_get_optional_exclusive(dev, "sierra_apb");
if (IS_ERR(rst)) {
dev_err(dev, "failed to get apb reset\n");
return PTR_ERR(rst);
}
sp->apb_rst = rst;
return 0;
}
static int cdns_sierra_phy_configure_multilink(struct cdns_sierra_phy *sp)
{
const struct cdns_sierra_data *init_data = sp->init_data;
struct cdns_sierra_vals *pma_cmn_vals, *pma_ln_vals;
enum cdns_sierra_phy_type phy_t1, phy_t2;
struct cdns_sierra_vals *phy_pma_ln_vals;
const struct cdns_reg_pairs *reg_pairs;
struct cdns_sierra_vals *pcs_cmn_vals;
int i, j, node, mlane, num_lanes, ret;
enum cdns_sierra_ssc_mode ssc;
struct regmap *regmap;
u32 num_regs;
/* Maximum 2 links (subnodes) are supported */
if (sp->nsubnodes != 2)
return -EINVAL;
clk_set_rate(sp->input_clks[CMN_REFCLK_DIG_DIV], 25000000);
clk_set_rate(sp->input_clks[CMN_REFCLK1_DIG_DIV], 25000000);
/* PHY configured to use both PLL LC and LC1 */
regmap_field_write(sp->phy_pll_cfg_1, 0x1);
phy_t1 = sp->phys[0].phy_type;
phy_t2 = sp->phys[1].phy_type;
/*
* PHY configuration for multi-link operation is done in two steps.
* e.g. consider a 4-lane PHY with PCIe using 2 lanes and QSGMII using the other 2 lanes.
* The Sierra PHY has 2 PLLs, viz. PLLLC and PLLLC1, so in this case PLLLC is used for PCIe
* and PLLLC1 is used for QSGMII. The PHY is configured in two steps as described below.
*
* [1] In the first step, phy_t1 = TYPE_PCIE and phy_t2 = TYPE_QSGMII,
* so the register values are selected as [TYPE_PCIE][TYPE_QSGMII][ssc].
* This configures the PHY registers associated with PCIe (i.e. the first protocol),
* involving the PLLLC registers and the registers for the first 2 lanes of the PHY.
* [2] In the second step, the variables phy_t1 and phy_t2 are swapped, so now
* phy_t1 = TYPE_QSGMII and phy_t2 = TYPE_PCIE, and the register values are selected as
* [TYPE_QSGMII][TYPE_PCIE][ssc].
* This configures the PHY registers associated with QSGMII (i.e. the second protocol),
* involving the PLLLC1 registers and the registers for the other 2 lanes of the PHY.
* This completes the PHY configuration for multilink operation. This approach enables
* dividing the large number of PHY register configurations into smaller,
* protocol-specific groups.
* smaller groups.
*/
for (node = 0; node < sp->nsubnodes; node++) {
if (node == 1) {
/*
* Once the first link with phy_t1 has been configured, configure the
* PHY for the second link with phy_t2. Get the array values as
* [phy_t2][phy_t1][ssc].
*/
swap(phy_t1, phy_t2);
}
mlane = sp->phys[node].mlane;
ssc = sp->phys[node].ssc_mode;
num_lanes = sp->phys[node].num_lanes;
/* PHY PCS common registers configurations */
pcs_cmn_vals = init_data->pcs_cmn_vals[phy_t1][phy_t2][ssc];
if (pcs_cmn_vals) {
reg_pairs = pcs_cmn_vals->reg_pairs;
num_regs = pcs_cmn_vals->num_regs;
regmap = sp->regmap_phy_pcs_common_cdb;
for (i = 0; i < num_regs; i++)
regmap_write(regmap, reg_pairs[i].off, reg_pairs[i].val);
}
/* PHY PMA lane registers configurations */
phy_pma_ln_vals = init_data->phy_pma_ln_vals[phy_t1][phy_t2][ssc];
if (phy_pma_ln_vals) {
reg_pairs = phy_pma_ln_vals->reg_pairs;
num_regs = phy_pma_ln_vals->num_regs;
for (i = 0; i < num_lanes; i++) {
regmap = sp->regmap_phy_pma_lane_cdb[i + mlane];
for (j = 0; j < num_regs; j++)
regmap_write(regmap, reg_pairs[j].off, reg_pairs[j].val);
}
}
/* PMA common registers configurations */
pma_cmn_vals = init_data->pma_cmn_vals[phy_t1][phy_t2][ssc];
if (pma_cmn_vals) {
reg_pairs = pma_cmn_vals->reg_pairs;
num_regs = pma_cmn_vals->num_regs;
regmap = sp->regmap_common_cdb;
for (i = 0; i < num_regs; i++)
regmap_write(regmap, reg_pairs[i].off, reg_pairs[i].val);
}
/* PMA lane registers configurations */
pma_ln_vals = init_data->pma_ln_vals[phy_t1][phy_t2][ssc];
if (pma_ln_vals) {
reg_pairs = pma_ln_vals->reg_pairs;
num_regs = pma_ln_vals->num_regs;
for (i = 0; i < num_lanes; i++) {
regmap = sp->regmap_lane_cdb[i + mlane];
for (j = 0; j < num_regs; j++)
regmap_write(regmap, reg_pairs[j].off, reg_pairs[j].val);
}
}
if (phy_t1 == TYPE_SGMII || phy_t1 == TYPE_QSGMII)
reset_control_deassert(sp->phys[node].lnk_rst);
}
/* Take the PHY out of reset */
ret = reset_control_deassert(sp->phy_rst);
if (ret)
return ret;
return 0;
}
static int cdns_sierra_phy_probe(struct platform_device *pdev)
{
struct cdns_sierra_phy *sp;
struct phy_provider *phy_provider;
struct device *dev = &pdev->dev;
const struct cdns_sierra_data *data;
unsigned int id_value;
int ret, node = 0;
void __iomem *base;
struct device_node *dn = dev->of_node, *child;
if (of_get_child_count(dn) == 0)
return -ENODEV;
/* Get init data for this PHY */
data = of_device_get_match_data(dev);
if (!data)
return -EINVAL;
sp = devm_kzalloc(dev, struct_size(sp, clk_data.hws,
CDNS_SIERRA_OUTPUT_CLOCKS),
GFP_KERNEL);
if (!sp)
return -ENOMEM;
dev_set_drvdata(dev, sp);
sp->dev = dev;
sp->init_data = data;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base)) {
dev_err(dev, "missing \"reg\"\n");
return PTR_ERR(base);
}
ret = cdns_regmap_init_blocks(sp, base, data->block_offset_shift,
data->reg_offset_shift);
if (ret)
return ret;
ret = cdns_regfield_init(sp);
if (ret)
return ret;
platform_set_drvdata(pdev, sp);
ret = cdns_sierra_phy_get_clocks(sp, dev);
if (ret)
return ret;
ret = cdns_sierra_clk_register(sp);
if (ret)
return ret;
ret = cdns_sierra_phy_enable_clocks(sp);
if (ret)
goto unregister_clk;
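/*
* Check whether the PHY has already been configured (e.g. by a bootloader);
* if so, skip the reset and PLL setup below and register no-op PHY ops.
*/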
regmap_field_read(sp->pma_cmn_ready, &sp->already_configured);
if (!sp->already_configured) {
ret = cdns_sierra_phy_clk(sp);
if (ret)
goto clk_disable;
ret = cdns_sierra_phy_get_resets(sp, dev);
if (ret)
goto clk_disable;
/* Enable APB */
reset_control_deassert(sp->apb_rst);
}
/* Check that PHY is present */
regmap_field_read(sp->macro_id_type, &id_value);
if (sp->init_data->id_value != id_value) {
ret = -EINVAL;
goto ctrl_assert;
}
sp->autoconf = of_property_read_bool(dn, "cdns,autoconf");
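/* Create one PHY instance per "phy" or "link" child node */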
for_each_available_child_of_node(dn, child) {
struct phy *gphy;
if (!(of_node_name_eq(child, "phy") ||
of_node_name_eq(child, "link")))
continue;
sp->phys[node].lnk_rst =
of_reset_control_array_get_exclusive(child);
if (IS_ERR(sp->phys[node].lnk_rst)) {
dev_err(dev, "failed to get reset %s\n",
child->full_name);
ret = PTR_ERR(sp->phys[node].lnk_rst);
of_node_put(child);
goto put_control;
}
if (!sp->autoconf) {
ret = cdns_sierra_get_optional(&sp->phys[node], child);
if (ret) {
dev_err(dev, "missing property in node %s\n",
child->name);
of_node_put(child);
reset_control_put(sp->phys[node].lnk_rst);
goto put_control;
}
}
sp->num_lanes += sp->phys[node].num_lanes;
if (!sp->already_configured)
gphy = devm_phy_create(dev, child, &ops);
else
gphy = devm_phy_create(dev, child, &noop_ops);
if (IS_ERR(gphy)) {
ret = PTR_ERR(gphy);
of_node_put(child);
reset_control_put(sp->phys[node].lnk_rst);
goto put_control;
}
sp->phys[node].phy = gphy;
phy_set_drvdata(gphy, &sp->phys[node]);
node++;
}
sp->nsubnodes = node;
if (sp->num_lanes > SIERRA_MAX_LANES) {
ret = -EINVAL;
dev_err(dev, "Invalid lane configuration\n");
goto put_control;
}
/* If more than one subnode, configure the PHY as multilink */
if (!sp->already_configured && !sp->autoconf && sp->nsubnodes > 1) {
ret = cdns_sierra_phy_configure_multilink(sp);
if (ret)
goto put_control;
}
pm_runtime_enable(dev);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
if (IS_ERR(phy_provider)) {
ret = PTR_ERR(phy_provider);
goto put_control;
}
return 0;
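/*
* Error unwind: release the per-link resets acquired so far, re-assert the
* APB reset if it was de-asserted, then disable and unregister the clocks.
*/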
put_control:
while (--node >= 0)
reset_control_put(sp->phys[node].lnk_rst);
ctrl_assert:
if (!sp->already_configured)
reset_control_assert(sp->apb_rst);
clk_disable:
cdns_sierra_phy_disable_clocks(sp);
unregister_clk:
cdns_sierra_clk_unregister(sp);
return ret;
}
static void cdns_sierra_phy_remove(struct platform_device *pdev)
{
struct cdns_sierra_phy *phy = platform_get_drvdata(pdev);
int i;
reset_control_assert(phy->phy_rst);
reset_control_assert(phy->apb_rst);
pm_runtime_disable(&pdev->dev);
cdns_sierra_phy_disable_clocks(phy);
/*
* The device level resets will be put automatically.
* Need to put the subnode resets here though.
*/
for (i = 0; i < phy->nsubnodes; i++) {
reset_control_assert(phy->phys[i].lnk_rst);
reset_control_put(phy->phys[i].lnk_rst);
}
cdns_sierra_clk_unregister(phy);
}
/* SGMII PHY PMA lane configuration */
static struct cdns_reg_pairs sgmii_phy_pma_ln_regs[] = {
{0x9010, SIERRA_PHY_PMA_XCVR_CTRL}
};
static struct cdns_sierra_vals sgmii_phy_pma_ln_vals = {
.reg_pairs = sgmii_phy_pma_ln_regs,
.num_regs = ARRAY_SIZE(sgmii_phy_pma_ln_regs),
};
/* SGMII refclk 100MHz, no ssc, opt3 and GE1 links using PLL LC1 */
static const struct cdns_reg_pairs sgmii_100_no_ssc_plllc1_opt3_cmn_regs[] = {
{0x002D, SIERRA_CMN_PLLLC1_FBDIV_INT_PREG},
{0x2085, SIERRA_CMN_PLLLC1_LF_COEFF_MODE0_PREG},
{0x1005, SIERRA_CMN_PLLLC1_CLK0_PREG},
{0x0000, SIERRA_CMN_PLLLC1_BWCAL_MODE0_PREG},
{0x0800, SIERRA_CMN_PLLLC1_SS_TIME_STEPSIZE_MODE_PREG}
};
static const struct cdns_reg_pairs sgmii_100_no_ssc_plllc1_opt3_ln_regs[] = {
{0x688E, SIERRA_DET_STANDEC_D_PREG},
{0x0004, SIERRA_PSC_LN_IDLE_PREG},
{0x0FFE, SIERRA_PSC_RX_A0_PREG},
{0x0106, SIERRA_PLLCTRL_FBDIV_MODE01_PREG},
{0x0013, SIERRA_PLLCTRL_SUBRATE_PREG},
{0x0003, SIERRA_PLLCTRL_GEN_A_PREG},
{0x0106, SIERRA_PLLCTRL_GEN_D_PREG},
{0x5231, SIERRA_PLLCTRL_CPGAIN_MODE_PREG},
{0x0000, SIERRA_DRVCTRL_ATTEN_PREG},
{0x9702, SIERRA_DRVCTRL_BOOST_PREG},
{0x0051, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
{0x3C0E, SIERRA_CREQ_CCLKDET_MODE01_PREG},
{0x3220, SIERRA_CREQ_FSMCLK_SEL_PREG},
{0x0000, SIERRA_CREQ_EQ_CTRL_PREG},
{0x0002, SIERRA_DEQ_PHALIGN_CTRL},
{0x0186, SIERRA_DEQ_GLUT0},
{0x0186, SIERRA_DEQ_GLUT1},
{0x0186, SIERRA_DEQ_GLUT2},
{0x0186, SIERRA_DEQ_GLUT3},
{0x0186, SIERRA_DEQ_GLUT4},
{0x0861, SIERRA_DEQ_ALUT0},
{0x07E0, SIERRA_DEQ_ALUT1},
{0x079E, SIERRA_DEQ_ALUT2},
{0x071D, SIERRA_DEQ_ALUT3},
{0x03F5, SIERRA_DEQ_DFETAP_CTRL_PREG},
{0x0C01, SIERRA_DEQ_TAU_CTRL1_FAST_MAINT_PREG},
{0x3C40, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
{0x1C04, SIERRA_DEQ_TAU_CTRL2_PREG},
{0x0033, SIERRA_DEQ_PICTRL_PREG},
{0x0000, SIERRA_CPI_OUTBUF_RATESEL_PREG},
{0x0B6D, SIERRA_CPI_RESBIAS_BIN_PREG},
{0x0102, SIERRA_RXBUFFER_CTLECTRL_PREG},
{0x0002, SIERRA_RXBUFFER_RCDFECTRL_PREG}
};
static struct cdns_sierra_vals sgmii_100_no_ssc_plllc1_opt3_cmn_vals = {
.reg_pairs = sgmii_100_no_ssc_plllc1_opt3_cmn_regs,
.num_regs = ARRAY_SIZE(sgmii_100_no_ssc_plllc1_opt3_cmn_regs),
};
static struct cdns_sierra_vals sgmii_100_no_ssc_plllc1_opt3_ln_vals = {
.reg_pairs = sgmii_100_no_ssc_plllc1_opt3_ln_regs,
.num_regs = ARRAY_SIZE(sgmii_100_no_ssc_plllc1_opt3_ln_regs),
};
/* QSGMII PHY PMA lane configuration */
static struct cdns_reg_pairs qsgmii_phy_pma_ln_regs[] = {
{0x9010, SIERRA_PHY_PMA_XCVR_CTRL}
};
static struct cdns_sierra_vals qsgmii_phy_pma_ln_vals = {
.reg_pairs = qsgmii_phy_pma_ln_regs,
.num_regs = ARRAY_SIZE(qsgmii_phy_pma_ln_regs),
};
/* QSGMII refclk 100MHz, 20b, opt1, No BW cal, no ssc, PLL LC1 */
static const struct cdns_reg_pairs qsgmii_100_no_ssc_plllc1_cmn_regs[] = {
{0x2085, SIERRA_CMN_PLLLC1_LF_COEFF_MODE0_PREG},
{0x0000, SIERRA_CMN_PLLLC1_BWCAL_MODE0_PREG},
{0x0000, SIERRA_CMN_PLLLC1_SS_TIME_STEPSIZE_MODE_PREG}
};
static const struct cdns_reg_pairs qsgmii_100_no_ssc_plllc1_ln_regs[] = {
{0xFC08, SIERRA_DET_STANDEC_A_PREG},
{0x0252, SIERRA_DET_STANDEC_E_PREG},
{0x0004, SIERRA_PSC_LN_IDLE_PREG},
{0x0FFE, SIERRA_PSC_RX_A0_PREG},
{0x0011, SIERRA_PLLCTRL_SUBRATE_PREG},
{0x0001, SIERRA_PLLCTRL_GEN_A_PREG},
{0x5233, SIERRA_PLLCTRL_CPGAIN_MODE_PREG},
{0x0000, SIERRA_DRVCTRL_ATTEN_PREG},
{0x0089, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
{0x3C3C, SIERRA_CREQ_CCLKDET_MODE01_PREG},
{0x3222, SIERRA_CREQ_FSMCLK_SEL_PREG},
{0x0000, SIERRA_CREQ_EQ_CTRL_PREG},
{0x8422, SIERRA_CTLELUT_CTRL_PREG},
{0x4111, SIERRA_DFE_ECMP_RATESEL_PREG},
{0x4111, SIERRA_DFE_SMP_RATESEL_PREG},
{0x0002, SIERRA_DEQ_PHALIGN_CTRL},
{0x9595, SIERRA_DEQ_VGATUNE_CTRL_PREG},
{0x0186, SIERRA_DEQ_GLUT0},
{0x0186, SIERRA_DEQ_GLUT1},
{0x0186, SIERRA_DEQ_GLUT2},
{0x0186, SIERRA_DEQ_GLUT3},
{0x0186, SIERRA_DEQ_GLUT4},
{0x0861, SIERRA_DEQ_ALUT0},
{0x07E0, SIERRA_DEQ_ALUT1},
{0x079E, SIERRA_DEQ_ALUT2},
{0x071D, SIERRA_DEQ_ALUT3},
{0x03F5, SIERRA_DEQ_DFETAP_CTRL_PREG},
{0x0C01, SIERRA_DEQ_TAU_CTRL1_FAST_MAINT_PREG},
{0x3C40, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
{0x1C04, SIERRA_DEQ_TAU_CTRL2_PREG},
{0x0033, SIERRA_DEQ_PICTRL_PREG},
{0x0660, SIERRA_CPICAL_TMRVAL_MODE0_PREG},
{0x00D5, SIERRA_CPI_OUTBUF_RATESEL_PREG},
{0x0B6D, SIERRA_CPI_RESBIAS_BIN_PREG},
{0x0102, SIERRA_RXBUFFER_CTLECTRL_PREG},
{0x0002, SIERRA_RXBUFFER_RCDFECTRL_PREG}
};
static struct cdns_sierra_vals qsgmii_100_no_ssc_plllc1_cmn_vals = {
.reg_pairs = qsgmii_100_no_ssc_plllc1_cmn_regs,
.num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_plllc1_cmn_regs),
};
static struct cdns_sierra_vals qsgmii_100_no_ssc_plllc1_ln_vals = {
.reg_pairs = qsgmii_100_no_ssc_plllc1_ln_regs,
.num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_plllc1_ln_regs),
};
/* PCIE PHY PCS common configuration */
static struct cdns_reg_pairs pcie_phy_pcs_cmn_regs[] = {
{0x0430, SIERRA_PHY_PIPE_CMN_CTRL1}
};
static struct cdns_sierra_vals pcie_phy_pcs_cmn_vals = {
.reg_pairs = pcie_phy_pcs_cmn_regs,
.num_regs = ARRAY_SIZE(pcie_phy_pcs_cmn_regs),
};
/* refclk100MHz_32b_PCIe_cmn_pll_no_ssc, pcie_links_using_plllc, pipe_bw_3 */
static const struct cdns_reg_pairs pcie_100_no_ssc_plllc_cmn_regs[] = {
{0x2105, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
{0x2105, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
{0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG},
{0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG}
};
/*
* refclk100MHz_32b_PCIe_ln_no_ssc, multilink, using_plllc,
* cmn_pllcy_anaclk0_1Ghz, xcvr_pllclk_fullrt_500mhz
*/
static const struct cdns_reg_pairs ml_pcie_100_no_ssc_ln_regs[] = {
{0xFC08, SIERRA_DET_STANDEC_A_PREG},
{0x001D, SIERRA_PSM_A3IN_TMR_PREG},
{0x0004, SIERRA_PSC_LN_A3_PREG},
{0x0004, SIERRA_PSC_LN_A4_PREG},
{0x0004, SIERRA_PSC_LN_IDLE_PREG},
{0x1555, SIERRA_DFE_BIASTRIM_PREG},
{0x9703, SIERRA_DRVCTRL_BOOST_PREG},
{0x8055, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
{0x80BB, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
{0x8351, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
{0x8349, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
{0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
{0x9800, SIERRA_RX_CTLE_CAL_PREG},
{0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
{0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
{0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
{0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
{0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
{0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
{0x0041, SIERRA_DEQ_GLUT0},
{0x0082, SIERRA_DEQ_GLUT1},
{0x00C3, SIERRA_DEQ_GLUT2},
{0x0145, SIERRA_DEQ_GLUT3},
{0x0186, SIERRA_DEQ_GLUT4},
{0x09E7, SIERRA_DEQ_ALUT0},
{0x09A6, SIERRA_DEQ_ALUT1},
{0x0965, SIERRA_DEQ_ALUT2},
{0x08E3, SIERRA_DEQ_ALUT3},
{0x00FA, SIERRA_DEQ_DFETAP0},
{0x00FA, SIERRA_DEQ_DFETAP1},
{0x00FA, SIERRA_DEQ_DFETAP2},
{0x00FA, SIERRA_DEQ_DFETAP3},
{0x00FA, SIERRA_DEQ_DFETAP4},
{0x000F, SIERRA_DEQ_PRECUR_PREG},
{0x0280, SIERRA_DEQ_POSTCUR_PREG},
{0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
{0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
{0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
{0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
{0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
{0x002B, SIERRA_CPI_TRIM_PREG},
{0x0003, SIERRA_EPI_CTRL_PREG},
{0x803F, SIERRA_SDFILT_H2L_A_PREG},
{0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
{0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
{0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
};
static struct cdns_sierra_vals pcie_100_no_ssc_plllc_cmn_vals = {
.reg_pairs = pcie_100_no_ssc_plllc_cmn_regs,
.num_regs = ARRAY_SIZE(pcie_100_no_ssc_plllc_cmn_regs),
};
static struct cdns_sierra_vals ml_pcie_100_no_ssc_ln_vals = {
.reg_pairs = ml_pcie_100_no_ssc_ln_regs,
.num_regs = ARRAY_SIZE(ml_pcie_100_no_ssc_ln_regs),
};
/*
* TI J721E:
* refclk100MHz_32b_PCIe_ln_no_ssc, multilink, using_plllc,
* cmn_pllcy_anaclk0_1Ghz, xcvr_pllclk_fullrt_500mhz
*/
static const struct cdns_reg_pairs ti_ml_pcie_100_no_ssc_ln_regs[] = {
{0xFC08, SIERRA_DET_STANDEC_A_PREG},
{0x001D, SIERRA_PSM_A3IN_TMR_PREG},
{0x0004, SIERRA_PSC_LN_A3_PREG},
{0x0004, SIERRA_PSC_LN_A4_PREG},
{0x0004, SIERRA_PSC_LN_IDLE_PREG},
{0x1555, SIERRA_DFE_BIASTRIM_PREG},
{0x9703, SIERRA_DRVCTRL_BOOST_PREG},
{0x8055, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
{0x80BB, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
{0x8351, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
{0x8349, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
{0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
{0x9800, SIERRA_RX_CTLE_CAL_PREG},
{0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
{0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
{0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
{0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
{0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
{0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
{0x0041, SIERRA_DEQ_GLUT0},
{0x0082, SIERRA_DEQ_GLUT1},
{0x00C3, SIERRA_DEQ_GLUT2},
{0x0145, SIERRA_DEQ_GLUT3},
{0x0186, SIERRA_DEQ_GLUT4},
{0x09E7, SIERRA_DEQ_ALUT0},
{0x09A6, SIERRA_DEQ_ALUT1},
{0x0965, SIERRA_DEQ_ALUT2},
{0x08E3, SIERRA_DEQ_ALUT3},
{0x00FA, SIERRA_DEQ_DFETAP0},
{0x00FA, SIERRA_DEQ_DFETAP1},
{0x00FA, SIERRA_DEQ_DFETAP2},
{0x00FA, SIERRA_DEQ_DFETAP3},
{0x00FA, SIERRA_DEQ_DFETAP4},
{0x000F, SIERRA_DEQ_PRECUR_PREG},
{0x0280, SIERRA_DEQ_POSTCUR_PREG},
{0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
{0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
{0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
{0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
{0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
{0x002B, SIERRA_CPI_TRIM_PREG},
{0x0003, SIERRA_EPI_CTRL_PREG},
{0x803F, SIERRA_SDFILT_H2L_A_PREG},
{0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
{0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
{0x4432, SIERRA_RXBUFFER_DFECTRL_PREG},
{0x0002, SIERRA_TX_RCVDET_OVRD_PREG}
};
static struct cdns_sierra_vals ti_ml_pcie_100_no_ssc_ln_vals = {
.reg_pairs = ti_ml_pcie_100_no_ssc_ln_regs,
.num_regs = ARRAY_SIZE(ti_ml_pcie_100_no_ssc_ln_regs),
};
/* refclk100MHz_32b_PCIe_cmn_pll_int_ssc, pcie_links_using_plllc, pipe_bw_3 */
static const struct cdns_reg_pairs pcie_100_int_ssc_plllc_cmn_regs[] = {
{0x000E, SIERRA_CMN_PLLLC_MODE_PREG},
{0x4006, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
{0x4006, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
{0x0000, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG},
{0x0000, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG},
{0x0581, SIERRA_CMN_PLLLC_DSMCORR_PREG},
{0x7F80, SIERRA_CMN_PLLLC_SS_PREG},
{0x0041, SIERRA_CMN_PLLLC_SS_AMP_STEP_SIZE_PREG},
{0x0464, SIERRA_CMN_PLLLC_SSTWOPT_PREG},
{0x0D0D, SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG},
{0x0060, SIERRA_CMN_PLLLC_LOCK_DELAY_CTRL_PREG}
};
/*
* refclk100MHz_32b_PCIe_ln_int_ssc, multilink, using_plllc,
* cmn_pllcy_anaclk0_1Ghz, xcvr_pllclk_fullrt_500mhz
*/
static const struct cdns_reg_pairs ml_pcie_100_int_ssc_ln_regs[] = {
{0xFC08, SIERRA_DET_STANDEC_A_PREG},
{0x001D, SIERRA_PSM_A3IN_TMR_PREG},
{0x0004, SIERRA_PSC_LN_A3_PREG},
{0x0004, SIERRA_PSC_LN_A4_PREG},
{0x0004, SIERRA_PSC_LN_IDLE_PREG},
{0x1555, SIERRA_DFE_BIASTRIM_PREG},
{0x9703, SIERRA_DRVCTRL_BOOST_PREG},
{0x813E, SIERRA_CLKPATHCTRL_TMR_PREG},
{0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
{0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
{0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
{0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
{0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
{0x9800, SIERRA_RX_CTLE_CAL_PREG},
{0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
{0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
{0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
{0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
{0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
{0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
{0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
{0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
{0x0041, SIERRA_DEQ_GLUT0},
{0x0082, SIERRA_DEQ_GLUT1},
{0x00C3, SIERRA_DEQ_GLUT2},
{0x0145, SIERRA_DEQ_GLUT3},
{0x0186, SIERRA_DEQ_GLUT4},
{0x09E7, SIERRA_DEQ_ALUT0},
{0x09A6, SIERRA_DEQ_ALUT1},
{0x0965, SIERRA_DEQ_ALUT2},
{0x08E3, SIERRA_DEQ_ALUT3},
{0x00FA, SIERRA_DEQ_DFETAP0},
{0x00FA, SIERRA_DEQ_DFETAP1},
{0x00FA, SIERRA_DEQ_DFETAP2},
{0x00FA, SIERRA_DEQ_DFETAP3},
{0x00FA, SIERRA_DEQ_DFETAP4},
{0x000F, SIERRA_DEQ_PRECUR_PREG},
{0x0280, SIERRA_DEQ_POSTCUR_PREG},
{0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
{0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
{0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
{0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
{0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
{0x002B, SIERRA_CPI_TRIM_PREG},
{0x0003, SIERRA_EPI_CTRL_PREG},
{0x803F, SIERRA_SDFILT_H2L_A_PREG},
{0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
{0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
{0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
};
static struct cdns_sierra_vals pcie_100_int_ssc_plllc_cmn_vals = {
.reg_pairs = pcie_100_int_ssc_plllc_cmn_regs,
.num_regs = ARRAY_SIZE(pcie_100_int_ssc_plllc_cmn_regs),
};
static struct cdns_sierra_vals ml_pcie_100_int_ssc_ln_vals = {
.reg_pairs = ml_pcie_100_int_ssc_ln_regs,
.num_regs = ARRAY_SIZE(ml_pcie_100_int_ssc_ln_regs),
};
/*
* TI J721E:
* refclk100MHz_32b_PCIe_ln_int_ssc, multilink, using_plllc,
* cmn_pllcy_anaclk0_1Ghz, xcvr_pllclk_fullrt_500mhz
*/
static const struct cdns_reg_pairs ti_ml_pcie_100_int_ssc_ln_regs[] = {
{0xFC08, SIERRA_DET_STANDEC_A_PREG},
{0x001D, SIERRA_PSM_A3IN_TMR_PREG},
{0x0004, SIERRA_PSC_LN_A3_PREG},
{0x0004, SIERRA_PSC_LN_A4_PREG},
{0x0004, SIERRA_PSC_LN_IDLE_PREG},
{0x1555, SIERRA_DFE_BIASTRIM_PREG},
{0x9703, SIERRA_DRVCTRL_BOOST_PREG},
{0x813E, SIERRA_CLKPATHCTRL_TMR_PREG},
{0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
{0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
{0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
{0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
{0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
{0x9800, SIERRA_RX_CTLE_CAL_PREG},
{0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
{0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
{0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
{0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
{0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
{0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
{0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
{0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
{0x0041, SIERRA_DEQ_GLUT0},
{0x0082, SIERRA_DEQ_GLUT1},
{0x00C3, SIERRA_DEQ_GLUT2},
{0x0145, SIERRA_DEQ_GLUT3},
{0x0186, SIERRA_DEQ_GLUT4},
{0x09E7, SIERRA_DEQ_ALUT0},
{0x09A6, SIERRA_DEQ_ALUT1},
{0x0965, SIERRA_DEQ_ALUT2},
{0x08E3, SIERRA_DEQ_ALUT3},
{0x00FA, SIERRA_DEQ_DFETAP0},
{0x00FA, SIERRA_DEQ_DFETAP1},
{0x00FA, SIERRA_DEQ_DFETAP2},
{0x00FA, SIERRA_DEQ_DFETAP3},
{0x00FA, SIERRA_DEQ_DFETAP4},
{0x000F, SIERRA_DEQ_PRECUR_PREG},
{0x0280, SIERRA_DEQ_POSTCUR_PREG},
{0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
{0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
{0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
{0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
{0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
{0x002B, SIERRA_CPI_TRIM_PREG},
{0x0003, SIERRA_EPI_CTRL_PREG},
{0x803F, SIERRA_SDFILT_H2L_A_PREG},
{0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
{0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
{0x4432, SIERRA_RXBUFFER_DFECTRL_PREG},
{0x0002, SIERRA_TX_RCVDET_OVRD_PREG}
};
static struct cdns_sierra_vals ti_ml_pcie_100_int_ssc_ln_vals = {
.reg_pairs = ti_ml_pcie_100_int_ssc_ln_regs,
.num_regs = ARRAY_SIZE(ti_ml_pcie_100_int_ssc_ln_regs),
};
/* refclk100MHz_32b_PCIe_cmn_pll_ext_ssc, pcie_links_using_plllc, pipe_bw_3 */
static const struct cdns_reg_pairs pcie_100_ext_ssc_plllc_cmn_regs[] = {
{0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
{0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
{0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG},
{0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG},
{0x1B1B, SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG}
};
/*
* refclk100MHz_32b_PCIe_ln_ext_ssc, multilink, using_plllc,
* cmn_pllcy_anaclk0_1Ghz, xcvr_pllclk_fullrt_500mhz
*/
static const struct cdns_reg_pairs ml_pcie_100_ext_ssc_ln_regs[] = {
{0xFC08, SIERRA_DET_STANDEC_A_PREG},
{0x001D, SIERRA_PSM_A3IN_TMR_PREG},
{0x0004, SIERRA_PSC_LN_A3_PREG},
{0x0004, SIERRA_PSC_LN_A4_PREG},
{0x0004, SIERRA_PSC_LN_IDLE_PREG},
{0x1555, SIERRA_DFE_BIASTRIM_PREG},
{0x9703, SIERRA_DRVCTRL_BOOST_PREG},
{0x813E, SIERRA_CLKPATHCTRL_TMR_PREG},
{0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
{0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
{0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
{0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
{0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
{0x9800, SIERRA_RX_CTLE_CAL_PREG},
{0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
{0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
{0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
{0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
{0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
{0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
{0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
{0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
{0x0041, SIERRA_DEQ_GLUT0},
{0x0082, SIERRA_DEQ_GLUT1},
{0x00C3, SIERRA_DEQ_GLUT2},
{0x0145, SIERRA_DEQ_GLUT3},
{0x0186, SIERRA_DEQ_GLUT4},
{0x09E7, SIERRA_DEQ_ALUT0},
{0x09A6, SIERRA_DEQ_ALUT1},
{0x0965, SIERRA_DEQ_ALUT2},
{0x08E3, SIERRA_DEQ_ALUT3},
{0x00FA, SIERRA_DEQ_DFETAP0},
{0x00FA, SIERRA_DEQ_DFETAP1},
{0x00FA, SIERRA_DEQ_DFETAP2},
{0x00FA, SIERRA_DEQ_DFETAP3},
{0x00FA, SIERRA_DEQ_DFETAP4},
{0x000F, SIERRA_DEQ_PRECUR_PREG},
{0x0280, SIERRA_DEQ_POSTCUR_PREG},
{0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
{0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
{0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
{0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
{0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
{0x002B, SIERRA_CPI_TRIM_PREG},
{0x0003, SIERRA_EPI_CTRL_PREG},
{0x803F, SIERRA_SDFILT_H2L_A_PREG},
{0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
{0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
{0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
};
static struct cdns_sierra_vals pcie_100_ext_ssc_plllc_cmn_vals = {
.reg_pairs = pcie_100_ext_ssc_plllc_cmn_regs,
.num_regs = ARRAY_SIZE(pcie_100_ext_ssc_plllc_cmn_regs),
};
static struct cdns_sierra_vals ml_pcie_100_ext_ssc_ln_vals = {
.reg_pairs = ml_pcie_100_ext_ssc_ln_regs,
.num_regs = ARRAY_SIZE(ml_pcie_100_ext_ssc_ln_regs),
};
/*
* TI J721E:
* refclk100MHz_32b_PCIe_ln_ext_ssc, multilink, using_plllc,
* cmn_pllcy_anaclk0_1Ghz, xcvr_pllclk_fullrt_500mhz
*/
static const struct cdns_reg_pairs ti_ml_pcie_100_ext_ssc_ln_regs[] = {
{0xFC08, SIERRA_DET_STANDEC_A_PREG},
{0x001D, SIERRA_PSM_A3IN_TMR_PREG},
{0x0004, SIERRA_PSC_LN_A3_PREG},
{0x0004, SIERRA_PSC_LN_A4_PREG},
{0x0004, SIERRA_PSC_LN_IDLE_PREG},
{0x1555, SIERRA_DFE_BIASTRIM_PREG},
{0x9703, SIERRA_DRVCTRL_BOOST_PREG},
{0x813E, SIERRA_CLKPATHCTRL_TMR_PREG},
{0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
{0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
{0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
{0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
{0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
{0x9800, SIERRA_RX_CTLE_CAL_PREG},
{0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
{0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
{0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
{0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
{0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
{0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
{0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
{0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
{0x0041, SIERRA_DEQ_GLUT0},
{0x0082, SIERRA_DEQ_GLUT1},
{0x00C3, SIERRA_DEQ_GLUT2},
{0x0145, SIERRA_DEQ_GLUT3},
{0x0186, SIERRA_DEQ_GLUT4},
{0x09E7, SIERRA_DEQ_ALUT0},
{0x09A6, SIERRA_DEQ_ALUT1},
{0x0965, SIERRA_DEQ_ALUT2},
{0x08E3, SIERRA_DEQ_ALUT3},
{0x00FA, SIERRA_DEQ_DFETAP0},
{0x00FA, SIERRA_DEQ_DFETAP1},
{0x00FA, SIERRA_DEQ_DFETAP2},
{0x00FA, SIERRA_DEQ_DFETAP3},
{0x00FA, SIERRA_DEQ_DFETAP4},
{0x000F, SIERRA_DEQ_PRECUR_PREG},
{0x0280, SIERRA_DEQ_POSTCUR_PREG},
{0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
{0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
{0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
{0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
{0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
{0x002B, SIERRA_CPI_TRIM_PREG},
{0x0003, SIERRA_EPI_CTRL_PREG},
{0x803F, SIERRA_SDFILT_H2L_A_PREG},
{0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
{0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
{0x4432, SIERRA_RXBUFFER_DFECTRL_PREG},
{0x0002, SIERRA_TX_RCVDET_OVRD_PREG}
};
static struct cdns_sierra_vals ti_ml_pcie_100_ext_ssc_ln_vals = {
.reg_pairs = ti_ml_pcie_100_ext_ssc_ln_regs,
.num_regs = ARRAY_SIZE(ti_ml_pcie_100_ext_ssc_ln_regs),
};
/* refclk100MHz_32b_PCIe_cmn_pll_no_ssc */
static const struct cdns_reg_pairs cdns_pcie_cmn_regs_no_ssc[] = {
{0x2105, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
{0x2105, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
{0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG},
{0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG}
};
/* refclk100MHz_32b_PCIe_ln_no_ssc */
static const struct cdns_reg_pairs cdns_pcie_ln_regs_no_ssc[] = {
{0xFC08, SIERRA_DET_STANDEC_A_PREG},
{0x001D, SIERRA_PSM_A3IN_TMR_PREG},
{0x1555, SIERRA_DFE_BIASTRIM_PREG},
{0x9703, SIERRA_DRVCTRL_BOOST_PREG},
{0x8055, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
{0x80BB, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
{0x8351, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
{0x8349, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
{0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
{0x9800, SIERRA_RX_CTLE_CAL_PREG},
{0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
{0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
{0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
{0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
{0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
{0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
{0x0041, SIERRA_DEQ_GLUT0},
{0x0082, SIERRA_DEQ_GLUT1},
{0x00C3, SIERRA_DEQ_GLUT2},
{0x0145, SIERRA_DEQ_GLUT3},
{0x0186, SIERRA_DEQ_GLUT4},
{0x09E7, SIERRA_DEQ_ALUT0},
{0x09A6, SIERRA_DEQ_ALUT1},
{0x0965, SIERRA_DEQ_ALUT2},
{0x08E3, SIERRA_DEQ_ALUT3},
{0x00FA, SIERRA_DEQ_DFETAP0},
{0x00FA, SIERRA_DEQ_DFETAP1},
{0x00FA, SIERRA_DEQ_DFETAP2},
{0x00FA, SIERRA_DEQ_DFETAP3},
{0x00FA, SIERRA_DEQ_DFETAP4},
{0x000F, SIERRA_DEQ_PRECUR_PREG},
{0x0280, SIERRA_DEQ_POSTCUR_PREG},
{0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
{0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
{0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
{0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
{0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
{0x002B, SIERRA_CPI_TRIM_PREG},
{0x0003, SIERRA_EPI_CTRL_PREG},
{0x803F, SIERRA_SDFILT_H2L_A_PREG},
{0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
{0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
{0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
};
static struct cdns_sierra_vals pcie_100_no_ssc_cmn_vals = {
.reg_pairs = cdns_pcie_cmn_regs_no_ssc,
.num_regs = ARRAY_SIZE(cdns_pcie_cmn_regs_no_ssc),
};
static struct cdns_sierra_vals pcie_100_no_ssc_ln_vals = {
.reg_pairs = cdns_pcie_ln_regs_no_ssc,
.num_regs = ARRAY_SIZE(cdns_pcie_ln_regs_no_ssc),
};
/* refclk100MHz_32b_PCIe_cmn_pll_int_ssc */
static const struct cdns_reg_pairs cdns_pcie_cmn_regs_int_ssc[] = {
{0x000E, SIERRA_CMN_PLLLC_MODE_PREG},
{0x4006, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
{0x4006, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
{0x0000, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG},
{0x0000, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG},
{0x0581, SIERRA_CMN_PLLLC_DSMCORR_PREG},
{0x7F80, SIERRA_CMN_PLLLC_SS_PREG},
{0x0041, SIERRA_CMN_PLLLC_SS_AMP_STEP_SIZE_PREG},
{0x0464, SIERRA_CMN_PLLLC_SSTWOPT_PREG},
{0x0D0D, SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG},
{0x0060, SIERRA_CMN_PLLLC_LOCK_DELAY_CTRL_PREG}
};
/* refclk100MHz_32b_PCIe_ln_int_ssc */
static const struct cdns_reg_pairs cdns_pcie_ln_regs_int_ssc[] = {
{0xFC08, SIERRA_DET_STANDEC_A_PREG},
{0x001D, SIERRA_PSM_A3IN_TMR_PREG},
{0x1555, SIERRA_DFE_BIASTRIM_PREG},
{0x9703, SIERRA_DRVCTRL_BOOST_PREG},
{0x813E, SIERRA_CLKPATHCTRL_TMR_PREG},
{0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
{0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
{0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
{0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
{0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
{0x9800, SIERRA_RX_CTLE_CAL_PREG},
{0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
{0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
{0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
{0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
{0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
{0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
{0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
{0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
{0x0041, SIERRA_DEQ_GLUT0},
{0x0082, SIERRA_DEQ_GLUT1},
{0x00C3, SIERRA_DEQ_GLUT2},
{0x0145, SIERRA_DEQ_GLUT3},
{0x0186, SIERRA_DEQ_GLUT4},
{0x09E7, SIERRA_DEQ_ALUT0},
{0x09A6, SIERRA_DEQ_ALUT1},
{0x0965, SIERRA_DEQ_ALUT2},
{0x08E3, SIERRA_DEQ_ALUT3},
{0x00FA, SIERRA_DEQ_DFETAP0},
{0x00FA, SIERRA_DEQ_DFETAP1},
{0x00FA, SIERRA_DEQ_DFETAP2},
{0x00FA, SIERRA_DEQ_DFETAP3},
{0x00FA, SIERRA_DEQ_DFETAP4},
{0x000F, SIERRA_DEQ_PRECUR_PREG},
{0x0280, SIERRA_DEQ_POSTCUR_PREG},
{0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
{0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
{0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
{0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
{0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
{0x002B, SIERRA_CPI_TRIM_PREG},
{0x0003, SIERRA_EPI_CTRL_PREG},
{0x803F, SIERRA_SDFILT_H2L_A_PREG},
{0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
{0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
{0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
};
static struct cdns_sierra_vals pcie_100_int_ssc_cmn_vals = {
.reg_pairs = cdns_pcie_cmn_regs_int_ssc,
.num_regs = ARRAY_SIZE(cdns_pcie_cmn_regs_int_ssc),
};
static struct cdns_sierra_vals pcie_100_int_ssc_ln_vals = {
.reg_pairs = cdns_pcie_ln_regs_int_ssc,
.num_regs = ARRAY_SIZE(cdns_pcie_ln_regs_int_ssc),
};
/* refclk100MHz_32b_PCIe_cmn_pll_ext_ssc */
static const struct cdns_reg_pairs cdns_pcie_cmn_regs_ext_ssc[] = {
{0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
{0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
{0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG},
{0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG},
{0x1B1B, SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG}
};
/* refclk100MHz_32b_PCIe_ln_ext_ssc */
static const struct cdns_reg_pairs cdns_pcie_ln_regs_ext_ssc[] = {
{0xFC08, SIERRA_DET_STANDEC_A_PREG},
{0x001D, SIERRA_PSM_A3IN_TMR_PREG},
{0x1555, SIERRA_DFE_BIASTRIM_PREG},
{0x9703, SIERRA_DRVCTRL_BOOST_PREG},
{0x813E, SIERRA_CLKPATHCTRL_TMR_PREG},
{0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
{0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
{0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
{0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
{0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
{0x9800, SIERRA_RX_CTLE_CAL_PREG},
{0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
{0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
{0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
{0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
{0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
{0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
{0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
{0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
{0x0041, SIERRA_DEQ_GLUT0},
{0x0082, SIERRA_DEQ_GLUT1},
{0x00C3, SIERRA_DEQ_GLUT2},
{0x0145, SIERRA_DEQ_GLUT3},
{0x0186, SIERRA_DEQ_GLUT4},
{0x09E7, SIERRA_DEQ_ALUT0},
{0x09A6, SIERRA_DEQ_ALUT1},
{0x0965, SIERRA_DEQ_ALUT2},
{0x08E3, SIERRA_DEQ_ALUT3},
{0x00FA, SIERRA_DEQ_DFETAP0},
{0x00FA, SIERRA_DEQ_DFETAP1},
{0x00FA, SIERRA_DEQ_DFETAP2},
{0x00FA, SIERRA_DEQ_DFETAP3},
{0x00FA, SIERRA_DEQ_DFETAP4},
{0x000F, SIERRA_DEQ_PRECUR_PREG},
{0x0280, SIERRA_DEQ_POSTCUR_PREG},
{0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
{0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
{0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
{0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
{0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
{0x002B, SIERRA_CPI_TRIM_PREG},
{0x0003, SIERRA_EPI_CTRL_PREG},
{0x803F, SIERRA_SDFILT_H2L_A_PREG},
{0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
{0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
{0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
};
static struct cdns_sierra_vals pcie_100_ext_ssc_cmn_vals = {
.reg_pairs = cdns_pcie_cmn_regs_ext_ssc,
.num_regs = ARRAY_SIZE(cdns_pcie_cmn_regs_ext_ssc),
};
static struct cdns_sierra_vals pcie_100_ext_ssc_ln_vals = {
.reg_pairs = cdns_pcie_ln_regs_ext_ssc,
.num_regs = ARRAY_SIZE(cdns_pcie_ln_regs_ext_ssc),
};
/* refclk100MHz_20b_USB_cmn_pll_ext_ssc */
static const struct cdns_reg_pairs cdns_usb_cmn_regs_ext_ssc[] = {
{0x2085, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
{0x2085, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
{0x0000, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG},
{0x0000, SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG}
};
/* refclk100MHz_20b_USB_ln_ext_ssc */
static const struct cdns_reg_pairs cdns_usb_ln_regs_ext_ssc[] = {
{0xFE0A, SIERRA_DET_STANDEC_A_PREG},
{0x000F, SIERRA_DET_STANDEC_B_PREG},
{0x55A5, SIERRA_DET_STANDEC_C_PREG},
{0x69ad, SIERRA_DET_STANDEC_D_PREG},
{0x0241, SIERRA_DET_STANDEC_E_PREG},
{0x0110, SIERRA_PSM_LANECAL_DLY_A1_RESETS_PREG},
{0x0014, SIERRA_PSM_A0IN_TMR_PREG},
{0xCF00, SIERRA_PSM_DIAG_PREG},
{0x001F, SIERRA_PSC_TX_A0_PREG},
{0x0007, SIERRA_PSC_TX_A1_PREG},
{0x0003, SIERRA_PSC_TX_A2_PREG},
{0x0003, SIERRA_PSC_TX_A3_PREG},
{0x0FFF, SIERRA_PSC_RX_A0_PREG},
{0x0003, SIERRA_PSC_RX_A1_PREG},
{0x0003, SIERRA_PSC_RX_A2_PREG},
{0x0001, SIERRA_PSC_RX_A3_PREG},
{0x0001, SIERRA_PLLCTRL_SUBRATE_PREG},
{0x0406, SIERRA_PLLCTRL_GEN_D_PREG},
{0x5233, SIERRA_PLLCTRL_CPGAIN_MODE_PREG},
{0x00CA, SIERRA_CLKPATH_BIASTRIM_PREG},
{0x2512, SIERRA_DFE_BIASTRIM_PREG},
{0x0000, SIERRA_DRVCTRL_ATTEN_PREG},
{0x823E, SIERRA_CLKPATHCTRL_TMR_PREG},
{0x078F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
{0x078F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
{0x7B3C, SIERRA_CREQ_CCLKDET_MODE01_PREG},
{0x023C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
{0x3232, SIERRA_CREQ_FSMCLK_SEL_PREG},
{0x0000, SIERRA_CREQ_EQ_CTRL_PREG},
{0x0000, SIERRA_CREQ_SPARE_PREG},
{0xCC44, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
{0x8452, SIERRA_CTLELUT_CTRL_PREG},
{0x4121, SIERRA_DFE_ECMP_RATESEL_PREG},
{0x4121, SIERRA_DFE_SMP_RATESEL_PREG},
{0x0003, SIERRA_DEQ_PHALIGN_CTRL},
{0x3200, SIERRA_DEQ_CONCUR_CTRL1_PREG},
{0x5064, SIERRA_DEQ_CONCUR_CTRL2_PREG},
{0x0030, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
{0x0048, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
{0x5A5A, SIERRA_DEQ_ERRCMP_CTRL_PREG},
{0x02F5, SIERRA_DEQ_OFFSET_CTRL_PREG},
{0x02F5, SIERRA_DEQ_GAIN_CTRL_PREG},
{0x9999, SIERRA_DEQ_VGATUNE_CTRL_PREG},
{0x0014, SIERRA_DEQ_GLUT0},
{0x0014, SIERRA_DEQ_GLUT1},
{0x0014, SIERRA_DEQ_GLUT2},
{0x0014, SIERRA_DEQ_GLUT3},
{0x0014, SIERRA_DEQ_GLUT4},
{0x0014, SIERRA_DEQ_GLUT5},
{0x0014, SIERRA_DEQ_GLUT6},
{0x0014, SIERRA_DEQ_GLUT7},
{0x0014, SIERRA_DEQ_GLUT8},
{0x0014, SIERRA_DEQ_GLUT9},
{0x0014, SIERRA_DEQ_GLUT10},
{0x0014, SIERRA_DEQ_GLUT11},
{0x0014, SIERRA_DEQ_GLUT12},
{0x0014, SIERRA_DEQ_GLUT13},
{0x0014, SIERRA_DEQ_GLUT14},
{0x0014, SIERRA_DEQ_GLUT15},
{0x0014, SIERRA_DEQ_GLUT16},
{0x0BAE, SIERRA_DEQ_ALUT0},
{0x0AEB, SIERRA_DEQ_ALUT1},
{0x0A28, SIERRA_DEQ_ALUT2},
{0x0965, SIERRA_DEQ_ALUT3},
{0x08A2, SIERRA_DEQ_ALUT4},
{0x07DF, SIERRA_DEQ_ALUT5},
{0x071C, SIERRA_DEQ_ALUT6},
{0x0659, SIERRA_DEQ_ALUT7},
{0x0596, SIERRA_DEQ_ALUT8},
{0x0514, SIERRA_DEQ_ALUT9},
{0x0492, SIERRA_DEQ_ALUT10},
{0x0410, SIERRA_DEQ_ALUT11},
{0x038E, SIERRA_DEQ_ALUT12},
{0x030C, SIERRA_DEQ_ALUT13},
{0x03F4, SIERRA_DEQ_DFETAP_CTRL_PREG},
{0x0001, SIERRA_DFE_EN_1010_IGNORE_PREG},
{0x3C01, SIERRA_DEQ_TAU_CTRL1_FAST_MAINT_PREG},
{0x3C40, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
{0x1C08, SIERRA_DEQ_TAU_CTRL2_PREG},
{0x0033, SIERRA_DEQ_PICTRL_PREG},
{0x0400, SIERRA_CPICAL_TMRVAL_MODE1_PREG},
{0x0330, SIERRA_CPICAL_TMRVAL_MODE0_PREG},
{0x01FF, SIERRA_CPICAL_PICNT_MODE1_PREG},
{0x0009, SIERRA_CPI_OUTBUF_RATESEL_PREG},
{0x3232, SIERRA_CPICAL_RES_STARTCODE_MODE23_PREG},
{0x0005, SIERRA_LFPSDET_SUPPORT_PREG},
{0x000F, SIERRA_LFPSFILT_NS_PREG},
{0x0009, SIERRA_LFPSFILT_RD_PREG},
{0x0001, SIERRA_LFPSFILT_MP_PREG},
{0x6013, SIERRA_SIGDET_SUPPORT_PREG},
{0x8013, SIERRA_SDFILT_H2L_A_PREG},
{0x8009, SIERRA_SDFILT_L2H_PREG},
{0x0024, SIERRA_RXBUFFER_CTLECTRL_PREG},
{0x0020, SIERRA_RXBUFFER_RCDFECTRL_PREG},
{0x4243, SIERRA_RXBUFFER_DFECTRL_PREG}
};
static struct cdns_sierra_vals usb_100_ext_ssc_cmn_vals = {
.reg_pairs = cdns_usb_cmn_regs_ext_ssc,
.num_regs = ARRAY_SIZE(cdns_usb_cmn_regs_ext_ssc),
};
static struct cdns_sierra_vals usb_100_ext_ssc_ln_vals = {
.reg_pairs = cdns_usb_ln_regs_ext_ssc,
.num_regs = ARRAY_SIZE(cdns_usb_ln_regs_ext_ssc),
};
/* SGMII PHY common configuration */
static const struct cdns_reg_pairs sgmii_pma_cmn_vals[] = {
{0x0180, SIERRA_SDOSCCAL_CLK_CNT_PREG},
{0x6000, SIERRA_CMN_REFRCV_PREG},
{0x0031, SIERRA_CMN_RESCAL_CTRLA_PREG},
{0x001C, SIERRA_CMN_PLLLC_FBDIV_INT_MODE0_PREG},
{0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
{0x0000, SIERRA_CMN_PLLLC_LOCKSEARCH_PREG},
{0x8103, SIERRA_CMN_PLLLC_CLK0_PREG},
{0x0000, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG},
{0x0027, SIERRA_CMN_PLLCSM_PLLEN_TMR_PREG},
{0x0062, SIERRA_CMN_PLLCSM_PLLPRE_TMR_PREG},
{0x0800, SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG},
{0x0000, SIERRA_CMN_PLLLC_INIT_PREG},
{0x0000, SIERRA_CMN_PLLLC_ITERTMR_PREG},
{0x0020, SIERRA_CMN_PLLLC_LOCK_CNTSTART_PREG},
{0x0013, SIERRA_CMN_PLLLC_DCOCAL_CTRL_PREG},
{0x0013, SIERRA_CMN_PLLLC1_DCOCAL_CTRL_PREG},
};
static struct cdns_sierra_vals sgmii_cmn_vals = {
.reg_pairs = sgmii_pma_cmn_vals,
.num_regs = ARRAY_SIZE(sgmii_pma_cmn_vals),
};
/* SGMII PHY lane configuration */
static const struct cdns_reg_pairs sgmii_ln_regs[] = {
{0x691E, SIERRA_DET_STANDEC_D_PREG},
{0x0FFE, SIERRA_PSC_RX_A0_PREG},
{0x0104, SIERRA_PLLCTRL_FBDIV_MODE01_PREG},
{0x0013, SIERRA_PLLCTRL_SUBRATE_PREG},
{0x0106, SIERRA_PLLCTRL_GEN_D_PREG},
{0x5234, SIERRA_PLLCTRL_CPGAIN_MODE_PREG},
{0x0000, SIERRA_DRVCTRL_ATTEN_PREG},
{0x00AB, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
{0x3C0E, SIERRA_CREQ_CCLKDET_MODE01_PREG},
{0x3220, SIERRA_CREQ_FSMCLK_SEL_PREG},
{0x0000, SIERRA_CREQ_EQ_CTRL_PREG},
{0x6320, SIERRA_DEQ_CONCUR_EPIOFFSET_MODE_PREG},
{0x0000, SIERRA_CPI_OUTBUF_RATESEL_PREG},
{0x15A2, SIERRA_LN_SPARE_REG_PREG},
{0x7900, SIERRA_DEQ_BLK_TAU_CTRL1_PREG},
{0x2202, SIERRA_DEQ_BLK_TAU_CTRL4_PREG},
{0x2206, SIERRA_DEQ_TAU_CTRL2_PREG},
{0x0005, SIERRA_LANE_TX_RECEIVER_DETECT_PREG},
{0x8001, SIERRA_CREQ_SPARE_PREG},
{0x0000, SIERRA_DEQ_CONCUR_CTRL1_PREG},
{0xD004, SIERRA_DEQ_CONCUR_CTRL2_PREG},
{0x0101, SIERRA_DEQ_GLUT9},
{0x0101, SIERRA_DEQ_GLUT10},
{0x0101, SIERRA_DEQ_GLUT11},
{0x0101, SIERRA_DEQ_GLUT12},
{0x0000, SIERRA_DEQ_GLUT13},
{0x0000, SIERRA_DEQ_GLUT16},
{0x0000, SIERRA_POSTPRECUR_EN_CEPH_CTRL_PREG},
{0x0000, SIERRA_TAU_EN_CEPH2TO0_PREG},
{0x0003, SIERRA_TAU_EN_CEPH5TO3_PREG},
{0x0101, SIERRA_DEQ_ALUT8},
{0x0101, SIERRA_DEQ_ALUT9},
{0x0100, SIERRA_DEQ_ALUT10},
{0x0000, SIERRA_OEPH_EN_CTRL_PREG},
{0x5425, SIERRA_DEQ_OPENEYE_CTRL_PREG},
{0x7458, SIERRA_CPICAL_RES_STARTCODE_MODE23_PREG},
{0x321F, SIERRA_CPICAL_RES_STARTCODE_MODE01_PREG},
};
static struct cdns_sierra_vals sgmii_pma_ln_vals = {
.reg_pairs = sgmii_ln_regs,
.num_regs = ARRAY_SIZE(sgmii_ln_regs),
};
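/*
* Register value tables are indexed as [own PHY type][coupled PHY type][SSC mode];
* TYPE_NONE is used as the coupled type for single-link configurations.
*/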
static const struct cdns_sierra_data cdns_map_sierra = {
.id_value = SIERRA_MACRO_ID,
.block_offset_shift = 0x2,
.reg_offset_shift = 0x2,
.pcs_cmn_vals = {
[TYPE_PCIE] = {
[TYPE_NONE] = {
[NO_SSC] = &pcie_phy_pcs_cmn_vals,
[EXTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
[INTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
},
[TYPE_SGMII] = {
[NO_SSC] = &pcie_phy_pcs_cmn_vals,
[EXTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
[INTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
},
[TYPE_QSGMII] = {
[NO_SSC] = &pcie_phy_pcs_cmn_vals,
[EXTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
[INTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
},
},
},
.pma_cmn_vals = {
[TYPE_PCIE] = {
[TYPE_NONE] = {
[NO_SSC] = &pcie_100_no_ssc_cmn_vals,
[EXTERNAL_SSC] = &pcie_100_ext_ssc_cmn_vals,
[INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals,
},
[TYPE_SGMII] = {
[NO_SSC] = &pcie_100_no_ssc_plllc_cmn_vals,
[EXTERNAL_SSC] = &pcie_100_ext_ssc_plllc_cmn_vals,
[INTERNAL_SSC] = &pcie_100_int_ssc_plllc_cmn_vals,
},
[TYPE_QSGMII] = {
[NO_SSC] = &pcie_100_no_ssc_plllc_cmn_vals,
[EXTERNAL_SSC] = &pcie_100_ext_ssc_plllc_cmn_vals,
[INTERNAL_SSC] = &pcie_100_int_ssc_plllc_cmn_vals,
},
},
[TYPE_USB] = {
[TYPE_NONE] = {
[EXTERNAL_SSC] = &usb_100_ext_ssc_cmn_vals,
},
},
[TYPE_SGMII] = {
[TYPE_NONE] = {
[NO_SSC] = &sgmii_cmn_vals,
},
[TYPE_PCIE] = {
[NO_SSC] = &sgmii_100_no_ssc_plllc1_opt3_cmn_vals,
[EXTERNAL_SSC] = &sgmii_100_no_ssc_plllc1_opt3_cmn_vals,
[INTERNAL_SSC] = &sgmii_100_no_ssc_plllc1_opt3_cmn_vals,
},
},
[TYPE_QSGMII] = {
[TYPE_PCIE] = {
[NO_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals,
[EXTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals,
[INTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals,
},
},
},
.pma_ln_vals = {
[TYPE_PCIE] = {
[TYPE_NONE] = {
[NO_SSC] = &pcie_100_no_ssc_ln_vals,
[EXTERNAL_SSC] = &pcie_100_ext_ssc_ln_vals,
[INTERNAL_SSC] = &pcie_100_int_ssc_ln_vals,
},
[TYPE_SGMII] = {
[NO_SSC] = &ml_pcie_100_no_ssc_ln_vals,
[EXTERNAL_SSC] = &ml_pcie_100_ext_ssc_ln_vals,
[INTERNAL_SSC] = &ml_pcie_100_int_ssc_ln_vals,
},
[TYPE_QSGMII] = {
[NO_SSC] = &ml_pcie_100_no_ssc_ln_vals,
[EXTERNAL_SSC] = &ml_pcie_100_ext_ssc_ln_vals,
[INTERNAL_SSC] = &ml_pcie_100_int_ssc_ln_vals,
},
},
[TYPE_USB] = {
[TYPE_NONE] = {
[EXTERNAL_SSC] = &usb_100_ext_ssc_ln_vals,
},
},
[TYPE_SGMII] = {
[TYPE_NONE] = {
[NO_SSC] = &sgmii_pma_ln_vals,
},
[TYPE_PCIE] = {
[NO_SSC] = &sgmii_100_no_ssc_plllc1_opt3_ln_vals,
[EXTERNAL_SSC] = &sgmii_100_no_ssc_plllc1_opt3_ln_vals,
[INTERNAL_SSC] = &sgmii_100_no_ssc_plllc1_opt3_ln_vals,
},
},
[TYPE_QSGMII] = {
[TYPE_PCIE] = {
[NO_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals,
[EXTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals,
[INTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals,
},
},
},
};
static const struct cdns_sierra_data cdns_ti_map_sierra = {
.id_value = SIERRA_MACRO_ID,
.block_offset_shift = 0x0,
.reg_offset_shift = 0x1,
.pcs_cmn_vals = {
[TYPE_PCIE] = {
[TYPE_NONE] = {
[NO_SSC] = &pcie_phy_pcs_cmn_vals,
[EXTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
[INTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
},
[TYPE_SGMII] = {
[NO_SSC] = &pcie_phy_pcs_cmn_vals,
[EXTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
[INTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
},
[TYPE_QSGMII] = {
[NO_SSC] = &pcie_phy_pcs_cmn_vals,
[EXTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
[INTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
},
},
},
.phy_pma_ln_vals = {
[TYPE_SGMII] = {
[TYPE_PCIE] = {
[NO_SSC] = &sgmii_phy_pma_ln_vals,
[EXTERNAL_SSC] = &sgmii_phy_pma_ln_vals,
[INTERNAL_SSC] = &sgmii_phy_pma_ln_vals,
},
},
[TYPE_QSGMII] = {
[TYPE_PCIE] = {
[NO_SSC] = &qsgmii_phy_pma_ln_vals,
[EXTERNAL_SSC] = &qsgmii_phy_pma_ln_vals,
[INTERNAL_SSC] = &qsgmii_phy_pma_ln_vals,
},
},
},
.pma_cmn_vals = {
[TYPE_PCIE] = {
[TYPE_NONE] = {
[NO_SSC] = &pcie_100_no_ssc_cmn_vals,
[EXTERNAL_SSC] = &pcie_100_ext_ssc_cmn_vals,
[INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals,
},
[TYPE_SGMII] = {
[NO_SSC] = &pcie_100_no_ssc_plllc_cmn_vals,
[EXTERNAL_SSC] = &pcie_100_ext_ssc_plllc_cmn_vals,
[INTERNAL_SSC] = &pcie_100_int_ssc_plllc_cmn_vals,
},
[TYPE_QSGMII] = {
[NO_SSC] = &pcie_100_no_ssc_plllc_cmn_vals,
[EXTERNAL_SSC] = &pcie_100_ext_ssc_plllc_cmn_vals,
[INTERNAL_SSC] = &pcie_100_int_ssc_plllc_cmn_vals,
},
},
[TYPE_USB] = {
[TYPE_NONE] = {
[EXTERNAL_SSC] = &usb_100_ext_ssc_cmn_vals,
},
},
[TYPE_SGMII] = {
[TYPE_PCIE] = {
[NO_SSC] = &sgmii_100_no_ssc_plllc1_opt3_cmn_vals,
[EXTERNAL_SSC] = &sgmii_100_no_ssc_plllc1_opt3_cmn_vals,
[INTERNAL_SSC] = &sgmii_100_no_ssc_plllc1_opt3_cmn_vals,
},
},
[TYPE_QSGMII] = {
[TYPE_PCIE] = {
[NO_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals,
[EXTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals,
[INTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals,
},
},
},
.pma_ln_vals = {
[TYPE_PCIE] = {
[TYPE_NONE] = {
[NO_SSC] = &pcie_100_no_ssc_ln_vals,
[EXTERNAL_SSC] = &pcie_100_ext_ssc_ln_vals,
[INTERNAL_SSC] = &pcie_100_int_ssc_ln_vals,
},
[TYPE_SGMII] = {
[NO_SSC] = &ti_ml_pcie_100_no_ssc_ln_vals,
[EXTERNAL_SSC] = &ti_ml_pcie_100_ext_ssc_ln_vals,
[INTERNAL_SSC] = &ti_ml_pcie_100_int_ssc_ln_vals,
},
[TYPE_QSGMII] = {
[NO_SSC] = &ti_ml_pcie_100_no_ssc_ln_vals,
[EXTERNAL_SSC] = &ti_ml_pcie_100_ext_ssc_ln_vals,
[INTERNAL_SSC] = &ti_ml_pcie_100_int_ssc_ln_vals,
},
},
[TYPE_USB] = {
[TYPE_NONE] = {
[EXTERNAL_SSC] = &usb_100_ext_ssc_ln_vals,
},
},
[TYPE_SGMII] = {
[TYPE_PCIE] = {
[NO_SSC] = &sgmii_100_no_ssc_plllc1_opt3_ln_vals,
[EXTERNAL_SSC] = &sgmii_100_no_ssc_plllc1_opt3_ln_vals,
[INTERNAL_SSC] = &sgmii_100_no_ssc_plllc1_opt3_ln_vals,
},
},
[TYPE_QSGMII] = {
[TYPE_PCIE] = {
[NO_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals,
[EXTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals,
[INTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals,
},
},
},
};
static const struct of_device_id cdns_sierra_id_table[] = {
{
.compatible = "cdns,sierra-phy-t0",
.data = &cdns_map_sierra,
},
{
.compatible = "ti,sierra-phy-t0",
.data = &cdns_ti_map_sierra,
},
{}
};
MODULE_DEVICE_TABLE(of, cdns_sierra_id_table);
static struct platform_driver cdns_sierra_driver = {
.probe = cdns_sierra_phy_probe,
.remove_new = cdns_sierra_phy_remove,
.driver = {
.name = "cdns-sierra-phy",
.of_match_table = cdns_sierra_id_table,
},
};
module_platform_driver(cdns_sierra_driver);
MODULE_ALIAS("platform:cdns_sierra");
MODULE_AUTHOR("Cadence Design Systems");
MODULE_DESCRIPTION("CDNS sierra phy driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/phy/cadence/phy-cadence-sierra.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2022 Texas Instruments Incorporated - https://www.ti.com/
*/
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/phy/phy.h>
#include <linux/phy/phy-mipi-dphy.h>
#include <linux/platform_device.h>
#include <linux/sys_soc.h>
#define DPHY_PMA_CMN(reg) (reg)
#define DPHY_PCS(reg) (0xb00 + (reg))
#define DPHY_ISO(reg) (0xc00 + (reg))
#define DPHY_WRAP(reg) (0x1000 + (reg))
#define DPHY_CMN_SSM DPHY_PMA_CMN(0x20)
#define DPHY_CMN_RX_MODE_EN BIT(10)
#define DPHY_CMN_RX_BANDGAP_TIMER_MASK GENMASK(8, 1)
#define DPHY_CMN_SSM_EN BIT(0)
#define DPHY_CMN_RX_BANDGAP_TIMER 0x14
#define DPHY_BAND_CFG DPHY_PCS(0x0)
#define DPHY_BAND_CFG_RIGHT_BAND GENMASK(9, 5)
#define DPHY_BAND_CFG_LEFT_BAND GENMASK(4, 0)
#define DPHY_POWER_ISLAND_EN_DATA DPHY_PCS(0x8)
#define DPHY_POWER_ISLAND_EN_DATA_VAL 0xaaaaaaaa
#define DPHY_POWER_ISLAND_EN_CLK DPHY_PCS(0xc)
#define DPHY_POWER_ISLAND_EN_CLK_VAL 0xaa
#define DPHY_LANE DPHY_WRAP(0x0)
#define DPHY_LANE_RESET_CMN_EN BIT(23)
#define DPHY_ISO_CL_CTRL_L DPHY_ISO(0x10)
#define DPHY_ISO_DL_CTRL_L0 DPHY_ISO(0x14)
#define DPHY_ISO_DL_CTRL_L1 DPHY_ISO(0x20)
#define DPHY_ISO_DL_CTRL_L2 DPHY_ISO(0x30)
#define DPHY_ISO_DL_CTRL_L3 DPHY_ISO(0x3c)
#define DPHY_ISO_LANE_READY_BIT 0
#define DPHY_ISO_LANE_READY_TIMEOUT_MS 100UL
#define DPHY_LANES_MIN 1
#define DPHY_LANES_MAX 4
struct cdns_dphy_rx {
void __iomem *regs;
struct device *dev;
struct phy *phy;
};
struct cdns_dphy_rx_band {
/* Rates are in Mbps. */
unsigned int min_rate;
unsigned int max_rate;
};
struct cdns_dphy_soc_data {
bool has_hw_cmn_rstb;
};
/* Order of bands is important since the index is the band number. */
static const struct cdns_dphy_rx_band bands[] = {
{ 80, 100 }, { 100, 120 }, { 120, 160 }, { 160, 200 }, { 200, 240 },
{ 240, 280 }, { 280, 320 }, { 320, 360 }, { 360, 400 }, { 400, 480 },
{ 480, 560 }, { 560, 640 }, { 640, 720 }, { 720, 800 }, { 800, 880 },
{ 880, 1040 }, { 1040, 1200 }, { 1200, 1350 }, { 1350, 1500 },
{ 1500, 1750 }, { 1750, 2000 }, { 2000, 2250 }, { 2250, 2500 }
};
static int cdns_dphy_rx_power_on(struct phy *phy)
{
struct cdns_dphy_rx *dphy = phy_get_drvdata(phy);
/* Start RX state machine. */
writel(DPHY_CMN_SSM_EN | DPHY_CMN_RX_MODE_EN |
FIELD_PREP(DPHY_CMN_RX_BANDGAP_TIMER_MASK,
DPHY_CMN_RX_BANDGAP_TIMER),
dphy->regs + DPHY_CMN_SSM);
return 0;
}
static int cdns_dphy_rx_power_off(struct phy *phy)
{
struct cdns_dphy_rx *dphy = phy_get_drvdata(phy);
writel(0, dphy->regs + DPHY_CMN_SSM);
return 0;
}
static int cdns_dphy_rx_get_band_ctrl(unsigned long hs_clk_rate)
{
unsigned int rate, i;
rate = hs_clk_rate / 1000000UL;
/* Since CSI-2 clock is DDR, the bit rate is twice the clock rate. */
rate *= 2;
if (rate < bands[0].min_rate)
return -EOPNOTSUPP;
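/* Return the index of the first band whose maximum rate covers the bit rate */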
for (i = 0; i < ARRAY_SIZE(bands); i++)
if (rate < bands[i].max_rate)
return i;
return -EOPNOTSUPP;
}
static inline int cdns_dphy_rx_wait_for_bit(void __iomem *addr,
unsigned int bit)
{
u32 val;
return readl_relaxed_poll_timeout(addr, val, val & BIT(bit), 10,
DPHY_ISO_LANE_READY_TIMEOUT_MS * 1000);
}
static int cdns_dphy_rx_wait_lane_ready(struct cdns_dphy_rx *dphy,
unsigned int lanes)
{
static const u32 data_lane_ctrl[] = {DPHY_ISO_DL_CTRL_L0,
DPHY_ISO_DL_CTRL_L1,
DPHY_ISO_DL_CTRL_L2,
DPHY_ISO_DL_CTRL_L3};
void __iomem *reg = dphy->regs;
unsigned int i;
int ret;
/* Clock lane */
ret = cdns_dphy_rx_wait_for_bit(reg + DPHY_ISO_CL_CTRL_L,
DPHY_ISO_LANE_READY_BIT);
if (ret)
return ret;
for (i = 0; i < lanes; i++) {
ret = cdns_dphy_rx_wait_for_bit(reg + data_lane_ctrl[i],
DPHY_ISO_LANE_READY_BIT);
if (ret)
return ret;
}
return 0;
}
static struct cdns_dphy_soc_data j721e_soc_data = {
.has_hw_cmn_rstb = true,
};
static const struct soc_device_attribute cdns_dphy_socinfo[] = {
{
.family = "J721E",
.revision = "SR1.0",
.data = &j721e_soc_data,
},
{/* sentinel */}
};
static int cdns_dphy_rx_configure(struct phy *phy,
union phy_configure_opts *opts)
{
struct cdns_dphy_rx *dphy = phy_get_drvdata(phy);
unsigned int reg, lanes = opts->mipi_dphy.lanes;
const struct cdns_dphy_soc_data *soc_data = NULL;
const struct soc_device_attribute *soc;
int band_ctrl, ret;
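/*
* Only drive the common lane reset control from the wrapper register when
* the SoC does not provide a hardware cmn_rstb (or no SoC match is found).
*/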
soc = soc_device_match(cdns_dphy_socinfo);
if (soc && soc->data)
soc_data = soc->data;
if (!soc || (soc_data && !soc_data->has_hw_cmn_rstb)) {
reg = DPHY_LANE_RESET_CMN_EN;
writel(reg, dphy->regs + DPHY_LANE);
}
/* Data lanes. At least one data lane is mandatory. */
if (lanes < DPHY_LANES_MIN || lanes > DPHY_LANES_MAX)
return -EINVAL;
band_ctrl = cdns_dphy_rx_get_band_ctrl(opts->mipi_dphy.hs_clk_rate);
if (band_ctrl < 0)
return band_ctrl;
reg = FIELD_PREP(DPHY_BAND_CFG_LEFT_BAND, band_ctrl) |
FIELD_PREP(DPHY_BAND_CFG_RIGHT_BAND, band_ctrl);
writel(reg, dphy->regs + DPHY_BAND_CFG);
/*
* Set the required power island phase 2 time. This is mandated by DPHY
* specs.
*/
reg = DPHY_POWER_ISLAND_EN_DATA_VAL;
writel(reg, dphy->regs + DPHY_POWER_ISLAND_EN_DATA);
reg = DPHY_POWER_ISLAND_EN_CLK_VAL;
writel(reg, dphy->regs + DPHY_POWER_ISLAND_EN_CLK);
ret = cdns_dphy_rx_wait_lane_ready(dphy, lanes);
if (ret) {
dev_err(dphy->dev, "DPHY wait for lane ready timeout\n");
return ret;
}
return 0;
}
static int cdns_dphy_rx_validate(struct phy *phy, enum phy_mode mode,
int submode, union phy_configure_opts *opts)
{
int ret;
if (mode != PHY_MODE_MIPI_DPHY)
return -EINVAL;
ret = cdns_dphy_rx_get_band_ctrl(opts->mipi_dphy.hs_clk_rate);
if (ret < 0)
return ret;
return phy_mipi_dphy_config_validate(&opts->mipi_dphy);
}
static const struct phy_ops cdns_dphy_rx_ops = {
.power_on = cdns_dphy_rx_power_on,
.power_off = cdns_dphy_rx_power_off,
.configure = cdns_dphy_rx_configure,
.validate = cdns_dphy_rx_validate,
};
static int cdns_dphy_rx_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct phy_provider *provider;
struct cdns_dphy_rx *dphy;
dphy = devm_kzalloc(dev, sizeof(*dphy), GFP_KERNEL);
if (!dphy)
return -ENOMEM;
dev_set_drvdata(dev, dphy);
dphy->dev = dev;
dphy->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dphy->regs))
return PTR_ERR(dphy->regs);
dphy->phy = devm_phy_create(dev, NULL, &cdns_dphy_rx_ops);
if (IS_ERR(dphy->phy)) {
dev_err(dev, "Failed to create PHY: %ld\n", PTR_ERR(dphy->phy));
return PTR_ERR(dphy->phy);
}
phy_set_drvdata(dphy->phy, dphy);
provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
if (IS_ERR(provider)) {
dev_err(dev, "Failed to register PHY provider: %ld\n",
PTR_ERR(provider));
return PTR_ERR(provider);
}
return 0;
}
static const struct of_device_id cdns_dphy_rx_of_match[] = {
{ .compatible = "cdns,dphy-rx" },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, cdns_dphy_rx_of_match);
static struct platform_driver cdns_dphy_rx_platform_driver = {
.probe = cdns_dphy_rx_probe,
.driver = {
.name = "cdns-mipi-dphy-rx",
.of_match_table = cdns_dphy_rx_of_match,
},
};
module_platform_driver(cdns_dphy_rx_platform_driver);
MODULE_AUTHOR("Pratyush Yadav <[email protected]>");
MODULE_DESCRIPTION("Cadence D-PHY Rx Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/phy/cadence/cdns-dphy-rx.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* The Salvo PHY is a 28nm legacy PHY used only for USB3 and USB2.
*
* Copyright (c) 2019-2020 NXP
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#define USB3_PHY_OFFSET 0x0
#define USB2_PHY_OFFSET 0x38000
/* USB3 PHY register definition */
#define PHY_PMA_CMN_CTRL1 0xC800
#define TB_ADDR_CMN_DIAG_HSCLK_SEL 0x01e0
#define TB_ADDR_CMN_PLL0_VCOCAL_INIT_TMR 0x0084
#define TB_ADDR_CMN_PLL0_VCOCAL_ITER_TMR 0x0085
#define TB_ADDR_CMN_PLL0_INTDIV 0x0094
#define TB_ADDR_CMN_PLL0_FRACDIV 0x0095
#define TB_ADDR_CMN_PLL0_HIGH_THR 0x0096
#define TB_ADDR_CMN_PLL0_SS_CTRL1 0x0098
#define TB_ADDR_CMN_PLL0_SS_CTRL2 0x0099
#define TB_ADDR_CMN_PLL0_DSM_DIAG 0x0097
#define TB_ADDR_CMN_DIAG_PLL0_OVRD 0x01c2
#define TB_ADDR_CMN_DIAG_PLL0_FBH_OVRD 0x01c0
#define TB_ADDR_CMN_DIAG_PLL0_FBL_OVRD 0x01c1
#define TB_ADDR_CMN_DIAG_PLL0_V2I_TUNE 0x01C5
#define TB_ADDR_CMN_DIAG_PLL0_CP_TUNE 0x01C6
#define TB_ADDR_CMN_DIAG_PLL0_LF_PROG 0x01C7
#define TB_ADDR_CMN_DIAG_PLL0_TEST_MODE 0x01c4
#define TB_ADDR_CMN_PSM_CLK_CTRL 0x0061
#define TB_ADDR_XCVR_DIAG_RX_LANE_CAL_RST_TMR 0x40ea
#define TB_ADDR_XCVR_PSM_RCTRL 0x4001
#define TB_ADDR_TX_PSC_A0 0x4100
#define TB_ADDR_TX_PSC_A1 0x4101
#define TB_ADDR_TX_PSC_A2 0x4102
#define TB_ADDR_TX_PSC_A3 0x4103
#define TB_ADDR_TX_DIAG_ECTRL_OVRD 0x41f5
#define TB_ADDR_TX_PSC_CAL 0x4106
#define TB_ADDR_TX_PSC_RDY 0x4107
#define TB_ADDR_RX_PSC_A0 0x8000
#define TB_ADDR_RX_PSC_A1 0x8001
#define TB_ADDR_RX_PSC_A2 0x8002
#define TB_ADDR_RX_PSC_A3 0x8003
#define TB_ADDR_RX_PSC_CAL 0x8006
#define TB_ADDR_RX_PSC_RDY 0x8007
#define TB_ADDR_TX_TXCC_MGNLS_MULT_000 0x4058
#define TB_ADDR_TX_DIAG_BGREF_PREDRV_DELAY 0x41e7
#define TB_ADDR_RX_SLC_CU_ITER_TMR 0x80e3
#define TB_ADDR_RX_SIGDET_HL_FILT_TMR 0x8090
#define TB_ADDR_RX_SAMP_DAC_CTRL 0x8058
#define TB_ADDR_RX_DIAG_SIGDET_TUNE 0x81dc
#define TB_ADDR_RX_DIAG_LFPSDET_TUNE2 0x81df
#define TB_ADDR_RX_DIAG_BS_TM 0x81f5
#define TB_ADDR_RX_DIAG_DFE_CTRL1 0x81d3
#define TB_ADDR_RX_DIAG_ILL_IQE_TRIM4 0x81c7
#define TB_ADDR_RX_DIAG_ILL_E_TRIM0 0x81c2
#define TB_ADDR_RX_DIAG_ILL_IQ_TRIM0 0x81c1
#define TB_ADDR_RX_DIAG_ILL_IQE_TRIM6 0x81c9
#define TB_ADDR_RX_DIAG_RXFE_TM3 0x81f8
#define TB_ADDR_RX_DIAG_RXFE_TM4 0x81f9
#define TB_ADDR_RX_DIAG_LFPSDET_TUNE 0x81dd
#define TB_ADDR_RX_DIAG_DFE_CTRL3 0x81d5
#define TB_ADDR_RX_DIAG_SC2C_DELAY 0x81e1
#define TB_ADDR_RX_REE_VGA_GAIN_NODFE 0x81bf
#define TB_ADDR_XCVR_PSM_CAL_TMR 0x4002
#define TB_ADDR_XCVR_PSM_A0BYP_TMR 0x4004
#define TB_ADDR_XCVR_PSM_A0IN_TMR 0x4003
#define TB_ADDR_XCVR_PSM_A1IN_TMR 0x4005
#define TB_ADDR_XCVR_PSM_A2IN_TMR 0x4006
#define TB_ADDR_XCVR_PSM_A3IN_TMR 0x4007
#define TB_ADDR_XCVR_PSM_A4IN_TMR 0x4008
#define TB_ADDR_XCVR_PSM_A5IN_TMR 0x4009
#define TB_ADDR_XCVR_PSM_A0OUT_TMR 0x400a
#define TB_ADDR_XCVR_PSM_A1OUT_TMR 0x400b
#define TB_ADDR_XCVR_PSM_A2OUT_TMR 0x400c
#define TB_ADDR_XCVR_PSM_A3OUT_TMR 0x400d
#define TB_ADDR_XCVR_PSM_A4OUT_TMR 0x400e
#define TB_ADDR_XCVR_PSM_A5OUT_TMR 0x400f
#define TB_ADDR_TX_RCVDET_EN_TMR 0x4122
#define TB_ADDR_TX_RCVDET_ST_TMR 0x4123
#define TB_ADDR_XCVR_DIAG_LANE_FCM_EN_MGN_TMR 0x40f2
#define TB_ADDR_TX_RCVDETSC_CTRL 0x4124
/* USB2 PHY register definition */
#define UTMI_REG15 0xaf
#define UTMI_AFE_RX_REG0 0x0d
#define UTMI_AFE_RX_REG5 0x12
#define UTMI_AFE_BC_REG4 0x29
/* Values correspond to the UTMI_AFE_RX_REG0 bit[7:6] field encoding */
enum usb2_disconn_threshold {
USB2_DISCONN_THRESHOLD_575 = 0x0,
USB2_DISCONN_THRESHOLD_610 = 0x1,
USB2_DISCONN_THRESHOLD_645 = 0x3,
};
#define RX_USB2_DISCONN_MASK GENMASK(7, 6)
/* TB_ADDR_TX_RCVDETSC_CTRL */
#define RXDET_IN_P3_32KHZ BIT(0)
/*
* UTMI_REG15
*
* Number of microseconds to gate the txvalid signal until the
* analog HS/FS transmitters have powered up
*/
#define TXVALID_GATE_THRESHOLD_HS_MASK (BIT(4) | BIT(5))
/* 0us, txvalid is ready just after HS/FS transmitters have powered up */
#define TXVALID_GATE_THRESHOLD_HS_0US (BIT(4) | BIT(5))
#define SET_B_SESSION_VALID (BIT(6) | BIT(5))
#define CLR_B_SESSION_VALID (BIT(6))
struct cdns_reg_pairs {
u16 val;
u32 off;
};
struct cdns_salvo_data {
u8 reg_offset_shift;
const struct cdns_reg_pairs *init_sequence_val;
u8 init_sequence_length;
};
struct cdns_salvo_phy {
struct phy *phy;
struct clk *clk;
void __iomem *base;
struct cdns_salvo_data *data;
enum usb2_disconn_threshold usb2_disconn;
};
static const struct of_device_id cdns_salvo_phy_of_match[];
static const struct cdns_salvo_data cdns_nxp_salvo_data;
static bool cdns_is_nxp_phy(struct cdns_salvo_phy *salvo_phy)
{
return salvo_phy->data == &cdns_nxp_salvo_data;
}
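/*
* PHY registers hold 16-bit values; the register index is scaled by the
* per-SoC reg_offset_shift to form the byte offset within each block.
*/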
static u16 cdns_salvo_read(struct cdns_salvo_phy *salvo_phy, u32 offset, u32 reg)
{
return (u16)readl(salvo_phy->base + offset +
reg * (1 << salvo_phy->data->reg_offset_shift));
}
static void cdns_salvo_write(struct cdns_salvo_phy *salvo_phy, u32 offset,
u32 reg, u16 val)
{
writel(val, salvo_phy->base + offset +
reg * (1 << salvo_phy->data->reg_offset_shift));
}
/*
* The bringup sequence below comes from the Cadence PHY User Guide
* and NXP platform tuning results.
*/
static const struct cdns_reg_pairs cdns_nxp_sequence_pair[] = {
{0x0830, PHY_PMA_CMN_CTRL1},
{0x0010, TB_ADDR_CMN_DIAG_HSCLK_SEL},
{0x00f0, TB_ADDR_CMN_PLL0_VCOCAL_INIT_TMR},
{0x0018, TB_ADDR_CMN_PLL0_VCOCAL_ITER_TMR},
{0x00d0, TB_ADDR_CMN_PLL0_INTDIV},
{0x4aaa, TB_ADDR_CMN_PLL0_FRACDIV},
{0x0034, TB_ADDR_CMN_PLL0_HIGH_THR},
{0x01ee, TB_ADDR_CMN_PLL0_SS_CTRL1},
{0x7f03, TB_ADDR_CMN_PLL0_SS_CTRL2},
{0x0020, TB_ADDR_CMN_PLL0_DSM_DIAG},
{0x0000, TB_ADDR_CMN_DIAG_PLL0_OVRD},
{0x0000, TB_ADDR_CMN_DIAG_PLL0_FBH_OVRD},
{0x0000, TB_ADDR_CMN_DIAG_PLL0_FBL_OVRD},
{0x0007, TB_ADDR_CMN_DIAG_PLL0_V2I_TUNE},
{0x0027, TB_ADDR_CMN_DIAG_PLL0_CP_TUNE},
{0x0008, TB_ADDR_CMN_DIAG_PLL0_LF_PROG},
{0x0022, TB_ADDR_CMN_DIAG_PLL0_TEST_MODE},
{0x000a, TB_ADDR_CMN_PSM_CLK_CTRL},
{0x0139, TB_ADDR_XCVR_DIAG_RX_LANE_CAL_RST_TMR},
{0xbefc, TB_ADDR_XCVR_PSM_RCTRL},
{0x7799, TB_ADDR_TX_PSC_A0},
{0x7798, TB_ADDR_TX_PSC_A1},
{0x509b, TB_ADDR_TX_PSC_A2},
{0x0003, TB_ADDR_TX_DIAG_ECTRL_OVRD},
{0x509b, TB_ADDR_TX_PSC_A3},
{0x2090, TB_ADDR_TX_PSC_CAL},
{0x2090, TB_ADDR_TX_PSC_RDY},
{0xA6FD, TB_ADDR_RX_PSC_A0},
{0xA6FD, TB_ADDR_RX_PSC_A1},
{0xA410, TB_ADDR_RX_PSC_A2},
{0x2410, TB_ADDR_RX_PSC_A3},
{0x23FF, TB_ADDR_RX_PSC_CAL},
{0x2010, TB_ADDR_RX_PSC_RDY},
{0x0020, TB_ADDR_TX_TXCC_MGNLS_MULT_000},
{0x00ff, TB_ADDR_TX_DIAG_BGREF_PREDRV_DELAY},
{0x0002, TB_ADDR_RX_SLC_CU_ITER_TMR},
{0x0013, TB_ADDR_RX_SIGDET_HL_FILT_TMR},
{0x0000, TB_ADDR_RX_SAMP_DAC_CTRL},
{0x1004, TB_ADDR_RX_DIAG_SIGDET_TUNE},
{0x4041, TB_ADDR_RX_DIAG_LFPSDET_TUNE2},
{0x0480, TB_ADDR_RX_DIAG_BS_TM},
{0x8006, TB_ADDR_RX_DIAG_DFE_CTRL1},
{0x003f, TB_ADDR_RX_DIAG_ILL_IQE_TRIM4},
{0x543f, TB_ADDR_RX_DIAG_ILL_E_TRIM0},
{0x543f, TB_ADDR_RX_DIAG_ILL_IQ_TRIM0},
{0x0000, TB_ADDR_RX_DIAG_ILL_IQE_TRIM6},
{0x8000, TB_ADDR_RX_DIAG_RXFE_TM3},
{0x0003, TB_ADDR_RX_DIAG_RXFE_TM4},
{0x2408, TB_ADDR_RX_DIAG_LFPSDET_TUNE},
{0x05ca, TB_ADDR_RX_DIAG_DFE_CTRL3},
{0x0258, TB_ADDR_RX_DIAG_SC2C_DELAY},
{0x1fff, TB_ADDR_RX_REE_VGA_GAIN_NODFE},
{0x02c6, TB_ADDR_XCVR_PSM_CAL_TMR},
{0x0002, TB_ADDR_XCVR_PSM_A0BYP_TMR},
{0x02c6, TB_ADDR_XCVR_PSM_A0IN_TMR},
{0x0010, TB_ADDR_XCVR_PSM_A1IN_TMR},
{0x0010, TB_ADDR_XCVR_PSM_A2IN_TMR},
{0x0010, TB_ADDR_XCVR_PSM_A3IN_TMR},
{0x0010, TB_ADDR_XCVR_PSM_A4IN_TMR},
{0x0010, TB_ADDR_XCVR_PSM_A5IN_TMR},
{0x0002, TB_ADDR_XCVR_PSM_A0OUT_TMR},
{0x0002, TB_ADDR_XCVR_PSM_A1OUT_TMR},
{0x0002, TB_ADDR_XCVR_PSM_A2OUT_TMR},
{0x0002, TB_ADDR_XCVR_PSM_A3OUT_TMR},
{0x0002, TB_ADDR_XCVR_PSM_A4OUT_TMR},
{0x0002, TB_ADDR_XCVR_PSM_A5OUT_TMR},
/* Change rx detect parameter */
{0x0960, TB_ADDR_TX_RCVDET_EN_TMR},
{0x01e0, TB_ADDR_TX_RCVDET_ST_TMR},
{0x0090, TB_ADDR_XCVR_DIAG_LANE_FCM_EN_MGN_TMR},
};
static int cdns_salvo_phy_init(struct phy *phy)
{
struct cdns_salvo_phy *salvo_phy = phy_get_drvdata(phy);
struct cdns_salvo_data *data = salvo_phy->data;
int ret, i;
u16 value;
ret = clk_prepare_enable(salvo_phy->clk);
if (ret)
return ret;
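/* Write the vendor-provided bring-up/tuning sequence to the USB3 PHY registers */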
for (i = 0; i < data->init_sequence_length; i++) {
const struct cdns_reg_pairs *reg_pair = data->init_sequence_val + i;
cdns_salvo_write(salvo_phy, USB3_PHY_OFFSET, reg_pair->off, reg_pair->val);
}
/* RXDET_IN_P3_32KHZ, Receiver detect slow clock enable */
value = cdns_salvo_read(salvo_phy, USB3_PHY_OFFSET, TB_ADDR_TX_RCVDETSC_CTRL);
value |= RXDET_IN_P3_32KHZ;
cdns_salvo_write(salvo_phy, USB3_PHY_OFFSET, TB_ADDR_TX_RCVDETSC_CTRL,
value);
value = cdns_salvo_read(salvo_phy, USB2_PHY_OFFSET, UTMI_REG15);
value &= ~TXVALID_GATE_THRESHOLD_HS_MASK;
cdns_salvo_write(salvo_phy, USB2_PHY_OFFSET, UTMI_REG15,
value | TXVALID_GATE_THRESHOLD_HS_0US);
cdns_salvo_write(salvo_phy, USB2_PHY_OFFSET, UTMI_AFE_RX_REG5, 0x5);
value = cdns_salvo_read(salvo_phy, USB2_PHY_OFFSET, UTMI_AFE_RX_REG0);
value &= ~RX_USB2_DISCONN_MASK;
value |= FIELD_PREP(RX_USB2_DISCONN_MASK, salvo_phy->usb2_disconn);
cdns_salvo_write(salvo_phy, USB2_PHY_OFFSET, UTMI_AFE_RX_REG0, value);
udelay(10);
clk_disable_unprepare(salvo_phy->clk);
return ret;
}
static int cdns_salvo_phy_power_on(struct phy *phy)
{
struct cdns_salvo_phy *salvo_phy = phy_get_drvdata(phy);
return clk_prepare_enable(salvo_phy->clk);
}
static int cdns_salvo_phy_power_off(struct phy *phy)
{
struct cdns_salvo_phy *salvo_phy = phy_get_drvdata(phy);
clk_disable_unprepare(salvo_phy->clk);
return 0;
}
static int cdns_salvo_set_mode(struct phy *phy, enum phy_mode mode, int submode)
{
struct cdns_salvo_phy *salvo_phy = phy_get_drvdata(phy);
if (!cdns_is_nxp_phy(salvo_phy))
return 0;
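/* In device mode, force B-session valid; otherwise clear it */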
if (mode == PHY_MODE_USB_DEVICE)
cdns_salvo_write(salvo_phy, USB2_PHY_OFFSET, UTMI_AFE_BC_REG4,
SET_B_SESSION_VALID);
else
cdns_salvo_write(salvo_phy, USB2_PHY_OFFSET, UTMI_AFE_BC_REG4,
CLR_B_SESSION_VALID);
return 0;
}
static const struct phy_ops cdns_salvo_phy_ops = {
.init = cdns_salvo_phy_init,
.power_on = cdns_salvo_phy_power_on,
.power_off = cdns_salvo_phy_power_off,
.owner = THIS_MODULE,
.set_mode = cdns_salvo_set_mode,
};
static int cdns_salvo_phy_probe(struct platform_device *pdev)
{
struct phy_provider *phy_provider;
struct device *dev = &pdev->dev;
struct cdns_salvo_phy *salvo_phy;
struct cdns_salvo_data *data;
u32 val;
data = (struct cdns_salvo_data *)of_device_get_match_data(dev);
salvo_phy = devm_kzalloc(dev, sizeof(*salvo_phy), GFP_KERNEL);
if (!salvo_phy)
return -ENOMEM;
salvo_phy->data = data;
salvo_phy->clk = devm_clk_get_optional(dev, "salvo_phy_clk");
if (IS_ERR(salvo_phy->clk))
return PTR_ERR(salvo_phy->clk);
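/* Select the supported disconnect threshold setting (575/610/645) for the requested value; default to 575 when the property is absent */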
if (of_property_read_u32(dev->of_node, "cdns,usb2-disconnect-threshold-microvolt", &val))
val = 575;
if (val < 610)
salvo_phy->usb2_disconn = USB2_DISCONN_THRESHOLD_575;
else if (val < 645)
salvo_phy->usb2_disconn = USB2_DISCONN_THRESHOLD_610;
else
salvo_phy->usb2_disconn = USB2_DISCONN_THRESHOLD_645;
salvo_phy->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(salvo_phy->base))
return PTR_ERR(salvo_phy->base);
salvo_phy->phy = devm_phy_create(dev, NULL, &cdns_salvo_phy_ops);
if (IS_ERR(salvo_phy->phy))
return PTR_ERR(salvo_phy->phy);
phy_set_drvdata(salvo_phy->phy, salvo_phy);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
return PTR_ERR_OR_ZERO(phy_provider);
}
static const struct cdns_salvo_data cdns_nxp_salvo_data = {
.reg_offset_shift = 2,
.init_sequence_val = cdns_nxp_sequence_pair,
.init_sequence_length = ARRAY_SIZE(cdns_nxp_sequence_pair),
};
static const struct of_device_id cdns_salvo_phy_of_match[] = {
{
.compatible = "nxp,salvo-phy",
.data = &cdns_nxp_salvo_data,
},
{}
};
MODULE_DEVICE_TABLE(of, cdns_salvo_phy_of_match);
static struct platform_driver cdns_salvo_phy_driver = {
.probe = cdns_salvo_phy_probe,
.driver = {
.name = "cdns-salvo-phy",
.of_match_table = cdns_salvo_phy_of_match,
}
};
module_platform_driver(cdns_salvo_phy_driver);
MODULE_AUTHOR("Peter Chen <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Cadence SALVO PHY Driver");
| linux-master | drivers/phy/cadence/phy-cadence-salvo.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas R-Car Gen3 PCIe PHY driver
*
* Copyright (C) 2018 Cogent Embedded, Inc.
*/
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#define PHY_CTRL 0x4000 /* R8A77980 only */
/* PHY control register (PHY_CTRL) */
#define PHY_CTRL_PHY_PWDN BIT(2)
struct rcar_gen3_phy {
struct phy *phy;
spinlock_t lock;
void __iomem *base;
};
static void rcar_gen3_phy_pcie_modify_reg(struct phy *p, unsigned int reg,
u32 clear, u32 set)
{
struct rcar_gen3_phy *phy = phy_get_drvdata(p);
void __iomem *base = phy->base;
unsigned long flags;
u32 value;
spin_lock_irqsave(&phy->lock, flags);
value = readl(base + reg);
value &= ~clear;
value |= set;
writel(value, base + reg);
spin_unlock_irqrestore(&phy->lock, flags);
}
static int r8a77980_phy_pcie_power_on(struct phy *p)
{
/* Power on the PCIe PHY */
rcar_gen3_phy_pcie_modify_reg(p, PHY_CTRL, PHY_CTRL_PHY_PWDN, 0);
return 0;
}
static int r8a77980_phy_pcie_power_off(struct phy *p)
{
/* Power off the PCIe PHY */
rcar_gen3_phy_pcie_modify_reg(p, PHY_CTRL, 0, PHY_CTRL_PHY_PWDN);
return 0;
}
static const struct phy_ops r8a77980_phy_pcie_ops = {
.power_on = r8a77980_phy_pcie_power_on,
.power_off = r8a77980_phy_pcie_power_off,
.owner = THIS_MODULE,
};
static const struct of_device_id rcar_gen3_phy_pcie_match_table[] = {
{ .compatible = "renesas,r8a77980-pcie-phy" },
{ }
};
MODULE_DEVICE_TABLE(of, rcar_gen3_phy_pcie_match_table);
static int rcar_gen3_phy_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct phy_provider *provider;
struct rcar_gen3_phy *phy;
void __iomem *base;
int error;
if (!dev->of_node) {
dev_err(dev,
"This driver must only be instantiated from the device tree\n");
return -EINVAL;
}
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
if (!phy)
return -ENOMEM;
spin_lock_init(&phy->lock);
phy->base = base;
/*
* devm_phy_create() will call pm_runtime_enable(&phy->dev);
* And then, phy-core will manage runtime PM for this device.
*/
pm_runtime_enable(dev);
phy->phy = devm_phy_create(dev, NULL, &r8a77980_phy_pcie_ops);
if (IS_ERR(phy->phy)) {
dev_err(dev, "Failed to create PCIe PHY\n");
error = PTR_ERR(phy->phy);
goto error;
}
phy_set_drvdata(phy->phy, phy);
provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
if (IS_ERR(provider)) {
dev_err(dev, "Failed to register PHY provider\n");
error = PTR_ERR(provider);
goto error;
}
return 0;
error:
pm_runtime_disable(dev);
return error;
}
static void rcar_gen3_phy_pcie_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
}
static struct platform_driver rcar_gen3_phy_driver = {
.driver = {
.name = "phy_rcar_gen3_pcie",
.of_match_table = rcar_gen3_phy_pcie_match_table,
},
.probe = rcar_gen3_phy_pcie_probe,
.remove_new = rcar_gen3_phy_pcie_remove,
};
module_platform_driver(rcar_gen3_phy_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Renesas R-Car Gen3 PCIe PHY");
MODULE_AUTHOR("Sergei Shtylyov <[email protected]>");
| linux-master | drivers/phy/renesas/phy-rcar-gen3-pcie.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas R-Car Gen3 for USB3.0 PHY driver
*
* Copyright (C) 2017 Renesas Electronics Corporation
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#define USB30_CLKSET0 0x034
#define USB30_CLKSET1 0x036
#define USB30_SSC_SET 0x038
#define USB30_PHY_ENABLE 0x060
#define USB30_VBUS_EN 0x064
/* USB30_CLKSET0 */
#define CLKSET0_PRIVATE 0x05c0
#define CLKSET0_USB30_FSEL_USB_EXTAL 0x0002
/* USB30_CLKSET1 */
#define CLKSET1_USB30_PLL_MULTI_SHIFT 6
#define CLKSET1_USB30_PLL_MULTI_USB_EXTAL (0x64 << \
CLKSET1_USB30_PLL_MULTI_SHIFT)
#define CLKSET1_PHYRESET BIT(4) /* 1: reset */
#define CLKSET1_REF_CLKDIV BIT(3) /* 1: USB_EXTAL */
#define CLKSET1_PRIVATE_2_1 BIT(1) /* Write B'01 */
#define CLKSET1_REF_CLK_SEL BIT(0) /* 1: USB3S0_CLK_P */
/* USB30_SSC_SET */
#define SSC_SET_SSC_EN BIT(12)
#define SSC_SET_RANGE_SHIFT 9
#define SSC_SET_RANGE_4980 (0x0 << SSC_SET_RANGE_SHIFT)
#define SSC_SET_RANGE_4492 (0x1 << SSC_SET_RANGE_SHIFT)
#define SSC_SET_RANGE_4003 (0x2 << SSC_SET_RANGE_SHIFT)
/* USB30_PHY_ENABLE */
#define PHY_ENABLE_RESET_EN BIT(4)
/* USB30_VBUS_EN */
#define VBUS_EN_VBUS_EN BIT(1)
struct rcar_gen3_usb3 {
void __iomem *base;
struct phy *phy;
u32 ssc_range;
bool usb3s_clk;
bool usb_extal;
};
static void write_clkset1_for_usb_extal(struct rcar_gen3_usb3 *r, bool reset)
{
u16 val = CLKSET1_USB30_PLL_MULTI_USB_EXTAL |
CLKSET1_REF_CLKDIV | CLKSET1_PRIVATE_2_1;
if (reset)
val |= CLKSET1_PHYRESET;
writew(val, r->base + USB30_CLKSET1);
}
static void rcar_gen3_phy_usb3_enable_ssc(struct rcar_gen3_usb3 *r)
{
u16 val = SSC_SET_SSC_EN;
switch (r->ssc_range) {
case 4980:
val |= SSC_SET_RANGE_4980;
break;
case 4492:
val |= SSC_SET_RANGE_4492;
break;
case 4003:
val |= SSC_SET_RANGE_4003;
break;
default:
dev_err(&r->phy->dev, "%s: unsupported range (%x)\n", __func__,
r->ssc_range);
return;
}
writew(val, r->base + USB30_SSC_SET);
}
static void rcar_gen3_phy_usb3_select_usb_extal(struct rcar_gen3_usb3 *r)
{
write_clkset1_for_usb_extal(r, false);
if (r->ssc_range)
rcar_gen3_phy_usb3_enable_ssc(r);
writew(CLKSET0_PRIVATE | CLKSET0_USB30_FSEL_USB_EXTAL,
r->base + USB30_CLKSET0);
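/* Briefly assert PHYRESET so the PHY comes up with the new clock settings */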
writew(PHY_ENABLE_RESET_EN, r->base + USB30_PHY_ENABLE);
write_clkset1_for_usb_extal(r, true);
usleep_range(10, 20);
write_clkset1_for_usb_extal(r, false);
}
static int rcar_gen3_phy_usb3_init(struct phy *p)
{
struct rcar_gen3_usb3 *r = phy_get_drvdata(p);
dev_vdbg(&r->phy->dev, "%s: enter (%d, %d, %d)\n", __func__,
r->usb3s_clk, r->usb_extal, r->ssc_range);
if (!r->usb3s_clk && r->usb_extal)
rcar_gen3_phy_usb3_select_usb_extal(r);
/* Always enable VBUS detection */
writew(VBUS_EN_VBUS_EN, r->base + USB30_VBUS_EN);
return 0;
}
static const struct phy_ops rcar_gen3_phy_usb3_ops = {
.init = rcar_gen3_phy_usb3_init,
.owner = THIS_MODULE,
};
static const struct of_device_id rcar_gen3_phy_usb3_match_table[] = {
{ .compatible = "renesas,rcar-gen3-usb3-phy" },
{ }
};
MODULE_DEVICE_TABLE(of, rcar_gen3_phy_usb3_match_table);
static int rcar_gen3_phy_usb3_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rcar_gen3_usb3 *r;
struct phy_provider *provider;
int ret = 0;
struct clk *clk;
if (!dev->of_node) {
dev_err(dev, "This driver needs device tree\n");
return -EINVAL;
}
r = devm_kzalloc(dev, sizeof(*r), GFP_KERNEL);
if (!r)
return -ENOMEM;
r->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(r->base))
return PTR_ERR(r->base);
clk = devm_clk_get(dev, "usb3s_clk");
if (!IS_ERR(clk) && !clk_prepare_enable(clk)) {
r->usb3s_clk = !!clk_get_rate(clk);
clk_disable_unprepare(clk);
}
clk = devm_clk_get(dev, "usb_extal");
if (!IS_ERR(clk) && !clk_prepare_enable(clk)) {
r->usb_extal = !!clk_get_rate(clk);
clk_disable_unprepare(clk);
}
if (!r->usb3s_clk && !r->usb_extal) {
dev_err(dev, "This driver needs usb3s_clk and/or usb_extal\n");
ret = -EINVAL;
goto error;
}
/*
* devm_phy_create() will call pm_runtime_enable(&phy->dev);
* And then, phy-core will manage runtime pm for this device.
*/
pm_runtime_enable(dev);
r->phy = devm_phy_create(dev, NULL, &rcar_gen3_phy_usb3_ops);
if (IS_ERR(r->phy)) {
dev_err(dev, "Failed to create USB3 PHY\n");
ret = PTR_ERR(r->phy);
goto error;
}
of_property_read_u32(dev->of_node, "renesas,ssc-range", &r->ssc_range);
platform_set_drvdata(pdev, r);
phy_set_drvdata(r->phy, r);
provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
if (IS_ERR(provider)) {
dev_err(dev, "Failed to register PHY provider\n");
ret = PTR_ERR(provider);
goto error;
}
return 0;
error:
pm_runtime_disable(dev);
return ret;
}
static void rcar_gen3_phy_usb3_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
}
static struct platform_driver rcar_gen3_phy_usb3_driver = {
.driver = {
.name = "phy_rcar_gen3_usb3",
.of_match_table = rcar_gen3_phy_usb3_match_table,
},
.probe = rcar_gen3_phy_usb3_probe,
.remove_new = rcar_gen3_phy_usb3_remove,
};
module_platform_driver(rcar_gen3_phy_usb3_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Renesas R-Car Gen3 USB 3.0 PHY");
MODULE_AUTHOR("Yoshihiro Shimoda <[email protected]>");
| linux-master | drivers/phy/renesas/phy-rcar-gen3-usb3.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas R-Car Gen3 for USB2.0 PHY driver
*
* Copyright (C) 2015-2017 Renesas Electronics Corporation
*
* This is based on the phy-rcar-gen2 driver:
* Copyright (C) 2014 Renesas Solutions Corp.
* Copyright (C) 2014 Cogent Embedded, Inc.
*/
#include <linux/extcon-provider.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/string.h>
#include <linux/usb/of.h>
#include <linux/workqueue.h>
/******* USB2.0 Host registers (original offset is +0x200) *******/
#define USB2_INT_ENABLE 0x000
#define USB2_USBCTR 0x00c
#define USB2_SPD_RSM_TIMSET 0x10c
#define USB2_OC_TIMSET 0x110
#define USB2_COMMCTRL 0x600
#define USB2_OBINTSTA 0x604
#define USB2_OBINTEN 0x608
#define USB2_VBCTRL 0x60c
#define USB2_LINECTRL1 0x610
#define USB2_ADPCTRL 0x630
/* INT_ENABLE */
#define USB2_INT_ENABLE_UCOM_INTEN BIT(3)
#define USB2_INT_ENABLE_USBH_INTB_EN BIT(2) /* For EHCI */
#define USB2_INT_ENABLE_USBH_INTA_EN BIT(1) /* For OHCI */
/* USBCTR */
#define USB2_USBCTR_DIRPD BIT(2)
#define USB2_USBCTR_PLL_RST BIT(1)
/* SPD_RSM_TIMSET */
#define USB2_SPD_RSM_TIMSET_INIT 0x014e029b
/* OC_TIMSET */
#define USB2_OC_TIMSET_INIT 0x000209ab
/* COMMCTRL */
#define USB2_COMMCTRL_OTG_PERI BIT(31) /* 1 = Peripheral mode */
/* OBINTSTA and OBINTEN */
#define USB2_OBINT_SESSVLDCHG BIT(12)
#define USB2_OBINT_IDDIGCHG BIT(11)
#define USB2_OBINT_BITS (USB2_OBINT_SESSVLDCHG | \
USB2_OBINT_IDDIGCHG)
/* VBCTRL */
#define USB2_VBCTRL_OCCLREN BIT(16)
#define USB2_VBCTRL_DRVVBUSSEL BIT(8)
#define USB2_VBCTRL_VBOUT BIT(0)
/* LINECTRL1 */
#define USB2_LINECTRL1_DPRPD_EN BIT(19)
#define USB2_LINECTRL1_DP_RPD BIT(18)
#define USB2_LINECTRL1_DMRPD_EN BIT(17)
#define USB2_LINECTRL1_DM_RPD BIT(16)
#define USB2_LINECTRL1_OPMODE_NODRV BIT(6)
/* ADPCTRL */
#define USB2_ADPCTRL_OTGSESSVLD BIT(20)
#define USB2_ADPCTRL_IDDIG BIT(19)
#define USB2_ADPCTRL_IDPULLUP BIT(5) /* 1 = ID sampling is enabled */
#define USB2_ADPCTRL_DRVVBUS BIT(4)
/* RZ/G2L specific */
#define USB2_OBINT_IDCHG_EN BIT(0)
#define USB2_LINECTRL1_USB2_IDMON BIT(0)
#define NUM_OF_PHYS 4
enum rcar_gen3_phy_index {
PHY_INDEX_BOTH_HC,
PHY_INDEX_OHCI,
PHY_INDEX_EHCI,
PHY_INDEX_HSUSB
};
static const u32 rcar_gen3_int_enable[NUM_OF_PHYS] = {
USB2_INT_ENABLE_USBH_INTB_EN | USB2_INT_ENABLE_USBH_INTA_EN,
USB2_INT_ENABLE_USBH_INTA_EN,
USB2_INT_ENABLE_USBH_INTB_EN,
0
};
struct rcar_gen3_phy {
struct phy *phy;
struct rcar_gen3_chan *ch;
u32 int_enable_bits;
bool initialized;
bool otg_initialized;
bool powered;
};
struct rcar_gen3_chan {
void __iomem *base;
struct device *dev; /* platform_device's device */
struct extcon_dev *extcon;
struct rcar_gen3_phy rphys[NUM_OF_PHYS];
struct regulator *vbus;
struct work_struct work;
struct mutex lock; /* protects rphys[...].powered */
enum usb_dr_mode dr_mode;
int irq;
u32 obint_enable_bits;
bool extcon_host;
bool is_otg_channel;
bool uses_otg_pins;
bool soc_no_adp_ctrl;
};
struct rcar_gen3_phy_drv_data {
const struct phy_ops *phy_usb2_ops;
bool no_adp_ctrl;
};
/*
* Combination about is_otg_channel and uses_otg_pins:
*
* Parameters || Behaviors
* is_otg_channel | uses_otg_pins || irqs | role sysfs
* ---------------------+---------------++--------------+------------
* true | true || enabled | enabled
* true | false || disabled | enabled
* false | any || disabled | disabled
*/
static void rcar_gen3_phy_usb2_work(struct work_struct *work)
{
struct rcar_gen3_chan *ch = container_of(work, struct rcar_gen3_chan,
work);
if (ch->extcon_host) {
extcon_set_state_sync(ch->extcon, EXTCON_USB_HOST, true);
extcon_set_state_sync(ch->extcon, EXTCON_USB, false);
} else {
extcon_set_state_sync(ch->extcon, EXTCON_USB_HOST, false);
extcon_set_state_sync(ch->extcon, EXTCON_USB, true);
}
}
static void rcar_gen3_set_host_mode(struct rcar_gen3_chan *ch, int host)
{
void __iomem *usb2_base = ch->base;
u32 val = readl(usb2_base + USB2_COMMCTRL);
dev_vdbg(ch->dev, "%s: %08x, %d\n", __func__, val, host);
if (host)
val &= ~USB2_COMMCTRL_OTG_PERI;
else
val |= USB2_COMMCTRL_OTG_PERI;
writel(val, usb2_base + USB2_COMMCTRL);
}
static void rcar_gen3_set_linectrl(struct rcar_gen3_chan *ch, int dp, int dm)
{
void __iomem *usb2_base = ch->base;
u32 val = readl(usb2_base + USB2_LINECTRL1);
dev_vdbg(ch->dev, "%s: %08x, %d, %d\n", __func__, val, dp, dm);
val &= ~(USB2_LINECTRL1_DP_RPD | USB2_LINECTRL1_DM_RPD);
if (dp)
val |= USB2_LINECTRL1_DP_RPD;
if (dm)
val |= USB2_LINECTRL1_DM_RPD;
writel(val, usb2_base + USB2_LINECTRL1);
}
static void rcar_gen3_enable_vbus_ctrl(struct rcar_gen3_chan *ch, int vbus)
{
void __iomem *usb2_base = ch->base;
u32 vbus_ctrl_reg = USB2_ADPCTRL;
u32 vbus_ctrl_val = USB2_ADPCTRL_DRVVBUS;
u32 val;
if (ch->soc_no_adp_ctrl) {
vbus_ctrl_reg = USB2_VBCTRL;
vbus_ctrl_val = USB2_VBCTRL_VBOUT;
}
val = readl(usb2_base + vbus_ctrl_reg);
dev_vdbg(ch->dev, "%s: %08x, %d\n", __func__, val, vbus);
if (vbus)
val |= vbus_ctrl_val;
else
val &= ~vbus_ctrl_val;
writel(val, usb2_base + vbus_ctrl_reg);
}
static void rcar_gen3_control_otg_irq(struct rcar_gen3_chan *ch, int enable)
{
void __iomem *usb2_base = ch->base;
u32 val = readl(usb2_base + USB2_OBINTEN);
if (ch->uses_otg_pins && enable)
val |= ch->obint_enable_bits;
else
val &= ~ch->obint_enable_bits;
writel(val, usb2_base + USB2_OBINTEN);
}
static void rcar_gen3_init_for_host(struct rcar_gen3_chan *ch)
{
rcar_gen3_set_linectrl(ch, 1, 1);
rcar_gen3_set_host_mode(ch, 1);
rcar_gen3_enable_vbus_ctrl(ch, 1);
ch->extcon_host = true;
schedule_work(&ch->work);
}
static void rcar_gen3_init_for_peri(struct rcar_gen3_chan *ch)
{
rcar_gen3_set_linectrl(ch, 0, 1);
rcar_gen3_set_host_mode(ch, 0);
rcar_gen3_enable_vbus_ctrl(ch, 0);
ch->extcon_host = false;
schedule_work(&ch->work);
}
static void rcar_gen3_init_for_b_host(struct rcar_gen3_chan *ch)
{
void __iomem *usb2_base = ch->base;
u32 val;
val = readl(usb2_base + USB2_LINECTRL1);
writel(val | USB2_LINECTRL1_OPMODE_NODRV, usb2_base + USB2_LINECTRL1);
rcar_gen3_set_linectrl(ch, 1, 1);
rcar_gen3_set_host_mode(ch, 1);
rcar_gen3_enable_vbus_ctrl(ch, 0);
val = readl(usb2_base + USB2_LINECTRL1);
writel(val & ~USB2_LINECTRL1_OPMODE_NODRV, usb2_base + USB2_LINECTRL1);
}
static void rcar_gen3_init_for_a_peri(struct rcar_gen3_chan *ch)
{
rcar_gen3_set_linectrl(ch, 0, 1);
rcar_gen3_set_host_mode(ch, 0);
rcar_gen3_enable_vbus_ctrl(ch, 1);
}
static void rcar_gen3_init_from_a_peri_to_a_host(struct rcar_gen3_chan *ch)
{
rcar_gen3_control_otg_irq(ch, 0);
rcar_gen3_enable_vbus_ctrl(ch, 1);
rcar_gen3_init_for_host(ch);
rcar_gen3_control_otg_irq(ch, 1);
}
static bool rcar_gen3_check_id(struct rcar_gen3_chan *ch)
{
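/* Without dedicated OTG pins, derive the role from dr_mode: host means A-device (false), otherwise B-device (true) */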
if (!ch->uses_otg_pins)
return (ch->dr_mode == USB_DR_MODE_HOST) ? false : true;
if (ch->soc_no_adp_ctrl)
return !!(readl(ch->base + USB2_LINECTRL1) & USB2_LINECTRL1_USB2_IDMON);
return !!(readl(ch->base + USB2_ADPCTRL) & USB2_ADPCTRL_IDDIG);
}
static void rcar_gen3_device_recognition(struct rcar_gen3_chan *ch)
{
if (!rcar_gen3_check_id(ch))
rcar_gen3_init_for_host(ch);
else
rcar_gen3_init_for_peri(ch);
}
static bool rcar_gen3_is_host(struct rcar_gen3_chan *ch)
{
return !(readl(ch->base + USB2_COMMCTRL) & USB2_COMMCTRL_OTG_PERI);
}
static enum phy_mode rcar_gen3_get_phy_mode(struct rcar_gen3_chan *ch)
{
if (rcar_gen3_is_host(ch))
return PHY_MODE_USB_HOST;
return PHY_MODE_USB_DEVICE;
}
static bool rcar_gen3_is_any_rphy_initialized(struct rcar_gen3_chan *ch)
{
int i;
for (i = 0; i < NUM_OF_PHYS; i++) {
if (ch->rphys[i].initialized)
return true;
}
return false;
}
static bool rcar_gen3_needs_init_otg(struct rcar_gen3_chan *ch)
{
int i;
for (i = 0; i < NUM_OF_PHYS; i++) {
if (ch->rphys[i].otg_initialized)
return false;
}
return true;
}
static bool rcar_gen3_are_all_rphys_power_off(struct rcar_gen3_chan *ch)
{
int i;
for (i = 0; i < NUM_OF_PHYS; i++) {
if (ch->rphys[i].powered)
return false;
}
return true;
}
static ssize_t role_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct rcar_gen3_chan *ch = dev_get_drvdata(dev);
bool is_b_device;
enum phy_mode cur_mode, new_mode;
if (!ch->is_otg_channel || !rcar_gen3_is_any_rphy_initialized(ch))
return -EIO;
if (sysfs_streq(buf, "host"))
new_mode = PHY_MODE_USB_HOST;
else if (sysfs_streq(buf, "peripheral"))
new_mode = PHY_MODE_USB_DEVICE;
else
return -EINVAL;
/* is_b_device: true is B-Device. false is A-Device. */
is_b_device = rcar_gen3_check_id(ch);
cur_mode = rcar_gen3_get_phy_mode(ch);
/* If the current and new modes are the same, return an error */
if (cur_mode == new_mode)
return -EINVAL;
if (new_mode == PHY_MODE_USB_HOST) { /* And is_host must be false */
if (!is_b_device) /* A-Peripheral */
rcar_gen3_init_from_a_peri_to_a_host(ch);
else /* B-Peripheral */
rcar_gen3_init_for_b_host(ch);
} else { /* And is_host must be true */
if (!is_b_device) /* A-Host */
rcar_gen3_init_for_a_peri(ch);
else /* B-Host */
rcar_gen3_init_for_peri(ch);
}
return count;
}
static ssize_t role_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct rcar_gen3_chan *ch = dev_get_drvdata(dev);
if (!ch->is_otg_channel || !rcar_gen3_is_any_rphy_initialized(ch))
return -EIO;
return sprintf(buf, "%s\n", rcar_gen3_is_host(ch) ? "host" :
"peripheral");
}
static DEVICE_ATTR_RW(role);
static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch)
{
void __iomem *usb2_base = ch->base;
u32 val;
/* Do not use generic read-modify-write helpers for this register */
val = readl(usb2_base + USB2_LINECTRL1);
val = (val & ~USB2_LINECTRL1_DP_RPD) | USB2_LINECTRL1_DPRPD_EN |
USB2_LINECTRL1_DMRPD_EN | USB2_LINECTRL1_DM_RPD;
writel(val, usb2_base + USB2_LINECTRL1);
if (!ch->soc_no_adp_ctrl) {
val = readl(usb2_base + USB2_VBCTRL);
val &= ~USB2_VBCTRL_OCCLREN;
writel(val | USB2_VBCTRL_DRVVBUSSEL, usb2_base + USB2_VBCTRL);
val = readl(usb2_base + USB2_ADPCTRL);
writel(val | USB2_ADPCTRL_IDPULLUP, usb2_base + USB2_ADPCTRL);
}
msleep(20);
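/* Clear any pending OTG status bits, then enable the OTG change interrupts */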
writel(0xffffffff, usb2_base + USB2_OBINTSTA);
writel(ch->obint_enable_bits, usb2_base + USB2_OBINTEN);
rcar_gen3_device_recognition(ch);
}
static irqreturn_t rcar_gen3_phy_usb2_irq(int irq, void *_ch)
{
struct rcar_gen3_chan *ch = _ch;
void __iomem *usb2_base = ch->base;
u32 status = readl(usb2_base + USB2_OBINTSTA);
irqreturn_t ret = IRQ_NONE;
if (status & ch->obint_enable_bits) {
dev_vdbg(ch->dev, "%s: %08x\n", __func__, status);
writel(ch->obint_enable_bits, usb2_base + USB2_OBINTSTA);
rcar_gen3_device_recognition(ch);
ret = IRQ_HANDLED;
}
return ret;
}
static int rcar_gen3_phy_usb2_init(struct phy *p)
{
struct rcar_gen3_phy *rphy = phy_get_drvdata(p);
struct rcar_gen3_chan *channel = rphy->ch;
void __iomem *usb2_base = channel->base;
u32 val;
int ret;
if (!rcar_gen3_is_any_rphy_initialized(channel) && channel->irq >= 0) {
INIT_WORK(&channel->work, rcar_gen3_phy_usb2_work);
ret = request_irq(channel->irq, rcar_gen3_phy_usb2_irq,
IRQF_SHARED, dev_name(channel->dev), channel);
if (ret < 0) {
dev_err(channel->dev, "No irq handler (%d)\n", channel->irq);
return ret;
}
}
/* Initialize USB2 part */
val = readl(usb2_base + USB2_INT_ENABLE);
val |= USB2_INT_ENABLE_UCOM_INTEN | rphy->int_enable_bits;
writel(val, usb2_base + USB2_INT_ENABLE);
writel(USB2_SPD_RSM_TIMSET_INIT, usb2_base + USB2_SPD_RSM_TIMSET);
writel(USB2_OC_TIMSET_INIT, usb2_base + USB2_OC_TIMSET);
/* Initialize otg part */
if (channel->is_otg_channel) {
if (rcar_gen3_needs_init_otg(channel))
rcar_gen3_init_otg(channel);
rphy->otg_initialized = true;
}
rphy->initialized = true;
return 0;
}
static int rcar_gen3_phy_usb2_exit(struct phy *p)
{
struct rcar_gen3_phy *rphy = phy_get_drvdata(p);
struct rcar_gen3_chan *channel = rphy->ch;
void __iomem *usb2_base = channel->base;
u32 val;
rphy->initialized = false;
if (channel->is_otg_channel)
rphy->otg_initialized = false;
val = readl(usb2_base + USB2_INT_ENABLE);
val &= ~rphy->int_enable_bits;
if (!rcar_gen3_is_any_rphy_initialized(channel))
val &= ~USB2_INT_ENABLE_UCOM_INTEN;
writel(val, usb2_base + USB2_INT_ENABLE);
if (channel->irq >= 0 && !rcar_gen3_is_any_rphy_initialized(channel))
free_irq(channel->irq, channel);
return 0;
}
static int rcar_gen3_phy_usb2_power_on(struct phy *p)
{
struct rcar_gen3_phy *rphy = phy_get_drvdata(p);
struct rcar_gen3_chan *channel = rphy->ch;
void __iomem *usb2_base = channel->base;
u32 val;
int ret = 0;
mutex_lock(&channel->lock);
if (!rcar_gen3_are_all_rphys_power_off(channel))
goto out;
if (channel->vbus) {
ret = regulator_enable(channel->vbus);
if (ret)
goto err;
}
val = readl(usb2_base + USB2_USBCTR);
val |= USB2_USBCTR_PLL_RST;
writel(val, usb2_base + USB2_USBCTR);
val &= ~USB2_USBCTR_PLL_RST;
writel(val, usb2_base + USB2_USBCTR);
out:
/* Mark this PHY as powered even if another PHY already powered the channel */
rphy->powered = true;
err:
mutex_unlock(&channel->lock);
return ret;
}
static int rcar_gen3_phy_usb2_power_off(struct phy *p)
{
struct rcar_gen3_phy *rphy = phy_get_drvdata(p);
struct rcar_gen3_chan *channel = rphy->ch;
int ret = 0;
mutex_lock(&channel->lock);
rphy->powered = false;
if (!rcar_gen3_are_all_rphys_power_off(channel))
goto out;
if (channel->vbus)
ret = regulator_disable(channel->vbus);
out:
mutex_unlock(&channel->lock);
return ret;
}
static const struct phy_ops rcar_gen3_phy_usb2_ops = {
.init = rcar_gen3_phy_usb2_init,
.exit = rcar_gen3_phy_usb2_exit,
.power_on = rcar_gen3_phy_usb2_power_on,
.power_off = rcar_gen3_phy_usb2_power_off,
.owner = THIS_MODULE,
};
static const struct phy_ops rz_g1c_phy_usb2_ops = {
.init = rcar_gen3_phy_usb2_init,
.exit = rcar_gen3_phy_usb2_exit,
.owner = THIS_MODULE,
};
static const struct rcar_gen3_phy_drv_data rcar_gen3_phy_usb2_data = {
.phy_usb2_ops = &rcar_gen3_phy_usb2_ops,
.no_adp_ctrl = false,
};
static const struct rcar_gen3_phy_drv_data rz_g1c_phy_usb2_data = {
.phy_usb2_ops = &rz_g1c_phy_usb2_ops,
.no_adp_ctrl = false,
};
static const struct rcar_gen3_phy_drv_data rz_g2l_phy_usb2_data = {
.phy_usb2_ops = &rcar_gen3_phy_usb2_ops,
.no_adp_ctrl = true,
};
static const struct of_device_id rcar_gen3_phy_usb2_match_table[] = {
{
.compatible = "renesas,usb2-phy-r8a77470",
.data = &rz_g1c_phy_usb2_data,
},
{
.compatible = "renesas,usb2-phy-r8a7795",
.data = &rcar_gen3_phy_usb2_data,
},
{
.compatible = "renesas,usb2-phy-r8a7796",
.data = &rcar_gen3_phy_usb2_data,
},
{
.compatible = "renesas,usb2-phy-r8a77965",
.data = &rcar_gen3_phy_usb2_data,
},
{
.compatible = "renesas,rzg2l-usb2-phy",
.data = &rz_g2l_phy_usb2_data,
},
{
.compatible = "renesas,rcar-gen3-usb2-phy",
.data = &rcar_gen3_phy_usb2_data,
},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, rcar_gen3_phy_usb2_match_table);
static const unsigned int rcar_gen3_phy_cable[] = {
EXTCON_USB,
EXTCON_USB_HOST,
EXTCON_NONE,
};
static struct phy *rcar_gen3_phy_usb2_xlate(struct device *dev,
struct of_phandle_args *args)
{
struct rcar_gen3_chan *ch = dev_get_drvdata(dev);
if (args->args_count == 0) /* For old version dts */
return ch->rphys[PHY_INDEX_BOTH_HC].phy;
else if (args->args_count > 1) /* Prevent invalid args count */
return ERR_PTR(-ENODEV);
if (args->args[0] >= NUM_OF_PHYS)
return ERR_PTR(-ENODEV);
return ch->rphys[args->args[0]].phy;
}
static enum usb_dr_mode rcar_gen3_get_dr_mode(struct device_node *np)
{
enum usb_dr_mode candidate = USB_DR_MODE_UNKNOWN;
int i;
/*
* If the device nodes specify conflicting dr_mode values (other than
* UNKNOWN), this function returns UNKNOWN. To preserve backward
* compatibility, the loop starts at index 0.
*/
for (i = 0; i < NUM_OF_PHYS; i++) {
enum usb_dr_mode mode = of_usb_get_dr_mode_by_phy(np, i);
if (mode != USB_DR_MODE_UNKNOWN) {
if (candidate == USB_DR_MODE_UNKNOWN)
candidate = mode;
else if (candidate != mode)
return USB_DR_MODE_UNKNOWN;
}
}
return candidate;
}
static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
{
const struct rcar_gen3_phy_drv_data *phy_data;
struct device *dev = &pdev->dev;
struct rcar_gen3_chan *channel;
struct phy_provider *provider;
int ret = 0, i;
if (!dev->of_node) {
dev_err(dev, "This driver needs device tree\n");
return -EINVAL;
}
channel = devm_kzalloc(dev, sizeof(*channel), GFP_KERNEL);
if (!channel)
return -ENOMEM;
channel->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(channel->base))
return PTR_ERR(channel->base);
channel->obint_enable_bits = USB2_OBINT_BITS;
/* get irq number here and request_irq for OTG in phy_init */
channel->irq = platform_get_irq_optional(pdev, 0);
channel->dr_mode = rcar_gen3_get_dr_mode(dev->of_node);
if (channel->dr_mode != USB_DR_MODE_UNKNOWN) {
int ret;
channel->is_otg_channel = true;
channel->uses_otg_pins = !of_property_read_bool(dev->of_node,
"renesas,no-otg-pins");
channel->extcon = devm_extcon_dev_allocate(dev,
rcar_gen3_phy_cable);
if (IS_ERR(channel->extcon))
return PTR_ERR(channel->extcon);
ret = devm_extcon_dev_register(dev, channel->extcon);
if (ret < 0) {
dev_err(dev, "Failed to register extcon\n");
return ret;
}
}
/*
* devm_phy_create() will call pm_runtime_enable(&phy->dev);
* And then, phy-core will manage runtime pm for this device.
*/
pm_runtime_enable(dev);
phy_data = of_device_get_match_data(dev);
if (!phy_data) {
ret = -EINVAL;
goto error;
}
channel->soc_no_adp_ctrl = phy_data->no_adp_ctrl;
if (phy_data->no_adp_ctrl)
channel->obint_enable_bits = USB2_OBINT_IDCHG_EN;
mutex_init(&channel->lock);
for (i = 0; i < NUM_OF_PHYS; i++) {
channel->rphys[i].phy = devm_phy_create(dev, NULL,
phy_data->phy_usb2_ops);
if (IS_ERR(channel->rphys[i].phy)) {
dev_err(dev, "Failed to create USB2 PHY\n");
ret = PTR_ERR(channel->rphys[i].phy);
goto error;
}
channel->rphys[i].ch = channel;
channel->rphys[i].int_enable_bits = rcar_gen3_int_enable[i];
phy_set_drvdata(channel->rphys[i].phy, &channel->rphys[i]);
}
channel->vbus = devm_regulator_get_optional(dev, "vbus");
if (IS_ERR(channel->vbus)) {
if (PTR_ERR(channel->vbus) == -EPROBE_DEFER) {
ret = PTR_ERR(channel->vbus);
goto error;
}
channel->vbus = NULL;
}
platform_set_drvdata(pdev, channel);
channel->dev = dev;
provider = devm_of_phy_provider_register(dev, rcar_gen3_phy_usb2_xlate);
if (IS_ERR(provider)) {
dev_err(dev, "Failed to register PHY provider\n");
ret = PTR_ERR(provider);
goto error;
} else if (channel->is_otg_channel) {
ret = device_create_file(dev, &dev_attr_role);
if (ret < 0)
goto error;
}
return 0;
error:
pm_runtime_disable(dev);
return ret;
}
static void rcar_gen3_phy_usb2_remove(struct platform_device *pdev)
{
struct rcar_gen3_chan *channel = platform_get_drvdata(pdev);
if (channel->is_otg_channel)
device_remove_file(&pdev->dev, &dev_attr_role);
pm_runtime_disable(&pdev->dev);
}
static struct platform_driver rcar_gen3_phy_usb2_driver = {
.driver = {
.name = "phy_rcar_gen3_usb2",
.of_match_table = rcar_gen3_phy_usb2_match_table,
},
.probe = rcar_gen3_phy_usb2_probe,
.remove_new = rcar_gen3_phy_usb2_remove,
};
module_platform_driver(rcar_gen3_phy_usb2_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Renesas R-Car Gen3 USB 2.0 PHY");
MODULE_AUTHOR("Yoshihiro Shimoda <[email protected]>");
| linux-master | drivers/phy/renesas/phy-rcar-gen3-usb2.c |
// SPDX-License-Identifier: GPL-2.0
/* Renesas Ethernet SERDES device driver
*
* Copyright (C) 2022 Renesas Electronics Corporation
*/
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/phy.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#define R8A779F0_ETH_SERDES_NUM 3
#define R8A779F0_ETH_SERDES_OFFSET 0x0400
#define R8A779F0_ETH_SERDES_BANK_SELECT 0x03fc
#define R8A779F0_ETH_SERDES_TIMEOUT_US 100000
#define R8A779F0_ETH_SERDES_NUM_RETRY_LINKUP 3
struct r8a779f0_eth_serdes_drv_data;
struct r8a779f0_eth_serdes_channel {
struct r8a779f0_eth_serdes_drv_data *dd;
struct phy *phy;
void __iomem *addr;
phy_interface_t phy_interface;
int speed;
int index;
};
struct r8a779f0_eth_serdes_drv_data {
void __iomem *addr;
struct platform_device *pdev;
struct reset_control *reset;
struct r8a779f0_eth_serdes_channel channel[R8A779F0_ETH_SERDES_NUM];
bool initialized;
};
/*
* The datasheet describes initialization procedure without any information
* about registers' name/bits. So, this is all black magic to initialize
* the hardware.
*/
static void r8a779f0_eth_serdes_write32(void __iomem *addr, u32 offs, u32 bank, u32 data)
{
iowrite32(bank, addr + R8A779F0_ETH_SERDES_BANK_SELECT);
iowrite32(data, addr + offs);
}
static int
r8a779f0_eth_serdes_reg_wait(struct r8a779f0_eth_serdes_channel *channel,
u32 offs, u32 bank, u32 mask, u32 expected)
{
int ret;
u32 val;
iowrite32(bank, channel->addr + R8A779F0_ETH_SERDES_BANK_SELECT);
ret = readl_poll_timeout_atomic(channel->addr + offs, val,
(val & mask) == expected,
1, R8A779F0_ETH_SERDES_TIMEOUT_US);
if (ret)
dev_dbg(&channel->phy->dev,
"%s: index %d, offs %x, bank %x, mask %x, expected %x\n",
__func__, channel->index, offs, bank, mask, expected);
return ret;
}
static int
r8a779f0_eth_serdes_common_init_ram(struct r8a779f0_eth_serdes_drv_data *dd)
{
struct r8a779f0_eth_serdes_channel *channel;
int i, ret;
for (i = 0; i < R8A779F0_ETH_SERDES_NUM; i++) {
channel = &dd->channel[i];
ret = r8a779f0_eth_serdes_reg_wait(channel, 0x026c, 0x180, BIT(0), 0x01);
if (ret)
return ret;
}
r8a779f0_eth_serdes_write32(dd->addr, 0x026c, 0x180, 0x03);
return ret;
}
static int
r8a779f0_eth_serdes_common_setting(struct r8a779f0_eth_serdes_channel *channel)
{
struct r8a779f0_eth_serdes_drv_data *dd = channel->dd;
switch (channel->phy_interface) {
case PHY_INTERFACE_MODE_SGMII:
r8a779f0_eth_serdes_write32(dd->addr, 0x0244, 0x180, 0x0097);
r8a779f0_eth_serdes_write32(dd->addr, 0x01d0, 0x180, 0x0060);
r8a779f0_eth_serdes_write32(dd->addr, 0x01d8, 0x180, 0x2200);
r8a779f0_eth_serdes_write32(dd->addr, 0x01d4, 0x180, 0x0000);
r8a779f0_eth_serdes_write32(dd->addr, 0x01e0, 0x180, 0x003d);
return 0;
default:
return -EOPNOTSUPP;
}
}
static int
r8a779f0_eth_serdes_chan_setting(struct r8a779f0_eth_serdes_channel *channel)
{
int ret;
switch (channel->phy_interface) {
case PHY_INTERFACE_MODE_SGMII:
r8a779f0_eth_serdes_write32(channel->addr, 0x0000, 0x380, 0x2000);
r8a779f0_eth_serdes_write32(channel->addr, 0x01c0, 0x180, 0x0011);
r8a779f0_eth_serdes_write32(channel->addr, 0x0248, 0x180, 0x0540);
r8a779f0_eth_serdes_write32(channel->addr, 0x0258, 0x180, 0x0015);
r8a779f0_eth_serdes_write32(channel->addr, 0x0144, 0x180, 0x0100);
r8a779f0_eth_serdes_write32(channel->addr, 0x01a0, 0x180, 0x0000);
r8a779f0_eth_serdes_write32(channel->addr, 0x00d0, 0x180, 0x0002);
r8a779f0_eth_serdes_write32(channel->addr, 0x0150, 0x180, 0x0003);
r8a779f0_eth_serdes_write32(channel->addr, 0x00c8, 0x180, 0x0100);
r8a779f0_eth_serdes_write32(channel->addr, 0x0148, 0x180, 0x0100);
r8a779f0_eth_serdes_write32(channel->addr, 0x0174, 0x180, 0x0000);
r8a779f0_eth_serdes_write32(channel->addr, 0x0160, 0x180, 0x0007);
r8a779f0_eth_serdes_write32(channel->addr, 0x01ac, 0x180, 0x0000);
r8a779f0_eth_serdes_write32(channel->addr, 0x00c4, 0x180, 0x0310);
r8a779f0_eth_serdes_write32(channel->addr, 0x00c8, 0x180, 0x0101);
ret = r8a779f0_eth_serdes_reg_wait(channel, 0x00c8, 0x0180, BIT(0), 0);
if (ret)
return ret;
r8a779f0_eth_serdes_write32(channel->addr, 0x0148, 0x180, 0x0101);
ret = r8a779f0_eth_serdes_reg_wait(channel, 0x0148, 0x0180, BIT(0), 0);
if (ret)
return ret;
r8a779f0_eth_serdes_write32(channel->addr, 0x00c4, 0x180, 0x1310);
r8a779f0_eth_serdes_write32(channel->addr, 0x00d8, 0x180, 0x1800);
r8a779f0_eth_serdes_write32(channel->addr, 0x00dc, 0x180, 0x0000);
r8a779f0_eth_serdes_write32(channel->addr, 0x001c, 0x300, 0x0001);
r8a779f0_eth_serdes_write32(channel->addr, 0x0000, 0x380, 0x2100);
ret = r8a779f0_eth_serdes_reg_wait(channel, 0x0000, 0x0380, BIT(8), 0);
if (ret)
return ret;
if (channel->speed == 1000)
r8a779f0_eth_serdes_write32(channel->addr, 0x0000, 0x1f00, 0x0140);
else if (channel->speed == 100)
r8a779f0_eth_serdes_write32(channel->addr, 0x0000, 0x1f00, 0x2100);
/* For AN_ON */
r8a779f0_eth_serdes_write32(channel->addr, 0x0004, 0x1f80, 0x0005);
r8a779f0_eth_serdes_write32(channel->addr, 0x0028, 0x1f80, 0x07a1);
r8a779f0_eth_serdes_write32(channel->addr, 0x0000, 0x1f80, 0x0208);
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
static int
r8a779f0_eth_serdes_chan_speed(struct r8a779f0_eth_serdes_channel *channel)
{
int ret;
switch (channel->phy_interface) {
case PHY_INTERFACE_MODE_SGMII:
/* For AN_ON */
if (channel->speed == 1000)
r8a779f0_eth_serdes_write32(channel->addr, 0x0000, 0x1f00, 0x1140);
else if (channel->speed == 100)
r8a779f0_eth_serdes_write32(channel->addr, 0x0000, 0x1f00, 0x3100);
ret = r8a779f0_eth_serdes_reg_wait(channel, 0x0008, 0x1f80, BIT(0), 1);
if (ret)
return ret;
r8a779f0_eth_serdes_write32(channel->addr, 0x0008, 0x1f80, 0x0000);
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
static int r8a779f0_eth_serdes_monitor_linkup(struct r8a779f0_eth_serdes_channel *channel)
{
int i, ret;
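/* Poll for link-up; if it times out, restart the channel and retry a few times */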
for (i = 0; i < R8A779F0_ETH_SERDES_NUM_RETRY_LINKUP; i++) {
ret = r8a779f0_eth_serdes_reg_wait(channel, 0x0004, 0x300,
BIT(2), BIT(2));
if (!ret)
break;
/* restart */
r8a779f0_eth_serdes_write32(channel->addr, 0x0144, 0x180, 0x0100);
udelay(1);
r8a779f0_eth_serdes_write32(channel->addr, 0x0144, 0x180, 0x0000);
}
return ret;
}
static int r8a779f0_eth_serdes_hw_init(struct r8a779f0_eth_serdes_channel *channel)
{
struct r8a779f0_eth_serdes_drv_data *dd = channel->dd;
int i, ret;
if (dd->initialized)
return 0;
ret = r8a779f0_eth_serdes_common_init_ram(dd);
if (ret)
return ret;
for (i = 0; i < R8A779F0_ETH_SERDES_NUM; i++) {
ret = r8a779f0_eth_serdes_reg_wait(&dd->channel[i], 0x0000,
0x300, BIT(15), 0);
if (ret)
return ret;
}
for (i = 0; i < R8A779F0_ETH_SERDES_NUM; i++)
r8a779f0_eth_serdes_write32(dd->channel[i].addr, 0x03d4, 0x380, 0x0443);
ret = r8a779f0_eth_serdes_common_setting(channel);
if (ret)
return ret;
for (i = 0; i < R8A779F0_ETH_SERDES_NUM; i++)
r8a779f0_eth_serdes_write32(dd->channel[i].addr, 0x03d0, 0x380, 0x0001);
r8a779f0_eth_serdes_write32(dd->addr, 0x0000, 0x380, 0x8000);
ret = r8a779f0_eth_serdes_common_init_ram(dd);
if (ret)
return ret;
return r8a779f0_eth_serdes_reg_wait(&dd->channel[0], 0x0000, 0x380, BIT(15), 0);
}
static int r8a779f0_eth_serdes_init(struct phy *p)
{
struct r8a779f0_eth_serdes_channel *channel = phy_get_drvdata(p);
int ret;
ret = r8a779f0_eth_serdes_hw_init(channel);
if (!ret)
channel->dd->initialized = true;
return ret;
}
static int r8a779f0_eth_serdes_hw_init_late(struct r8a779f0_eth_serdes_channel
*channel)
{
int ret;
ret = r8a779f0_eth_serdes_chan_setting(channel);
if (ret)
return ret;
ret = r8a779f0_eth_serdes_chan_speed(channel);
if (ret)
return ret;
r8a779f0_eth_serdes_write32(channel->addr, 0x03c0, 0x380, 0x0000);
r8a779f0_eth_serdes_write32(channel->addr, 0x03d0, 0x380, 0x0000);
return r8a779f0_eth_serdes_monitor_linkup(channel);
}
static int r8a779f0_eth_serdes_power_on(struct phy *p)
{
struct r8a779f0_eth_serdes_channel *channel = phy_get_drvdata(p);
return r8a779f0_eth_serdes_hw_init_late(channel);
}
static int r8a779f0_eth_serdes_set_mode(struct phy *p, enum phy_mode mode,
int submode)
{
struct r8a779f0_eth_serdes_channel *channel = phy_get_drvdata(p);
if (mode != PHY_MODE_ETHERNET)
return -EOPNOTSUPP;
switch (submode) {
case PHY_INTERFACE_MODE_GMII:
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_USXGMII:
channel->phy_interface = submode;
return 0;
default:
return -EOPNOTSUPP;
}
}
static int r8a779f0_eth_serdes_set_speed(struct phy *p, int speed)
{
struct r8a779f0_eth_serdes_channel *channel = phy_get_drvdata(p);
channel->speed = speed;
return 0;
}
static const struct phy_ops r8a779f0_eth_serdes_ops = {
.init = r8a779f0_eth_serdes_init,
.power_on = r8a779f0_eth_serdes_power_on,
.set_mode = r8a779f0_eth_serdes_set_mode,
.set_speed = r8a779f0_eth_serdes_set_speed,
};
static struct phy *r8a779f0_eth_serdes_xlate(struct device *dev,
struct of_phandle_args *args)
{
struct r8a779f0_eth_serdes_drv_data *dd = dev_get_drvdata(dev);
if (args->args[0] >= R8A779F0_ETH_SERDES_NUM)
return ERR_PTR(-ENODEV);
return dd->channel[args->args[0]].phy;
}
static const struct of_device_id r8a779f0_eth_serdes_of_table[] = {
{ .compatible = "renesas,r8a779f0-ether-serdes", },
{ }
};
MODULE_DEVICE_TABLE(of, r8a779f0_eth_serdes_of_table);
static int r8a779f0_eth_serdes_probe(struct platform_device *pdev)
{
struct r8a779f0_eth_serdes_drv_data *dd;
struct phy_provider *provider;
int i;
dd = devm_kzalloc(&pdev->dev, sizeof(*dd), GFP_KERNEL);
if (!dd)
return -ENOMEM;
platform_set_drvdata(pdev, dd);
dd->pdev = pdev;
dd->addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dd->addr))
return PTR_ERR(dd->addr);
dd->reset = devm_reset_control_get(&pdev->dev, NULL);
if (IS_ERR(dd->reset))
return PTR_ERR(dd->reset);
reset_control_reset(dd->reset);
for (i = 0; i < R8A779F0_ETH_SERDES_NUM; i++) {
struct r8a779f0_eth_serdes_channel *channel = &dd->channel[i];
channel->phy = devm_phy_create(&pdev->dev, NULL,
&r8a779f0_eth_serdes_ops);
if (IS_ERR(channel->phy))
return PTR_ERR(channel->phy);
channel->addr = dd->addr + R8A779F0_ETH_SERDES_OFFSET * i;
channel->dd = dd;
channel->index = i;
phy_set_drvdata(channel->phy, channel);
}
provider = devm_of_phy_provider_register(&pdev->dev,
r8a779f0_eth_serdes_xlate);
if (IS_ERR(provider))
return PTR_ERR(provider);
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
return 0;
}
static void r8a779f0_eth_serdes_remove(struct platform_device *pdev)
{
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
platform_set_drvdata(pdev, NULL);
}
static struct platform_driver r8a779f0_eth_serdes_driver_platform = {
.probe = r8a779f0_eth_serdes_probe,
.remove_new = r8a779f0_eth_serdes_remove,
.driver = {
.name = "r8a779f0_eth_serdes",
.of_match_table = r8a779f0_eth_serdes_of_table,
}
};
module_platform_driver(r8a779f0_eth_serdes_driver_platform);
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas Ethernet SERDES device driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/phy/renesas/r8a779f0-ether-serdes.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas R-Car Gen2 PHY driver
*
* Copyright (C) 2014 Renesas Solutions Corp.
* Copyright (C) 2014 Cogent Embedded, Inc.
* Copyright (C) 2019 Renesas Electronics Corp.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#define USBHS_LPSTS 0x02
#define USBHS_UGCTRL 0x80
#define USBHS_UGCTRL2 0x84
#define USBHS_UGSTS 0x88 /* From technical update */
/* Low Power Status register (LPSTS) */
#define USBHS_LPSTS_SUSPM 0x4000
/* USB General control register (UGCTRL) */
#define USBHS_UGCTRL_CONNECT 0x00000004
#define USBHS_UGCTRL_PLLRESET 0x00000001
/* USB General control register 2 (UGCTRL2) */
#define USBHS_UGCTRL2_USB2SEL 0x80000000
#define USBHS_UGCTRL2_USB2SEL_PCI 0x00000000
#define USBHS_UGCTRL2_USB2SEL_USB30 0x80000000
#define USBHS_UGCTRL2_USB0SEL 0x00000030
#define USBHS_UGCTRL2_USB0SEL_PCI 0x00000010
#define USBHS_UGCTRL2_USB0SEL_HS_USB 0x00000030
#define USBHS_UGCTRL2_USB0SEL_USB20 0x00000010
#define USBHS_UGCTRL2_USB0SEL_HS_USB20 0x00000020
/* USB General status register (UGSTS) */
#define USBHS_UGSTS_LOCK 0x00000100 /* From technical update */
#define PHYS_PER_CHANNEL 2
struct rcar_gen2_phy {
struct phy *phy;
struct rcar_gen2_channel *channel;
int number;
u32 select_value;
};
struct rcar_gen2_channel {
struct device_node *of_node;
struct rcar_gen2_phy_driver *drv;
struct rcar_gen2_phy phys[PHYS_PER_CHANNEL];
int selected_phy;
u32 select_mask;
};
struct rcar_gen2_phy_driver {
void __iomem *base;
struct clk *clk;
spinlock_t lock;
int num_channels;
struct rcar_gen2_channel *channels;
};
struct rcar_gen2_phy_data {
const struct phy_ops *gen2_phy_ops;
const u32 (*select_value)[PHYS_PER_CHANNEL];
const u32 num_channels;
};
static int rcar_gen2_phy_init(struct phy *p)
{
struct rcar_gen2_phy *phy = phy_get_drvdata(p);
struct rcar_gen2_channel *channel = phy->channel;
struct rcar_gen2_phy_driver *drv = channel->drv;
unsigned long flags;
u32 ugctrl2;
/*
* Try to acquire exclusive access to PHY. The first driver calling
* phy_init() on a given channel wins, and all attempts to use another
* PHY on this channel will fail until phy_exit() is called by the first
* driver. Using cmpxchg() here makes this SMP-safe.
*/
if (cmpxchg(&channel->selected_phy, -1, phy->number) != -1)
return -EBUSY;
clk_prepare_enable(drv->clk);
spin_lock_irqsave(&drv->lock, flags);
ugctrl2 = readl(drv->base + USBHS_UGCTRL2);
ugctrl2 &= ~channel->select_mask;
ugctrl2 |= phy->select_value;
writel(ugctrl2, drv->base + USBHS_UGCTRL2);
spin_unlock_irqrestore(&drv->lock, flags);
return 0;
}
static int rcar_gen2_phy_exit(struct phy *p)
{
struct rcar_gen2_phy *phy = phy_get_drvdata(p);
struct rcar_gen2_channel *channel = phy->channel;
clk_disable_unprepare(channel->drv->clk);
channel->selected_phy = -1;
return 0;
}
static int rcar_gen2_phy_power_on(struct phy *p)
{
struct rcar_gen2_phy *phy = phy_get_drvdata(p);
struct rcar_gen2_phy_driver *drv = phy->channel->drv;
void __iomem *base = drv->base;
unsigned long flags;
u32 value;
int err = 0, i;
/* Skip if it's not USBHS */
if (phy->select_value != USBHS_UGCTRL2_USB0SEL_HS_USB)
return 0;
spin_lock_irqsave(&drv->lock, flags);
/* Power on USBHS PHY */
value = readl(base + USBHS_UGCTRL);
value &= ~USBHS_UGCTRL_PLLRESET;
writel(value, base + USBHS_UGCTRL);
value = readw(base + USBHS_LPSTS);
value |= USBHS_LPSTS_SUSPM;
writew(value, base + USBHS_LPSTS);
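/* Wait up to 20 us for the PLL to lock before asserting CONNECT */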
for (i = 0; i < 20; i++) {
value = readl(base + USBHS_UGSTS);
if ((value & USBHS_UGSTS_LOCK) == USBHS_UGSTS_LOCK) {
value = readl(base + USBHS_UGCTRL);
value |= USBHS_UGCTRL_CONNECT;
writel(value, base + USBHS_UGCTRL);
goto out;
}
udelay(1);
}
/* Timed out waiting for the PLL lock */
err = -ETIMEDOUT;
out:
spin_unlock_irqrestore(&drv->lock, flags);
return err;
}
static int rcar_gen2_phy_power_off(struct phy *p)
{
struct rcar_gen2_phy *phy = phy_get_drvdata(p);
struct rcar_gen2_phy_driver *drv = phy->channel->drv;
void __iomem *base = drv->base;
unsigned long flags;
u32 value;
/* Skip if it's not USBHS */
if (phy->select_value != USBHS_UGCTRL2_USB0SEL_HS_USB)
return 0;
spin_lock_irqsave(&drv->lock, flags);
/* Power off USBHS PHY */
value = readl(base + USBHS_UGCTRL);
value &= ~USBHS_UGCTRL_CONNECT;
writel(value, base + USBHS_UGCTRL);
value = readw(base + USBHS_LPSTS);
value &= ~USBHS_LPSTS_SUSPM;
writew(value, base + USBHS_LPSTS);
value = readl(base + USBHS_UGCTRL);
value |= USBHS_UGCTRL_PLLRESET;
writel(value, base + USBHS_UGCTRL);
spin_unlock_irqrestore(&drv->lock, flags);
return 0;
}
static int rz_g1c_phy_power_on(struct phy *p)
{
struct rcar_gen2_phy *phy = phy_get_drvdata(p);
struct rcar_gen2_phy_driver *drv = phy->channel->drv;
void __iomem *base = drv->base;
unsigned long flags;
u32 value;
spin_lock_irqsave(&drv->lock, flags);
/* Power on USBHS PHY */
value = readl(base + USBHS_UGCTRL);
value &= ~USBHS_UGCTRL_PLLRESET;
writel(value, base + USBHS_UGCTRL);
/* As per the datasheet, wait 340 microseconds for the power to stabilize */
udelay(340);
if (phy->select_value == USBHS_UGCTRL2_USB0SEL_HS_USB20) {
value = readw(base + USBHS_LPSTS);
value |= USBHS_LPSTS_SUSPM;
writew(value, base + USBHS_LPSTS);
}
spin_unlock_irqrestore(&drv->lock, flags);
return 0;
}
static int rz_g1c_phy_power_off(struct phy *p)
{
struct rcar_gen2_phy *phy = phy_get_drvdata(p);
struct rcar_gen2_phy_driver *drv = phy->channel->drv;
void __iomem *base = drv->base;
unsigned long flags;
u32 value;
spin_lock_irqsave(&drv->lock, flags);
/* Power off USBHS PHY */
if (phy->select_value == USBHS_UGCTRL2_USB0SEL_HS_USB20) {
value = readw(base + USBHS_LPSTS);
value &= ~USBHS_LPSTS_SUSPM;
writew(value, base + USBHS_LPSTS);
}
value = readl(base + USBHS_UGCTRL);
value |= USBHS_UGCTRL_PLLRESET;
writel(value, base + USBHS_UGCTRL);
spin_unlock_irqrestore(&drv->lock, flags);
return 0;
}
static const struct phy_ops rcar_gen2_phy_ops = {
.init = rcar_gen2_phy_init,
.exit = rcar_gen2_phy_exit,
.power_on = rcar_gen2_phy_power_on,
.power_off = rcar_gen2_phy_power_off,
.owner = THIS_MODULE,
};
static const struct phy_ops rz_g1c_phy_ops = {
.init = rcar_gen2_phy_init,
.exit = rcar_gen2_phy_exit,
.power_on = rz_g1c_phy_power_on,
.power_off = rz_g1c_phy_power_off,
.owner = THIS_MODULE,
};
static const u32 pci_select_value[][PHYS_PER_CHANNEL] = {
[0] = { USBHS_UGCTRL2_USB0SEL_PCI, USBHS_UGCTRL2_USB0SEL_HS_USB },
[2] = { USBHS_UGCTRL2_USB2SEL_PCI, USBHS_UGCTRL2_USB2SEL_USB30 },
};
static const u32 usb20_select_value[][PHYS_PER_CHANNEL] = {
{ USBHS_UGCTRL2_USB0SEL_USB20, USBHS_UGCTRL2_USB0SEL_HS_USB20 },
};
static const struct rcar_gen2_phy_data rcar_gen2_usb_phy_data = {
.gen2_phy_ops = &rcar_gen2_phy_ops,
.select_value = pci_select_value,
.num_channels = ARRAY_SIZE(pci_select_value),
};
static const struct rcar_gen2_phy_data rz_g1c_usb_phy_data = {
.gen2_phy_ops = &rz_g1c_phy_ops,
.select_value = usb20_select_value,
.num_channels = ARRAY_SIZE(usb20_select_value),
};
static const struct of_device_id rcar_gen2_phy_match_table[] = {
{
.compatible = "renesas,usb-phy-r8a77470",
.data = &rz_g1c_usb_phy_data,
},
{
.compatible = "renesas,usb-phy-r8a7790",
.data = &rcar_gen2_usb_phy_data,
},
{
.compatible = "renesas,usb-phy-r8a7791",
.data = &rcar_gen2_usb_phy_data,
},
{
.compatible = "renesas,usb-phy-r8a7794",
.data = &rcar_gen2_usb_phy_data,
},
{
.compatible = "renesas,rcar-gen2-usb-phy",
.data = &rcar_gen2_usb_phy_data,
},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, rcar_gen2_phy_match_table);
static struct phy *rcar_gen2_phy_xlate(struct device *dev,
struct of_phandle_args *args)
{
struct rcar_gen2_phy_driver *drv;
struct device_node *np = args->np;
int i;
drv = dev_get_drvdata(dev);
if (!drv)
return ERR_PTR(-EINVAL);
for (i = 0; i < drv->num_channels; i++) {
if (np == drv->channels[i].of_node)
break;
}
if (i >= drv->num_channels || args->args[0] >= 2)
return ERR_PTR(-ENODEV);
return drv->channels[i].phys[args->args[0]].phy;
}
static const u32 select_mask[] = {
[0] = USBHS_UGCTRL2_USB0SEL,
[2] = USBHS_UGCTRL2_USB2SEL,
};
static int rcar_gen2_phy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rcar_gen2_phy_driver *drv;
struct phy_provider *provider;
struct device_node *np;
void __iomem *base;
struct clk *clk;
const struct rcar_gen2_phy_data *data;
int i = 0;
if (!dev->of_node) {
dev_err(dev,
"This driver is required to be instantiated from device tree\n");
return -EINVAL;
}
clk = devm_clk_get(dev, "usbhs");
if (IS_ERR(clk)) {
dev_err(dev, "Can't get USBHS clock\n");
return PTR_ERR(clk);
}
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
if (!drv)
return -ENOMEM;
spin_lock_init(&drv->lock);
drv->clk = clk;
drv->base = base;
data = of_device_get_match_data(dev);
if (!data)
return -EINVAL;
drv->num_channels = of_get_child_count(dev->of_node);
drv->channels = devm_kcalloc(dev, drv->num_channels,
sizeof(struct rcar_gen2_channel),
GFP_KERNEL);
if (!drv->channels)
return -ENOMEM;
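/* One child node per channel; each channel exposes PHYS_PER_CHANNEL PHYs */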
for_each_child_of_node(dev->of_node, np) {
struct rcar_gen2_channel *channel = drv->channels + i;
u32 channel_num;
int error, n;
channel->of_node = np;
channel->drv = drv;
channel->selected_phy = -1;
error = of_property_read_u32(np, "reg", &channel_num);
if (error || channel_num >= data->num_channels) {
dev_err(dev, "Invalid \"reg\" property\n");
of_node_put(np);
return error ? error : -EINVAL;
}
channel->select_mask = select_mask[channel_num];
for (n = 0; n < PHYS_PER_CHANNEL; n++) {
struct rcar_gen2_phy *phy = &channel->phys[n];
phy->channel = channel;
phy->number = n;
phy->select_value = data->select_value[channel_num][n];
phy->phy = devm_phy_create(dev, NULL,
data->gen2_phy_ops);
if (IS_ERR(phy->phy)) {
dev_err(dev, "Failed to create PHY\n");
of_node_put(np);
return PTR_ERR(phy->phy);
}
phy_set_drvdata(phy->phy, phy);
}
i++;
}
provider = devm_of_phy_provider_register(dev, rcar_gen2_phy_xlate);
if (IS_ERR(provider)) {
dev_err(dev, "Failed to register PHY provider\n");
return PTR_ERR(provider);
}
dev_set_drvdata(dev, drv);
return 0;
}
static struct platform_driver rcar_gen2_phy_driver = {
.driver = {
.name = "phy_rcar_gen2",
.of_match_table = rcar_gen2_phy_match_table,
},
.probe = rcar_gen2_phy_probe,
};
module_platform_driver(rcar_gen2_phy_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Renesas R-Car Gen2 PHY");
MODULE_AUTHOR("Sergei Shtylyov <[email protected]>");
| linux-master | drivers/phy/renesas/phy-rcar-gen2.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* tusb1210.c - TUSB1210 USB ULPI PHY driver
*
* Copyright (C) 2015 Intel Corporation
*
* Author: Heikki Krogerus <[email protected]>
*/
#include <linux/module.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/ulpi/driver.h>
#include <linux/ulpi/regs.h>
#include <linux/gpio/consumer.h>
#include <linux/phy/ulpi_phy.h>
#include <linux/power_supply.h>
#include <linux/property.h>
#include <linux/workqueue.h>
#define TUSB1211_POWER_CONTROL 0x3d
#define TUSB1211_POWER_CONTROL_SET 0x3e
#define TUSB1211_POWER_CONTROL_CLEAR 0x3f
#define TUSB1211_POWER_CONTROL_SW_CONTROL BIT(0)
#define TUSB1211_POWER_CONTROL_DET_COMP BIT(1)
#define TUSB1211_POWER_CONTROL_DP_VSRC_EN BIT(6)
#define TUSB1210_VENDOR_SPECIFIC2 0x80
#define TUSB1210_VENDOR_SPECIFIC2_IHSTX_MASK GENMASK(3, 0)
#define TUSB1210_VENDOR_SPECIFIC2_ZHSDRV_MASK GENMASK(5, 4)
#define TUSB1210_VENDOR_SPECIFIC2_DP_MASK BIT(6)
#define TUSB1211_VENDOR_SPECIFIC3 0x85
#define TUSB1211_VENDOR_SPECIFIC3_SET 0x86
#define TUSB1211_VENDOR_SPECIFIC3_CLEAR 0x87
#define TUSB1211_VENDOR_SPECIFIC3_SW_USB_DET BIT(4)
#define TUSB1211_VENDOR_SPECIFIC3_CHGD_IDP_SRC_EN BIT(6)
#define TUSB1210_RESET_TIME_MS 50
#define TUSB1210_CHG_DET_MAX_RETRIES 5
/* TUSB1210 charger detection work states */
enum tusb1210_chg_det_state {
TUSB1210_CHG_DET_CONNECTING,
TUSB1210_CHG_DET_START_DET,
TUSB1210_CHG_DET_READ_DET,
TUSB1210_CHG_DET_FINISH_DET,
TUSB1210_CHG_DET_CONNECTED,
TUSB1210_CHG_DET_DISCONNECTING,
TUSB1210_CHG_DET_DISCONNECTING_DONE,
TUSB1210_CHG_DET_DISCONNECTED,
};
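/*
 * Normal state flow of the charger-detection work, as implemented in
 * tusb1210_chg_det_work() below (error paths through
 * tusb1210_chg_det_handle_ulpi_error() may retry START_DET or jump
 * straight to FINISH_DET):
 *
 * DISCONNECTED -> CONNECTING -> START_DET -> READ_DET -> FINISH_DET ->
 * CONNECTED -> DISCONNECTING -> DISCONNECTING_DONE -> DISCONNECTED
 */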
struct tusb1210 {
struct ulpi *ulpi;
struct phy *phy;
struct gpio_desc *gpio_reset;
struct gpio_desc *gpio_cs;
u8 otg_ctrl;
u8 vendor_specific2;
#ifdef CONFIG_POWER_SUPPLY
enum power_supply_usb_type chg_type;
enum tusb1210_chg_det_state chg_det_state;
int chg_det_retries;
struct delayed_work chg_det_work;
struct notifier_block psy_nb;
struct power_supply *psy;
struct power_supply *charger;
#endif
};
static int tusb1210_ulpi_write(struct tusb1210 *tusb, u8 reg, u8 val)
{
int ret;
ret = ulpi_write(tusb->ulpi, reg, val);
if (ret)
dev_err(&tusb->ulpi->dev, "error %d writing val 0x%02x to reg 0x%02x\n",
ret, val, reg);
return ret;
}
static int tusb1210_ulpi_read(struct tusb1210 *tusb, u8 reg, u8 *val)
{
int ret;
ret = ulpi_read(tusb->ulpi, reg);
if (ret >= 0) {
*val = ret;
ret = 0;
} else {
dev_err(&tusb->ulpi->dev, "error %d reading reg 0x%02x\n", ret, reg);
}
return ret;
}
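/*
 * The helpers above are typically used in a read-modify-write sequence,
 * e.g. (sketch only; compare tusb1210_set_mode() below):
 *
 *	u8 reg;
 *
 *	if (tusb1210_ulpi_read(tusb, ULPI_OTG_CTRL, &reg) == 0) {
 *		reg |= ULPI_OTG_CTRL_DRVVBUS;
 *		tusb1210_ulpi_write(tusb, ULPI_OTG_CTRL, reg);
 *	}
 */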
static int tusb1210_power_on(struct phy *phy)
{
struct tusb1210 *tusb = phy_get_drvdata(phy);
gpiod_set_value_cansleep(tusb->gpio_reset, 1);
gpiod_set_value_cansleep(tusb->gpio_cs, 1);
msleep(TUSB1210_RESET_TIME_MS);
/* Restore the optional eye diagram optimization value */
tusb1210_ulpi_write(tusb, TUSB1210_VENDOR_SPECIFIC2, tusb->vendor_specific2);
return 0;
}
static int tusb1210_power_off(struct phy *phy)
{
struct tusb1210 *tusb = phy_get_drvdata(phy);
gpiod_set_value_cansleep(tusb->gpio_reset, 0);
gpiod_set_value_cansleep(tusb->gpio_cs, 0);
return 0;
}
static int tusb1210_set_mode(struct phy *phy, enum phy_mode mode, int submode)
{
struct tusb1210 *tusb = phy_get_drvdata(phy);
int ret;
u8 reg;
ret = tusb1210_ulpi_read(tusb, ULPI_OTG_CTRL, &reg);
if (ret < 0)
return ret;
switch (mode) {
case PHY_MODE_USB_HOST:
reg |= (ULPI_OTG_CTRL_DRVVBUS_EXT
| ULPI_OTG_CTRL_ID_PULLUP
| ULPI_OTG_CTRL_DP_PULLDOWN
| ULPI_OTG_CTRL_DM_PULLDOWN);
tusb1210_ulpi_write(tusb, ULPI_OTG_CTRL, reg);
reg |= ULPI_OTG_CTRL_DRVVBUS;
break;
case PHY_MODE_USB_DEVICE:
reg &= ~(ULPI_OTG_CTRL_DRVVBUS
| ULPI_OTG_CTRL_DP_PULLDOWN
| ULPI_OTG_CTRL_DM_PULLDOWN);
tusb1210_ulpi_write(tusb, ULPI_OTG_CTRL, reg);
reg &= ~ULPI_OTG_CTRL_DRVVBUS_EXT;
break;
default:
/* nothing */
return 0;
}
tusb->otg_ctrl = reg;
return tusb1210_ulpi_write(tusb, ULPI_OTG_CTRL, reg);
}
#ifdef CONFIG_POWER_SUPPLY
static const char * const tusb1210_chg_det_states[] = {
"CHG_DET_CONNECTING",
"CHG_DET_START_DET",
"CHG_DET_READ_DET",
"CHG_DET_FINISH_DET",
"CHG_DET_CONNECTED",
"CHG_DET_DISCONNECTING",
"CHG_DET_DISCONNECTING_DONE",
"CHG_DET_DISCONNECTED",
};
static void tusb1210_reset(struct tusb1210 *tusb)
{
gpiod_set_value_cansleep(tusb->gpio_reset, 0);
usleep_range(200, 500);
gpiod_set_value_cansleep(tusb->gpio_reset, 1);
}
static void tusb1210_chg_det_set_type(struct tusb1210 *tusb,
enum power_supply_usb_type type)
{
dev_dbg(&tusb->ulpi->dev, "charger type: %d\n", type);
tusb->chg_type = type;
tusb->chg_det_retries = 0;
power_supply_changed(tusb->psy);
}
static void tusb1210_chg_det_set_state(struct tusb1210 *tusb,
enum tusb1210_chg_det_state new_state,
int delay_ms)
{
if (delay_ms)
dev_dbg(&tusb->ulpi->dev, "chg_det new state %s in %d ms\n",
tusb1210_chg_det_states[new_state], delay_ms);
tusb->chg_det_state = new_state;
mod_delayed_work(system_long_wq, &tusb->chg_det_work,
msecs_to_jiffies(delay_ms));
}
static void tusb1210_chg_det_handle_ulpi_error(struct tusb1210 *tusb)
{
tusb1210_reset(tusb);
if (tusb->chg_det_retries < TUSB1210_CHG_DET_MAX_RETRIES) {
tusb->chg_det_retries++;
tusb1210_chg_det_set_state(tusb, TUSB1210_CHG_DET_START_DET,
TUSB1210_RESET_TIME_MS);
} else {
tusb1210_chg_det_set_state(tusb, TUSB1210_CHG_DET_FINISH_DET,
TUSB1210_RESET_TIME_MS);
}
}
/*
* Boards using a TUSB121x for charger-detection have 3 power_supply class devs:
*
* tusb1211-charger-detect(1) -> charger -> fuel-gauge
*
* To determine if a USB charger is connected to the board, the online prop of
* the charger psy needs to be read. Since the tusb1211-charger-detect psy is
* the start of the supplier -> supplied-to chain, power_supply_am_i_supplied()
* cannot be used here.
*
* Instead, below is a list of the power_supply names of known chargers for
* these boards and the charger psy is looked up by name from this list.
*
* (1) modelling the external USB charger
*/
static const char * const tusb1210_chargers[] = {
"bq24190-charger",
};
static bool tusb1210_get_online(struct tusb1210 *tusb)
{
union power_supply_propval val;
int i;
for (i = 0; i < ARRAY_SIZE(tusb1210_chargers) && !tusb->charger; i++)
tusb->charger = power_supply_get_by_name(tusb1210_chargers[i]);
if (!tusb->charger)
return false;
if (power_supply_get_property(tusb->charger, POWER_SUPPLY_PROP_ONLINE, &val))
return false;
return val.intval;
}
static void tusb1210_chg_det_work(struct work_struct *work)
{
struct tusb1210 *tusb = container_of(work, struct tusb1210, chg_det_work.work);
bool vbus_present = tusb1210_get_online(tusb);
int ret;
u8 val;
dev_dbg(&tusb->ulpi->dev, "chg_det state %s vbus_present %d\n",
tusb1210_chg_det_states[tusb->chg_det_state], vbus_present);
switch (tusb->chg_det_state) {
case TUSB1210_CHG_DET_CONNECTING:
tusb->chg_type = POWER_SUPPLY_USB_TYPE_UNKNOWN;
tusb->chg_det_retries = 0;
/* Power on USB controller for ulpi_read()/_write() */
ret = pm_runtime_resume_and_get(tusb->ulpi->dev.parent);
if (ret < 0) {
dev_err(&tusb->ulpi->dev, "error %d runtime-resuming\n", ret);
/* Should never happen, skip charger detection */
tusb1210_chg_det_set_state(tusb, TUSB1210_CHG_DET_CONNECTED, 0);
return;
}
tusb1210_chg_det_set_state(tusb, TUSB1210_CHG_DET_START_DET, 0);
break;
case TUSB1210_CHG_DET_START_DET:
/*
* Use the builtin charger detection FSM to keep things simple.
* This only detects DCP / SDP. This is good enough for the few
* boards which actually rely on the phy for charger detection.
*/
mutex_lock(&tusb->phy->mutex);
ret = tusb1210_ulpi_write(tusb, TUSB1211_VENDOR_SPECIFIC3_SET,
TUSB1211_VENDOR_SPECIFIC3_SW_USB_DET);
mutex_unlock(&tusb->phy->mutex);
if (ret) {
tusb1210_chg_det_handle_ulpi_error(tusb);
break;
}
/* Wait 400 ms for the charger detection FSM to finish */
tusb1210_chg_det_set_state(tusb, TUSB1210_CHG_DET_READ_DET, 400);
break;
case TUSB1210_CHG_DET_READ_DET:
mutex_lock(&tusb->phy->mutex);
ret = tusb1210_ulpi_read(tusb, TUSB1211_POWER_CONTROL, &val);
mutex_unlock(&tusb->phy->mutex);
if (ret) {
tusb1210_chg_det_handle_ulpi_error(tusb);
break;
}
if (val & TUSB1211_POWER_CONTROL_DET_COMP)
tusb1210_chg_det_set_type(tusb, POWER_SUPPLY_USB_TYPE_DCP);
else
tusb1210_chg_det_set_type(tusb, POWER_SUPPLY_USB_TYPE_SDP);
tusb1210_chg_det_set_state(tusb, TUSB1210_CHG_DET_FINISH_DET, 0);
break;
case TUSB1210_CHG_DET_FINISH_DET:
mutex_lock(&tusb->phy->mutex);
/* Set SW_CONTROL to stop the charger-det FSM */
ret = tusb1210_ulpi_write(tusb, TUSB1211_POWER_CONTROL_SET,
TUSB1211_POWER_CONTROL_SW_CONTROL);
/* Clear DP_VSRC_EN which may have been enabled by the charger-det FSM */
ret |= tusb1210_ulpi_write(tusb, TUSB1211_POWER_CONTROL_CLEAR,
TUSB1211_POWER_CONTROL_DP_VSRC_EN);
/* Clear CHGD_IDP_SRC_EN (may have been enabled by the charger-det FSM) */
ret |= tusb1210_ulpi_write(tusb, TUSB1211_VENDOR_SPECIFIC3_CLEAR,
TUSB1211_VENDOR_SPECIFIC3_CHGD_IDP_SRC_EN);
/* If any of the above fails reset the phy */
if (ret) {
tusb1210_reset(tusb);
msleep(TUSB1210_RESET_TIME_MS);
}
/* Restore phy-parameters and OTG_CTRL register */
tusb1210_ulpi_write(tusb, ULPI_OTG_CTRL, tusb->otg_ctrl);
tusb1210_ulpi_write(tusb, TUSB1210_VENDOR_SPECIFIC2,
tusb->vendor_specific2);
mutex_unlock(&tusb->phy->mutex);
pm_runtime_put(tusb->ulpi->dev.parent);
tusb1210_chg_det_set_state(tusb, TUSB1210_CHG_DET_CONNECTED, 0);
break;
case TUSB1210_CHG_DET_CONNECTED:
if (!vbus_present)
tusb1210_chg_det_set_state(tusb, TUSB1210_CHG_DET_DISCONNECTING, 0);
break;
case TUSB1210_CHG_DET_DISCONNECTING:
/*
* The phy seems to take approx. 600ms longer than the charger
* chip (which is used to get vbus_present) to determine Vbus
* session end. Wait 800ms to ensure the phy has detected and
* signalled Vbus session end.
*/
tusb1210_chg_det_set_state(tusb, TUSB1210_CHG_DET_DISCONNECTING_DONE, 800);
break;
case TUSB1210_CHG_DET_DISCONNECTING_DONE:
/*
* The phy often stops reacting to ulpi_read()/_write() requests
* after a Vbus-session end. Reset it to work around this.
*/
tusb1210_reset(tusb);
tusb1210_chg_det_set_type(tusb, POWER_SUPPLY_USB_TYPE_UNKNOWN);
tusb1210_chg_det_set_state(tusb, TUSB1210_CHG_DET_DISCONNECTED, 0);
break;
case TUSB1210_CHG_DET_DISCONNECTED:
if (vbus_present)
tusb1210_chg_det_set_state(tusb, TUSB1210_CHG_DET_CONNECTING, 0);
break;
}
}
static int tusb1210_psy_notifier(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct tusb1210 *tusb = container_of(nb, struct tusb1210, psy_nb);
struct power_supply *psy = ptr;
if (psy != tusb->psy && psy->desc->type == POWER_SUPPLY_TYPE_USB)
queue_delayed_work(system_long_wq, &tusb->chg_det_work, 0);
return NOTIFY_OK;
}
static int tusb1210_psy_get_prop(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
struct tusb1210 *tusb = power_supply_get_drvdata(psy);
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
val->intval = tusb1210_get_online(tusb);
break;
case POWER_SUPPLY_PROP_USB_TYPE:
val->intval = tusb->chg_type;
break;
case POWER_SUPPLY_PROP_CURRENT_MAX:
if (tusb->chg_type == POWER_SUPPLY_USB_TYPE_DCP)
val->intval = 2000000;
else
val->intval = 500000;
break;
default:
return -EINVAL;
}
return 0;
}
static const enum power_supply_usb_type tusb1210_psy_usb_types[] = {
POWER_SUPPLY_USB_TYPE_SDP,
POWER_SUPPLY_USB_TYPE_DCP,
POWER_SUPPLY_USB_TYPE_UNKNOWN,
};
static const enum power_supply_property tusb1210_psy_props[] = {
POWER_SUPPLY_PROP_ONLINE,
POWER_SUPPLY_PROP_USB_TYPE,
POWER_SUPPLY_PROP_CURRENT_MAX,
};
static const struct power_supply_desc tusb1210_psy_desc = {
.name = "tusb1211-charger-detect",
.type = POWER_SUPPLY_TYPE_USB,
.usb_types = tusb1210_psy_usb_types,
.num_usb_types = ARRAY_SIZE(tusb1210_psy_usb_types),
.properties = tusb1210_psy_props,
.num_properties = ARRAY_SIZE(tusb1210_psy_props),
.get_property = tusb1210_psy_get_prop,
};
/* Set up charger detection if requested; on errors continue without chg-det */
static void tusb1210_probe_charger_detect(struct tusb1210 *tusb)
{
struct power_supply_config psy_cfg = { .drv_data = tusb };
struct device *dev = &tusb->ulpi->dev;
int ret;
if (!device_property_read_bool(dev->parent, "linux,phy_charger_detect"))
return;
if (tusb->ulpi->id.product != 0x1508) {
dev_err(dev, "charger detection is only supported on the TUSB1211\n");
return;
}
ret = tusb1210_ulpi_read(tusb, ULPI_OTG_CTRL, &tusb->otg_ctrl);
if (ret)
return;
tusb->psy = power_supply_register(dev, &tusb1210_psy_desc, &psy_cfg);
if (IS_ERR(tusb->psy))
return;
/*
* Delay initial run by 2 seconds to allow the charger driver,
* which is used to determine vbus_present, to load.
*/
tusb->chg_det_state = TUSB1210_CHG_DET_DISCONNECTED;
INIT_DELAYED_WORK(&tusb->chg_det_work, tusb1210_chg_det_work);
queue_delayed_work(system_long_wq, &tusb->chg_det_work, 2 * HZ);
tusb->psy_nb.notifier_call = tusb1210_psy_notifier;
power_supply_reg_notifier(&tusb->psy_nb);
}
static void tusb1210_remove_charger_detect(struct tusb1210 *tusb)
{
if (!IS_ERR_OR_NULL(tusb->psy)) {
power_supply_unreg_notifier(&tusb->psy_nb);
cancel_delayed_work_sync(&tusb->chg_det_work);
power_supply_unregister(tusb->psy);
}
if (tusb->charger)
power_supply_put(tusb->charger);
}
#else
static void tusb1210_probe_charger_detect(struct tusb1210 *tusb) { }
static void tusb1210_remove_charger_detect(struct tusb1210 *tusb) { }
#endif
static const struct phy_ops phy_ops = {
.power_on = tusb1210_power_on,
.power_off = tusb1210_power_off,
.set_mode = tusb1210_set_mode,
.owner = THIS_MODULE,
};
static int tusb1210_probe(struct ulpi *ulpi)
{
struct tusb1210 *tusb;
u8 val, reg;
int ret;
tusb = devm_kzalloc(&ulpi->dev, sizeof(*tusb), GFP_KERNEL);
if (!tusb)
return -ENOMEM;
tusb->ulpi = ulpi;
tusb->gpio_reset = devm_gpiod_get_optional(&ulpi->dev, "reset",
GPIOD_OUT_LOW);
if (IS_ERR(tusb->gpio_reset))
return PTR_ERR(tusb->gpio_reset);
gpiod_set_value_cansleep(tusb->gpio_reset, 1);
tusb->gpio_cs = devm_gpiod_get_optional(&ulpi->dev, "cs",
GPIOD_OUT_LOW);
if (IS_ERR(tusb->gpio_cs))
return PTR_ERR(tusb->gpio_cs);
gpiod_set_value_cansleep(tusb->gpio_cs, 1);
/*
* VENDOR_SPECIFIC2 register in TUSB1210 can be used for configuring eye
* diagram optimization and DP/DM swap.
*/
ret = tusb1210_ulpi_read(tusb, TUSB1210_VENDOR_SPECIFIC2, &reg);
if (ret)
return ret;
/* High speed output drive strength configuration */
if (!device_property_read_u8(&ulpi->dev, "ihstx", &val))
u8p_replace_bits(&reg, val, (u8)TUSB1210_VENDOR_SPECIFIC2_IHSTX_MASK);
/* High speed output impedance configuration */
if (!device_property_read_u8(&ulpi->dev, "zhsdrv", &val))
u8p_replace_bits(&reg, val, (u8)TUSB1210_VENDOR_SPECIFIC2_ZHSDRV_MASK);
/* DP/DM swap control */
if (!device_property_read_u8(&ulpi->dev, "datapolarity", &val))
u8p_replace_bits(&reg, val, (u8)TUSB1210_VENDOR_SPECIFIC2_DP_MASK);
ret = tusb1210_ulpi_write(tusb, TUSB1210_VENDOR_SPECIFIC2, reg);
if (ret)
return ret;
tusb->vendor_specific2 = reg;
tusb1210_probe_charger_detect(tusb);
tusb->phy = ulpi_phy_create(ulpi, &phy_ops);
if (IS_ERR(tusb->phy)) {
ret = PTR_ERR(tusb->phy);
goto err_remove_charger;
}
phy_set_drvdata(tusb->phy, tusb);
ulpi_set_drvdata(ulpi, tusb);
return 0;
err_remove_charger:
tusb1210_remove_charger_detect(tusb);
return ret;
}
static void tusb1210_remove(struct ulpi *ulpi)
{
struct tusb1210 *tusb = ulpi_get_drvdata(ulpi);
ulpi_phy_destroy(ulpi, tusb->phy);
tusb1210_remove_charger_detect(tusb);
}
#define TI_VENDOR_ID 0x0451
static const struct ulpi_device_id tusb1210_ulpi_id[] = {
{ TI_VENDOR_ID, 0x1507, }, /* TUSB1210 */
{ TI_VENDOR_ID, 0x1508, }, /* TUSB1211 */
{ },
};
MODULE_DEVICE_TABLE(ulpi, tusb1210_ulpi_id);
static struct ulpi_driver tusb1210_driver = {
.id_table = tusb1210_ulpi_id,
.probe = tusb1210_probe,
.remove = tusb1210_remove,
.driver = {
.name = "tusb1210",
.owner = THIS_MODULE,
},
};
module_ulpi_driver(tusb1210_driver);
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TUSB1210 ULPI PHY driver");
| linux-master | drivers/phy/ti/phy-tusb1210.c |
// SPDX-License-Identifier: GPL-2.0
/*
* PCIe SERDES driver for AM654x SoC
*
* Copyright (C) 2018 - 2019 Texas Instruments Incorporated - http://www.ti.com/
* Author: Kishon Vijay Abraham I <[email protected]>
*/
#include <dt-bindings/phy/phy.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/mux/consumer.h>
#include <linux/of_address.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#define CMU_R004 0x4
#define CMU_R060 0x60
#define CMU_R07C 0x7c
#define CMU_R088 0x88
#define CMU_R0D0 0xd0
#define CMU_R0E8 0xe8
#define LANE_R048 0x248
#define LANE_R058 0x258
#define LANE_R06c 0x26c
#define LANE_R070 0x270
#define LANE_R19C 0x39c
#define COMLANE_R004 0xa04
#define COMLANE_R138 0xb38
#define VERSION_VAL 0x70
#define COMLANE_R190 0xb90
#define COMLANE_R194 0xb94
#define COMRXEQ_R004 0x1404
#define COMRXEQ_R008 0x1408
#define COMRXEQ_R00C 0x140c
#define COMRXEQ_R014 0x1414
#define COMRXEQ_R018 0x1418
#define COMRXEQ_R01C 0x141c
#define COMRXEQ_R04C 0x144c
#define COMRXEQ_R088 0x1488
#define COMRXEQ_R094 0x1494
#define COMRXEQ_R098 0x1498
#define SERDES_CTRL 0x1fd0
#define WIZ_LANEXCTL_STS 0x1fe0
#define TX0_DISABLE_STATE 0x4
#define TX0_SLEEP_STATE 0x5
#define TX0_SNOOZE_STATE 0x6
#define TX0_ENABLE_STATE 0x7
#define RX0_DISABLE_STATE 0x4
#define RX0_SLEEP_STATE 0x5
#define RX0_SNOOZE_STATE 0x6
#define RX0_ENABLE_STATE 0x7
#define WIZ_PLL_CTRL 0x1ff4
#define PLL_DISABLE_STATE 0x4
#define PLL_SLEEP_STATE 0x5
#define PLL_SNOOZE_STATE 0x6
#define PLL_ENABLE_STATE 0x7
#define PLL_LOCK_TIME 100000 /* in microseconds */
#define SLEEP_TIME 100 /* in microseconds */
#define LANE_USB3 0x0
#define LANE_PCIE0_LANE0 0x1
#define LANE_PCIE1_LANE0 0x0
#define LANE_PCIE0_LANE1 0x1
#define SERDES_NUM_CLOCKS 3
#define AM654_SERDES_CTRL_CLKSEL_MASK GENMASK(7, 4)
#define AM654_SERDES_CTRL_CLKSEL_SHIFT 4
struct serdes_am654_clk_mux {
struct clk_hw hw;
struct regmap *regmap;
unsigned int reg;
int clk_id;
struct clk_init_data clk_data;
};
#define to_serdes_am654_clk_mux(_hw) \
container_of(_hw, struct serdes_am654_clk_mux, hw)
static const struct regmap_config serdes_am654_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.fast_io = true,
.max_register = 0x1ffc,
};
enum serdes_am654_fields {
/* CMU PLL Control */
CMU_PLL_CTRL,
LANE_PLL_CTRL_RXEQ_RXIDLE,
/* CMU VCO bias current and VREG setting */
AHB_PMA_CM_VCO_VBIAS_VREG,
AHB_PMA_CM_VCO_BIAS_VREG,
AHB_PMA_CM_SR,
AHB_SSC_GEN_Z_O_20_13,
/* AHB PMA Lane Configuration */
AHB_PMA_LN_AGC_THSEL_VREGH,
/* AGC and Signal detect threshold for Gen3 */
AHB_PMA_LN_GEN3_AGC_SD_THSEL,
AHB_PMA_LN_RX_SELR_GEN3,
AHB_PMA_LN_TX_DRV,
/* CMU Master Reset */
CMU_MASTER_CDN,
/* P2S ring buffer initial startup pointer difference */
P2S_RBUF_PTR_DIFF,
CONFIG_VERSION,
/* Lane 1 Master Reset */
L1_MASTER_CDN,
/* CMU OK Status */
CMU_OK_I_0,
/* Mid-speed initial calibration control */
COMRXEQ_MS_INIT_CTRL_7_0,
/* High-speed initial calibration control */
COMRXEQ_HS_INIT_CAL_7_0,
/* Mid-speed recalibration control */
COMRXEQ_MS_RECAL_CTRL_7_0,
/* High-speed recalibration control */
COMRXEQ_HS_RECAL_CTRL_7_0,
/* ATT configuration */
COMRXEQ_CSR_ATT_CONFIG,
/* Edge based boost adaptation window length */
COMRXEQ_CSR_EBSTADAPT_WIN_LEN,
/* COMRXEQ control 3 & 4 */
COMRXEQ_CTRL_3_4,
/* COMRXEQ control 14, 15 and 16 */
COMRXEQ_CTRL_14_15_16,
/* Threshold for errors in pattern data */
COMRXEQ_CSR_DLEV_ERR_THRESH,
/* COMRXEQ control 25 */
COMRXEQ_CTRL_25,
/* Mid-speed rate change calibration control */
CSR_RXEQ_RATE_CHANGE_CAL_RUN_RATE2_O,
/* High-speed rate change calibration control */
COMRXEQ_HS_RCHANGE_CTRL_7_0,
/* Serdes reset */
POR_EN,
/* Tx Enable Value */
TX0_ENABLE,
/* Rx Enable Value */
RX0_ENABLE,
/* PLL Enable Value */
PLL_ENABLE,
/* PLL ready for use */
PLL_OK,
/* sentinel */
MAX_FIELDS
};
static const struct reg_field serdes_am654_reg_fields[] = {
[CMU_PLL_CTRL] = REG_FIELD(CMU_R004, 8, 15),
[AHB_PMA_CM_VCO_VBIAS_VREG] = REG_FIELD(CMU_R060, 8, 15),
[CMU_MASTER_CDN] = REG_FIELD(CMU_R07C, 24, 31),
[AHB_PMA_CM_VCO_BIAS_VREG] = REG_FIELD(CMU_R088, 24, 31),
[AHB_PMA_CM_SR] = REG_FIELD(CMU_R0D0, 24, 31),
[AHB_SSC_GEN_Z_O_20_13] = REG_FIELD(CMU_R0E8, 8, 15),
[LANE_PLL_CTRL_RXEQ_RXIDLE] = REG_FIELD(LANE_R048, 8, 15),
[AHB_PMA_LN_AGC_THSEL_VREGH] = REG_FIELD(LANE_R058, 16, 23),
[AHB_PMA_LN_GEN3_AGC_SD_THSEL] = REG_FIELD(LANE_R06c, 0, 7),
[AHB_PMA_LN_RX_SELR_GEN3] = REG_FIELD(LANE_R070, 16, 23),
[AHB_PMA_LN_TX_DRV] = REG_FIELD(LANE_R19C, 16, 23),
[P2S_RBUF_PTR_DIFF] = REG_FIELD(COMLANE_R004, 0, 7),
[CONFIG_VERSION] = REG_FIELD(COMLANE_R138, 16, 23),
[L1_MASTER_CDN] = REG_FIELD(COMLANE_R190, 8, 15),
[CMU_OK_I_0] = REG_FIELD(COMLANE_R194, 19, 19),
[COMRXEQ_MS_INIT_CTRL_7_0] = REG_FIELD(COMRXEQ_R004, 24, 31),
[COMRXEQ_HS_INIT_CAL_7_0] = REG_FIELD(COMRXEQ_R008, 0, 7),
[COMRXEQ_MS_RECAL_CTRL_7_0] = REG_FIELD(COMRXEQ_R00C, 8, 15),
[COMRXEQ_HS_RECAL_CTRL_7_0] = REG_FIELD(COMRXEQ_R00C, 16, 23),
[COMRXEQ_CSR_ATT_CONFIG] = REG_FIELD(COMRXEQ_R014, 16, 23),
[COMRXEQ_CSR_EBSTADAPT_WIN_LEN] = REG_FIELD(COMRXEQ_R018, 16, 23),
[COMRXEQ_CTRL_3_4] = REG_FIELD(COMRXEQ_R01C, 8, 15),
[COMRXEQ_CTRL_14_15_16] = REG_FIELD(COMRXEQ_R04C, 0, 7),
[COMRXEQ_CSR_DLEV_ERR_THRESH] = REG_FIELD(COMRXEQ_R088, 16, 23),
[COMRXEQ_CTRL_25] = REG_FIELD(COMRXEQ_R094, 24, 31),
[CSR_RXEQ_RATE_CHANGE_CAL_RUN_RATE2_O] = REG_FIELD(COMRXEQ_R098, 8, 15),
[COMRXEQ_HS_RCHANGE_CTRL_7_0] = REG_FIELD(COMRXEQ_R098, 16, 23),
[POR_EN] = REG_FIELD(SERDES_CTRL, 29, 29),
[TX0_ENABLE] = REG_FIELD(WIZ_LANEXCTL_STS, 29, 31),
[RX0_ENABLE] = REG_FIELD(WIZ_LANEXCTL_STS, 13, 15),
[PLL_ENABLE] = REG_FIELD(WIZ_PLL_CTRL, 29, 31),
[PLL_OK] = REG_FIELD(WIZ_PLL_CTRL, 28, 28),
};
struct serdes_am654 {
struct regmap *regmap;
struct regmap_field *fields[MAX_FIELDS];
struct device *dev;
struct mux_control *control;
bool busy;
u32 type;
struct device_node *of_node;
struct clk_onecell_data clk_data;
struct clk *clks[SERDES_NUM_CLOCKS];
};
static int serdes_am654_enable_pll(struct serdes_am654 *phy)
{
int ret;
u32 val;
ret = regmap_field_write(phy->fields[PLL_ENABLE], PLL_ENABLE_STATE);
if (ret)
return ret;
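/*
 * Poll the PLL_OK status field roughly every 1000 us until it reads
 * non-zero, giving up after PLL_LOCK_TIME microseconds.
 */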
return regmap_field_read_poll_timeout(phy->fields[PLL_OK], val, val,
1000, PLL_LOCK_TIME);
}
static void serdes_am654_disable_pll(struct serdes_am654 *phy)
{
struct device *dev = phy->dev;
int ret;
ret = regmap_field_write(phy->fields[PLL_ENABLE], PLL_DISABLE_STATE);
if (ret)
dev_err(dev, "Failed to disable PLL\n");
}
static int serdes_am654_enable_txrx(struct serdes_am654 *phy)
{
int ret = 0;
/* Enable TX */
ret |= regmap_field_write(phy->fields[TX0_ENABLE], TX0_ENABLE_STATE);
/* Enable RX */
ret |= regmap_field_write(phy->fields[RX0_ENABLE], RX0_ENABLE_STATE);
if (ret)
return -EIO;
return 0;
}
static int serdes_am654_disable_txrx(struct serdes_am654 *phy)
{
int ret = 0;
/* Disable TX */
ret |= regmap_field_write(phy->fields[TX0_ENABLE], TX0_DISABLE_STATE);
/* Disable RX */
ret |= regmap_field_write(phy->fields[RX0_ENABLE], RX0_DISABLE_STATE);
if (ret)
return -EIO;
return 0;
}
static int serdes_am654_power_on(struct phy *x)
{
struct serdes_am654 *phy = phy_get_drvdata(x);
struct device *dev = phy->dev;
int ret;
u32 val;
ret = serdes_am654_enable_pll(phy);
if (ret) {
dev_err(dev, "Failed to enable PLL\n");
return ret;
}
ret = serdes_am654_enable_txrx(phy);
if (ret) {
dev_err(dev, "Failed to enable TX RX\n");
return ret;
}
return regmap_field_read_poll_timeout(phy->fields[CMU_OK_I_0], val,
val, SLEEP_TIME, PLL_LOCK_TIME);
}
static int serdes_am654_power_off(struct phy *x)
{
struct serdes_am654 *phy = phy_get_drvdata(x);
serdes_am654_disable_txrx(phy);
serdes_am654_disable_pll(phy);
return 0;
}
#define SERDES_AM654_CFG(offset, a, b, val) \
regmap_update_bits(phy->regmap, (offset),\
GENMASK((a), (b)), (val) << (b))
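/*
 * For example (sketch of the expansion), SERDES_AM654_CFG(0x0000, 31, 24, 0x17)
 * becomes:
 *
 *	regmap_update_bits(phy->regmap, 0x0000, GENMASK(31, 24), 0x17 << 24);
 *
 * i.e. it programs the value into bit field [a:b] of the given register.
 */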
static int serdes_am654_usb3_init(struct serdes_am654 *phy)
{
SERDES_AM654_CFG(0x0000, 31, 24, 0x17);
SERDES_AM654_CFG(0x0004, 15, 8, 0x02);
SERDES_AM654_CFG(0x0004, 7, 0, 0x0e);
SERDES_AM654_CFG(0x0008, 23, 16, 0x2e);
SERDES_AM654_CFG(0x0008, 31, 24, 0x2e);
SERDES_AM654_CFG(0x0060, 7, 0, 0x4b);
SERDES_AM654_CFG(0x0060, 15, 8, 0x98);
SERDES_AM654_CFG(0x0060, 23, 16, 0x60);
SERDES_AM654_CFG(0x00d0, 31, 24, 0x45);
SERDES_AM654_CFG(0x00e8, 15, 8, 0x0e);
SERDES_AM654_CFG(0x0220, 7, 0, 0x34);
SERDES_AM654_CFG(0x0220, 15, 8, 0x34);
SERDES_AM654_CFG(0x0220, 31, 24, 0x37);
SERDES_AM654_CFG(0x0224, 7, 0, 0x37);
SERDES_AM654_CFG(0x0224, 15, 8, 0x37);
SERDES_AM654_CFG(0x0228, 23, 16, 0x37);
SERDES_AM654_CFG(0x0228, 31, 24, 0x37);
SERDES_AM654_CFG(0x022c, 7, 0, 0x37);
SERDES_AM654_CFG(0x022c, 15, 8, 0x37);
SERDES_AM654_CFG(0x0230, 15, 8, 0x2a);
SERDES_AM654_CFG(0x0230, 23, 16, 0x2a);
SERDES_AM654_CFG(0x0240, 23, 16, 0x10);
SERDES_AM654_CFG(0x0240, 31, 24, 0x34);
SERDES_AM654_CFG(0x0244, 7, 0, 0x40);
SERDES_AM654_CFG(0x0244, 23, 16, 0x34);
SERDES_AM654_CFG(0x0248, 15, 8, 0x0d);
SERDES_AM654_CFG(0x0258, 15, 8, 0x16);
SERDES_AM654_CFG(0x0258, 23, 16, 0x84);
SERDES_AM654_CFG(0x0258, 31, 24, 0xf2);
SERDES_AM654_CFG(0x025c, 7, 0, 0x21);
SERDES_AM654_CFG(0x0260, 7, 0, 0x27);
SERDES_AM654_CFG(0x0260, 15, 8, 0x04);
SERDES_AM654_CFG(0x0268, 15, 8, 0x04);
SERDES_AM654_CFG(0x0288, 15, 8, 0x2c);
SERDES_AM654_CFG(0x0330, 31, 24, 0xa0);
SERDES_AM654_CFG(0x0338, 23, 16, 0x03);
SERDES_AM654_CFG(0x0338, 31, 24, 0x00);
SERDES_AM654_CFG(0x033c, 7, 0, 0x00);
SERDES_AM654_CFG(0x0344, 31, 24, 0x18);
SERDES_AM654_CFG(0x034c, 7, 0, 0x18);
SERDES_AM654_CFG(0x039c, 23, 16, 0x3b);
SERDES_AM654_CFG(0x0a04, 7, 0, 0x03);
SERDES_AM654_CFG(0x0a14, 31, 24, 0x3c);
SERDES_AM654_CFG(0x0a18, 15, 8, 0x3c);
SERDES_AM654_CFG(0x0a38, 7, 0, 0x3e);
SERDES_AM654_CFG(0x0a38, 15, 8, 0x3e);
SERDES_AM654_CFG(0x0ae0, 7, 0, 0x07);
SERDES_AM654_CFG(0x0b6c, 23, 16, 0xcd);
SERDES_AM654_CFG(0x0b6c, 31, 24, 0x04);
SERDES_AM654_CFG(0x0b98, 23, 16, 0x03);
SERDES_AM654_CFG(0x1400, 7, 0, 0x3f);
SERDES_AM654_CFG(0x1404, 23, 16, 0x6f);
SERDES_AM654_CFG(0x1404, 31, 24, 0x6f);
SERDES_AM654_CFG(0x140c, 7, 0, 0x6f);
SERDES_AM654_CFG(0x140c, 15, 8, 0x6f);
SERDES_AM654_CFG(0x1410, 15, 8, 0x27);
SERDES_AM654_CFG(0x1414, 7, 0, 0x0c);
SERDES_AM654_CFG(0x1414, 23, 16, 0x07);
SERDES_AM654_CFG(0x1418, 23, 16, 0x40);
SERDES_AM654_CFG(0x141c, 7, 0, 0x00);
SERDES_AM654_CFG(0x141c, 15, 8, 0x1f);
SERDES_AM654_CFG(0x1428, 31, 24, 0x08);
SERDES_AM654_CFG(0x1434, 31, 24, 0x00);
SERDES_AM654_CFG(0x1444, 7, 0, 0x94);
SERDES_AM654_CFG(0x1460, 31, 24, 0x7f);
SERDES_AM654_CFG(0x1464, 7, 0, 0x43);
SERDES_AM654_CFG(0x1464, 23, 16, 0x6f);
SERDES_AM654_CFG(0x1464, 31, 24, 0x43);
SERDES_AM654_CFG(0x1484, 23, 16, 0x8f);
SERDES_AM654_CFG(0x1498, 7, 0, 0x4f);
SERDES_AM654_CFG(0x1498, 23, 16, 0x4f);
SERDES_AM654_CFG(0x007c, 31, 24, 0x0d);
SERDES_AM654_CFG(0x0b90, 15, 8, 0x0f);
return 0;
}
static int serdes_am654_pcie_init(struct serdes_am654 *phy)
{
int ret = 0;
ret |= regmap_field_write(phy->fields[CMU_PLL_CTRL], 0x2);
ret |= regmap_field_write(phy->fields[AHB_PMA_CM_VCO_VBIAS_VREG], 0x98);
ret |= regmap_field_write(phy->fields[AHB_PMA_CM_VCO_BIAS_VREG], 0x98);
ret |= regmap_field_write(phy->fields[AHB_PMA_CM_SR], 0x45);
ret |= regmap_field_write(phy->fields[AHB_SSC_GEN_Z_O_20_13], 0xe);
ret |= regmap_field_write(phy->fields[LANE_PLL_CTRL_RXEQ_RXIDLE], 0x5);
ret |= regmap_field_write(phy->fields[AHB_PMA_LN_AGC_THSEL_VREGH], 0x83);
ret |= regmap_field_write(phy->fields[AHB_PMA_LN_GEN3_AGC_SD_THSEL], 0x83);
ret |= regmap_field_write(phy->fields[AHB_PMA_LN_RX_SELR_GEN3], 0x81);
ret |= regmap_field_write(phy->fields[AHB_PMA_LN_TX_DRV], 0x3b);
ret |= regmap_field_write(phy->fields[P2S_RBUF_PTR_DIFF], 0x3);
ret |= regmap_field_write(phy->fields[CONFIG_VERSION], VERSION_VAL);
ret |= regmap_field_write(phy->fields[COMRXEQ_MS_INIT_CTRL_7_0], 0xf);
ret |= regmap_field_write(phy->fields[COMRXEQ_HS_INIT_CAL_7_0], 0x4f);
ret |= regmap_field_write(phy->fields[COMRXEQ_MS_RECAL_CTRL_7_0], 0xf);
ret |= regmap_field_write(phy->fields[COMRXEQ_HS_RECAL_CTRL_7_0], 0x4f);
ret |= regmap_field_write(phy->fields[COMRXEQ_CSR_ATT_CONFIG], 0x7);
ret |= regmap_field_write(phy->fields[COMRXEQ_CSR_EBSTADAPT_WIN_LEN], 0x7f);
ret |= regmap_field_write(phy->fields[COMRXEQ_CTRL_3_4], 0xf);
ret |= regmap_field_write(phy->fields[COMRXEQ_CTRL_14_15_16], 0x9a);
ret |= regmap_field_write(phy->fields[COMRXEQ_CSR_DLEV_ERR_THRESH], 0x32);
ret |= regmap_field_write(phy->fields[COMRXEQ_CTRL_25], 0x80);
ret |= regmap_field_write(phy->fields[CSR_RXEQ_RATE_CHANGE_CAL_RUN_RATE2_O], 0xf);
ret |= regmap_field_write(phy->fields[COMRXEQ_HS_RCHANGE_CTRL_7_0], 0x4f);
ret |= regmap_field_write(phy->fields[CMU_MASTER_CDN], 0x1);
ret |= regmap_field_write(phy->fields[L1_MASTER_CDN], 0x2);
if (ret)
return -EIO;
return 0;
}
static int serdes_am654_init(struct phy *x)
{
struct serdes_am654 *phy = phy_get_drvdata(x);
switch (phy->type) {
case PHY_TYPE_PCIE:
return serdes_am654_pcie_init(phy);
case PHY_TYPE_USB3:
return serdes_am654_usb3_init(phy);
default:
return -EINVAL;
}
}
static int serdes_am654_reset(struct phy *x)
{
struct serdes_am654 *phy = phy_get_drvdata(x);
int ret = 0;
serdes_am654_disable_pll(phy);
serdes_am654_disable_txrx(phy);
ret |= regmap_field_write(phy->fields[POR_EN], 0x1);
mdelay(1);
ret |= regmap_field_write(phy->fields[POR_EN], 0x0);
if (ret)
return -EIO;
return 0;
}
static void serdes_am654_release(struct phy *x)
{
struct serdes_am654 *phy = phy_get_drvdata(x);
phy->type = PHY_NONE;
phy->busy = false;
mux_control_deselect(phy->control);
}
static struct phy *serdes_am654_xlate(struct device *dev,
struct of_phandle_args *args)
{
struct serdes_am654 *am654_phy;
struct phy *phy;
int ret;
phy = of_phy_simple_xlate(dev, args);
if (IS_ERR(phy))
return phy;
am654_phy = phy_get_drvdata(phy);
if (am654_phy->busy)
return ERR_PTR(-EBUSY);
ret = mux_control_select(am654_phy->control, args->args[1]);
if (ret) {
dev_err(dev, "Failed to select SERDES Lane Function\n");
return ERR_PTR(ret);
}
am654_phy->busy = true;
am654_phy->type = args->args[0];
return phy;
}
static const struct phy_ops ops = {
.reset = serdes_am654_reset,
.init = serdes_am654_init,
.power_on = serdes_am654_power_on,
.power_off = serdes_am654_power_off,
.release = serdes_am654_release,
.owner = THIS_MODULE,
};
#define SERDES_NUM_MUX_COMBINATIONS 16
#define LICLK 0
#define EXT_REFCLK 1
#define RICLK 2
static const int
serdes_am654_mux_table[SERDES_NUM_MUX_COMBINATIONS][SERDES_NUM_CLOCKS] = {
/*
* Each combination maps to one of
* "Figure 12-1986. SerDes Reference Clock Distribution"
* in TRM.
*/
/* Parent of CMU refclk, Left output, Right output
* either of EXT_REFCLK, LICLK, RICLK
*/
{ EXT_REFCLK, EXT_REFCLK, EXT_REFCLK }, /* 0000 */
{ RICLK, EXT_REFCLK, EXT_REFCLK }, /* 0001 */
{ EXT_REFCLK, RICLK, LICLK }, /* 0010 */
{ RICLK, RICLK, EXT_REFCLK }, /* 0011 */
{ LICLK, EXT_REFCLK, EXT_REFCLK }, /* 0100 */
{ EXT_REFCLK, EXT_REFCLK, EXT_REFCLK }, /* 0101 */
{ LICLK, RICLK, LICLK }, /* 0110 */
{ EXT_REFCLK, RICLK, LICLK }, /* 0111 */
{ EXT_REFCLK, EXT_REFCLK, LICLK }, /* 1000 */
{ RICLK, EXT_REFCLK, LICLK }, /* 1001 */
{ EXT_REFCLK, RICLK, EXT_REFCLK }, /* 1010 */
{ RICLK, RICLK, EXT_REFCLK }, /* 1011 */
{ LICLK, EXT_REFCLK, LICLK }, /* 1100 */
{ EXT_REFCLK, EXT_REFCLK, LICLK }, /* 1101 */
{ LICLK, RICLK, EXT_REFCLK }, /* 1110 */
{ EXT_REFCLK, RICLK, EXT_REFCLK }, /* 1111 */
};
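/*
 * Worked example: if the CLKSEL field in the SERDES_CTRL register reads
 * 0b0010, then per the table above the CMU refclk parent is EXT_REFCLK,
 * the left output parent is RICLK and the right output parent is LICLK.
 */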
static u8 serdes_am654_clk_mux_get_parent(struct clk_hw *hw)
{
struct serdes_am654_clk_mux *mux = to_serdes_am654_clk_mux(hw);
struct regmap *regmap = mux->regmap;
unsigned int reg = mux->reg;
unsigned int val;
regmap_read(regmap, reg, &val);
val &= AM654_SERDES_CTRL_CLKSEL_MASK;
val >>= AM654_SERDES_CTRL_CLKSEL_SHIFT;
return serdes_am654_mux_table[val][mux->clk_id];
}
static int serdes_am654_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
struct serdes_am654_clk_mux *mux = to_serdes_am654_clk_mux(hw);
struct regmap *regmap = mux->regmap;
const char *name = clk_hw_get_name(hw);
unsigned int reg = mux->reg;
int clk_id = mux->clk_id;
int parents[SERDES_NUM_CLOCKS];
const int *p;
u32 val;
int found, i;
int ret;
/* get existing setting */
regmap_read(regmap, reg, &val);
val &= AM654_SERDES_CTRL_CLKSEL_MASK;
val >>= AM654_SERDES_CTRL_CLKSEL_SHIFT;
for (i = 0; i < SERDES_NUM_CLOCKS; i++)
parents[i] = serdes_am654_mux_table[val][i];
/* change parent of this clock. others left intact */
parents[clk_id] = index;
/* Find the match */
for (val = 0; val < SERDES_NUM_MUX_COMBINATIONS; val++) {
p = serdes_am654_mux_table[val];
found = 1;
for (i = 0; i < SERDES_NUM_CLOCKS; i++) {
if (parents[i] != p[i]) {
found = 0;
break;
}
}
if (found)
break;
}
if (!found) {
/*
* This can never happen, unless we missed
* a valid combination in serdes_am654_mux_table.
*/
WARN(1, "Failed to find the parent of %s clock\n", name);
return -EINVAL;
}
val <<= AM654_SERDES_CTRL_CLKSEL_SHIFT;
ret = regmap_update_bits(regmap, reg, AM654_SERDES_CTRL_CLKSEL_MASK,
val);
return ret;
}
static const struct clk_ops serdes_am654_clk_mux_ops = {
.determine_rate = __clk_mux_determine_rate,
.set_parent = serdes_am654_clk_mux_set_parent,
.get_parent = serdes_am654_clk_mux_get_parent,
};
static int serdes_am654_clk_register(struct serdes_am654 *am654_phy,
const char *clock_name, int clock_num)
{
struct device_node *node = am654_phy->of_node;
struct device *dev = am654_phy->dev;
struct serdes_am654_clk_mux *mux;
struct device_node *regmap_node;
const char **parent_names;
struct clk_init_data *init;
unsigned int num_parents;
struct regmap *regmap;
const __be32 *addr;
unsigned int reg;
struct clk *clk;
int ret = 0;
mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
if (!mux)
return -ENOMEM;
init = &mux->clk_data;
regmap_node = of_parse_phandle(node, "ti,serdes-clk", 0);
if (!regmap_node) {
dev_err(dev, "Failed to get serdes-clk node\n");
ret = -ENODEV;
goto out_put_node;
}
regmap = syscon_node_to_regmap(regmap_node->parent);
if (IS_ERR(regmap)) {
dev_err(dev, "Failed to get Syscon regmap\n");
ret = PTR_ERR(regmap);
goto out_put_node;
}
num_parents = of_clk_get_parent_count(node);
if (num_parents < 2) {
dev_err(dev, "SERDES clock must have at least 2 parents\n");
ret = -EINVAL;
goto out_put_node;
}
parent_names = devm_kzalloc(dev, (sizeof(char *) * num_parents),
GFP_KERNEL);
if (!parent_names) {
ret = -ENOMEM;
goto out_put_node;
}
of_clk_parent_fill(node, parent_names, num_parents);
addr = of_get_address(regmap_node, 0, NULL, NULL);
if (!addr) {
ret = -EINVAL;
goto out_put_node;
}
reg = be32_to_cpu(*addr);
init->ops = &serdes_am654_clk_mux_ops;
init->flags = CLK_SET_RATE_NO_REPARENT;
init->parent_names = parent_names;
init->num_parents = num_parents;
init->name = clock_name;
mux->regmap = regmap;
mux->reg = reg;
mux->clk_id = clock_num;
mux->hw.init = init;
clk = devm_clk_register(dev, &mux->hw);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
goto out_put_node;
}
am654_phy->clks[clock_num] = clk;
out_put_node:
of_node_put(regmap_node);
return ret;
}
static const struct of_device_id serdes_am654_id_table[] = {
{
.compatible = "ti,phy-am654-serdes",
},
{}
};
MODULE_DEVICE_TABLE(of, serdes_am654_id_table);
static int serdes_am654_regfield_init(struct serdes_am654 *am654_phy)
{
struct regmap *regmap = am654_phy->regmap;
struct device *dev = am654_phy->dev;
int i;
for (i = 0; i < MAX_FIELDS; i++) {
am654_phy->fields[i] = devm_regmap_field_alloc(dev,
regmap,
serdes_am654_reg_fields[i]);
if (IS_ERR(am654_phy->fields[i])) {
dev_err(dev, "Unable to allocate regmap field %d\n", i);
return PTR_ERR(am654_phy->fields[i]);
}
}
return 0;
}
static int serdes_am654_probe(struct platform_device *pdev)
{
struct phy_provider *phy_provider;
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
struct clk_onecell_data *clk_data;
struct serdes_am654 *am654_phy;
struct mux_control *control;
const char *clock_name;
struct regmap *regmap;
void __iomem *base;
struct phy *phy;
int ret;
int i;
am654_phy = devm_kzalloc(dev, sizeof(*am654_phy), GFP_KERNEL);
if (!am654_phy)
return -ENOMEM;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
regmap = devm_regmap_init_mmio(dev, base, &serdes_am654_regmap_config);
if (IS_ERR(regmap)) {
dev_err(dev, "Failed to initialize regmap\n");
return PTR_ERR(regmap);
}
control = devm_mux_control_get(dev, NULL);
if (IS_ERR(control))
return PTR_ERR(control);
am654_phy->dev = dev;
am654_phy->of_node = node;
am654_phy->regmap = regmap;
am654_phy->control = control;
am654_phy->type = PHY_NONE;
ret = serdes_am654_regfield_init(am654_phy);
if (ret) {
dev_err(dev, "Failed to initialize regfields\n");
return ret;
}
platform_set_drvdata(pdev, am654_phy);
for (i = 0; i < SERDES_NUM_CLOCKS; i++) {
ret = of_property_read_string_index(node, "clock-output-names",
i, &clock_name);
if (ret) {
dev_err(dev, "Failed to get clock name\n");
return ret;
}
ret = serdes_am654_clk_register(am654_phy, clock_name, i);
if (ret) {
dev_err(dev, "Failed to initialize clock %s\n",
clock_name);
return ret;
}
}
clk_data = &am654_phy->clk_data;
clk_data->clks = am654_phy->clks;
clk_data->clk_num = SERDES_NUM_CLOCKS;
ret = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
if (ret)
return ret;
pm_runtime_enable(dev);
phy = devm_phy_create(dev, NULL, &ops);
if (IS_ERR(phy)) {
ret = PTR_ERR(phy);
goto clk_err;
}
phy_set_drvdata(phy, am654_phy);
phy_provider = devm_of_phy_provider_register(dev, serdes_am654_xlate);
if (IS_ERR(phy_provider)) {
ret = PTR_ERR(phy_provider);
goto clk_err;
}
return 0;
clk_err:
of_clk_del_provider(node);
pm_runtime_disable(dev);
return ret;
}
static void serdes_am654_remove(struct platform_device *pdev)
{
struct serdes_am654 *am654_phy = platform_get_drvdata(pdev);
struct device_node *node = am654_phy->of_node;
pm_runtime_disable(&pdev->dev);
of_clk_del_provider(node);
}
static struct platform_driver serdes_am654_driver = {
.probe = serdes_am654_probe,
.remove_new = serdes_am654_remove,
.driver = {
.name = "phy-am654",
.of_match_table = serdes_am654_id_table,
},
};
module_platform_driver(serdes_am654_driver);
MODULE_AUTHOR("Texas Instruments Inc.");
MODULE_DESCRIPTION("TI AM654x SERDES driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/phy/ti/phy-am654-serdes.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* omap-usb2.c - USB PHY, talking to USB controller on TI SoCs.
*
* Copyright (C) 2012-2020 Texas Instruments Incorporated - http://www.ti.com
* Author: Kishon Vijay Abraham I <[email protected]>
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/phy/omap_control_phy.h>
#include <linux/phy/omap_usb.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>
#include <linux/usb/phy_companion.h>
#define USB2PHY_ANA_CONFIG1 0x4c
#define USB2PHY_DISCON_BYP_LATCH BIT(31)
#define USB2PHY_CHRG_DET 0x14
#define USB2PHY_CHRG_DET_USE_CHG_DET_REG BIT(29)
#define USB2PHY_CHRG_DET_DIS_CHG_DET BIT(28)
/* SoC Specific USB2_OTG register definitions */
#define AM654_USB2_OTG_PD BIT(8)
#define AM654_USB2_VBUS_DET_EN BIT(5)
#define AM654_USB2_VBUSVALID_DET_EN BIT(4)
#define OMAP_DEV_PHY_PD BIT(0)
#define OMAP_USB2_PHY_PD BIT(28)
#define AM437X_USB2_PHY_PD BIT(0)
#define AM437X_USB2_OTG_PD BIT(1)
#define AM437X_USB2_OTGVDET_EN BIT(19)
#define AM437X_USB2_OTGSESSEND_EN BIT(20)
/* Driver Flags */
#define OMAP_USB2_HAS_START_SRP BIT(0)
#define OMAP_USB2_HAS_SET_VBUS BIT(1)
#define OMAP_USB2_CALIBRATE_FALSE_DISCONNECT BIT(2)
#define OMAP_USB2_DISABLE_CHRG_DET BIT(3)
struct omap_usb {
struct usb_phy phy;
struct phy_companion *comparator;
void __iomem *pll_ctrl_base;
void __iomem *phy_base;
struct device *dev;
struct device *control_dev;
struct clk *wkupclk;
struct clk *optclk;
u8 flags;
struct regmap *syscon_phy_power; /* ctrl. reg. access */
unsigned int power_reg; /* power reg. index within syscon */
u32 mask;
u32 power_on;
u32 power_off;
};
#define phy_to_omapusb(x) container_of((x), struct omap_usb, phy)
struct usb_phy_data {
const char *label;
u8 flags;
u32 mask;
u32 power_on;
u32 power_off;
};
static inline u32 omap_usb_readl(void __iomem *addr, unsigned int offset)
{
return __raw_readl(addr + offset);
}
static inline void omap_usb_writel(void __iomem *addr, unsigned int offset,
u32 data)
{
__raw_writel(data, addr + offset);
}
/**
* omap_usb2_set_comparator() - links the comparator present in the system with this phy
*
* @comparator: the companion phy(comparator) for this phy
*
* The phy companion driver should call this API passing the phy_companion
* filled with set_vbus and start_srp to be used by usb phy.
*
* For use by phy companion driver
*/
int omap_usb2_set_comparator(struct phy_companion *comparator)
{
struct omap_usb *phy;
struct usb_phy *x = usb_get_phy(USB_PHY_TYPE_USB2);
if (IS_ERR(x))
return -ENODEV;
phy = phy_to_omapusb(x);
phy->comparator = comparator;
return 0;
}
EXPORT_SYMBOL_GPL(omap_usb2_set_comparator);
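/*
 * Usage sketch for a companion driver (names below are illustrative only,
 * not taken from an actual driver):
 *
 *	static struct phy_companion my_comparator = {
 *		.set_vbus	= my_set_vbus,
 *		.start_srp	= my_start_srp,
 *	};
 *
 *	ret = omap_usb2_set_comparator(&my_comparator);
 *	if (ret)
 *		return ret;
 */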
static int omap_usb_set_vbus(struct usb_otg *otg, bool enabled)
{
struct omap_usb *phy = phy_to_omapusb(otg->usb_phy);
if (!phy->comparator)
return -ENODEV;
return phy->comparator->set_vbus(phy->comparator, enabled);
}
static int omap_usb_start_srp(struct usb_otg *otg)
{
struct omap_usb *phy = phy_to_omapusb(otg->usb_phy);
if (!phy->comparator)
return -ENODEV;
return phy->comparator->start_srp(phy->comparator);
}
static int omap_usb_set_host(struct usb_otg *otg, struct usb_bus *host)
{
otg->host = host;
if (!host)
otg->state = OTG_STATE_UNDEFINED;
return 0;
}
static int omap_usb_set_peripheral(struct usb_otg *otg,
struct usb_gadget *gadget)
{
otg->gadget = gadget;
if (!gadget)
otg->state = OTG_STATE_UNDEFINED;
return 0;
}
static int omap_usb_phy_power(struct omap_usb *phy, int on)
{
u32 val;
int ret;
if (!phy->syscon_phy_power) {
omap_control_phy_power(phy->control_dev, on);
return 0;
}
if (on)
val = phy->power_on;
else
val = phy->power_off;
ret = regmap_update_bits(phy->syscon_phy_power, phy->power_reg,
phy->mask, val);
return ret;
}
static int omap_usb_power_off(struct phy *x)
{
struct omap_usb *phy = phy_get_drvdata(x);
return omap_usb_phy_power(phy, false);
}
static int omap_usb_power_on(struct phy *x)
{
struct omap_usb *phy = phy_get_drvdata(x);
return omap_usb_phy_power(phy, true);
}
static int omap_usb2_disable_clocks(struct omap_usb *phy)
{
clk_disable_unprepare(phy->wkupclk);
if (!IS_ERR(phy->optclk))
clk_disable_unprepare(phy->optclk);
return 0;
}
static int omap_usb2_enable_clocks(struct omap_usb *phy)
{
int ret;
ret = clk_prepare_enable(phy->wkupclk);
if (ret < 0) {
dev_err(phy->dev, "Failed to enable wkupclk %d\n", ret);
goto err0;
}
if (!IS_ERR(phy->optclk)) {
ret = clk_prepare_enable(phy->optclk);
if (ret < 0) {
dev_err(phy->dev, "Failed to enable optclk %d\n", ret);
goto err1;
}
}
return 0;
err1:
clk_disable_unprepare(phy->wkupclk);
err0:
return ret;
}
static int omap_usb_init(struct phy *x)
{
struct omap_usb *phy = phy_get_drvdata(x);
u32 val;
omap_usb2_enable_clocks(phy);
if (phy->flags & OMAP_USB2_CALIBRATE_FALSE_DISCONNECT) {
/*
*
* Reduce the sensitivity of internal PHY by enabling the
* DISCON_BYP_LATCH of the USB2PHY_ANA_CONFIG1 register. This
* resolves issues with certain devices which can otherwise
* be prone to false disconnects.
*
*/
val = omap_usb_readl(phy->phy_base, USB2PHY_ANA_CONFIG1);
val |= USB2PHY_DISCON_BYP_LATCH;
omap_usb_writel(phy->phy_base, USB2PHY_ANA_CONFIG1, val);
}
if (phy->flags & OMAP_USB2_DISABLE_CHRG_DET) {
val = omap_usb_readl(phy->phy_base, USB2PHY_CHRG_DET);
val |= USB2PHY_CHRG_DET_USE_CHG_DET_REG |
USB2PHY_CHRG_DET_DIS_CHG_DET;
omap_usb_writel(phy->phy_base, USB2PHY_CHRG_DET, val);
}
return 0;
}
static int omap_usb_exit(struct phy *x)
{
struct omap_usb *phy = phy_get_drvdata(x);
return omap_usb2_disable_clocks(phy);
}
static const struct phy_ops ops = {
.init = omap_usb_init,
.exit = omap_usb_exit,
.power_on = omap_usb_power_on,
.power_off = omap_usb_power_off,
.owner = THIS_MODULE,
};
static const struct usb_phy_data omap_usb2_data = {
.label = "omap_usb2",
.flags = OMAP_USB2_HAS_START_SRP | OMAP_USB2_HAS_SET_VBUS,
.mask = OMAP_DEV_PHY_PD,
.power_off = OMAP_DEV_PHY_PD,
};
static const struct usb_phy_data omap5_usb2_data = {
.label = "omap5_usb2",
.flags = 0,
.mask = OMAP_DEV_PHY_PD,
.power_off = OMAP_DEV_PHY_PD,
};
static const struct usb_phy_data dra7x_usb2_data = {
.label = "dra7x_usb2",
.flags = OMAP_USB2_CALIBRATE_FALSE_DISCONNECT,
.mask = OMAP_DEV_PHY_PD,
.power_off = OMAP_DEV_PHY_PD,
};
static const struct usb_phy_data dra7x_usb2_phy2_data = {
.label = "dra7x_usb2_phy2",
.flags = OMAP_USB2_CALIBRATE_FALSE_DISCONNECT,
.mask = OMAP_USB2_PHY_PD,
.power_off = OMAP_USB2_PHY_PD,
};
static const struct usb_phy_data am437x_usb2_data = {
.label = "am437x_usb2",
.flags = 0,
.mask = AM437X_USB2_PHY_PD | AM437X_USB2_OTG_PD |
AM437X_USB2_OTGVDET_EN | AM437X_USB2_OTGSESSEND_EN,
.power_on = AM437X_USB2_OTGVDET_EN | AM437X_USB2_OTGSESSEND_EN,
.power_off = AM437X_USB2_PHY_PD | AM437X_USB2_OTG_PD,
};
static const struct usb_phy_data am654_usb2_data = {
.label = "am654_usb2",
.flags = OMAP_USB2_CALIBRATE_FALSE_DISCONNECT,
.mask = AM654_USB2_OTG_PD | AM654_USB2_VBUS_DET_EN |
AM654_USB2_VBUSVALID_DET_EN,
.power_on = AM654_USB2_VBUS_DET_EN | AM654_USB2_VBUSVALID_DET_EN,
.power_off = AM654_USB2_OTG_PD,
};
static const struct of_device_id omap_usb2_id_table[] = {
{
.compatible = "ti,omap-usb2",
.data = &omap_usb2_data,
},
{
.compatible = "ti,omap5-usb2",
.data = &omap5_usb2_data,
},
{
.compatible = "ti,dra7x-usb2",
.data = &dra7x_usb2_data,
},
{
.compatible = "ti,dra7x-usb2-phy2",
.data = &dra7x_usb2_phy2_data,
},
{
.compatible = "ti,am437x-usb2",
.data = &am437x_usb2_data,
},
{
.compatible = "ti,am654-usb2",
.data = &am654_usb2_data,
},
{},
};
MODULE_DEVICE_TABLE(of, omap_usb2_id_table);
static void omap_usb2_init_errata(struct omap_usb *phy)
{
static const struct soc_device_attribute am65x_sr10_soc_devices[] = {
{ .family = "AM65X", .revision = "SR1.0" },
{ /* sentinel */ }
};
/*
* Errata i2075: USB2PHY: USB2PHY Charger Detect is Enabled by
* Default Without VBUS Presence.
*
* AM654x SR1.0 has a silicon bug due to which D+ is pulled high after
* POR, which could cause enumeration failure with some USB hubs.
* Disabling the USB2_PHY Charger Detect function will put D+
* into the normal state.
*/
if (soc_device_match(am65x_sr10_soc_devices))
phy->flags |= OMAP_USB2_DISABLE_CHRG_DET;
}
static int omap_usb2_probe(struct platform_device *pdev)
{
struct omap_usb *phy;
struct phy *generic_phy;
struct phy_provider *phy_provider;
struct usb_otg *otg;
struct device_node *node = pdev->dev.of_node;
struct device_node *control_node;
struct platform_device *control_pdev;
const struct of_device_id *of_id;
struct usb_phy_data *phy_data;
of_id = of_match_device(omap_usb2_id_table, &pdev->dev);
if (!of_id)
return -EINVAL;
phy_data = (struct usb_phy_data *)of_id->data;
phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL);
if (!phy)
return -ENOMEM;
otg = devm_kzalloc(&pdev->dev, sizeof(*otg), GFP_KERNEL);
if (!otg)
return -ENOMEM;
phy->dev = &pdev->dev;
phy->phy.dev = phy->dev;
phy->phy.label = phy_data->label;
phy->phy.otg = otg;
phy->phy.type = USB_PHY_TYPE_USB2;
phy->mask = phy_data->mask;
phy->power_on = phy_data->power_on;
phy->power_off = phy_data->power_off;
phy->flags = phy_data->flags;
omap_usb2_init_errata(phy);
phy->phy_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(phy->phy_base))
return PTR_ERR(phy->phy_base);
phy->syscon_phy_power = syscon_regmap_lookup_by_phandle(node,
"syscon-phy-power");
if (IS_ERR(phy->syscon_phy_power)) {
dev_dbg(&pdev->dev,
"can't get syscon-phy-power, using control device\n");
phy->syscon_phy_power = NULL;
control_node = of_parse_phandle(node, "ctrl-module", 0);
if (!control_node) {
dev_err(&pdev->dev,
"Failed to get control device phandle\n");
return -EINVAL;
}
control_pdev = of_find_device_by_node(control_node);
if (!control_pdev) {
dev_err(&pdev->dev, "Failed to get control device\n");
return -EINVAL;
}
phy->control_dev = &control_pdev->dev;
} else {
if (of_property_read_u32_index(node,
"syscon-phy-power", 1,
&phy->power_reg)) {
dev_err(&pdev->dev,
"couldn't get power reg. offset\n");
return -EINVAL;
}
}
phy->wkupclk = devm_clk_get(phy->dev, "wkupclk");
if (IS_ERR(phy->wkupclk)) {
if (PTR_ERR(phy->wkupclk) == -EPROBE_DEFER)
return -EPROBE_DEFER;
dev_warn(&pdev->dev, "unable to get wkupclk %ld, trying old name\n",
PTR_ERR(phy->wkupclk));
phy->wkupclk = devm_clk_get(phy->dev, "usb_phy_cm_clk32k");
if (IS_ERR(phy->wkupclk))
return dev_err_probe(&pdev->dev, PTR_ERR(phy->wkupclk),
"unable to get usb_phy_cm_clk32k\n");
dev_warn(&pdev->dev,
"found usb_phy_cm_clk32k, please fix DTS\n");
}
phy->optclk = devm_clk_get(phy->dev, "refclk");
if (IS_ERR(phy->optclk)) {
if (PTR_ERR(phy->optclk) == -EPROBE_DEFER)
return -EPROBE_DEFER;
dev_dbg(&pdev->dev, "unable to get refclk, trying old name\n");
phy->optclk = devm_clk_get(phy->dev, "usb_otg_ss_refclk960m");
if (IS_ERR(phy->optclk)) {
if (PTR_ERR(phy->optclk) != -EPROBE_DEFER) {
dev_dbg(&pdev->dev,
"unable to get usb_otg_ss_refclk960m\n");
}
} else {
dev_warn(&pdev->dev,
"found usb_otg_ss_refclk960m, please fix DTS\n");
}
}
otg->set_host = omap_usb_set_host;
otg->set_peripheral = omap_usb_set_peripheral;
if (phy_data->flags & OMAP_USB2_HAS_SET_VBUS)
otg->set_vbus = omap_usb_set_vbus;
if (phy_data->flags & OMAP_USB2_HAS_START_SRP)
otg->start_srp = omap_usb_start_srp;
otg->usb_phy = &phy->phy;
platform_set_drvdata(pdev, phy);
pm_runtime_enable(phy->dev);
generic_phy = devm_phy_create(phy->dev, NULL, &ops);
if (IS_ERR(generic_phy)) {
pm_runtime_disable(phy->dev);
return PTR_ERR(generic_phy);
}
phy_set_drvdata(generic_phy, phy);
omap_usb_power_off(generic_phy);
phy_provider = devm_of_phy_provider_register(phy->dev,
of_phy_simple_xlate);
if (IS_ERR(phy_provider)) {
pm_runtime_disable(phy->dev);
return PTR_ERR(phy_provider);
}
usb_add_phy_dev(&phy->phy);
return 0;
}
static void omap_usb2_remove(struct platform_device *pdev)
{
struct omap_usb *phy = platform_get_drvdata(pdev);
usb_remove_phy(&phy->phy);
pm_runtime_disable(phy->dev);
}
static struct platform_driver omap_usb2_driver = {
.probe = omap_usb2_probe,
.remove_new = omap_usb2_remove,
.driver = {
.name = "omap-usb2",
.of_match_table = omap_usb2_id_table,
},
};
module_platform_driver(omap_usb2_driver);
MODULE_ALIAS("platform:omap_usb2");
MODULE_AUTHOR("Texas Instruments Inc.");
MODULE_DESCRIPTION("OMAP USB2 phy driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/phy/ti/phy-omap-usb2.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Wrapper driver for SERDES used in J721E
*
* Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
* Author: Kishon Vijay Abraham I <[email protected]>
*/
#include <dt-bindings/phy/phy.h>
#include <dt-bindings/phy/phy-ti.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/mux/consumer.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset-controller.h>
#define REF_CLK_19_2MHZ 19200000
#define REF_CLK_25MHZ 25000000
#define REF_CLK_100MHZ 100000000
#define REF_CLK_156_25MHZ 156250000
/* SCM offsets */
#define SERDES_SUP_CTRL 0x4400
/* SERDES offsets */
#define WIZ_SERDES_CTRL 0x404
#define WIZ_SERDES_TOP_CTRL 0x408
#define WIZ_SERDES_RST 0x40c
#define WIZ_SERDES_TYPEC 0x410
#define WIZ_LANECTL(n) (0x480 + (0x40 * (n)))
#define WIZ_LANEDIV(n) (0x484 + (0x40 * (n)))
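/*
 * Example: WIZ_LANECTL(2) evaluates to 0x480 + 0x40 * 2 = 0x500, i.e. the
 * per-lane control register of lane 2; WIZ_LANEDIV(n) follows the same
 * stride starting at 0x484.
 */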
#define WIZ_MAX_INPUT_CLOCKS 4
/* To include mux clocks, divider clocks and gate clocks */
#define WIZ_MAX_OUTPUT_CLOCKS 32
#define WIZ_MAX_LANES 4
#define WIZ_MUX_NUM_CLOCKS 3
#define WIZ_DIV_NUM_CLOCKS_16G 2
#define WIZ_DIV_NUM_CLOCKS_10G 1
#define WIZ_SERDES_TYPEC_LN10_SWAP BIT(30)
enum wiz_lane_standard_mode {
LANE_MODE_GEN1,
LANE_MODE_GEN2,
LANE_MODE_GEN3,
LANE_MODE_GEN4,
};
/*
* List of master lanes used for lane swapping
*/
enum wiz_typec_master_lane {
LANE0 = 0,
LANE2 = 2,
};
enum wiz_refclk_mux_sel {
PLL0_REFCLK,
PLL1_REFCLK,
REFCLK_DIG,
};
enum wiz_refclk_div_sel {
CMN_REFCLK_DIG_DIV,
CMN_REFCLK1_DIG_DIV,
};
enum wiz_clock_input {
WIZ_CORE_REFCLK,
WIZ_EXT_REFCLK,
WIZ_CORE_REFCLK1,
WIZ_EXT_REFCLK1,
};
static const struct reg_field por_en = REG_FIELD(WIZ_SERDES_CTRL, 31, 31);
static const struct reg_field phy_reset_n = REG_FIELD(WIZ_SERDES_RST, 31, 31);
static const struct reg_field phy_en_refclk = REG_FIELD(WIZ_SERDES_RST, 30, 30);
static const struct reg_field pll1_refclk_mux_sel =
REG_FIELD(WIZ_SERDES_RST, 29, 29);
static const struct reg_field pll1_refclk_mux_sel_2 =
REG_FIELD(WIZ_SERDES_RST, 22, 23);
static const struct reg_field pll0_refclk_mux_sel =
REG_FIELD(WIZ_SERDES_RST, 28, 28);
static const struct reg_field pll0_refclk_mux_sel_2 =
REG_FIELD(WIZ_SERDES_RST, 28, 29);
static const struct reg_field refclk_dig_sel_16g =
REG_FIELD(WIZ_SERDES_RST, 24, 25);
static const struct reg_field refclk_dig_sel_10g =
REG_FIELD(WIZ_SERDES_RST, 24, 24);
static const struct reg_field pma_cmn_refclk_int_mode =
REG_FIELD(WIZ_SERDES_TOP_CTRL, 28, 29);
static const struct reg_field pma_cmn_refclk1_int_mode =
REG_FIELD(WIZ_SERDES_TOP_CTRL, 20, 21);
static const struct reg_field pma_cmn_refclk_mode =
REG_FIELD(WIZ_SERDES_TOP_CTRL, 30, 31);
static const struct reg_field pma_cmn_refclk_dig_div =
REG_FIELD(WIZ_SERDES_TOP_CTRL, 26, 27);
static const struct reg_field pma_cmn_refclk1_dig_div =
REG_FIELD(WIZ_SERDES_TOP_CTRL, 24, 25);
static const struct reg_field sup_pll0_refclk_mux_sel =
REG_FIELD(SERDES_SUP_CTRL, 0, 1);
static const struct reg_field sup_pll1_refclk_mux_sel =
REG_FIELD(SERDES_SUP_CTRL, 2, 3);
static const struct reg_field sup_pma_cmn_refclk1_int_mode =
REG_FIELD(SERDES_SUP_CTRL, 4, 5);
static const struct reg_field sup_refclk_dig_sel_10g =
REG_FIELD(SERDES_SUP_CTRL, 6, 7);
static const struct reg_field sup_legacy_clk_override =
REG_FIELD(SERDES_SUP_CTRL, 8, 8);
static const char * const output_clk_names[] = {
[TI_WIZ_PLL0_REFCLK] = "pll0-refclk",
[TI_WIZ_PLL1_REFCLK] = "pll1-refclk",
[TI_WIZ_REFCLK_DIG] = "refclk-dig",
[TI_WIZ_PHY_EN_REFCLK] = "phy-en-refclk",
};
static const struct reg_field p_enable[WIZ_MAX_LANES] = {
REG_FIELD(WIZ_LANECTL(0), 30, 31),
REG_FIELD(WIZ_LANECTL(1), 30, 31),
REG_FIELD(WIZ_LANECTL(2), 30, 31),
REG_FIELD(WIZ_LANECTL(3), 30, 31),
};
enum p_enable { P_ENABLE = 2, P_ENABLE_FORCE = 1, P_ENABLE_DISABLE = 0 };
static const struct reg_field p_align[WIZ_MAX_LANES] = {
REG_FIELD(WIZ_LANECTL(0), 29, 29),
REG_FIELD(WIZ_LANECTL(1), 29, 29),
REG_FIELD(WIZ_LANECTL(2), 29, 29),
REG_FIELD(WIZ_LANECTL(3), 29, 29),
};
static const struct reg_field p_raw_auto_start[WIZ_MAX_LANES] = {
REG_FIELD(WIZ_LANECTL(0), 28, 28),
REG_FIELD(WIZ_LANECTL(1), 28, 28),
REG_FIELD(WIZ_LANECTL(2), 28, 28),
REG_FIELD(WIZ_LANECTL(3), 28, 28),
};
static const struct reg_field p_standard_mode[WIZ_MAX_LANES] = {
REG_FIELD(WIZ_LANECTL(0), 24, 25),
REG_FIELD(WIZ_LANECTL(1), 24, 25),
REG_FIELD(WIZ_LANECTL(2), 24, 25),
REG_FIELD(WIZ_LANECTL(3), 24, 25),
};
static const struct reg_field p0_fullrt_div[WIZ_MAX_LANES] = {
REG_FIELD(WIZ_LANECTL(0), 22, 23),
REG_FIELD(WIZ_LANECTL(1), 22, 23),
REG_FIELD(WIZ_LANECTL(2), 22, 23),
REG_FIELD(WIZ_LANECTL(3), 22, 23),
};
static const struct reg_field p0_mac_src_sel[WIZ_MAX_LANES] = {
REG_FIELD(WIZ_LANECTL(0), 20, 21),
REG_FIELD(WIZ_LANECTL(1), 20, 21),
REG_FIELD(WIZ_LANECTL(2), 20, 21),
REG_FIELD(WIZ_LANECTL(3), 20, 21),
};
static const struct reg_field p0_rxfclk_sel[WIZ_MAX_LANES] = {
REG_FIELD(WIZ_LANECTL(0), 6, 7),
REG_FIELD(WIZ_LANECTL(1), 6, 7),
REG_FIELD(WIZ_LANECTL(2), 6, 7),
REG_FIELD(WIZ_LANECTL(3), 6, 7),
};
static const struct reg_field p0_refclk_sel[WIZ_MAX_LANES] = {
REG_FIELD(WIZ_LANECTL(0), 18, 19),
REG_FIELD(WIZ_LANECTL(1), 18, 19),
REG_FIELD(WIZ_LANECTL(2), 18, 19),
REG_FIELD(WIZ_LANECTL(3), 18, 19),
};
static const struct reg_field p_mac_div_sel0[WIZ_MAX_LANES] = {
REG_FIELD(WIZ_LANEDIV(0), 16, 22),
REG_FIELD(WIZ_LANEDIV(1), 16, 22),
REG_FIELD(WIZ_LANEDIV(2), 16, 22),
REG_FIELD(WIZ_LANEDIV(3), 16, 22),
};
static const struct reg_field p_mac_div_sel1[WIZ_MAX_LANES] = {
REG_FIELD(WIZ_LANEDIV(0), 0, 8),
REG_FIELD(WIZ_LANEDIV(1), 0, 8),
REG_FIELD(WIZ_LANEDIV(2), 0, 8),
REG_FIELD(WIZ_LANEDIV(3), 0, 8),
};
static const struct reg_field typec_ln10_swap =
REG_FIELD(WIZ_SERDES_TYPEC, 30, 30);
static const struct reg_field typec_ln23_swap =
REG_FIELD(WIZ_SERDES_TYPEC, 31, 31);
struct wiz_clk_mux {
struct clk_hw hw;
struct regmap_field *field;
const u32 *table;
struct clk_init_data clk_data;
};
#define to_wiz_clk_mux(_hw) container_of(_hw, struct wiz_clk_mux, hw)
struct wiz_clk_divider {
struct clk_hw hw;
struct regmap_field *field;
const struct clk_div_table *table;
struct clk_init_data clk_data;
};
#define to_wiz_clk_div(_hw) container_of(_hw, struct wiz_clk_divider, hw)
struct wiz_clk_mux_sel {
u32 table[WIZ_MAX_INPUT_CLOCKS];
const char *node_name;
u32 num_parents;
u32 parents[WIZ_MAX_INPUT_CLOCKS];
};
struct wiz_clk_div_sel {
const struct clk_div_table *table;
const char *node_name;
};
struct wiz_phy_en_refclk {
struct clk_hw hw;
struct regmap_field *phy_en_refclk;
struct clk_init_data clk_data;
};
#define to_wiz_phy_en_refclk(_hw) container_of(_hw, struct wiz_phy_en_refclk, hw)
static const struct wiz_clk_mux_sel clk_mux_sel_16g[] = {
{
/*
* Mux value to be configured for each of the input clocks
* in the order populated in device tree
*/
.table = { 1, 0 },
.node_name = "pll0-refclk",
},
{
.table = { 1, 0 },
.node_name = "pll1-refclk",
},
{
.table = { 1, 3, 0, 2 },
.node_name = "refclk-dig",
},
};
static const struct wiz_clk_mux_sel clk_mux_sel_10g[] = {
{
/*
* Mux value to be configured for each of the input clocks
* in the order populated in device tree
*/
.num_parents = 2,
.parents = { WIZ_CORE_REFCLK, WIZ_EXT_REFCLK },
.table = { 1, 0 },
.node_name = "pll0-refclk",
},
{
.num_parents = 2,
.parents = { WIZ_CORE_REFCLK, WIZ_EXT_REFCLK },
.table = { 1, 0 },
.node_name = "pll1-refclk",
},
{
.num_parents = 2,
.parents = { WIZ_CORE_REFCLK, WIZ_EXT_REFCLK },
.table = { 1, 0 },
.node_name = "refclk-dig",
},
};
static const struct wiz_clk_mux_sel clk_mux_sel_10g_2_refclk[] = {
{
.num_parents = 3,
.parents = { WIZ_CORE_REFCLK, WIZ_CORE_REFCLK1, WIZ_EXT_REFCLK },
.table = { 2, 3, 0 },
.node_name = "pll0-refclk",
},
{
.num_parents = 3,
.parents = { WIZ_CORE_REFCLK, WIZ_CORE_REFCLK1, WIZ_EXT_REFCLK },
.table = { 2, 3, 0 },
.node_name = "pll1-refclk",
},
{
.num_parents = 3,
.parents = { WIZ_CORE_REFCLK, WIZ_CORE_REFCLK1, WIZ_EXT_REFCLK },
.table = { 2, 3, 0 },
.node_name = "refclk-dig",
},
};
static const struct clk_div_table clk_div_table[] = {
{ .val = 0, .div = 1, },
{ .val = 1, .div = 2, },
{ .val = 2, .div = 4, },
{ .val = 3, .div = 8, },
{ /* sentinel */ },
};
static const struct wiz_clk_div_sel clk_div_sel[] = {
{
.table = clk_div_table,
.node_name = "cmn-refclk-dig-div",
},
{
.table = clk_div_table,
.node_name = "cmn-refclk1-dig-div",
},
};
enum wiz_type {
J721E_WIZ_16G,
J721E_WIZ_10G, /* Also for J7200 SR1.0 */
AM64_WIZ_10G,
J7200_WIZ_10G, /* J7200 SR2.0 */
J784S4_WIZ_10G,
J721S2_WIZ_10G,
};
struct wiz_data {
enum wiz_type type;
const struct reg_field *pll0_refclk_mux_sel;
const struct reg_field *pll1_refclk_mux_sel;
const struct reg_field *refclk_dig_sel;
const struct reg_field *pma_cmn_refclk1_dig_div;
const struct reg_field *pma_cmn_refclk1_int_mode;
const struct wiz_clk_mux_sel *clk_mux_sel;
unsigned int clk_div_sel_num;
};
#define WIZ_TYPEC_DIR_DEBOUNCE_MIN 100 /* ms */
#define WIZ_TYPEC_DIR_DEBOUNCE_MAX 1000
struct wiz {
struct regmap *regmap;
struct regmap *scm_regmap;
enum wiz_type type;
const struct wiz_clk_mux_sel *clk_mux_sel;
const struct wiz_clk_div_sel *clk_div_sel;
unsigned int clk_div_sel_num;
struct regmap_field *por_en;
struct regmap_field *phy_reset_n;
struct regmap_field *phy_en_refclk;
struct regmap_field *p_enable[WIZ_MAX_LANES];
struct regmap_field *p_align[WIZ_MAX_LANES];
struct regmap_field *p_raw_auto_start[WIZ_MAX_LANES];
struct regmap_field *p_standard_mode[WIZ_MAX_LANES];
struct regmap_field *p_mac_div_sel0[WIZ_MAX_LANES];
struct regmap_field *p_mac_div_sel1[WIZ_MAX_LANES];
struct regmap_field *p0_fullrt_div[WIZ_MAX_LANES];
struct regmap_field *p0_mac_src_sel[WIZ_MAX_LANES];
struct regmap_field *p0_rxfclk_sel[WIZ_MAX_LANES];
struct regmap_field *p0_refclk_sel[WIZ_MAX_LANES];
struct regmap_field *pma_cmn_refclk_int_mode;
struct regmap_field *pma_cmn_refclk1_int_mode;
struct regmap_field *pma_cmn_refclk_mode;
struct regmap_field *pma_cmn_refclk_dig_div;
struct regmap_field *pma_cmn_refclk1_dig_div;
struct regmap_field *mux_sel_field[WIZ_MUX_NUM_CLOCKS];
struct regmap_field *div_sel_field[WIZ_DIV_NUM_CLOCKS_16G];
struct regmap_field *typec_ln10_swap;
struct regmap_field *typec_ln23_swap;
struct regmap_field *sup_legacy_clk_override;
struct device *dev;
u32 num_lanes;
struct platform_device *serdes_pdev;
struct reset_controller_dev wiz_phy_reset_dev;
struct gpio_desc *gpio_typec_dir;
int typec_dir_delay;
u32 lane_phy_type[WIZ_MAX_LANES];
u32 master_lane_num[WIZ_MAX_LANES];
struct clk *input_clks[WIZ_MAX_INPUT_CLOCKS];
struct clk *output_clks[WIZ_MAX_OUTPUT_CLOCKS];
struct clk_onecell_data clk_data;
const struct wiz_data *data;
};
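/* Pulse the WIZ power-on reset: assert POR_EN, wait 1 ms, then release it. */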
static int wiz_reset(struct wiz *wiz)
{
int ret;
ret = regmap_field_write(wiz->por_en, 0x1);
if (ret)
return ret;
mdelay(1);
ret = regmap_field_write(wiz->por_en, 0x0);
if (ret)
return ret;
return 0;
}
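/*
 * Program the per-lane MAC clock dividers for Ethernet (SGMII, QSGMII,
 * USXGMII) lanes.
 */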
static int wiz_p_mac_div_sel(struct wiz *wiz)
{
u32 num_lanes = wiz->num_lanes;
int ret;
int i;
for (i = 0; i < num_lanes; i++) {
if (wiz->lane_phy_type[i] == PHY_TYPE_SGMII ||
wiz->lane_phy_type[i] == PHY_TYPE_QSGMII ||
wiz->lane_phy_type[i] == PHY_TYPE_USXGMII) {
ret = regmap_field_write(wiz->p_mac_div_sel0[i], 1);
if (ret)
return ret;
ret = regmap_field_write(wiz->p_mac_div_sel1[i], 2);
if (ret)
return ret;
}
}
return 0;
}
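/*
 * Pick the lane standard mode from the PHY type: GEN1 for DP and USXGMII
 * (USXGMII also gets its MAC/RX/refclk source selects set), GEN2 for QSGMII.
 */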
static int wiz_mode_select(struct wiz *wiz)
{
u32 num_lanes = wiz->num_lanes;
enum wiz_lane_standard_mode mode;
int ret;
int i;
for (i = 0; i < num_lanes; i++) {
if (wiz->lane_phy_type[i] == PHY_TYPE_DP) {
mode = LANE_MODE_GEN1;
} else if (wiz->lane_phy_type[i] == PHY_TYPE_QSGMII) {
mode = LANE_MODE_GEN2;
} else if (wiz->lane_phy_type[i] == PHY_TYPE_USXGMII) {
			ret = regmap_field_write(wiz->p0_mac_src_sel[i], 0x3);
			if (!ret)
				ret = regmap_field_write(wiz->p0_rxfclk_sel[i], 0x3);
			if (!ret)
				ret = regmap_field_write(wiz->p0_refclk_sel[i], 0x3);
			if (ret)
				return ret;
			mode = LANE_MODE_GEN1;
} else {
continue;
}
ret = regmap_field_write(wiz->p_standard_mode[i], mode);
if (ret)
return ret;
}
return 0;
}
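/* Enable or disable the raw interface (lane align and raw auto start) on all lanes. */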
static int wiz_init_raw_interface(struct wiz *wiz, bool enable)
{
u32 num_lanes = wiz->num_lanes;
int i;
int ret;
for (i = 0; i < num_lanes; i++) {
ret = regmap_field_write(wiz->p_align[i], enable);
if (ret)
return ret;
ret = regmap_field_write(wiz->p_raw_auto_start[i], enable);
if (ret)
return ret;
}
return 0;
}
static int wiz_init(struct wiz *wiz)
{
struct device *dev = wiz->dev;
int ret;
ret = wiz_reset(wiz);
if (ret) {
dev_err(dev, "WIZ reset failed\n");
return ret;
}
ret = wiz_mode_select(wiz);
if (ret) {
dev_err(dev, "WIZ mode select failed\n");
return ret;
}
ret = wiz_p_mac_div_sel(wiz);
if (ret) {
dev_err(dev, "Configuring P0 MAC DIV SEL failed\n");
return ret;
}
ret = wiz_init_raw_interface(wiz, true);
if (ret) {
dev_err(dev, "WIZ interface initialization failed\n");
return ret;
}
return 0;
}
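/*
 * Allocate regmap fields for the WIZ control bits. Most fields live in the
 * WIZ register space; when a SCM syscon is present, the refclk mux selects
 * and the legacy clock override are taken from it instead.
 */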
static int wiz_regfield_init(struct wiz *wiz)
{
struct regmap *regmap = wiz->regmap;
struct regmap *scm_regmap = wiz->regmap; /* updated later to scm_regmap if applicable */
int num_lanes = wiz->num_lanes;
struct device *dev = wiz->dev;
const struct wiz_data *data = wiz->data;
int i;
wiz->por_en = devm_regmap_field_alloc(dev, regmap, por_en);
if (IS_ERR(wiz->por_en)) {
dev_err(dev, "POR_EN reg field init failed\n");
return PTR_ERR(wiz->por_en);
}
wiz->phy_reset_n = devm_regmap_field_alloc(dev, regmap,
phy_reset_n);
if (IS_ERR(wiz->phy_reset_n)) {
dev_err(dev, "PHY_RESET_N reg field init failed\n");
return PTR_ERR(wiz->phy_reset_n);
}
wiz->pma_cmn_refclk_int_mode =
devm_regmap_field_alloc(dev, regmap, pma_cmn_refclk_int_mode);
if (IS_ERR(wiz->pma_cmn_refclk_int_mode)) {
dev_err(dev, "PMA_CMN_REFCLK_INT_MODE reg field init failed\n");
return PTR_ERR(wiz->pma_cmn_refclk_int_mode);
}
wiz->pma_cmn_refclk_mode =
devm_regmap_field_alloc(dev, regmap, pma_cmn_refclk_mode);
if (IS_ERR(wiz->pma_cmn_refclk_mode)) {
dev_err(dev, "PMA_CMN_REFCLK_MODE reg field init failed\n");
return PTR_ERR(wiz->pma_cmn_refclk_mode);
}
wiz->div_sel_field[CMN_REFCLK_DIG_DIV] =
devm_regmap_field_alloc(dev, regmap, pma_cmn_refclk_dig_div);
if (IS_ERR(wiz->div_sel_field[CMN_REFCLK_DIG_DIV])) {
dev_err(dev, "PMA_CMN_REFCLK_DIG_DIV reg field init failed\n");
return PTR_ERR(wiz->div_sel_field[CMN_REFCLK_DIG_DIV]);
}
if (data->pma_cmn_refclk1_dig_div) {
wiz->div_sel_field[CMN_REFCLK1_DIG_DIV] =
devm_regmap_field_alloc(dev, regmap,
*data->pma_cmn_refclk1_dig_div);
if (IS_ERR(wiz->div_sel_field[CMN_REFCLK1_DIG_DIV])) {
dev_err(dev, "PMA_CMN_REFCLK1_DIG_DIV reg field init failed\n");
return PTR_ERR(wiz->div_sel_field[CMN_REFCLK1_DIG_DIV]);
}
}
if (wiz->scm_regmap) {
scm_regmap = wiz->scm_regmap;
wiz->sup_legacy_clk_override =
devm_regmap_field_alloc(dev, scm_regmap, sup_legacy_clk_override);
if (IS_ERR(wiz->sup_legacy_clk_override)) {
dev_err(dev, "SUP_LEGACY_CLK_OVERRIDE reg field init failed\n");
return PTR_ERR(wiz->sup_legacy_clk_override);
}
}
wiz->mux_sel_field[PLL0_REFCLK] =
devm_regmap_field_alloc(dev, scm_regmap, *data->pll0_refclk_mux_sel);
if (IS_ERR(wiz->mux_sel_field[PLL0_REFCLK])) {
dev_err(dev, "PLL0_REFCLK_SEL reg field init failed\n");
return PTR_ERR(wiz->mux_sel_field[PLL0_REFCLK]);
}
wiz->mux_sel_field[PLL1_REFCLK] =
devm_regmap_field_alloc(dev, scm_regmap, *data->pll1_refclk_mux_sel);
if (IS_ERR(wiz->mux_sel_field[PLL1_REFCLK])) {
dev_err(dev, "PLL1_REFCLK_SEL reg field init failed\n");
return PTR_ERR(wiz->mux_sel_field[PLL1_REFCLK]);
}
wiz->mux_sel_field[REFCLK_DIG] = devm_regmap_field_alloc(dev, scm_regmap,
*data->refclk_dig_sel);
if (IS_ERR(wiz->mux_sel_field[REFCLK_DIG])) {
dev_err(dev, "REFCLK_DIG_SEL reg field init failed\n");
return PTR_ERR(wiz->mux_sel_field[REFCLK_DIG]);
}
if (data->pma_cmn_refclk1_int_mode) {
wiz->pma_cmn_refclk1_int_mode =
devm_regmap_field_alloc(dev, scm_regmap, *data->pma_cmn_refclk1_int_mode);
if (IS_ERR(wiz->pma_cmn_refclk1_int_mode)) {
dev_err(dev, "PMA_CMN_REFCLK1_INT_MODE reg field init failed\n");
return PTR_ERR(wiz->pma_cmn_refclk1_int_mode);
}
}
for (i = 0; i < num_lanes; i++) {
wiz->p_enable[i] = devm_regmap_field_alloc(dev, regmap,
p_enable[i]);
if (IS_ERR(wiz->p_enable[i])) {
dev_err(dev, "P%d_ENABLE reg field init failed\n", i);
return PTR_ERR(wiz->p_enable[i]);
}
wiz->p_align[i] = devm_regmap_field_alloc(dev, regmap,
p_align[i]);
if (IS_ERR(wiz->p_align[i])) {
dev_err(dev, "P%d_ALIGN reg field init failed\n", i);
return PTR_ERR(wiz->p_align[i]);
}
wiz->p_raw_auto_start[i] =
devm_regmap_field_alloc(dev, regmap, p_raw_auto_start[i]);
if (IS_ERR(wiz->p_raw_auto_start[i])) {
dev_err(dev, "P%d_RAW_AUTO_START reg field init fail\n",
i);
return PTR_ERR(wiz->p_raw_auto_start[i]);
}
wiz->p_standard_mode[i] =
devm_regmap_field_alloc(dev, regmap, p_standard_mode[i]);
if (IS_ERR(wiz->p_standard_mode[i])) {
dev_err(dev, "P%d_STANDARD_MODE reg field init fail\n",
i);
return PTR_ERR(wiz->p_standard_mode[i]);
}
wiz->p0_fullrt_div[i] = devm_regmap_field_alloc(dev, regmap, p0_fullrt_div[i]);
if (IS_ERR(wiz->p0_fullrt_div[i])) {
dev_err(dev, "P%d_FULLRT_DIV reg field init failed\n", i);
return PTR_ERR(wiz->p0_fullrt_div[i]);
}
wiz->p0_mac_src_sel[i] = devm_regmap_field_alloc(dev, regmap, p0_mac_src_sel[i]);
if (IS_ERR(wiz->p0_mac_src_sel[i])) {
dev_err(dev, "P%d_MAC_SRC_SEL reg field init failed\n", i);
return PTR_ERR(wiz->p0_mac_src_sel[i]);
}
wiz->p0_rxfclk_sel[i] = devm_regmap_field_alloc(dev, regmap, p0_rxfclk_sel[i]);
if (IS_ERR(wiz->p0_rxfclk_sel[i])) {
dev_err(dev, "P%d_RXFCLK_SEL reg field init failed\n", i);
return PTR_ERR(wiz->p0_rxfclk_sel[i]);
}
wiz->p0_refclk_sel[i] = devm_regmap_field_alloc(dev, regmap, p0_refclk_sel[i]);
if (IS_ERR(wiz->p0_refclk_sel[i])) {
dev_err(dev, "P%d_REFCLK_SEL reg field init failed\n", i);
return PTR_ERR(wiz->p0_refclk_sel[i]);
}
wiz->p_mac_div_sel0[i] =
devm_regmap_field_alloc(dev, regmap, p_mac_div_sel0[i]);
if (IS_ERR(wiz->p_mac_div_sel0[i])) {
dev_err(dev, "P%d_MAC_DIV_SEL0 reg field init fail\n",
i);
return PTR_ERR(wiz->p_mac_div_sel0[i]);
}
wiz->p_mac_div_sel1[i] =
devm_regmap_field_alloc(dev, regmap, p_mac_div_sel1[i]);
if (IS_ERR(wiz->p_mac_div_sel1[i])) {
dev_err(dev, "P%d_MAC_DIV_SEL1 reg field init fail\n",
i);
return PTR_ERR(wiz->p_mac_div_sel1[i]);
}
}
wiz->typec_ln10_swap = devm_regmap_field_alloc(dev, regmap,
typec_ln10_swap);
if (IS_ERR(wiz->typec_ln10_swap)) {
dev_err(dev, "LN10_SWAP reg field init failed\n");
return PTR_ERR(wiz->typec_ln10_swap);
}
wiz->typec_ln23_swap = devm_regmap_field_alloc(dev, regmap,
typec_ln23_swap);
if (IS_ERR(wiz->typec_ln23_swap)) {
dev_err(dev, "LN23_SWAP reg field init failed\n");
return PTR_ERR(wiz->typec_ln23_swap);
}
wiz->phy_en_refclk = devm_regmap_field_alloc(dev, regmap, phy_en_refclk);
if (IS_ERR(wiz->phy_en_refclk)) {
dev_err(dev, "PHY_EN_REFCLK reg field init failed\n");
return PTR_ERR(wiz->phy_en_refclk);
}
return 0;
}
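/* Gate clock built on top of the PHY_EN_REFCLK field. */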
static int wiz_phy_en_refclk_enable(struct clk_hw *hw)
{
struct wiz_phy_en_refclk *wiz_phy_en_refclk = to_wiz_phy_en_refclk(hw);
struct regmap_field *phy_en_refclk = wiz_phy_en_refclk->phy_en_refclk;
regmap_field_write(phy_en_refclk, 1);
return 0;
}
static void wiz_phy_en_refclk_disable(struct clk_hw *hw)
{
struct wiz_phy_en_refclk *wiz_phy_en_refclk = to_wiz_phy_en_refclk(hw);
struct regmap_field *phy_en_refclk = wiz_phy_en_refclk->phy_en_refclk;
regmap_field_write(phy_en_refclk, 0);
}
static int wiz_phy_en_refclk_is_enabled(struct clk_hw *hw)
{
struct wiz_phy_en_refclk *wiz_phy_en_refclk = to_wiz_phy_en_refclk(hw);
struct regmap_field *phy_en_refclk = wiz_phy_en_refclk->phy_en_refclk;
int val;
regmap_field_read(phy_en_refclk, &val);
return !!val;
}
static const struct clk_ops wiz_phy_en_refclk_ops = {
.enable = wiz_phy_en_refclk_enable,
.disable = wiz_phy_en_refclk_disable,
.is_enabled = wiz_phy_en_refclk_is_enabled,
};
static int wiz_phy_en_refclk_register(struct wiz *wiz)
{
struct wiz_phy_en_refclk *wiz_phy_en_refclk;
struct device *dev = wiz->dev;
struct clk_init_data *init;
struct clk *clk;
char *clk_name;
unsigned int sz;
wiz_phy_en_refclk = devm_kzalloc(dev, sizeof(*wiz_phy_en_refclk), GFP_KERNEL);
if (!wiz_phy_en_refclk)
return -ENOMEM;
init = &wiz_phy_en_refclk->clk_data;
init->ops = &wiz_phy_en_refclk_ops;
init->flags = 0;
sz = strlen(dev_name(dev)) + strlen(output_clk_names[TI_WIZ_PHY_EN_REFCLK]) + 2;
clk_name = kzalloc(sz, GFP_KERNEL);
if (!clk_name)
return -ENOMEM;
snprintf(clk_name, sz, "%s_%s", dev_name(dev), output_clk_names[TI_WIZ_PHY_EN_REFCLK]);
init->name = clk_name;
wiz_phy_en_refclk->phy_en_refclk = wiz->phy_en_refclk;
wiz_phy_en_refclk->hw.init = init;
clk = devm_clk_register(dev, &wiz_phy_en_refclk->hw);
kfree(clk_name);
if (IS_ERR(clk))
return PTR_ERR(clk);
wiz->output_clks[TI_WIZ_PHY_EN_REFCLK] = clk;
return 0;
}
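/* Reference clock mux: parent selection goes through a regmap field and a value table. */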
static u8 wiz_clk_mux_get_parent(struct clk_hw *hw)
{
struct wiz_clk_mux *mux = to_wiz_clk_mux(hw);
struct regmap_field *field = mux->field;
unsigned int val;
regmap_field_read(field, &val);
return clk_mux_val_to_index(hw, (u32 *)mux->table, 0, val);
}
static int wiz_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
struct wiz_clk_mux *mux = to_wiz_clk_mux(hw);
struct regmap_field *field = mux->field;
int val;
val = mux->table[index];
return regmap_field_write(field, val);
}
static const struct clk_ops wiz_clk_mux_ops = {
.determine_rate = __clk_mux_determine_rate,
.set_parent = wiz_clk_mux_set_parent,
.get_parent = wiz_clk_mux_get_parent,
};
static int wiz_mux_clk_register(struct wiz *wiz, struct regmap_field *field,
const struct wiz_clk_mux_sel *mux_sel, int clk_index)
{
struct device *dev = wiz->dev;
struct clk_init_data *init;
const char **parent_names;
unsigned int num_parents;
struct wiz_clk_mux *mux;
char clk_name[100];
struct clk *clk;
int ret = 0, i;
mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
if (!mux)
return -ENOMEM;
num_parents = mux_sel->num_parents;
parent_names = kzalloc((sizeof(char *) * num_parents), GFP_KERNEL);
if (!parent_names)
return -ENOMEM;
for (i = 0; i < num_parents; i++) {
clk = wiz->input_clks[mux_sel->parents[i]];
if (IS_ERR_OR_NULL(clk)) {
dev_err(dev, "Failed to get parent clk for %s\n",
output_clk_names[clk_index]);
ret = -EINVAL;
goto err;
}
parent_names[i] = __clk_get_name(clk);
}
snprintf(clk_name, sizeof(clk_name), "%s_%s", dev_name(dev), output_clk_names[clk_index]);
init = &mux->clk_data;
init->ops = &wiz_clk_mux_ops;
init->flags = CLK_SET_RATE_NO_REPARENT;
init->parent_names = parent_names;
init->num_parents = num_parents;
init->name = clk_name;
mux->field = field;
mux->table = mux_sel->table;
mux->hw.init = init;
clk = devm_clk_register(dev, &mux->hw);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
goto err;
}
wiz->output_clks[clk_index] = clk;
err:
kfree(parent_names);
return ret;
}
static int wiz_mux_of_clk_register(struct wiz *wiz, struct device_node *node,
struct regmap_field *field, const u32 *table)
{
struct device *dev = wiz->dev;
struct clk_init_data *init;
const char **parent_names;
unsigned int num_parents;
struct wiz_clk_mux *mux;
char clk_name[100];
struct clk *clk;
int ret;
mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
if (!mux)
return -ENOMEM;
num_parents = of_clk_get_parent_count(node);
if (num_parents < 2) {
dev_err(dev, "SERDES clock must have parents\n");
return -EINVAL;
}
parent_names = devm_kzalloc(dev, (sizeof(char *) * num_parents),
GFP_KERNEL);
if (!parent_names)
return -ENOMEM;
of_clk_parent_fill(node, parent_names, num_parents);
snprintf(clk_name, sizeof(clk_name), "%s_%s", dev_name(dev),
node->name);
init = &mux->clk_data;
init->ops = &wiz_clk_mux_ops;
init->flags = CLK_SET_RATE_NO_REPARENT;
init->parent_names = parent_names;
init->num_parents = num_parents;
init->name = clk_name;
mux->field = field;
mux->table = table;
mux->hw.init = init;
clk = devm_clk_register(dev, &mux->hw);
if (IS_ERR(clk))
return PTR_ERR(clk);
ret = of_clk_add_provider(node, of_clk_src_simple_get, clk);
if (ret)
dev_err(dev, "Failed to add clock provider: %s\n", clk_name);
return ret;
}
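/* Reference clock divider backed by a regmap field and a clk_div_table. */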
static unsigned long wiz_clk_div_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct wiz_clk_divider *div = to_wiz_clk_div(hw);
struct regmap_field *field = div->field;
int val;
regmap_field_read(field, &val);
return divider_recalc_rate(hw, parent_rate, val, div->table, 0x0, 2);
}
static long wiz_clk_div_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
struct wiz_clk_divider *div = to_wiz_clk_div(hw);
return divider_round_rate(hw, rate, prate, div->table, 2, 0x0);
}
static int wiz_clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct wiz_clk_divider *div = to_wiz_clk_div(hw);
struct regmap_field *field = div->field;
int val;
val = divider_get_val(rate, parent_rate, div->table, 2, 0x0);
if (val < 0)
return val;
return regmap_field_write(field, val);
}
static const struct clk_ops wiz_clk_div_ops = {
.recalc_rate = wiz_clk_div_recalc_rate,
.round_rate = wiz_clk_div_round_rate,
.set_rate = wiz_clk_div_set_rate,
};
static int wiz_div_clk_register(struct wiz *wiz, struct device_node *node,
struct regmap_field *field,
const struct clk_div_table *table)
{
struct device *dev = wiz->dev;
struct wiz_clk_divider *div;
struct clk_init_data *init;
const char **parent_names;
char clk_name[100];
struct clk *clk;
int ret;
div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
if (!div)
return -ENOMEM;
snprintf(clk_name, sizeof(clk_name), "%s_%s", dev_name(dev),
node->name);
parent_names = devm_kzalloc(dev, sizeof(char *), GFP_KERNEL);
if (!parent_names)
return -ENOMEM;
of_clk_parent_fill(node, parent_names, 1);
init = &div->clk_data;
init->ops = &wiz_clk_div_ops;
init->flags = 0;
init->parent_names = parent_names;
init->num_parents = 1;
init->name = clk_name;
div->field = field;
div->table = table;
div->hw.init = init;
clk = devm_clk_register(dev, &div->hw);
if (IS_ERR(clk))
return PTR_ERR(clk);
ret = of_clk_add_provider(node, of_clk_src_simple_get, clk);
if (ret)
dev_err(dev, "Failed to add clock provider: %s\n", clk_name);
return ret;
}
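/* Remove the clock providers registered by wiz_clock_register()/wiz_clock_init(). */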
static void wiz_clock_cleanup(struct wiz *wiz, struct device_node *node)
{
const struct wiz_clk_mux_sel *clk_mux_sel = wiz->clk_mux_sel;
struct device *dev = wiz->dev;
struct device_node *clk_node;
int i;
switch (wiz->type) {
case AM64_WIZ_10G:
case J7200_WIZ_10G:
case J784S4_WIZ_10G:
case J721S2_WIZ_10G:
of_clk_del_provider(dev->of_node);
return;
default:
break;
}
for (i = 0; i < WIZ_MUX_NUM_CLOCKS; i++) {
clk_node = of_get_child_by_name(node, clk_mux_sel[i].node_name);
of_clk_del_provider(clk_node);
of_node_put(clk_node);
}
for (i = 0; i < wiz->clk_div_sel_num; i++) {
clk_node = of_get_child_by_name(node, clk_div_sel[i].node_name);
of_clk_del_provider(clk_node);
of_node_put(clk_node);
}
of_clk_del_provider(wiz->dev->of_node);
}
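/*
 * Register the refclk muxes and PHY_EN_REFCLK as output clocks behind a
 * single onecell clock provider on the WIZ node.
 */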
static int wiz_clock_register(struct wiz *wiz)
{
const struct wiz_clk_mux_sel *clk_mux_sel = wiz->clk_mux_sel;
struct device *dev = wiz->dev;
struct device_node *node = dev->of_node;
int clk_index;
int ret;
int i;
clk_index = TI_WIZ_PLL0_REFCLK;
for (i = 0; i < WIZ_MUX_NUM_CLOCKS; i++, clk_index++) {
ret = wiz_mux_clk_register(wiz, wiz->mux_sel_field[i], &clk_mux_sel[i], clk_index);
if (ret) {
dev_err(dev, "Failed to register clk: %s\n", output_clk_names[clk_index]);
return ret;
}
}
ret = wiz_phy_en_refclk_register(wiz);
if (ret) {
dev_err(dev, "Failed to add phy-en-refclk\n");
return ret;
}
wiz->clk_data.clks = wiz->output_clks;
wiz->clk_data.clk_num = WIZ_MAX_OUTPUT_CLOCKS;
ret = of_clk_add_provider(node, of_clk_src_onecell_get, &wiz->clk_data);
if (ret)
dev_err(dev, "Failed to add clock provider: %s\n", node->name);
return ret;
}
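/*
 * Get the input reference clocks, program the refclk mode and divider
 * fields from their rates, then register the output clocks: either through
 * wiz_clock_register() or per mux/divider child node in DT.
 */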
static int wiz_clock_init(struct wiz *wiz, struct device_node *node)
{
const struct wiz_clk_mux_sel *clk_mux_sel = wiz->clk_mux_sel;
struct device *dev = wiz->dev;
struct device_node *clk_node;
const char *node_name;
unsigned long rate;
struct clk *clk;
int ret;
int i;
clk = devm_clk_get(dev, "core_ref_clk");
if (IS_ERR(clk)) {
dev_err(dev, "core_ref_clk clock not found\n");
ret = PTR_ERR(clk);
return ret;
}
wiz->input_clks[WIZ_CORE_REFCLK] = clk;
rate = clk_get_rate(clk);
if (rate >= 100000000)
regmap_field_write(wiz->pma_cmn_refclk_int_mode, 0x1);
else
regmap_field_write(wiz->pma_cmn_refclk_int_mode, 0x3);
switch (wiz->type) {
case AM64_WIZ_10G:
case J7200_WIZ_10G:
switch (rate) {
case REF_CLK_100MHZ:
regmap_field_write(wiz->div_sel_field[CMN_REFCLK_DIG_DIV], 0x2);
break;
case REF_CLK_156_25MHZ:
regmap_field_write(wiz->div_sel_field[CMN_REFCLK_DIG_DIV], 0x3);
break;
default:
regmap_field_write(wiz->div_sel_field[CMN_REFCLK_DIG_DIV], 0);
break;
}
break;
default:
break;
}
if (wiz->data->pma_cmn_refclk1_int_mode) {
clk = devm_clk_get(dev, "core_ref1_clk");
if (IS_ERR(clk)) {
dev_err(dev, "core_ref1_clk clock not found\n");
ret = PTR_ERR(clk);
return ret;
}
wiz->input_clks[WIZ_CORE_REFCLK1] = clk;
rate = clk_get_rate(clk);
if (rate >= 100000000)
regmap_field_write(wiz->pma_cmn_refclk1_int_mode, 0x1);
else
regmap_field_write(wiz->pma_cmn_refclk1_int_mode, 0x3);
}
clk = devm_clk_get(dev, "ext_ref_clk");
if (IS_ERR(clk)) {
dev_err(dev, "ext_ref_clk clock not found\n");
ret = PTR_ERR(clk);
return ret;
}
wiz->input_clks[WIZ_EXT_REFCLK] = clk;
rate = clk_get_rate(clk);
if (rate >= 100000000)
regmap_field_write(wiz->pma_cmn_refclk_mode, 0x0);
else
regmap_field_write(wiz->pma_cmn_refclk_mode, 0x2);
switch (wiz->type) {
case AM64_WIZ_10G:
case J7200_WIZ_10G:
case J784S4_WIZ_10G:
case J721S2_WIZ_10G:
ret = wiz_clock_register(wiz);
if (ret)
dev_err(dev, "Failed to register wiz clocks\n");
return ret;
default:
break;
}
for (i = 0; i < WIZ_MUX_NUM_CLOCKS; i++) {
node_name = clk_mux_sel[i].node_name;
clk_node = of_get_child_by_name(node, node_name);
if (!clk_node) {
dev_err(dev, "Unable to get %s node\n", node_name);
ret = -EINVAL;
goto err;
}
ret = wiz_mux_of_clk_register(wiz, clk_node, wiz->mux_sel_field[i],
clk_mux_sel[i].table);
if (ret) {
dev_err(dev, "Failed to register %s clock\n",
node_name);
of_node_put(clk_node);
goto err;
}
of_node_put(clk_node);
}
for (i = 0; i < wiz->clk_div_sel_num; i++) {
node_name = clk_div_sel[i].node_name;
clk_node = of_get_child_by_name(node, node_name);
if (!clk_node) {
dev_err(dev, "Unable to get %s node\n", node_name);
ret = -EINVAL;
goto err;
}
ret = wiz_div_clk_register(wiz, clk_node, wiz->div_sel_field[i],
clk_div_sel[i].table);
if (ret) {
dev_err(dev, "Failed to register %s clock\n",
node_name);
of_node_put(clk_node);
goto err;
}
of_node_put(clk_node);
}
return 0;
err:
wiz_clock_cleanup(wiz, node);
return ret;
}
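/* Reset id 0 covers the whole SERDES (PHY_RESET_N); ids 1..n map to lanes 0..n-1. */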
static int wiz_phy_reset_assert(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct device *dev = rcdev->dev;
struct wiz *wiz = dev_get_drvdata(dev);
int ret = 0;
if (id == 0) {
ret = regmap_field_write(wiz->phy_reset_n, false);
return ret;
}
ret = regmap_field_write(wiz->p_enable[id - 1], P_ENABLE_DISABLE);
return ret;
}
static int wiz_phy_fullrt_div(struct wiz *wiz, int lane)
{
switch (wiz->type) {
case AM64_WIZ_10G:
if (wiz->lane_phy_type[lane] == PHY_TYPE_PCIE)
return regmap_field_write(wiz->p0_fullrt_div[lane], 0x1);
break;
case J721E_WIZ_16G:
case J721E_WIZ_10G:
case J7200_WIZ_10G:
case J721S2_WIZ_10G:
if (wiz->lane_phy_type[lane] == PHY_TYPE_SGMII)
return regmap_field_write(wiz->p0_fullrt_div[lane], 0x2);
break;
default:
return 0;
}
return 0;
}
static int wiz_phy_reset_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct device *dev = rcdev->dev;
struct wiz *wiz = dev_get_drvdata(dev);
int ret;
if (id == 0) {
/* if typec-dir gpio was specified, set LN10 SWAP bit based on that */
if (wiz->gpio_typec_dir) {
if (wiz->typec_dir_delay)
msleep_interruptible(wiz->typec_dir_delay);
if (gpiod_get_value_cansleep(wiz->gpio_typec_dir))
regmap_field_write(wiz->typec_ln10_swap, 1);
else
regmap_field_write(wiz->typec_ln10_swap, 0);
} else {
			/*
			 * If no typec-dir gpio is specified and the PHY type is
			 * USB3 with master lane number '0' or '2', set the LN10
			 * or LN23 SWAP bit to '1' respectively.
			 */
u32 num_lanes = wiz->num_lanes;
int i;
for (i = 0; i < num_lanes; i++) {
if (wiz->lane_phy_type[i] == PHY_TYPE_USB3) {
switch (wiz->master_lane_num[i]) {
case LANE0:
regmap_field_write(wiz->typec_ln10_swap, 1);
break;
case LANE2:
regmap_field_write(wiz->typec_ln23_swap, 1);
break;
default:
break;
}
}
}
}
}
if (id == 0) {
ret = regmap_field_write(wiz->phy_reset_n, true);
return ret;
}
ret = wiz_phy_fullrt_div(wiz, id - 1);
if (ret)
return ret;
if (wiz->lane_phy_type[id - 1] == PHY_TYPE_DP)
ret = regmap_field_write(wiz->p_enable[id - 1], P_ENABLE);
else
ret = regmap_field_write(wiz->p_enable[id - 1], P_ENABLE_FORCE);
return ret;
}
static const struct reset_control_ops wiz_phy_reset_ops = {
.assert = wiz_phy_reset_assert,
.deassert = wiz_phy_reset_deassert,
};
static const struct regmap_config wiz_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.fast_io = true,
};
static struct wiz_data j721e_16g_data = {
.type = J721E_WIZ_16G,
.pll0_refclk_mux_sel = &pll0_refclk_mux_sel,
.pll1_refclk_mux_sel = &pll1_refclk_mux_sel,
.refclk_dig_sel = &refclk_dig_sel_16g,
.pma_cmn_refclk1_dig_div = &pma_cmn_refclk1_dig_div,
.clk_mux_sel = clk_mux_sel_16g,
.clk_div_sel_num = WIZ_DIV_NUM_CLOCKS_16G,
};
static struct wiz_data j721e_10g_data = {
.type = J721E_WIZ_10G,
.pll0_refclk_mux_sel = &pll0_refclk_mux_sel,
.pll1_refclk_mux_sel = &pll1_refclk_mux_sel,
.refclk_dig_sel = &refclk_dig_sel_10g,
.clk_mux_sel = clk_mux_sel_10g,
.clk_div_sel_num = WIZ_DIV_NUM_CLOCKS_10G,
};
static struct wiz_data am64_10g_data = {
.type = AM64_WIZ_10G,
.pll0_refclk_mux_sel = &pll0_refclk_mux_sel,
.pll1_refclk_mux_sel = &pll1_refclk_mux_sel,
.refclk_dig_sel = &refclk_dig_sel_10g,
.clk_mux_sel = clk_mux_sel_10g,
.clk_div_sel_num = WIZ_DIV_NUM_CLOCKS_10G,
};
static struct wiz_data j7200_pg2_10g_data = {
.type = J7200_WIZ_10G,
.pll0_refclk_mux_sel = &sup_pll0_refclk_mux_sel,
.pll1_refclk_mux_sel = &sup_pll1_refclk_mux_sel,
.refclk_dig_sel = &sup_refclk_dig_sel_10g,
.pma_cmn_refclk1_int_mode = &sup_pma_cmn_refclk1_int_mode,
.clk_mux_sel = clk_mux_sel_10g_2_refclk,
.clk_div_sel_num = WIZ_DIV_NUM_CLOCKS_10G,
};
static struct wiz_data j784s4_10g_data = {
.type = J784S4_WIZ_10G,
.pll0_refclk_mux_sel = &pll0_refclk_mux_sel_2,
.pll1_refclk_mux_sel = &pll1_refclk_mux_sel_2,
.refclk_dig_sel = &refclk_dig_sel_16g,
.pma_cmn_refclk1_int_mode = &pma_cmn_refclk1_int_mode,
.clk_mux_sel = clk_mux_sel_10g_2_refclk,
.clk_div_sel_num = WIZ_DIV_NUM_CLOCKS_10G,
};
static struct wiz_data j721s2_10g_data = {
.type = J721S2_WIZ_10G,
.pll0_refclk_mux_sel = &pll0_refclk_mux_sel,
.pll1_refclk_mux_sel = &pll1_refclk_mux_sel,
.refclk_dig_sel = &refclk_dig_sel_10g,
.clk_mux_sel = clk_mux_sel_10g,
.clk_div_sel_num = WIZ_DIV_NUM_CLOCKS_10G,
};
static const struct of_device_id wiz_id_table[] = {
{
.compatible = "ti,j721e-wiz-16g", .data = &j721e_16g_data,
},
{
.compatible = "ti,j721e-wiz-10g", .data = &j721e_10g_data,
},
{
.compatible = "ti,am64-wiz-10g", .data = &am64_10g_data,
},
{
.compatible = "ti,j7200-wiz-10g", .data = &j7200_pg2_10g_data,
},
{
.compatible = "ti,j784s4-wiz-10g", .data = &j784s4_10g_data,
},
{
.compatible = "ti,j721s2-wiz-10g", .data = &j721s2_10g_data,
},
{}
};
MODULE_DEVICE_TABLE(of, wiz_id_table);
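/*
 * Parse the "serdes" child node and record the master lane number and PHY
 * type for every lane covered by a "phy" or "link" subnode.
 */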
static int wiz_get_lane_phy_types(struct device *dev, struct wiz *wiz)
{
struct device_node *serdes, *subnode;
serdes = of_get_child_by_name(dev->of_node, "serdes");
if (!serdes) {
dev_err(dev, "%s: Getting \"serdes\"-node failed\n", __func__);
return -EINVAL;
}
for_each_child_of_node(serdes, subnode) {
u32 reg, num_lanes = 1, phy_type = PHY_NONE;
int ret, i;
if (!(of_node_name_eq(subnode, "phy") ||
of_node_name_eq(subnode, "link")))
continue;
		ret = of_property_read_u32(subnode, "reg", &reg);
if (ret) {
of_node_put(subnode);
dev_err(dev,
"%s: Reading \"reg\" from \"%s\" failed: %d\n",
__func__, subnode->name, ret);
return ret;
}
of_property_read_u32(subnode, "cdns,num-lanes", &num_lanes);
of_property_read_u32(subnode, "cdns,phy-type", &phy_type);
dev_dbg(dev, "%s: Lanes %u-%u have phy-type %u\n", __func__,
reg, reg + num_lanes - 1, phy_type);
for (i = reg; i < reg + num_lanes; i++) {
wiz->master_lane_num[i] = reg;
wiz->lane_phy_type[i] = phy_type;
}
}
return 0;
}
static int wiz_probe(struct platform_device *pdev)
{
struct reset_controller_dev *phy_reset_dev;
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
struct platform_device *serdes_pdev;
bool already_configured = false;
struct device_node *child_node;
struct regmap *regmap;
struct resource res;
void __iomem *base;
struct wiz *wiz;
int ret, val, i;
u32 num_lanes;
const struct wiz_data *data;
wiz = devm_kzalloc(dev, sizeof(*wiz), GFP_KERNEL);
if (!wiz)
return -ENOMEM;
data = of_device_get_match_data(dev);
if (!data) {
dev_err(dev, "NULL device data\n");
return -EINVAL;
}
wiz->data = data;
wiz->type = data->type;
child_node = of_get_child_by_name(node, "serdes");
if (!child_node) {
dev_err(dev, "Failed to get SERDES child DT node\n");
return -ENODEV;
}
ret = of_address_to_resource(child_node, 0, &res);
if (ret) {
dev_err(dev, "Failed to get memory resource\n");
goto err_addr_to_resource;
}
base = devm_ioremap(dev, res.start, resource_size(&res));
if (!base) {
ret = -ENOMEM;
goto err_addr_to_resource;
}
regmap = devm_regmap_init_mmio(dev, base, &wiz_regmap_config);
if (IS_ERR(regmap)) {
dev_err(dev, "Failed to initialize regmap\n");
ret = PTR_ERR(regmap);
goto err_addr_to_resource;
}
wiz->scm_regmap = syscon_regmap_lookup_by_phandle(node, "ti,scm");
if (IS_ERR(wiz->scm_regmap)) {
if (wiz->type == J7200_WIZ_10G) {
dev_err(dev, "Couldn't get ti,scm regmap\n");
ret = -ENODEV;
goto err_addr_to_resource;
}
wiz->scm_regmap = NULL;
}
ret = of_property_read_u32(node, "num-lanes", &num_lanes);
if (ret) {
dev_err(dev, "Failed to read num-lanes property\n");
goto err_addr_to_resource;
}
if (num_lanes > WIZ_MAX_LANES) {
dev_err(dev, "Cannot support %d lanes\n", num_lanes);
ret = -ENODEV;
goto err_addr_to_resource;
}
wiz->gpio_typec_dir = devm_gpiod_get_optional(dev, "typec-dir",
GPIOD_IN);
if (IS_ERR(wiz->gpio_typec_dir)) {
ret = PTR_ERR(wiz->gpio_typec_dir);
if (ret != -EPROBE_DEFER)
dev_err(dev, "Failed to request typec-dir gpio: %d\n",
ret);
goto err_addr_to_resource;
}
if (wiz->gpio_typec_dir) {
ret = of_property_read_u32(node, "typec-dir-debounce-ms",
&wiz->typec_dir_delay);
if (ret && ret != -EINVAL) {
dev_err(dev, "Invalid typec-dir-debounce property\n");
goto err_addr_to_resource;
}
/* use min. debounce from Type-C spec if not provided in DT */
if (ret == -EINVAL)
wiz->typec_dir_delay = WIZ_TYPEC_DIR_DEBOUNCE_MIN;
if (wiz->typec_dir_delay < WIZ_TYPEC_DIR_DEBOUNCE_MIN ||
wiz->typec_dir_delay > WIZ_TYPEC_DIR_DEBOUNCE_MAX) {
ret = -EINVAL;
dev_err(dev, "Invalid typec-dir-debounce property\n");
goto err_addr_to_resource;
}
}
ret = wiz_get_lane_phy_types(dev, wiz);
if (ret)
goto err_addr_to_resource;
wiz->dev = dev;
wiz->regmap = regmap;
wiz->num_lanes = num_lanes;
wiz->clk_mux_sel = data->clk_mux_sel;
wiz->clk_div_sel = clk_div_sel;
wiz->clk_div_sel_num = data->clk_div_sel_num;
platform_set_drvdata(pdev, wiz);
ret = wiz_regfield_init(wiz);
if (ret) {
dev_err(dev, "Failed to initialize regfields\n");
goto err_addr_to_resource;
}
/* Enable supplemental Control override if available */
if (wiz->scm_regmap)
regmap_field_write(wiz->sup_legacy_clk_override, 1);
phy_reset_dev = &wiz->wiz_phy_reset_dev;
phy_reset_dev->dev = dev;
	phy_reset_dev->ops = &wiz_phy_reset_ops;
	phy_reset_dev->owner = THIS_MODULE;
phy_reset_dev->of_node = node;
	/* One reset per lane plus one for the entire SERDES */
phy_reset_dev->nr_resets = num_lanes + 1;
ret = devm_reset_controller_register(dev, phy_reset_dev);
if (ret < 0) {
dev_warn(dev, "Failed to register reset controller\n");
goto err_addr_to_resource;
}
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
dev_err(dev, "pm_runtime_get_sync failed\n");
goto err_get_sync;
}
ret = wiz_clock_init(wiz, node);
if (ret < 0) {
dev_warn(dev, "Failed to initialize clocks\n");
goto err_get_sync;
}
for (i = 0; i < wiz->num_lanes; i++) {
regmap_field_read(wiz->p_enable[i], &val);
if (val & (P_ENABLE | P_ENABLE_FORCE)) {
already_configured = true;
break;
}
}
if (!already_configured) {
ret = wiz_init(wiz);
if (ret) {
dev_err(dev, "WIZ initialization failed\n");
goto err_wiz_init;
}
}
serdes_pdev = of_platform_device_create(child_node, NULL, dev);
if (!serdes_pdev) {
dev_WARN(dev, "Unable to create SERDES platform device\n");
ret = -ENOMEM;
goto err_wiz_init;
}
wiz->serdes_pdev = serdes_pdev;
of_node_put(child_node);
return 0;
err_wiz_init:
wiz_clock_cleanup(wiz, node);
err_get_sync:
pm_runtime_put(dev);
pm_runtime_disable(dev);
err_addr_to_resource:
of_node_put(child_node);
return ret;
}
static void wiz_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
struct platform_device *serdes_pdev;
struct wiz *wiz;
wiz = dev_get_drvdata(dev);
serdes_pdev = wiz->serdes_pdev;
of_platform_device_destroy(&serdes_pdev->dev, NULL);
wiz_clock_cleanup(wiz, node);
pm_runtime_put(dev);
pm_runtime_disable(dev);
}
static struct platform_driver wiz_driver = {
.probe = wiz_probe,
.remove_new = wiz_remove,
.driver = {
.name = "wiz",
.of_match_table = wiz_id_table,
},
};
module_platform_driver(wiz_driver);
MODULE_AUTHOR("Texas Instruments Inc.");
MODULE_DESCRIPTION("TI J721E WIZ driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/phy/ti/phy-j721e-wiz.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* omap-control-phy.c - The PHY part of control module.
*
* Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
* Author: Kishon Vijay Abraham I <[email protected]>
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/phy/omap_control_phy.h>
/**
* omap_control_pcie_pcs - set the PCS delay count
* @dev: the control module device
* @delay: 8 bit delay value
*/
void omap_control_pcie_pcs(struct device *dev, u8 delay)
{
u32 val;
struct omap_control_phy *control_phy;
if (IS_ERR_OR_NULL(dev)) {
pr_err("%s: invalid device\n", __func__);
return;
}
control_phy = dev_get_drvdata(dev);
if (!control_phy) {
dev_err(dev, "%s: invalid control phy device\n", __func__);
return;
}
if (control_phy->type != OMAP_CTRL_TYPE_PCIE) {
dev_err(dev, "%s: unsupported operation\n", __func__);
return;
}
val = readl(control_phy->pcie_pcs);
val &= ~(OMAP_CTRL_PCIE_PCS_MASK <<
OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT);
val |= (delay << OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT);
writel(val, control_phy->pcie_pcs);
}
EXPORT_SYMBOL_GPL(omap_control_pcie_pcs);
/**
* omap_control_phy_power - power on/off the phy using control module reg
* @dev: the control module device
* @on: 0 or 1, based on powering on or off the PHY
*/
void omap_control_phy_power(struct device *dev, int on)
{
u32 val;
unsigned long rate;
struct omap_control_phy *control_phy;
if (IS_ERR_OR_NULL(dev)) {
pr_err("%s: invalid device\n", __func__);
return;
}
control_phy = dev_get_drvdata(dev);
if (!control_phy) {
dev_err(dev, "%s: invalid control phy device\n", __func__);
return;
}
if (control_phy->type == OMAP_CTRL_TYPE_OTGHS)
return;
val = readl(control_phy->power);
switch (control_phy->type) {
case OMAP_CTRL_TYPE_USB2:
if (on)
val &= ~OMAP_CTRL_DEV_PHY_PD;
else
val |= OMAP_CTRL_DEV_PHY_PD;
break;
case OMAP_CTRL_TYPE_PCIE:
case OMAP_CTRL_TYPE_PIPE3:
rate = clk_get_rate(control_phy->sys_clk);
rate = rate/1000000;
if (on) {
val &= ~(OMAP_CTRL_PIPE3_PHY_PWRCTL_CLK_CMD_MASK |
OMAP_CTRL_PIPE3_PHY_PWRCTL_CLK_FREQ_MASK);
val |= OMAP_CTRL_PIPE3_PHY_TX_RX_POWERON <<
OMAP_CTRL_PIPE3_PHY_PWRCTL_CLK_CMD_SHIFT;
val |= rate <<
OMAP_CTRL_PIPE3_PHY_PWRCTL_CLK_FREQ_SHIFT;
} else {
val &= ~OMAP_CTRL_PIPE3_PHY_PWRCTL_CLK_CMD_MASK;
val |= OMAP_CTRL_PIPE3_PHY_TX_RX_POWEROFF <<
OMAP_CTRL_PIPE3_PHY_PWRCTL_CLK_CMD_SHIFT;
}
break;
case OMAP_CTRL_TYPE_DRA7USB2:
if (on)
val &= ~OMAP_CTRL_USB2_PHY_PD;
else
val |= OMAP_CTRL_USB2_PHY_PD;
break;
case OMAP_CTRL_TYPE_AM437USB2:
if (on) {
val &= ~(AM437X_CTRL_USB2_PHY_PD |
AM437X_CTRL_USB2_OTG_PD);
val |= (AM437X_CTRL_USB2_OTGVDET_EN |
AM437X_CTRL_USB2_OTGSESSEND_EN);
} else {
val &= ~(AM437X_CTRL_USB2_OTGVDET_EN |
AM437X_CTRL_USB2_OTGSESSEND_EN);
val |= (AM437X_CTRL_USB2_PHY_PD |
AM437X_CTRL_USB2_OTG_PD);
}
break;
default:
dev_err(dev, "%s: type %d not recognized\n",
__func__, control_phy->type);
break;
}
writel(val, control_phy->power);
}
EXPORT_SYMBOL_GPL(omap_control_phy_power);
/**
 * omap_control_usb_host_mode - set AVALID, VBUSVALID and ground the ID pin
* @ctrl_phy: struct omap_control_phy *
*
* Writes to the mailbox register to notify the usb core that a usb
* device has been connected.
*/
static void omap_control_usb_host_mode(struct omap_control_phy *ctrl_phy)
{
u32 val;
val = readl(ctrl_phy->otghs_control);
val &= ~(OMAP_CTRL_DEV_IDDIG | OMAP_CTRL_DEV_SESSEND);
val |= OMAP_CTRL_DEV_AVALID | OMAP_CTRL_DEV_VBUSVALID;
writel(val, ctrl_phy->otghs_control);
}
/**
 * omap_control_usb_device_mode - set AVALID, VBUSVALID and leave the ID pin in
 * high impedance
* @ctrl_phy: struct omap_control_phy *
*
* Writes to the mailbox register to notify the usb core that it has been
* connected to a usb host.
*/
static void omap_control_usb_device_mode(struct omap_control_phy *ctrl_phy)
{
u32 val;
val = readl(ctrl_phy->otghs_control);
val &= ~OMAP_CTRL_DEV_SESSEND;
val |= OMAP_CTRL_DEV_IDDIG | OMAP_CTRL_DEV_AVALID |
OMAP_CTRL_DEV_VBUSVALID;
writel(val, ctrl_phy->otghs_control);
}
/**
 * omap_control_usb_set_sessionend - enable SESSIONEND and set IDDIG to high
 * impedance
* @ctrl_phy: struct omap_control_phy *
*
* Writes to the mailbox register to notify the usb core it's now in
* disconnected state.
*/
static void omap_control_usb_set_sessionend(struct omap_control_phy *ctrl_phy)
{
u32 val;
val = readl(ctrl_phy->otghs_control);
val &= ~(OMAP_CTRL_DEV_AVALID | OMAP_CTRL_DEV_VBUSVALID);
val |= OMAP_CTRL_DEV_IDDIG | OMAP_CTRL_DEV_SESSEND;
writel(val, ctrl_phy->otghs_control);
}
/**
 * omap_control_usb_set_mode - configure USB for host mode, device mode or the
 * disconnected state
* @dev: the control module device
* @mode: The mode to which usb should be configured
*
 * Writes to the mailbox register to notify the usb core of the new
 * connection state.
*/
void omap_control_usb_set_mode(struct device *dev,
enum omap_control_usb_mode mode)
{
struct omap_control_phy *ctrl_phy;
if (IS_ERR_OR_NULL(dev))
return;
ctrl_phy = dev_get_drvdata(dev);
if (!ctrl_phy) {
dev_err(dev, "Invalid control phy device\n");
return;
}
if (ctrl_phy->type != OMAP_CTRL_TYPE_OTGHS)
return;
switch (mode) {
case USB_MODE_HOST:
omap_control_usb_host_mode(ctrl_phy);
break;
case USB_MODE_DEVICE:
omap_control_usb_device_mode(ctrl_phy);
break;
case USB_MODE_DISCONNECT:
omap_control_usb_set_sessionend(ctrl_phy);
break;
default:
dev_vdbg(dev, "invalid omap control usb mode\n");
}
}
EXPORT_SYMBOL_GPL(omap_control_usb_set_mode);
static const enum omap_control_phy_type otghs_data = OMAP_CTRL_TYPE_OTGHS;
static const enum omap_control_phy_type usb2_data = OMAP_CTRL_TYPE_USB2;
static const enum omap_control_phy_type pipe3_data = OMAP_CTRL_TYPE_PIPE3;
static const enum omap_control_phy_type pcie_data = OMAP_CTRL_TYPE_PCIE;
static const enum omap_control_phy_type dra7usb2_data = OMAP_CTRL_TYPE_DRA7USB2;
static const enum omap_control_phy_type am437usb2_data = OMAP_CTRL_TYPE_AM437USB2;
static const struct of_device_id omap_control_phy_id_table[] = {
{
.compatible = "ti,control-phy-otghs",
.data = &otghs_data,
},
{
.compatible = "ti,control-phy-usb2",
.data = &usb2_data,
},
{
.compatible = "ti,control-phy-pipe3",
.data = &pipe3_data,
},
{
.compatible = "ti,control-phy-pcie",
.data = &pcie_data,
},
{
.compatible = "ti,control-phy-usb2-dra7",
.data = &dra7usb2_data,
},
{
.compatible = "ti,control-phy-usb2-am437",
.data = &am437usb2_data,
},
{},
};
MODULE_DEVICE_TABLE(of, omap_control_phy_id_table);
static int omap_control_phy_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id;
struct omap_control_phy *control_phy;
of_id = of_match_device(omap_control_phy_id_table, &pdev->dev);
if (!of_id)
return -EINVAL;
control_phy = devm_kzalloc(&pdev->dev, sizeof(*control_phy),
GFP_KERNEL);
if (!control_phy)
return -ENOMEM;
control_phy->dev = &pdev->dev;
control_phy->type = *(enum omap_control_phy_type *)of_id->data;
if (control_phy->type == OMAP_CTRL_TYPE_OTGHS) {
control_phy->otghs_control =
devm_platform_ioremap_resource_byname(pdev, "otghs_control");
if (IS_ERR(control_phy->otghs_control))
return PTR_ERR(control_phy->otghs_control);
} else {
control_phy->power =
devm_platform_ioremap_resource_byname(pdev, "power");
if (IS_ERR(control_phy->power)) {
dev_err(&pdev->dev, "Couldn't get power register\n");
return PTR_ERR(control_phy->power);
}
}
if (control_phy->type == OMAP_CTRL_TYPE_PIPE3 ||
control_phy->type == OMAP_CTRL_TYPE_PCIE) {
control_phy->sys_clk = devm_clk_get(control_phy->dev,
"sys_clkin");
if (IS_ERR(control_phy->sys_clk)) {
pr_err("%s: unable to get sys_clkin\n", __func__);
return -EINVAL;
}
}
if (control_phy->type == OMAP_CTRL_TYPE_PCIE) {
control_phy->pcie_pcs =
devm_platform_ioremap_resource_byname(pdev, "pcie_pcs");
if (IS_ERR(control_phy->pcie_pcs))
return PTR_ERR(control_phy->pcie_pcs);
}
dev_set_drvdata(control_phy->dev, control_phy);
return 0;
}
static struct platform_driver omap_control_phy_driver = {
.probe = omap_control_phy_probe,
.driver = {
.name = "omap-control-phy",
.of_match_table = omap_control_phy_id_table,
},
};
static int __init omap_control_phy_init(void)
{
return platform_driver_register(&omap_control_phy_driver);
}
subsys_initcall(omap_control_phy_init);
static void __exit omap_control_phy_exit(void)
{
platform_driver_unregister(&omap_control_phy_driver);
}
module_exit(omap_control_phy_exit);
MODULE_ALIAS("platform:omap_control_phy");
MODULE_AUTHOR("Texas Instruments Inc.");
MODULE_DESCRIPTION("OMAP Control Module PHY Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/phy/ti/phy-omap-control.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* phy-da8xx-usb - TI DaVinci DA8xx USB PHY driver
*
* Copyright (C) 2016 David Lechner <[email protected]>
*/
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/mfd/da8xx-cfgchip.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/phy/phy.h>
#include <linux/platform_data/phy-da8xx-usb.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#define PHY_INIT_BITS (CFGCHIP2_SESENDEN | CFGCHIP2_VBDTCTEN)
struct da8xx_usb_phy {
struct phy_provider *phy_provider;
struct phy *usb11_phy;
struct phy *usb20_phy;
struct clk *usb11_clk;
struct clk *usb20_clk;
struct regmap *regmap;
};
static int da8xx_usb11_phy_power_on(struct phy *phy)
{
struct da8xx_usb_phy *d_phy = phy_get_drvdata(phy);
int ret;
ret = clk_prepare_enable(d_phy->usb11_clk);
if (ret)
return ret;
regmap_write_bits(d_phy->regmap, CFGCHIP(2), CFGCHIP2_USB1SUSPENDM,
CFGCHIP2_USB1SUSPENDM);
return 0;
}
static int da8xx_usb11_phy_power_off(struct phy *phy)
{
struct da8xx_usb_phy *d_phy = phy_get_drvdata(phy);
regmap_write_bits(d_phy->regmap, CFGCHIP(2), CFGCHIP2_USB1SUSPENDM, 0);
clk_disable_unprepare(d_phy->usb11_clk);
return 0;
}
static const struct phy_ops da8xx_usb11_phy_ops = {
.power_on = da8xx_usb11_phy_power_on,
.power_off = da8xx_usb11_phy_power_off,
.owner = THIS_MODULE,
};
static int da8xx_usb20_phy_power_on(struct phy *phy)
{
struct da8xx_usb_phy *d_phy = phy_get_drvdata(phy);
int ret;
ret = clk_prepare_enable(d_phy->usb20_clk);
if (ret)
return ret;
regmap_write_bits(d_phy->regmap, CFGCHIP(2), CFGCHIP2_OTGPWRDN, 0);
return 0;
}
static int da8xx_usb20_phy_power_off(struct phy *phy)
{
struct da8xx_usb_phy *d_phy = phy_get_drvdata(phy);
regmap_write_bits(d_phy->regmap, CFGCHIP(2), CFGCHIP2_OTGPWRDN,
CFGCHIP2_OTGPWRDN);
clk_disable_unprepare(d_phy->usb20_clk);
return 0;
}
static int da8xx_usb20_phy_set_mode(struct phy *phy,
enum phy_mode mode, int submode)
{
struct da8xx_usb_phy *d_phy = phy_get_drvdata(phy);
u32 val;
switch (mode) {
case PHY_MODE_USB_HOST: /* Force VBUS valid, ID = 0 */
val = CFGCHIP2_OTGMODE_FORCE_HOST;
break;
case PHY_MODE_USB_DEVICE: /* Force VBUS valid, ID = 1 */
val = CFGCHIP2_OTGMODE_FORCE_DEVICE;
break;
case PHY_MODE_USB_OTG: /* Don't override the VBUS/ID comparators */
val = CFGCHIP2_OTGMODE_NO_OVERRIDE;
break;
default:
return -EINVAL;
}
regmap_write_bits(d_phy->regmap, CFGCHIP(2), CFGCHIP2_OTGMODE_MASK,
val);
return 0;
}
static const struct phy_ops da8xx_usb20_phy_ops = {
.power_on = da8xx_usb20_phy_power_on,
.power_off = da8xx_usb20_phy_power_off,
.set_mode = da8xx_usb20_phy_set_mode,
.owner = THIS_MODULE,
};
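/* Translate the DT phy specifier: cell 0 selects the PHY, 0 = USB 2.0, 1 = USB 1.1. */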
static struct phy *da8xx_usb_phy_of_xlate(struct device *dev,
struct of_phandle_args *args)
{
struct da8xx_usb_phy *d_phy = dev_get_drvdata(dev);
if (!d_phy)
return ERR_PTR(-ENODEV);
switch (args->args[0]) {
case 0:
return d_phy->usb20_phy;
case 1:
return d_phy->usb11_phy;
default:
return ERR_PTR(-EINVAL);
}
}
static int da8xx_usb_phy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct da8xx_usb_phy_platform_data *pdata = dev->platform_data;
struct device_node *node = dev->of_node;
struct da8xx_usb_phy *d_phy;
d_phy = devm_kzalloc(dev, sizeof(*d_phy), GFP_KERNEL);
if (!d_phy)
return -ENOMEM;
if (pdata)
d_phy->regmap = pdata->cfgchip;
else
d_phy->regmap = syscon_regmap_lookup_by_compatible(
"ti,da830-cfgchip");
if (IS_ERR(d_phy->regmap)) {
dev_err(dev, "Failed to get syscon\n");
return PTR_ERR(d_phy->regmap);
}
d_phy->usb11_clk = devm_clk_get(dev, "usb1_clk48");
if (IS_ERR(d_phy->usb11_clk)) {
dev_err(dev, "Failed to get usb1_clk48\n");
return PTR_ERR(d_phy->usb11_clk);
}
d_phy->usb20_clk = devm_clk_get(dev, "usb0_clk48");
if (IS_ERR(d_phy->usb20_clk)) {
dev_err(dev, "Failed to get usb0_clk48\n");
return PTR_ERR(d_phy->usb20_clk);
}
d_phy->usb11_phy = devm_phy_create(dev, node, &da8xx_usb11_phy_ops);
if (IS_ERR(d_phy->usb11_phy)) {
dev_err(dev, "Failed to create usb11 phy\n");
return PTR_ERR(d_phy->usb11_phy);
}
d_phy->usb20_phy = devm_phy_create(dev, node, &da8xx_usb20_phy_ops);
if (IS_ERR(d_phy->usb20_phy)) {
dev_err(dev, "Failed to create usb20 phy\n");
return PTR_ERR(d_phy->usb20_phy);
}
platform_set_drvdata(pdev, d_phy);
phy_set_drvdata(d_phy->usb11_phy, d_phy);
phy_set_drvdata(d_phy->usb20_phy, d_phy);
if (node) {
d_phy->phy_provider = devm_of_phy_provider_register(dev,
da8xx_usb_phy_of_xlate);
if (IS_ERR(d_phy->phy_provider)) {
dev_err(dev, "Failed to create phy provider\n");
return PTR_ERR(d_phy->phy_provider);
}
} else {
int ret;
ret = phy_create_lookup(d_phy->usb11_phy, "usb-phy",
"ohci-da8xx");
if (ret)
dev_warn(dev, "Failed to create usb11 phy lookup\n");
ret = phy_create_lookup(d_phy->usb20_phy, "usb-phy",
"musb-da8xx");
if (ret)
dev_warn(dev, "Failed to create usb20 phy lookup\n");
}
regmap_write_bits(d_phy->regmap, CFGCHIP(2),
PHY_INIT_BITS, PHY_INIT_BITS);
return 0;
}
static void da8xx_usb_phy_remove(struct platform_device *pdev)
{
struct da8xx_usb_phy *d_phy = platform_get_drvdata(pdev);
if (!pdev->dev.of_node) {
phy_remove_lookup(d_phy->usb20_phy, "usb-phy", "musb-da8xx");
phy_remove_lookup(d_phy->usb11_phy, "usb-phy", "ohci-da8xx");
}
}
static const struct of_device_id da8xx_usb_phy_ids[] = {
{ .compatible = "ti,da830-usb-phy" },
{ }
};
MODULE_DEVICE_TABLE(of, da8xx_usb_phy_ids);
static struct platform_driver da8xx_usb_phy_driver = {
.probe = da8xx_usb_phy_probe,
.remove_new = da8xx_usb_phy_remove,
.driver = {
.name = "da8xx-usb-phy",
.of_match_table = da8xx_usb_phy_ids,
},
};
module_platform_driver(da8xx_usb_phy_driver);
MODULE_ALIAS("platform:da8xx-usb-phy");
MODULE_AUTHOR("David Lechner <[email protected]>");
MODULE_DESCRIPTION("TI DA8xx USB PHY driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/phy/ti/phy-da8xx-usb.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/usb/phy_companion.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/pm_runtime.h>
#include <linux/delay.h>
#include <linux/phy/phy.h>
#include <linux/of_platform.h>
#include <linux/mfd/syscon.h>
/*
* TRM has two sets of USB_CTRL registers.. The correct register bits
* are in TRM section 24.9.8.2 USB_CTRL Register. The TRM documents the
* phy as being SR70LX Synopsys USB 2.0 OTG nanoPHY. It also seems at
* least dm816x rev c ignores writes to USB_CTRL register, but the TI
* kernel is writing to those so it's possible that later revisions
 * have a working USB_CTRL register.
*
 * Also note that at least the USB_CTRL register seems to be dm816x specific
* according to the TRM. It's possible that USBPHY_CTRL is more generic,
* but that would have to be checked against the SR70LX documentation
* which does not seem to be publicly available.
*
* Finally, the phy on dm814x and am335x is different from dm816x.
*/
#define DM816X_USB_CTRL_PHYCLKSRC BIT(8) /* 1 = PLL ref clock */
#define DM816X_USB_CTRL_PHYSLEEP1 BIT(1) /* Enable the first phy */
#define DM816X_USB_CTRL_PHYSLEEP0 BIT(0) /* Enable the second phy */
#define DM816X_USBPHY_CTRL_TXRISETUNE 1
#define DM816X_USBPHY_CTRL_TXVREFTUNE 0xc
#define DM816X_USBPHY_CTRL_TXPREEMTUNE 0x2
struct dm816x_usb_phy {
struct regmap *syscon;
struct device *dev;
unsigned int instance;
struct clk *refclk;
struct usb_phy phy;
unsigned int usb_ctrl; /* Shared between phy0 and phy1 */
unsigned int usbphy_ctrl;
};
static int dm816x_usb_phy_set_host(struct usb_otg *otg, struct usb_bus *host)
{
otg->host = host;
if (!host)
otg->state = OTG_STATE_UNDEFINED;
return 0;
}
static int dm816x_usb_phy_set_peripheral(struct usb_otg *otg,
struct usb_gadget *gadget)
{
otg->gadget = gadget;
if (!gadget)
otg->state = OTG_STATE_UNDEFINED;
return 0;
}
static int dm816x_usb_phy_init(struct phy *x)
{
struct dm816x_usb_phy *phy = phy_get_drvdata(x);
unsigned int val;
if (clk_get_rate(phy->refclk) != 24000000)
dev_warn(phy->dev, "nonstandard phy refclk\n");
/* Set PLL ref clock and put phys to sleep */
regmap_update_bits(phy->syscon, phy->usb_ctrl,
DM816X_USB_CTRL_PHYCLKSRC |
DM816X_USB_CTRL_PHYSLEEP1 |
DM816X_USB_CTRL_PHYSLEEP0,
0);
regmap_read(phy->syscon, phy->usb_ctrl, &val);
if ((val & 3) != 0)
dev_info(phy->dev,
"Working dm816x USB_CTRL! (0x%08x)\n",
val);
/*
* TI kernel sets these values for "symmetrical eye diagram and
* better signal quality" so let's assume somebody checked the
* values with a scope and set them here too.
*/
regmap_read(phy->syscon, phy->usbphy_ctrl, &val);
val |= DM816X_USBPHY_CTRL_TXRISETUNE |
DM816X_USBPHY_CTRL_TXVREFTUNE |
DM816X_USBPHY_CTRL_TXPREEMTUNE;
regmap_write(phy->syscon, phy->usbphy_ctrl, val);
return 0;
}
static const struct phy_ops ops = {
.init = dm816x_usb_phy_init,
.owner = THIS_MODULE,
};
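/* Runtime PM: the phy enable bit in USB_CTRL and the reference clock are gated while suspended. */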
static int __maybe_unused dm816x_usb_phy_runtime_suspend(struct device *dev)
{
struct dm816x_usb_phy *phy = dev_get_drvdata(dev);
unsigned int mask, val;
int error = 0;
mask = BIT(phy->instance);
val = ~BIT(phy->instance);
error = regmap_update_bits(phy->syscon, phy->usb_ctrl,
mask, val);
if (error)
dev_err(phy->dev, "phy%i failed to power off\n",
phy->instance);
clk_disable(phy->refclk);
return 0;
}
static int __maybe_unused dm816x_usb_phy_runtime_resume(struct device *dev)
{
struct dm816x_usb_phy *phy = dev_get_drvdata(dev);
unsigned int mask, val;
int error;
error = clk_enable(phy->refclk);
if (error)
return error;
/*
* Note that at least dm816x rev c does not seem to do
* anything with the USB_CTRL register. But let's follow
* what the TI tree is doing in case later revisions use
* USB_CTRL.
*/
mask = BIT(phy->instance);
val = BIT(phy->instance);
error = regmap_update_bits(phy->syscon, phy->usb_ctrl,
mask, val);
if (error) {
dev_err(phy->dev, "phy%i failed to power on\n",
phy->instance);
clk_disable(phy->refclk);
return error;
}
return 0;
}
static UNIVERSAL_DEV_PM_OPS(dm816x_usb_phy_pm_ops,
dm816x_usb_phy_runtime_suspend,
dm816x_usb_phy_runtime_resume,
NULL);
#ifdef CONFIG_OF
static const struct of_device_id dm816x_usb_phy_id_table[] = {
{
.compatible = "ti,dm8168-usb-phy",
},
{},
};
MODULE_DEVICE_TABLE(of, dm816x_usb_phy_id_table);
#endif
static int dm816x_usb_phy_probe(struct platform_device *pdev)
{
struct dm816x_usb_phy *phy;
struct resource *res;
struct phy *generic_phy;
struct phy_provider *phy_provider;
struct usb_otg *otg;
const struct of_device_id *of_id;
int error;
of_id = of_match_device(of_match_ptr(dm816x_usb_phy_id_table),
&pdev->dev);
if (!of_id)
return -EINVAL;
phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL);
if (!phy)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENOENT;
phy->syscon = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
"syscon");
if (IS_ERR(phy->syscon))
return PTR_ERR(phy->syscon);
/*
* According to sprs614e.pdf, the first usb_ctrl is shared and
 * the second instance for usb_ctrl is reserved. Also the
* register bits are different from earlier TRMs.
*/
phy->usb_ctrl = 0x20;
phy->usbphy_ctrl = (res->start & 0xff) + 4;
if (phy->usbphy_ctrl == 0x2c)
phy->instance = 1;
otg = devm_kzalloc(&pdev->dev, sizeof(*otg), GFP_KERNEL);
if (!otg)
return -ENOMEM;
phy->dev = &pdev->dev;
phy->phy.dev = phy->dev;
phy->phy.label = "dm8168_usb_phy";
phy->phy.otg = otg;
phy->phy.type = USB_PHY_TYPE_USB2;
otg->set_host = dm816x_usb_phy_set_host;
otg->set_peripheral = dm816x_usb_phy_set_peripheral;
otg->usb_phy = &phy->phy;
platform_set_drvdata(pdev, phy);
phy->refclk = devm_clk_get(phy->dev, "refclk");
if (IS_ERR(phy->refclk))
return PTR_ERR(phy->refclk);
error = clk_prepare(phy->refclk);
if (error)
return error;
pm_runtime_enable(phy->dev);
generic_phy = devm_phy_create(phy->dev, NULL, &ops);
if (IS_ERR(generic_phy)) {
error = PTR_ERR(generic_phy);
goto clk_unprepare;
}
phy_set_drvdata(generic_phy, phy);
phy_provider = devm_of_phy_provider_register(phy->dev,
of_phy_simple_xlate);
if (IS_ERR(phy_provider)) {
error = PTR_ERR(phy_provider);
goto clk_unprepare;
}
usb_add_phy_dev(&phy->phy);
return 0;
clk_unprepare:
pm_runtime_disable(phy->dev);
clk_unprepare(phy->refclk);
return error;
}
static void dm816x_usb_phy_remove(struct platform_device *pdev)
{
struct dm816x_usb_phy *phy = platform_get_drvdata(pdev);
usb_remove_phy(&phy->phy);
pm_runtime_disable(phy->dev);
clk_unprepare(phy->refclk);
}
static struct platform_driver dm816x_usb_phy_driver = {
.probe = dm816x_usb_phy_probe,
.remove_new = dm816x_usb_phy_remove,
.driver = {
.name = "dm816x-usb-phy",
.pm = &dm816x_usb_phy_pm_ops,
.of_match_table = of_match_ptr(dm816x_usb_phy_id_table),
},
};
module_platform_driver(dm816x_usb_phy_driver);
MODULE_ALIAS("platform:dm816x_usb");
MODULE_AUTHOR("Tony Lindgren <[email protected]>");
MODULE_DESCRIPTION("dm816x usb phy driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/phy/ti/phy-dm816x-usb.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* twl4030_usb - TWL4030 USB transceiver, talking to OMAP OTG controller
*
* Copyright (C) 2004-2007 Texas Instruments
* Copyright (C) 2008 Nokia Corporation
* Contact: Felipe Balbi <[email protected]>
*
* Current status:
* - HS USB ULPI mode works.
* - 3-pin mode support may be added in future.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/usb/otg.h>
#include <linux/phy/phy.h>
#include <linux/pm_runtime.h>
#include <linux/usb/musb.h>
#include <linux/usb/ulpi.h>
#include <linux/mfd/twl.h>
#include <linux/regulator/consumer.h>
#include <linux/err.h>
#include <linux/slab.h>
/* Register defines */
#define MCPC_CTRL 0x30
#define MCPC_CTRL_RTSOL (1 << 7)
#define MCPC_CTRL_EXTSWR (1 << 6)
#define MCPC_CTRL_EXTSWC (1 << 5)
#define MCPC_CTRL_VOICESW (1 << 4)
#define MCPC_CTRL_OUT64K (1 << 3)
#define MCPC_CTRL_RTSCTSSW (1 << 2)
#define MCPC_CTRL_HS_UART (1 << 0)
#define MCPC_IO_CTRL 0x33
#define MCPC_IO_CTRL_MICBIASEN (1 << 5)
#define MCPC_IO_CTRL_CTS_NPU (1 << 4)
#define MCPC_IO_CTRL_RXD_PU (1 << 3)
#define MCPC_IO_CTRL_TXDTYP (1 << 2)
#define MCPC_IO_CTRL_CTSTYP (1 << 1)
#define MCPC_IO_CTRL_RTSTYP (1 << 0)
#define MCPC_CTRL2 0x36
#define MCPC_CTRL2_MCPC_CK_EN (1 << 0)
#define OTHER_FUNC_CTRL 0x80
#define OTHER_FUNC_CTRL_BDIS_ACON_EN (1 << 4)
#define OTHER_FUNC_CTRL_FIVEWIRE_MODE (1 << 2)
#define OTHER_IFC_CTRL 0x83
#define OTHER_IFC_CTRL_OE_INT_EN (1 << 6)
#define OTHER_IFC_CTRL_CEA2011_MODE (1 << 5)
#define OTHER_IFC_CTRL_FSLSSERIALMODE_4PIN (1 << 4)
#define OTHER_IFC_CTRL_HIZ_ULPI_60MHZ_OUT (1 << 3)
#define OTHER_IFC_CTRL_HIZ_ULPI (1 << 2)
#define OTHER_IFC_CTRL_ALT_INT_REROUTE (1 << 0)
#define OTHER_INT_EN_RISE 0x86
#define OTHER_INT_EN_FALL 0x89
#define OTHER_INT_STS 0x8C
#define OTHER_INT_LATCH 0x8D
#define OTHER_INT_VB_SESS_VLD (1 << 7)
#define OTHER_INT_DM_HI (1 << 6) /* not valid for "latch" reg */
#define OTHER_INT_DP_HI (1 << 5) /* not valid for "latch" reg */
#define OTHER_INT_BDIS_ACON (1 << 3) /* not valid for "fall" regs */
#define OTHER_INT_MANU (1 << 1)
#define OTHER_INT_ABNORMAL_STRESS (1 << 0)
#define ID_STATUS 0x96
#define ID_RES_FLOAT (1 << 4)
#define ID_RES_440K (1 << 3)
#define ID_RES_200K (1 << 2)
#define ID_RES_102K (1 << 1)
#define ID_RES_GND (1 << 0)
#define POWER_CTRL 0xAC
#define POWER_CTRL_OTG_ENAB (1 << 5)
#define OTHER_IFC_CTRL2 0xAF
#define OTHER_IFC_CTRL2_ULPI_STP_LOW (1 << 4)
#define OTHER_IFC_CTRL2_ULPI_TXEN_POL (1 << 3)
#define OTHER_IFC_CTRL2_ULPI_4PIN_2430 (1 << 2)
#define OTHER_IFC_CTRL2_USB_INT_OUTSEL_MASK (3 << 0) /* bits 0 and 1 */
#define OTHER_IFC_CTRL2_USB_INT_OUTSEL_INT1N (0 << 0)
#define OTHER_IFC_CTRL2_USB_INT_OUTSEL_INT2N (1 << 0)
#define REG_CTRL_EN 0xB2
#define REG_CTRL_ERROR 0xB5
#define ULPI_I2C_CONFLICT_INTEN (1 << 0)
#define OTHER_FUNC_CTRL2 0xB8
#define OTHER_FUNC_CTRL2_VBAT_TIMER_EN (1 << 0)
/* following registers do not have separate _clr and _set registers */
#define VBUS_DEBOUNCE 0xC0
#define ID_DEBOUNCE 0xC1
#define VBAT_TIMER 0xD3
#define PHY_PWR_CTRL 0xFD
#define PHY_PWR_PHYPWD (1 << 0)
#define PHY_CLK_CTRL 0xFE
#define PHY_CLK_CTRL_CLOCKGATING_EN (1 << 2)
#define PHY_CLK_CTRL_CLK32K_EN (1 << 1)
#define REQ_PHY_DPLL_CLK (1 << 0)
#define PHY_CLK_CTRL_STS 0xFF
#define PHY_DPLL_CLK (1 << 0)
/* In module TWL_MODULE_PM_MASTER */
#define STS_HW_CONDITIONS 0x0F
/* In module TWL_MODULE_PM_RECEIVER */
#define VUSB_DEDICATED1 0x7D
#define VUSB_DEDICATED2 0x7E
#define VUSB1V5_DEV_GRP 0x71
#define VUSB1V5_TYPE 0x72
#define VUSB1V5_REMAP 0x73
#define VUSB1V8_DEV_GRP 0x74
#define VUSB1V8_TYPE 0x75
#define VUSB1V8_REMAP 0x76
#define VUSB3V1_DEV_GRP 0x77
#define VUSB3V1_TYPE 0x78
#define VUSB3V1_REMAP 0x79
/* In module TWL4030_MODULE_INTBR */
#define PMBR1 0x0D
#define GPIO_USB_4PIN_ULPI_2430C (3 << 0)
static irqreturn_t twl4030_usb_irq(int irq, void *_twl);
/*
* If VBUS is valid or ID is ground, then we know a
* cable is present and we need to be runtime-enabled
*/
static inline bool cable_present(enum musb_vbus_id_status stat)
{
return stat == MUSB_VBUS_VALID ||
stat == MUSB_ID_GROUND;
}
struct twl4030_usb {
struct usb_phy phy;
struct device *dev;
/* TWL4030 internal USB regulator supplies */
struct regulator *usb1v5;
struct regulator *usb1v8;
struct regulator *usb3v1;
/* for vbus reporting with irqs disabled */
struct mutex lock;
/* pin configuration */
enum twl4030_usb_mode usb_mode;
int irq;
enum musb_vbus_id_status linkstat;
atomic_t connected;
bool vbus_supplied;
bool musb_mailbox_pending;
unsigned long runtime_suspended:1;
unsigned long needs_resume:1;
struct delayed_work id_workaround_work;
};
/* internal define on top of container_of */
#define phy_to_twl(x) container_of((x), struct twl4030_usb, phy)
/*-------------------------------------------------------------------------*/
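/*
* Write a TWL4030 USB register and read it back to verify that the value
* stuck; retry once before giving up with -EBUSY.
*/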
static int twl4030_i2c_write_u8_verify(struct twl4030_usb *twl,
u8 module, u8 data, u8 address)
{
u8 check = 0xFF;
if ((twl_i2c_write_u8(module, data, address) >= 0) &&
(twl_i2c_read_u8(module, &check, address) >= 0) &&
(check == data))
return 0;
dev_dbg(twl->dev, "Write%d[%d,0x%x] wrote %02x but read %02x\n",
1, module, address, check, data);
/* Failed once: Try again */
if ((twl_i2c_write_u8(module, data, address) >= 0) &&
(twl_i2c_read_u8(module, &check, address) >= 0) &&
(check == data))
return 0;
dev_dbg(twl->dev, "Write%d[%d,0x%x] wrote %02x but read %02x\n",
2, module, address, check, data);
/* Failed again: Return error */
return -EBUSY;
}
#define twl4030_usb_write_verify(twl, address, data) \
twl4030_i2c_write_u8_verify(twl, TWL_MODULE_USB, (data), (address))
static inline int twl4030_usb_write(struct twl4030_usb *twl,
u8 address, u8 data)
{
int ret = 0;
ret = twl_i2c_write_u8(TWL_MODULE_USB, data, address);
if (ret < 0)
dev_dbg(twl->dev,
"TWL4030:USB:Write[0x%x] Error %d\n", address, ret);
return ret;
}
static inline int twl4030_readb(struct twl4030_usb *twl, u8 module, u8 address)
{
u8 data;
int ret = 0;
ret = twl_i2c_read_u8(module, &data, address);
if (ret >= 0)
ret = data;
else
dev_dbg(twl->dev,
"TWL4030:readb[0x%x,0x%x] Error %d\n",
module, address, ret);
return ret;
}
static inline int twl4030_usb_read(struct twl4030_usb *twl, u8 address)
{
return twl4030_readb(twl, TWL_MODULE_USB, address);
}
/*-------------------------------------------------------------------------*/
static inline int
twl4030_usb_set_bits(struct twl4030_usb *twl, u8 reg, u8 bits)
{
return twl4030_usb_write(twl, ULPI_SET(reg), bits);
}
static inline int
twl4030_usb_clear_bits(struct twl4030_usb *twl, u8 reg, u8 bits)
{
return twl4030_usb_write(twl, ULPI_CLR(reg), bits);
}
/*-------------------------------------------------------------------------*/
static bool twl4030_is_driving_vbus(struct twl4030_usb *twl)
{
int ret;
ret = twl4030_usb_read(twl, PHY_CLK_CTRL_STS);
if (ret < 0 || !(ret & PHY_DPLL_CLK))
/*
* if clocks are off, registers are not updated,
* but we can assume we don't drive VBUS in this case
*/
return false;
ret = twl4030_usb_read(twl, ULPI_OTG_CTRL);
if (ret < 0)
return false;
return (ret & (ULPI_OTG_DRVVBUS | ULPI_OTG_CHRGVBUS)) ? true : false;
}
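/*
* Read STS_HW_CONDITIONS and decode its VBUS/ID bits into a
* musb_vbus_id_status value, updating twl->vbus_supplied on the way.
*/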
static enum musb_vbus_id_status
twl4030_usb_linkstat(struct twl4030_usb *twl)
{
int status;
enum musb_vbus_id_status linkstat = MUSB_UNKNOWN;
twl->vbus_supplied = false;
/*
* For ID/VBUS sensing, see manual section 15.4.8 ...
* except when using only battery backup power, two
* comparators produce VBUS_PRES and ID_PRES signals,
* which don't match docs elsewhere. But ... BIT(7)
* and BIT(2) of STS_HW_CONDITIONS, respectively, do
* seem to match up. If either is true the USB_PRES
* signal is active, the OTG module is activated, and
* its interrupt may be raised (may wake the system).
*/
status = twl4030_readb(twl, TWL_MODULE_PM_MASTER, STS_HW_CONDITIONS);
if (status < 0)
dev_err(twl->dev, "USB link status err %d\n", status);
else if (status & (BIT(7) | BIT(2))) {
if (status & BIT(7)) {
if (twl4030_is_driving_vbus(twl))
status &= ~BIT(7);
else
twl->vbus_supplied = true;
}
if (status & BIT(2))
linkstat = MUSB_ID_GROUND;
else if (status & BIT(7))
linkstat = MUSB_VBUS_VALID;
else
linkstat = MUSB_VBUS_OFF;
} else {
if (twl->linkstat != MUSB_UNKNOWN)
linkstat = MUSB_VBUS_OFF;
}
kobject_uevent(&twl->dev->kobj, linkstat == MUSB_VBUS_VALID
? KOBJ_ONLINE : KOBJ_OFFLINE);
dev_dbg(twl->dev, "HW_CONDITIONS 0x%02x/%d; link %d\n",
status, status, linkstat);
/* REVISIT this assumes host and peripheral controllers
* are registered, and that both are active...
*/
return linkstat;
}
static void twl4030_usb_set_mode(struct twl4030_usb *twl, int mode)
{
twl->usb_mode = mode;
switch (mode) {
case T2_USB_MODE_ULPI:
twl4030_usb_clear_bits(twl, ULPI_IFC_CTRL,
ULPI_IFC_CTRL_CARKITMODE);
twl4030_usb_set_bits(twl, POWER_CTRL, POWER_CTRL_OTG_ENAB);
twl4030_usb_clear_bits(twl, ULPI_FUNC_CTRL,
ULPI_FUNC_CTRL_XCVRSEL_MASK |
ULPI_FUNC_CTRL_OPMODE_MASK);
break;
case -1:
/* FIXME: power on defaults */
break;
default:
dev_err(twl->dev, "unsupported T2 transceiver mode %d\n",
mode);
break;
}
}
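/*
* Request (on != 0) or release the PHY DPLL clock so that transceiver
* registers can be accessed over I2C; waits up to one second for the
* DPLL to report it is running.
*/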
static void twl4030_i2c_access(struct twl4030_usb *twl, int on)
{
unsigned long timeout;
int val = twl4030_usb_read(twl, PHY_CLK_CTRL);
if (val >= 0) {
if (on) {
/* enable DPLL to access PHY registers over I2C */
val |= REQ_PHY_DPLL_CLK;
WARN_ON(twl4030_usb_write_verify(twl, PHY_CLK_CTRL,
(u8)val) < 0);
timeout = jiffies + HZ;
while (!(twl4030_usb_read(twl, PHY_CLK_CTRL_STS) &
PHY_DPLL_CLK)
&& time_before(jiffies, timeout))
udelay(10);
if (!(twl4030_usb_read(twl, PHY_CLK_CTRL_STS) &
PHY_DPLL_CLK))
dev_err(twl->dev, "Timeout setting T2 HSUSB "
"PHY DPLL clock\n");
} else {
/* let ULPI control the DPLL clock */
val &= ~REQ_PHY_DPLL_CLK;
WARN_ON(twl4030_usb_write_verify(twl, PHY_CLK_CTRL,
(u8)val) < 0);
}
}
}
static void __twl4030_phy_power(struct twl4030_usb *twl, int on)
{
u8 pwr = twl4030_usb_read(twl, PHY_PWR_CTRL);
if (on)
pwr &= ~PHY_PWR_PHYPWD;
else
pwr |= PHY_PWR_PHYPWD;
WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0);
}
static int twl4030_usb_runtime_suspend(struct device *dev);
static int twl4030_usb_runtime_resume(struct device *dev);
static int __maybe_unused twl4030_usb_suspend(struct device *dev)
{
struct twl4030_usb *twl = dev_get_drvdata(dev);
/*
* we need enabled runtime on resume,
* so turn irq off here, so we do not get it early
* note: wakeup on usb plug works independently of this
*/
dev_dbg(twl->dev, "%s\n", __func__);
disable_irq(twl->irq);
if (!twl->runtime_suspended && !atomic_read(&twl->connected)) {
twl4030_usb_runtime_suspend(dev);
twl->needs_resume = 1;
}
return 0;
}
static int __maybe_unused twl4030_usb_resume(struct device *dev)
{
struct twl4030_usb *twl = dev_get_drvdata(dev);
dev_dbg(twl->dev, "%s\n", __func__);
enable_irq(twl->irq);
if (twl->needs_resume)
twl4030_usb_runtime_resume(dev);
/* check whether cable status changed */
twl4030_usb_irq(0, twl);
twl->runtime_suspended = 0;
return 0;
}
static int __maybe_unused twl4030_usb_runtime_suspend(struct device *dev)
{
struct twl4030_usb *twl = dev_get_drvdata(dev);
dev_dbg(twl->dev, "%s\n", __func__);
__twl4030_phy_power(twl, 0);
regulator_disable(twl->usb1v5);
regulator_disable(twl->usb1v8);
regulator_disable(twl->usb3v1);
twl->runtime_suspended = 1;
return 0;
}
static int __maybe_unused twl4030_usb_runtime_resume(struct device *dev)
{
struct twl4030_usb *twl = dev_get_drvdata(dev);
int res;
dev_dbg(twl->dev, "%s\n", __func__);
res = regulator_enable(twl->usb3v1);
if (res)
dev_err(twl->dev, "Failed to enable usb3v1\n");
res = regulator_enable(twl->usb1v8);
if (res)
dev_err(twl->dev, "Failed to enable usb1v8\n");
/*
* Disabling usb3v1 regulator (= writing 0 to VUSB3V1_DEV_GRP
* in twl4030) resets the VUSB_DEDICATED2 register. This reset
* enables VUSB3V1_SLEEP bit that remaps usb3v1 ACTIVE state to
* SLEEP. We work around this by clearing the bit after usb3v1
* is re-activated. This ensures that VUSB3V1 is really active.
*/
twl_i2c_write_u8(TWL_MODULE_PM_RECEIVER, 0, VUSB_DEDICATED2);
res = regulator_enable(twl->usb1v5);
if (res)
dev_err(twl->dev, "Failed to enable usb1v5\n");
__twl4030_phy_power(twl, 1);
twl4030_usb_write(twl, PHY_CLK_CTRL,
twl4030_usb_read(twl, PHY_CLK_CTRL) |
(PHY_CLK_CTRL_CLOCKGATING_EN |
PHY_CLK_CTRL_CLK32K_EN));
twl4030_i2c_access(twl, 1);
twl4030_usb_set_mode(twl, twl->usb_mode);
if (twl->usb_mode == T2_USB_MODE_ULPI)
twl4030_i2c_access(twl, 0);
/*
* According to the TPS65950 TRM, there has to be at least 50ms
* delay between setting POWER_CTRL_OTG_ENAB and enabling charging
* so wait here so that a fully enabled phy can be expected after
* resume
*/
msleep(50);
return 0;
}
static int twl4030_phy_power_off(struct phy *phy)
{
struct twl4030_usb *twl = phy_get_drvdata(phy);
dev_dbg(twl->dev, "%s\n", __func__);
return 0;
}
static int twl4030_phy_power_on(struct phy *phy)
{
struct twl4030_usb *twl = phy_get_drvdata(phy);
dev_dbg(twl->dev, "%s\n", __func__);
pm_runtime_get_sync(twl->dev);
schedule_delayed_work(&twl->id_workaround_work, HZ);
pm_runtime_mark_last_busy(twl->dev);
pm_runtime_put_autosuspend(twl->dev);
return 0;
}
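/*
* Set up the TWL4030 internal USB supplies (3.1V, 1.5V and 1.8V LDOs) and
* take regulator handles for them; the PM master protection key is
* unlocked around the configuration writes.
*/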
static int twl4030_usb_ldo_init(struct twl4030_usb *twl)
{
/* Enable writing to power configuration registers */
twl_i2c_write_u8(TWL_MODULE_PM_MASTER, TWL4030_PM_MASTER_KEY_CFG1,
TWL4030_PM_MASTER_PROTECT_KEY);
twl_i2c_write_u8(TWL_MODULE_PM_MASTER, TWL4030_PM_MASTER_KEY_CFG2,
TWL4030_PM_MASTER_PROTECT_KEY);
/* Keep VUSB3V1 LDO in sleep state until VBUS/ID change detected*/
/*twl_i2c_write_u8(TWL_MODULE_PM_RECEIVER, 0, VUSB_DEDICATED2);*/
/* input to VUSB3V1 LDO is from VBAT, not VBUS */
twl_i2c_write_u8(TWL_MODULE_PM_RECEIVER, 0x14, VUSB_DEDICATED1);
/* Initialize 3.1V regulator */
twl_i2c_write_u8(TWL_MODULE_PM_RECEIVER, 0, VUSB3V1_DEV_GRP);
twl->usb3v1 = devm_regulator_get(twl->dev, "usb3v1");
if (IS_ERR(twl->usb3v1))
return -ENODEV;
twl_i2c_write_u8(TWL_MODULE_PM_RECEIVER, 0, VUSB3V1_TYPE);
/* Initialize 1.5V regulator */
twl_i2c_write_u8(TWL_MODULE_PM_RECEIVER, 0, VUSB1V5_DEV_GRP);
twl->usb1v5 = devm_regulator_get(twl->dev, "usb1v5");
if (IS_ERR(twl->usb1v5))
return -ENODEV;
twl_i2c_write_u8(TWL_MODULE_PM_RECEIVER, 0, VUSB1V5_TYPE);
/* Initialize 1.8V regulator */
twl_i2c_write_u8(TWL_MODULE_PM_RECEIVER, 0, VUSB1V8_DEV_GRP);
twl->usb1v8 = devm_regulator_get(twl->dev, "usb1v8");
if (IS_ERR(twl->usb1v8))
return -ENODEV;
twl_i2c_write_u8(TWL_MODULE_PM_RECEIVER, 0, VUSB1V8_TYPE);
/* disable access to power configuration registers */
twl_i2c_write_u8(TWL_MODULE_PM_MASTER, 0,
TWL4030_PM_MASTER_PROTECT_KEY);
return 0;
}
static ssize_t vbus_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct twl4030_usb *twl = dev_get_drvdata(dev);
int ret = -EINVAL;
mutex_lock(&twl->lock);
ret = sprintf(buf, "%s\n",
twl->vbus_supplied ? "on" : "off");
mutex_unlock(&twl->lock);
return ret;
}
static DEVICE_ATTR_RO(vbus);
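/*
* Threaded IRQ handler, also called directly with irq == 0 from resume and
* the ID workaround worker: re-reads the link status, holds a runtime PM
* reference while a cable is present and reports changes to the MUSB core
* via musb_mailbox().
*/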
static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
{
struct twl4030_usb *twl = _twl;
enum musb_vbus_id_status status;
int err;
status = twl4030_usb_linkstat(twl);
mutex_lock(&twl->lock);
twl->linkstat = status;
mutex_unlock(&twl->lock);
if (cable_present(status)) {
if (atomic_add_unless(&twl->connected, 1, 1)) {
dev_dbg(twl->dev, "%s: cable connected %i\n",
__func__, status);
pm_runtime_get_sync(twl->dev);
twl->musb_mailbox_pending = true;
}
} else {
if (atomic_add_unless(&twl->connected, -1, 0)) {
dev_dbg(twl->dev, "%s: cable disconnected %i\n",
__func__, status);
pm_runtime_mark_last_busy(twl->dev);
pm_runtime_put_autosuspend(twl->dev);
twl->musb_mailbox_pending = true;
}
}
if (twl->musb_mailbox_pending) {
err = musb_mailbox(status);
if (!err)
twl->musb_mailbox_pending = false;
}
/* don't schedule during sleep - irq works right then */
if (status == MUSB_ID_GROUND && pm_runtime_active(twl->dev)) {
cancel_delayed_work(&twl->id_workaround_work);
schedule_delayed_work(&twl->id_workaround_work, HZ);
}
if (irq)
sysfs_notify(&twl->dev->kobj, NULL, "vbus");
return IRQ_HANDLED;
}
static void twl4030_id_workaround_work(struct work_struct *work)
{
struct twl4030_usb *twl = container_of(work, struct twl4030_usb,
id_workaround_work.work);
twl4030_usb_irq(0, twl);
}
static int twl4030_phy_init(struct phy *phy)
{
struct twl4030_usb *twl = phy_get_drvdata(phy);
pm_runtime_get_sync(twl->dev);
twl->linkstat = MUSB_UNKNOWN;
schedule_delayed_work(&twl->id_workaround_work, HZ);
pm_runtime_mark_last_busy(twl->dev);
pm_runtime_put_autosuspend(twl->dev);
return 0;
}
static int twl4030_set_peripheral(struct usb_otg *otg,
struct usb_gadget *gadget)
{
if (!otg)
return -ENODEV;
otg->gadget = gadget;
if (!gadget)
otg->state = OTG_STATE_UNDEFINED;
return 0;
}
static int twl4030_set_host(struct usb_otg *otg, struct usb_bus *host)
{
if (!otg)
return -ENODEV;
otg->host = host;
if (!host)
otg->state = OTG_STATE_UNDEFINED;
return 0;
}
static const struct phy_ops ops = {
.init = twl4030_phy_init,
.power_on = twl4030_phy_power_on,
.power_off = twl4030_phy_power_off,
.owner = THIS_MODULE,
};
static const struct dev_pm_ops twl4030_usb_pm_ops = {
SET_RUNTIME_PM_OPS(twl4030_usb_runtime_suspend,
twl4030_usb_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(twl4030_usb_suspend, twl4030_usb_resume)
};
static int twl4030_usb_probe(struct platform_device *pdev)
{
struct twl4030_usb_data *pdata = dev_get_platdata(&pdev->dev);
struct twl4030_usb *twl;
struct phy *phy;
int status, err;
struct usb_otg *otg;
struct device_node *np = pdev->dev.of_node;
struct phy_provider *phy_provider;
twl = devm_kzalloc(&pdev->dev, sizeof(*twl), GFP_KERNEL);
if (!twl)
return -ENOMEM;
if (np)
of_property_read_u32(np, "usb_mode",
(enum twl4030_usb_mode *)&twl->usb_mode);
else if (pdata) {
twl->usb_mode = pdata->usb_mode;
} else {
dev_err(&pdev->dev, "twl4030 initialized without pdata\n");
return -EINVAL;
}
otg = devm_kzalloc(&pdev->dev, sizeof(*otg), GFP_KERNEL);
if (!otg)
return -ENOMEM;
twl->dev = &pdev->dev;
twl->irq = platform_get_irq(pdev, 0);
twl->vbus_supplied = false;
twl->linkstat = MUSB_UNKNOWN;
twl->musb_mailbox_pending = false;
twl->phy.dev = twl->dev;
twl->phy.label = "twl4030";
twl->phy.otg = otg;
twl->phy.type = USB_PHY_TYPE_USB2;
otg->usb_phy = &twl->phy;
otg->set_host = twl4030_set_host;
otg->set_peripheral = twl4030_set_peripheral;
phy = devm_phy_create(twl->dev, NULL, &ops);
if (IS_ERR(phy)) {
dev_dbg(&pdev->dev, "Failed to create PHY\n");
return PTR_ERR(phy);
}
phy_set_drvdata(phy, twl);
phy_provider = devm_of_phy_provider_register(twl->dev,
of_phy_simple_xlate);
if (IS_ERR(phy_provider))
return PTR_ERR(phy_provider);
/* init mutex for workqueue */
mutex_init(&twl->lock);
INIT_DELAYED_WORK(&twl->id_workaround_work, twl4030_id_workaround_work);
err = twl4030_usb_ldo_init(twl);
if (err) {
dev_err(&pdev->dev, "ldo init failed\n");
return err;
}
usb_add_phy_dev(&twl->phy);
platform_set_drvdata(pdev, twl);
if (device_create_file(&pdev->dev, &dev_attr_vbus))
dev_warn(&pdev->dev, "could not create sysfs file\n");
ATOMIC_INIT_NOTIFIER_HEAD(&twl->phy.notifier);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
/* Our job is to use irqs and status from the power module
* to keep the transceiver disabled when nothing's connected.
*
* FIXME we actually shouldn't start enabling it until the
* USB controller drivers have said they're ready, by calling
* set_host() and/or set_peripheral() ... OTG_capable boards
* need both handles, otherwise just one suffices.
*/
status = devm_request_threaded_irq(twl->dev, twl->irq, NULL,
twl4030_usb_irq, IRQF_TRIGGER_FALLING |
IRQF_TRIGGER_RISING | IRQF_ONESHOT, "twl4030_usb", twl);
if (status < 0) {
dev_dbg(&pdev->dev, "can't get IRQ %d, err %d\n",
twl->irq, status);
return status;
}
if (pdata) {
err = phy_create_lookup(phy, "usb", "musb-hdrc.0");
if (err)
return err;
}
pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(twl->dev);
dev_info(&pdev->dev, "Initialized TWL4030 USB module\n");
return 0;
}
static void twl4030_usb_remove(struct platform_device *pdev)
{
struct twl4030_usb *twl = platform_get_drvdata(pdev);
int val;
usb_remove_phy(&twl->phy);
pm_runtime_get_sync(twl->dev);
cancel_delayed_work_sync(&twl->id_workaround_work);
device_remove_file(twl->dev, &dev_attr_vbus);
/* set transceiver mode to power on defaults */
twl4030_usb_set_mode(twl, -1);
/* idle ulpi before powering off */
if (cable_present(twl->linkstat))
pm_runtime_put_noidle(twl->dev);
pm_runtime_mark_last_busy(twl->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_put_sync(twl->dev);
pm_runtime_disable(twl->dev);
/* autogate 60MHz ULPI clock,
* clear dpll clock request for i2c access,
* disable 32KHz
*/
val = twl4030_usb_read(twl, PHY_CLK_CTRL);
if (val >= 0) {
val |= PHY_CLK_CTRL_CLOCKGATING_EN;
val &= ~(PHY_CLK_CTRL_CLK32K_EN | REQ_PHY_DPLL_CLK);
twl4030_usb_write(twl, PHY_CLK_CTRL, (u8)val);
}
/* disable complete OTG block */
twl4030_usb_clear_bits(twl, POWER_CTRL, POWER_CTRL_OTG_ENAB);
}
#ifdef CONFIG_OF
static const struct of_device_id twl4030_usb_id_table[] = {
{ .compatible = "ti,twl4030-usb" },
{}
};
MODULE_DEVICE_TABLE(of, twl4030_usb_id_table);
#endif
static struct platform_driver twl4030_usb_driver = {
.probe = twl4030_usb_probe,
.remove_new = twl4030_usb_remove,
.driver = {
.name = "twl4030_usb",
.pm = &twl4030_usb_pm_ops,
.of_match_table = of_match_ptr(twl4030_usb_id_table),
},
};
static int __init twl4030_usb_init(void)
{
return platform_driver_register(&twl4030_usb_driver);
}
subsys_initcall(twl4030_usb_init);
static void __exit twl4030_usb_exit(void)
{
platform_driver_unregister(&twl4030_usb_driver);
}
module_exit(twl4030_usb_exit);
MODULE_ALIAS("platform:twl4030_usb");
MODULE_AUTHOR("Texas Instruments, Inc, Nokia Corporation");
MODULE_DESCRIPTION("TWL4030 USB transceiver driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/phy/ti/phy-twl4030-usb.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Texas Instruments CPSW Port's PHY Interface Mode selection Driver
*
* Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
*
* Based on cpsw-phy-sel.c driver created by Mugunthan V N <[email protected]>
*/
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/phy/phy.h>
#include <linux/regmap.h>
/* AM33xx SoC specific definitions for the CONTROL port */
#define AM33XX_GMII_SEL_MODE_MII 0
#define AM33XX_GMII_SEL_MODE_RMII 1
#define AM33XX_GMII_SEL_MODE_RGMII 2
/* J72xx SoC specific definitions for the CONTROL port */
#define J72XX_GMII_SEL_MODE_SGMII 3
#define J72XX_GMII_SEL_MODE_QSGMII 4
#define J72XX_GMII_SEL_MODE_USXGMII 5
#define J72XX_GMII_SEL_MODE_QSGMII_SUB 6
#define PHY_GMII_PORT(n) BIT((n) - 1)
enum {
PHY_GMII_SEL_PORT_MODE = 0,
PHY_GMII_SEL_RGMII_ID_MODE,
PHY_GMII_SEL_RMII_IO_CLK_EN,
PHY_GMII_SEL_LAST,
};
struct phy_gmii_sel_phy_priv {
struct phy_gmii_sel_priv *priv;
u32 id;
struct phy *if_phy;
int rmii_clock_external;
int phy_if_mode;
struct regmap_field *fields[PHY_GMII_SEL_LAST];
};
struct phy_gmii_sel_soc_data {
u32 num_ports;
u32 features;
const struct reg_field (*regfields)[PHY_GMII_SEL_LAST];
bool use_of_data;
u64 extra_modes;
u32 num_qsgmii_main_ports;
};
struct phy_gmii_sel_priv {
struct device *dev;
const struct phy_gmii_sel_soc_data *soc_data;
struct regmap *regmap;
struct phy_provider *phy_provider;
struct phy_gmii_sel_phy_priv *if_phys;
u32 num_ports;
u32 reg_offset;
u32 qsgmii_main_ports;
};
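/*
* Translate the requested Ethernet PHY interface submode into GMII_SEL
* register values and program the port's mode, RGMII internal delay and
* RMII clock source fields.
*/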
static int phy_gmii_sel_mode(struct phy *phy, enum phy_mode mode, int submode)
{
struct phy_gmii_sel_phy_priv *if_phy = phy_get_drvdata(phy);
const struct phy_gmii_sel_soc_data *soc_data = if_phy->priv->soc_data;
struct device *dev = if_phy->priv->dev;
struct regmap_field *regfield;
int ret, rgmii_id = 0;
u32 gmii_sel_mode = 0;
if (mode != PHY_MODE_ETHERNET)
return -EINVAL;
switch (submode) {
case PHY_INTERFACE_MODE_RMII:
gmii_sel_mode = AM33XX_GMII_SEL_MODE_RMII;
break;
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_RXID:
gmii_sel_mode = AM33XX_GMII_SEL_MODE_RGMII;
break;
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_TXID:
gmii_sel_mode = AM33XX_GMII_SEL_MODE_RGMII;
rgmii_id = 1;
break;
case PHY_INTERFACE_MODE_MII:
case PHY_INTERFACE_MODE_GMII:
gmii_sel_mode = AM33XX_GMII_SEL_MODE_MII;
break;
case PHY_INTERFACE_MODE_QSGMII:
if (!(soc_data->extra_modes & BIT(PHY_INTERFACE_MODE_QSGMII)))
goto unsupported;
if (if_phy->priv->qsgmii_main_ports & BIT(if_phy->id - 1))
gmii_sel_mode = J72XX_GMII_SEL_MODE_QSGMII;
else
gmii_sel_mode = J72XX_GMII_SEL_MODE_QSGMII_SUB;
break;
case PHY_INTERFACE_MODE_SGMII:
if (!(soc_data->extra_modes & BIT(PHY_INTERFACE_MODE_SGMII)))
goto unsupported;
else
gmii_sel_mode = J72XX_GMII_SEL_MODE_SGMII;
break;
case PHY_INTERFACE_MODE_USXGMII:
if (!(soc_data->extra_modes & BIT(PHY_INTERFACE_MODE_USXGMII)))
goto unsupported;
else
gmii_sel_mode = J72XX_GMII_SEL_MODE_USXGMII;
break;
default:
goto unsupported;
}
if_phy->phy_if_mode = submode;
dev_dbg(dev, "%s id:%u mode:%u rgmii_id:%d rmii_clk_ext:%d\n",
__func__, if_phy->id, submode, rgmii_id,
if_phy->rmii_clock_external);
regfield = if_phy->fields[PHY_GMII_SEL_PORT_MODE];
ret = regmap_field_write(regfield, gmii_sel_mode);
if (ret) {
dev_err(dev, "port%u: set mode fail %d", if_phy->id, ret);
return ret;
}
if (soc_data->features & BIT(PHY_GMII_SEL_RGMII_ID_MODE) &&
if_phy->fields[PHY_GMII_SEL_RGMII_ID_MODE]) {
regfield = if_phy->fields[PHY_GMII_SEL_RGMII_ID_MODE];
ret = regmap_field_write(regfield, rgmii_id);
if (ret)
return ret;
}
if (soc_data->features & BIT(PHY_GMII_SEL_RMII_IO_CLK_EN) &&
if_phy->fields[PHY_GMII_SEL_RMII_IO_CLK_EN]) {
regfield = if_phy->fields[PHY_GMII_SEL_RMII_IO_CLK_EN];
ret = regmap_field_write(regfield,
if_phy->rmii_clock_external);
}
return 0;
unsupported:
dev_warn(dev, "port%u: unsupported mode: \"%s\"\n",
if_phy->id, phy_modes(submode));
return -EINVAL;
}
static const
struct reg_field phy_gmii_sel_fields_am33xx[][PHY_GMII_SEL_LAST] = {
{
[PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x650, 0, 1),
[PHY_GMII_SEL_RGMII_ID_MODE] = REG_FIELD(0x650, 4, 4),
[PHY_GMII_SEL_RMII_IO_CLK_EN] = REG_FIELD(0x650, 6, 6),
},
{
[PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x650, 2, 3),
[PHY_GMII_SEL_RGMII_ID_MODE] = REG_FIELD(0x650, 5, 5),
[PHY_GMII_SEL_RMII_IO_CLK_EN] = REG_FIELD(0x650, 7, 7),
},
};
static const
struct phy_gmii_sel_soc_data phy_gmii_sel_soc_am33xx = {
.num_ports = 2,
.features = BIT(PHY_GMII_SEL_RGMII_ID_MODE) |
BIT(PHY_GMII_SEL_RMII_IO_CLK_EN),
.regfields = phy_gmii_sel_fields_am33xx,
};
static const
struct reg_field phy_gmii_sel_fields_dra7[][PHY_GMII_SEL_LAST] = {
{
[PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x554, 0, 1),
},
{
[PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x554, 4, 5),
},
};
static const
struct phy_gmii_sel_soc_data phy_gmii_sel_soc_dra7 = {
.num_ports = 2,
.regfields = phy_gmii_sel_fields_dra7,
};
static const
struct phy_gmii_sel_soc_data phy_gmii_sel_soc_dm814 = {
.num_ports = 2,
.features = BIT(PHY_GMII_SEL_RGMII_ID_MODE),
.regfields = phy_gmii_sel_fields_am33xx,
};
static const
struct reg_field phy_gmii_sel_fields_am654[][PHY_GMII_SEL_LAST] = {
{ [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x0, 0, 2), },
{ [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x4, 0, 2), },
{ [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x8, 0, 2), },
{ [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0xC, 0, 2), },
{ [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x10, 0, 2), },
{ [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x14, 0, 2), },
{ [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x18, 0, 2), },
{ [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x1C, 0, 2), },
};
static const
struct phy_gmii_sel_soc_data phy_gmii_sel_soc_am654 = {
.use_of_data = true,
.regfields = phy_gmii_sel_fields_am654,
};
static const
struct phy_gmii_sel_soc_data phy_gmii_sel_cpsw5g_soc_j7200 = {
.use_of_data = true,
.regfields = phy_gmii_sel_fields_am654,
.extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII),
.num_ports = 4,
.num_qsgmii_main_ports = 1,
};
static const
struct phy_gmii_sel_soc_data phy_gmii_sel_cpsw9g_soc_j721e = {
.use_of_data = true,
.regfields = phy_gmii_sel_fields_am654,
.extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII),
.num_ports = 8,
.num_qsgmii_main_ports = 2,
};
static const
struct phy_gmii_sel_soc_data phy_gmii_sel_cpsw9g_soc_j784s4 = {
.use_of_data = true,
.regfields = phy_gmii_sel_fields_am654,
.extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) |
BIT(PHY_INTERFACE_MODE_USXGMII),
.num_ports = 8,
.num_qsgmii_main_ports = 2,
};
static const struct of_device_id phy_gmii_sel_id_table[] = {
{
.compatible = "ti,am3352-phy-gmii-sel",
.data = &phy_gmii_sel_soc_am33xx,
},
{
.compatible = "ti,dra7xx-phy-gmii-sel",
.data = &phy_gmii_sel_soc_dra7,
},
{
.compatible = "ti,am43xx-phy-gmii-sel",
.data = &phy_gmii_sel_soc_am33xx,
},
{
.compatible = "ti,dm814-phy-gmii-sel",
.data = &phy_gmii_sel_soc_dm814,
},
{
.compatible = "ti,am654-phy-gmii-sel",
.data = &phy_gmii_sel_soc_am654,
},
{
.compatible = "ti,j7200-cpsw5g-phy-gmii-sel",
.data = &phy_gmii_sel_cpsw5g_soc_j7200,
},
{
.compatible = "ti,j721e-cpsw9g-phy-gmii-sel",
.data = &phy_gmii_sel_cpsw9g_soc_j721e,
},
{
.compatible = "ti,j784s4-cpsw9g-phy-gmii-sel",
.data = &phy_gmii_sel_cpsw9g_soc_j784s4,
},
{}
};
MODULE_DEVICE_TABLE(of, phy_gmii_sel_id_table);
static const struct phy_ops phy_gmii_sel_ops = {
.set_mode = phy_gmii_sel_mode,
.owner = THIS_MODULE,
};
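/*
* of_xlate callback: cell 0 selects the 1-based port number; an optional
* second cell selects an external RMII reference clock on SoCs that
* support it.
*/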
static struct phy *phy_gmii_sel_of_xlate(struct device *dev,
struct of_phandle_args *args)
{
struct phy_gmii_sel_priv *priv = dev_get_drvdata(dev);
int phy_id;
if (args->args_count < 1)
return ERR_PTR(-EINVAL);
phy_id = args->args[0];
if (!priv || !priv->if_phys)
return ERR_PTR(-ENODEV);
if (priv->soc_data->features & BIT(PHY_GMII_SEL_RMII_IO_CLK_EN) &&
args->args_count < 2)
return ERR_PTR(-EINVAL);
if (phy_id > priv->num_ports)
return ERR_PTR(-EINVAL);
if (phy_id != priv->if_phys[phy_id - 1].id)
return ERR_PTR(-EINVAL);
phy_id--;
if (priv->soc_data->features & BIT(PHY_GMII_SEL_RMII_IO_CLK_EN))
priv->if_phys[phy_id].rmii_clock_external = args->args[1];
dev_dbg(dev, "%s id:%u ext:%d\n", __func__,
priv->if_phys[phy_id].id, args->args[1]);
return priv->if_phys[phy_id].if_phy;
}
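/*
* Allocate the regmap fields for one port, applying this instance's
* register offset, and create the generic PHY for it.
*/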
static int phy_gmii_init_phy(struct phy_gmii_sel_priv *priv, int port,
struct phy_gmii_sel_phy_priv *if_phy)
{
const struct phy_gmii_sel_soc_data *soc_data = priv->soc_data;
struct device *dev = priv->dev;
const struct reg_field *fields;
struct regmap_field *regfield;
struct reg_field field;
int ret;
if_phy->id = port;
if_phy->priv = priv;
fields = soc_data->regfields[port - 1];
field = *fields++;
field.reg += priv->reg_offset;
dev_dbg(dev, "%s field %x %d %d\n", __func__,
field.reg, field.msb, field.lsb);
regfield = devm_regmap_field_alloc(dev, priv->regmap, field);
if (IS_ERR(regfield))
return PTR_ERR(regfield);
if_phy->fields[PHY_GMII_SEL_PORT_MODE] = regfield;
field = *fields++;
field.reg += priv->reg_offset;
if (soc_data->features & BIT(PHY_GMII_SEL_RGMII_ID_MODE)) {
regfield = devm_regmap_field_alloc(dev,
priv->regmap,
field);
if (IS_ERR(regfield))
return PTR_ERR(regfield);
if_phy->fields[PHY_GMII_SEL_RGMII_ID_MODE] = regfield;
dev_dbg(dev, "%s field %x %d %d\n", __func__,
field.reg, field.msb, field.lsb);
}
field = *fields;
field.reg += priv->reg_offset;
if (soc_data->features & BIT(PHY_GMII_SEL_RMII_IO_CLK_EN)) {
regfield = devm_regmap_field_alloc(dev,
priv->regmap,
field);
if (IS_ERR(regfield))
return PTR_ERR(regfield);
if_phy->fields[PHY_GMII_SEL_RMII_IO_CLK_EN] = regfield;
dev_dbg(dev, "%s field %x %d %d\n", __func__,
field.reg, field.msb, field.lsb);
}
if_phy->if_phy = devm_phy_create(dev,
priv->dev->of_node,
&phy_gmii_sel_ops);
if (IS_ERR(if_phy->if_phy)) {
ret = PTR_ERR(if_phy->if_phy);
dev_err(dev, "Failed to create phy%d %d\n", port, ret);
return ret;
}
phy_set_drvdata(if_phy->if_phy, if_phy);
return 0;
}
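/*
* Determine how many ports this instance controls (from the size of the
* DT "reg" property when use_of_data is set, otherwise from the SoC data)
* and initialise a PHY for each of them.
*/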
static int phy_gmii_sel_init_ports(struct phy_gmii_sel_priv *priv)
{
const struct phy_gmii_sel_soc_data *soc_data = priv->soc_data;
struct phy_gmii_sel_phy_priv *if_phys;
struct device *dev = priv->dev;
int i, ret;
if (soc_data->use_of_data) {
const __be32 *offset;
u64 size;
offset = of_get_address(dev->of_node, 0, &size, NULL);
if (!offset)
return -EINVAL;
priv->num_ports = size / sizeof(u32);
if (!priv->num_ports)
return -EINVAL;
priv->reg_offset = __be32_to_cpu(*offset);
}
if_phys = devm_kcalloc(dev, priv->num_ports,
sizeof(*if_phys), GFP_KERNEL);
if (!if_phys)
return -ENOMEM;
dev_dbg(dev, "%s %d\n", __func__, priv->num_ports);
for (i = 0; i < priv->num_ports; i++) {
ret = phy_gmii_init_phy(priv, i + 1, &if_phys[i]);
if (ret)
return ret;
}
priv->if_phys = if_phys;
return 0;
}
static int phy_gmii_sel_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct phy_gmii_sel_soc_data *soc_data;
struct device_node *node = dev->of_node;
const struct of_device_id *of_id;
struct phy_gmii_sel_priv *priv;
u32 main_ports = 1;
int ret;
u32 i;
of_id = of_match_node(phy_gmii_sel_id_table, pdev->dev.of_node);
if (!of_id)
return -EINVAL;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->dev = &pdev->dev;
priv->soc_data = of_id->data;
soc_data = priv->soc_data;
priv->num_ports = priv->soc_data->num_ports;
priv->qsgmii_main_ports = 0;
/*
* Based on the compatible, try to read the appropriate number of
* QSGMII main ports from the "ti,qsgmii-main-ports" property from
* the device-tree node.
*/
for (i = 0; i < soc_data->num_qsgmii_main_ports; i++) {
of_property_read_u32_index(node, "ti,qsgmii-main-ports", i, &main_ports);
/*
* Ensure that main_ports is within bounds.
*/
if (main_ports < 1 || main_ports > soc_data->num_ports) {
dev_err(dev, "Invalid qsgmii main port provided\n");
return -EINVAL;
}
priv->qsgmii_main_ports |= PHY_GMII_PORT(main_ports);
}
priv->regmap = syscon_node_to_regmap(node->parent);
if (IS_ERR(priv->regmap)) {
priv->regmap = device_node_to_regmap(node);
if (IS_ERR(priv->regmap)) {
ret = PTR_ERR(priv->regmap);
dev_err(dev, "Failed to get syscon %d\n", ret);
return ret;
}
}
ret = phy_gmii_sel_init_ports(priv);
if (ret)
return ret;
dev_set_drvdata(&pdev->dev, priv);
priv->phy_provider =
devm_of_phy_provider_register(dev,
phy_gmii_sel_of_xlate);
if (IS_ERR(priv->phy_provider)) {
ret = PTR_ERR(priv->phy_provider);
dev_err(dev, "Failed to create phy provider %d\n", ret);
return ret;
}
return 0;
}
static struct platform_driver phy_gmii_sel_driver = {
.probe = phy_gmii_sel_probe,
.driver = {
.name = "phy-gmii-sel",
.of_match_table = phy_gmii_sel_id_table,
},
};
module_platform_driver(phy_gmii_sel_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Grygorii Strashko <[email protected]>");
MODULE_DESCRIPTION("TI CPSW Port's PHY Interface Mode selection Driver");
| linux-master | drivers/phy/ti/phy-gmii-sel.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* phy-ti-pipe3 - PIPE3 PHY driver.
*
* Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
* Author: Kishon Vijay Abraham I <[email protected]>
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/phy/phy.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/delay.h>
#include <linux/phy/omap_control_phy.h>
#include <linux/of_platform.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#define PLL_STATUS 0x00000004
#define PLL_GO 0x00000008
#define PLL_CONFIGURATION1 0x0000000C
#define PLL_CONFIGURATION2 0x00000010
#define PLL_CONFIGURATION3 0x00000014
#define PLL_CONFIGURATION4 0x00000020
#define PLL_REGM_MASK 0x001FFE00
#define PLL_REGM_SHIFT 0x9
#define PLL_REGM_F_MASK 0x0003FFFF
#define PLL_REGM_F_SHIFT 0x0
#define PLL_REGN_MASK 0x000001FE
#define PLL_REGN_SHIFT 0x1
#define PLL_SELFREQDCO_MASK 0x0000000E
#define PLL_SELFREQDCO_SHIFT 0x1
#define PLL_SD_MASK 0x0003FC00
#define PLL_SD_SHIFT 10
#define SET_PLL_GO 0x1
#define PLL_LDOPWDN BIT(15)
#define PLL_TICOPWDN BIT(16)
#define PLL_LOCK 0x2
#define PLL_IDLE 0x1
#define SATA_PLL_SOFT_RESET BIT(18)
#define PIPE3_PHY_PWRCTL_CLK_CMD_MASK GENMASK(21, 14)
#define PIPE3_PHY_PWRCTL_CLK_CMD_SHIFT 14
#define PIPE3_PHY_PWRCTL_CLK_FREQ_MASK GENMASK(31, 22)
#define PIPE3_PHY_PWRCTL_CLK_FREQ_SHIFT 22
#define PIPE3_PHY_RX_POWERON (0x1 << PIPE3_PHY_PWRCTL_CLK_CMD_SHIFT)
#define PIPE3_PHY_TX_POWERON (0x2 << PIPE3_PHY_PWRCTL_CLK_CMD_SHIFT)
#define PCIE_PCS_MASK 0xFF0000
#define PCIE_PCS_DELAY_COUNT_SHIFT 0x10
#define PIPE3_PHY_RX_ANA_PROGRAMMABILITY 0x0000000C
#define INTERFACE_MASK GENMASK(31, 27)
#define INTERFACE_SHIFT 27
#define INTERFACE_MODE_USBSS BIT(4)
#define INTERFACE_MODE_SATA_1P5 BIT(3)
#define INTERFACE_MODE_SATA_3P0 BIT(2)
#define INTERFACE_MODE_PCIE BIT(0)
#define LOSD_MASK GENMASK(17, 14)
#define LOSD_SHIFT 14
#define MEM_PLLDIV GENMASK(6, 5)
#define PIPE3_PHY_RX_TRIM 0x0000001C
#define MEM_DLL_TRIM_SEL_MASK GENMASK(31, 30)
#define MEM_DLL_TRIM_SHIFT 30
#define PIPE3_PHY_RX_DLL 0x00000024
#define MEM_DLL_PHINT_RATE_MASK GENMASK(31, 30)
#define MEM_DLL_PHINT_RATE_SHIFT 30
#define PIPE3_PHY_RX_DIGITAL_MODES 0x00000028
#define MEM_HS_RATE_MASK GENMASK(28, 27)
#define MEM_HS_RATE_SHIFT 27
#define MEM_OVRD_HS_RATE BIT(26)
#define MEM_OVRD_HS_RATE_SHIFT 26
#define MEM_CDR_FASTLOCK BIT(23)
#define MEM_CDR_FASTLOCK_SHIFT 23
#define MEM_CDR_LBW_MASK GENMASK(22, 21)
#define MEM_CDR_LBW_SHIFT 21
#define MEM_CDR_STEPCNT_MASK GENMASK(20, 19)
#define MEM_CDR_STEPCNT_SHIFT 19
#define MEM_CDR_STL_MASK GENMASK(18, 16)
#define MEM_CDR_STL_SHIFT 16
#define MEM_CDR_THR_MASK GENMASK(15, 13)
#define MEM_CDR_THR_SHIFT 13
#define MEM_CDR_THR_MODE BIT(12)
#define MEM_CDR_THR_MODE_SHIFT 12
#define MEM_CDR_2NDO_SDM_MODE BIT(11)
#define MEM_CDR_2NDO_SDM_MODE_SHIFT 11
#define PIPE3_PHY_RX_EQUALIZER 0x00000038
#define MEM_EQLEV_MASK GENMASK(31, 16)
#define MEM_EQLEV_SHIFT 16
#define MEM_EQFTC_MASK GENMASK(15, 11)
#define MEM_EQFTC_SHIFT 11
#define MEM_EQCTL_MASK GENMASK(10, 7)
#define MEM_EQCTL_SHIFT 7
#define MEM_OVRD_EQLEV BIT(2)
#define MEM_OVRD_EQLEV_SHIFT 2
#define MEM_OVRD_EQFTC BIT(1)
#define MEM_OVRD_EQFTC_SHIFT 1
#define SATA_PHY_RX_IO_AND_A2D_OVERRIDES 0x44
#define MEM_CDR_LOS_SOURCE_MASK GENMASK(10, 9)
#define MEM_CDR_LOS_SOURCE_SHIFT 9
/*
* This is an empirical value that works; the actual value required for
* the PIPE3PHY_PLL_CONFIGURATION2.PLL_IDLE status to be correctly
* reflected in the PIPE3PHY_PLL_STATUS register still needs to be
* confirmed.
*/
#define PLL_IDLE_TIME 100 /* in milliseconds */
#define PLL_LOCK_TIME 100 /* in milliseconds */
enum pipe3_mode { PIPE3_MODE_PCIE = 1,
PIPE3_MODE_SATA,
PIPE3_MODE_USBSS };
struct pipe3_dpll_params {
u16 m;
u8 n;
u8 freq:3;
u8 sd;
u32 mf;
};
struct pipe3_dpll_map {
unsigned long rate;
struct pipe3_dpll_params params;
};
struct pipe3_settings {
u8 ana_interface;
u8 ana_losd;
u8 dig_fastlock;
u8 dig_lbw;
u8 dig_stepcnt;
u8 dig_stl;
u8 dig_thr;
u8 dig_thr_mode;
u8 dig_2ndo_sdm_mode;
u8 dig_hs_rate;
u8 dig_ovrd_hs_rate;
u8 dll_trim_sel;
u8 dll_phint_rate;
u8 eq_lev;
u8 eq_ftc;
u8 eq_ctl;
u8 eq_ovrd_lev;
u8 eq_ovrd_ftc;
};
struct ti_pipe3 {
void __iomem *pll_ctrl_base;
void __iomem *phy_rx;
void __iomem *phy_tx;
struct device *dev;
struct device *control_dev;
struct clk *wkupclk;
struct clk *sys_clk;
struct clk *refclk;
struct clk *div_clk;
struct pipe3_dpll_map *dpll_map;
struct regmap *phy_power_syscon; /* ctrl. reg. access */
struct regmap *pcs_syscon; /* ctrl. reg. access */
struct regmap *dpll_reset_syscon; /* ctrl. reg. access */
unsigned int dpll_reset_reg; /* reg. index within syscon */
unsigned int power_reg; /* power reg. index within syscon */
unsigned int pcie_pcs_reg; /* pcs reg. index in syscon */
bool sata_refclk_enabled;
enum pipe3_mode mode;
struct pipe3_settings settings;
};
static struct pipe3_dpll_map dpll_map_usb[] = {
{12000000, {1250, 5, 4, 20, 0} }, /* 12 MHz */
{16800000, {3125, 20, 4, 20, 0} }, /* 16.8 MHz */
{19200000, {1172, 8, 4, 20, 65537} }, /* 19.2 MHz */
{20000000, {1000, 7, 4, 10, 0} }, /* 20 MHz */
{26000000, {1250, 12, 4, 20, 0} }, /* 26 MHz */
{38400000, {3125, 47, 4, 20, 92843} }, /* 38.4 MHz */
{ }, /* Terminator */
};
static struct pipe3_dpll_map dpll_map_sata[] = {
{12000000, {625, 4, 4, 6, 0} }, /* 12 MHz */
{16800000, {625, 6, 4, 7, 0} }, /* 16.8 MHz */
{19200000, {625, 7, 4, 6, 0} }, /* 19.2 MHz */
{20000000, {750, 9, 4, 6, 0} }, /* 20 MHz */
{26000000, {750, 12, 4, 6, 0} }, /* 26 MHz */
{38400000, {625, 15, 4, 6, 0} }, /* 38.4 MHz */
{ }, /* Terminator */
};
struct pipe3_data {
enum pipe3_mode mode;
struct pipe3_dpll_map *dpll_map;
struct pipe3_settings settings;
};
static struct pipe3_data data_usb = {
.mode = PIPE3_MODE_USBSS,
.dpll_map = dpll_map_usb,
.settings = {
/* DRA75x TRM Table 26-17 Preferred USB3_PHY_RX SCP Register Settings */
.ana_interface = INTERFACE_MODE_USBSS,
.ana_losd = 0xa,
.dig_fastlock = 1,
.dig_lbw = 3,
.dig_stepcnt = 0,
.dig_stl = 0x3,
.dig_thr = 1,
.dig_thr_mode = 1,
.dig_2ndo_sdm_mode = 0,
.dig_hs_rate = 0,
.dig_ovrd_hs_rate = 1,
.dll_trim_sel = 0x2,
.dll_phint_rate = 0x3,
.eq_lev = 0,
.eq_ftc = 0,
.eq_ctl = 0x9,
.eq_ovrd_lev = 0,
.eq_ovrd_ftc = 0,
},
};
static struct pipe3_data data_sata = {
.mode = PIPE3_MODE_SATA,
.dpll_map = dpll_map_sata,
.settings = {
/* DRA75x TRM Table 26-9 Preferred SATA_PHY_RX SCP Register Settings */
.ana_interface = INTERFACE_MODE_SATA_3P0,
.ana_losd = 0x5,
.dig_fastlock = 1,
.dig_lbw = 3,
.dig_stepcnt = 0,
.dig_stl = 0x3,
.dig_thr = 1,
.dig_thr_mode = 1,
.dig_2ndo_sdm_mode = 0,
.dig_hs_rate = 0, /* Not in TRM preferred settings */
.dig_ovrd_hs_rate = 0, /* Not in TRM preferred settings */
.dll_trim_sel = 0x1,
.dll_phint_rate = 0x2, /* for 1.5 GHz DPLL clock */
.eq_lev = 0,
.eq_ftc = 0x1f,
.eq_ctl = 0,
.eq_ovrd_lev = 1,
.eq_ovrd_ftc = 1,
},
};
static struct pipe3_data data_pcie = {
.mode = PIPE3_MODE_PCIE,
.settings = {
/* DRA75x TRM Table 26-62 Preferred PCIe_PHY_RX SCP Register Settings */
.ana_interface = INTERFACE_MODE_PCIE,
.ana_losd = 0xa,
.dig_fastlock = 1,
.dig_lbw = 3,
.dig_stepcnt = 0,
.dig_stl = 0x3,
.dig_thr = 1,
.dig_thr_mode = 1,
.dig_2ndo_sdm_mode = 0,
.dig_hs_rate = 0,
.dig_ovrd_hs_rate = 0,
.dll_trim_sel = 0x2,
.dll_phint_rate = 0x3,
.eq_lev = 0,
.eq_ftc = 0x1f,
.eq_ctl = 1,
.eq_ovrd_lev = 0,
.eq_ovrd_ftc = 0,
},
};
static inline u32 ti_pipe3_readl(void __iomem *addr, unsigned offset)
{
return __raw_readl(addr + offset);
}
static inline void ti_pipe3_writel(void __iomem *addr, unsigned offset,
u32 data)
{
__raw_writel(data, addr + offset);
}
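/*
* Look up the DPLL divider/multiplier set matching the current SYS_CLK
* rate in the per-mode table.
*/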
static struct pipe3_dpll_params *ti_pipe3_get_dpll_params(struct ti_pipe3 *phy)
{
unsigned long rate;
struct pipe3_dpll_map *dpll_map = phy->dpll_map;
rate = clk_get_rate(phy->sys_clk);
for (; dpll_map->rate; dpll_map++) {
if (rate == dpll_map->rate)
return &dpll_map->params;
}
dev_err(phy->dev, "No DPLL configuration for %lu Hz SYS CLK\n", rate);
return NULL;
}
static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy);
static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy);
static int ti_pipe3_power_off(struct phy *x)
{
int ret;
struct ti_pipe3 *phy = phy_get_drvdata(x);
if (!phy->phy_power_syscon) {
omap_control_phy_power(phy->control_dev, 0);
return 0;
}
ret = regmap_update_bits(phy->phy_power_syscon, phy->power_reg,
PIPE3_PHY_PWRCTL_CLK_CMD_MASK, 0);
return ret;
}
static void ti_pipe3_calibrate(struct ti_pipe3 *phy);
static int ti_pipe3_power_on(struct phy *x)
{
u32 val;
u32 mask;
unsigned long rate;
struct ti_pipe3 *phy = phy_get_drvdata(x);
bool rx_pending = false;
if (!phy->phy_power_syscon) {
omap_control_phy_power(phy->control_dev, 1);
return 0;
}
rate = clk_get_rate(phy->sys_clk);
if (!rate) {
dev_err(phy->dev, "Invalid clock rate\n");
return -EINVAL;
}
rate = rate / 1000000;
mask = OMAP_CTRL_PIPE3_PHY_PWRCTL_CLK_FREQ_MASK;
val = rate << OMAP_CTRL_PIPE3_PHY_PWRCTL_CLK_FREQ_SHIFT;
regmap_update_bits(phy->phy_power_syscon, phy->power_reg,
mask, val);
/*
* For PCIe, TX and RX must be powered on simultaneously.
* For USB and SATA, TX must be powered on before RX
*/
mask = OMAP_CTRL_PIPE3_PHY_PWRCTL_CLK_CMD_MASK;
if (phy->mode == PIPE3_MODE_SATA || phy->mode == PIPE3_MODE_USBSS) {
val = PIPE3_PHY_TX_POWERON;
rx_pending = true;
} else {
val = PIPE3_PHY_TX_POWERON | PIPE3_PHY_RX_POWERON;
}
regmap_update_bits(phy->phy_power_syscon, phy->power_reg,
mask, val);
if (rx_pending) {
val = PIPE3_PHY_TX_POWERON | PIPE3_PHY_RX_POWERON;
regmap_update_bits(phy->phy_power_syscon, phy->power_reg,
mask, val);
}
if (phy->mode == PIPE3_MODE_PCIE)
ti_pipe3_calibrate(phy);
return 0;
}
static int ti_pipe3_dpll_wait_lock(struct ti_pipe3 *phy)
{
u32 val;
unsigned long timeout;
timeout = jiffies + msecs_to_jiffies(PLL_LOCK_TIME);
do {
cpu_relax();
val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS);
if (val & PLL_LOCK)
return 0;
} while (!time_after(jiffies, timeout));
dev_err(phy->dev, "DPLL failed to lock\n");
return -EBUSY;
}
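/*
* Program the DPLL N, M, fractional M, SELFREQDCO and SD fields for the
* current SYS_CLK rate, start the PLL and wait for it to lock.
*/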
static int ti_pipe3_dpll_program(struct ti_pipe3 *phy)
{
u32 val;
struct pipe3_dpll_params *dpll_params;
dpll_params = ti_pipe3_get_dpll_params(phy);
if (!dpll_params)
return -EINVAL;
val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION1);
val &= ~PLL_REGN_MASK;
val |= dpll_params->n << PLL_REGN_SHIFT;
ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION1, val);
val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2);
val &= ~PLL_SELFREQDCO_MASK;
val |= dpll_params->freq << PLL_SELFREQDCO_SHIFT;
ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION2, val);
val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION1);
val &= ~PLL_REGM_MASK;
val |= dpll_params->m << PLL_REGM_SHIFT;
ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION1, val);
val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION4);
val &= ~PLL_REGM_F_MASK;
val |= dpll_params->mf << PLL_REGM_F_SHIFT;
ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION4, val);
val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION3);
val &= ~PLL_SD_MASK;
val |= dpll_params->sd << PLL_SD_SHIFT;
ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION3, val);
ti_pipe3_writel(phy->pll_ctrl_base, PLL_GO, SET_PLL_GO);
return ti_pipe3_dpll_wait_lock(phy);
}
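/*
* Apply the per-mode RX SCP settings (analog programmability, CDR, DLL
* trim and equalizer values) taken from the DRA75x TRM preferred-settings
* tables.
*/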
static void ti_pipe3_calibrate(struct ti_pipe3 *phy)
{
u32 val;
struct pipe3_settings *s = &phy->settings;
val = ti_pipe3_readl(phy->phy_rx, PIPE3_PHY_RX_ANA_PROGRAMMABILITY);
val &= ~(INTERFACE_MASK | LOSD_MASK | MEM_PLLDIV);
val |= (s->ana_interface << INTERFACE_SHIFT | s->ana_losd << LOSD_SHIFT);
ti_pipe3_writel(phy->phy_rx, PIPE3_PHY_RX_ANA_PROGRAMMABILITY, val);
val = ti_pipe3_readl(phy->phy_rx, PIPE3_PHY_RX_DIGITAL_MODES);
val &= ~(MEM_HS_RATE_MASK | MEM_OVRD_HS_RATE | MEM_CDR_FASTLOCK |
MEM_CDR_LBW_MASK | MEM_CDR_STEPCNT_MASK | MEM_CDR_STL_MASK |
MEM_CDR_THR_MASK | MEM_CDR_THR_MODE | MEM_CDR_2NDO_SDM_MODE);
val |= s->dig_hs_rate << MEM_HS_RATE_SHIFT |
s->dig_ovrd_hs_rate << MEM_OVRD_HS_RATE_SHIFT |
s->dig_fastlock << MEM_CDR_FASTLOCK_SHIFT |
s->dig_lbw << MEM_CDR_LBW_SHIFT |
s->dig_stepcnt << MEM_CDR_STEPCNT_SHIFT |
s->dig_stl << MEM_CDR_STL_SHIFT |
s->dig_thr << MEM_CDR_THR_SHIFT |
s->dig_thr_mode << MEM_CDR_THR_MODE_SHIFT |
s->dig_2ndo_sdm_mode << MEM_CDR_2NDO_SDM_MODE_SHIFT;
ti_pipe3_writel(phy->phy_rx, PIPE3_PHY_RX_DIGITAL_MODES, val);
val = ti_pipe3_readl(phy->phy_rx, PIPE3_PHY_RX_TRIM);
val &= ~MEM_DLL_TRIM_SEL_MASK;
val |= s->dll_trim_sel << MEM_DLL_TRIM_SHIFT;
ti_pipe3_writel(phy->phy_rx, PIPE3_PHY_RX_TRIM, val);
val = ti_pipe3_readl(phy->phy_rx, PIPE3_PHY_RX_DLL);
val &= ~MEM_DLL_PHINT_RATE_MASK;
val |= s->dll_phint_rate << MEM_DLL_PHINT_RATE_SHIFT;
ti_pipe3_writel(phy->phy_rx, PIPE3_PHY_RX_DLL, val);
val = ti_pipe3_readl(phy->phy_rx, PIPE3_PHY_RX_EQUALIZER);
val &= ~(MEM_EQLEV_MASK | MEM_EQFTC_MASK | MEM_EQCTL_MASK |
MEM_OVRD_EQLEV | MEM_OVRD_EQFTC);
val |= s->eq_lev << MEM_EQLEV_SHIFT |
s->eq_ftc << MEM_EQFTC_SHIFT |
s->eq_ctl << MEM_EQCTL_SHIFT |
s->eq_ovrd_lev << MEM_OVRD_EQLEV_SHIFT |
s->eq_ovrd_ftc << MEM_OVRD_EQFTC_SHIFT;
ti_pipe3_writel(phy->phy_rx, PIPE3_PHY_RX_EQUALIZER, val);
if (phy->mode == PIPE3_MODE_SATA) {
val = ti_pipe3_readl(phy->phy_rx,
SATA_PHY_RX_IO_AND_A2D_OVERRIDES);
val &= ~MEM_CDR_LOS_SOURCE_MASK;
ti_pipe3_writel(phy->phy_rx, SATA_PHY_RX_IO_AND_A2D_OVERRIDES,
val);
}
}
static int ti_pipe3_init(struct phy *x)
{
struct ti_pipe3 *phy = phy_get_drvdata(x);
u32 val;
int ret = 0;
ti_pipe3_enable_clocks(phy);
/*
* Set pcie_pcs register to 0x96 for proper functioning of phy
* as recommended in AM572x TRM SPRUHZ6, section 18.5.2.2, table
* 18-1804.
*/
if (phy->mode == PIPE3_MODE_PCIE) {
if (!phy->pcs_syscon) {
omap_control_pcie_pcs(phy->control_dev, 0x96);
return 0;
}
val = 0x96 << OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT;
ret = regmap_update_bits(phy->pcs_syscon, phy->pcie_pcs_reg,
PCIE_PCS_MASK, val);
return ret;
}
/* Bring it out of IDLE if it is IDLE */
val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2);
if (val & PLL_IDLE) {
val &= ~PLL_IDLE;
ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION2, val);
ret = ti_pipe3_dpll_wait_lock(phy);
}
/* SATA has issues if re-programmed when locked */
val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS);
if ((val & PLL_LOCK) && phy->mode == PIPE3_MODE_SATA)
return ret;
/* Program the DPLL */
ret = ti_pipe3_dpll_program(phy);
if (ret) {
ti_pipe3_disable_clocks(phy);
return -EINVAL;
}
ti_pipe3_calibrate(phy);
return ret;
}
static int ti_pipe3_exit(struct phy *x)
{
struct ti_pipe3 *phy = phy_get_drvdata(x);
u32 val;
unsigned long timeout;
/* If dpll_reset_syscon is not present we won't power down the SATA DPLL
* due to Errata i783
*/
if (phy->mode == PIPE3_MODE_SATA && !phy->dpll_reset_syscon)
return 0;
/* PCIe doesn't have internal DPLL */
if (phy->mode != PIPE3_MODE_PCIE) {
/* Put DPLL in IDLE mode */
val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2);
val |= PLL_IDLE;
ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION2, val);
/* wait for LDO and Oscillator to power down */
timeout = jiffies + msecs_to_jiffies(PLL_IDLE_TIME);
do {
cpu_relax();
val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS);
if ((val & PLL_TICOPWDN) && (val & PLL_LDOPWDN))
break;
} while (!time_after(jiffies, timeout));
if (!(val & PLL_TICOPWDN) || !(val & PLL_LDOPWDN)) {
dev_err(phy->dev, "Failed to power down: PLL_STATUS 0x%x\n",
val);
return -EBUSY;
}
}
/* i783: SATA needs control bit toggle after PLL unlock */
if (phy->mode == PIPE3_MODE_SATA) {
regmap_update_bits(phy->dpll_reset_syscon, phy->dpll_reset_reg,
SATA_PLL_SOFT_RESET, SATA_PLL_SOFT_RESET);
regmap_update_bits(phy->dpll_reset_syscon, phy->dpll_reset_reg,
SATA_PLL_SOFT_RESET, 0);
}
ti_pipe3_disable_clocks(phy);
return 0;
}
static const struct phy_ops ops = {
.init = ti_pipe3_init,
.exit = ti_pipe3_exit,
.power_on = ti_pipe3_power_on,
.power_off = ti_pipe3_power_off,
.owner = THIS_MODULE,
};
static const struct of_device_id ti_pipe3_id_table[];
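/*
* Acquire the clocks this PHY needs: refclk (optional for SATA to cope
* with old DTBs), wkupclk for non-SATA modes, sysclk, and for PCIe the
* DPLL reference and divider clocks, which are also set to their required
* rates here.
*/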
static int ti_pipe3_get_clk(struct ti_pipe3 *phy)
{
struct clk *clk;
struct device *dev = phy->dev;
phy->refclk = devm_clk_get(dev, "refclk");
if (IS_ERR(phy->refclk)) {
dev_err(dev, "unable to get refclk\n");
/* older DTBs have missing refclk in SATA PHY
* so don't bail out in case of SATA PHY.
*/
if (phy->mode != PIPE3_MODE_SATA)
return PTR_ERR(phy->refclk);
}
if (phy->mode != PIPE3_MODE_SATA) {
phy->wkupclk = devm_clk_get(dev, "wkupclk");
if (IS_ERR(phy->wkupclk)) {
dev_err(dev, "unable to get wkupclk\n");
return PTR_ERR(phy->wkupclk);
}
} else {
phy->wkupclk = ERR_PTR(-ENODEV);
}
if (phy->mode != PIPE3_MODE_PCIE || phy->phy_power_syscon) {
phy->sys_clk = devm_clk_get(dev, "sysclk");
if (IS_ERR(phy->sys_clk)) {
dev_err(dev, "unable to get sysclk\n");
return -EINVAL;
}
}
if (phy->mode == PIPE3_MODE_PCIE) {
clk = devm_clk_get(dev, "dpll_ref");
if (IS_ERR(clk)) {
dev_err(dev, "unable to get dpll ref clk\n");
return PTR_ERR(clk);
}
clk_set_rate(clk, 1500000000);
clk = devm_clk_get(dev, "dpll_ref_m2");
if (IS_ERR(clk)) {
dev_err(dev, "unable to get dpll ref m2 clk\n");
return PTR_ERR(clk);
}
clk_set_rate(clk, 100000000);
clk = devm_clk_get(dev, "phy-div");
if (IS_ERR(clk)) {
dev_err(dev, "unable to get phy-div clk\n");
return PTR_ERR(clk);
}
clk_set_rate(clk, 100000000);
phy->div_clk = devm_clk_get(dev, "div-clk");
if (IS_ERR(phy->div_clk)) {
dev_err(dev, "unable to get div-clk\n");
return PTR_ERR(phy->div_clk);
}
} else {
phy->div_clk = ERR_PTR(-ENODEV);
}
return 0;
}
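/*
* Look up the syscon regions used for PHY power, PCIe PCS delay and SATA
* DPLL reset; when "syscon-phy-power" is absent, fall back to the legacy
* ctrl-module device handled by the omap-control driver.
*/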
static int ti_pipe3_get_sysctrl(struct ti_pipe3 *phy)
{
struct device *dev = phy->dev;
struct device_node *node = dev->of_node;
struct device_node *control_node;
struct platform_device *control_pdev;
phy->phy_power_syscon = syscon_regmap_lookup_by_phandle(node,
"syscon-phy-power");
if (IS_ERR(phy->phy_power_syscon)) {
dev_dbg(dev,
"can't get syscon-phy-power, using control device\n");
phy->phy_power_syscon = NULL;
} else {
if (of_property_read_u32_index(node,
"syscon-phy-power", 1,
&phy->power_reg)) {
dev_err(dev, "couldn't get power reg. offset\n");
return -EINVAL;
}
}
if (!phy->phy_power_syscon) {
control_node = of_parse_phandle(node, "ctrl-module", 0);
if (!control_node) {
dev_err(dev, "Failed to get control device phandle\n");
return -EINVAL;
}
control_pdev = of_find_device_by_node(control_node);
of_node_put(control_node);
if (!control_pdev) {
dev_err(dev, "Failed to get control device\n");
return -EINVAL;
}
phy->control_dev = &control_pdev->dev;
}
if (phy->mode == PIPE3_MODE_PCIE) {
phy->pcs_syscon = syscon_regmap_lookup_by_phandle(node,
"syscon-pcs");
if (IS_ERR(phy->pcs_syscon)) {
dev_dbg(dev,
"can't get syscon-pcs, using omap control\n");
phy->pcs_syscon = NULL;
} else {
if (of_property_read_u32_index(node,
"syscon-pcs", 1,
&phy->pcie_pcs_reg)) {
dev_err(dev,
"couldn't get pcie pcs reg. offset\n");
return -EINVAL;
}
}
}
if (phy->mode == PIPE3_MODE_SATA) {
phy->dpll_reset_syscon = syscon_regmap_lookup_by_phandle(node,
"syscon-pllreset");
if (IS_ERR(phy->dpll_reset_syscon)) {
dev_info(dev,
"can't get syscon-pllreset, sata dpll won't idle\n");
phy->dpll_reset_syscon = NULL;
} else {
if (of_property_read_u32_index(node,
"syscon-pllreset", 1,
&phy->dpll_reset_reg)) {
dev_err(dev,
"couldn't get pllreset reg. offset\n");
return -EINVAL;
}
}
}
return 0;
}
static int ti_pipe3_get_tx_rx_base(struct ti_pipe3 *phy)
{
struct device *dev = phy->dev;
struct platform_device *pdev = to_platform_device(dev);
phy->phy_rx = devm_platform_ioremap_resource_byname(pdev, "phy_rx");
if (IS_ERR(phy->phy_rx))
return PTR_ERR(phy->phy_rx);
phy->phy_tx = devm_platform_ioremap_resource_byname(pdev, "phy_tx");
return PTR_ERR_OR_ZERO(phy->phy_tx);
}
static int ti_pipe3_get_pll_base(struct ti_pipe3 *phy)
{
struct device *dev = phy->dev;
struct platform_device *pdev = to_platform_device(dev);
if (phy->mode == PIPE3_MODE_PCIE)
return 0;
phy->pll_ctrl_base =
devm_platform_ioremap_resource_byname(pdev, "pll_ctrl");
return PTR_ERR_OR_ZERO(phy->pll_ctrl_base);
}
static int ti_pipe3_probe(struct platform_device *pdev)
{
struct ti_pipe3 *phy;
struct phy *generic_phy;
struct phy_provider *phy_provider;
struct device *dev = &pdev->dev;
int ret;
const struct of_device_id *match;
struct pipe3_data *data;
phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
if (!phy)
return -ENOMEM;
match = of_match_device(ti_pipe3_id_table, dev);
if (!match)
return -EINVAL;
data = (struct pipe3_data *)match->data;
if (!data) {
dev_err(dev, "no driver data\n");
return -EINVAL;
}
phy->dev = dev;
phy->mode = data->mode;
phy->dpll_map = data->dpll_map;
phy->settings = data->settings;
ret = ti_pipe3_get_pll_base(phy);
if (ret)
return ret;
ret = ti_pipe3_get_tx_rx_base(phy);
if (ret)
return ret;
ret = ti_pipe3_get_sysctrl(phy);
if (ret)
return ret;
ret = ti_pipe3_get_clk(phy);
if (ret)
return ret;
platform_set_drvdata(pdev, phy);
pm_runtime_enable(dev);
/*
* Prevent auto-disable of refclk for SATA PHY due to Errata i783
*/
if (phy->mode == PIPE3_MODE_SATA) {
if (!IS_ERR(phy->refclk)) {
clk_prepare_enable(phy->refclk);
phy->sata_refclk_enabled = true;
}
}
generic_phy = devm_phy_create(dev, NULL, &ops);
if (IS_ERR(generic_phy))
return PTR_ERR(generic_phy);
phy_set_drvdata(generic_phy, phy);
ti_pipe3_power_off(generic_phy);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
return PTR_ERR_OR_ZERO(phy_provider);
}
static void ti_pipe3_remove(struct platform_device *pdev)
{
struct ti_pipe3 *phy = platform_get_drvdata(pdev);
if (phy->mode == PIPE3_MODE_SATA) {
clk_disable_unprepare(phy->refclk);
phy->sata_refclk_enabled = false;
}
pm_runtime_disable(&pdev->dev);
}
static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy)
{
int ret = 0;
if (!IS_ERR(phy->refclk)) {
ret = clk_prepare_enable(phy->refclk);
if (ret) {
dev_err(phy->dev, "Failed to enable refclk %d\n", ret);
return ret;
}
}
if (!IS_ERR(phy->wkupclk)) {
ret = clk_prepare_enable(phy->wkupclk);
if (ret) {
dev_err(phy->dev, "Failed to enable wkupclk %d\n", ret);
goto disable_refclk;
}
}
if (!IS_ERR(phy->div_clk)) {
ret = clk_prepare_enable(phy->div_clk);
if (ret) {
dev_err(phy->dev, "Failed to enable div_clk %d\n", ret);
goto disable_wkupclk;
}
}
return 0;
disable_wkupclk:
if (!IS_ERR(phy->wkupclk))
clk_disable_unprepare(phy->wkupclk);
disable_refclk:
if (!IS_ERR(phy->refclk))
clk_disable_unprepare(phy->refclk);
return ret;
}
static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy)
{
if (!IS_ERR(phy->wkupclk))
clk_disable_unprepare(phy->wkupclk);
if (!IS_ERR(phy->refclk))
clk_disable_unprepare(phy->refclk);
if (!IS_ERR(phy->div_clk))
clk_disable_unprepare(phy->div_clk);
}
static const struct of_device_id ti_pipe3_id_table[] = {
{
.compatible = "ti,phy-usb3",
.data = &data_usb,
},
{
.compatible = "ti,omap-usb3",
.data = &data_usb,
},
{
.compatible = "ti,phy-pipe3-sata",
.data = &data_sata,
},
{
.compatible = "ti,phy-pipe3-pcie",
.data = &data_pcie,
},
{}
};
MODULE_DEVICE_TABLE(of, ti_pipe3_id_table);
static struct platform_driver ti_pipe3_driver = {
.probe = ti_pipe3_probe,
.remove_new = ti_pipe3_remove,
.driver = {
.name = "ti-pipe3",
.of_match_table = ti_pipe3_id_table,
},
};
module_platform_driver(ti_pipe3_driver);
MODULE_ALIAS("platform:ti_pipe3");
MODULE_AUTHOR("Texas Instruments Inc.");
MODULE_DESCRIPTION("TI PIPE3 phy driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/phy/ti/phy-ti-pipe3.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* LED Class Core
*
* Copyright (C) 2005 John Lenz <[email protected]>
* Copyright (C) 2005-2007 Richard Purdie <[email protected]>
*/
#include <linux/ctype.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/leds.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <uapi/linux/uleds.h>
#include <linux/of.h>
#include "leds.h"
static DEFINE_MUTEX(leds_lookup_lock);
static LIST_HEAD(leds_lookup_list);
static ssize_t brightness_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
/* no lock needed for this */
led_update_brightness(led_cdev);
return sprintf(buf, "%u\n", led_cdev->brightness);
}
static ssize_t brightness_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
unsigned long state;
ssize_t ret;
mutex_lock(&led_cdev->led_access);
if (led_sysfs_is_disabled(led_cdev)) {
ret = -EBUSY;
goto unlock;
}
ret = kstrtoul(buf, 10, &state);
if (ret)
goto unlock;
if (state == LED_OFF)
led_trigger_remove(led_cdev);
led_set_brightness(led_cdev, state);
flush_work(&led_cdev->set_brightness_work);
ret = size;
unlock:
mutex_unlock(&led_cdev->led_access);
return ret;
}
static DEVICE_ATTR_RW(brightness);
static ssize_t max_brightness_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", led_cdev->max_brightness);
}
static DEVICE_ATTR_RO(max_brightness);
static ssize_t color_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
const char *color_text = "invalid";
struct led_classdev *led_cdev = dev_get_drvdata(dev);
if (led_cdev->color < LED_COLOR_ID_MAX)
color_text = led_colors[led_cdev->color];
return sysfs_emit(buf, "%s\n", color_text);
}
static DEVICE_ATTR_RO(color);
#ifdef CONFIG_LEDS_TRIGGERS
static BIN_ATTR(trigger, 0644, led_trigger_read, led_trigger_write, 0);
static struct bin_attribute *led_trigger_bin_attrs[] = {
&bin_attr_trigger,
NULL,
};
static const struct attribute_group led_trigger_group = {
.bin_attrs = led_trigger_bin_attrs,
};
#endif
static struct attribute *led_class_attrs[] = {
&dev_attr_brightness.attr,
&dev_attr_max_brightness.attr,
&dev_attr_color.attr,
NULL,
};
static const struct attribute_group led_group = {
.attrs = led_class_attrs,
};
static const struct attribute_group *led_groups[] = {
&led_group,
#ifdef CONFIG_LEDS_TRIGGERS
&led_trigger_group,
#endif
NULL,
};
#ifdef CONFIG_LEDS_BRIGHTNESS_HW_CHANGED
static ssize_t brightness_hw_changed_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
if (led_cdev->brightness_hw_changed == -1)
return -ENODATA;
return sprintf(buf, "%u\n", led_cdev->brightness_hw_changed);
}
static DEVICE_ATTR_RO(brightness_hw_changed);
static int led_add_brightness_hw_changed(struct led_classdev *led_cdev)
{
struct device *dev = led_cdev->dev;
int ret;
ret = device_create_file(dev, &dev_attr_brightness_hw_changed);
if (ret) {
dev_err(dev, "Error creating brightness_hw_changed\n");
return ret;
}
led_cdev->brightness_hw_changed_kn =
sysfs_get_dirent(dev->kobj.sd, "brightness_hw_changed");
if (!led_cdev->brightness_hw_changed_kn) {
dev_err(dev, "Error getting brightness_hw_changed kn\n");
device_remove_file(dev, &dev_attr_brightness_hw_changed);
return -ENXIO;
}
return 0;
}
static void led_remove_brightness_hw_changed(struct led_classdev *led_cdev)
{
sysfs_put(led_cdev->brightness_hw_changed_kn);
device_remove_file(led_cdev->dev, &dev_attr_brightness_hw_changed);
}
void led_classdev_notify_brightness_hw_changed(struct led_classdev *led_cdev, unsigned int brightness)
{
if (WARN_ON(!led_cdev->brightness_hw_changed_kn))
return;
led_cdev->brightness_hw_changed = brightness;
sysfs_notify_dirent(led_cdev->brightness_hw_changed_kn);
}
EXPORT_SYMBOL_GPL(led_classdev_notify_brightness_hw_changed);
#else
static int led_add_brightness_hw_changed(struct led_classdev *led_cdev)
{
return 0;
}
static void led_remove_brightness_hw_changed(struct led_classdev *led_cdev)
{
}
#endif
/**
* led_classdev_suspend - suspend an led_classdev.
* @led_cdev: the led_classdev to suspend.
*/
void led_classdev_suspend(struct led_classdev *led_cdev)
{
led_cdev->flags |= LED_SUSPENDED;
led_set_brightness_nopm(led_cdev, 0);
flush_work(&led_cdev->set_brightness_work);
}
EXPORT_SYMBOL_GPL(led_classdev_suspend);
/**
* led_classdev_resume - resume an led_classdev.
* @led_cdev: the led_classdev to resume.
*/
void led_classdev_resume(struct led_classdev *led_cdev)
{
led_set_brightness_nopm(led_cdev, led_cdev->brightness);
if (led_cdev->flash_resume)
led_cdev->flash_resume(led_cdev);
led_cdev->flags &= ~LED_SUSPENDED;
}
EXPORT_SYMBOL_GPL(led_classdev_resume);
#ifdef CONFIG_PM_SLEEP
static int led_suspend(struct device *dev)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
if (led_cdev->flags & LED_CORE_SUSPENDRESUME)
led_classdev_suspend(led_cdev);
return 0;
}
static int led_resume(struct device *dev)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
if (led_cdev->flags & LED_CORE_SUSPENDRESUME)
led_classdev_resume(led_cdev);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(leds_class_dev_pm_ops, led_suspend, led_resume);
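/*
* Resolve a LED class device to its led_classdev and take a reference on the
* owning driver module; the device reference is dropped again if the module
* cannot be pinned. Returns -EPROBE_DEFER while no device is available yet.
*/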
static struct led_classdev *led_module_get(struct device *led_dev)
{
struct led_classdev *led_cdev;
if (!led_dev)
return ERR_PTR(-EPROBE_DEFER);
led_cdev = dev_get_drvdata(led_dev);
if (!try_module_get(led_cdev->dev->parent->driver->owner)) {
put_device(led_cdev->dev);
return ERR_PTR(-ENODEV);
}
return led_cdev;
}
static const struct class leds_class = {
.name = "leds",
.dev_groups = led_groups,
.pm = &leds_class_dev_pm_ops,
};
/**
* of_led_get() - request a LED device via the LED framework
* @np: device node to get the LED device from
* @index: the index of the LED
*
* Returns the LED device parsed from the phandle specified in the "leds"
* property of a device tree node or a negative error-code on failure.
*/
struct led_classdev *of_led_get(struct device_node *np, int index)
{
struct device *led_dev;
struct device_node *led_node;
led_node = of_parse_phandle(np, "leds", index);
if (!led_node)
return ERR_PTR(-ENOENT);
led_dev = class_find_device_by_of_node(&leds_class, led_node);
of_node_put(led_node);
put_device(led_dev);
return led_module_get(led_dev);
}
EXPORT_SYMBOL_GPL(of_led_get);
/**
* led_put() - release a LED device
* @led_cdev: LED device
*/
void led_put(struct led_classdev *led_cdev)
{
module_put(led_cdev->dev->parent->driver->owner);
put_device(led_cdev->dev);
}
EXPORT_SYMBOL_GPL(led_put);
static void devm_led_release(struct device *dev, void *res)
{
struct led_classdev **p = res;
led_put(*p);
}
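/*
* Wrap an already looked-up LED in a devres entry so that led_put() is called
* automatically on driver detach. The LED reference is released here if the
* devres allocation fails.
*/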
static struct led_classdev *__devm_led_get(struct device *dev, struct led_classdev *led)
{
struct led_classdev **dr;
dr = devres_alloc(devm_led_release, sizeof(struct led_classdev *), GFP_KERNEL);
if (!dr) {
led_put(led);
return ERR_PTR(-ENOMEM);
}
*dr = led;
devres_add(dev, dr);
return led;
}
/**
* devm_of_led_get - Resource-managed request of a LED device
* @dev: LED consumer
* @index: index of the LED to obtain in the consumer
*
* The device node of the device is parsed to find the requested LED device.
* The LED device returned from this function is automatically released
* on driver detach.
*
* @return a pointer to a LED device or ERR_PTR(errno) on failure.
*/
struct led_classdev *__must_check devm_of_led_get(struct device *dev,
int index)
{
struct led_classdev *led;
if (!dev)
return ERR_PTR(-EINVAL);
led = of_led_get(dev->of_node, index);
if (IS_ERR(led))
return led;
return __devm_led_get(dev, led);
}
EXPORT_SYMBOL_GPL(devm_of_led_get);
/**
* led_get() - request a LED device via the LED framework
* @dev: device for which to get the LED device
* @con_id: name of the LED from the device's point of view
*
* @return a pointer to a LED device or ERR_PTR(errno) on failure.
*/
struct led_classdev *led_get(struct device *dev, char *con_id)
{
struct led_lookup_data *lookup;
const char *provider = NULL;
struct device *led_dev;
mutex_lock(&leds_lookup_lock);
list_for_each_entry(lookup, &leds_lookup_list, list) {
if (!strcmp(lookup->dev_id, dev_name(dev)) &&
!strcmp(lookup->con_id, con_id)) {
provider = kstrdup_const(lookup->provider, GFP_KERNEL);
break;
}
}
mutex_unlock(&leds_lookup_lock);
if (!provider)
return ERR_PTR(-ENOENT);
led_dev = class_find_device_by_name(&leds_class, provider);
kfree_const(provider);
return led_module_get(led_dev);
}
EXPORT_SYMBOL_GPL(led_get);
/**
* devm_led_get() - request a LED device via the LED framework
* @dev: device for which to get the LED device
* @con_id: name of the LED from the device's point of view
*
* The LED device returned from this function is automatically released
* on driver detach.
*
* @return a pointer to a LED device or ERR_PTR(errno) on failure.
*/
struct led_classdev *devm_led_get(struct device *dev, char *con_id)
{
struct led_classdev *led;
led = led_get(dev, con_id);
if (IS_ERR(led))
return led;
return __devm_led_get(dev, led);
}
EXPORT_SYMBOL_GPL(devm_led_get);
/**
* led_add_lookup() - Add a LED lookup table entry
* @led_lookup: the lookup table entry to add
*
* Add a LED lookup table entry. On systems without devicetree the lookup table
* is used by led_get() to find LEDs.
*/
void led_add_lookup(struct led_lookup_data *led_lookup)
{
mutex_lock(&leds_lookup_lock);
list_add_tail(&led_lookup->list, &leds_lookup_list);
mutex_unlock(&leds_lookup_lock);
}
EXPORT_SYMBOL_GPL(led_add_lookup);
/**
* led_remove_lookup() - Remove a LED lookup table entry
* @led_lookup: the lookup table entry to remove
*/
void led_remove_lookup(struct led_lookup_data *led_lookup)
{
mutex_lock(&leds_lookup_lock);
list_del(&led_lookup->list);
mutex_unlock(&leds_lookup_lock);
}
EXPORT_SYMBOL_GPL(led_remove_lookup);
/**
* devm_of_led_get_optional - Resource-managed request of an optional LED device
* @dev: LED consumer
* @index: index of the LED to obtain in the consumer
*
* The device node of the device is parsed to find the requested LED device.
* The LED device returned from this function is automatically released
* on driver detach.
*
* @return a pointer to a LED device, ERR_PTR(errno) on failure and NULL if the
* led was not found.
*/
struct led_classdev *__must_check devm_of_led_get_optional(struct device *dev,
int index)
{
struct led_classdev *led;
led = devm_of_led_get(dev, index);
if (IS_ERR(led) && PTR_ERR(led) == -ENOENT)
return NULL;
return led;
}
EXPORT_SYMBOL_GPL(devm_of_led_get_optional);
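/*
* Build a unique device name: if init_name is already taken, append an
* increasing "_<n>" suffix. Returns the suffix number used (0 when the name
* was free) or -ENOMEM if the result does not fit into the buffer.
*/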
static int led_classdev_next_name(const char *init_name, char *name,
size_t len)
{
unsigned int i = 0;
int ret = 0;
struct device *dev;
strscpy(name, init_name, len);
while ((ret < len) &&
(dev = class_find_device_by_name(&leds_class, name))) {
put_device(dev);
ret = snprintf(name, len, "%s_%u", init_name, ++i);
}
if (ret >= len)
return -ENOMEM;
return i;
}
/**
* led_classdev_register_ext - register a new object of led_classdev class
* with init data.
*
* @parent: parent of LED device
* @led_cdev: the led_classdev structure for this device.
* @init_data: LED class device initialization data
*/
int led_classdev_register_ext(struct device *parent,
struct led_classdev *led_cdev,
struct led_init_data *init_data)
{
char composed_name[LED_MAX_NAME_SIZE];
char final_name[LED_MAX_NAME_SIZE];
const char *proposed_name = composed_name;
int ret;
if (init_data) {
if (init_data->devname_mandatory && !init_data->devicename) {
dev_err(parent, "Mandatory device name is missing");
return -EINVAL;
}
ret = led_compose_name(parent, init_data, composed_name);
if (ret < 0)
return ret;
if (init_data->fwnode) {
fwnode_property_read_string(init_data->fwnode,
"linux,default-trigger",
&led_cdev->default_trigger);
if (fwnode_property_present(init_data->fwnode,
"retain-state-shutdown"))
led_cdev->flags |= LED_RETAIN_AT_SHUTDOWN;
fwnode_property_read_u32(init_data->fwnode,
"max-brightness",
&led_cdev->max_brightness);
if (fwnode_property_present(init_data->fwnode, "color"))
fwnode_property_read_u32(init_data->fwnode, "color",
&led_cdev->color);
}
} else {
proposed_name = led_cdev->name;
}
ret = led_classdev_next_name(proposed_name, final_name, sizeof(final_name));
if (ret < 0)
return ret;
if (led_cdev->color >= LED_COLOR_ID_MAX)
dev_warn(parent, "LED %s color identifier out of range\n", final_name);
mutex_init(&led_cdev->led_access);
mutex_lock(&led_cdev->led_access);
led_cdev->dev = device_create_with_groups(&leds_class, parent, 0,
led_cdev, led_cdev->groups, "%s", final_name);
if (IS_ERR(led_cdev->dev)) {
mutex_unlock(&led_cdev->led_access);
return PTR_ERR(led_cdev->dev);
}
if (init_data && init_data->fwnode)
device_set_node(led_cdev->dev, init_data->fwnode);
if (ret)
dev_warn(parent, "Led %s renamed to %s due to name collision",
proposed_name, dev_name(led_cdev->dev));
if (led_cdev->flags & LED_BRIGHT_HW_CHANGED) {
ret = led_add_brightness_hw_changed(led_cdev);
if (ret) {
device_unregister(led_cdev->dev);
led_cdev->dev = NULL;
mutex_unlock(&led_cdev->led_access);
return ret;
}
}
led_cdev->work_flags = 0;
#ifdef CONFIG_LEDS_TRIGGERS
init_rwsem(&led_cdev->trigger_lock);
#endif
#ifdef CONFIG_LEDS_BRIGHTNESS_HW_CHANGED
led_cdev->brightness_hw_changed = -1;
#endif
/* add to the list of leds */
down_write(&leds_list_lock);
list_add_tail(&led_cdev->node, &leds_list);
up_write(&leds_list_lock);
if (!led_cdev->max_brightness)
led_cdev->max_brightness = LED_FULL;
led_update_brightness(led_cdev);
led_init_core(led_cdev);
#ifdef CONFIG_LEDS_TRIGGERS
led_trigger_set_default(led_cdev);
#endif
mutex_unlock(&led_cdev->led_access);
dev_dbg(parent, "Registered led device: %s\n",
led_cdev->name);
return 0;
}
EXPORT_SYMBOL_GPL(led_classdev_register_ext);
/**
* led_classdev_unregister - unregisters an object of the led_classdev class.
* @led_cdev: the led device to unregister
*
* Unregisters an object previously registered via led_classdev_register().
*/
void led_classdev_unregister(struct led_classdev *led_cdev)
{
if (IS_ERR_OR_NULL(led_cdev->dev))
return;
#ifdef CONFIG_LEDS_TRIGGERS
down_write(&led_cdev->trigger_lock);
if (led_cdev->trigger)
led_trigger_set(led_cdev, NULL);
up_write(&led_cdev->trigger_lock);
#endif
led_cdev->flags |= LED_UNREGISTERING;
/* Stop blinking */
led_stop_software_blink(led_cdev);
if (!(led_cdev->flags & LED_RETAIN_AT_SHUTDOWN))
led_set_brightness(led_cdev, LED_OFF);
flush_work(&led_cdev->set_brightness_work);
if (led_cdev->flags & LED_BRIGHT_HW_CHANGED)
led_remove_brightness_hw_changed(led_cdev);
device_unregister(led_cdev->dev);
down_write(&leds_list_lock);
list_del(&led_cdev->node);
up_write(&leds_list_lock);
mutex_destroy(&led_cdev->led_access);
}
EXPORT_SYMBOL_GPL(led_classdev_unregister);
static void devm_led_classdev_release(struct device *dev, void *res)
{
led_classdev_unregister(*(struct led_classdev **)res);
}
/**
* devm_led_classdev_register_ext - resource managed led_classdev_register_ext()
*
* @parent: parent of LED device
* @led_cdev: the led_classdev structure for this device.
* @init_data: LED class device initialization data
*/
int devm_led_classdev_register_ext(struct device *parent,
struct led_classdev *led_cdev,
struct led_init_data *init_data)
{
struct led_classdev **dr;
int rc;
dr = devres_alloc(devm_led_classdev_release, sizeof(*dr), GFP_KERNEL);
if (!dr)
return -ENOMEM;
rc = led_classdev_register_ext(parent, led_cdev, init_data);
if (rc) {
devres_free(dr);
return rc;
}
*dr = led_cdev;
devres_add(parent, dr);
return 0;
}
EXPORT_SYMBOL_GPL(devm_led_classdev_register_ext);
static int devm_led_classdev_match(struct device *dev, void *res, void *data)
{
struct led_classdev **p = res;
if (WARN_ON(!p || !*p))
return 0;
return *p == data;
}
/**
* devm_led_classdev_unregister() - resource managed led_classdev_unregister()
* @dev: The device to unregister.
* @led_cdev: the led_classdev structure for this device.
*/
void devm_led_classdev_unregister(struct device *dev,
struct led_classdev *led_cdev)
{
WARN_ON(devres_release(dev,
devm_led_classdev_release,
devm_led_classdev_match, led_cdev));
}
EXPORT_SYMBOL_GPL(devm_led_classdev_unregister);
static int __init leds_init(void)
{
return class_register(&leds_class);
}
static void __exit leds_exit(void)
{
class_unregister(&leds_class);
}
subsys_initcall(leds_init);
module_exit(leds_exit);
MODULE_AUTHOR("John Lenz, Richard Purdie");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LED Class Interface");
| linux-master | drivers/leds/led-class.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* leds-regulator.c - LED class driver for regulator driven LEDs.
*
* Copyright (C) 2009 Antonio Ospite <[email protected]>
*
* Inspired by leds-wm8350 driver.
*/
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/leds.h>
#include <linux/leds-regulator.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#define to_regulator_led(led_cdev) \
container_of(led_cdev, struct regulator_led, cdev)
struct regulator_led {
struct led_classdev cdev;
int enabled;
struct mutex mutex;
struct regulator *vcc;
};
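/*
* Derive the number of brightness levels from the regulator: if it cannot
* list or set voltages, the LED is treated as on/off only (one level);
* otherwise each selectable voltage becomes a brightness level.
*/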
static inline int led_regulator_get_max_brightness(struct regulator *supply)
{
int ret;
int voltage = regulator_list_voltage(supply, 0);
if (voltage <= 0)
return 1;
/* Even if the regulator can't change voltages,
* we still assume it can change status
* and that the LED can be turned on and off.
*/
ret = regulator_set_voltage(supply, voltage, voltage);
if (ret < 0)
return 1;
return regulator_count_voltages(supply);
}
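/*
* Map a non-zero brightness level to a regulator voltage; level N selects
* voltage index N - 1.
*/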
static int led_regulator_get_voltage(struct regulator *supply,
enum led_brightness brightness)
{
if (brightness == 0)
return -EINVAL;
return regulator_list_voltage(supply, brightness - 1);
}
static void regulator_led_enable(struct regulator_led *led)
{
int ret;
if (led->enabled)
return;
ret = regulator_enable(led->vcc);
if (ret != 0) {
dev_err(led->cdev.dev, "Failed to enable vcc: %d\n", ret);
return;
}
led->enabled = 1;
}
static void regulator_led_disable(struct regulator_led *led)
{
int ret;
if (!led->enabled)
return;
ret = regulator_disable(led->vcc);
if (ret != 0) {
dev_err(led->cdev.dev, "Failed to disable vcc: %d\n", ret);
return;
}
led->enabled = 0;
}
static int regulator_led_brightness_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
struct regulator_led *led = to_regulator_led(led_cdev);
int voltage;
int ret = 0;
mutex_lock(&led->mutex);
if (value == LED_OFF) {
regulator_led_disable(led);
goto out;
}
if (led->cdev.max_brightness > 1) {
voltage = led_regulator_get_voltage(led->vcc, value);
dev_dbg(led->cdev.dev, "brightness: %d voltage: %d\n",
value, voltage);
ret = regulator_set_voltage(led->vcc, voltage, voltage);
if (ret != 0)
dev_err(led->cdev.dev, "Failed to set voltage %d: %d\n",
voltage, ret);
}
regulator_led_enable(led);
out:
mutex_unlock(&led->mutex);
return ret;
}
static int regulator_led_probe(struct platform_device *pdev)
{
struct led_regulator_platform_data *pdata =
dev_get_platdata(&pdev->dev);
struct device *dev = &pdev->dev;
struct led_init_data init_data = {};
struct regulator_led *led;
struct regulator *vcc;
int ret = 0;
vcc = devm_regulator_get_exclusive(dev, "vled");
if (IS_ERR(vcc)) {
dev_err(dev, "Cannot get vcc\n");
return PTR_ERR(vcc);
}
led = devm_kzalloc(dev, sizeof(*led), GFP_KERNEL);
if (led == NULL)
return -ENOMEM;
init_data.fwnode = dev->fwnode;
led->cdev.max_brightness = led_regulator_get_max_brightness(vcc);
/* Legacy platform data label assignment */
if (pdata) {
if (pdata->brightness > led->cdev.max_brightness) {
dev_err(dev, "Invalid default brightness %d\n",
pdata->brightness);
return -EINVAL;
}
led->cdev.brightness = pdata->brightness;
init_data.default_label = pdata->name;
}
led->cdev.brightness_set_blocking = regulator_led_brightness_set;
led->cdev.flags |= LED_CORE_SUSPENDRESUME;
led->vcc = vcc;
/* to handle correctly an already enabled regulator */
if (regulator_is_enabled(led->vcc))
led->enabled = 1;
mutex_init(&led->mutex);
platform_set_drvdata(pdev, led);
ret = led_classdev_register_ext(dev, &led->cdev, &init_data);
if (ret < 0)
return ret;
return 0;
}
static int regulator_led_remove(struct platform_device *pdev)
{
struct regulator_led *led = platform_get_drvdata(pdev);
led_classdev_unregister(&led->cdev);
regulator_led_disable(led);
return 0;
}
static const struct of_device_id regulator_led_of_match[] = {
{ .compatible = "regulator-led", },
{}
};
MODULE_DEVICE_TABLE(of, regulator_led_of_match);
static struct platform_driver regulator_led_driver = {
.driver = {
.name = "leds-regulator",
.of_match_table = regulator_led_of_match,
},
.probe = regulator_led_probe,
.remove = regulator_led_remove,
};
module_platform_driver(regulator_led_driver);
MODULE_AUTHOR("Antonio Ospite <[email protected]>");
MODULE_DESCRIPTION("Regulator driven LED driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:leds-regulator");
| linux-master | drivers/leds/leds-regulator.c |
// SPDX-License-Identifier: GPL-2.0+
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#define A500_EC_LED_DELAY_USEC (100 * 1000)
enum {
REG_RESET_LEDS = 0x40,
REG_POWER_LED_ON = 0x42,
REG_CHARGE_LED_ON = 0x43,
REG_ANDROID_LEDS_OFF = 0x5a,
};
struct a500_led {
struct led_classdev cdev;
const struct reg_sequence *enable_seq;
struct a500_led *other;
struct regmap *rmap;
};
static const struct reg_sequence a500_ec_leds_reset_seq[] = {
REG_SEQ(REG_RESET_LEDS, 0x0, A500_EC_LED_DELAY_USEC),
REG_SEQ(REG_ANDROID_LEDS_OFF, 0x0, A500_EC_LED_DELAY_USEC),
};
static const struct reg_sequence a500_ec_white_led_enable_seq[] = {
REG_SEQ(REG_POWER_LED_ON, 0x0, A500_EC_LED_DELAY_USEC),
};
static const struct reg_sequence a500_ec_orange_led_enable_seq[] = {
REG_SEQ(REG_CHARGE_LED_ON, 0x0, A500_EC_LED_DELAY_USEC),
};
static int a500_ec_led_brightness_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
struct a500_led *led = container_of(led_cdev, struct a500_led, cdev);
struct reg_sequence control_seq[2];
unsigned int num_regs = 1;
if (value) {
control_seq[0] = led->enable_seq[0];
} else {
/*
* There are no separate controls to disable each LED
* individually; only the RESET_LEDS command exists and it
* turns off both LEDs.
*
* Since RESET_LEDS turns off both LEDs, restore the other
* LED afterwards if it is currently on.
*/
if (led->other->cdev.brightness)
num_regs = 2;
control_seq[0] = a500_ec_leds_reset_seq[0];
control_seq[1] = led->other->enable_seq[0];
}
return regmap_multi_reg_write(led->rmap, control_seq, num_regs);
}
static int a500_ec_leds_probe(struct platform_device *pdev)
{
struct a500_led *white_led, *orange_led;
struct regmap *rmap;
int err;
rmap = dev_get_regmap(pdev->dev.parent, "KB930");
if (!rmap)
return -EINVAL;
/* reset and turn off LEDs */
regmap_multi_reg_write(rmap, a500_ec_leds_reset_seq, 2);
white_led = devm_kzalloc(&pdev->dev, sizeof(*white_led), GFP_KERNEL);
if (!white_led)
return -ENOMEM;
white_led->cdev.name = "power:white";
white_led->cdev.brightness_set_blocking = a500_ec_led_brightness_set;
white_led->cdev.flags = LED_CORE_SUSPENDRESUME;
white_led->cdev.max_brightness = 1;
white_led->enable_seq = a500_ec_white_led_enable_seq;
white_led->rmap = rmap;
orange_led = devm_kzalloc(&pdev->dev, sizeof(*orange_led), GFP_KERNEL);
if (!orange_led)
return -ENOMEM;
orange_led->cdev.name = "power:orange";
orange_led->cdev.brightness_set_blocking = a500_ec_led_brightness_set;
orange_led->cdev.flags = LED_CORE_SUSPENDRESUME;
orange_led->cdev.max_brightness = 1;
orange_led->enable_seq = a500_ec_orange_led_enable_seq;
orange_led->rmap = rmap;
white_led->other = orange_led;
orange_led->other = white_led;
err = devm_led_classdev_register(&pdev->dev, &white_led->cdev);
if (err) {
dev_err(&pdev->dev, "failed to register white LED\n");
return err;
}
err = devm_led_classdev_register(&pdev->dev, &orange_led->cdev);
if (err) {
dev_err(&pdev->dev, "failed to register orange LED\n");
return err;
}
return 0;
}
static struct platform_driver a500_ec_leds_driver = {
.driver = {
.name = "acer-a500-iconia-leds",
},
.probe = a500_ec_leds_probe,
};
module_platform_driver(a500_ec_leds_driver);
MODULE_DESCRIPTION("LED driver for Acer Iconia Tab A500 Power Button");
MODULE_AUTHOR("Dmitry Osipenko <[email protected]>");
MODULE_ALIAS("platform:acer-a500-iconia-leds");
MODULE_LICENSE("GPL");
| linux-master | drivers/leds/leds-acer-a500.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* LED Driver for Dialog DA9052 PMICs.
*
* Copyright(c) 2012 Dialog Semiconductor Ltd.
*
* Author: David Dajun Chen <[email protected]>
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/leds.h>
#include <linux/slab.h>
#include <linux/mfd/da9052/reg.h>
#include <linux/mfd/da9052/da9052.h>
#include <linux/mfd/da9052/pdata.h>
#define DA9052_OPENDRAIN_OUTPUT 2
#define DA9052_SET_HIGH_LVL_OUTPUT (1 << 3)
#define DA9052_MASK_UPPER_NIBBLE 0xF0
#define DA9052_MASK_LOWER_NIBBLE 0x0F
#define DA9052_NIBBLE_SHIFT 4
#define DA9052_MAX_BRIGHTNESS 0x5f
struct da9052_led {
struct led_classdev cdev;
struct da9052 *da9052;
unsigned char led_index;
unsigned char id;
};
static unsigned char led_reg[] = {
DA9052_LED_CONT_4_REG,
DA9052_LED_CONT_5_REG,
};
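/*
* Write the 7-bit PWM dim value for the selected LED output, keeping the
* DIM control bit set.
*/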
static int da9052_set_led_brightness(struct da9052_led *led,
enum led_brightness brightness)
{
u8 val;
int error;
val = (brightness & 0x7f) | DA9052_LED_CONT_DIM;
error = da9052_reg_write(led->da9052, led_reg[led->led_index], val);
if (error < 0)
dev_err(led->da9052->dev, "Failed to set led brightness, %d\n",
error);
return error;
}
static int da9052_led_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
struct da9052_led *led =
container_of(led_cdev, struct da9052_led, cdev);
return da9052_set_led_brightness(led, value);
}
static int da9052_configure_leds(struct da9052 *da9052)
{
int error;
unsigned char register_value = DA9052_OPENDRAIN_OUTPUT
| DA9052_SET_HIGH_LVL_OUTPUT;
error = da9052_reg_update(da9052, DA9052_GPIO_14_15_REG,
DA9052_MASK_LOWER_NIBBLE,
register_value);
if (error < 0) {
dev_err(da9052->dev, "Failed to write GPIO 14-15 reg, %d\n",
error);
return error;
}
error = da9052_reg_update(da9052, DA9052_GPIO_14_15_REG,
DA9052_MASK_UPPER_NIBBLE,
register_value << DA9052_NIBBLE_SHIFT);
if (error < 0)
dev_err(da9052->dev, "Failed to write GPIO 14-15 reg, %d\n",
error);
return error;
}
static int da9052_led_probe(struct platform_device *pdev)
{
struct da9052_pdata *pdata;
struct da9052 *da9052;
struct led_platform_data *pled;
struct da9052_led *led = NULL;
int error = -ENODEV;
int i;
da9052 = dev_get_drvdata(pdev->dev.parent);
pdata = dev_get_platdata(da9052->dev);
if (pdata == NULL) {
dev_err(&pdev->dev, "No platform data\n");
goto err;
}
pled = pdata->pled;
if (pled == NULL) {
dev_err(&pdev->dev, "No platform data for LED\n");
goto err;
}
led = devm_kcalloc(&pdev->dev,
pled->num_leds, sizeof(struct da9052_led),
GFP_KERNEL);
if (!led) {
error = -ENOMEM;
goto err;
}
for (i = 0; i < pled->num_leds; i++) {
led[i].cdev.name = pled->leds[i].name;
led[i].cdev.brightness_set_blocking = da9052_led_set;
led[i].cdev.brightness = LED_OFF;
led[i].cdev.max_brightness = DA9052_MAX_BRIGHTNESS;
led[i].led_index = pled->leds[i].flags;
led[i].da9052 = dev_get_drvdata(pdev->dev.parent);
error = led_classdev_register(pdev->dev.parent, &led[i].cdev);
if (error) {
dev_err(&pdev->dev, "Failed to register led %d\n",
led[i].led_index);
goto err_register;
}
error = da9052_set_led_brightness(&led[i],
led[i].cdev.brightness);
if (error) {
dev_err(&pdev->dev, "Unable to init led %d\n",
led[i].led_index);
continue;
}
}
error = da9052_configure_leds(led->da9052);
if (error) {
dev_err(&pdev->dev, "Failed to configure GPIO LED%d\n", error);
goto err_register;
}
platform_set_drvdata(pdev, led);
return 0;
err_register:
for (i = i - 1; i >= 0; i--)
led_classdev_unregister(&led[i].cdev);
err:
return error;
}
static int da9052_led_remove(struct platform_device *pdev)
{
struct da9052_led *led = platform_get_drvdata(pdev);
struct da9052_pdata *pdata;
struct da9052 *da9052;
struct led_platform_data *pled;
int i;
da9052 = dev_get_drvdata(pdev->dev.parent);
pdata = dev_get_platdata(da9052->dev);
pled = pdata->pled;
for (i = 0; i < pled->num_leds; i++) {
da9052_set_led_brightness(&led[i], LED_OFF);
led_classdev_unregister(&led[i].cdev);
}
return 0;
}
static struct platform_driver da9052_led_driver = {
.driver = {
.name = "da9052-leds",
},
.probe = da9052_led_probe,
.remove = da9052_led_remove,
};
module_platform_driver(da9052_led_driver);
MODULE_AUTHOR("Dialog Semiconductor Ltd <[email protected]>");
MODULE_DESCRIPTION("LED driver for Dialog DA9052 PMIC");
MODULE_LICENSE("GPL");
| linux-master | drivers/leds/leds-da9052.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2017 Sebastian Reichel <[email protected]>
*/
#include <linux/leds.h>
#include <linux/mfd/motorola-cpcap.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#define CPCAP_LED_NO_CURRENT 0x0001
struct cpcap_led_info {
u16 reg;
u16 mask;
u16 limit;
u16 init_mask;
u16 init_val;
};
static const struct cpcap_led_info cpcap_led_red = {
.reg = CPCAP_REG_REDC,
.mask = 0x03FF,
.limit = 31,
};
static const struct cpcap_led_info cpcap_led_green = {
.reg = CPCAP_REG_GREENC,
.mask = 0x03FF,
.limit = 31,
};
static const struct cpcap_led_info cpcap_led_blue = {
.reg = CPCAP_REG_BLUEC,
.mask = 0x03FF,
.limit = 31,
};
/* aux display light */
static const struct cpcap_led_info cpcap_led_adl = {
.reg = CPCAP_REG_ADLC,
.mask = 0x000F,
.limit = 1,
.init_mask = 0x7FFF,
.init_val = 0x5FF0,
};
/* camera privacy led */
static const struct cpcap_led_info cpcap_led_cp = {
.reg = CPCAP_REG_CLEDC,
.mask = 0x0007,
.limit = 1,
.init_mask = 0x03FF,
.init_val = 0x0008,
};
struct cpcap_led {
struct led_classdev led;
const struct cpcap_led_info *info;
struct device *dev;
struct regmap *regmap;
struct mutex update_lock;
struct regulator *vdd;
bool powered;
u32 current_limit;
};
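/*
* Pack the 5-bit current limit and 4-bit duty cycle into the layout used by
* the CPCAP LED current registers.
*/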
static u16 cpcap_led_val(u8 current_limit, u8 duty_cycle)
{
current_limit &= 0x1f; /* 5 bit */
duty_cycle &= 0x0f; /* 4 bit */
return current_limit << 4 | duty_cycle;
}
static int cpcap_led_set_power(struct cpcap_led *led, bool status)
{
int err;
if (status == led->powered)
return 0;
if (status)
err = regulator_enable(led->vdd);
else
err = regulator_disable(led->vdd);
if (err) {
dev_err(led->dev, "regulator failure: %d", err);
return err;
}
led->powered = status;
return 0;
}
static int cpcap_led_set(struct led_classdev *ledc, enum led_brightness value)
{
struct cpcap_led *led = container_of(ledc, struct cpcap_led, led);
int brightness;
int err;
mutex_lock(&led->update_lock);
if (value > LED_OFF) {
err = cpcap_led_set_power(led, true);
if (err)
goto exit;
}
if (value == LED_OFF) {
/* Avoid HW issue by turning off current before duty cycle */
err = regmap_update_bits(led->regmap,
led->info->reg, led->info->mask, CPCAP_LED_NO_CURRENT);
if (err) {
dev_err(led->dev, "regmap failed: %d", err);
goto exit;
}
brightness = cpcap_led_val(value, LED_OFF);
} else {
brightness = cpcap_led_val(value, LED_ON);
}
err = regmap_update_bits(led->regmap, led->info->reg, led->info->mask,
brightness);
if (err) {
dev_err(led->dev, "regmap failed: %d", err);
goto exit;
}
if (value == LED_OFF) {
err = cpcap_led_set_power(led, false);
if (err)
goto exit;
}
exit:
mutex_unlock(&led->update_lock);
return err;
}
static const struct of_device_id cpcap_led_of_match[] = {
{ .compatible = "motorola,cpcap-led-red", .data = &cpcap_led_red },
{ .compatible = "motorola,cpcap-led-green", .data = &cpcap_led_green },
{ .compatible = "motorola,cpcap-led-blue", .data = &cpcap_led_blue },
{ .compatible = "motorola,cpcap-led-adl", .data = &cpcap_led_adl },
{ .compatible = "motorola,cpcap-led-cp", .data = &cpcap_led_cp },
{},
};
MODULE_DEVICE_TABLE(of, cpcap_led_of_match);
static int cpcap_led_probe(struct platform_device *pdev)
{
struct cpcap_led *led;
int err;
led = devm_kzalloc(&pdev->dev, sizeof(*led), GFP_KERNEL);
if (!led)
return -ENOMEM;
platform_set_drvdata(pdev, led);
led->info = device_get_match_data(&pdev->dev);
led->dev = &pdev->dev;
if (led->info->reg == 0x0000) {
dev_err(led->dev, "Unsupported LED");
return -ENODEV;
}
led->regmap = dev_get_regmap(pdev->dev.parent, NULL);
if (!led->regmap)
return -ENODEV;
led->vdd = devm_regulator_get(&pdev->dev, "vdd");
if (IS_ERR(led->vdd)) {
err = PTR_ERR(led->vdd);
dev_err(led->dev, "Couldn't get regulator: %d", err);
return err;
}
err = device_property_read_string(&pdev->dev, "label", &led->led.name);
if (err) {
dev_err(led->dev, "Couldn't read LED label: %d", err);
return err;
}
if (led->info->init_mask) {
err = regmap_update_bits(led->regmap, led->info->reg,
led->info->init_mask, led->info->init_val);
if (err) {
dev_err(led->dev, "regmap failed: %d", err);
return err;
}
}
mutex_init(&led->update_lock);
led->led.max_brightness = led->info->limit;
led->led.brightness_set_blocking = cpcap_led_set;
err = devm_led_classdev_register(&pdev->dev, &led->led);
if (err) {
dev_err(led->dev, "Couldn't register LED: %d", err);
return err;
}
return 0;
}
static struct platform_driver cpcap_led_driver = {
.probe = cpcap_led_probe,
.driver = {
.name = "cpcap-led",
.of_match_table = cpcap_led_of_match,
},
};
module_platform_driver(cpcap_led_driver);
MODULE_DESCRIPTION("CPCAP LED driver");
MODULE_AUTHOR("Sebastian Reichel <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/leds/leds-cpcap.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2016 National Instruments Corp.
*/
#include <linux/acpi.h>
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#define NIC78BX_USER1_LED_MASK 0x3
#define NIC78BX_USER1_GREEN_LED BIT(0)
#define NIC78BX_USER1_YELLOW_LED BIT(1)
#define NIC78BX_USER2_LED_MASK 0xC
#define NIC78BX_USER2_GREEN_LED BIT(2)
#define NIC78BX_USER2_YELLOW_LED BIT(3)
#define NIC78BX_LOCK_REG_OFFSET 1
#define NIC78BX_LOCK_VALUE 0xA5
#define NIC78BX_UNLOCK_VALUE 0x5A
#define NIC78BX_USER_LED_IO_SIZE 2
struct nic78bx_led_data {
u16 io_base;
spinlock_t lock;
struct platform_device *pdev;
};
struct nic78bx_led {
u8 bit;
u8 mask;
struct nic78bx_led_data *data;
struct led_classdev cdev;
};
static inline struct nic78bx_led *to_nic78bx_led(struct led_classdev *cdev)
{
return container_of(cdev, struct nic78bx_led, cdev);
}
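/*
* Each user LED has a green and a yellow bit sharing one 2-bit field;
* turning one colour on clears the whole field first, so only one colour is
* lit per LED at a time.
*/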
static void nic78bx_brightness_set(struct led_classdev *cdev,
enum led_brightness brightness)
{
struct nic78bx_led *nled = to_nic78bx_led(cdev);
unsigned long flags;
u8 value;
spin_lock_irqsave(&nled->data->lock, flags);
value = inb(nled->data->io_base);
if (brightness) {
value &= ~nled->mask;
value |= nled->bit;
} else {
value &= ~nled->bit;
}
outb(value, nled->data->io_base);
spin_unlock_irqrestore(&nled->data->lock, flags);
}
static enum led_brightness nic78bx_brightness_get(struct led_classdev *cdev)
{
struct nic78bx_led *nled = to_nic78bx_led(cdev);
unsigned long flags;
u8 value;
spin_lock_irqsave(&nled->data->lock, flags);
value = inb(nled->data->io_base);
spin_unlock_irqrestore(&nled->data->lock, flags);
return (value & nled->bit) ? 1 : LED_OFF;
}
static struct nic78bx_led nic78bx_leds[] = {
{
.bit = NIC78BX_USER1_GREEN_LED,
.mask = NIC78BX_USER1_LED_MASK,
.cdev = {
.name = "nilrt:green:user1",
.max_brightness = 1,
.brightness_set = nic78bx_brightness_set,
.brightness_get = nic78bx_brightness_get,
}
},
{
.bit = NIC78BX_USER1_YELLOW_LED,
.mask = NIC78BX_USER1_LED_MASK,
.cdev = {
.name = "nilrt:yellow:user1",
.max_brightness = 1,
.brightness_set = nic78bx_brightness_set,
.brightness_get = nic78bx_brightness_get,
}
},
{
.bit = NIC78BX_USER2_GREEN_LED,
.mask = NIC78BX_USER2_LED_MASK,
.cdev = {
.name = "nilrt:green:user2",
.max_brightness = 1,
.brightness_set = nic78bx_brightness_set,
.brightness_get = nic78bx_brightness_get,
}
},
{
.bit = NIC78BX_USER2_YELLOW_LED,
.mask = NIC78BX_USER2_LED_MASK,
.cdev = {
.name = "nilrt:yellow:user2",
.max_brightness = 1,
.brightness_set = nic78bx_brightness_set,
.brightness_get = nic78bx_brightness_get,
}
}
};
static int nic78bx_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct nic78bx_led_data *led_data;
struct resource *io_rc;
int ret, i;
led_data = devm_kzalloc(dev, sizeof(*led_data), GFP_KERNEL);
if (!led_data)
return -ENOMEM;
led_data->pdev = pdev;
platform_set_drvdata(pdev, led_data);
io_rc = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (!io_rc) {
dev_err(dev, "missing IO resources\n");
return -EINVAL;
}
if (resource_size(io_rc) < NIC78BX_USER_LED_IO_SIZE) {
dev_err(dev, "IO region too small\n");
return -EINVAL;
}
if (!devm_request_region(dev, io_rc->start, resource_size(io_rc),
KBUILD_MODNAME)) {
dev_err(dev, "failed to get IO region\n");
return -EBUSY;
}
led_data->io_base = io_rc->start;
spin_lock_init(&led_data->lock);
for (i = 0; i < ARRAY_SIZE(nic78bx_leds); i++) {
nic78bx_leds[i].data = led_data;
ret = devm_led_classdev_register(dev, &nic78bx_leds[i].cdev);
if (ret)
return ret;
}
/* Unlock LED register */
outb(NIC78BX_UNLOCK_VALUE,
led_data->io_base + NIC78BX_LOCK_REG_OFFSET);
return ret;
}
static int nic78bx_remove(struct platform_device *pdev)
{
struct nic78bx_led_data *led_data = platform_get_drvdata(pdev);
/* Lock LED register */
outb(NIC78BX_LOCK_VALUE,
led_data->io_base + NIC78BX_LOCK_REG_OFFSET);
return 0;
}
static const struct acpi_device_id led_device_ids[] = {
{"NIC78B3", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, led_device_ids);
static struct platform_driver led_driver = {
.probe = nic78bx_probe,
.remove = nic78bx_remove,
.driver = {
.name = KBUILD_MODNAME,
.acpi_match_table = ACPI_PTR(led_device_ids),
},
};
module_platform_driver(led_driver);
MODULE_DESCRIPTION("National Instruments PXI User LEDs driver");
MODULE_AUTHOR("Hui Chun Ong <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/leds/leds-nic78bx.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* leds-blinkm.c
* (c) Jan-Simon Möller ([email protected])
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/sysfs.h>
#include <linux/printk.h>
#include <linux/pm_runtime.h>
#include <linux/leds.h>
#include <linux/delay.h>
/* Addresses to scan - BlinkM is on 0x09 by default*/
static const unsigned short normal_i2c[] = { 0x09, I2C_CLIENT_END };
static int blinkm_transfer_hw(struct i2c_client *client, int cmd);
static int blinkm_test_run(struct i2c_client *client);
struct blinkm_led {
struct i2c_client *i2c_client;
struct led_classdev led_cdev;
int id;
};
#define cdev_to_blmled(c) container_of(c, struct blinkm_led, led_cdev)
struct blinkm_data {
struct i2c_client *i2c_client;
struct mutex update_lock;
/* used for led class interface */
struct blinkm_led blinkm_leds[3];
/* used for "blinkm" sysfs interface */
u8 red; /* color red */
u8 green; /* color green */
u8 blue; /* color blue */
/* next values to use for transfer */
u8 next_red; /* color red */
u8 next_green; /* color green */
u8 next_blue; /* color blue */
/* internal use */
u8 args[7]; /* set of args for transmission */
u8 i2c_addr; /* i2c addr */
u8 fw_ver; /* firmware version */
/* used, but not from userspace */
u8 hue; /* HSB hue */
u8 saturation; /* HSB saturation */
u8 brightness; /* HSB brightness */
u8 next_hue; /* HSB hue */
u8 next_saturation; /* HSB saturation */
u8 next_brightness; /* HSB brightness */
/* currently unused / todo */
u8 fade_speed; /* fade speed 1 - 255 */
s8 time_adjust; /* time adjust -128 - 127 */
u8 fade:1; /* fade on = 1, off = 0 */
u8 rand:1; /* rand fade mode on = 1 */
u8 script_id; /* script ID */
u8 script_repeats; /* repeats of script */
u8 script_startline; /* line to start */
};
/* Colors */
#define RED 0
#define GREEN 1
#define BLUE 2
/* mapping command names to cmd chars - see datasheet */
#define BLM_GO_RGB 0
#define BLM_FADE_RGB 1
#define BLM_FADE_HSB 2
#define BLM_FADE_RAND_RGB 3
#define BLM_FADE_RAND_HSB 4
#define BLM_PLAY_SCRIPT 5
#define BLM_STOP_SCRIPT 6
#define BLM_SET_FADE_SPEED 7
#define BLM_SET_TIME_ADJ 8
#define BLM_GET_CUR_RGB 9
#define BLM_WRITE_SCRIPT_LINE 10
#define BLM_READ_SCRIPT_LINE 11
#define BLM_SET_SCRIPT_LR 12 /* Length & Repeats */
#define BLM_SET_ADDR 13
#define BLM_GET_ADDR 14
#define BLM_GET_FW_VER 15
#define BLM_SET_STARTUP_PARAM 16
/* BlinkM Commands
* as extracted out of the datasheet:
*
* cmdchar = command (ascii)
* cmdbyte = command in hex
* nr_args = number of arguments (to send)
* nr_ret = number of return values (to read)
* dir = direction (0 = read, 1 = write, 2 = both)
*
*/
static const struct {
char cmdchar;
u8 cmdbyte;
u8 nr_args;
u8 nr_ret;
u8 dir:2;
} blinkm_cmds[17] = {
/* cmdchar, cmdbyte, nr_args, nr_ret, dir */
{ 'n', 0x6e, 3, 0, 1},
{ 'c', 0x63, 3, 0, 1},
{ 'h', 0x68, 3, 0, 1},
{ 'C', 0x43, 3, 0, 1},
{ 'H', 0x48, 3, 0, 1},
{ 'p', 0x70, 3, 0, 1},
{ 'o', 0x6f, 0, 0, 1},
{ 'f', 0x66, 1, 0, 1},
{ 't', 0x74, 1, 0, 1},
{ 'g', 0x67, 0, 3, 0},
{ 'W', 0x57, 7, 0, 1},
{ 'R', 0x52, 2, 5, 2},
{ 'L', 0x4c, 3, 0, 1},
{ 'A', 0x41, 4, 0, 1},
{ 'a', 0x61, 0, 1, 0},
{ 'Z', 0x5a, 0, 1, 0},
{ 'B', 0x42, 5, 0, 1},
};
static ssize_t show_color_common(struct device *dev, char *buf, int color)
{
struct i2c_client *client;
struct blinkm_data *data;
int ret;
client = to_i2c_client(dev);
data = i2c_get_clientdata(client);
ret = blinkm_transfer_hw(client, BLM_GET_CUR_RGB);
if (ret < 0)
return ret;
switch (color) {
case RED:
return sysfs_emit(buf, "%02X\n", data->red);
case GREEN:
return sysfs_emit(buf, "%02X\n", data->green);
case BLUE:
return sysfs_emit(buf, "%02X\n", data->blue);
default:
return -EINVAL;
}
return -EINVAL;
}
static int store_color_common(struct device *dev, const char *buf, int color)
{
struct i2c_client *client;
struct blinkm_data *data;
int ret;
u8 value;
client = to_i2c_client(dev);
data = i2c_get_clientdata(client);
ret = kstrtou8(buf, 10, &value);
if (ret < 0) {
dev_err(dev, "BlinkM: value too large!\n");
return ret;
}
switch (color) {
case RED:
data->next_red = value;
break;
case GREEN:
data->next_green = value;
break;
case BLUE:
data->next_blue = value;
break;
default:
return -EINVAL;
}
dev_dbg(dev, "next_red = %d, next_green = %d, next_blue = %d\n",
data->next_red, data->next_green, data->next_blue);
/* if mode ... */
ret = blinkm_transfer_hw(client, BLM_GO_RGB);
if (ret < 0) {
dev_err(dev, "BlinkM: can't set RGB\n");
return ret;
}
return 0;
}
static ssize_t red_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return show_color_common(dev, buf, RED);
}
static ssize_t red_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int ret;
ret = store_color_common(dev, buf, RED);
if (ret < 0)
return ret;
return count;
}
static DEVICE_ATTR_RW(red);
static ssize_t green_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return show_color_common(dev, buf, GREEN);
}
static ssize_t green_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int ret;
ret = store_color_common(dev, buf, GREEN);
if (ret < 0)
return ret;
return count;
}
static DEVICE_ATTR_RW(green);
static ssize_t blue_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return show_color_common(dev, buf, BLUE);
}
static ssize_t blue_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int ret;
ret = store_color_common(dev, buf, BLUE);
if (ret < 0)
return ret;
return count;
}
static DEVICE_ATTR_RW(blue);
static ssize_t test_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf,
"#Write into test to start test sequence!#\n");
}
static ssize_t test_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client;
int ret;
client = to_i2c_client(dev);
/*test */
ret = blinkm_test_run(client);
if (ret < 0)
return ret;
return count;
}
static DEVICE_ATTR_RW(test);
/* TODO: HSB, fade, timeadj, script ... */
static struct attribute *blinkm_attrs[] = {
&dev_attr_red.attr,
&dev_attr_green.attr,
&dev_attr_blue.attr,
&dev_attr_test.attr,
NULL,
};
static const struct attribute_group blinkm_group = {
.name = "blinkm",
.attrs = blinkm_attrs,
};
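/*
* Transmit a command to the BlinkM: the command byte is written first,
* followed by its arguments as individual SMBus byte writes.
*/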
static int blinkm_write(struct i2c_client *client, int cmd, u8 *arg)
{
int result;
int i;
int arglen = blinkm_cmds[cmd].nr_args;
/* write out cmd to blinkm - always / default step */
result = i2c_smbus_write_byte(client, blinkm_cmds[cmd].cmdbyte);
if (result < 0)
return result;
/* no args to write out */
if (arglen == 0)
return 0;
for (i = 0; i < arglen; i++) {
/* repeat for arglen */
result = i2c_smbus_write_byte(client, arg[i]);
if (result < 0)
return result;
}
return 0;
}
static int blinkm_read(struct i2c_client *client, int cmd, u8 *arg)
{
int result;
int i;
int retlen = blinkm_cmds[cmd].nr_ret;
for (i = 0; i < retlen; i++) {
/* repeat for retlen */
result = i2c_smbus_read_byte(client);
if (result < 0)
return result;
arg[i] = result;
}
return 0;
}
static int blinkm_transfer_hw(struct i2c_client *client, int cmd)
{
/* the protocol is simple but non-standard:
* e.g. cmd 'g' (= 0x67) for "get device address"
* - which defaults to 0x09 - would be the sequence:
* a) write 0x67 to the device (byte write)
* b) read the value (0x09) back right after (byte read)
*
* Watch out for "unfinished" sequences (i.e. not enough reads
* or writes after a command); they will make the BlinkM misbehave.
* Sequence is key here.
*/
/* args / return are in private data struct */
struct blinkm_data *data = i2c_get_clientdata(client);
/* We start hardware transfers which are not to be
* mixed with other commands. Acquire the lock now. */
if (mutex_lock_interruptible(&data->update_lock) < 0)
return -EAGAIN;
/* switch cmd - usually write before reads */
switch (cmd) {
case BLM_FADE_RAND_RGB:
case BLM_GO_RGB:
case BLM_FADE_RGB:
data->args[0] = data->next_red;
data->args[1] = data->next_green;
data->args[2] = data->next_blue;
blinkm_write(client, cmd, data->args);
data->red = data->args[0];
data->green = data->args[1];
data->blue = data->args[2];
break;
case BLM_FADE_HSB:
case BLM_FADE_RAND_HSB:
data->args[0] = data->next_hue;
data->args[1] = data->next_saturation;
data->args[2] = data->next_brightness;
blinkm_write(client, cmd, data->args);
data->hue = data->next_hue;
data->saturation = data->next_saturation;
data->brightness = data->next_brightness;
break;
case BLM_PLAY_SCRIPT:
data->args[0] = data->script_id;
data->args[1] = data->script_repeats;
data->args[2] = data->script_startline;
blinkm_write(client, cmd, data->args);
break;
case BLM_STOP_SCRIPT:
blinkm_write(client, cmd, NULL);
break;
case BLM_GET_CUR_RGB:
data->args[0] = data->red;
data->args[1] = data->green;
data->args[2] = data->blue;
blinkm_write(client, cmd, NULL);
blinkm_read(client, cmd, data->args);
data->red = data->args[0];
data->green = data->args[1];
data->blue = data->args[2];
break;
case BLM_GET_ADDR:
data->args[0] = data->i2c_addr;
blinkm_write(client, cmd, NULL);
blinkm_read(client, cmd, data->args);
data->i2c_addr = data->args[0];
break;
case BLM_SET_TIME_ADJ:
case BLM_SET_FADE_SPEED:
case BLM_READ_SCRIPT_LINE:
case BLM_WRITE_SCRIPT_LINE:
case BLM_SET_SCRIPT_LR:
case BLM_SET_ADDR:
case BLM_GET_FW_VER:
case BLM_SET_STARTUP_PARAM:
dev_err(&client->dev,
"BlinkM: cmd %d not implemented yet.\n", cmd);
break;
default:
dev_err(&client->dev, "BlinkM: unknown command %d\n", cmd);
mutex_unlock(&data->update_lock);
return -EINVAL;
} /* end switch(cmd) */
/* transfers done, unlock */
mutex_unlock(&data->update_lock);
return 0;
}
static int blinkm_led_common_set(struct led_classdev *led_cdev,
enum led_brightness value, int color)
{
/* led_brightness is 0, 127 or 255 - we just use it here as-is */
struct blinkm_led *led = cdev_to_blmled(led_cdev);
struct blinkm_data *data = i2c_get_clientdata(led->i2c_client);
switch (color) {
case RED:
/* bail out if there's no change */
if (data->next_red == (u8) value)
return 0;
data->next_red = (u8) value;
break;
case GREEN:
/* bail out if there's no change */
if (data->next_green == (u8) value)
return 0;
data->next_green = (u8) value;
break;
case BLUE:
/* bail out if there's no change */
if (data->next_blue == (u8) value)
return 0;
data->next_blue = (u8) value;
break;
default:
dev_err(&led->i2c_client->dev, "BlinkM: unknown color.\n");
return -EINVAL;
}
blinkm_transfer_hw(led->i2c_client, BLM_GO_RGB);
dev_dbg(&led->i2c_client->dev,
"# DONE # next_red = %d, next_green = %d,"
" next_blue = %d\n",
data->next_red, data->next_green,
data->next_blue);
return 0;
}
static int blinkm_led_red_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
return blinkm_led_common_set(led_cdev, value, RED);
}
static int blinkm_led_green_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
return blinkm_led_common_set(led_cdev, value, GREEN);
}
static int blinkm_led_blue_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
return blinkm_led_common_set(led_cdev, value, BLUE);
}
static void blinkm_init_hw(struct i2c_client *client)
{
blinkm_transfer_hw(client, BLM_STOP_SCRIPT);
blinkm_transfer_hw(client, BLM_GO_RGB);
}
static int blinkm_test_run(struct i2c_client *client)
{
int ret;
struct blinkm_data *data = i2c_get_clientdata(client);
data->next_red = 0x01;
data->next_green = 0x05;
data->next_blue = 0x10;
ret = blinkm_transfer_hw(client, BLM_GO_RGB);
if (ret < 0)
return ret;
msleep(2000);
data->next_red = 0x25;
data->next_green = 0x10;
data->next_blue = 0x31;
ret = blinkm_transfer_hw(client, BLM_FADE_RGB);
if (ret < 0)
return ret;
msleep(2000);
data->next_hue = 0x50;
data->next_saturation = 0x10;
data->next_brightness = 0x20;
ret = blinkm_transfer_hw(client, BLM_FADE_HSB);
if (ret < 0)
return ret;
msleep(2000);
return 0;
}
/* Return 0 if detection is successful, -ENODEV otherwise */
static int blinkm_detect(struct i2c_client *client, struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
int ret;
int count = 99;
u8 tmpargs[7];
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA
| I2C_FUNC_SMBUS_WORD_DATA
| I2C_FUNC_SMBUS_WRITE_BYTE))
return -ENODEV;
/* Now, we do the remaining detection. Simple for now. */
/* We might need more guards to protect other i2c slaves */
/* make sure the blinkM is balanced (read/writes) */
while (count > 0) {
ret = blinkm_write(client, BLM_GET_ADDR, NULL);
if (ret)
return ret;
usleep_range(5000, 10000);
ret = blinkm_read(client, BLM_GET_ADDR, tmpargs);
if (ret)
return ret;
usleep_range(5000, 10000);
if (tmpargs[0] == 0x09)
count = 0;
count--;
}
/* Step 1: Read BlinkM address back - cmd_char 'a' */
ret = blinkm_write(client, BLM_GET_ADDR, NULL);
if (ret < 0)
return ret;
usleep_range(20000, 30000); /* allow a small delay */
ret = blinkm_read(client, BLM_GET_ADDR, tmpargs);
if (ret < 0)
return ret;
if (tmpargs[0] != 0x09) {
dev_err(&client->dev, "enodev DEV ADDR = 0x%02X\n", tmpargs[0]);
return -ENODEV;
}
strscpy(info->type, "blinkm", I2C_NAME_SIZE);
return 0;
}
static int blinkm_probe(struct i2c_client *client)
{
struct blinkm_data *data;
struct blinkm_led *led[3];
int err, i;
char blinkm_led_name[28];
data = devm_kzalloc(&client->dev,
sizeof(struct blinkm_data), GFP_KERNEL);
if (!data) {
err = -ENOMEM;
goto exit;
}
data->i2c_addr = 0x08;
/* i2c addr - use fake addr of 0x08 initially (real is 0x09) */
data->fw_ver = 0xfe;
/* firmware version - use fake until we read real value
* (currently broken - BlinkM confused!) */
data->script_id = 0x01;
data->i2c_client = client;
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
/* Register sysfs hooks */
err = sysfs_create_group(&client->dev.kobj, &blinkm_group);
if (err < 0) {
dev_err(&client->dev, "couldn't register sysfs group\n");
goto exit;
}
for (i = 0; i < 3; i++) {
/* RED = 0, GREEN = 1, BLUE = 2 */
led[i] = &data->blinkm_leds[i];
led[i]->i2c_client = client;
led[i]->id = i;
led[i]->led_cdev.max_brightness = 255;
led[i]->led_cdev.flags = LED_CORE_SUSPENDRESUME;
switch (i) {
case RED:
snprintf(blinkm_led_name, sizeof(blinkm_led_name),
"blinkm-%d-%d-red",
client->adapter->nr,
client->addr);
led[i]->led_cdev.name = blinkm_led_name;
led[i]->led_cdev.brightness_set_blocking =
blinkm_led_red_set;
err = led_classdev_register(&client->dev,
&led[i]->led_cdev);
if (err < 0) {
dev_err(&client->dev,
"couldn't register LED %s\n",
led[i]->led_cdev.name);
goto failred;
}
break;
case GREEN:
snprintf(blinkm_led_name, sizeof(blinkm_led_name),
"blinkm-%d-%d-green",
client->adapter->nr,
client->addr);
led[i]->led_cdev.name = blinkm_led_name;
led[i]->led_cdev.brightness_set_blocking =
blinkm_led_green_set;
err = led_classdev_register(&client->dev,
&led[i]->led_cdev);
if (err < 0) {
dev_err(&client->dev,
"couldn't register LED %s\n",
led[i]->led_cdev.name);
goto failgreen;
}
break;
case BLUE:
snprintf(blinkm_led_name, sizeof(blinkm_led_name),
"blinkm-%d-%d-blue",
client->adapter->nr,
client->addr);
led[i]->led_cdev.name = blinkm_led_name;
led[i]->led_cdev.brightness_set_blocking =
blinkm_led_blue_set;
err = led_classdev_register(&client->dev,
&led[i]->led_cdev);
if (err < 0) {
dev_err(&client->dev,
"couldn't register LED %s\n",
led[i]->led_cdev.name);
goto failblue;
}
break;
} /* end switch */
} /* end for */
/* Initialize the blinkm */
blinkm_init_hw(client);
return 0;
failblue:
led_classdev_unregister(&led[GREEN]->led_cdev);
failgreen:
led_classdev_unregister(&led[RED]->led_cdev);
failred:
sysfs_remove_group(&client->dev.kobj, &blinkm_group);
exit:
return err;
}
static void blinkm_remove(struct i2c_client *client)
{
struct blinkm_data *data = i2c_get_clientdata(client);
int ret = 0;
int i;
/* unregister the class devices; this also flushes pending brightness work */
for (i = 0; i < 3; i++)
led_classdev_unregister(&data->blinkm_leds[i].led_cdev);
/* reset rgb */
data->next_red = 0x00;
data->next_green = 0x00;
data->next_blue = 0x00;
ret = blinkm_transfer_hw(client, BLM_FADE_RGB);
if (ret < 0)
dev_err(&client->dev, "Failure in blinkm_remove ignored. Continuing.\n");
/* reset hsb */
data->next_hue = 0x00;
data->next_saturation = 0x00;
data->next_brightness = 0x00;
ret = blinkm_transfer_hw(client, BLM_FADE_HSB);
if (ret < 0)
dev_err(&client->dev, "Failure in blinkm_remove ignored. Continuing.\n");
/* red fade to off */
data->next_red = 0xff;
ret = blinkm_transfer_hw(client, BLM_GO_RGB);
if (ret < 0)
dev_err(&client->dev, "Failure in blinkm_remove ignored. Continuing.\n");
/* off */
data->next_red = 0x00;
ret = blinkm_transfer_hw(client, BLM_FADE_RGB);
if (ret < 0)
dev_err(&client->dev, "Failure in blinkm_remove ignored. Continuing.\n");
sysfs_remove_group(&client->dev.kobj, &blinkm_group);
}
static const struct i2c_device_id blinkm_id[] = {
{"blinkm", 0},
{}
};
MODULE_DEVICE_TABLE(i2c, blinkm_id);
/* This is the driver that will be inserted */
static struct i2c_driver blinkm_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "blinkm",
},
.probe = blinkm_probe,
.remove = blinkm_remove,
.id_table = blinkm_id,
.detect = blinkm_detect,
.address_list = normal_i2c,
};
module_i2c_driver(blinkm_driver);
MODULE_AUTHOR("Jan-Simon Moeller <[email protected]>");
MODULE_DESCRIPTION("BlinkM RGB LED driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/leds/leds-blinkm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* TI LP8788 MFD - keyled driver
*
* Copyright 2012 Texas Instruments
*
* Author: Milo(Woogyom) Kim <[email protected]>
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/leds.h>
#include <linux/mutex.h>
#include <linux/mfd/lp8788.h>
#include <linux/mfd/lp8788-isink.h>
#define MAX_BRIGHTNESS LP8788_ISINK_MAX_PWM
#define DEFAULT_LED_NAME "keyboard-backlight"
struct lp8788_led {
struct lp8788 *lp;
struct mutex lock;
struct led_classdev led_dev;
enum lp8788_isink_number isink_num;
int on;
};
struct lp8788_led_config {
enum lp8788_isink_scale scale;
enum lp8788_isink_number num;
int iout;
};
static struct lp8788_led_config default_led_config = {
.scale = LP8788_ISINK_SCALE_100mA,
.num = LP8788_ISINK_3,
.iout = 0,
};
static int lp8788_led_init_device(struct lp8788_led *led,
struct lp8788_led_platform_data *pdata)
{
struct lp8788_led_config *cfg = &default_led_config;
u8 addr, mask, val;
int ret;
if (pdata) {
cfg->scale = pdata->scale;
cfg->num = pdata->num;
cfg->iout = pdata->iout_code;
}
led->isink_num = cfg->num;
/* scale configuration */
addr = LP8788_ISINK_CTRL;
mask = 1 << (cfg->num + LP8788_ISINK_SCALE_OFFSET);
val = cfg->scale << (cfg->num + LP8788_ISINK_SCALE_OFFSET);
ret = lp8788_update_bits(led->lp, addr, mask, val);
if (ret)
return ret;
/* current configuration */
addr = lp8788_iout_addr[cfg->num];
mask = lp8788_iout_mask[cfg->num];
val = cfg->iout;
return lp8788_update_bits(led->lp, addr, mask, val);
}
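/* Switch the selected current sink on or off and cache the new state. */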
static int lp8788_led_enable(struct lp8788_led *led,
enum lp8788_isink_number num, int on)
{
int ret;
u8 mask = 1 << num;
u8 val = on << num;
ret = lp8788_update_bits(led->lp, LP8788_ISINK_CTRL, mask, val);
if (ret == 0)
led->on = on;
return ret;
}
static int lp8788_brightness_set(struct led_classdev *led_cdev,
enum led_brightness val)
{
struct lp8788_led *led =
container_of(led_cdev, struct lp8788_led, led_dev);
enum lp8788_isink_number num = led->isink_num;
int enable, ret;
mutex_lock(&led->lock);
switch (num) {
case LP8788_ISINK_1:
case LP8788_ISINK_2:
case LP8788_ISINK_3:
ret = lp8788_write_byte(led->lp, lp8788_pwm_addr[num], val);
if (ret < 0)
goto unlock;
break;
default:
mutex_unlock(&led->lock);
return -EINVAL;
}
enable = (val > 0) ? 1 : 0;
if (enable != led->on)
ret = lp8788_led_enable(led, num, enable);
unlock:
mutex_unlock(&led->lock);
return ret;
}
static int lp8788_led_probe(struct platform_device *pdev)
{
struct lp8788 *lp = dev_get_drvdata(pdev->dev.parent);
struct lp8788_led_platform_data *led_pdata;
struct lp8788_led *led;
struct device *dev = &pdev->dev;
int ret;
led = devm_kzalloc(dev, sizeof(struct lp8788_led), GFP_KERNEL);
if (!led)
return -ENOMEM;
led->lp = lp;
led->led_dev.max_brightness = MAX_BRIGHTNESS;
led->led_dev.brightness_set_blocking = lp8788_brightness_set;
led_pdata = lp->pdata ? lp->pdata->led_pdata : NULL;
if (!led_pdata || !led_pdata->name)
led->led_dev.name = DEFAULT_LED_NAME;
else
led->led_dev.name = led_pdata->name;
mutex_init(&led->lock);
ret = lp8788_led_init_device(led, led_pdata);
if (ret) {
dev_err(dev, "led init device err: %d\n", ret);
return ret;
}
ret = devm_led_classdev_register(dev, &led->led_dev);
if (ret) {
dev_err(dev, "led register err: %d\n", ret);
return ret;
}
return 0;
}
static struct platform_driver lp8788_led_driver = {
.probe = lp8788_led_probe,
.driver = {
.name = LP8788_DEV_KEYLED,
},
};
module_platform_driver(lp8788_led_driver);
MODULE_DESCRIPTION("Texas Instruments LP8788 Keyboard LED Driver");
MODULE_AUTHOR("Milo Kim");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:lp8788-keyled");
| linux-master | drivers/leds/leds-lp8788.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* LED Class Core
*
* Copyright 2005-2006 Openedhand Ltd.
*
* Author: Richard Purdie <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/leds.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/property.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <uapi/linux/uleds.h>
#include "leds.h"
DECLARE_RWSEM(leds_list_lock);
EXPORT_SYMBOL_GPL(leds_list_lock);
LIST_HEAD(leds_list);
EXPORT_SYMBOL_GPL(leds_list);
const char * const led_colors[LED_COLOR_ID_MAX] = {
[LED_COLOR_ID_WHITE] = "white",
[LED_COLOR_ID_RED] = "red",
[LED_COLOR_ID_GREEN] = "green",
[LED_COLOR_ID_BLUE] = "blue",
[LED_COLOR_ID_AMBER] = "amber",
[LED_COLOR_ID_VIOLET] = "violet",
[LED_COLOR_ID_YELLOW] = "yellow",
[LED_COLOR_ID_IR] = "ir",
[LED_COLOR_ID_MULTI] = "multicolor",
[LED_COLOR_ID_RGB] = "rgb",
};
EXPORT_SYMBOL_GPL(led_colors);
static int __led_set_brightness(struct led_classdev *led_cdev, unsigned int value)
{
if (!led_cdev->brightness_set)
return -ENOTSUPP;
led_cdev->brightness_set(led_cdev, value);
return 0;
}
static int __led_set_brightness_blocking(struct led_classdev *led_cdev, unsigned int value)
{
if (!led_cdev->brightness_set_blocking)
return -ENOTSUPP;
return led_cdev->brightness_set_blocking(led_cdev, value);
}
static void led_timer_function(struct timer_list *t)
{
struct led_classdev *led_cdev = from_timer(led_cdev, t, blink_timer);
unsigned long brightness;
unsigned long delay;
if (!led_cdev->blink_delay_on || !led_cdev->blink_delay_off) {
led_set_brightness_nosleep(led_cdev, LED_OFF);
clear_bit(LED_BLINK_SW, &led_cdev->work_flags);
return;
}
if (test_and_clear_bit(LED_BLINK_ONESHOT_STOP,
&led_cdev->work_flags)) {
clear_bit(LED_BLINK_SW, &led_cdev->work_flags);
return;
}
brightness = led_get_brightness(led_cdev);
if (!brightness) {
/* Time to switch the LED on. */
if (test_and_clear_bit(LED_BLINK_BRIGHTNESS_CHANGE,
&led_cdev->work_flags))
brightness = led_cdev->new_blink_brightness;
else
brightness = led_cdev->blink_brightness;
delay = led_cdev->blink_delay_on;
} else {
/* Store the current brightness value to be able
* to restore it when the delay_off period is over.
*/
led_cdev->blink_brightness = brightness;
brightness = LED_OFF;
delay = led_cdev->blink_delay_off;
}
led_set_brightness_nosleep(led_cdev, brightness);
/* Return in next iteration if led is in one-shot mode and we are in
* the final blink state so that the led is toggled each delay_on +
* delay_off milliseconds in worst case.
*/
if (test_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags)) {
if (test_bit(LED_BLINK_INVERT, &led_cdev->work_flags)) {
if (brightness)
set_bit(LED_BLINK_ONESHOT_STOP,
&led_cdev->work_flags);
} else {
if (!brightness)
set_bit(LED_BLINK_ONESHOT_STOP,
&led_cdev->work_flags);
}
}
mod_timer(&led_cdev->blink_timer, jiffies + msecs_to_jiffies(delay));
}
static void set_brightness_delayed_set_brightness(struct led_classdev *led_cdev,
unsigned int value)
{
int ret = 0;
ret = __led_set_brightness(led_cdev, value);
if (ret == -ENOTSUPP)
ret = __led_set_brightness_blocking(led_cdev, value);
if (ret < 0 &&
/* LED HW might have been unplugged, therefore don't warn */
!(ret == -ENODEV && (led_cdev->flags & LED_UNREGISTERING) &&
(led_cdev->flags & LED_HW_PLUGGABLE)))
dev_err(led_cdev->dev,
"Setting an LED's brightness failed (%d)\n", ret);
}
static void set_brightness_delayed(struct work_struct *ws)
{
struct led_classdev *led_cdev =
container_of(ws, struct led_classdev, set_brightness_work);
if (test_and_clear_bit(LED_BLINK_DISABLE, &led_cdev->work_flags)) {
led_stop_software_blink(led_cdev);
set_bit(LED_SET_BRIGHTNESS_OFF, &led_cdev->work_flags);
}
/*
* Triggers may call led_set_brightness(LED_OFF),
* led_set_brightness(LED_FULL) in quick succession to disable blinking
* and turn the LED on. Both actions may have been scheduled to run
* before this work item runs once. To make sure this works properly
* handle LED_SET_BRIGHTNESS_OFF first.
*/
if (test_and_clear_bit(LED_SET_BRIGHTNESS_OFF, &led_cdev->work_flags))
set_brightness_delayed_set_brightness(led_cdev, LED_OFF);
if (test_and_clear_bit(LED_SET_BRIGHTNESS, &led_cdev->work_flags))
set_brightness_delayed_set_brightness(led_cdev, led_cdev->delayed_set_value);
if (test_and_clear_bit(LED_SET_BLINK, &led_cdev->work_flags)) {
unsigned long delay_on = led_cdev->delayed_delay_on;
unsigned long delay_off = led_cdev->delayed_delay_off;
led_blink_set(led_cdev, &delay_on, &delay_off);
}
}
static void led_set_software_blink(struct led_classdev *led_cdev,
unsigned long delay_on,
unsigned long delay_off)
{
int current_brightness;
current_brightness = led_get_brightness(led_cdev);
if (current_brightness)
led_cdev->blink_brightness = current_brightness;
if (!led_cdev->blink_brightness)
led_cdev->blink_brightness = led_cdev->max_brightness;
led_cdev->blink_delay_on = delay_on;
led_cdev->blink_delay_off = delay_off;
/* never on - just set to off */
if (!delay_on) {
led_set_brightness_nosleep(led_cdev, LED_OFF);
return;
}
/* never off - just set to brightness */
if (!delay_off) {
led_set_brightness_nosleep(led_cdev,
led_cdev->blink_brightness);
return;
}
set_bit(LED_BLINK_SW, &led_cdev->work_flags);
mod_timer(&led_cdev->blink_timer, jiffies + 1);
}
static void led_blink_setup(struct led_classdev *led_cdev,
unsigned long *delay_on,
unsigned long *delay_off)
{
if (!test_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags) &&
led_cdev->blink_set &&
!led_cdev->blink_set(led_cdev, delay_on, delay_off))
return;
/* blink with 1 Hz as default if nothing specified */
if (!*delay_on && !*delay_off)
*delay_on = *delay_off = 500;
led_set_software_blink(led_cdev, *delay_on, *delay_off);
}
void led_init_core(struct led_classdev *led_cdev)
{
INIT_WORK(&led_cdev->set_brightness_work, set_brightness_delayed);
timer_setup(&led_cdev->blink_timer, led_timer_function, 0);
}
EXPORT_SYMBOL_GPL(led_init_core);
void led_blink_set(struct led_classdev *led_cdev,
unsigned long *delay_on,
unsigned long *delay_off)
{
del_timer_sync(&led_cdev->blink_timer);
clear_bit(LED_BLINK_SW, &led_cdev->work_flags);
clear_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags);
clear_bit(LED_BLINK_ONESHOT_STOP, &led_cdev->work_flags);
led_blink_setup(led_cdev, delay_on, delay_off);
}
EXPORT_SYMBOL_GPL(led_blink_set);
void led_blink_set_oneshot(struct led_classdev *led_cdev,
unsigned long *delay_on,
unsigned long *delay_off,
int invert)
{
if (test_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags) &&
timer_pending(&led_cdev->blink_timer))
return;
set_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags);
clear_bit(LED_BLINK_ONESHOT_STOP, &led_cdev->work_flags);
if (invert)
set_bit(LED_BLINK_INVERT, &led_cdev->work_flags);
else
clear_bit(LED_BLINK_INVERT, &led_cdev->work_flags);
led_blink_setup(led_cdev, delay_on, delay_off);
}
EXPORT_SYMBOL_GPL(led_blink_set_oneshot);
void led_blink_set_nosleep(struct led_classdev *led_cdev, unsigned long delay_on,
unsigned long delay_off)
{
/* If necessary delegate to a work queue task. */
if (led_cdev->blink_set && led_cdev->brightness_set_blocking) {
led_cdev->delayed_delay_on = delay_on;
led_cdev->delayed_delay_off = delay_off;
set_bit(LED_SET_BLINK, &led_cdev->work_flags);
schedule_work(&led_cdev->set_brightness_work);
return;
}
led_blink_set(led_cdev, &delay_on, &delay_off);
}
EXPORT_SYMBOL_GPL(led_blink_set_nosleep);
void led_stop_software_blink(struct led_classdev *led_cdev)
{
del_timer_sync(&led_cdev->blink_timer);
led_cdev->blink_delay_on = 0;
led_cdev->blink_delay_off = 0;
clear_bit(LED_BLINK_SW, &led_cdev->work_flags);
}
EXPORT_SYMBOL_GPL(led_stop_software_blink);
void led_set_brightness(struct led_classdev *led_cdev, unsigned int brightness)
{
/*
* If software blink is active, delay brightness setting
* until the next timer tick.
*/
if (test_bit(LED_BLINK_SW, &led_cdev->work_flags)) {
/*
* If we need to disable soft blinking delegate this to the
* work queue task to avoid problems in case we are called
* from hard irq context.
*/
if (!brightness) {
set_bit(LED_BLINK_DISABLE, &led_cdev->work_flags);
schedule_work(&led_cdev->set_brightness_work);
} else {
set_bit(LED_BLINK_BRIGHTNESS_CHANGE,
&led_cdev->work_flags);
led_cdev->new_blink_brightness = brightness;
}
return;
}
led_set_brightness_nosleep(led_cdev, brightness);
}
EXPORT_SYMBOL_GPL(led_set_brightness);
void led_set_brightness_nopm(struct led_classdev *led_cdev, unsigned int value)
{
/* Use brightness_set op if available, it is guaranteed not to sleep */
if (!__led_set_brightness(led_cdev, value))
return;
/*
* Brightness setting can sleep, delegate it to a work queue task.
* value 0 / LED_OFF is special, since it also disables hw-blinking
* (sw-blink disable is handled in led_set_brightness()).
* To avoid a hw-blink-disable getting lost when a second brightness
* change is done immediately afterwards (before the work runs),
* it uses a separate work_flag.
*/
if (value) {
led_cdev->delayed_set_value = value;
set_bit(LED_SET_BRIGHTNESS, &led_cdev->work_flags);
} else {
clear_bit(LED_SET_BRIGHTNESS, &led_cdev->work_flags);
clear_bit(LED_SET_BLINK, &led_cdev->work_flags);
set_bit(LED_SET_BRIGHTNESS_OFF, &led_cdev->work_flags);
}
schedule_work(&led_cdev->set_brightness_work);
}
EXPORT_SYMBOL_GPL(led_set_brightness_nopm);
void led_set_brightness_nosleep(struct led_classdev *led_cdev, unsigned int value)
{
led_cdev->brightness = min(value, led_cdev->max_brightness);
if (led_cdev->flags & LED_SUSPENDED)
return;
led_set_brightness_nopm(led_cdev, led_cdev->brightness);
}
EXPORT_SYMBOL_GPL(led_set_brightness_nosleep);
int led_set_brightness_sync(struct led_classdev *led_cdev, unsigned int value)
{
if (led_cdev->blink_delay_on || led_cdev->blink_delay_off)
return -EBUSY;
led_cdev->brightness = min(value, led_cdev->max_brightness);
if (led_cdev->flags & LED_SUSPENDED)
return 0;
return __led_set_brightness_blocking(led_cdev, led_cdev->brightness);
}
EXPORT_SYMBOL_GPL(led_set_brightness_sync);
int led_update_brightness(struct led_classdev *led_cdev)
{
int ret = 0;
if (led_cdev->brightness_get) {
ret = led_cdev->brightness_get(led_cdev);
if (ret >= 0) {
led_cdev->brightness = ret;
return 0;
}
}
return ret;
}
EXPORT_SYMBOL_GPL(led_update_brightness);
u32 *led_get_default_pattern(struct led_classdev *led_cdev, unsigned int *size)
{
struct fwnode_handle *fwnode = led_cdev->dev->fwnode;
u32 *pattern;
int count;
count = fwnode_property_count_u32(fwnode, "led-pattern");
if (count < 0)
return NULL;
pattern = kcalloc(count, sizeof(*pattern), GFP_KERNEL);
if (!pattern)
return NULL;
if (fwnode_property_read_u32_array(fwnode, "led-pattern", pattern, count)) {
kfree(pattern);
return NULL;
}
*size = count;
return pattern;
}
EXPORT_SYMBOL_GPL(led_get_default_pattern);
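/*
 * Illustrative example (not part of the original source): a firmware node
 * carrying a property such as
 *
 *     led-pattern = <0 500 255 500>;
 *
 * would be returned here as a 4-element u32 array, which a trigger can then
 * interpret as its default pattern (the exact meaning of the cells is up to
 * the trigger that consumes it).
 */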
/* Caller must ensure led_cdev->led_access held */
void led_sysfs_disable(struct led_classdev *led_cdev)
{
lockdep_assert_held(&led_cdev->led_access);
led_cdev->flags |= LED_SYSFS_DISABLE;
}
EXPORT_SYMBOL_GPL(led_sysfs_disable);
/* Caller must ensure led_cdev->led_access held */
void led_sysfs_enable(struct led_classdev *led_cdev)
{
lockdep_assert_held(&led_cdev->led_access);
led_cdev->flags &= ~LED_SYSFS_DISABLE;
}
EXPORT_SYMBOL_GPL(led_sysfs_enable);
static void led_parse_fwnode_props(struct device *dev,
struct fwnode_handle *fwnode,
struct led_properties *props)
{
int ret;
if (!fwnode)
return;
if (fwnode_property_present(fwnode, "label")) {
ret = fwnode_property_read_string(fwnode, "label", &props->label);
if (ret)
dev_err(dev, "Error parsing 'label' property (%d)\n", ret);
return;
}
if (fwnode_property_present(fwnode, "color")) {
ret = fwnode_property_read_u32(fwnode, "color", &props->color);
if (ret)
dev_err(dev, "Error parsing 'color' property (%d)\n", ret);
else if (props->color >= LED_COLOR_ID_MAX)
dev_err(dev, "LED color identifier out of range\n");
else
props->color_present = true;
}
if (!fwnode_property_present(fwnode, "function"))
return;
ret = fwnode_property_read_string(fwnode, "function", &props->function);
if (ret) {
dev_err(dev,
"Error parsing 'function' property (%d)\n",
ret);
}
if (!fwnode_property_present(fwnode, "function-enumerator"))
return;
ret = fwnode_property_read_u32(fwnode, "function-enumerator",
&props->func_enum);
if (ret) {
dev_err(dev,
"Error parsing 'function-enumerator' property (%d)\n",
ret);
} else {
props->func_enum_present = true;
}
}
int led_compose_name(struct device *dev, struct led_init_data *init_data,
char *led_classdev_name)
{
struct led_properties props = {};
struct fwnode_handle *fwnode = init_data->fwnode;
const char *devicename = init_data->devicename;
if (!led_classdev_name)
return -EINVAL;
led_parse_fwnode_props(dev, fwnode, &props);
/* We want to label LEDs that can produce the full range of colors
 * as RGB, not multicolor */
BUG_ON(props.color == LED_COLOR_ID_MULTI);
if (props.label) {
/*
* If init_data.devicename is NULL, then it indicates that
* DT label should be used as-is for LED class device name.
* Otherwise the label is prepended with devicename to compose
* the final LED class device name.
*/
if (!devicename) {
strscpy(led_classdev_name, props.label,
LED_MAX_NAME_SIZE);
} else {
snprintf(led_classdev_name, LED_MAX_NAME_SIZE, "%s:%s",
devicename, props.label);
}
} else if (props.function || props.color_present) {
char tmp_buf[LED_MAX_NAME_SIZE];
if (props.func_enum_present) {
snprintf(tmp_buf, LED_MAX_NAME_SIZE, "%s:%s-%d",
props.color_present ? led_colors[props.color] : "",
props.function ?: "", props.func_enum);
} else {
snprintf(tmp_buf, LED_MAX_NAME_SIZE, "%s:%s",
props.color_present ? led_colors[props.color] : "",
props.function ?: "");
}
if (init_data->devname_mandatory) {
snprintf(led_classdev_name, LED_MAX_NAME_SIZE, "%s:%s",
devicename, tmp_buf);
} else {
strscpy(led_classdev_name, tmp_buf, LED_MAX_NAME_SIZE);
}
} else if (init_data->default_label) {
if (!devicename) {
dev_err(dev, "Legacy LED naming requires devicename segment");
return -EINVAL;
}
snprintf(led_classdev_name, LED_MAX_NAME_SIZE, "%s:%s",
devicename, init_data->default_label);
} else if (is_of_node(fwnode)) {
strscpy(led_classdev_name, to_of_node(fwnode)->name,
LED_MAX_NAME_SIZE);
} else
return -EINVAL;
return 0;
}
EXPORT_SYMBOL_GPL(led_compose_name);
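/*
 * Worked example (illustrative): a fwnode with color = LED_COLOR_ID_RED,
 * function = "status" and function-enumerator = 1 composes the class device
 * name "red:status-1"; a node with only label = "front" and a devicename of
 * "mmc0" composes "mmc0:front".
 */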
enum led_default_state led_init_default_state_get(struct fwnode_handle *fwnode)
{
const char *state = NULL;
if (!fwnode_property_read_string(fwnode, "default-state", &state)) {
if (!strcmp(state, "keep"))
return LEDS_DEFSTATE_KEEP;
if (!strcmp(state, "on"))
return LEDS_DEFSTATE_ON;
}
return LEDS_DEFSTATE_OFF;
}
EXPORT_SYMBOL_GPL(led_init_default_state_get);
| linux-master | drivers/leds/led-core.c |
/*
* Bachmann ot200 leds driver.
*
* Author: Sebastian Andrzej Siewior <[email protected]>
* Christian Gmeiner <[email protected]>
*
* License: GPL as published by the FSF.
*/
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/leds.h>
#include <linux/io.h>
#include <linux/module.h>
struct ot200_led {
struct led_classdev cdev;
const char *name;
unsigned long port;
u8 mask;
};
/*
* The device has three leds on the back panel (led_err, led_init and led_run)
* and can handle up to seven leds on the front panel.
*/
static struct ot200_led leds[] = {
{
.name = "led_run",
.port = 0x5a,
.mask = BIT(0),
},
{
.name = "led_init",
.port = 0x5a,
.mask = BIT(1),
},
{
.name = "led_err",
.port = 0x5a,
.mask = BIT(2),
},
{
.name = "led_1",
.port = 0x49,
.mask = BIT(6),
},
{
.name = "led_2",
.port = 0x49,
.mask = BIT(5),
},
{
.name = "led_3",
.port = 0x49,
.mask = BIT(4),
},
{
.name = "led_4",
.port = 0x49,
.mask = BIT(3),
},
{
.name = "led_5",
.port = 0x49,
.mask = BIT(2),
},
{
.name = "led_6",
.port = 0x49,
.mask = BIT(1),
},
{
.name = "led_7",
.port = 0x49,
.mask = BIT(0),
}
};
static DEFINE_SPINLOCK(value_lock);
/*
* we need to store the current led states, as it is not
* possible to read the current led state via inb().
*/
static u8 leds_back;
static u8 leds_front;
static void ot200_led_brightness_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
struct ot200_led *led = container_of(led_cdev, struct ot200_led, cdev);
u8 *val;
unsigned long flags;
spin_lock_irqsave(&value_lock, flags);
if (led->port == 0x49)
val = &leds_front;
else if (led->port == 0x5a)
val = &leds_back;
else
BUG();
if (value == LED_OFF)
*val &= ~led->mask;
else
*val |= led->mask;
outb(*val, led->port);
spin_unlock_irqrestore(&value_lock, flags);
}
static int ot200_led_probe(struct platform_device *pdev)
{
int i;
int ret;
for (i = 0; i < ARRAY_SIZE(leds); i++) {
leds[i].cdev.name = leds[i].name;
leds[i].cdev.brightness_set = ot200_led_brightness_set;
ret = devm_led_classdev_register(&pdev->dev, &leds[i].cdev);
if (ret < 0)
return ret;
}
leds_front = 0; /* turn off all front leds */
leds_back = BIT(1); /* turn on init led */
outb(leds_front, 0x49);
outb(leds_back, 0x5a);
return 0;
}
static struct platform_driver ot200_led_driver = {
.probe = ot200_led_probe,
.driver = {
.name = "leds-ot200",
},
};
module_platform_driver(ot200_led_driver);
MODULE_AUTHOR("Sebastian A. Siewior <[email protected]>");
MODULE_DESCRIPTION("ot200 LED driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:leds-ot200");
| linux-master | drivers/leds/leds-ot200.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2007-2008 Extreme Engineering Solutions, Inc.
*
* Author: Nate Case <[email protected]>
*
* LED driver for various PCA955x I2C LED drivers
*
* Supported devices:
*
* Device Description 7-bit slave address
* ------ ----------- -------------------
* PCA9550 2-bit driver 0x60 .. 0x61
* PCA9551 8-bit driver 0x60 .. 0x67
* PCA9552 16-bit driver 0x60 .. 0x67
* PCA9553/01 4-bit driver 0x62
* PCA9553/02 4-bit driver 0x63
*
* Philips PCA955x LED driver chips follow a register map as shown below:
*
* Control Register Description
* ---------------- -----------
* 0x0 Input register 0
* ..
* NUM_INPUT_REGS - 1 Last Input register X
*
* NUM_INPUT_REGS Frequency prescaler 0
* NUM_INPUT_REGS + 1 PWM register 0
* NUM_INPUT_REGS + 2 Frequency prescaler 1
* NUM_INPUT_REGS + 3 PWM register 1
*
* NUM_INPUT_REGS + 4 LED selector 0
* NUM_INPUT_REGS + 4
* + NUM_LED_REGS - 1 Last LED selector
*
* where NUM_INPUT_REGS and NUM_LED_REGS vary depending on how many
* bits the chip supports.
*/
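/*
 * Illustrative example (derived from the layout above, not additional
 * datasheet text): on a 16-bit part such as the PCA9552, NUM_INPUT_REGS = 2
 * and NUM_LED_REGS = 4, so the control registers end up at
 *
 *   0x0..0x1   Input registers 0..1
 *   0x2        Frequency prescaler 0 (PSC0)
 *   0x3        PWM register 0 (PWM0)
 *   0x4        Frequency prescaler 1 (PSC1)
 *   0x5        PWM register 1 (PWM1)
 *   0x6..0x9   LED selectors 0..3 (LS0..LS3)
 *
 * which matches the offsets computed by the register helpers below.
 */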
#include <linux/bitops.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
#include <linux/i2c.h>
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <dt-bindings/leds/leds-pca955x.h>
/* LED select registers determine the source that drives LED outputs */
#define PCA955X_LS_LED_ON 0x0 /* Output LOW */
#define PCA955X_LS_LED_OFF 0x1 /* Output HI-Z */
#define PCA955X_LS_BLINK0 0x2 /* Blink at PWM0 rate */
#define PCA955X_LS_BLINK1 0x3 /* Blink at PWM1 rate */
#define PCA955X_GPIO_INPUT LED_OFF
#define PCA955X_GPIO_HIGH LED_OFF
#define PCA955X_GPIO_LOW LED_FULL
enum pca955x_type {
pca9550,
pca9551,
pca9552,
ibm_pca9552,
pca9553,
};
struct pca955x_chipdef {
int bits;
u8 slv_addr; /* 7-bit slave address mask */
int slv_addr_shift; /* Number of bits to ignore */
};
static struct pca955x_chipdef pca955x_chipdefs[] = {
[pca9550] = {
.bits = 2,
.slv_addr = /* 110000x */ 0x60,
.slv_addr_shift = 1,
},
[pca9551] = {
.bits = 8,
.slv_addr = /* 1100xxx */ 0x60,
.slv_addr_shift = 3,
},
[pca9552] = {
.bits = 16,
.slv_addr = /* 1100xxx */ 0x60,
.slv_addr_shift = 3,
},
[ibm_pca9552] = {
.bits = 16,
.slv_addr = /* 0110xxx */ 0x30,
.slv_addr_shift = 3,
},
[pca9553] = {
.bits = 4,
.slv_addr = /* 110001x */ 0x62,
.slv_addr_shift = 1,
},
};
static const struct i2c_device_id pca955x_id[] = {
{ "pca9550", pca9550 },
{ "pca9551", pca9551 },
{ "pca9552", pca9552 },
{ "ibm-pca9552", ibm_pca9552 },
{ "pca9553", pca9553 },
{ }
};
MODULE_DEVICE_TABLE(i2c, pca955x_id);
struct pca955x {
struct mutex lock;
struct pca955x_led *leds;
struct pca955x_chipdef *chipdef;
struct i2c_client *client;
unsigned long active_pins;
#ifdef CONFIG_LEDS_PCA955X_GPIO
struct gpio_chip gpio;
#endif
};
struct pca955x_led {
struct pca955x *pca955x;
struct led_classdev led_cdev;
int led_num; /* 0 .. 15 potentially */
u32 type;
enum led_default_state default_state;
struct fwnode_handle *fwnode;
};
struct pca955x_platform_data {
struct pca955x_led *leds;
int num_leds;
};
/* 8 bits per input register */
static inline int pca95xx_num_input_regs(int bits)
{
return (bits + 7) / 8;
}
/*
* Return an LED selector register value based on an existing one, with
* the appropriate 2-bit state value set for the given LED number (0-3).
*/
static inline u8 pca955x_ledsel(u8 oldval, int led_num, int state)
{
return (oldval & (~(0x3 << (led_num << 1)))) |
((state & 0x3) << (led_num << 1));
}
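/*
 * Worked example (illustrative): with oldval = 0x55 (all four LEDs of the
 * bank in the PCA955X_LS_LED_OFF state), pca955x_ledsel(0x55, 2,
 * PCA955X_LS_LED_ON) clears bits 5:4 and returns 0x45, i.e. only LED 2 of
 * that bank is switched to the "on" source.
 */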
/*
* Write to frequency prescaler register, used to program the
* period of the PWM output. period = (PSCx + 1) / 38
*/
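/*
 * Worked example (illustrative): PSC = 37 gives a period of
 * (37 + 1) / 38 = 1 second, while PSC = 151 gives (151 + 1) / 38 = 4 seconds.
 */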
static int pca955x_write_psc(struct i2c_client *client, int n, u8 val)
{
struct pca955x *pca955x = i2c_get_clientdata(client);
u8 cmd = pca95xx_num_input_regs(pca955x->chipdef->bits) + (2 * n);
int ret;
ret = i2c_smbus_write_byte_data(client, cmd, val);
if (ret < 0)
dev_err(&client->dev, "%s: reg 0x%x, val 0x%x, err %d\n",
__func__, n, val, ret);
return ret;
}
/*
* Write to PWM register, which determines the duty cycle of the
* output. LED is OFF when the count is less than the value of this
* register, and ON when it is greater. If PWMx == 0, LED is always OFF.
*
* Duty cycle is (256 - PWMx) / 256
*/
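/*
 * Worked example (illustrative): a PWM register value of 128 corresponds to
 * a duty cycle of (256 - 128) / 256 = 50%, which is why the brightness code
 * below programs the register with (255 - value).
 */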
static int pca955x_write_pwm(struct i2c_client *client, int n, u8 val)
{
struct pca955x *pca955x = i2c_get_clientdata(client);
u8 cmd = pca95xx_num_input_regs(pca955x->chipdef->bits) + 1 + (2 * n);
int ret;
ret = i2c_smbus_write_byte_data(client, cmd, val);
if (ret < 0)
dev_err(&client->dev, "%s: reg 0x%x, val 0x%x, err %d\n",
__func__, n, val, ret);
return ret;
}
/*
* Write to LED selector register, which determines the source that
* drives the LED output.
*/
static int pca955x_write_ls(struct i2c_client *client, int n, u8 val)
{
struct pca955x *pca955x = i2c_get_clientdata(client);
u8 cmd = pca95xx_num_input_regs(pca955x->chipdef->bits) + 4 + n;
int ret;
ret = i2c_smbus_write_byte_data(client, cmd, val);
if (ret < 0)
dev_err(&client->dev, "%s: reg 0x%x, val 0x%x, err %d\n",
__func__, n, val, ret);
return ret;
}
/*
* Read the LED selector register, which determines the source that
* drives the LED output.
*/
static int pca955x_read_ls(struct i2c_client *client, int n, u8 *val)
{
struct pca955x *pca955x = i2c_get_clientdata(client);
u8 cmd = pca95xx_num_input_regs(pca955x->chipdef->bits) + 4 + n;
int ret;
ret = i2c_smbus_read_byte_data(client, cmd);
if (ret < 0) {
dev_err(&client->dev, "%s: reg 0x%x, err %d\n",
__func__, n, ret);
return ret;
}
*val = (u8)ret;
return 0;
}
static int pca955x_read_pwm(struct i2c_client *client, int n, u8 *val)
{
struct pca955x *pca955x = i2c_get_clientdata(client);
u8 cmd = pca95xx_num_input_regs(pca955x->chipdef->bits) + 1 + (2 * n);
int ret;
ret = i2c_smbus_read_byte_data(client, cmd);
if (ret < 0) {
dev_err(&client->dev, "%s: reg 0x%x, err %d\n",
__func__, n, ret);
return ret;
}
*val = (u8)ret;
return 0;
}
static enum led_brightness pca955x_led_get(struct led_classdev *led_cdev)
{
struct pca955x_led *pca955x_led = container_of(led_cdev,
struct pca955x_led,
led_cdev);
struct pca955x *pca955x = pca955x_led->pca955x;
u8 ls, pwm;
int ret;
ret = pca955x_read_ls(pca955x->client, pca955x_led->led_num / 4, &ls);
if (ret)
return ret;
ls = (ls >> ((pca955x_led->led_num % 4) << 1)) & 0x3;
switch (ls) {
case PCA955X_LS_LED_ON:
ret = LED_FULL;
break;
case PCA955X_LS_LED_OFF:
ret = LED_OFF;
break;
case PCA955X_LS_BLINK0:
ret = LED_HALF;
break;
case PCA955X_LS_BLINK1:
ret = pca955x_read_pwm(pca955x->client, 1, &pwm);
if (ret)
return ret;
ret = 255 - pwm;
break;
}
return ret;
}
static int pca955x_led_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
struct pca955x_led *pca955x_led;
struct pca955x *pca955x;
u8 ls;
int chip_ls; /* which LSx to use (0-3 potentially) */
int ls_led; /* which set of bits within LSx to use (0-3) */
int ret;
pca955x_led = container_of(led_cdev, struct pca955x_led, led_cdev);
pca955x = pca955x_led->pca955x;
chip_ls = pca955x_led->led_num / 4;
ls_led = pca955x_led->led_num % 4;
mutex_lock(&pca955x->lock);
ret = pca955x_read_ls(pca955x->client, chip_ls, &ls);
if (ret)
goto out;
switch (value) {
case LED_FULL:
ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_LED_ON);
break;
case LED_OFF:
ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_LED_OFF);
break;
case LED_HALF:
ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_BLINK0);
break;
default:
/*
* Use PWM1 for all other values. This has the unwanted
* side effect of making all LEDs on the chip share the
* same brightness level if set to a value other than
* OFF, HALF, or FULL. But, this is probably better than
* just turning off for all other values.
*/
ret = pca955x_write_pwm(pca955x->client, 1, 255 - value);
if (ret)
goto out;
ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_BLINK1);
break;
}
ret = pca955x_write_ls(pca955x->client, chip_ls, ls);
out:
mutex_unlock(&pca955x->lock);
return ret;
}
#ifdef CONFIG_LEDS_PCA955X_GPIO
/*
* Read the INPUT register, which contains the state of LEDs.
*/
static int pca955x_read_input(struct i2c_client *client, int n, u8 *val)
{
int ret = i2c_smbus_read_byte_data(client, n);
if (ret < 0) {
dev_err(&client->dev, "%s: reg 0x%x, err %d\n",
__func__, n, ret);
return ret;
}
*val = (u8)ret;
return 0;
}
static int pca955x_gpio_request_pin(struct gpio_chip *gc, unsigned int offset)
{
struct pca955x *pca955x = gpiochip_get_data(gc);
return test_and_set_bit(offset, &pca955x->active_pins) ? -EBUSY : 0;
}
static void pca955x_gpio_free_pin(struct gpio_chip *gc, unsigned int offset)
{
struct pca955x *pca955x = gpiochip_get_data(gc);
clear_bit(offset, &pca955x->active_pins);
}
static int pca955x_set_value(struct gpio_chip *gc, unsigned int offset,
int val)
{
struct pca955x *pca955x = gpiochip_get_data(gc);
struct pca955x_led *led = &pca955x->leds[offset];
if (val)
return pca955x_led_set(&led->led_cdev, PCA955X_GPIO_HIGH);
return pca955x_led_set(&led->led_cdev, PCA955X_GPIO_LOW);
}
static void pca955x_gpio_set_value(struct gpio_chip *gc, unsigned int offset,
int val)
{
pca955x_set_value(gc, offset, val);
}
static int pca955x_gpio_get_value(struct gpio_chip *gc, unsigned int offset)
{
struct pca955x *pca955x = gpiochip_get_data(gc);
struct pca955x_led *led = &pca955x->leds[offset];
u8 reg = 0;
/* There is nothing we can do about errors */
pca955x_read_input(pca955x->client, led->led_num / 8, &reg);
return !!(reg & (1 << (led->led_num % 8)));
}
static int pca955x_gpio_direction_input(struct gpio_chip *gc,
unsigned int offset)
{
struct pca955x *pca955x = gpiochip_get_data(gc);
struct pca955x_led *led = &pca955x->leds[offset];
/* To use as input ensure pin is not driven. */
return pca955x_led_set(&led->led_cdev, PCA955X_GPIO_INPUT);
}
static int pca955x_gpio_direction_output(struct gpio_chip *gc,
unsigned int offset, int val)
{
return pca955x_set_value(gc, offset, val);
}
#endif /* CONFIG_LEDS_PCA955X_GPIO */
static struct pca955x_platform_data *
pca955x_get_pdata(struct i2c_client *client, struct pca955x_chipdef *chip)
{
struct pca955x_platform_data *pdata;
struct pca955x_led *led;
struct fwnode_handle *child;
int count;
count = device_get_child_node_count(&client->dev);
if (count > chip->bits)
return ERR_PTR(-ENODEV);
pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return ERR_PTR(-ENOMEM);
pdata->leds = devm_kcalloc(&client->dev,
chip->bits, sizeof(struct pca955x_led),
GFP_KERNEL);
if (!pdata->leds)
return ERR_PTR(-ENOMEM);
device_for_each_child_node(&client->dev, child) {
u32 reg;
int res;
res = fwnode_property_read_u32(child, "reg", &reg);
if ((res != 0) || (reg >= chip->bits))
continue;
led = &pdata->leds[reg];
led->type = PCA955X_TYPE_LED;
led->fwnode = child;
led->default_state = led_init_default_state_get(child);
fwnode_property_read_u32(child, "type", &led->type);
}
pdata->num_leds = chip->bits;
return pdata;
}
static const struct of_device_id of_pca955x_match[] = {
{ .compatible = "nxp,pca9550", .data = (void *)pca9550 },
{ .compatible = "nxp,pca9551", .data = (void *)pca9551 },
{ .compatible = "nxp,pca9552", .data = (void *)pca9552 },
{ .compatible = "ibm,pca9552", .data = (void *)ibm_pca9552 },
{ .compatible = "nxp,pca9553", .data = (void *)pca9553 },
{},
};
MODULE_DEVICE_TABLE(of, of_pca955x_match);
static int pca955x_probe(struct i2c_client *client)
{
struct pca955x *pca955x;
struct pca955x_led *pca955x_led;
struct pca955x_chipdef *chip;
struct led_classdev *led;
struct led_init_data init_data;
struct i2c_adapter *adapter;
int i, err;
struct pca955x_platform_data *pdata;
bool set_default_label = false;
bool keep_pwm = false;
char default_label[8];
enum pca955x_type chip_type;
const void *md = device_get_match_data(&client->dev);
if (md) {
chip_type = (enum pca955x_type)md;
} else {
const struct i2c_device_id *id = i2c_match_id(pca955x_id,
client);
if (id) {
chip_type = (enum pca955x_type)id->driver_data;
} else {
dev_err(&client->dev, "unknown chip\n");
return -ENODEV;
}
}
chip = &pca955x_chipdefs[chip_type];
adapter = client->adapter;
pdata = dev_get_platdata(&client->dev);
if (!pdata) {
pdata = pca955x_get_pdata(client, chip);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
}
/* Make sure the slave address / chip type combo given is possible */
if ((client->addr & ~((1 << chip->slv_addr_shift) - 1)) !=
chip->slv_addr) {
dev_err(&client->dev, "invalid slave address %02x\n",
client->addr);
return -ENODEV;
}
dev_info(&client->dev, "leds-pca955x: Using %s %d-bit LED driver at "
"slave address 0x%02x\n", client->name, chip->bits,
client->addr);
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -EIO;
if (pdata->num_leds != chip->bits) {
dev_err(&client->dev,
"board info claims %d LEDs on a %d-bit chip\n",
pdata->num_leds, chip->bits);
return -ENODEV;
}
pca955x = devm_kzalloc(&client->dev, sizeof(*pca955x), GFP_KERNEL);
if (!pca955x)
return -ENOMEM;
pca955x->leds = devm_kcalloc(&client->dev, chip->bits,
sizeof(*pca955x_led), GFP_KERNEL);
if (!pca955x->leds)
return -ENOMEM;
i2c_set_clientdata(client, pca955x);
mutex_init(&pca955x->lock);
pca955x->client = client;
pca955x->chipdef = chip;
init_data.devname_mandatory = false;
init_data.devicename = "pca955x";
for (i = 0; i < chip->bits; i++) {
pca955x_led = &pca955x->leds[i];
pca955x_led->led_num = i;
pca955x_led->pca955x = pca955x;
pca955x_led->type = pdata->leds[i].type;
switch (pca955x_led->type) {
case PCA955X_TYPE_NONE:
case PCA955X_TYPE_GPIO:
break;
case PCA955X_TYPE_LED:
led = &pca955x_led->led_cdev;
led->brightness_set_blocking = pca955x_led_set;
led->brightness_get = pca955x_led_get;
if (pdata->leds[i].default_state == LEDS_DEFSTATE_OFF) {
err = pca955x_led_set(led, LED_OFF);
if (err)
return err;
} else if (pdata->leds[i].default_state == LEDS_DEFSTATE_ON) {
err = pca955x_led_set(led, LED_FULL);
if (err)
return err;
}
init_data.fwnode = pdata->leds[i].fwnode;
if (is_of_node(init_data.fwnode)) {
if (to_of_node(init_data.fwnode)->name[0] ==
'\0')
set_default_label = true;
else
set_default_label = false;
} else {
set_default_label = true;
}
if (set_default_label) {
snprintf(default_label, sizeof(default_label),
"%d", i);
init_data.default_label = default_label;
} else {
init_data.default_label = NULL;
}
err = devm_led_classdev_register_ext(&client->dev, led,
&init_data);
if (err)
return err;
set_bit(i, &pca955x->active_pins);
/*
* For default-state == "keep", let the core update the
* brightness from the hardware, then check the
* brightness to see if it's using PWM1. If so, PWM1
* should not be written below.
*/
if (pdata->leds[i].default_state == LEDS_DEFSTATE_KEEP) {
if (led->brightness != LED_FULL &&
led->brightness != LED_OFF &&
led->brightness != LED_HALF)
keep_pwm = true;
}
}
}
/* PWM0 is used for half brightness or 50% duty cycle */
err = pca955x_write_pwm(client, 0, 255 - LED_HALF);
if (err)
return err;
if (!keep_pwm) {
/* PWM1 is used for variable brightness, default to OFF */
err = pca955x_write_pwm(client, 1, 0);
if (err)
return err;
}
/* Set to fast frequency so we do not see flashing */
err = pca955x_write_psc(client, 0, 0);
if (err)
return err;
err = pca955x_write_psc(client, 1, 0);
if (err)
return err;
#ifdef CONFIG_LEDS_PCA955X_GPIO
pca955x->gpio.label = "gpio-pca955x";
pca955x->gpio.direction_input = pca955x_gpio_direction_input;
pca955x->gpio.direction_output = pca955x_gpio_direction_output;
pca955x->gpio.set = pca955x_gpio_set_value;
pca955x->gpio.get = pca955x_gpio_get_value;
pca955x->gpio.request = pca955x_gpio_request_pin;
pca955x->gpio.free = pca955x_gpio_free_pin;
pca955x->gpio.can_sleep = 1;
pca955x->gpio.base = -1;
pca955x->gpio.ngpio = chip->bits;
pca955x->gpio.parent = &client->dev;
pca955x->gpio.owner = THIS_MODULE;
err = devm_gpiochip_add_data(&client->dev, &pca955x->gpio,
pca955x);
if (err) {
/* Use data->gpio.dev as a flag for freeing gpiochip */
pca955x->gpio.parent = NULL;
dev_warn(&client->dev, "could not add gpiochip\n");
return err;
}
dev_info(&client->dev, "gpios %i...%i\n",
pca955x->gpio.base, pca955x->gpio.base +
pca955x->gpio.ngpio - 1);
#endif
return 0;
}
static struct i2c_driver pca955x_driver = {
.driver = {
.name = "leds-pca955x",
.of_match_table = of_pca955x_match,
},
.probe = pca955x_probe,
.id_table = pca955x_id,
};
module_i2c_driver(pca955x_driver);
MODULE_AUTHOR("Nate Case <[email protected]>");
MODULE_DESCRIPTION("PCA955x LED driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/leds/leds-pca955x.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* leds-max8997.c - LED class driver for MAX8997 LEDs.
*
* Copyright (C) 2011 Samsung Electronics
* Donggeun Kim <[email protected]>
*/
#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/leds.h>
#include <linux/mfd/max8997.h>
#include <linux/mfd/max8997-private.h>
#include <linux/platform_device.h>
#define MAX8997_LED_FLASH_SHIFT 3
#define MAX8997_LED_FLASH_CUR_MASK 0xf8
#define MAX8997_LED_MOVIE_SHIFT 4
#define MAX8997_LED_MOVIE_CUR_MASK 0xf0
#define MAX8997_LED_FLASH_MAX_BRIGHTNESS 0x1f
#define MAX8997_LED_MOVIE_MAX_BRIGHTNESS 0xf
#define MAX8997_LED_NONE_MAX_BRIGHTNESS 0
#define MAX8997_LED0_FLASH_MASK 0x1
#define MAX8997_LED0_FLASH_PIN_MASK 0x5
#define MAX8997_LED0_MOVIE_MASK 0x8
#define MAX8997_LED0_MOVIE_PIN_MASK 0x28
#define MAX8997_LED1_FLASH_MASK 0x2
#define MAX8997_LED1_FLASH_PIN_MASK 0x6
#define MAX8997_LED1_MOVIE_MASK 0x10
#define MAX8997_LED1_MOVIE_PIN_MASK 0x30
#define MAX8997_LED_BOOST_ENABLE_MASK (1 << 6)
struct max8997_led {
struct max8997_dev *iodev;
struct led_classdev cdev;
bool enabled;
int id;
enum max8997_led_mode led_mode;
struct mutex mutex;
};
static void max8997_led_set_mode(struct max8997_led *led,
enum max8997_led_mode mode)
{
int ret;
struct i2c_client *client = led->iodev->i2c;
u8 mask = 0, val;
switch (mode) {
case MAX8997_FLASH_MODE:
mask = MAX8997_LED1_FLASH_MASK | MAX8997_LED0_FLASH_MASK;
val = led->id ?
MAX8997_LED1_FLASH_MASK : MAX8997_LED0_FLASH_MASK;
led->cdev.max_brightness = MAX8997_LED_FLASH_MAX_BRIGHTNESS;
break;
case MAX8997_MOVIE_MODE:
mask = MAX8997_LED1_MOVIE_MASK | MAX8997_LED0_MOVIE_MASK;
val = led->id ?
MAX8997_LED1_MOVIE_MASK : MAX8997_LED0_MOVIE_MASK;
led->cdev.max_brightness = MAX8997_LED_MOVIE_MAX_BRIGHTNESS;
break;
case MAX8997_FLASH_PIN_CONTROL_MODE:
mask = MAX8997_LED1_FLASH_PIN_MASK |
MAX8997_LED0_FLASH_PIN_MASK;
val = led->id ?
MAX8997_LED1_FLASH_PIN_MASK : MAX8997_LED0_FLASH_PIN_MASK;
led->cdev.max_brightness = MAX8997_LED_FLASH_MAX_BRIGHTNESS;
break;
case MAX8997_MOVIE_PIN_CONTROL_MODE:
mask = MAX8997_LED1_MOVIE_PIN_MASK |
MAX8997_LED0_MOVIE_PIN_MASK;
val = led->id ?
MAX8997_LED1_MOVIE_PIN_MASK : MAX8997_LED0_MOVIE_PIN_MASK;
led->cdev.max_brightness = MAX8997_LED_MOVIE_MAX_BRIGHTNESS;
break;
default:
led->cdev.max_brightness = MAX8997_LED_NONE_MAX_BRIGHTNESS;
break;
}
if (mask) {
ret = max8997_update_reg(client, MAX8997_REG_LEN_CNTL, val,
mask);
if (ret)
dev_err(led->iodev->dev,
"failed to update register(%d)\n", ret);
}
led->led_mode = mode;
}
static void max8997_led_enable(struct max8997_led *led, bool enable)
{
int ret;
struct i2c_client *client = led->iodev->i2c;
u8 val = 0, mask = MAX8997_LED_BOOST_ENABLE_MASK;
if (led->enabled == enable)
return;
val = enable ? MAX8997_LED_BOOST_ENABLE_MASK : 0;
ret = max8997_update_reg(client, MAX8997_REG_BOOST_CNTL, val, mask);
if (ret)
dev_err(led->iodev->dev,
"failed to update register(%d)\n", ret);
led->enabled = enable;
}
static void max8997_led_set_current(struct max8997_led *led,
enum led_brightness value)
{
int ret;
struct i2c_client *client = led->iodev->i2c;
u8 val = 0, mask = 0, reg = 0;
switch (led->led_mode) {
case MAX8997_FLASH_MODE:
case MAX8997_FLASH_PIN_CONTROL_MODE:
val = value << MAX8997_LED_FLASH_SHIFT;
mask = MAX8997_LED_FLASH_CUR_MASK;
reg = led->id ? MAX8997_REG_FLASH2_CUR : MAX8997_REG_FLASH1_CUR;
break;
case MAX8997_MOVIE_MODE:
case MAX8997_MOVIE_PIN_CONTROL_MODE:
val = value << MAX8997_LED_MOVIE_SHIFT;
mask = MAX8997_LED_MOVIE_CUR_MASK;
reg = MAX8997_REG_MOVIE_CUR;
break;
default:
break;
}
if (mask) {
ret = max8997_update_reg(client, reg, val, mask);
if (ret)
dev_err(led->iodev->dev,
"failed to update register(%d)\n", ret);
}
}
static void max8997_led_brightness_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
struct max8997_led *led =
container_of(led_cdev, struct max8997_led, cdev);
if (value) {
max8997_led_set_current(led, value);
max8997_led_enable(led, true);
} else {
max8997_led_set_current(led, value);
max8997_led_enable(led, false);
}
}
static ssize_t mode_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct max8997_led *led =
container_of(led_cdev, struct max8997_led, cdev);
ssize_t ret = 0;
mutex_lock(&led->mutex);
switch (led->led_mode) {
case MAX8997_FLASH_MODE:
ret += sprintf(buf, "FLASH\n");
break;
case MAX8997_MOVIE_MODE:
ret += sprintf(buf, "MOVIE\n");
break;
case MAX8997_FLASH_PIN_CONTROL_MODE:
ret += sprintf(buf, "FLASH_PIN_CONTROL\n");
break;
case MAX8997_MOVIE_PIN_CONTROL_MODE:
ret += sprintf(buf, "MOVIE_PIN_CONTROL\n");
break;
default:
ret += sprintf(buf, "NONE\n");
break;
}
mutex_unlock(&led->mutex);
return ret;
}
static ssize_t mode_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct max8997_led *led =
container_of(led_cdev, struct max8997_led, cdev);
enum max8997_led_mode mode;
mutex_lock(&led->mutex);
if (!strncmp(buf, "FLASH_PIN_CONTROL", 17))
mode = MAX8997_FLASH_PIN_CONTROL_MODE;
else if (!strncmp(buf, "MOVIE_PIN_CONTROL", 17))
mode = MAX8997_MOVIE_PIN_CONTROL_MODE;
else if (!strncmp(buf, "FLASH", 5))
mode = MAX8997_FLASH_MODE;
else if (!strncmp(buf, "MOVIE", 5))
mode = MAX8997_MOVIE_MODE;
else
mode = MAX8997_NONE;
max8997_led_set_mode(led, mode);
mutex_unlock(&led->mutex);
return size;
}
static DEVICE_ATTR_RW(mode);
static struct attribute *max8997_attrs[] = {
&dev_attr_mode.attr,
NULL
};
ATTRIBUTE_GROUPS(max8997);
static int max8997_led_probe(struct platform_device *pdev)
{
struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
struct max8997_platform_data *pdata = dev_get_platdata(iodev->dev);
struct max8997_led *led;
char name[20];
int ret = 0;
led = devm_kzalloc(&pdev->dev, sizeof(*led), GFP_KERNEL);
if (led == NULL)
return -ENOMEM;
led->id = pdev->id;
snprintf(name, sizeof(name), "max8997-led%d", pdev->id);
led->cdev.name = name;
led->cdev.brightness_set = max8997_led_brightness_set;
led->cdev.flags |= LED_CORE_SUSPENDRESUME;
led->cdev.brightness = 0;
led->cdev.groups = max8997_groups;
led->iodev = iodev;
/* initialize mode and brightness according to platform_data */
if (pdata && pdata->led_pdata) {
u8 mode = 0, brightness = 0;
mode = pdata->led_pdata->mode[led->id];
brightness = pdata->led_pdata->brightness[led->id];
max8997_led_set_mode(led, mode);
if (brightness > led->cdev.max_brightness)
brightness = led->cdev.max_brightness;
max8997_led_set_current(led, brightness);
led->cdev.brightness = brightness;
} else {
max8997_led_set_mode(led, MAX8997_NONE);
max8997_led_set_current(led, 0);
}
mutex_init(&led->mutex);
ret = devm_led_classdev_register(&pdev->dev, &led->cdev);
if (ret < 0)
return ret;
return 0;
}
static struct platform_driver max8997_led_driver = {
.driver = {
.name = "max8997-led",
},
.probe = max8997_led_probe,
};
module_platform_driver(max8997_led_driver);
MODULE_AUTHOR("Donggeun Kim <[email protected]>");
MODULE_DESCRIPTION("MAX8997 LED driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:max8997-led");
| linux-master | drivers/leds/leds-max8997.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* h3xxx atmel micro companion support, notification LED subdevice
*
* Author : Linus Walleij <[email protected]>
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mfd/ipaq-micro.h>
#include <linux/leds.h>
#define LED_YELLOW 0x00
#define LED_GREEN 0x01
#define LED_EN (1 << 4) /* LED ON/OFF 0:off, 1:on */
#define LED_AUTOSTOP (1 << 5) /* LED ON/OFF auto stop set 0:disable, 1:enable */
#define LED_ALWAYS (1 << 6) /* LED Interrupt Mask 0:No mask, 1:mask */
static int micro_leds_brightness_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
struct ipaq_micro *micro = dev_get_drvdata(led_cdev->dev->parent->parent);
/*
* In this message:
* Byte 0 = LED color: 0 = yellow, 1 = green
* yellow LED is always ~30 blinks per minute
* Byte 1 = duration (flags?) appears to be ignored
* Byte 2 = green ontime in 1/10 sec (deciseconds)
* 1 = 1/10 second
* 0 = 256/10 second
* Byte 3 = green offtime in 1/10 sec (deciseconds)
* 1 = 1/10 second
* 0 = 256/10 seconds
*/
struct ipaq_micro_msg msg = {
.id = MSG_NOTIFY_LED,
.tx_len = 4,
};
msg.tx_data[0] = LED_GREEN;
msg.tx_data[1] = 0;
if (value) {
msg.tx_data[2] = 0; /* Duty cycle 256 */
msg.tx_data[3] = 1;
} else {
msg.tx_data[2] = 1;
msg.tx_data[3] = 0; /* Duty cycle 256 */
}
return ipaq_micro_tx_msg_sync(micro, &msg);
}
/* Maximum duty cycle in ms 256/10 sec = 25600 ms */
#define IPAQ_LED_MAX_DUTY 25600
static int micro_leds_blink_set(struct led_classdev *led_cdev,
unsigned long *delay_on,
unsigned long *delay_off)
{
struct ipaq_micro *micro = dev_get_drvdata(led_cdev->dev->parent->parent);
/*
* In this message:
* Byte 0 = LED color: 0 = yellow, 1 = green
* yellow LED is always ~30 blinks per minute
* Byte 1 = duration (flags?) appears to be ignored
* Byte 2 = green ontime in 1/10 sec (deciseconds)
* 1 = 1/10 second
* 0 = 256/10 second
* Byte 3 = green offtime in 1/10 sec (deciseconds)
* 1 = 1/10 second
* 0 = 256/10 seconds
*/
struct ipaq_micro_msg msg = {
.id = MSG_NOTIFY_LED,
.tx_len = 4,
};
msg.tx_data[0] = LED_GREEN;
if (*delay_on > IPAQ_LED_MAX_DUTY ||
*delay_off > IPAQ_LED_MAX_DUTY)
return -EINVAL;
if (*delay_on == 0 && *delay_off == 0) {
*delay_on = 100;
*delay_off = 100;
}
msg.tx_data[1] = 0;
if (*delay_on >= IPAQ_LED_MAX_DUTY)
msg.tx_data[2] = 0;
else
msg.tx_data[2] = (u8) DIV_ROUND_CLOSEST(*delay_on, 100);
if (*delay_off >= IPAQ_LED_MAX_DUTY)
msg.tx_data[3] = 0;
else
msg.tx_data[3] = (u8) DIV_ROUND_CLOSEST(*delay_off, 100);
return ipaq_micro_tx_msg_sync(micro, &msg);
}
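/*
 * Worked example (illustrative): a request of *delay_on = 500 and
 * *delay_off = 1000 (milliseconds) is sent as tx_data[2] = 5 and
 * tx_data[3] = 10, i.e. 0.5 s on and 1.0 s off in the micro's decisecond
 * units; a delay of exactly IPAQ_LED_MAX_DUTY (25600 ms) maps to 0, which
 * the firmware treats as 256/10 s.
 */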
static struct led_classdev micro_led = {
.name = "led-ipaq-micro",
.brightness_set_blocking = micro_leds_brightness_set,
.blink_set = micro_leds_blink_set,
.flags = LED_CORE_SUSPENDRESUME,
};
static int micro_leds_probe(struct platform_device *pdev)
{
int ret;
ret = devm_led_classdev_register(&pdev->dev, &micro_led);
if (ret) {
dev_err(&pdev->dev, "registering led failed: %d\n", ret);
return ret;
}
dev_info(&pdev->dev, "iPAQ micro notification LED driver\n");
return 0;
}
static struct platform_driver micro_leds_device_driver = {
.driver = {
.name = "ipaq-micro-leds",
},
.probe = micro_leds_probe,
};
module_platform_driver(micro_leds_device_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("driver for iPAQ Atmel micro leds");
MODULE_ALIAS("platform:ipaq-micro-leds");
| linux-master | drivers/leds/leds-ipaq-micro.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* leds-ns2.c - Driver for the Network Space v2 (and parents) dual-GPIO LED
*
* Copyright (C) 2010 LaCie
*
* Author: Simon Guinot <[email protected]>
*
* Based on leds-gpio.c by Raphael Assenat <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/gpio/consumer.h>
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/of.h>
#include "leds.h"
enum ns2_led_modes {
NS_V2_LED_OFF,
NS_V2_LED_ON,
NS_V2_LED_SATA,
};
/*
* If the size of this structure or types of its members is changed,
* the filling of array modval in function ns2_led_register must be changed
* accordingly.
*/
struct ns2_led_modval {
u32 mode;
u32 cmd_level;
u32 slow_level;
} __packed;
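/*
 * Illustrative example (hypothetical GPIO levels, shown for the structure
 * only): a device tree property such as
 *
 *     modes-map = <NS_V2_LED_OFF  0 1>,
 *                 <NS_V2_LED_ON   1 0>,
 *                 <NS_V2_LED_SATA 0 0>;
 *
 * provides nine cells, i.e. three { mode, cmd_level, slow_level } triplets,
 * which is exactly how ns2_led_register() fills the modval array below.
 */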
/*
* The Network Space v2 dual-GPIO LED is wired to a CPLD. Three different LED
* modes are available: off, on and SATA activity blinking. The LED modes are
* controlled through two GPIOs (command and slow): each combination of values
* for the command/slow GPIOs corresponds to a LED mode.
*/
struct ns2_led {
struct led_classdev cdev;
struct gpio_desc *cmd;
struct gpio_desc *slow;
bool can_sleep;
unsigned char sata; /* True when SATA mode active. */
rwlock_t rw_lock; /* Lock GPIOs. */
int num_modes;
struct ns2_led_modval *modval;
};
static int ns2_led_get_mode(struct ns2_led *led, enum ns2_led_modes *mode)
{
int i;
int cmd_level;
int slow_level;
cmd_level = gpiod_get_value_cansleep(led->cmd);
slow_level = gpiod_get_value_cansleep(led->slow);
for (i = 0; i < led->num_modes; i++) {
if (cmd_level == led->modval[i].cmd_level &&
slow_level == led->modval[i].slow_level) {
*mode = led->modval[i].mode;
return 0;
}
}
return -EINVAL;
}
static void ns2_led_set_mode(struct ns2_led *led, enum ns2_led_modes mode)
{
int i;
unsigned long flags;
for (i = 0; i < led->num_modes; i++)
if (mode == led->modval[i].mode)
break;
if (i == led->num_modes)
return;
write_lock_irqsave(&led->rw_lock, flags);
if (!led->can_sleep) {
gpiod_set_value(led->cmd, led->modval[i].cmd_level);
gpiod_set_value(led->slow, led->modval[i].slow_level);
goto exit_unlock;
}
gpiod_set_value_cansleep(led->cmd, led->modval[i].cmd_level);
gpiod_set_value_cansleep(led->slow, led->modval[i].slow_level);
exit_unlock:
write_unlock_irqrestore(&led->rw_lock, flags);
}
static void ns2_led_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
struct ns2_led *led = container_of(led_cdev, struct ns2_led, cdev);
enum ns2_led_modes mode;
if (value == LED_OFF)
mode = NS_V2_LED_OFF;
else if (led->sata)
mode = NS_V2_LED_SATA;
else
mode = NS_V2_LED_ON;
ns2_led_set_mode(led, mode);
}
static int ns2_led_set_blocking(struct led_classdev *led_cdev,
enum led_brightness value)
{
ns2_led_set(led_cdev, value);
return 0;
}
static ssize_t ns2_led_sata_store(struct device *dev,
struct device_attribute *attr,
const char *buff, size_t count)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct ns2_led *led = container_of(led_cdev, struct ns2_led, cdev);
int ret;
unsigned long enable;
ret = kstrtoul(buff, 10, &enable);
if (ret < 0)
return ret;
enable = !!enable;
if (led->sata == enable)
goto exit;
led->sata = enable;
if (!led_get_brightness(led_cdev))
goto exit;
if (enable)
ns2_led_set_mode(led, NS_V2_LED_SATA);
else
ns2_led_set_mode(led, NS_V2_LED_ON);
exit:
return count;
}
static ssize_t ns2_led_sata_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct ns2_led *led = container_of(led_cdev, struct ns2_led, cdev);
return sprintf(buf, "%d\n", led->sata);
}
static DEVICE_ATTR(sata, 0644, ns2_led_sata_show, ns2_led_sata_store);
static struct attribute *ns2_led_attrs[] = {
&dev_attr_sata.attr,
NULL
};
ATTRIBUTE_GROUPS(ns2_led);
static int ns2_led_register(struct device *dev, struct fwnode_handle *node,
struct ns2_led *led)
{
struct led_init_data init_data = {};
struct ns2_led_modval *modval;
enum ns2_led_modes mode;
int nmodes, ret;
led->cmd = devm_fwnode_gpiod_get_index(dev, node, "cmd", 0, GPIOD_ASIS,
fwnode_get_name(node));
if (IS_ERR(led->cmd))
return PTR_ERR(led->cmd);
led->slow = devm_fwnode_gpiod_get_index(dev, node, "slow", 0,
GPIOD_ASIS,
fwnode_get_name(node));
if (IS_ERR(led->slow))
return PTR_ERR(led->slow);
ret = fwnode_property_count_u32(node, "modes-map");
if (ret < 0 || ret % 3) {
dev_err(dev, "Missing or malformed modes-map for %pfw\n", node);
return -EINVAL;
}
nmodes = ret / 3;
modval = devm_kcalloc(dev, nmodes, sizeof(*modval), GFP_KERNEL);
if (!modval)
return -ENOMEM;
fwnode_property_read_u32_array(node, "modes-map", (void *)modval,
nmodes * 3);
rwlock_init(&led->rw_lock);
led->cdev.blink_set = NULL;
led->cdev.flags |= LED_CORE_SUSPENDRESUME;
led->cdev.groups = ns2_led_groups;
led->can_sleep = gpiod_cansleep(led->cmd) || gpiod_cansleep(led->slow);
if (led->can_sleep)
led->cdev.brightness_set_blocking = ns2_led_set_blocking;
else
led->cdev.brightness_set = ns2_led_set;
led->num_modes = nmodes;
led->modval = modval;
ret = ns2_led_get_mode(led, &mode);
if (ret < 0)
return ret;
/* Set LED initial state. */
led->sata = (mode == NS_V2_LED_SATA) ? 1 : 0;
led->cdev.brightness = (mode == NS_V2_LED_OFF) ? LED_OFF : LED_FULL;
init_data.fwnode = node;
ret = devm_led_classdev_register_ext(dev, &led->cdev, &init_data);
if (ret)
dev_err(dev, "Failed to register LED for node %pfw\n", node);
return ret;
}
static int ns2_led_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct fwnode_handle *child;
struct ns2_led *leds;
int count;
int ret;
count = device_get_child_node_count(dev);
if (!count)
return -ENODEV;
leds = devm_kcalloc(dev, count, sizeof(*leds), GFP_KERNEL);
if (!leds)
return -ENOMEM;
device_for_each_child_node(dev, child) {
ret = ns2_led_register(dev, child, leds++);
if (ret) {
fwnode_handle_put(child);
return ret;
}
}
return 0;
}
static const struct of_device_id of_ns2_leds_match[] = {
{ .compatible = "lacie,ns2-leds", },
{},
};
MODULE_DEVICE_TABLE(of, of_ns2_leds_match);
static struct platform_driver ns2_led_driver = {
.probe = ns2_led_probe,
.driver = {
.name = "leds-ns2",
.of_match_table = of_ns2_leds_match,
},
};
module_platform_driver(ns2_led_driver);
MODULE_AUTHOR("Simon Guinot <[email protected]>");
MODULE_DESCRIPTION("Network Space v2 LED driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:leds-ns2");
| linux-master | drivers/leds/leds-ns2.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* LEDs driver for the Cobalt Raq series.
*
* Copyright (C) 2007 Yoichi Yuasa <[email protected]>
*/
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/leds.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/export.h>
#define LED_WEB 0x04
#define LED_POWER_OFF 0x08
static void __iomem *led_port;
static u8 led_value;
static DEFINE_SPINLOCK(led_value_lock);
static void raq_web_led_set(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
unsigned long flags;
spin_lock_irqsave(&led_value_lock, flags);
if (brightness)
led_value |= LED_WEB;
else
led_value &= ~LED_WEB;
writeb(led_value, led_port);
spin_unlock_irqrestore(&led_value_lock, flags);
}
static struct led_classdev raq_web_led = {
.name = "raq::web",
.brightness_set = raq_web_led_set,
};
static void raq_power_off_led_set(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
unsigned long flags;
spin_lock_irqsave(&led_value_lock, flags);
if (brightness)
led_value |= LED_POWER_OFF;
else
led_value &= ~LED_POWER_OFF;
writeb(led_value, led_port);
spin_unlock_irqrestore(&led_value_lock, flags);
}
static struct led_classdev raq_power_off_led = {
.name = "raq::power-off",
.brightness_set = raq_power_off_led_set,
.default_trigger = "power-off",
};
static int cobalt_raq_led_probe(struct platform_device *pdev)
{
struct resource *res;
int retval;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -EBUSY;
led_port = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (!led_port)
return -ENOMEM;
retval = led_classdev_register(&pdev->dev, &raq_power_off_led);
if (retval)
goto err_null;
retval = led_classdev_register(&pdev->dev, &raq_web_led);
if (retval)
goto err_unregister;
return 0;
err_unregister:
led_classdev_unregister(&raq_power_off_led);
err_null:
led_port = NULL;
return retval;
}
static struct platform_driver cobalt_raq_led_driver = {
.probe = cobalt_raq_led_probe,
.driver = {
.name = "cobalt-raq-leds",
},
};
builtin_platform_driver(cobalt_raq_led_driver);
| linux-master | drivers/leds/leds-cobalt-raq.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* LED Triggers Core
*
* Copyright 2005-2007 Openedhand Ltd.
*
* Author: Richard Purdie <[email protected]>
*/
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/timer.h>
#include <linux/rwsem.h>
#include <linux/leds.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include "leds.h"
/*
* Nests outside led_cdev->trigger_lock
*/
static DECLARE_RWSEM(triggers_list_lock);
LIST_HEAD(trigger_list);
/* Used by LED Class */
static inline bool
trigger_relevant(struct led_classdev *led_cdev, struct led_trigger *trig)
{
return !trig->trigger_type || trig->trigger_type == led_cdev->trigger_type;
}
ssize_t led_trigger_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t pos, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct led_trigger *trig;
int ret = count;
mutex_lock(&led_cdev->led_access);
if (led_sysfs_is_disabled(led_cdev)) {
ret = -EBUSY;
goto unlock;
}
if (sysfs_streq(buf, "none")) {
led_trigger_remove(led_cdev);
goto unlock;
}
down_read(&triggers_list_lock);
list_for_each_entry(trig, &trigger_list, next_trig) {
if (sysfs_streq(buf, trig->name) && trigger_relevant(led_cdev, trig)) {
down_write(&led_cdev->trigger_lock);
led_trigger_set(led_cdev, trig);
up_write(&led_cdev->trigger_lock);
up_read(&triggers_list_lock);
goto unlock;
}
}
/* we come here only if buf matches no trigger */
ret = -EINVAL;
up_read(&triggers_list_lock);
unlock:
mutex_unlock(&led_cdev->led_access);
return ret;
}
EXPORT_SYMBOL_GPL(led_trigger_write);
__printf(3, 4)
static int led_trigger_snprintf(char *buf, ssize_t size, const char *fmt, ...)
{
va_list args;
int i;
va_start(args, fmt);
if (size <= 0)
i = vsnprintf(NULL, 0, fmt, args);
else
i = vscnprintf(buf, size, fmt, args);
va_end(args);
return i;
}
static int led_trigger_format(char *buf, size_t size,
struct led_classdev *led_cdev)
{
struct led_trigger *trig;
int len = led_trigger_snprintf(buf, size, "%s",
led_cdev->trigger ? "none" : "[none]");
list_for_each_entry(trig, &trigger_list, next_trig) {
bool hit;
if (!trigger_relevant(led_cdev, trig))
continue;
hit = led_cdev->trigger && !strcmp(led_cdev->trigger->name, trig->name);
len += led_trigger_snprintf(buf + len, size - len,
" %s%s%s", hit ? "[" : "",
trig->name, hit ? "]" : "");
}
len += led_trigger_snprintf(buf + len, size - len, "\n");
return len;
}
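/*
 * Worked example (illustrative): for an LED whose current trigger is
 * "timer", this formats something like
 *
 *     none [timer] oneshot heartbeat
 *
 * with the active entry bracketed - the string userspace sees when reading
 * the "trigger" attribute.
 */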
/*
* It was stupid to create 10000 cpu triggers, but we are stuck with it now.
* Don't make that mistake again. We work around it here by creating binary
* attribute, which is not limited by length. This is _not_ good design, do not
* copy it.
*/
ssize_t led_trigger_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t pos, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
struct led_classdev *led_cdev = dev_get_drvdata(dev);
void *data;
int len;
down_read(&triggers_list_lock);
down_read(&led_cdev->trigger_lock);
len = led_trigger_format(NULL, 0, led_cdev);
data = kvmalloc(len + 1, GFP_KERNEL);
if (!data) {
up_read(&led_cdev->trigger_lock);
up_read(&triggers_list_lock);
return -ENOMEM;
}
len = led_trigger_format(data, len + 1, led_cdev);
up_read(&led_cdev->trigger_lock);
up_read(&triggers_list_lock);
len = memory_read_from_buffer(buf, count, &pos, data, len);
kvfree(data);
return len;
}
EXPORT_SYMBOL_GPL(led_trigger_read);
/* Caller must ensure led_cdev->trigger_lock held */
int led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig)
{
char *event = NULL;
char *envp[2];
const char *name;
int ret;
if (!led_cdev->trigger && !trig)
return 0;
name = trig ? trig->name : "none";
event = kasprintf(GFP_KERNEL, "TRIGGER=%s", name);
/* Remove any existing trigger */
if (led_cdev->trigger) {
spin_lock(&led_cdev->trigger->leddev_list_lock);
list_del_rcu(&led_cdev->trig_list);
spin_unlock(&led_cdev->trigger->leddev_list_lock);
/* ensure it's no longer visible on the led_cdevs list */
synchronize_rcu();
cancel_work_sync(&led_cdev->set_brightness_work);
led_stop_software_blink(led_cdev);
if (led_cdev->trigger->deactivate)
led_cdev->trigger->deactivate(led_cdev);
device_remove_groups(led_cdev->dev, led_cdev->trigger->groups);
led_cdev->trigger = NULL;
led_cdev->trigger_data = NULL;
led_cdev->activated = false;
led_cdev->flags &= ~LED_INIT_DEFAULT_TRIGGER;
led_set_brightness(led_cdev, LED_OFF);
}
if (trig) {
spin_lock(&trig->leddev_list_lock);
list_add_tail_rcu(&led_cdev->trig_list, &trig->led_cdevs);
spin_unlock(&trig->leddev_list_lock);
led_cdev->trigger = trig;
if (trig->activate)
ret = trig->activate(led_cdev);
else
ret = 0;
if (ret)
goto err_activate;
ret = device_add_groups(led_cdev->dev, trig->groups);
if (ret) {
dev_err(led_cdev->dev, "Failed to add trigger attributes\n");
goto err_add_groups;
}
}
if (event) {
envp[0] = event;
envp[1] = NULL;
if (kobject_uevent_env(&led_cdev->dev->kobj, KOBJ_CHANGE, envp))
dev_err(led_cdev->dev,
"%s: Error sending uevent\n", __func__);
kfree(event);
}
return 0;
err_add_groups:
if (trig->deactivate)
trig->deactivate(led_cdev);
err_activate:
spin_lock(&led_cdev->trigger->leddev_list_lock);
list_del_rcu(&led_cdev->trig_list);
spin_unlock(&led_cdev->trigger->leddev_list_lock);
synchronize_rcu();
led_cdev->trigger = NULL;
led_cdev->trigger_data = NULL;
led_set_brightness(led_cdev, LED_OFF);
kfree(event);
return ret;
}
EXPORT_SYMBOL_GPL(led_trigger_set);
void led_trigger_remove(struct led_classdev *led_cdev)
{
down_write(&led_cdev->trigger_lock);
led_trigger_set(led_cdev, NULL);
up_write(&led_cdev->trigger_lock);
}
EXPORT_SYMBOL_GPL(led_trigger_remove);
void led_trigger_set_default(struct led_classdev *led_cdev)
{
struct led_trigger *trig;
if (!led_cdev->default_trigger)
return;
down_read(&triggers_list_lock);
down_write(&led_cdev->trigger_lock);
list_for_each_entry(trig, &trigger_list, next_trig) {
if (!strcmp(led_cdev->default_trigger, trig->name) &&
trigger_relevant(led_cdev, trig)) {
led_cdev->flags |= LED_INIT_DEFAULT_TRIGGER;
led_trigger_set(led_cdev, trig);
break;
}
}
up_write(&led_cdev->trigger_lock);
up_read(&triggers_list_lock);
}
EXPORT_SYMBOL_GPL(led_trigger_set_default);
void led_trigger_rename_static(const char *name, struct led_trigger *trig)
{
/* new name must be on a temporary string to prevent races */
BUG_ON(name == trig->name);
down_write(&triggers_list_lock);
	/* this assumes that trig->name was originally allocated to
	 * non-constant storage */
strcpy((char *)trig->name, name);
up_write(&triggers_list_lock);
}
EXPORT_SYMBOL_GPL(led_trigger_rename_static);
/* LED Trigger Interface */
int led_trigger_register(struct led_trigger *trig)
{
struct led_classdev *led_cdev;
struct led_trigger *_trig;
spin_lock_init(&trig->leddev_list_lock);
INIT_LIST_HEAD(&trig->led_cdevs);
down_write(&triggers_list_lock);
/* Make sure the trigger's name isn't already in use */
list_for_each_entry(_trig, &trigger_list, next_trig) {
if (!strcmp(_trig->name, trig->name) &&
(trig->trigger_type == _trig->trigger_type ||
!trig->trigger_type || !_trig->trigger_type)) {
up_write(&triggers_list_lock);
return -EEXIST;
}
}
/* Add to the list of led triggers */
list_add_tail(&trig->next_trig, &trigger_list);
up_write(&triggers_list_lock);
/* Register with any LEDs that have this as a default trigger */
down_read(&leds_list_lock);
list_for_each_entry(led_cdev, &leds_list, node) {
down_write(&led_cdev->trigger_lock);
if (!led_cdev->trigger && led_cdev->default_trigger &&
!strcmp(led_cdev->default_trigger, trig->name) &&
trigger_relevant(led_cdev, trig)) {
led_cdev->flags |= LED_INIT_DEFAULT_TRIGGER;
led_trigger_set(led_cdev, trig);
}
up_write(&led_cdev->trigger_lock);
}
up_read(&leds_list_lock);
return 0;
}
EXPORT_SYMBOL_GPL(led_trigger_register);
void led_trigger_unregister(struct led_trigger *trig)
{
struct led_classdev *led_cdev;
if (list_empty_careful(&trig->next_trig))
return;
/* Remove from the list of led triggers */
down_write(&triggers_list_lock);
list_del_init(&trig->next_trig);
up_write(&triggers_list_lock);
/* Remove anyone actively using this trigger */
down_read(&leds_list_lock);
list_for_each_entry(led_cdev, &leds_list, node) {
down_write(&led_cdev->trigger_lock);
if (led_cdev->trigger == trig)
led_trigger_set(led_cdev, NULL);
up_write(&led_cdev->trigger_lock);
}
up_read(&leds_list_lock);
}
EXPORT_SYMBOL_GPL(led_trigger_unregister);
static void devm_led_trigger_release(struct device *dev, void *res)
{
led_trigger_unregister(*(struct led_trigger **)res);
}
int devm_led_trigger_register(struct device *dev,
struct led_trigger *trig)
{
struct led_trigger **dr;
int rc;
dr = devres_alloc(devm_led_trigger_release, sizeof(*dr),
GFP_KERNEL);
if (!dr)
return -ENOMEM;
*dr = trig;
rc = led_trigger_register(trig);
if (rc)
devres_free(dr);
else
devres_add(dev, dr);
return rc;
}
EXPORT_SYMBOL_GPL(devm_led_trigger_register);
/* Simple LED Trigger Interface */
void led_trigger_event(struct led_trigger *trig,
enum led_brightness brightness)
{
struct led_classdev *led_cdev;
if (!trig)
return;
rcu_read_lock();
list_for_each_entry_rcu(led_cdev, &trig->led_cdevs, trig_list)
led_set_brightness(led_cdev, brightness);
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(led_trigger_event);
static void led_trigger_blink_setup(struct led_trigger *trig,
unsigned long delay_on,
unsigned long delay_off,
int oneshot,
int invert)
{
struct led_classdev *led_cdev;
if (!trig)
return;
rcu_read_lock();
list_for_each_entry_rcu(led_cdev, &trig->led_cdevs, trig_list) {
if (oneshot)
led_blink_set_oneshot(led_cdev, &delay_on, &delay_off,
invert);
else
led_blink_set_nosleep(led_cdev, delay_on, delay_off);
}
rcu_read_unlock();
}
void led_trigger_blink(struct led_trigger *trig,
unsigned long delay_on,
unsigned long delay_off)
{
led_trigger_blink_setup(trig, delay_on, delay_off, 0, 0);
}
EXPORT_SYMBOL_GPL(led_trigger_blink);
void led_trigger_blink_oneshot(struct led_trigger *trig,
unsigned long delay_on,
unsigned long delay_off,
int invert)
{
led_trigger_blink_setup(trig, delay_on, delay_off, 1, invert);
}
EXPORT_SYMBOL_GPL(led_trigger_blink_oneshot);
void led_trigger_register_simple(const char *name, struct led_trigger **tp)
{
struct led_trigger *trig;
int err;
trig = kzalloc(sizeof(struct led_trigger), GFP_KERNEL);
if (trig) {
trig->name = name;
err = led_trigger_register(trig);
if (err < 0) {
kfree(trig);
trig = NULL;
pr_warn("LED trigger %s failed to register (%d)\n",
name, err);
}
} else {
pr_warn("LED trigger %s failed to register (no memory)\n",
name);
}
*tp = trig;
}
EXPORT_SYMBOL_GPL(led_trigger_register_simple);
void led_trigger_unregister_simple(struct led_trigger *trig)
{
if (trig)
led_trigger_unregister(trig);
kfree(trig);
}
EXPORT_SYMBOL_GPL(led_trigger_unregister_simple);
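/*
 * Illustrative sketch (not part of the original file): how a driver would
 * typically consume the simple trigger interface above. The trigger name
 * "example-event" and the calling context are hypothetical.
 *
 *	static struct led_trigger *example_trig;
 *
 *	at init time:
 *		led_trigger_register_simple("example-event", &example_trig);
 *
 *	whenever the event fires (a NULL trigger left over from a failed
 *	registration is silently ignored by led_trigger_event()):
 *		led_trigger_event(example_trig, LED_FULL);
 *		led_trigger_event(example_trig, LED_OFF);
 *
 *	at exit time:
 *		led_trigger_unregister_simple(example_trig);
 */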
| linux-master | drivers/leds/led-triggers.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
 * LED driver for the HP Jornada 620/660/680/690 handhelds
*
* Copyright 2008 Kristoffer Ericson <[email protected]>
 * This driver is based on leds-spitz.c by Richard Purdie.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/leds.h>
#include <asm/hd64461.h>
#include <mach/hp6xx.h>
static void hp6xxled_green_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
u8 v8;
v8 = inb(PKDR);
if (value)
outb(v8 & (~PKDR_LED_GREEN), PKDR);
else
outb(v8 | PKDR_LED_GREEN, PKDR);
}
static void hp6xxled_red_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
u16 v16;
v16 = inw(HD64461_GPBDR);
if (value)
outw(v16 & (~HD64461_GPBDR_LED_RED), HD64461_GPBDR);
else
outw(v16 | HD64461_GPBDR_LED_RED, HD64461_GPBDR);
}
static struct led_classdev hp6xx_red_led = {
.name = "hp6xx:red",
.default_trigger = "hp6xx-charge",
.brightness_set = hp6xxled_red_set,
.flags = LED_CORE_SUSPENDRESUME,
};
static struct led_classdev hp6xx_green_led = {
.name = "hp6xx:green",
.default_trigger = "disk-activity",
.brightness_set = hp6xxled_green_set,
.flags = LED_CORE_SUSPENDRESUME,
};
static int hp6xxled_probe(struct platform_device *pdev)
{
int ret;
ret = devm_led_classdev_register(&pdev->dev, &hp6xx_red_led);
if (ret < 0)
return ret;
return devm_led_classdev_register(&pdev->dev, &hp6xx_green_led);
}
static struct platform_driver hp6xxled_driver = {
.probe = hp6xxled_probe,
.driver = {
.name = "hp6xx-led",
},
};
module_platform_driver(hp6xxled_driver);
MODULE_AUTHOR("Kristoffer Ericson <[email protected]>");
MODULE_DESCRIPTION("HP Jornada 6xx LED driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:hp6xx-led");
| linux-master | drivers/leds/leds-hp6xx.c |
// SPDX-License-Identifier: GPL-2.0
// TI LM3532 LED driver
// Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com/
// https://www.ti.com/lit/ds/symlink/lm3532.pdf
#include <linux/i2c.h>
#include <linux/leds.h>
#include <linux/slab.h>
#include <linux/regmap.h>
#include <linux/types.h>
#include <linux/regulator/consumer.h>
#include <linux/module.h>
#include <uapi/linux/uleds.h>
#include <linux/gpio/consumer.h>
#define LM3532_NAME "lm3532-led"
#define LM3532_BL_MODE_MANUAL 0x00
#define LM3532_BL_MODE_ALS 0x01
#define LM3532_REG_OUTPUT_CFG 0x10
#define LM3532_REG_STARTSHUT_RAMP 0x11
#define LM3532_REG_RT_RAMP 0x12
#define LM3532_REG_PWM_A_CFG 0x13
#define LM3532_REG_PWM_B_CFG 0x14
#define LM3532_REG_PWM_C_CFG 0x15
#define LM3532_REG_ZONE_CFG_A 0x16
#define LM3532_REG_CTRL_A_FS_CURR 0x17
#define LM3532_REG_ZONE_CFG_B 0x18
#define LM3532_REG_CTRL_B_FS_CURR 0x19
#define LM3532_REG_ZONE_CFG_C 0x1a
#define LM3532_REG_CTRL_C_FS_CURR 0x1b
#define LM3532_REG_ENABLE 0x1d
#define LM3532_ALS_CONFIG 0x23
#define LM3532_REG_ZN_0_HI 0x60
#define LM3532_REG_ZN_0_LO 0x61
#define LM3532_REG_ZN_1_HI 0x62
#define LM3532_REG_ZN_1_LO 0x63
#define LM3532_REG_ZN_2_HI 0x64
#define LM3532_REG_ZN_2_LO 0x65
#define LM3532_REG_ZN_3_HI 0x66
#define LM3532_REG_ZN_3_LO 0x67
#define LM3532_REG_ZONE_TRGT_A 0x70
#define LM3532_REG_ZONE_TRGT_B 0x75
#define LM3532_REG_ZONE_TRGT_C 0x7a
#define LM3532_REG_MAX 0x7e
/* Control Enable */
#define LM3532_CTRL_A_ENABLE BIT(0)
#define LM3532_CTRL_B_ENABLE BIT(1)
#define LM3532_CTRL_C_ENABLE BIT(2)
/* PWM Zone Control */
#define LM3532_PWM_ZONE_MASK 0x7c
#define LM3532_PWM_ZONE_0_EN BIT(2)
#define LM3532_PWM_ZONE_1_EN BIT(3)
#define LM3532_PWM_ZONE_2_EN BIT(4)
#define LM3532_PWM_ZONE_3_EN BIT(5)
#define LM3532_PWM_ZONE_4_EN BIT(6)
/* Brightness Configuration */
#define LM3532_I2C_CTRL BIT(0)
#define LM3532_ALS_CTRL 0
#define LM3532_LINEAR_MAP BIT(1)
#define LM3532_ZONE_MASK (BIT(2) | BIT(3) | BIT(4))
#define LM3532_ZONE_0 0
#define LM3532_ZONE_1 BIT(2)
#define LM3532_ZONE_2 BIT(3)
#define LM3532_ZONE_3 (BIT(2) | BIT(3))
#define LM3532_ZONE_4 BIT(4)
#define LM3532_ENABLE_ALS BIT(3)
#define LM3532_ALS_SEL_SHIFT 6
/* Zone Boundary Register */
#define LM3532_ALS_WINDOW_mV 2000
#define LM3532_ALS_ZB_MAX 4
#define LM3532_ALS_OFFSET_mV 2
#define LM3532_CONTROL_A 0
#define LM3532_CONTROL_B 1
#define LM3532_CONTROL_C 2
#define LM3532_MAX_CONTROL_BANKS 3
#define LM3532_MAX_LED_STRINGS 3
#define LM3532_OUTPUT_CFG_MASK 0x3
#define LM3532_BRT_VAL_ADJUST 8
#define LM3532_RAMP_DOWN_SHIFT 3
#define LM3532_NUM_RAMP_VALS 8
#define LM3532_NUM_AVG_VALS 8
#define LM3532_NUM_IMP_VALS 32
#define LM3532_FS_CURR_MIN 5000
#define LM3532_FS_CURR_MAX 29800
#define LM3532_FS_CURR_STEP 800
/*
* struct lm3532_als_data
* @config: value of ALS configuration register
* @als1_imp_sel: value of ALS1 resistor select register
* @als2_imp_sel: value of ALS2 resistor select register
* @als_avrg_time: ALS averaging time
* @als_input_mode: ALS input mode for brightness control
* @als_vmin: Minimum ALS voltage
* @als_vmax: Maximum ALS voltage
* @zone_lo: values of ALS lo ZB(Zone Boundary) registers
* @zone_hi: values of ALS hi ZB(Zone Boundary) registers
*/
struct lm3532_als_data {
u8 config;
u8 als1_imp_sel;
u8 als2_imp_sel;
u8 als_avrg_time;
u8 als_input_mode;
u32 als_vmin;
u32 als_vmax;
u8 zones_lo[LM3532_ALS_ZB_MAX];
u8 zones_hi[LM3532_ALS_ZB_MAX];
};
/**
* struct lm3532_led
* @led_dev: led class device
 * @priv: Pointer to the device data structure
 * @control_bank: Control bank the LED is associated with
* @mode: Mode of the LED string
* @ctrl_brt_pointer: Zone target register that controls the sink
 * @num_leds: Number of LED strings supported in this array
* @full_scale_current: The full-scale current setting for the current sink.
* @led_strings: The LED strings supported in this array
* @enabled: Enabled status
*/
struct lm3532_led {
struct led_classdev led_dev;
struct lm3532_data *priv;
int control_bank;
int mode;
int ctrl_brt_pointer;
int num_leds;
int full_scale_current;
unsigned int enabled:1;
u32 led_strings[LM3532_MAX_CONTROL_BANKS];
};
/**
* struct lm3532_data
* @enable_gpio: Hardware enable gpio
* @regulator: regulator
* @client: i2c client
 * @regmap: Device's register map
 * @dev: Pointer to the device's struct device
* @lock: Lock for reading/writing the device
* @als_data: Pointer to the als data struct
* @runtime_ramp_up: Runtime ramp up setting
* @runtime_ramp_down: Runtime ramp down setting
* @leds: Array of LED strings
*/
struct lm3532_data {
struct gpio_desc *enable_gpio;
struct regulator *regulator;
struct i2c_client *client;
struct regmap *regmap;
struct device *dev;
struct mutex lock;
struct lm3532_als_data *als_data;
u32 runtime_ramp_up;
u32 runtime_ramp_down;
struct lm3532_led leds[];
};
static const struct reg_default lm3532_reg_defs[] = {
{LM3532_REG_OUTPUT_CFG, 0xe4},
{LM3532_REG_STARTSHUT_RAMP, 0xc0},
{LM3532_REG_RT_RAMP, 0xc0},
{LM3532_REG_PWM_A_CFG, 0x82},
{LM3532_REG_PWM_B_CFG, 0x82},
{LM3532_REG_PWM_C_CFG, 0x82},
{LM3532_REG_ZONE_CFG_A, 0xf1},
{LM3532_REG_CTRL_A_FS_CURR, 0xf3},
{LM3532_REG_ZONE_CFG_B, 0xf1},
{LM3532_REG_CTRL_B_FS_CURR, 0xf3},
{LM3532_REG_ZONE_CFG_C, 0xf1},
{LM3532_REG_CTRL_C_FS_CURR, 0xf3},
{LM3532_REG_ENABLE, 0xf8},
{LM3532_ALS_CONFIG, 0x44},
{LM3532_REG_ZN_0_HI, 0x35},
{LM3532_REG_ZN_0_LO, 0x33},
{LM3532_REG_ZN_1_HI, 0x6a},
{LM3532_REG_ZN_1_LO, 0x66},
{LM3532_REG_ZN_2_HI, 0xa1},
{LM3532_REG_ZN_2_LO, 0x99},
{LM3532_REG_ZN_3_HI, 0xdc},
{LM3532_REG_ZN_3_LO, 0xcc},
};
static const struct regmap_config lm3532_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = LM3532_REG_MAX,
.reg_defaults = lm3532_reg_defs,
.num_reg_defaults = ARRAY_SIZE(lm3532_reg_defs),
.cache_type = REGCACHE_FLAT,
};
static const int als_imp_table[LM3532_NUM_IMP_VALS] = {37000, 18500, 12330,
						       9250, 7400, 6170, 5290,
4630, 4110, 3700, 3360,
3080, 2850, 2640, 2440,
2310, 2180, 2060, 1950,
1850, 1760, 1680, 1610,
1540, 1480, 1420, 1370,
1320, 1280, 1230, 1190};
static int lm3532_get_als_imp_index(int als_imped)
{
int i;
if (als_imped > als_imp_table[1])
return 0;
if (als_imped < als_imp_table[LM3532_NUM_IMP_VALS - 1])
return LM3532_NUM_IMP_VALS - 1;
for (i = 1; i < LM3532_NUM_IMP_VALS; i++) {
if (als_imped == als_imp_table[i])
return i;
/* Find an approximate index by looking up the table */
if (als_imped < als_imp_table[i - 1] &&
als_imped > als_imp_table[i]) {
if (als_imped - als_imp_table[i - 1] <
als_imp_table[i] - als_imped)
return i + 1;
else
return i;
}
}
return -EINVAL;
}
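/*
 * Worked example (illustrative, using the table above): a requested
 * impedance of 5290 ohms matches als_imp_table[6] exactly and yields
 * index 6, while any request above als_imp_table[1] (18500 ohms)
 * short-circuits to index 0, the highest supported impedance.
 */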
static int lm3532_get_index(const int table[], int size, int value)
{
int i;
for (i = 1; i < size; i++) {
if (value == table[i])
return i;
/* Find an approximate index by looking up the table */
if (value > table[i - 1] &&
value < table[i]) {
if (value - table[i - 1] < table[i] - value)
return i - 1;
else
return i;
}
}
return -EINVAL;
}
static const int als_avrg_table[LM3532_NUM_AVG_VALS] = {17920, 35840, 71680,
							143360, 286720, 573440,
1146880, 2293760};
static int lm3532_get_als_avg_index(int avg_time)
{
if (avg_time <= als_avrg_table[0])
return 0;
if (avg_time > als_avrg_table[LM3532_NUM_AVG_VALS - 1])
return LM3532_NUM_AVG_VALS - 1;
return lm3532_get_index(&als_avrg_table[0], LM3532_NUM_AVG_VALS,
avg_time);
}
static const int ramp_table[LM3532_NUM_RAMP_VALS] = { 8, 1024, 2048, 4096, 8192,
16384, 32768, 65536};
static int lm3532_get_ramp_index(int ramp_time)
{
if (ramp_time <= ramp_table[0])
return 0;
if (ramp_time > ramp_table[LM3532_NUM_RAMP_VALS - 1])
return LM3532_NUM_RAMP_VALS - 1;
return lm3532_get_index(&ramp_table[0], LM3532_NUM_RAMP_VALS,
ramp_time);
}
/* Caller must take care of locking */
static int lm3532_led_enable(struct lm3532_led *led_data)
{
int ctrl_en_val = BIT(led_data->control_bank);
int ret;
if (led_data->enabled)
return 0;
ret = regmap_update_bits(led_data->priv->regmap, LM3532_REG_ENABLE,
ctrl_en_val, ctrl_en_val);
if (ret) {
dev_err(led_data->priv->dev, "Failed to set ctrl:%d\n", ret);
return ret;
}
ret = regulator_enable(led_data->priv->regulator);
if (ret < 0)
return ret;
led_data->enabled = 1;
return 0;
}
/* Caller must take care of locking */
static int lm3532_led_disable(struct lm3532_led *led_data)
{
int ctrl_en_val = BIT(led_data->control_bank);
int ret;
if (!led_data->enabled)
return 0;
ret = regmap_update_bits(led_data->priv->regmap, LM3532_REG_ENABLE,
ctrl_en_val, 0);
if (ret) {
dev_err(led_data->priv->dev, "Failed to set ctrl:%d\n", ret);
return ret;
}
ret = regulator_disable(led_data->priv->regulator);
if (ret < 0)
return ret;
led_data->enabled = 0;
return 0;
}
static int lm3532_brightness_set(struct led_classdev *led_cdev,
enum led_brightness brt_val)
{
struct lm3532_led *led =
container_of(led_cdev, struct lm3532_led, led_dev);
u8 brightness_reg;
int ret;
mutex_lock(&led->priv->lock);
if (led->mode == LM3532_ALS_CTRL) {
if (brt_val > LED_OFF)
ret = lm3532_led_enable(led);
else
ret = lm3532_led_disable(led);
goto unlock;
}
if (brt_val == LED_OFF) {
ret = lm3532_led_disable(led);
goto unlock;
}
ret = lm3532_led_enable(led);
if (ret)
goto unlock;
brightness_reg = LM3532_REG_ZONE_TRGT_A + led->control_bank * 5 +
(led->ctrl_brt_pointer >> 2);
ret = regmap_write(led->priv->regmap, brightness_reg, brt_val);
unlock:
mutex_unlock(&led->priv->lock);
return ret;
}
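/*
 * Worked example (illustrative): the zone target registers of the three
 * control banks start at 0x70/0x75/0x7a, i.e. 5 apart. For control bank B
 * (LM3532_CONTROL_B == 1) with ctrl_brt_pointer holding zone 2
 * (LM3532_ZONE_2 == BIT(3) == 0x08), the register written above is
 * 0x70 + 1 * 5 + (0x08 >> 2) = 0x77.
 */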
static int lm3532_init_registers(struct lm3532_led *led)
{
struct lm3532_data *drvdata = led->priv;
unsigned int runtime_ramp_val;
unsigned int output_cfg_val = 0;
unsigned int output_cfg_shift = 0;
unsigned int output_cfg_mask = 0;
unsigned int brightness_config_reg;
unsigned int brightness_config_val;
int fs_current_reg;
int fs_current_val;
int ret, i;
if (drvdata->enable_gpio)
gpiod_direction_output(drvdata->enable_gpio, 1);
brightness_config_reg = LM3532_REG_ZONE_CFG_A + led->control_bank * 2;
/*
* This could be hard coded to the default value but the control
* brightness register may have changed during boot.
*/
ret = regmap_read(drvdata->regmap, brightness_config_reg,
&led->ctrl_brt_pointer);
if (ret)
return ret;
led->ctrl_brt_pointer &= LM3532_ZONE_MASK;
brightness_config_val = led->ctrl_brt_pointer | led->mode;
ret = regmap_write(drvdata->regmap, brightness_config_reg,
brightness_config_val);
if (ret)
return ret;
if (led->full_scale_current) {
fs_current_reg = LM3532_REG_CTRL_A_FS_CURR + led->control_bank * 2;
fs_current_val = (led->full_scale_current - LM3532_FS_CURR_MIN) /
LM3532_FS_CURR_STEP;
ret = regmap_write(drvdata->regmap, fs_current_reg,
fs_current_val);
if (ret)
return ret;
}
for (i = 0; i < led->num_leds; i++) {
output_cfg_shift = led->led_strings[i] * 2;
output_cfg_val |= (led->control_bank << output_cfg_shift);
output_cfg_mask |= LM3532_OUTPUT_CFG_MASK << output_cfg_shift;
}
ret = regmap_update_bits(drvdata->regmap, LM3532_REG_OUTPUT_CFG,
output_cfg_mask, output_cfg_val);
if (ret)
return ret;
runtime_ramp_val = drvdata->runtime_ramp_up |
(drvdata->runtime_ramp_down << LM3532_RAMP_DOWN_SHIFT);
return regmap_write(drvdata->regmap, LM3532_REG_RT_RAMP,
runtime_ramp_val);
}
static int lm3532_als_configure(struct lm3532_data *priv,
struct lm3532_led *led)
{
struct lm3532_als_data *als = priv->als_data;
u32 als_vmin, als_vmax, als_vstep;
int zone_reg = LM3532_REG_ZN_0_HI;
int ret;
int i;
als_vmin = als->als_vmin;
als_vmax = als->als_vmax;
als_vstep = (als_vmax - als_vmin) / ((LM3532_ALS_ZB_MAX + 1) * 2);
for (i = 0; i < LM3532_ALS_ZB_MAX; i++) {
als->zones_lo[i] = ((als_vmin + als_vstep + (i * als_vstep)) *
LED_FULL) / 1000;
als->zones_hi[i] = ((als_vmin + LM3532_ALS_OFFSET_mV +
als_vstep + (i * als_vstep)) * LED_FULL) / 1000;
zone_reg = LM3532_REG_ZN_0_HI + i * 2;
ret = regmap_write(priv->regmap, zone_reg, als->zones_lo[i]);
if (ret)
return ret;
zone_reg += 1;
ret = regmap_write(priv->regmap, zone_reg, als->zones_hi[i]);
if (ret)
return ret;
}
als->config = (als->als_avrg_time | (LM3532_ENABLE_ALS) |
(als->als_input_mode << LM3532_ALS_SEL_SHIFT));
return regmap_write(priv->regmap, LM3532_ALS_CONFIG, als->config);
}
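/*
 * Worked example (illustrative): with the default window als_vmin = 0 and
 * als_vmax = LM3532_ALS_WINDOW_mV (2000 mV), als_vstep is
 * 2000 / ((4 + 1) * 2) = 200 mV and LED_FULL is 255. The first and last
 * boundaries written above then become
 * zones_lo[0] = (0 + 200) * 255 / 1000 = 51 and
 * zones_lo[3] = (0 + 200 + 3 * 200) * 255 / 1000 = 204, with each
 * zones_hi[] value offset by LM3532_ALS_OFFSET_mV (2 mV) before scaling.
 */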
static int lm3532_parse_als(struct lm3532_data *priv)
{
struct lm3532_als_data *als;
int als_avg_time;
int als_impedance;
int ret;
als = devm_kzalloc(priv->dev, sizeof(*als), GFP_KERNEL);
if (als == NULL)
return -ENOMEM;
ret = device_property_read_u32(&priv->client->dev, "ti,als-vmin",
&als->als_vmin);
if (ret)
als->als_vmin = 0;
ret = device_property_read_u32(&priv->client->dev, "ti,als-vmax",
&als->als_vmax);
if (ret)
als->als_vmax = LM3532_ALS_WINDOW_mV;
if (als->als_vmax > LM3532_ALS_WINDOW_mV) {
ret = -EINVAL;
return ret;
}
ret = device_property_read_u32(&priv->client->dev, "ti,als1-imp-sel",
&als_impedance);
if (ret)
als->als1_imp_sel = 0;
else
als->als1_imp_sel = lm3532_get_als_imp_index(als_impedance);
ret = device_property_read_u32(&priv->client->dev, "ti,als2-imp-sel",
&als_impedance);
if (ret)
als->als2_imp_sel = 0;
else
als->als2_imp_sel = lm3532_get_als_imp_index(als_impedance);
ret = device_property_read_u32(&priv->client->dev, "ti,als-avrg-time-us",
&als_avg_time);
if (ret)
als->als_avrg_time = 0;
else
als->als_avrg_time = lm3532_get_als_avg_index(als_avg_time);
ret = device_property_read_u8(&priv->client->dev, "ti,als-input-mode",
&als->als_input_mode);
if (ret)
als->als_input_mode = 0;
if (als->als_input_mode > LM3532_BL_MODE_ALS) {
ret = -EINVAL;
return ret;
}
priv->als_data = als;
return ret;
}
static int lm3532_parse_node(struct lm3532_data *priv)
{
struct fwnode_handle *child = NULL;
struct lm3532_led *led;
int control_bank;
u32 ramp_time;
size_t i = 0;
int ret;
priv->enable_gpio = devm_gpiod_get_optional(&priv->client->dev,
"enable", GPIOD_OUT_LOW);
if (IS_ERR(priv->enable_gpio))
priv->enable_gpio = NULL;
priv->regulator = devm_regulator_get(&priv->client->dev, "vin");
if (IS_ERR(priv->regulator))
priv->regulator = NULL;
ret = device_property_read_u32(&priv->client->dev, "ramp-up-us",
&ramp_time);
if (ret)
dev_info(&priv->client->dev, "ramp-up-ms property missing\n");
else
priv->runtime_ramp_up = lm3532_get_ramp_index(ramp_time);
ret = device_property_read_u32(&priv->client->dev, "ramp-down-us",
&ramp_time);
if (ret)
dev_info(&priv->client->dev, "ramp-down-ms property missing\n");
else
priv->runtime_ramp_down = lm3532_get_ramp_index(ramp_time);
device_for_each_child_node(priv->dev, child) {
struct led_init_data idata = {
.fwnode = child,
.default_label = ":",
.devicename = priv->client->name,
};
led = &priv->leds[i];
ret = fwnode_property_read_u32(child, "reg", &control_bank);
if (ret) {
dev_err(&priv->client->dev, "reg property missing\n");
goto child_out;
}
if (control_bank > LM3532_CONTROL_C) {
dev_err(&priv->client->dev, "Control bank invalid\n");
continue;
}
led->control_bank = control_bank;
ret = fwnode_property_read_u32(child, "ti,led-mode",
&led->mode);
if (ret) {
dev_err(&priv->client->dev, "ti,led-mode property missing\n");
goto child_out;
}
if (fwnode_property_present(child, "led-max-microamp") &&
fwnode_property_read_u32(child, "led-max-microamp",
&led->full_scale_current))
dev_err(&priv->client->dev,
"Failed getting led-max-microamp\n");
else
led->full_scale_current = min(led->full_scale_current,
LM3532_FS_CURR_MAX);
if (led->mode == LM3532_BL_MODE_ALS) {
led->mode = LM3532_ALS_CTRL;
ret = lm3532_parse_als(priv);
if (ret)
dev_err(&priv->client->dev, "Failed to parse als\n");
else
lm3532_als_configure(priv, led);
} else {
led->mode = LM3532_I2C_CTRL;
}
led->num_leds = fwnode_property_count_u32(child, "led-sources");
if (led->num_leds > LM3532_MAX_LED_STRINGS) {
dev_err(&priv->client->dev, "Too many LED string defined\n");
continue;
}
ret = fwnode_property_read_u32_array(child, "led-sources",
led->led_strings,
led->num_leds);
if (ret) {
dev_err(&priv->client->dev, "led-sources property missing\n");
goto child_out;
}
led->priv = priv;
led->led_dev.brightness_set_blocking = lm3532_brightness_set;
ret = devm_led_classdev_register_ext(priv->dev, &led->led_dev, &idata);
if (ret) {
dev_err(&priv->client->dev, "led register err: %d\n",
ret);
goto child_out;
}
ret = lm3532_init_registers(led);
if (ret) {
dev_err(&priv->client->dev, "register init err: %d\n",
ret);
goto child_out;
}
i++;
}
return 0;
child_out:
fwnode_handle_put(child);
return ret;
}
static int lm3532_probe(struct i2c_client *client)
{
struct lm3532_data *drvdata;
int ret = 0;
int count;
count = device_get_child_node_count(&client->dev);
if (!count) {
dev_err(&client->dev, "LEDs are not defined in device tree!");
return -ENODEV;
}
drvdata = devm_kzalloc(&client->dev, struct_size(drvdata, leds, count),
GFP_KERNEL);
if (drvdata == NULL)
return -ENOMEM;
drvdata->client = client;
drvdata->dev = &client->dev;
drvdata->regmap = devm_regmap_init_i2c(client, &lm3532_regmap_config);
if (IS_ERR(drvdata->regmap)) {
ret = PTR_ERR(drvdata->regmap);
dev_err(&client->dev, "Failed to allocate register map: %d\n",
ret);
return ret;
}
mutex_init(&drvdata->lock);
i2c_set_clientdata(client, drvdata);
ret = lm3532_parse_node(drvdata);
if (ret) {
dev_err(&client->dev, "Failed to parse node\n");
return ret;
}
return ret;
}
static void lm3532_remove(struct i2c_client *client)
{
struct lm3532_data *drvdata = i2c_get_clientdata(client);
mutex_destroy(&drvdata->lock);
if (drvdata->enable_gpio)
gpiod_direction_output(drvdata->enable_gpio, 0);
}
static const struct of_device_id of_lm3532_leds_match[] = {
{ .compatible = "ti,lm3532", },
{},
};
MODULE_DEVICE_TABLE(of, of_lm3532_leds_match);
static const struct i2c_device_id lm3532_id[] = {
{LM3532_NAME, 0},
{}
};
MODULE_DEVICE_TABLE(i2c, lm3532_id);
static struct i2c_driver lm3532_i2c_driver = {
.probe = lm3532_probe,
.remove = lm3532_remove,
.id_table = lm3532_id,
.driver = {
.name = LM3532_NAME,
.of_match_table = of_lm3532_leds_match,
},
};
module_i2c_driver(lm3532_i2c_driver);
MODULE_DESCRIPTION("Back Light driver for LM3532");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dan Murphy <[email protected]>");
| linux-master | drivers/leds/leds-lm3532.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/leds/leds-pwm.c
*
* simple PWM based LED control
*
* Copyright 2009 Luotao Fu @ Pengutronix ([email protected])
*
* based on leds-gpio.c by Raphael Assenat <[email protected]>
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/leds.h>
#include <linux/err.h>
#include <linux/pwm.h>
#include <linux/slab.h>
#include "leds.h"
struct led_pwm {
const char *name;
u8 active_low;
u8 default_state;
unsigned int max_brightness;
};
struct led_pwm_data {
struct led_classdev cdev;
struct pwm_device *pwm;
struct pwm_state pwmstate;
unsigned int active_low;
};
struct led_pwm_priv {
int num_leds;
struct led_pwm_data leds[];
};
static int led_pwm_set(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
struct led_pwm_data *led_dat =
container_of(led_cdev, struct led_pwm_data, cdev);
unsigned int max = led_dat->cdev.max_brightness;
unsigned long long duty = led_dat->pwmstate.period;
duty *= brightness;
do_div(duty, max);
if (led_dat->active_low)
duty = led_dat->pwmstate.period - duty;
led_dat->pwmstate.duty_cycle = duty;
led_dat->pwmstate.enabled = duty > 0;
return pwm_apply_state(led_dat->pwm, &led_dat->pwmstate);
}
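/*
 * Worked example (illustrative, with hypothetical numbers): for a PWM
 * period of 1000000 ns, max_brightness of 255 and a requested brightness
 * of 128, the duty cycle computed above is 1000000 * 128 / 255 = 501960 ns.
 * With active_low set it is inverted to 1000000 - 501960 = 498040 ns, and
 * the PWM is enabled because the duty cycle is non-zero.
 */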
__attribute__((nonnull))
static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv,
struct led_pwm *led, struct fwnode_handle *fwnode)
{
struct led_pwm_data *led_data = &priv->leds[priv->num_leds];
struct led_init_data init_data = { .fwnode = fwnode };
int ret;
led_data->active_low = led->active_low;
led_data->cdev.name = led->name;
led_data->cdev.brightness = LED_OFF;
led_data->cdev.max_brightness = led->max_brightness;
led_data->cdev.flags = LED_CORE_SUSPENDRESUME;
led_data->pwm = devm_fwnode_pwm_get(dev, fwnode, NULL);
if (IS_ERR(led_data->pwm))
return dev_err_probe(dev, PTR_ERR(led_data->pwm),
"unable to request PWM for %s\n",
led->name);
led_data->cdev.brightness_set_blocking = led_pwm_set;
/* init PWM state */
switch (led->default_state) {
case LEDS_DEFSTATE_KEEP:
pwm_get_state(led_data->pwm, &led_data->pwmstate);
if (led_data->pwmstate.period)
break;
led->default_state = LEDS_DEFSTATE_OFF;
dev_warn(dev,
"failed to read period for %s, default to off",
led->name);
fallthrough;
default:
pwm_init_state(led_data->pwm, &led_data->pwmstate);
break;
}
/* set brightness */
switch (led->default_state) {
case LEDS_DEFSTATE_ON:
led_data->cdev.brightness = led->max_brightness;
break;
case LEDS_DEFSTATE_KEEP:
{
uint64_t brightness;
brightness = led->max_brightness;
brightness *= led_data->pwmstate.duty_cycle;
do_div(brightness, led_data->pwmstate.period);
led_data->cdev.brightness = brightness;
}
break;
}
ret = devm_led_classdev_register_ext(dev, &led_data->cdev, &init_data);
if (ret) {
dev_err(dev, "failed to register PWM led for %s: %d\n",
led->name, ret);
return ret;
}
if (led->default_state != LEDS_DEFSTATE_KEEP) {
ret = led_pwm_set(&led_data->cdev, led_data->cdev.brightness);
if (ret) {
dev_err(dev, "failed to set led PWM value for %s: %d",
led->name, ret);
return ret;
}
}
priv->num_leds++;
return 0;
}
static int led_pwm_create_fwnode(struct device *dev, struct led_pwm_priv *priv)
{
struct fwnode_handle *fwnode;
struct led_pwm led;
int ret;
device_for_each_child_node(dev, fwnode) {
memset(&led, 0, sizeof(led));
ret = fwnode_property_read_string(fwnode, "label", &led.name);
if (ret && is_of_node(fwnode))
led.name = to_of_node(fwnode)->name;
if (!led.name) {
ret = -EINVAL;
goto err_child_out;
}
led.active_low = fwnode_property_read_bool(fwnode,
"active-low");
fwnode_property_read_u32(fwnode, "max-brightness",
&led.max_brightness);
led.default_state = led_init_default_state_get(fwnode);
ret = led_pwm_add(dev, priv, &led, fwnode);
if (ret)
goto err_child_out;
}
return 0;
err_child_out:
fwnode_handle_put(fwnode);
return ret;
}
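/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * device-tree fragment matched by the "pwm-leds" compatible below. The
 * &pwm0 phandle and the label are made up; the properties are the ones
 * parsed by led_pwm_create_fwnode() above ("label", "active-low",
 * "max-brightness") plus the standard "pwms" specifier consumed by
 * devm_fwnode_pwm_get().
 *
 *	led-controller {
 *		compatible = "pwm-leds";
 *
 *		led-0 {
 *			label = "status:green";
 *			pwms = <&pwm0 0 1000000>;
 *			max-brightness = <255>;
 *		};
 *	};
 */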
static int led_pwm_probe(struct platform_device *pdev)
{
struct led_pwm_priv *priv;
int ret = 0;
int count;
count = device_get_child_node_count(&pdev->dev);
if (!count)
return -EINVAL;
priv = devm_kzalloc(&pdev->dev, struct_size(priv, leds, count),
GFP_KERNEL);
if (!priv)
return -ENOMEM;
ret = led_pwm_create_fwnode(&pdev->dev, priv);
if (ret)
return ret;
platform_set_drvdata(pdev, priv);
return 0;
}
static const struct of_device_id of_pwm_leds_match[] = {
{ .compatible = "pwm-leds", },
{},
};
MODULE_DEVICE_TABLE(of, of_pwm_leds_match);
static struct platform_driver led_pwm_driver = {
.probe = led_pwm_probe,
.driver = {
.name = "leds_pwm",
.of_match_table = of_pwm_leds_match,
},
};
module_platform_driver(led_pwm_driver);
MODULE_AUTHOR("Luotao Fu <[email protected]>");
MODULE_DESCRIPTION("generic PWM LED driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:leds-pwm");
| linux-master | drivers/leds/leds-pwm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* LED Flash class interface
*
* Copyright (C) 2015 Samsung Electronics Co., Ltd.
* Author: Jacek Anaszewski <[email protected]>
*/
#include <linux/device.h>
#include <linux/init.h>
#include <linux/led-class-flash.h>
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/slab.h>
#include "leds.h"
#define has_flash_op(fled_cdev, op) \
(fled_cdev && fled_cdev->ops->op)
#define call_flash_op(fled_cdev, op, args...) \
((has_flash_op(fled_cdev, op)) ? \
(fled_cdev->ops->op(fled_cdev, args)) : \
-EINVAL)
static const char * const led_flash_fault_names[] = {
"led-over-voltage",
"flash-timeout-exceeded",
"controller-over-temperature",
"controller-short-circuit",
"led-power-supply-over-current",
"indicator-led-fault",
"led-under-voltage",
"controller-under-voltage",
"led-over-temperature",
};
static ssize_t flash_brightness_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
unsigned long state;
ssize_t ret;
mutex_lock(&led_cdev->led_access);
if (led_sysfs_is_disabled(led_cdev)) {
ret = -EBUSY;
goto unlock;
}
ret = kstrtoul(buf, 10, &state);
if (ret)
goto unlock;
ret = led_set_flash_brightness(fled_cdev, state);
if (ret < 0)
goto unlock;
ret = size;
unlock:
mutex_unlock(&led_cdev->led_access);
return ret;
}
static ssize_t flash_brightness_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
/* no lock needed for this */
led_update_flash_brightness(fled_cdev);
return sprintf(buf, "%u\n", fled_cdev->brightness.val);
}
static DEVICE_ATTR_RW(flash_brightness);
static ssize_t max_flash_brightness_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
return sprintf(buf, "%u\n", fled_cdev->brightness.max);
}
static DEVICE_ATTR_RO(max_flash_brightness);
static ssize_t flash_strobe_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
unsigned long state;
ssize_t ret = -EBUSY;
mutex_lock(&led_cdev->led_access);
if (led_sysfs_is_disabled(led_cdev))
goto unlock;
ret = kstrtoul(buf, 10, &state);
if (ret)
goto unlock;
if (state > 1) {
ret = -EINVAL;
goto unlock;
}
ret = led_set_flash_strobe(fled_cdev, state);
if (ret < 0)
goto unlock;
ret = size;
unlock:
mutex_unlock(&led_cdev->led_access);
return ret;
}
static ssize_t flash_strobe_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
bool state;
int ret;
/* no lock needed for this */
ret = led_get_flash_strobe(fled_cdev, &state);
if (ret < 0)
return ret;
return sprintf(buf, "%u\n", state);
}
static DEVICE_ATTR_RW(flash_strobe);
static ssize_t flash_timeout_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
unsigned long flash_timeout;
ssize_t ret;
mutex_lock(&led_cdev->led_access);
if (led_sysfs_is_disabled(led_cdev)) {
ret = -EBUSY;
goto unlock;
}
ret = kstrtoul(buf, 10, &flash_timeout);
if (ret)
goto unlock;
ret = led_set_flash_timeout(fled_cdev, flash_timeout);
if (ret < 0)
goto unlock;
ret = size;
unlock:
mutex_unlock(&led_cdev->led_access);
return ret;
}
static ssize_t flash_timeout_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
return sprintf(buf, "%u\n", fled_cdev->timeout.val);
}
static DEVICE_ATTR_RW(flash_timeout);
static ssize_t max_flash_timeout_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
return sprintf(buf, "%u\n", fled_cdev->timeout.max);
}
static DEVICE_ATTR_RO(max_flash_timeout);
static ssize_t flash_fault_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
u32 fault, mask = 0x1;
char *pbuf = buf;
int i, ret, buf_len;
ret = led_get_flash_fault(fled_cdev, &fault);
if (ret < 0)
return -EINVAL;
*buf = '\0';
for (i = 0; i < LED_NUM_FLASH_FAULTS; ++i) {
if (fault & mask) {
buf_len = sprintf(pbuf, "%s ",
led_flash_fault_names[i]);
pbuf += buf_len;
}
mask <<= 1;
}
return strlen(strcat(buf, "\n"));
}
static DEVICE_ATTR_RO(flash_fault);
static struct attribute *led_flash_strobe_attrs[] = {
&dev_attr_flash_strobe.attr,
NULL,
};
static struct attribute *led_flash_timeout_attrs[] = {
&dev_attr_flash_timeout.attr,
&dev_attr_max_flash_timeout.attr,
NULL,
};
static struct attribute *led_flash_brightness_attrs[] = {
&dev_attr_flash_brightness.attr,
&dev_attr_max_flash_brightness.attr,
NULL,
};
static struct attribute *led_flash_fault_attrs[] = {
&dev_attr_flash_fault.attr,
NULL,
};
static const struct attribute_group led_flash_strobe_group = {
.attrs = led_flash_strobe_attrs,
};
static const struct attribute_group led_flash_timeout_group = {
.attrs = led_flash_timeout_attrs,
};
static const struct attribute_group led_flash_brightness_group = {
.attrs = led_flash_brightness_attrs,
};
static const struct attribute_group led_flash_fault_group = {
.attrs = led_flash_fault_attrs,
};
static void led_flash_resume(struct led_classdev *led_cdev)
{
struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
call_flash_op(fled_cdev, flash_brightness_set,
fled_cdev->brightness.val);
call_flash_op(fled_cdev, timeout_set, fled_cdev->timeout.val);
}
static void led_flash_init_sysfs_groups(struct led_classdev_flash *fled_cdev)
{
struct led_classdev *led_cdev = &fled_cdev->led_cdev;
const struct led_flash_ops *ops = fled_cdev->ops;
const struct attribute_group **flash_groups = fled_cdev->sysfs_groups;
int num_sysfs_groups = 0;
flash_groups[num_sysfs_groups++] = &led_flash_strobe_group;
if (ops->flash_brightness_set)
flash_groups[num_sysfs_groups++] = &led_flash_brightness_group;
if (ops->timeout_set)
flash_groups[num_sysfs_groups++] = &led_flash_timeout_group;
if (ops->fault_get)
flash_groups[num_sysfs_groups++] = &led_flash_fault_group;
led_cdev->groups = flash_groups;
}
int led_classdev_flash_register_ext(struct device *parent,
struct led_classdev_flash *fled_cdev,
struct led_init_data *init_data)
{
struct led_classdev *led_cdev;
const struct led_flash_ops *ops;
int ret;
if (!fled_cdev)
return -EINVAL;
led_cdev = &fled_cdev->led_cdev;
if (led_cdev->flags & LED_DEV_CAP_FLASH) {
if (!led_cdev->brightness_set_blocking)
return -EINVAL;
ops = fled_cdev->ops;
if (!ops || !ops->strobe_set)
return -EINVAL;
led_cdev->flash_resume = led_flash_resume;
/* Select the sysfs attributes to be created for the device */
led_flash_init_sysfs_groups(fled_cdev);
}
/* Register led class device */
ret = led_classdev_register_ext(parent, led_cdev, init_data);
if (ret < 0)
return ret;
return 0;
}
EXPORT_SYMBOL_GPL(led_classdev_flash_register_ext);
void led_classdev_flash_unregister(struct led_classdev_flash *fled_cdev)
{
if (!fled_cdev)
return;
led_classdev_unregister(&fled_cdev->led_cdev);
}
EXPORT_SYMBOL_GPL(led_classdev_flash_unregister);
static void devm_led_classdev_flash_release(struct device *dev, void *res)
{
led_classdev_flash_unregister(*(struct led_classdev_flash **)res);
}
int devm_led_classdev_flash_register_ext(struct device *parent,
struct led_classdev_flash *fled_cdev,
struct led_init_data *init_data)
{
struct led_classdev_flash **dr;
int ret;
dr = devres_alloc(devm_led_classdev_flash_release, sizeof(*dr),
GFP_KERNEL);
if (!dr)
return -ENOMEM;
ret = led_classdev_flash_register_ext(parent, fled_cdev, init_data);
if (ret) {
devres_free(dr);
return ret;
}
*dr = fled_cdev;
devres_add(parent, dr);
return 0;
}
EXPORT_SYMBOL_GPL(devm_led_classdev_flash_register_ext);
static int devm_led_classdev_flash_match(struct device *dev,
void *res, void *data)
{
struct led_classdev_flash **p = res;
if (WARN_ON(!p || !*p))
return 0;
return *p == data;
}
void devm_led_classdev_flash_unregister(struct device *dev,
struct led_classdev_flash *fled_cdev)
{
WARN_ON(devres_release(dev,
devm_led_classdev_flash_release,
devm_led_classdev_flash_match, fled_cdev));
}
EXPORT_SYMBOL_GPL(devm_led_classdev_flash_unregister);
static void led_clamp_align(struct led_flash_setting *s)
{
u32 v, offset;
v = s->val + s->step / 2;
v = clamp(v, s->min, s->max);
offset = v - s->min;
offset = s->step * (offset / s->step);
s->val = s->min + offset;
}
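/*
 * Worked example (illustrative, with hypothetical min/max/step values):
 * for a setting with min = 1000, max = 10000 and step = 1000, a requested
 * value of 3600 is first biased by half a step (3600 + 500 = 4100),
 * clamped to [1000, 10000], and then aligned down to the step grid:
 * 1000 + 1000 * ((4100 - 1000) / 1000) = 4000, i.e. the nearest grid
 * point within range.
 */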
int led_set_flash_timeout(struct led_classdev_flash *fled_cdev, u32 timeout)
{
struct led_classdev *led_cdev = &fled_cdev->led_cdev;
struct led_flash_setting *s = &fled_cdev->timeout;
s->val = timeout;
led_clamp_align(s);
if (!(led_cdev->flags & LED_SUSPENDED))
return call_flash_op(fled_cdev, timeout_set, s->val);
return 0;
}
EXPORT_SYMBOL_GPL(led_set_flash_timeout);
int led_get_flash_fault(struct led_classdev_flash *fled_cdev, u32 *fault)
{
return call_flash_op(fled_cdev, fault_get, fault);
}
EXPORT_SYMBOL_GPL(led_get_flash_fault);
int led_set_flash_brightness(struct led_classdev_flash *fled_cdev,
u32 brightness)
{
struct led_classdev *led_cdev = &fled_cdev->led_cdev;
struct led_flash_setting *s = &fled_cdev->brightness;
s->val = brightness;
led_clamp_align(s);
if (!(led_cdev->flags & LED_SUSPENDED))
return call_flash_op(fled_cdev, flash_brightness_set, s->val);
return 0;
}
EXPORT_SYMBOL_GPL(led_set_flash_brightness);
int led_update_flash_brightness(struct led_classdev_flash *fled_cdev)
{
struct led_flash_setting *s = &fled_cdev->brightness;
u32 brightness;
if (has_flash_op(fled_cdev, flash_brightness_get)) {
int ret = call_flash_op(fled_cdev, flash_brightness_get,
&brightness);
if (ret < 0)
return ret;
s->val = brightness;
}
return 0;
}
EXPORT_SYMBOL_GPL(led_update_flash_brightness);
MODULE_AUTHOR("Jacek Anaszewski <[email protected]>");
MODULE_DESCRIPTION("LED Flash class interface");
MODULE_LICENSE("GPL v2");
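/*
 * Illustrative sketch (not part of the original file): the minimal shape
 * of a driver registering a flash LED through the API above. All names
 * are hypothetical; when LED_DEV_CAP_FLASH is set, only
 * brightness_set_blocking and ops->strobe_set are mandatory, as checked
 * in led_classdev_flash_register_ext().
 *
 *	static const struct led_flash_ops example_flash_ops = {
 *		.strobe_set	= example_strobe_set,
 *		.timeout_set	= example_timeout_set,
 *	};
 *
 *	static struct led_classdev_flash example_fled = {
 *		.led_cdev = {
 *			.name = "example:flash",
 *			.flags = LED_DEV_CAP_FLASH,
 *			.brightness_set_blocking = example_brightness_set,
 *		},
 *		.ops = &example_flash_ops,
 *	};
 *
 *	in probe(), with a NULL init_data:
 *		ret = devm_led_classdev_flash_register_ext(dev, &example_fled,
 *							   NULL);
 */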
| linux-master | drivers/leds/led-class-flash.c |
// SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0-or-later
/*
* Dell Wyse 3020 a.k.a. "Ariel" Embedded Controller LED Driver
*
* Copyright (C) 2020 Lubomir Rintel
*/
#include <linux/module.h>
#include <linux/leds.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
enum ec_index {
EC_BLUE_LED = 0x01,
EC_AMBER_LED = 0x02,
EC_GREEN_LED = 0x03,
};
enum {
EC_LED_OFF = 0x00,
EC_LED_STILL = 0x01,
EC_LED_FADE = 0x02,
EC_LED_BLINK = 0x03,
};
struct ariel_led {
struct regmap *ec_ram;
enum ec_index ec_index;
struct led_classdev led_cdev;
};
#define led_cdev_to_ariel_led(c) container_of(c, struct ariel_led, led_cdev)
static enum led_brightness ariel_led_get(struct led_classdev *led_cdev)
{
struct ariel_led *led = led_cdev_to_ariel_led(led_cdev);
unsigned int led_status = 0;
if (regmap_read(led->ec_ram, led->ec_index, &led_status))
return LED_OFF;
if (led_status == EC_LED_STILL)
return LED_FULL;
else
return LED_OFF;
}
static void ariel_led_set(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
struct ariel_led *led = led_cdev_to_ariel_led(led_cdev);
if (brightness == LED_OFF)
regmap_write(led->ec_ram, led->ec_index, EC_LED_OFF);
else
regmap_write(led->ec_ram, led->ec_index, EC_LED_STILL);
}
static int ariel_blink_set(struct led_classdev *led_cdev,
unsigned long *delay_on, unsigned long *delay_off)
{
struct ariel_led *led = led_cdev_to_ariel_led(led_cdev);
if (*delay_on == 0 && *delay_off == 0)
return -EINVAL;
if (*delay_on == 0) {
regmap_write(led->ec_ram, led->ec_index, EC_LED_OFF);
} else if (*delay_off == 0) {
regmap_write(led->ec_ram, led->ec_index, EC_LED_STILL);
} else {
*delay_on = 500;
*delay_off = 500;
regmap_write(led->ec_ram, led->ec_index, EC_LED_BLINK);
}
return 0;
}
#define NLEDS 3
static int ariel_led_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ariel_led *leds;
struct regmap *ec_ram;
int ret;
int i;
ec_ram = dev_get_regmap(dev->parent, "ec_ram");
if (!ec_ram)
return -ENODEV;
leds = devm_kcalloc(dev, NLEDS, sizeof(*leds), GFP_KERNEL);
if (!leds)
return -ENOMEM;
leds[0].ec_index = EC_BLUE_LED;
leds[0].led_cdev.name = "blue:power";
leds[0].led_cdev.default_trigger = "default-on";
leds[1].ec_index = EC_AMBER_LED;
leds[1].led_cdev.name = "amber:status";
leds[2].ec_index = EC_GREEN_LED;
leds[2].led_cdev.name = "green:status";
leds[2].led_cdev.default_trigger = "default-on";
for (i = 0; i < NLEDS; i++) {
leds[i].ec_ram = ec_ram;
leds[i].led_cdev.brightness_get = ariel_led_get;
leds[i].led_cdev.brightness_set = ariel_led_set;
leds[i].led_cdev.blink_set = ariel_blink_set;
ret = devm_led_classdev_register(dev, &leds[i].led_cdev);
if (ret)
return ret;
}
return 0;
}
static struct platform_driver ariel_led_driver = {
.probe = ariel_led_probe,
.driver = {
.name = "dell-wyse-ariel-led",
},
};
module_platform_driver(ariel_led_driver);
MODULE_AUTHOR("Lubomir Rintel <[email protected]>");
MODULE_DESCRIPTION("Dell Wyse 3020 Status LEDs Driver");
MODULE_LICENSE("Dual BSD/GPL");
| linux-master | drivers/leds/leds-ariel.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* LP5521 LED chip driver.
*
* Copyright (C) 2010 Nokia Corporation
* Copyright (C) 2012 Texas Instruments
*
* Contact: Samu Onkalo <[email protected]>
* Milo(Woogyom) Kim <[email protected]>
*/
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/i2c.h>
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_data/leds-lp55xx.h>
#include <linux/slab.h>
#include <linux/of.h>
#include "leds-lp55xx-common.h"
#define LP5521_PROGRAM_LENGTH 32
#define LP5521_MAX_LEDS 3
#define LP5521_CMD_DIRECT 0x3F
/* Registers */
#define LP5521_REG_ENABLE 0x00
#define LP5521_REG_OP_MODE 0x01
#define LP5521_REG_R_PWM 0x02
#define LP5521_REG_G_PWM 0x03
#define LP5521_REG_B_PWM 0x04
#define LP5521_REG_R_CURRENT 0x05
#define LP5521_REG_G_CURRENT 0x06
#define LP5521_REG_B_CURRENT 0x07
#define LP5521_REG_CONFIG 0x08
#define LP5521_REG_STATUS 0x0C
#define LP5521_REG_RESET 0x0D
#define LP5521_REG_R_PROG_MEM 0x10
#define LP5521_REG_G_PROG_MEM 0x30
#define LP5521_REG_B_PROG_MEM 0x50
/* Base register to set LED current */
#define LP5521_REG_LED_CURRENT_BASE LP5521_REG_R_CURRENT
/* Base register to set the brightness */
#define LP5521_REG_LED_PWM_BASE LP5521_REG_R_PWM
/* Bits in ENABLE register */
#define LP5521_MASTER_ENABLE 0x40 /* Chip master enable */
#define LP5521_LOGARITHMIC_PWM 0x80 /* Logarithmic PWM adjustment */
#define LP5521_EXEC_RUN 0x2A
#define LP5521_ENABLE_DEFAULT \
(LP5521_MASTER_ENABLE | LP5521_LOGARITHMIC_PWM)
#define LP5521_ENABLE_RUN_PROGRAM \
(LP5521_ENABLE_DEFAULT | LP5521_EXEC_RUN)
/* CONFIG register */
#define LP5521_PWM_HF 0x40 /* PWM: 0 = 256Hz, 1 = 558Hz */
#define LP5521_PWRSAVE_EN 0x20 /* 1 = Power save mode */
#define LP5521_CP_MODE_MASK 0x18 /* Charge pump mode */
#define LP5521_CP_MODE_SHIFT 3
#define LP5521_R_TO_BATT 0x04 /* R out: 0 = CP, 1 = Vbat */
#define LP5521_CLK_INT 0x01 /* Internal clock */
#define LP5521_DEFAULT_CFG (LP5521_PWM_HF | LP5521_PWRSAVE_EN)
/* Status */
#define LP5521_EXT_CLK_USED 0x08
/* default R channel current register value */
#define LP5521_REG_R_CURR_DEFAULT 0xAF
/* Reset register value */
#define LP5521_RESET 0xFF
/* Program Memory Operations */
#define LP5521_MODE_R_M 0x30 /* Operation Mode Register */
#define LP5521_MODE_G_M 0x0C
#define LP5521_MODE_B_M 0x03
#define LP5521_LOAD_R 0x10
#define LP5521_LOAD_G 0x04
#define LP5521_LOAD_B 0x01
#define LP5521_R_IS_LOADING(mode) \
((mode & LP5521_MODE_R_M) == LP5521_LOAD_R)
#define LP5521_G_IS_LOADING(mode) \
((mode & LP5521_MODE_G_M) == LP5521_LOAD_G)
#define LP5521_B_IS_LOADING(mode) \
((mode & LP5521_MODE_B_M) == LP5521_LOAD_B)
#define LP5521_EXEC_R_M 0x30 /* Enable Register */
#define LP5521_EXEC_G_M 0x0C
#define LP5521_EXEC_B_M 0x03
#define LP5521_EXEC_M 0x3F
#define LP5521_RUN_R 0x20
#define LP5521_RUN_G 0x08
#define LP5521_RUN_B 0x02
static inline void lp5521_wait_opmode_done(void)
{
/* operation mode change needs to be longer than 153 us */
usleep_range(200, 300);
}
static inline void lp5521_wait_enable_done(void)
{
	/* it takes more than 488 us to update the ENABLE register */
usleep_range(500, 600);
}
static void lp5521_set_led_current(struct lp55xx_led *led, u8 led_current)
{
led->led_current = led_current;
lp55xx_write(led->chip, LP5521_REG_LED_CURRENT_BASE + led->chan_nr,
led_current);
}
static void lp5521_load_engine(struct lp55xx_chip *chip)
{
enum lp55xx_engine_index idx = chip->engine_idx;
static const u8 mask[] = {
[LP55XX_ENGINE_1] = LP5521_MODE_R_M,
[LP55XX_ENGINE_2] = LP5521_MODE_G_M,
[LP55XX_ENGINE_3] = LP5521_MODE_B_M,
};
static const u8 val[] = {
[LP55XX_ENGINE_1] = LP5521_LOAD_R,
[LP55XX_ENGINE_2] = LP5521_LOAD_G,
[LP55XX_ENGINE_3] = LP5521_LOAD_B,
};
lp55xx_update_bits(chip, LP5521_REG_OP_MODE, mask[idx], val[idx]);
lp5521_wait_opmode_done();
}
static void lp5521_stop_all_engines(struct lp55xx_chip *chip)
{
lp55xx_write(chip, LP5521_REG_OP_MODE, 0);
lp5521_wait_opmode_done();
}
static void lp5521_stop_engine(struct lp55xx_chip *chip)
{
enum lp55xx_engine_index idx = chip->engine_idx;
static const u8 mask[] = {
[LP55XX_ENGINE_1] = LP5521_MODE_R_M,
[LP55XX_ENGINE_2] = LP5521_MODE_G_M,
[LP55XX_ENGINE_3] = LP5521_MODE_B_M,
};
lp55xx_update_bits(chip, LP5521_REG_OP_MODE, mask[idx], 0);
lp5521_wait_opmode_done();
}
static void lp5521_run_engine(struct lp55xx_chip *chip, bool start)
{
int ret;
u8 mode;
u8 exec;
/* stop engine */
if (!start) {
lp5521_stop_engine(chip);
lp55xx_write(chip, LP5521_REG_OP_MODE, LP5521_CMD_DIRECT);
lp5521_wait_opmode_done();
return;
}
/*
* To run the engine,
	 * operation mode and enable register should be updated at the same time
*/
ret = lp55xx_read(chip, LP5521_REG_OP_MODE, &mode);
if (ret)
return;
ret = lp55xx_read(chip, LP5521_REG_ENABLE, &exec);
if (ret)
return;
/* change operation mode to RUN only when each engine is loading */
if (LP5521_R_IS_LOADING(mode)) {
mode = (mode & ~LP5521_MODE_R_M) | LP5521_RUN_R;
exec = (exec & ~LP5521_EXEC_R_M) | LP5521_RUN_R;
}
if (LP5521_G_IS_LOADING(mode)) {
mode = (mode & ~LP5521_MODE_G_M) | LP5521_RUN_G;
exec = (exec & ~LP5521_EXEC_G_M) | LP5521_RUN_G;
}
if (LP5521_B_IS_LOADING(mode)) {
mode = (mode & ~LP5521_MODE_B_M) | LP5521_RUN_B;
exec = (exec & ~LP5521_EXEC_B_M) | LP5521_RUN_B;
}
lp55xx_write(chip, LP5521_REG_OP_MODE, mode);
lp5521_wait_opmode_done();
lp55xx_update_bits(chip, LP5521_REG_ENABLE, LP5521_EXEC_M, exec);
lp5521_wait_enable_done();
}
static int lp5521_update_program_memory(struct lp55xx_chip *chip,
const u8 *data, size_t size)
{
enum lp55xx_engine_index idx = chip->engine_idx;
u8 pattern[LP5521_PROGRAM_LENGTH] = {0};
static const u8 addr[] = {
[LP55XX_ENGINE_1] = LP5521_REG_R_PROG_MEM,
[LP55XX_ENGINE_2] = LP5521_REG_G_PROG_MEM,
[LP55XX_ENGINE_3] = LP5521_REG_B_PROG_MEM,
};
unsigned cmd;
char c[3];
int nrchars;
int ret;
int offset = 0;
int i = 0;
while ((offset < size - 1) && (i < LP5521_PROGRAM_LENGTH)) {
		/* separate sscanf calls because the field width only works for %s */
ret = sscanf(data + offset, "%2s%n ", c, &nrchars);
if (ret != 1)
goto err;
ret = sscanf(c, "%2x", &cmd);
if (ret != 1)
goto err;
pattern[i] = (u8)cmd;
offset += nrchars;
i++;
}
/* Each instruction is 16bit long. Check that length is even */
if (i % 2)
goto err;
for (i = 0; i < LP5521_PROGRAM_LENGTH; i++) {
ret = lp55xx_write(chip, addr[idx] + i, pattern[i]);
if (ret)
return -EINVAL;
}
return size;
err:
dev_err(&chip->cl->dev, "wrong pattern format\n");
return -EINVAL;
}
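/*
 * Illustrative example (not part of the original file): the parser above
 * expects the pattern as whitespace-separated ASCII hex, two characters
 * per byte and an even number of bytes, since each engine instruction is
 * 16 bits wide. A hypothetical write to the engine1_load attribute could
 * look like
 *
 *	echo "4000 0d7f 7f00 4000" > engine1_load
 *
 * which loads four 16-bit instructions (eight bytes) into the selected
 * engine's program memory.
 */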
static void lp5521_firmware_loaded(struct lp55xx_chip *chip)
{
const struct firmware *fw = chip->fw;
if (fw->size > LP5521_PROGRAM_LENGTH) {
dev_err(&chip->cl->dev, "firmware data size overflow: %zu\n",
fw->size);
return;
}
/*
* Program memory sequence
* 1) set engine mode to "LOAD"
* 2) write firmware data into program memory
*/
lp5521_load_engine(chip);
lp5521_update_program_memory(chip, fw->data, fw->size);
}
static int lp5521_post_init_device(struct lp55xx_chip *chip)
{
int ret;
u8 val;
/*
* Make sure that the chip is reset by reading back the r channel
	 * current reg. This dummy read is required on some platforms -
* otherwise further access to the R G B channels in the
* LP5521_REG_ENABLE register will not have any effect - strange!
*/
ret = lp55xx_read(chip, LP5521_REG_R_CURRENT, &val);
if (ret) {
dev_err(&chip->cl->dev, "error in resetting chip\n");
return ret;
}
if (val != LP5521_REG_R_CURR_DEFAULT) {
dev_err(&chip->cl->dev,
"unexpected data in register (expected 0x%x got 0x%x)\n",
LP5521_REG_R_CURR_DEFAULT, val);
ret = -EINVAL;
return ret;
}
usleep_range(10000, 20000);
/* Set all PWMs to direct control mode */
ret = lp55xx_write(chip, LP5521_REG_OP_MODE, LP5521_CMD_DIRECT);
/* Update configuration for the clock setting */
val = LP5521_DEFAULT_CFG;
if (!lp55xx_is_extclk_used(chip))
val |= LP5521_CLK_INT;
val |= (chip->pdata->charge_pump_mode << LP5521_CP_MODE_SHIFT) & LP5521_CP_MODE_MASK;
ret = lp55xx_write(chip, LP5521_REG_CONFIG, val);
if (ret)
return ret;
/* Initialize all channels PWM to zero -> leds off */
lp55xx_write(chip, LP5521_REG_R_PWM, 0);
lp55xx_write(chip, LP5521_REG_G_PWM, 0);
lp55xx_write(chip, LP5521_REG_B_PWM, 0);
	/* Engines are set to the run state when OP_MODE enables them */
ret = lp55xx_write(chip, LP5521_REG_ENABLE, LP5521_ENABLE_RUN_PROGRAM);
if (ret)
return ret;
lp5521_wait_enable_done();
return 0;
}
static int lp5521_run_selftest(struct lp55xx_chip *chip, char *buf)
{
struct lp55xx_platform_data *pdata = chip->pdata;
int ret;
u8 status;
ret = lp55xx_read(chip, LP5521_REG_STATUS, &status);
if (ret < 0)
return ret;
if (pdata->clock_mode != LP55XX_CLOCK_EXT)
return 0;
/* Check that ext clock is really in use if requested */
if ((status & LP5521_EXT_CLK_USED) == 0)
return -EIO;
return 0;
}
static int lp5521_multicolor_brightness(struct lp55xx_led *led)
{
struct lp55xx_chip *chip = led->chip;
int ret;
int i;
mutex_lock(&chip->lock);
for (i = 0; i < led->mc_cdev.num_colors; i++) {
ret = lp55xx_write(chip,
LP5521_REG_LED_PWM_BASE +
led->mc_cdev.subled_info[i].channel,
led->mc_cdev.subled_info[i].brightness);
if (ret)
break;
}
mutex_unlock(&chip->lock);
return ret;
}
static int lp5521_led_brightness(struct lp55xx_led *led)
{
struct lp55xx_chip *chip = led->chip;
int ret;
mutex_lock(&chip->lock);
ret = lp55xx_write(chip, LP5521_REG_LED_PWM_BASE + led->chan_nr,
led->brightness);
mutex_unlock(&chip->lock);
return ret;
}
static ssize_t show_engine_mode(struct device *dev,
struct device_attribute *attr,
char *buf, int nr)
{
struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
struct lp55xx_chip *chip = led->chip;
enum lp55xx_engine_mode mode = chip->engines[nr - 1].mode;
switch (mode) {
case LP55XX_ENGINE_RUN:
return sprintf(buf, "run\n");
case LP55XX_ENGINE_LOAD:
return sprintf(buf, "load\n");
case LP55XX_ENGINE_DISABLED:
default:
return sprintf(buf, "disabled\n");
}
}
show_mode(1)
show_mode(2)
show_mode(3)
static ssize_t store_engine_mode(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len, int nr)
{
struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
struct lp55xx_chip *chip = led->chip;
struct lp55xx_engine *engine = &chip->engines[nr - 1];
mutex_lock(&chip->lock);
chip->engine_idx = nr;
if (!strncmp(buf, "run", 3)) {
lp5521_run_engine(chip, true);
engine->mode = LP55XX_ENGINE_RUN;
} else if (!strncmp(buf, "load", 4)) {
lp5521_stop_engine(chip);
lp5521_load_engine(chip);
engine->mode = LP55XX_ENGINE_LOAD;
} else if (!strncmp(buf, "disabled", 8)) {
lp5521_stop_engine(chip);
engine->mode = LP55XX_ENGINE_DISABLED;
}
mutex_unlock(&chip->lock);
return len;
}
store_mode(1)
store_mode(2)
store_mode(3)
static ssize_t store_engine_load(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len, int nr)
{
struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
struct lp55xx_chip *chip = led->chip;
int ret;
mutex_lock(&chip->lock);
chip->engine_idx = nr;
lp5521_load_engine(chip);
ret = lp5521_update_program_memory(chip, buf, len);
mutex_unlock(&chip->lock);
return ret;
}
store_load(1)
store_load(2)
store_load(3)
static ssize_t lp5521_selftest(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
struct lp55xx_chip *chip = led->chip;
int ret;
mutex_lock(&chip->lock);
ret = lp5521_run_selftest(chip, buf);
mutex_unlock(&chip->lock);
return sysfs_emit(buf, "%s\n", ret ? "FAIL" : "OK");
}
/* device attributes */
static LP55XX_DEV_ATTR_RW(engine1_mode, show_engine1_mode, store_engine1_mode);
static LP55XX_DEV_ATTR_RW(engine2_mode, show_engine2_mode, store_engine2_mode);
static LP55XX_DEV_ATTR_RW(engine3_mode, show_engine3_mode, store_engine3_mode);
static LP55XX_DEV_ATTR_WO(engine1_load, store_engine1_load);
static LP55XX_DEV_ATTR_WO(engine2_load, store_engine2_load);
static LP55XX_DEV_ATTR_WO(engine3_load, store_engine3_load);
static LP55XX_DEV_ATTR_RO(selftest, lp5521_selftest);
static struct attribute *lp5521_attributes[] = {
&dev_attr_engine1_mode.attr,
&dev_attr_engine2_mode.attr,
&dev_attr_engine3_mode.attr,
&dev_attr_engine1_load.attr,
&dev_attr_engine2_load.attr,
&dev_attr_engine3_load.attr,
&dev_attr_selftest.attr,
NULL
};
static const struct attribute_group lp5521_group = {
.attrs = lp5521_attributes,
};
/* Chip specific configurations */
static struct lp55xx_device_config lp5521_cfg = {
.reset = {
.addr = LP5521_REG_RESET,
.val = LP5521_RESET,
},
.enable = {
.addr = LP5521_REG_ENABLE,
.val = LP5521_ENABLE_DEFAULT,
},
.max_channel = LP5521_MAX_LEDS,
.post_init_device = lp5521_post_init_device,
.brightness_fn = lp5521_led_brightness,
.multicolor_brightness_fn = lp5521_multicolor_brightness,
.set_led_current = lp5521_set_led_current,
.firmware_cb = lp5521_firmware_loaded,
.run_engine = lp5521_run_engine,
.dev_attr_group = &lp5521_group,
};
static int lp5521_probe(struct i2c_client *client)
{
const struct i2c_device_id *id = i2c_client_get_device_id(client);
int ret;
struct lp55xx_chip *chip;
struct lp55xx_led *led;
struct lp55xx_platform_data *pdata = dev_get_platdata(&client->dev);
struct device_node *np = dev_of_node(&client->dev);
chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
chip->cfg = &lp5521_cfg;
if (!pdata) {
if (np) {
pdata = lp55xx_of_populate_pdata(&client->dev, np,
chip);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
} else {
dev_err(&client->dev, "no platform data\n");
return -EINVAL;
}
}
led = devm_kcalloc(&client->dev,
pdata->num_channels, sizeof(*led), GFP_KERNEL);
if (!led)
return -ENOMEM;
chip->cl = client;
chip->pdata = pdata;
mutex_init(&chip->lock);
i2c_set_clientdata(client, led);
ret = lp55xx_init_device(chip);
if (ret)
goto err_init;
dev_info(&client->dev, "%s programmable led chip found\n", id->name);
ret = lp55xx_register_leds(led, chip);
if (ret)
goto err_out;
ret = lp55xx_register_sysfs(chip);
if (ret) {
dev_err(&client->dev, "registering sysfs failed\n");
goto err_out;
}
return 0;
err_out:
lp55xx_deinit_device(chip);
err_init:
return ret;
}
static void lp5521_remove(struct i2c_client *client)
{
struct lp55xx_led *led = i2c_get_clientdata(client);
struct lp55xx_chip *chip = led->chip;
lp5521_stop_all_engines(chip);
lp55xx_unregister_sysfs(chip);
lp55xx_deinit_device(chip);
}
static const struct i2c_device_id lp5521_id[] = {
{ "lp5521", 0 }, /* Three channel chip */
{ }
};
MODULE_DEVICE_TABLE(i2c, lp5521_id);
static const struct of_device_id of_lp5521_leds_match[] = {
{ .compatible = "national,lp5521", },
{},
};
MODULE_DEVICE_TABLE(of, of_lp5521_leds_match);
static struct i2c_driver lp5521_driver = {
.driver = {
.name = "lp5521",
.of_match_table = of_lp5521_leds_match,
},
.probe = lp5521_probe,
.remove = lp5521_remove,
.id_table = lp5521_id,
};
module_i2c_driver(lp5521_driver);
MODULE_AUTHOR("Mathias Nyman, Yuri Zaporozhets, Samu Onkalo");
MODULE_AUTHOR("Milo Kim <[email protected]>");
MODULE_DESCRIPTION("LP5521 LED engine");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/leds/leds-lp5521.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* LEDs driver for PCEngines WRAP
*
* Copyright (C) 2006 Kristian Kielhofner <[email protected]>
*
* Based on leds-net48xx.c
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/leds.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/scx200_gpio.h>
#include <linux/module.h>
#define DRVNAME "wrap-led"
#define WRAP_POWER_LED_GPIO 2
#define WRAP_ERROR_LED_GPIO 3
#define WRAP_EXTRA_LED_GPIO 18
static struct platform_device *pdev;
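/* The WRAP LEDs are active low: driving the GPIO low turns the LED on */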
static void wrap_power_led_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
if (value)
scx200_gpio_set_low(WRAP_POWER_LED_GPIO);
else
scx200_gpio_set_high(WRAP_POWER_LED_GPIO);
}
static void wrap_error_led_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
if (value)
scx200_gpio_set_low(WRAP_ERROR_LED_GPIO);
else
scx200_gpio_set_high(WRAP_ERROR_LED_GPIO);
}
static void wrap_extra_led_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
if (value)
scx200_gpio_set_low(WRAP_EXTRA_LED_GPIO);
else
scx200_gpio_set_high(WRAP_EXTRA_LED_GPIO);
}
static struct led_classdev wrap_power_led = {
.name = "wrap::power",
.brightness_set = wrap_power_led_set,
.default_trigger = "default-on",
.flags = LED_CORE_SUSPENDRESUME,
};
static struct led_classdev wrap_error_led = {
.name = "wrap::error",
.brightness_set = wrap_error_led_set,
.flags = LED_CORE_SUSPENDRESUME,
};
static struct led_classdev wrap_extra_led = {
.name = "wrap::extra",
.brightness_set = wrap_extra_led_set,
.flags = LED_CORE_SUSPENDRESUME,
};
static int wrap_led_probe(struct platform_device *pdev)
{
int ret;
ret = devm_led_classdev_register(&pdev->dev, &wrap_power_led);
if (ret < 0)
return ret;
ret = devm_led_classdev_register(&pdev->dev, &wrap_error_led);
if (ret < 0)
return ret;
return devm_led_classdev_register(&pdev->dev, &wrap_extra_led);
}
static struct platform_driver wrap_led_driver = {
.probe = wrap_led_probe,
.driver = {
.name = DRVNAME,
},
};
static int __init wrap_led_init(void)
{
int ret;
if (!scx200_gpio_present()) {
ret = -ENODEV;
goto out;
}
ret = platform_driver_register(&wrap_led_driver);
if (ret < 0)
goto out;
pdev = platform_device_register_simple(DRVNAME, -1, NULL, 0);
if (IS_ERR(pdev)) {
ret = PTR_ERR(pdev);
platform_driver_unregister(&wrap_led_driver);
goto out;
}
out:
return ret;
}
static void __exit wrap_led_exit(void)
{
platform_device_unregister(pdev);
platform_driver_unregister(&wrap_led_driver);
}
module_init(wrap_led_init);
module_exit(wrap_led_exit);
MODULE_AUTHOR("Kristian Kielhofner <[email protected]>");
MODULE_DESCRIPTION("PCEngines WRAP LED driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/leds/leds-wrap.c |