// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018 Broadcom
*/
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/thermal.h>
/*
 * In the Stingray thermal IO memory, the mask of available TMONs is at
 * offset 0, the temperature registers start at a 4-byte offset, and
 * each TMON temperature register is 4 bytes wide.
 */
#define SR_TMON_TEMP_BASE(id) ((id) * 0x4)
#define SR_TMON_MAX_LIST 6
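/*
 * Illustrative note (not part of the driver): for TMON id 3 the macro
 * above yields byte offset 0xc, so that sensor's temperature register
 * sits at regs + 0xc. sr_get_temp() below hands the raw readl() value
 * to the thermal core unscaled, which implies the hardware already
 * reports millidegrees Celsius.
 */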
struct sr_tmon {
unsigned int crit_temp;
unsigned int tmon_id;
struct sr_thermal *priv;
};
struct sr_thermal {
void __iomem *regs;
unsigned int max_crit_temp;
struct sr_tmon tmon[SR_TMON_MAX_LIST];
};
static int sr_get_temp(struct thermal_zone_device *tz, int *temp)
{
struct sr_tmon *tmon = thermal_zone_device_priv(tz);
struct sr_thermal *sr_thermal = tmon->priv;
*temp = readl(sr_thermal->regs + SR_TMON_TEMP_BASE(tmon->tmon_id));
return 0;
}
static const struct thermal_zone_device_ops sr_tz_ops = {
.get_temp = sr_get_temp,
};
static int sr_thermal_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct thermal_zone_device *tz;
struct sr_thermal *sr_thermal;
struct sr_tmon *tmon;
struct resource *res;
u32 sr_tmon_list = 0;
unsigned int i;
int ret;
sr_thermal = devm_kzalloc(dev, sizeof(*sr_thermal), GFP_KERNEL);
if (!sr_thermal)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENOENT;
sr_thermal->regs = (void __iomem *)devm_memremap(&pdev->dev, res->start,
resource_size(res),
MEMREMAP_WB);
if (IS_ERR(sr_thermal->regs)) {
dev_err(dev, "failed to get io address\n");
return PTR_ERR(sr_thermal->regs);
}
ret = device_property_read_u32(dev, "brcm,tmon-mask", &sr_tmon_list);
if (ret)
return ret;
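/*
 * Illustrative note: "brcm,tmon-mask" is read as a bitmask of
 * populated TMONs, so e.g. a value of 0x5 would make the loop below
 * register sensors 0 and 2 and skip the rest.
 */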
tmon = sr_thermal->tmon;
for (i = 0; i < SR_TMON_MAX_LIST; i++, tmon++) {
if (!(sr_tmon_list & BIT(i)))
continue;
/* Flush temperature registers */
writel(0, sr_thermal->regs + SR_TMON_TEMP_BASE(i));
tmon->tmon_id = i;
tmon->priv = sr_thermal;
tz = devm_thermal_of_zone_register(dev, i, tmon,
&sr_tz_ops);
if (IS_ERR(tz))
return PTR_ERR(tz);
dev_dbg(dev, "thermal sensor %d registered\n", i);
}
return 0;
}
static const struct of_device_id sr_thermal_of_match[] = {
{ .compatible = "brcm,sr-thermal", },
{},
};
MODULE_DEVICE_TABLE(of, sr_thermal_of_match);
static struct platform_driver sr_thermal_driver = {
.probe = sr_thermal_probe,
.driver = {
.name = "sr-thermal",
.of_match_table = sr_thermal_of_match,
},
};
module_platform_driver(sr_thermal_driver);
MODULE_AUTHOR("Pramod Kumar <[email protected]>");
MODULE_DESCRIPTION("Stingray thermal driver");
MODULE_LICENSE("GPL v2");
/* ==== end of file: drivers/thermal/broadcom/sr-thermal.c (linux-master) ==== */
// SPDX-License-Identifier: GPL-2.0-only
/*
* Broadcom STB AVS TMON thermal sensor driver
*
* Copyright (c) 2015-2017 Broadcom
*/
#define DRV_NAME "brcmstb_thermal"
#define pr_fmt(fmt) DRV_NAME ": " fmt
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/irqreturn.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/thermal.h>
#define AVS_TMON_STATUS 0x00
#define AVS_TMON_STATUS_valid_msk BIT(11)
#define AVS_TMON_STATUS_data_msk GENMASK(10, 1)
#define AVS_TMON_STATUS_data_shift 1
#define AVS_TMON_EN_OVERTEMP_RESET 0x04
#define AVS_TMON_EN_OVERTEMP_RESET_msk BIT(0)
#define AVS_TMON_RESET_THRESH 0x08
#define AVS_TMON_RESET_THRESH_msk GENMASK(10, 1)
#define AVS_TMON_RESET_THRESH_shift 1
#define AVS_TMON_INT_IDLE_TIME 0x10
#define AVS_TMON_EN_TEMP_INT_SRCS 0x14
#define AVS_TMON_EN_TEMP_INT_SRCS_high BIT(1)
#define AVS_TMON_EN_TEMP_INT_SRCS_low BIT(0)
#define AVS_TMON_INT_THRESH 0x18
#define AVS_TMON_INT_THRESH_high_msk GENMASK(26, 17)
#define AVS_TMON_INT_THRESH_high_shift 17
#define AVS_TMON_INT_THRESH_low_msk GENMASK(10, 1)
#define AVS_TMON_INT_THRESH_low_shift 1
#define AVS_TMON_TEMP_INT_CODE 0x1c
#define AVS_TMON_TP_TEST_ENABLE 0x20
/* Default coefficients */
#define AVS_TMON_TEMP_SLOPE 487
#define AVS_TMON_TEMP_OFFSET 410040
/* HW related temperature constants */
#define AVS_TMON_TEMP_MAX 0x3ff
#define AVS_TMON_TEMP_MIN -88161
#define AVS_TMON_TEMP_MASK AVS_TMON_TEMP_MAX
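/*
 * Editorial note: AVS_TMON_TEMP_MIN follows from the default 28nm
 * coefficients and the conversion used below:
 * 410040 - 0x3ff * 487 = -88161 millidegrees Celsius, i.e. the
 * temperature corresponding to the maximum 10-bit code.
 */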
enum avs_tmon_trip_type {
TMON_TRIP_TYPE_LOW = 0,
TMON_TRIP_TYPE_HIGH,
TMON_TRIP_TYPE_RESET,
TMON_TRIP_TYPE_MAX,
};
struct avs_tmon_trip {
/* HW bit to enable the trip */
u32 enable_offs;
u32 enable_mask;
/* HW field to read the trip temperature */
u32 reg_offs;
u32 reg_msk;
int reg_shift;
};
static struct avs_tmon_trip avs_tmon_trips[] = {
/* Trips when temperature is below threshold */
[TMON_TRIP_TYPE_LOW] = {
.enable_offs = AVS_TMON_EN_TEMP_INT_SRCS,
.enable_mask = AVS_TMON_EN_TEMP_INT_SRCS_low,
.reg_offs = AVS_TMON_INT_THRESH,
.reg_msk = AVS_TMON_INT_THRESH_low_msk,
.reg_shift = AVS_TMON_INT_THRESH_low_shift,
},
/* Trips when temperature is above threshold */
[TMON_TRIP_TYPE_HIGH] = {
.enable_offs = AVS_TMON_EN_TEMP_INT_SRCS,
.enable_mask = AVS_TMON_EN_TEMP_INT_SRCS_high,
.reg_offs = AVS_TMON_INT_THRESH,
.reg_msk = AVS_TMON_INT_THRESH_high_msk,
.reg_shift = AVS_TMON_INT_THRESH_high_shift,
},
/* Automatically resets chip when above threshold */
[TMON_TRIP_TYPE_RESET] = {
.enable_offs = AVS_TMON_EN_OVERTEMP_RESET,
.enable_mask = AVS_TMON_EN_OVERTEMP_RESET_msk,
.reg_offs = AVS_TMON_RESET_THRESH,
.reg_msk = AVS_TMON_RESET_THRESH_msk,
.reg_shift = AVS_TMON_RESET_THRESH_shift,
},
};
struct brcmstb_thermal_params {
unsigned int offset;
unsigned int mult;
const struct thermal_zone_device_ops *of_ops;
};
struct brcmstb_thermal_priv {
void __iomem *tmon_base;
struct device *dev;
struct thermal_zone_device *thermal;
/* Process specific thermal parameters used for calculations */
const struct brcmstb_thermal_params *temp_params;
};
/* Convert a HW code to a temperature reading (millidegree celsius) */
static inline int avs_tmon_code_to_temp(struct brcmstb_thermal_priv *priv,
u32 code)
{
int offset = priv->temp_params->offset;
int mult = priv->temp_params->mult;
return (offset - (int)((code & AVS_TMON_TEMP_MASK) * mult));
}
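/*
 * Worked example (assuming the 28nm parameters defined later in this
 * file, offset = 410040 and mult = 487): a raw code of 739 maps to
 * 410040 - 739 * 487 = 50147 millidegrees Celsius, roughly 50.1 C.
 */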
/*
* Convert a temperature value (millidegree celsius) to a HW code
*
* @temp: temperature to convert
* @low: if true, round toward the low side
*/
static inline u32 avs_tmon_temp_to_code(struct brcmstb_thermal_priv *priv,
int temp, bool low)
{
int offset = priv->temp_params->offset;
int mult = priv->temp_params->mult;
if (temp < AVS_TMON_TEMP_MIN)
return AVS_TMON_TEMP_MAX; /* Maximum code value */
if (temp >= offset)
return 0; /* Minimum code value */
if (low)
return (u32)(DIV_ROUND_UP(offset - temp, mult));
else
return (u32)((offset - temp) / mult);
}
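/*
 * Worked example for the inverse (same assumed 28nm parameters):
 * 50147 mC converts back to (410040 - 50147) / 487 = 739 exactly.
 * When the division is inexact, DIV_ROUND_UP picks the next code up
 * for a low threshold; since a larger code means a lower temperature
 * here, that rounds the trip toward the colder, safer side.
 */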
static int brcmstb_get_temp(struct thermal_zone_device *tz, int *temp)
{
struct brcmstb_thermal_priv *priv = thermal_zone_device_priv(tz);
u32 val;
long t;
val = __raw_readl(priv->tmon_base + AVS_TMON_STATUS);
if (!(val & AVS_TMON_STATUS_valid_msk))
return -EIO;
val = (val & AVS_TMON_STATUS_data_msk) >> AVS_TMON_STATUS_data_shift;
t = avs_tmon_code_to_temp(priv, val);
if (t < 0)
*temp = 0;
else
*temp = t;
return 0;
}
static void avs_tmon_trip_enable(struct brcmstb_thermal_priv *priv,
enum avs_tmon_trip_type type, int en)
{
struct avs_tmon_trip *trip = &avs_tmon_trips[type];
u32 val = __raw_readl(priv->tmon_base + trip->enable_offs);
dev_dbg(priv->dev, "%sable trip, type %d\n", en ? "en" : "dis", type);
if (en)
val |= trip->enable_mask;
else
val &= ~trip->enable_mask;
__raw_writel(val, priv->tmon_base + trip->enable_offs);
}
static int avs_tmon_get_trip_temp(struct brcmstb_thermal_priv *priv,
enum avs_tmon_trip_type type)
{
struct avs_tmon_trip *trip = &avs_tmon_trips[type];
u32 val = __raw_readl(priv->tmon_base + trip->reg_offs);
val &= trip->reg_msk;
val >>= trip->reg_shift;
return avs_tmon_code_to_temp(priv, val);
}
static void avs_tmon_set_trip_temp(struct brcmstb_thermal_priv *priv,
enum avs_tmon_trip_type type,
int temp)
{
struct avs_tmon_trip *trip = &avs_tmon_trips[type];
u32 val, orig;
dev_dbg(priv->dev, "set temp %d to %d\n", type, temp);
/* round toward low temp for the low interrupt */
val = avs_tmon_temp_to_code(priv, temp,
type == TMON_TRIP_TYPE_LOW);
val <<= trip->reg_shift;
val &= trip->reg_msk;
orig = __raw_readl(priv->tmon_base + trip->reg_offs);
orig &= ~trip->reg_msk;
orig |= val;
__raw_writel(orig, priv->tmon_base + trip->reg_offs);
}
static int avs_tmon_get_intr_temp(struct brcmstb_thermal_priv *priv)
{
u32 val;
val = __raw_readl(priv->tmon_base + AVS_TMON_TEMP_INT_CODE);
return avs_tmon_code_to_temp(priv, val);
}
static irqreturn_t brcmstb_tmon_irq_thread(int irq, void *data)
{
struct brcmstb_thermal_priv *priv = data;
int low, high, intr;
low = avs_tmon_get_trip_temp(priv, TMON_TRIP_TYPE_LOW);
high = avs_tmon_get_trip_temp(priv, TMON_TRIP_TYPE_HIGH);
intr = avs_tmon_get_intr_temp(priv);
dev_dbg(priv->dev, "low/intr/high: %d/%d/%d\n",
low, intr, high);
/* Disable high-temp until next threshold shift */
if (intr >= high)
avs_tmon_trip_enable(priv, TMON_TRIP_TYPE_HIGH, 0);
/* Disable low-temp until next threshold shift */
if (intr <= low)
avs_tmon_trip_enable(priv, TMON_TRIP_TYPE_LOW, 0);
/*
* Notify using the interrupt temperature, in case the temperature
* changes before it can next be read out
*/
thermal_zone_device_update(priv->thermal, intr);
return IRQ_HANDLED;
}
static int brcmstb_set_trips(struct thermal_zone_device *tz, int low, int high)
{
struct brcmstb_thermal_priv *priv = thermal_zone_device_priv(tz);
dev_dbg(priv->dev, "set trips %d <--> %d\n", low, high);
/*
* Disable low-temp if "low" is too small. As per thermal framework
* API, we use -INT_MAX rather than INT_MIN.
*/
if (low <= -INT_MAX) {
avs_tmon_trip_enable(priv, TMON_TRIP_TYPE_LOW, 0);
} else {
avs_tmon_set_trip_temp(priv, TMON_TRIP_TYPE_LOW, low);
avs_tmon_trip_enable(priv, TMON_TRIP_TYPE_LOW, 1);
}
/* Disable high-temp if "high" is too big. */
if (high == INT_MAX) {
avs_tmon_trip_enable(priv, TMON_TRIP_TYPE_HIGH, 0);
} else {
avs_tmon_set_trip_temp(priv, TMON_TRIP_TYPE_HIGH, high);
avs_tmon_trip_enable(priv, TMON_TRIP_TYPE_HIGH, 1);
}
return 0;
}
static const struct thermal_zone_device_ops brcmstb_16nm_of_ops = {
.get_temp = brcmstb_get_temp,
};
static const struct brcmstb_thermal_params brcmstb_16nm_params = {
.offset = 457829,
.mult = 557,
.of_ops = &brcmstb_16nm_of_ops,
};
static const struct thermal_zone_device_ops brcmstb_28nm_of_ops = {
.get_temp = brcmstb_get_temp,
.set_trips = brcmstb_set_trips,
};
static const struct brcmstb_thermal_params brcmstb_28nm_params = {
.offset = 410040,
.mult = 487,
.of_ops = &brcmstb_28nm_of_ops,
};
static const struct of_device_id brcmstb_thermal_id_table[] = {
{ .compatible = "brcm,avs-tmon-bcm7216", .data = &brcmstb_16nm_params },
{ .compatible = "brcm,avs-tmon", .data = &brcmstb_28nm_params },
{},
};
MODULE_DEVICE_TABLE(of, brcmstb_thermal_id_table);
static int brcmstb_thermal_probe(struct platform_device *pdev)
{
const struct thermal_zone_device_ops *of_ops;
struct thermal_zone_device *thermal;
struct brcmstb_thermal_priv *priv;
int irq, ret;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->temp_params = of_device_get_match_data(&pdev->dev);
if (!priv->temp_params)
return -EINVAL;
priv->tmon_base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(priv->tmon_base))
return PTR_ERR(priv->tmon_base);
priv->dev = &pdev->dev;
of_ops = priv->temp_params->of_ops;
thermal = devm_thermal_of_zone_register(&pdev->dev, 0, priv,
of_ops);
if (IS_ERR(thermal)) {
ret = PTR_ERR(thermal);
dev_err(&pdev->dev, "could not register sensor: %d\n", ret);
return ret;
}
priv->thermal = thermal;
irq = platform_get_irq_optional(pdev, 0);
if (irq >= 0) {
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
brcmstb_tmon_irq_thread,
IRQF_ONESHOT,
DRV_NAME, priv);
if (ret < 0) {
dev_err(&pdev->dev, "could not request IRQ: %d\n", ret);
return ret;
}
}
dev_info(&pdev->dev, "registered AVS TMON of-sensor driver\n");
return 0;
}
static struct platform_driver brcmstb_thermal_driver = {
.probe = brcmstb_thermal_probe,
.driver = {
.name = DRV_NAME,
.of_match_table = brcmstb_thermal_id_table,
},
};
module_platform_driver(brcmstb_thermal_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Brian Norris");
MODULE_DESCRIPTION("Broadcom STB AVS TMON thermal driver");
/* ==== end of file: drivers/thermal/broadcom/brcmstb_thermal.c (linux-master) ==== */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* exynos_tmu.c - Samsung Exynos TMU (Thermal Management Unit)
*
* Copyright (C) 2014 Samsung Electronics
* Bartlomiej Zolnierkiewicz <[email protected]>
* Lukasz Majewski <[email protected]>
*
* Copyright (C) 2011 Samsung Electronics
* Donggeun Kim <[email protected]>
* Amit Daniel Kachhap <[email protected]>
*/
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/thermal.h>
#include <dt-bindings/thermal/thermal_exynos.h>
/* Exynos generic registers */
#define EXYNOS_TMU_REG_TRIMINFO 0x0
#define EXYNOS_TMU_REG_CONTROL 0x20
#define EXYNOS_TMU_REG_STATUS 0x28
#define EXYNOS_TMU_REG_CURRENT_TEMP 0x40
#define EXYNOS_TMU_REG_INTEN 0x70
#define EXYNOS_TMU_REG_INTSTAT 0x74
#define EXYNOS_TMU_REG_INTCLEAR 0x78
#define EXYNOS_TMU_TEMP_MASK 0xff
#define EXYNOS_TMU_REF_VOLTAGE_SHIFT 24
#define EXYNOS_TMU_REF_VOLTAGE_MASK 0x1f
#define EXYNOS_TMU_BUF_SLOPE_SEL_MASK 0xf
#define EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT 8
#define EXYNOS_TMU_CORE_EN_SHIFT 0
/* Exynos3250 specific registers */
#define EXYNOS_TMU_TRIMINFO_CON1 0x10
/* Exynos4210 specific registers */
#define EXYNOS4210_TMU_REG_THRESHOLD_TEMP 0x44
#define EXYNOS4210_TMU_REG_TRIG_LEVEL0 0x50
/* Exynos5250, Exynos4412, Exynos3250 specific registers */
#define EXYNOS_TMU_TRIMINFO_CON2 0x14
#define EXYNOS_THD_TEMP_RISE 0x50
#define EXYNOS_THD_TEMP_FALL 0x54
#define EXYNOS_EMUL_CON 0x80
#define EXYNOS_TRIMINFO_RELOAD_ENABLE 1
#define EXYNOS_TRIMINFO_25_SHIFT 0
#define EXYNOS_TRIMINFO_85_SHIFT 8
#define EXYNOS_TMU_TRIP_MODE_SHIFT 13
#define EXYNOS_TMU_TRIP_MODE_MASK 0x7
#define EXYNOS_TMU_THERM_TRIP_EN_SHIFT 12
#define EXYNOS_TMU_INTEN_RISE0_SHIFT 0
#define EXYNOS_TMU_INTEN_FALL0_SHIFT 16
#define EXYNOS_EMUL_TIME 0x57F0
#define EXYNOS_EMUL_TIME_MASK 0xffff
#define EXYNOS_EMUL_TIME_SHIFT 16
#define EXYNOS_EMUL_DATA_SHIFT 8
#define EXYNOS_EMUL_DATA_MASK 0xFF
#define EXYNOS_EMUL_ENABLE 0x1
/* Exynos5260 specific */
#define EXYNOS5260_TMU_REG_INTEN 0xC0
#define EXYNOS5260_TMU_REG_INTSTAT 0xC4
#define EXYNOS5260_TMU_REG_INTCLEAR 0xC8
#define EXYNOS5260_EMUL_CON 0x100
/* Exynos4412 specific */
#define EXYNOS4412_MUX_ADDR_VALUE 6
#define EXYNOS4412_MUX_ADDR_SHIFT 20
/* Exynos5433 specific registers */
#define EXYNOS5433_THD_TEMP_RISE3_0 0x050
#define EXYNOS5433_THD_TEMP_RISE7_4 0x054
#define EXYNOS5433_THD_TEMP_FALL3_0 0x060
#define EXYNOS5433_THD_TEMP_FALL7_4 0x064
#define EXYNOS5433_TMU_REG_INTEN 0x0c0
#define EXYNOS5433_TMU_REG_INTPEND 0x0c8
#define EXYNOS5433_TMU_EMUL_CON 0x110
#define EXYNOS5433_TMU_PD_DET_EN 0x130
#define EXYNOS5433_TRIMINFO_SENSOR_ID_SHIFT 16
#define EXYNOS5433_TRIMINFO_CALIB_SEL_SHIFT 23
#define EXYNOS5433_TRIMINFO_SENSOR_ID_MASK \
(0xf << EXYNOS5433_TRIMINFO_SENSOR_ID_SHIFT)
#define EXYNOS5433_TRIMINFO_CALIB_SEL_MASK BIT(23)
#define EXYNOS5433_TRIMINFO_ONE_POINT_TRIMMING 0
#define EXYNOS5433_TRIMINFO_TWO_POINT_TRIMMING 1
#define EXYNOS5433_PD_DET_EN 1
#define EXYNOS5433_G3D_BASE 0x10070000
/* Exynos7 specific registers */
#define EXYNOS7_THD_TEMP_RISE7_6 0x50
#define EXYNOS7_THD_TEMP_FALL7_6 0x60
#define EXYNOS7_TMU_REG_INTEN 0x110
#define EXYNOS7_TMU_REG_INTPEND 0x118
#define EXYNOS7_TMU_REG_EMUL_CON 0x160
#define EXYNOS7_TMU_TEMP_MASK 0x1ff
#define EXYNOS7_PD_DET_EN_SHIFT 23
#define EXYNOS7_TMU_INTEN_RISE0_SHIFT 0
#define EXYNOS7_EMUL_DATA_SHIFT 7
#define EXYNOS7_EMUL_DATA_MASK 0x1ff
#define EXYNOS_FIRST_POINT_TRIM 25
#define EXYNOS_SECOND_POINT_TRIM 85
#define EXYNOS_NOISE_CANCEL_MODE 4
#define MCELSIUS 1000
enum soc_type {
SOC_ARCH_EXYNOS3250 = 1,
SOC_ARCH_EXYNOS4210,
SOC_ARCH_EXYNOS4412,
SOC_ARCH_EXYNOS5250,
SOC_ARCH_EXYNOS5260,
SOC_ARCH_EXYNOS5420,
SOC_ARCH_EXYNOS5420_TRIMINFO,
SOC_ARCH_EXYNOS5433,
SOC_ARCH_EXYNOS7,
};
/**
* struct exynos_tmu_data : A structure to hold the private data of the TMU
* driver
* @id: identifier of the one instance of the TMU controller.
* @base: base address of the single instance of the TMU controller.
* @base_second: base address of the common registers of the TMU controller.
* @irq: irq number of the TMU controller.
* @soc: id of the SOC type.
* @irq_work: pointer to the irq work structure.
* @lock: lock to implement synchronization.
* @clk: pointer to the clock structure.
* @clk_sec: pointer to the clock structure for accessing the base_second.
* @sclk: pointer to the clock structure for accessing the tmu special clk.
* @cal_type: calibration type for temperature
* @efuse_value: SoC defined fuse value
* @min_efuse_value: minimum valid trimming data
* @max_efuse_value: maximum valid trimming data
* @temp_error1: fused value of the first point trim.
* @temp_error2: fused value of the second point trim.
* @gain: gain of amplifier in the positive-TC generator block
* 0 < gain <= 15
* @reference_voltage: reference voltage of amplifier
* in the positive-TC generator block
* 0 < reference_voltage <= 31
* @regulator: pointer to the TMU regulator structure.
* @reg_conf: pointer to structure to register with core thermal.
* @tzd: pointer to thermal_zone_device structure
* @ntrip: number of supported trip points.
* @enabled: current status of TMU device
* @tmu_set_trip_temp: SoC specific method to set trip (rising threshold)
* @tmu_set_trip_hyst: SoC specific method to set hysteresis (falling threshold)
* @tmu_initialize: SoC specific TMU initialization method
* @tmu_control: SoC specific TMU control method
* @tmu_read: SoC specific TMU temperature read method
* @tmu_set_emulation: SoC specific TMU emulation setting method
* @tmu_clear_irqs: SoC specific TMU interrupts clearing method
*/
struct exynos_tmu_data {
int id;
void __iomem *base;
void __iomem *base_second;
int irq;
enum soc_type soc;
struct work_struct irq_work;
struct mutex lock;
struct clk *clk, *clk_sec, *sclk;
u32 cal_type;
u32 efuse_value;
u32 min_efuse_value;
u32 max_efuse_value;
u16 temp_error1, temp_error2;
u8 gain;
u8 reference_voltage;
struct regulator *regulator;
struct thermal_zone_device *tzd;
unsigned int ntrip;
bool enabled;
void (*tmu_set_trip_temp)(struct exynos_tmu_data *data, int trip,
u8 temp);
void (*tmu_set_trip_hyst)(struct exynos_tmu_data *data, int trip,
u8 temp, u8 hyst);
void (*tmu_initialize)(struct platform_device *pdev);
void (*tmu_control)(struct platform_device *pdev, bool on);
int (*tmu_read)(struct exynos_tmu_data *data);
void (*tmu_set_emulation)(struct exynos_tmu_data *data, int temp);
void (*tmu_clear_irqs)(struct exynos_tmu_data *data);
};
/*
* TMU treats temperature as a mapped temperature code.
* The temperature is converted differently depending on the calibration type.
*/
static int temp_to_code(struct exynos_tmu_data *data, u8 temp)
{
if (data->cal_type == TYPE_ONE_POINT_TRIMMING)
return temp + data->temp_error1 - EXYNOS_FIRST_POINT_TRIM;
return (temp - EXYNOS_FIRST_POINT_TRIM) *
(data->temp_error2 - data->temp_error1) /
(EXYNOS_SECOND_POINT_TRIM - EXYNOS_FIRST_POINT_TRIM) +
data->temp_error1;
}
/*
* Calculate a temperature value from a temperature code.
* The unit of the temperature is degree Celsius.
*/
static int code_to_temp(struct exynos_tmu_data *data, u16 temp_code)
{
if (data->cal_type == TYPE_ONE_POINT_TRIMMING)
return temp_code - data->temp_error1 + EXYNOS_FIRST_POINT_TRIM;
return (temp_code - data->temp_error1) *
(EXYNOS_SECOND_POINT_TRIM - EXYNOS_FIRST_POINT_TRIM) /
(data->temp_error2 - data->temp_error1) +
EXYNOS_FIRST_POINT_TRIM;
}
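/*
 * Worked example (hypothetical trim values): with temp_error1 = 55
 * fused at the 25 C trim point and temp_error2 = 125 at the 85 C
 * point, temp_to_code(85) = (85 - 25) * (125 - 55) / 60 + 55 = 125,
 * and code_to_temp(125) recovers 85, so the two conversions are exact
 * inverses at the trim points.
 */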
static void sanitize_temp_error(struct exynos_tmu_data *data, u32 trim_info)
{
u16 tmu_temp_mask =
(data->soc == SOC_ARCH_EXYNOS7) ? EXYNOS7_TMU_TEMP_MASK
: EXYNOS_TMU_TEMP_MASK;
data->temp_error1 = trim_info & tmu_temp_mask;
data->temp_error2 = ((trim_info >> EXYNOS_TRIMINFO_85_SHIFT) &
EXYNOS_TMU_TEMP_MASK);
if (!data->temp_error1 ||
(data->min_efuse_value > data->temp_error1) ||
(data->temp_error1 > data->max_efuse_value))
data->temp_error1 = data->efuse_value & EXYNOS_TMU_TEMP_MASK;
if (!data->temp_error2)
data->temp_error2 =
(data->efuse_value >> EXYNOS_TRIMINFO_85_SHIFT) &
EXYNOS_TMU_TEMP_MASK;
}
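/*
 * Illustrative note: on Exynos4210, for instance, efuse_value = 55
 * with a valid window of 40..100 (set in exynos_map_dt_data() below),
 * so an out-of-range fused temp_error1 of 0 or 200 would be replaced
 * by 55 here.
 */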
static int exynos_tmu_initialize(struct platform_device *pdev)
{
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
struct thermal_zone_device *tzd = data->tzd;
int num_trips = thermal_zone_get_num_trips(tzd);
unsigned int status;
int ret = 0, temp;
ret = thermal_zone_get_crit_temp(tzd, &temp);
if (ret && data->soc != SOC_ARCH_EXYNOS5433) { /* FIXME */
dev_err(&pdev->dev,
"No CRITICAL trip point defined in device tree!\n");
goto out;
}
if (num_trips > data->ntrip) {
dev_info(&pdev->dev,
"More trip points than supported by this TMU.\n");
dev_info(&pdev->dev,
"%d trip points should be configured in polling mode.\n",
num_trips - data->ntrip);
}
mutex_lock(&data->lock);
clk_enable(data->clk);
if (!IS_ERR(data->clk_sec))
clk_enable(data->clk_sec);
status = readb(data->base + EXYNOS_TMU_REG_STATUS);
if (!status) {
ret = -EBUSY;
} else {
int i, ntrips =
min_t(int, num_trips, data->ntrip);
data->tmu_initialize(pdev);
/* Write temperature code for rising and falling threshold */
for (i = 0; i < ntrips; i++) {
struct thermal_trip trip;
ret = thermal_zone_get_trip(tzd, i, &trip);
if (ret)
goto err;
data->tmu_set_trip_temp(data, i, trip.temperature / MCELSIUS);
data->tmu_set_trip_hyst(data, i, trip.temperature / MCELSIUS,
trip.hysteresis / MCELSIUS);
}
data->tmu_clear_irqs(data);
}
err:
clk_disable(data->clk);
mutex_unlock(&data->lock);
if (!IS_ERR(data->clk_sec))
clk_disable(data->clk_sec);
out:
return ret;
}
static u32 get_con_reg(struct exynos_tmu_data *data, u32 con)
{
if (data->soc == SOC_ARCH_EXYNOS4412 ||
data->soc == SOC_ARCH_EXYNOS3250)
con |= (EXYNOS4412_MUX_ADDR_VALUE << EXYNOS4412_MUX_ADDR_SHIFT);
con &= ~(EXYNOS_TMU_REF_VOLTAGE_MASK << EXYNOS_TMU_REF_VOLTAGE_SHIFT);
con |= data->reference_voltage << EXYNOS_TMU_REF_VOLTAGE_SHIFT;
con &= ~(EXYNOS_TMU_BUF_SLOPE_SEL_MASK << EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT);
con |= (data->gain << EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT);
con &= ~(EXYNOS_TMU_TRIP_MODE_MASK << EXYNOS_TMU_TRIP_MODE_SHIFT);
con |= (EXYNOS_NOISE_CANCEL_MODE << EXYNOS_TMU_TRIP_MODE_SHIFT);
return con;
}
static void exynos_tmu_control(struct platform_device *pdev, bool on)
{
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
mutex_lock(&data->lock);
clk_enable(data->clk);
data->tmu_control(pdev, on);
data->enabled = on;
clk_disable(data->clk);
mutex_unlock(&data->lock);
}
static void exynos4210_tmu_set_trip_temp(struct exynos_tmu_data *data,
int trip_id, u8 temp)
{
struct thermal_trip trip;
u8 ref, th_code;
if (thermal_zone_get_trip(data->tzd, 0, &trip))
return;
ref = trip.temperature / MCELSIUS;
if (trip_id == 0) {
th_code = temp_to_code(data, ref);
writeb(th_code, data->base + EXYNOS4210_TMU_REG_THRESHOLD_TEMP);
}
temp -= ref;
writeb(temp, data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL0 + trip_id * 4);
}
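/*
 * Illustrative note: the Exynos4210 trigger levels are stored relative
 * to trip 0. With a first trip at 70 C, a later trip at 95 C writes
 * 95 - 70 = 25 into its TRIG_LEVEL register, while the absolute code
 * for 70 C goes into THRESHOLD_TEMP above.
 */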
/* falling thresholds are not supported on Exynos4210 */
static void exynos4210_tmu_set_trip_hyst(struct exynos_tmu_data *data,
int trip, u8 temp, u8 hyst)
{
}
static void exynos4210_tmu_initialize(struct platform_device *pdev)
{
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
sanitize_temp_error(data, readl(data->base + EXYNOS_TMU_REG_TRIMINFO));
}
static void exynos4412_tmu_set_trip_temp(struct exynos_tmu_data *data,
int trip, u8 temp)
{
u32 th, con;
th = readl(data->base + EXYNOS_THD_TEMP_RISE);
th &= ~(0xff << 8 * trip);
th |= temp_to_code(data, temp) << 8 * trip;
writel(th, data->base + EXYNOS_THD_TEMP_RISE);
if (trip == 3) {
con = readl(data->base + EXYNOS_TMU_REG_CONTROL);
con |= (1 << EXYNOS_TMU_THERM_TRIP_EN_SHIFT);
writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
}
}
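/*
 * Illustrative note: each trip occupies one byte of THD_TEMP_RISE, so
 * e.g. trip 2's code lands in bits 23:16 above; trip 3 additionally
 * arms the hardware thermal-trip (reset) enable bit in the control
 * register.
 */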
static void exynos4412_tmu_set_trip_hyst(struct exynos_tmu_data *data,
int trip, u8 temp, u8 hyst)
{
u32 th;
th = readl(data->base + EXYNOS_THD_TEMP_FALL);
th &= ~(0xff << 8 * trip);
if (hyst)
th |= temp_to_code(data, temp - hyst) << 8 * trip;
writel(th, data->base + EXYNOS_THD_TEMP_FALL);
}
static void exynos4412_tmu_initialize(struct platform_device *pdev)
{
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
unsigned int trim_info, ctrl;
if (data->soc == SOC_ARCH_EXYNOS3250 ||
data->soc == SOC_ARCH_EXYNOS4412 ||
data->soc == SOC_ARCH_EXYNOS5250) {
if (data->soc == SOC_ARCH_EXYNOS3250) {
ctrl = readl(data->base + EXYNOS_TMU_TRIMINFO_CON1);
ctrl |= EXYNOS_TRIMINFO_RELOAD_ENABLE;
writel(ctrl, data->base + EXYNOS_TMU_TRIMINFO_CON1);
}
ctrl = readl(data->base + EXYNOS_TMU_TRIMINFO_CON2);
ctrl |= EXYNOS_TRIMINFO_RELOAD_ENABLE;
writel(ctrl, data->base + EXYNOS_TMU_TRIMINFO_CON2);
}
/* On exynos5420 the triminfo register is in the shared space */
if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO)
trim_info = readl(data->base_second + EXYNOS_TMU_REG_TRIMINFO);
else
trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO);
sanitize_temp_error(data, trim_info);
}
static void exynos5433_tmu_set_trip_temp(struct exynos_tmu_data *data,
int trip, u8 temp)
{
unsigned int reg_off, j;
u32 th;
if (trip > 3) {
reg_off = EXYNOS5433_THD_TEMP_RISE7_4;
j = trip - 4;
} else {
reg_off = EXYNOS5433_THD_TEMP_RISE3_0;
j = trip;
}
th = readl(data->base + reg_off);
th &= ~(0xff << j * 8);
th |= (temp_to_code(data, temp) << j * 8);
writel(th, data->base + reg_off);
}
static void exynos5433_tmu_set_trip_hyst(struct exynos_tmu_data *data,
int trip, u8 temp, u8 hyst)
{
unsigned int reg_off, j;
u32 th;
if (trip > 3) {
reg_off = EXYNOS5433_THD_TEMP_FALL7_4;
j = trip - 4;
} else {
reg_off = EXYNOS5433_THD_TEMP_FALL3_0;
j = trip;
}
th = readl(data->base + reg_off);
th &= ~(0xff << j * 8);
th |= (temp_to_code(data, temp - hyst) << j * 8);
writel(th, data->base + reg_off);
}
static void exynos5433_tmu_initialize(struct platform_device *pdev)
{
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
unsigned int trim_info;
int sensor_id, cal_type;
trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO);
sanitize_temp_error(data, trim_info);
/* Read the temperature sensor id */
sensor_id = (trim_info & EXYNOS5433_TRIMINFO_SENSOR_ID_MASK)
>> EXYNOS5433_TRIMINFO_SENSOR_ID_SHIFT;
dev_info(&pdev->dev, "Temperature sensor ID: 0x%x\n", sensor_id);
/* Read the calibration mode */
writel(trim_info, data->base + EXYNOS_TMU_REG_TRIMINFO);
cal_type = (trim_info & EXYNOS5433_TRIMINFO_CALIB_SEL_MASK)
>> EXYNOS5433_TRIMINFO_CALIB_SEL_SHIFT;
switch (cal_type) {
case EXYNOS5433_TRIMINFO_TWO_POINT_TRIMMING:
data->cal_type = TYPE_TWO_POINT_TRIMMING;
break;
case EXYNOS5433_TRIMINFO_ONE_POINT_TRIMMING:
default:
data->cal_type = TYPE_ONE_POINT_TRIMMING;
break;
}
dev_info(&pdev->dev, "Calibration type is %d-point calibration\n",
cal_type ? 2 : 1);
}
static void exynos7_tmu_set_trip_temp(struct exynos_tmu_data *data,
int trip, u8 temp)
{
unsigned int reg_off, bit_off;
u32 th;
reg_off = ((7 - trip) / 2) * 4;
bit_off = ((8 - trip) % 2);
th = readl(data->base + EXYNOS7_THD_TEMP_RISE7_6 + reg_off);
th &= ~(EXYNOS7_TMU_TEMP_MASK << (16 * bit_off));
th |= temp_to_code(data, temp) << (16 * bit_off);
writel(th, data->base + EXYNOS7_THD_TEMP_RISE7_6 + reg_off);
}
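/*
 * Worked example for the Exynos7 index math above: trip 7 gives
 * reg_off = 0, bit_off = 1 (upper half of THD_TEMP_RISE7_6), trip 6
 * gives reg_off = 0, bit_off = 0 (lower half), and trip 5 gives
 * reg_off = 4, bit_off = 1, i.e. the upper half of the next register.
 */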
static void exynos7_tmu_set_trip_hyst(struct exynos_tmu_data *data,
int trip, u8 temp, u8 hyst)
{
unsigned int reg_off, bit_off;
u32 th;
reg_off = ((7 - trip) / 2) * 4;
bit_off = ((8 - trip) % 2);
th = readl(data->base + EXYNOS7_THD_TEMP_FALL7_6 + reg_off);
th &= ~(EXYNOS7_TMU_TEMP_MASK << (16 * bit_off));
th |= temp_to_code(data, temp - hyst) << (16 * bit_off);
writel(th, data->base + EXYNOS7_THD_TEMP_FALL7_6 + reg_off);
}
static void exynos7_tmu_initialize(struct platform_device *pdev)
{
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
unsigned int trim_info;
trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO);
sanitize_temp_error(data, trim_info);
}
static void exynos4210_tmu_control(struct platform_device *pdev, bool on)
{
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
struct thermal_zone_device *tz = data->tzd;
struct thermal_trip trip;
unsigned int con, interrupt_en = 0, i;
con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL));
if (on) {
for (i = 0; i < data->ntrip; i++) {
if (thermal_zone_get_trip(tz, i, &trip))
continue;
interrupt_en |=
(1 << (EXYNOS_TMU_INTEN_RISE0_SHIFT + i * 4));
}
if (data->soc != SOC_ARCH_EXYNOS4210)
interrupt_en |=
interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT;
con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
} else {
con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
}
writel(interrupt_en, data->base + EXYNOS_TMU_REG_INTEN);
writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
}
static void exynos5433_tmu_control(struct platform_device *pdev, bool on)
{
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
struct thermal_zone_device *tz = data->tzd;
struct thermal_trip trip;
unsigned int con, interrupt_en = 0, pd_det_en, i;
con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL));
if (on) {
for (i = 0; i < data->ntrip; i++) {
if (thermal_zone_get_trip(tz, i, &trip))
continue;
interrupt_en |=
(1 << (EXYNOS7_TMU_INTEN_RISE0_SHIFT + i));
}
interrupt_en |=
interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT;
con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
} else
con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
pd_det_en = on ? EXYNOS5433_PD_DET_EN : 0;
writel(pd_det_en, data->base + EXYNOS5433_TMU_PD_DET_EN);
writel(interrupt_en, data->base + EXYNOS5433_TMU_REG_INTEN);
writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
}
static void exynos7_tmu_control(struct platform_device *pdev, bool on)
{
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
struct thermal_zone_device *tz = data->tzd;
struct thermal_trip trip;
unsigned int con, interrupt_en = 0, i;
con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL));
if (on) {
for (i = 0; i < data->ntrip; i++) {
if (thermal_zone_get_trip(tz, i, &trip))
continue;
interrupt_en |=
(1 << (EXYNOS7_TMU_INTEN_RISE0_SHIFT + i));
}
interrupt_en |=
interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT;
con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
con |= (1 << EXYNOS7_PD_DET_EN_SHIFT);
} else {
con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
con &= ~(1 << EXYNOS7_PD_DET_EN_SHIFT);
}
writel(interrupt_en, data->base + EXYNOS7_TMU_REG_INTEN);
writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
}
static int exynos_get_temp(struct thermal_zone_device *tz, int *temp)
{
struct exynos_tmu_data *data = thermal_zone_device_priv(tz);
int value, ret = 0;
if (!data || !data->tmu_read)
return -EINVAL;
else if (!data->enabled)
/*
* Called too early, probably
* from thermal_zone_of_sensor_register().
*/
return -EAGAIN;
mutex_lock(&data->lock);
clk_enable(data->clk);
value = data->tmu_read(data);
if (value < 0)
ret = value;
else
*temp = code_to_temp(data, value) * MCELSIUS;
clk_disable(data->clk);
mutex_unlock(&data->lock);
return ret;
}
#ifdef CONFIG_THERMAL_EMULATION
static u32 get_emul_con_reg(struct exynos_tmu_data *data, unsigned int val,
int temp)
{
if (temp) {
temp /= MCELSIUS;
val &= ~(EXYNOS_EMUL_TIME_MASK << EXYNOS_EMUL_TIME_SHIFT);
val |= (EXYNOS_EMUL_TIME << EXYNOS_EMUL_TIME_SHIFT);
if (data->soc == SOC_ARCH_EXYNOS7) {
val &= ~(EXYNOS7_EMUL_DATA_MASK <<
EXYNOS7_EMUL_DATA_SHIFT);
val |= (temp_to_code(data, temp) <<
EXYNOS7_EMUL_DATA_SHIFT) |
EXYNOS_EMUL_ENABLE;
} else {
val &= ~(EXYNOS_EMUL_DATA_MASK <<
EXYNOS_EMUL_DATA_SHIFT);
val |= (temp_to_code(data, temp) <<
EXYNOS_EMUL_DATA_SHIFT) |
EXYNOS_EMUL_ENABLE;
}
} else {
val &= ~EXYNOS_EMUL_ENABLE;
}
return val;
}
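/*
 * Illustrative note: emulating e.g. 50 C ends up writing
 * temp_to_code(data, 50) into the EMUL_DATA field together with
 * EXYNOS_EMUL_ENABLE; passing temp = 0 clears the enable bit and
 * returns the TMU to real measurements.
 */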
static void exynos4412_tmu_set_emulation(struct exynos_tmu_data *data,
int temp)
{
unsigned int val;
u32 emul_con;
if (data->soc == SOC_ARCH_EXYNOS5260)
emul_con = EXYNOS5260_EMUL_CON;
else if (data->soc == SOC_ARCH_EXYNOS5433)
emul_con = EXYNOS5433_TMU_EMUL_CON;
else if (data->soc == SOC_ARCH_EXYNOS7)
emul_con = EXYNOS7_TMU_REG_EMUL_CON;
else
emul_con = EXYNOS_EMUL_CON;
val = readl(data->base + emul_con);
val = get_emul_con_reg(data, val, temp);
writel(val, data->base + emul_con);
}
static int exynos_tmu_set_emulation(struct thermal_zone_device *tz, int temp)
{
struct exynos_tmu_data *data = thermal_zone_device_priv(tz);
int ret = -EINVAL;
if (data->soc == SOC_ARCH_EXYNOS4210)
goto out;
if (temp && temp < MCELSIUS)
goto out;
mutex_lock(&data->lock);
clk_enable(data->clk);
data->tmu_set_emulation(data, temp);
clk_disable(data->clk);
mutex_unlock(&data->lock);
return 0;
out:
return ret;
}
#else
#define exynos4412_tmu_set_emulation NULL
static int exynos_tmu_set_emulation(struct thermal_zone_device *tz, int temp)
{ return -EINVAL; }
#endif /* CONFIG_THERMAL_EMULATION */
static int exynos4210_tmu_read(struct exynos_tmu_data *data)
{
int ret = readb(data->base + EXYNOS_TMU_REG_CURRENT_TEMP);
/* "temp_code" should range between 75 and 175 */
return (ret < 75 || ret > 175) ? -ENODATA : ret;
}
static int exynos4412_tmu_read(struct exynos_tmu_data *data)
{
return readb(data->base + EXYNOS_TMU_REG_CURRENT_TEMP);
}
static int exynos7_tmu_read(struct exynos_tmu_data *data)
{
return readw(data->base + EXYNOS_TMU_REG_CURRENT_TEMP) &
EXYNOS7_TMU_TEMP_MASK;
}
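/*
 * Note: the Exynos7 current-temperature field is 9 bits wide
 * (EXYNOS7_TMU_TEMP_MASK is 0x1ff), hence the readw() plus mask here
 * instead of the plain readb() used on the older SoCs above.
 */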
static void exynos_tmu_work(struct work_struct *work)
{
struct exynos_tmu_data *data = container_of(work,
struct exynos_tmu_data, irq_work);
thermal_zone_device_update(data->tzd, THERMAL_EVENT_UNSPECIFIED);
mutex_lock(&data->lock);
clk_enable(data->clk);
/* TODO: take action based on particular interrupt */
data->tmu_clear_irqs(data);
clk_disable(data->clk);
mutex_unlock(&data->lock);
enable_irq(data->irq);
}
static void exynos4210_tmu_clear_irqs(struct exynos_tmu_data *data)
{
unsigned int val_irq;
u32 tmu_intstat, tmu_intclear;
if (data->soc == SOC_ARCH_EXYNOS5260) {
tmu_intstat = EXYNOS5260_TMU_REG_INTSTAT;
tmu_intclear = EXYNOS5260_TMU_REG_INTCLEAR;
} else if (data->soc == SOC_ARCH_EXYNOS7) {
tmu_intstat = EXYNOS7_TMU_REG_INTPEND;
tmu_intclear = EXYNOS7_TMU_REG_INTPEND;
} else if (data->soc == SOC_ARCH_EXYNOS5433) {
tmu_intstat = EXYNOS5433_TMU_REG_INTPEND;
tmu_intclear = EXYNOS5433_TMU_REG_INTPEND;
} else {
tmu_intstat = EXYNOS_TMU_REG_INTSTAT;
tmu_intclear = EXYNOS_TMU_REG_INTCLEAR;
}
val_irq = readl(data->base + tmu_intstat);
/*
* Clear the interrupts. Please note that the documentation for
* Exynos3250, Exynos4412, Exynos5250 and Exynos5260 incorrectly
* states that INTCLEAR register has a different placing of bits
* responsible for FALL IRQs than INTSTAT register. Exynos5420
* and Exynos5440 documentation is correct (Exynos4210 doesn't
* support FALL IRQs at all).
*/
writel(val_irq, data->base + tmu_intclear);
}
static irqreturn_t exynos_tmu_irq(int irq, void *id)
{
struct exynos_tmu_data *data = id;
disable_irq_nosync(irq);
schedule_work(&data->irq_work);
return IRQ_HANDLED;
}
static const struct of_device_id exynos_tmu_match[] = {
{
.compatible = "samsung,exynos3250-tmu",
.data = (const void *)SOC_ARCH_EXYNOS3250,
}, {
.compatible = "samsung,exynos4210-tmu",
.data = (const void *)SOC_ARCH_EXYNOS4210,
}, {
.compatible = "samsung,exynos4412-tmu",
.data = (const void *)SOC_ARCH_EXYNOS4412,
}, {
.compatible = "samsung,exynos5250-tmu",
.data = (const void *)SOC_ARCH_EXYNOS5250,
}, {
.compatible = "samsung,exynos5260-tmu",
.data = (const void *)SOC_ARCH_EXYNOS5260,
}, {
.compatible = "samsung,exynos5420-tmu",
.data = (const void *)SOC_ARCH_EXYNOS5420,
}, {
.compatible = "samsung,exynos5420-tmu-ext-triminfo",
.data = (const void *)SOC_ARCH_EXYNOS5420_TRIMINFO,
}, {
.compatible = "samsung,exynos5433-tmu",
.data = (const void *)SOC_ARCH_EXYNOS5433,
}, {
.compatible = "samsung,exynos7-tmu",
.data = (const void *)SOC_ARCH_EXYNOS7,
},
{ },
};
MODULE_DEVICE_TABLE(of, exynos_tmu_match);
static int exynos_map_dt_data(struct platform_device *pdev)
{
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
struct resource res;
if (!data || !pdev->dev.of_node)
return -ENODEV;
data->id = of_alias_get_id(pdev->dev.of_node, "tmuctrl");
if (data->id < 0)
data->id = 0;
data->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
if (data->irq <= 0) {
dev_err(&pdev->dev, "failed to get IRQ\n");
return -ENODEV;
}
if (of_address_to_resource(pdev->dev.of_node, 0, &res)) {
dev_err(&pdev->dev, "failed to get Resource 0\n");
return -ENODEV;
}
data->base = devm_ioremap(&pdev->dev, res.start, resource_size(&res));
if (!data->base) {
dev_err(&pdev->dev, "Failed to ioremap memory\n");
return -EADDRNOTAVAIL;
}
data->soc = (uintptr_t)of_device_get_match_data(&pdev->dev);
switch (data->soc) {
case SOC_ARCH_EXYNOS4210:
data->tmu_set_trip_temp = exynos4210_tmu_set_trip_temp;
data->tmu_set_trip_hyst = exynos4210_tmu_set_trip_hyst;
data->tmu_initialize = exynos4210_tmu_initialize;
data->tmu_control = exynos4210_tmu_control;
data->tmu_read = exynos4210_tmu_read;
data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
data->ntrip = 4;
data->gain = 15;
data->reference_voltage = 7;
data->efuse_value = 55;
data->min_efuse_value = 40;
data->max_efuse_value = 100;
break;
case SOC_ARCH_EXYNOS3250:
case SOC_ARCH_EXYNOS4412:
case SOC_ARCH_EXYNOS5250:
case SOC_ARCH_EXYNOS5260:
case SOC_ARCH_EXYNOS5420:
case SOC_ARCH_EXYNOS5420_TRIMINFO:
data->tmu_set_trip_temp = exynos4412_tmu_set_trip_temp;
data->tmu_set_trip_hyst = exynos4412_tmu_set_trip_hyst;
data->tmu_initialize = exynos4412_tmu_initialize;
data->tmu_control = exynos4210_tmu_control;
data->tmu_read = exynos4412_tmu_read;
data->tmu_set_emulation = exynos4412_tmu_set_emulation;
data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
data->ntrip = 4;
data->gain = 8;
data->reference_voltage = 16;
data->efuse_value = 55;
if (data->soc != SOC_ARCH_EXYNOS5420 &&
data->soc != SOC_ARCH_EXYNOS5420_TRIMINFO)
data->min_efuse_value = 40;
else
data->min_efuse_value = 0;
data->max_efuse_value = 100;
break;
case SOC_ARCH_EXYNOS5433:
data->tmu_set_trip_temp = exynos5433_tmu_set_trip_temp;
data->tmu_set_trip_hyst = exynos5433_tmu_set_trip_hyst;
data->tmu_initialize = exynos5433_tmu_initialize;
data->tmu_control = exynos5433_tmu_control;
data->tmu_read = exynos4412_tmu_read;
data->tmu_set_emulation = exynos4412_tmu_set_emulation;
data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
data->ntrip = 8;
data->gain = 8;
if (res.start == EXYNOS5433_G3D_BASE)
data->reference_voltage = 23;
else
data->reference_voltage = 16;
data->efuse_value = 75;
data->min_efuse_value = 40;
data->max_efuse_value = 150;
break;
case SOC_ARCH_EXYNOS7:
data->tmu_set_trip_temp = exynos7_tmu_set_trip_temp;
data->tmu_set_trip_hyst = exynos7_tmu_set_trip_hyst;
data->tmu_initialize = exynos7_tmu_initialize;
data->tmu_control = exynos7_tmu_control;
data->tmu_read = exynos7_tmu_read;
data->tmu_set_emulation = exynos4412_tmu_set_emulation;
data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
data->ntrip = 8;
data->gain = 9;
data->reference_voltage = 17;
data->efuse_value = 75;
data->min_efuse_value = 15;
data->max_efuse_value = 100;
break;
default:
dev_err(&pdev->dev, "Platform not supported\n");
return -EINVAL;
}
data->cal_type = TYPE_ONE_POINT_TRIMMING;
/*
* Check if the TMU shares some registers and then try to map the
* memory of common registers.
*/
if (data->soc != SOC_ARCH_EXYNOS5420_TRIMINFO)
return 0;
if (of_address_to_resource(pdev->dev.of_node, 1, &res)) {
dev_err(&pdev->dev, "failed to get Resource 1\n");
return -ENODEV;
}
data->base_second = devm_ioremap(&pdev->dev, res.start,
resource_size(&res));
if (!data->base_second) {
dev_err(&pdev->dev, "Failed to ioremap memory\n");
return -ENOMEM;
}
return 0;
}
static const struct thermal_zone_device_ops exynos_sensor_ops = {
.get_temp = exynos_get_temp,
.set_emul_temp = exynos_tmu_set_emulation,
};
static int exynos_tmu_probe(struct platform_device *pdev)
{
struct exynos_tmu_data *data;
int ret;
data = devm_kzalloc(&pdev->dev, sizeof(struct exynos_tmu_data),
GFP_KERNEL);
if (!data)
return -ENOMEM;
platform_set_drvdata(pdev, data);
mutex_init(&data->lock);
/*
* Try enabling the regulator if found
* TODO: Add regulator as an SOC feature, so that regulator enable
* is a compulsory call.
*/
data->regulator = devm_regulator_get_optional(&pdev->dev, "vtmu");
if (!IS_ERR(data->regulator)) {
ret = regulator_enable(data->regulator);
if (ret) {
dev_err(&pdev->dev, "failed to enable vtmu\n");
return ret;
}
} else {
if (PTR_ERR(data->regulator) == -EPROBE_DEFER)
return -EPROBE_DEFER;
dev_info(&pdev->dev, "Regulator node (vtmu) not found\n");
}
ret = exynos_map_dt_data(pdev);
if (ret)
goto err_sensor;
INIT_WORK(&data->irq_work, exynos_tmu_work);
data->clk = devm_clk_get(&pdev->dev, "tmu_apbif");
if (IS_ERR(data->clk)) {
dev_err(&pdev->dev, "Failed to get clock\n");
ret = PTR_ERR(data->clk);
goto err_sensor;
}
data->clk_sec = devm_clk_get(&pdev->dev, "tmu_triminfo_apbif");
if (IS_ERR(data->clk_sec)) {
if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO) {
dev_err(&pdev->dev, "Failed to get triminfo clock\n");
ret = PTR_ERR(data->clk_sec);
goto err_sensor;
}
} else {
ret = clk_prepare(data->clk_sec);
if (ret) {
dev_err(&pdev->dev, "Failed to get clock\n");
goto err_sensor;
}
}
ret = clk_prepare(data->clk);
if (ret) {
dev_err(&pdev->dev, "Failed to get clock\n");
goto err_clk_sec;
}
switch (data->soc) {
case SOC_ARCH_EXYNOS5433:
case SOC_ARCH_EXYNOS7:
data->sclk = devm_clk_get(&pdev->dev, "tmu_sclk");
if (IS_ERR(data->sclk)) {
dev_err(&pdev->dev, "Failed to get sclk\n");
ret = PTR_ERR(data->sclk);
goto err_clk;
} else {
ret = clk_prepare_enable(data->sclk);
if (ret) {
dev_err(&pdev->dev, "Failed to enable sclk\n");
goto err_clk;
}
}
break;
default:
break;
}
/*
* data->tzd must be registered before calling exynos_tmu_initialize(),
* requesting irq and calling exynos_tmu_control().
*/
data->tzd = devm_thermal_of_zone_register(&pdev->dev, 0, data,
&exynos_sensor_ops);
if (IS_ERR(data->tzd)) {
ret = PTR_ERR(data->tzd);
if (ret != -EPROBE_DEFER)
dev_err(&pdev->dev, "Failed to register sensor: %d\n",
ret);
goto err_sclk;
}
ret = exynos_tmu_initialize(pdev);
if (ret) {
dev_err(&pdev->dev, "Failed to initialize TMU\n");
goto err_sclk;
}
ret = devm_request_irq(&pdev->dev, data->irq, exynos_tmu_irq,
IRQF_TRIGGER_RISING | IRQF_SHARED, dev_name(&pdev->dev), data);
if (ret) {
dev_err(&pdev->dev, "Failed to request irq: %d\n", data->irq);
goto err_sclk;
}
exynos_tmu_control(pdev, true);
return 0;
err_sclk:
clk_disable_unprepare(data->sclk);
err_clk:
clk_unprepare(data->clk);
err_clk_sec:
if (!IS_ERR(data->clk_sec))
clk_unprepare(data->clk_sec);
err_sensor:
if (!IS_ERR(data->regulator))
regulator_disable(data->regulator);
return ret;
}
static int exynos_tmu_remove(struct platform_device *pdev)
{
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
exynos_tmu_control(pdev, false);
clk_disable_unprepare(data->sclk);
clk_unprepare(data->clk);
if (!IS_ERR(data->clk_sec))
clk_unprepare(data->clk_sec);
if (!IS_ERR(data->regulator))
regulator_disable(data->regulator);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int exynos_tmu_suspend(struct device *dev)
{
exynos_tmu_control(to_platform_device(dev), false);
return 0;
}
static int exynos_tmu_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
exynos_tmu_initialize(pdev);
exynos_tmu_control(pdev, true);
return 0;
}
static SIMPLE_DEV_PM_OPS(exynos_tmu_pm,
exynos_tmu_suspend, exynos_tmu_resume);
#define EXYNOS_TMU_PM (&exynos_tmu_pm)
#else
#define EXYNOS_TMU_PM NULL
#endif
static struct platform_driver exynos_tmu_driver = {
.driver = {
.name = "exynos-tmu",
.pm = EXYNOS_TMU_PM,
.of_match_table = exynos_tmu_match,
},
.probe = exynos_tmu_probe,
.remove = exynos_tmu_remove,
};
module_platform_driver(exynos_tmu_driver);
MODULE_DESCRIPTION("Exynos TMU Driver");
MODULE_AUTHOR("Donggeun Kim <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:exynos-tmu");
/* ==== end of file: drivers/thermal/samsung/exynos_tmu.c (linux-master) ==== */
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015 MediaTek Inc.
* Author: Hanyi Wu <[email protected]>
* Sascha Hauer <[email protected]>
* Dawei Chien <[email protected]>
* Louis Yu <[email protected]>
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/thermal.h>
#include <linux/reset.h>
#include <linux/types.h>
#include "../thermal_hwmon.h"
/* AUXADC Registers */
#define AUXADC_CON1_SET_V 0x008
#define AUXADC_CON1_CLR_V 0x00c
#define AUXADC_CON2_V 0x010
#define AUXADC_DATA(channel) (0x14 + (channel) * 4)
#define APMIXED_SYS_TS_CON0 0x600
#define APMIXED_SYS_TS_CON1 0x604
/* Thermal Controller Registers */
#define TEMP_MONCTL0 0x000
#define TEMP_MONCTL1 0x004
#define TEMP_MONCTL2 0x008
#define TEMP_MONIDET0 0x014
#define TEMP_MONIDET1 0x018
#define TEMP_MSRCTL0 0x038
#define TEMP_MSRCTL1 0x03c
#define TEMP_AHBPOLL 0x040
#define TEMP_AHBTO 0x044
#define TEMP_ADCPNP0 0x048
#define TEMP_ADCPNP1 0x04c
#define TEMP_ADCPNP2 0x050
#define TEMP_ADCPNP3 0x0b4
#define TEMP_ADCMUX 0x054
#define TEMP_ADCEN 0x060
#define TEMP_PNPMUXADDR 0x064
#define TEMP_ADCMUXADDR 0x068
#define TEMP_ADCENADDR 0x074
#define TEMP_ADCVALIDADDR 0x078
#define TEMP_ADCVOLTADDR 0x07c
#define TEMP_RDCTRL 0x080
#define TEMP_ADCVALIDMASK 0x084
#define TEMP_ADCVOLTAGESHIFT 0x088
#define TEMP_ADCWRITECTRL 0x08c
#define TEMP_MSR0 0x090
#define TEMP_MSR1 0x094
#define TEMP_MSR2 0x098
#define TEMP_MSR3 0x0B8
#define TEMP_SPARE0 0x0f0
#define TEMP_ADCPNP0_1 0x148
#define TEMP_ADCPNP1_1 0x14c
#define TEMP_ADCPNP2_1 0x150
#define TEMP_MSR0_1 0x190
#define TEMP_MSR1_1 0x194
#define TEMP_MSR2_1 0x198
#define TEMP_ADCPNP3_1 0x1b4
#define TEMP_MSR3_1 0x1B8
#define PTPCORESEL 0x400
#define TEMP_MONCTL1_PERIOD_UNIT(x) ((x) & 0x3ff)
#define TEMP_MONCTL2_FILTER_INTERVAL(x) (((x) & 0x3ff) << 16)
#define TEMP_MONCTL2_SENSOR_INTERVAL(x) ((x) & 0x3ff)
#define TEMP_AHBPOLL_ADC_POLL_INTERVAL(x) (x)
#define TEMP_ADCWRITECTRL_ADC_PNP_WRITE BIT(0)
#define TEMP_ADCWRITECTRL_ADC_MUX_WRITE BIT(1)
#define TEMP_ADCVALIDMASK_VALID_HIGH BIT(5)
#define TEMP_ADCVALIDMASK_VALID_POS(bit) (bit)
/* MT8173 thermal sensors */
#define MT8173_TS1 0
#define MT8173_TS2 1
#define MT8173_TS3 2
#define MT8173_TS4 3
#define MT8173_TSABB 4
/* AUXADC channel 11 is used for the temperature sensors */
#define MT8173_TEMP_AUXADC_CHANNEL 11
/* The total number of temperature sensors in the MT8173 */
#define MT8173_NUM_SENSORS 5
/* The number of banks in the MT8173 */
#define MT8173_NUM_ZONES 4
/* The number of sensing points per bank */
#define MT8173_NUM_SENSORS_PER_ZONE 4
/* The number of controllers in the MT8173 */
#define MT8173_NUM_CONTROLLER 1
/* The calibration coefficient of sensor */
#define MT8173_CALIBRATION 165
/* Valid temperatures range */
#define MT8173_TEMP_MIN -20000
#define MT8173_TEMP_MAX 150000
/*
* Layout of the fuses providing the calibration data
* These macros could be used for MT8183, MT8173, MT2701, and MT2712.
* MT8183 has 6 sensors and needs 6 VTS calibration data.
* MT8173 has 5 sensors and needs 5 VTS calibration data.
* MT2701 has 3 sensors and needs 3 VTS calibration data.
* MT2712 has 4 sensors and needs 4 VTS calibration data.
*/
#define CALIB_BUF0_VALID_V1 BIT(0)
#define CALIB_BUF1_ADC_GE_V1(x) (((x) >> 22) & 0x3ff)
#define CALIB_BUF0_VTS_TS1_V1(x) (((x) >> 17) & 0x1ff)
#define CALIB_BUF0_VTS_TS2_V1(x) (((x) >> 8) & 0x1ff)
#define CALIB_BUF1_VTS_TS3_V1(x) (((x) >> 0) & 0x1ff)
#define CALIB_BUF2_VTS_TS4_V1(x) (((x) >> 23) & 0x1ff)
#define CALIB_BUF2_VTS_TS5_V1(x) (((x) >> 5) & 0x1ff)
#define CALIB_BUF2_VTS_TSABB_V1(x) (((x) >> 14) & 0x1ff)
#define CALIB_BUF0_DEGC_CALI_V1(x) (((x) >> 1) & 0x3f)
#define CALIB_BUF0_O_SLOPE_V1(x) (((x) >> 26) & 0x3f)
#define CALIB_BUF0_O_SLOPE_SIGN_V1(x) (((x) >> 7) & 0x1)
#define CALIB_BUF1_ID_V1(x) (((x) >> 9) & 0x1)
/*
* Layout of the fuses providing the calibration data
* These macros could be used for MT7622.
*/
#define CALIB_BUF0_ADC_OE_V2(x) (((x) >> 22) & 0x3ff)
#define CALIB_BUF0_ADC_GE_V2(x) (((x) >> 12) & 0x3ff)
#define CALIB_BUF0_DEGC_CALI_V2(x) (((x) >> 6) & 0x3f)
#define CALIB_BUF0_O_SLOPE_V2(x) (((x) >> 0) & 0x3f)
#define CALIB_BUF1_VTS_TS1_V2(x) (((x) >> 23) & 0x1ff)
#define CALIB_BUF1_VTS_TS2_V2(x) (((x) >> 14) & 0x1ff)
#define CALIB_BUF1_VTS_TSABB_V2(x) (((x) >> 5) & 0x1ff)
#define CALIB_BUF1_VALID_V2(x) (((x) >> 4) & 0x1)
#define CALIB_BUF1_O_SLOPE_SIGN_V2(x) (((x) >> 3) & 0x1)
/*
* Layout of the fuses providing the calibration data
* These macros can be used for MT7981 and MT7986.
*/
#define CALIB_BUF0_ADC_GE_V3(x) (((x) >> 0) & 0x3ff)
#define CALIB_BUF0_DEGC_CALI_V3(x) (((x) >> 20) & 0x3f)
#define CALIB_BUF0_O_SLOPE_V3(x) (((x) >> 26) & 0x3f)
#define CALIB_BUF1_VTS_TS1_V3(x) (((x) >> 0) & 0x1ff)
#define CALIB_BUF1_VTS_TS2_V3(x) (((x) >> 21) & 0x1ff)
#define CALIB_BUF1_VTS_TSABB_V3(x) (((x) >> 9) & 0x1ff)
#define CALIB_BUF1_VALID_V3(x) (((x) >> 18) & 0x1)
#define CALIB_BUF1_O_SLOPE_SIGN_V3(x) (((x) >> 19) & 0x1)
#define CALIB_BUF1_ID_V3(x) (((x) >> 20) & 0x1)
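/*
 * Illustrative note: these are plain shift-and-mask extractions, e.g.
 * for a fused word x = 0x00040000 (only bit 18 set),
 * CALIB_BUF1_VALID_V3(x) evaluates to 1, marking the V3 calibration
 * data as valid.
 */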
enum {
VTS1,
VTS2,
VTS3,
VTS4,
VTS5,
VTSABB,
MAX_NUM_VTS,
};
enum mtk_thermal_version {
MTK_THERMAL_V1 = 1,
MTK_THERMAL_V2,
MTK_THERMAL_V3,
};
/* MT2701 thermal sensors */
#define MT2701_TS1 0
#define MT2701_TS2 1
#define MT2701_TSABB 2
/* AUXADC channel 11 is used for the temperature sensors */
#define MT2701_TEMP_AUXADC_CHANNEL 11
/* The total number of temperature sensors in the MT2701 */
#define MT2701_NUM_SENSORS 3
/* The number of sensing points per bank */
#define MT2701_NUM_SENSORS_PER_ZONE 3
/* The number of controllers in the MT2701 */
#define MT2701_NUM_CONTROLLER 1
/* The calibration coefficient of sensor */
#define MT2701_CALIBRATION 165
/* MT2712 thermal sensors */
#define MT2712_TS1 0
#define MT2712_TS2 1
#define MT2712_TS3 2
#define MT2712_TS4 3
/* AUXADC channel 11 is used for the temperature sensors */
#define MT2712_TEMP_AUXADC_CHANNEL 11
/* The total number of temperature sensors in the MT2712 */
#define MT2712_NUM_SENSORS 4
/* The number of sensing points per bank */
#define MT2712_NUM_SENSORS_PER_ZONE 4
/* The number of controllers in the MT2712 */
#define MT2712_NUM_CONTROLLER 1
/* The calibration coefficient of sensor */
#define MT2712_CALIBRATION 165
#define MT7622_TEMP_AUXADC_CHANNEL 11
#define MT7622_NUM_SENSORS 1
#define MT7622_NUM_ZONES 1
#define MT7622_NUM_SENSORS_PER_ZONE 1
#define MT7622_TS1 0
#define MT7622_NUM_CONTROLLER 1
/* The maximum number of banks */
#define MAX_NUM_ZONES 8
/* The calibration coefficient of sensor */
#define MT7622_CALIBRATION 165
/* MT8183 thermal sensors */
#define MT8183_TS1 0
#define MT8183_TS2 1
#define MT8183_TS3 2
#define MT8183_TS4 3
#define MT8183_TS5 4
#define MT8183_TSABB 5
/* AUXADC channel 11 is used for the temperature sensors */
#define MT8183_TEMP_AUXADC_CHANNEL 11
/* The total number of temperature sensors in the MT8183 */
#define MT8183_NUM_SENSORS 6
/* The number of banks in the MT8183 */
#define MT8183_NUM_ZONES 1
/* The number of sensing points per bank */
#define MT8183_NUM_SENSORS_PER_ZONE 6
/* The number of controllers in the MT8183 */
#define MT8183_NUM_CONTROLLER 2
/* The calibration coefficient of sensor */
#define MT8183_CALIBRATION 153
/* AUXADC channel 11 is used for the temperature sensors */
#define MT7986_TEMP_AUXADC_CHANNEL 11
/* The total number of temperature sensors in the MT7986 */
#define MT7986_NUM_SENSORS 1
/* The number of banks in the MT7986 */
#define MT7986_NUM_ZONES 1
/* The number of sensing points per bank */
#define MT7986_NUM_SENSORS_PER_ZONE 1
/* MT7986 thermal sensors */
#define MT7986_TS1 0
/* The number of controllers in the MT7986 */
#define MT7986_NUM_CONTROLLER 1
/* The calibration coefficient of sensor */
#define MT7986_CALIBRATION 165
/* MT8365 */
#define MT8365_TEMP_AUXADC_CHANNEL 11
#define MT8365_CALIBRATION 164
#define MT8365_NUM_CONTROLLER 1
#define MT8365_NUM_BANKS 1
#define MT8365_NUM_SENSORS 3
#define MT8365_NUM_SENSORS_PER_ZONE 3
#define MT8365_TS1 0
#define MT8365_TS2 1
#define MT8365_TS3 2
struct mtk_thermal;
struct thermal_bank_cfg {
unsigned int num_sensors;
const int *sensors;
};
struct mtk_thermal_bank {
struct mtk_thermal *mt;
int id;
};
struct mtk_thermal_data {
s32 num_banks;
s32 num_sensors;
s32 auxadc_channel;
const int *vts_index;
const int *sensor_mux_values;
const int *msr;
const int *adcpnp;
const int cali_val;
const int num_controller;
const int *controller_offset;
bool need_switch_bank;
struct thermal_bank_cfg bank_data[MAX_NUM_ZONES];
enum mtk_thermal_version version;
u32 apmixed_buffer_ctl_reg;
u32 apmixed_buffer_ctl_mask;
u32 apmixed_buffer_ctl_set;
};
struct mtk_thermal {
struct device *dev;
void __iomem *thermal_base;
struct clk *clk_peri_therm;
struct clk *clk_auxadc;
/* lock: for getting and putting banks */
struct mutex lock;
/* Calibration values */
s32 adc_ge;
s32 adc_oe;
s32 degc_cali;
s32 o_slope;
s32 o_slope_sign;
s32 vts[MAX_NUM_VTS];
const struct mtk_thermal_data *conf;
struct mtk_thermal_bank banks[MAX_NUM_ZONES];
int (*raw_to_mcelsius)(struct mtk_thermal *mt, int sensno, s32 raw);
};
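/*
 * Editorial note: the per-version raw_to_mcelsius() callbacks (defined
 * later in the full file) combine the per-sensor VTS fuse values with
 * the gain/slope calibration fields above to turn a raw AUXADC sample
 * into millidegrees Celsius; the exact arithmetic differs between V1,
 * V2 and V3 parts.
 */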
/* MT8183 thermal sensor data */
static const int mt8183_bank_data[MT8183_NUM_SENSORS] = {
MT8183_TS1, MT8183_TS2, MT8183_TS3, MT8183_TS4, MT8183_TS5, MT8183_TSABB
};
static const int mt8183_msr[MT8183_NUM_SENSORS_PER_ZONE] = {
TEMP_MSR0_1, TEMP_MSR1_1, TEMP_MSR2_1, TEMP_MSR1, TEMP_MSR0, TEMP_MSR3_1
};
static const int mt8183_adcpnp[MT8183_NUM_SENSORS_PER_ZONE] = {
TEMP_ADCPNP0_1, TEMP_ADCPNP1_1, TEMP_ADCPNP2_1,
TEMP_ADCPNP1, TEMP_ADCPNP0, TEMP_ADCPNP3_1
};
static const int mt8183_mux_values[MT8183_NUM_SENSORS] = { 0, 1, 2, 3, 4, 0 };
static const int mt8183_tc_offset[MT8183_NUM_CONTROLLER] = {0x0, 0x100};
static const int mt8183_vts_index[MT8183_NUM_SENSORS] = {
VTS1, VTS2, VTS3, VTS4, VTS5, VTSABB
};
/* MT8173 thermal sensor data */
static const int mt8173_bank_data[MT8173_NUM_ZONES][3] = {
{ MT8173_TS2, MT8173_TS3 },
{ MT8173_TS2, MT8173_TS4 },
{ MT8173_TS1, MT8173_TS2, MT8173_TSABB },
{ MT8173_TS2 },
};
static const int mt8173_msr[MT8173_NUM_SENSORS_PER_ZONE] = {
TEMP_MSR0, TEMP_MSR1, TEMP_MSR2, TEMP_MSR3
};
static const int mt8173_adcpnp[MT8173_NUM_SENSORS_PER_ZONE] = {
TEMP_ADCPNP0, TEMP_ADCPNP1, TEMP_ADCPNP2, TEMP_ADCPNP3
};
static const int mt8173_mux_values[MT8173_NUM_SENSORS] = { 0, 1, 2, 3, 16 };
static const int mt8173_tc_offset[MT8173_NUM_CONTROLLER] = { 0x0, };
static const int mt8173_vts_index[MT8173_NUM_SENSORS] = {
VTS1, VTS2, VTS3, VTS4, VTSABB
};
/* MT2701 thermal sensor data */
static const int mt2701_bank_data[MT2701_NUM_SENSORS] = {
MT2701_TS1, MT2701_TS2, MT2701_TSABB
};
static const int mt2701_msr[MT2701_NUM_SENSORS_PER_ZONE] = {
TEMP_MSR0, TEMP_MSR1, TEMP_MSR2
};
static const int mt2701_adcpnp[MT2701_NUM_SENSORS_PER_ZONE] = {
TEMP_ADCPNP0, TEMP_ADCPNP1, TEMP_ADCPNP2
};
static const int mt2701_mux_values[MT2701_NUM_SENSORS] = { 0, 1, 16 };
static const int mt2701_tc_offset[MT2701_NUM_CONTROLLER] = { 0x0, };
static const int mt2701_vts_index[MT2701_NUM_SENSORS] = {
VTS1, VTS2, VTS3
};
/* MT2712 thermal sensor data */
static const int mt2712_bank_data[MT2712_NUM_SENSORS] = {
MT2712_TS1, MT2712_TS2, MT2712_TS3, MT2712_TS4
};
static const int mt2712_msr[MT2712_NUM_SENSORS_PER_ZONE] = {
TEMP_MSR0, TEMP_MSR1, TEMP_MSR2, TEMP_MSR3
};
static const int mt2712_adcpnp[MT2712_NUM_SENSORS_PER_ZONE] = {
TEMP_ADCPNP0, TEMP_ADCPNP1, TEMP_ADCPNP2, TEMP_ADCPNP3
};
static const int mt2712_mux_values[MT2712_NUM_SENSORS] = { 0, 1, 2, 3 };
static const int mt2712_tc_offset[MT2712_NUM_CONTROLLER] = { 0x0, };
static const int mt2712_vts_index[MT2712_NUM_SENSORS] = {
VTS1, VTS2, VTS3, VTS4
};
/* MT7622 thermal sensor data */
static const int mt7622_bank_data[MT7622_NUM_SENSORS] = { MT7622_TS1, };
static const int mt7622_msr[MT7622_NUM_SENSORS_PER_ZONE] = { TEMP_MSR0, };
static const int mt7622_adcpnp[MT7622_NUM_SENSORS_PER_ZONE] = { TEMP_ADCPNP0, };
static const int mt7622_mux_values[MT7622_NUM_SENSORS] = { 0, };
static const int mt7622_vts_index[MT7622_NUM_SENSORS] = { VTS1 };
static const int mt7622_tc_offset[MT7622_NUM_CONTROLLER] = { 0x0, };
/* MT7986 thermal sensor data */
static const int mt7986_bank_data[MT7986_NUM_SENSORS] = { MT7986_TS1, };
static const int mt7986_msr[MT7986_NUM_SENSORS_PER_ZONE] = { TEMP_MSR0, };
static const int mt7986_adcpnp[MT7986_NUM_SENSORS_PER_ZONE] = { TEMP_ADCPNP0, };
static const int mt7986_mux_values[MT7986_NUM_SENSORS] = { 0, };
static const int mt7986_vts_index[MT7986_NUM_SENSORS] = { VTS1 };
static const int mt7986_tc_offset[MT7986_NUM_CONTROLLER] = { 0x0, };
/* MT8365 thermal sensor data */
static const int mt8365_bank_data[MT8365_NUM_SENSORS] = {
MT8365_TS1, MT8365_TS2, MT8365_TS3
};
static const int mt8365_msr[MT8365_NUM_SENSORS_PER_ZONE] = {
TEMP_MSR0, TEMP_MSR1, TEMP_MSR2
};
static const int mt8365_adcpnp[MT8365_NUM_SENSORS_PER_ZONE] = {
TEMP_ADCPNP0, TEMP_ADCPNP1, TEMP_ADCPNP2
};
static const int mt8365_mux_values[MT8365_NUM_SENSORS] = { 0, 1, 2 };
static const int mt8365_tc_offset[MT8365_NUM_CONTROLLER] = { 0 };
static const int mt8365_vts_index[MT8365_NUM_SENSORS] = { VTS1, VTS2, VTS3 };
/*
* The MT8173 thermal controller has four banks. Each bank can read up to
* four temperature sensors simultaneously. The MT8173 has a total of 5
* temperature sensors. We use each bank to measure a certain area of the
 * SoC. Since TS2 is located centrally in the SoC, it is influenced by
 * multiple areas and is therefore used in several banks.
*
* The thermal core only gets the maximum temperature of all banks, so
* the bank concept wouldn't be necessary here. However, the SVS (Smart
* Voltage Scaling) unit makes its decisions based on the same bank
* data, and this indeed needs the temperatures of the individual banks
* for making better decisions.
*/
static const struct mtk_thermal_data mt8173_thermal_data = {
.auxadc_channel = MT8173_TEMP_AUXADC_CHANNEL,
.num_banks = MT8173_NUM_ZONES,
.num_sensors = MT8173_NUM_SENSORS,
.vts_index = mt8173_vts_index,
.cali_val = MT8173_CALIBRATION,
.num_controller = MT8173_NUM_CONTROLLER,
.controller_offset = mt8173_tc_offset,
.need_switch_bank = true,
.bank_data = {
{
.num_sensors = 2,
.sensors = mt8173_bank_data[0],
}, {
.num_sensors = 2,
.sensors = mt8173_bank_data[1],
}, {
.num_sensors = 3,
.sensors = mt8173_bank_data[2],
}, {
.num_sensors = 1,
.sensors = mt8173_bank_data[3],
},
},
.msr = mt8173_msr,
.adcpnp = mt8173_adcpnp,
.sensor_mux_values = mt8173_mux_values,
.version = MTK_THERMAL_V1,
};
/*
* The MT2701 thermal controller has one bank, which can read up to
* three temperature sensors simultaneously. The MT2701 has a total of 3
* temperature sensors.
*
* The thermal core only gets the maximum temperature of this one bank,
* so the bank concept wouldn't be necessary here. However, the SVS (Smart
* Voltage Scaling) unit makes its decisions based on the same bank
* data.
*/
static const struct mtk_thermal_data mt2701_thermal_data = {
.auxadc_channel = MT2701_TEMP_AUXADC_CHANNEL,
.num_banks = 1,
.num_sensors = MT2701_NUM_SENSORS,
.vts_index = mt2701_vts_index,
.cali_val = MT2701_CALIBRATION,
.num_controller = MT2701_NUM_CONTROLLER,
.controller_offset = mt2701_tc_offset,
.need_switch_bank = true,
.bank_data = {
{
.num_sensors = 3,
.sensors = mt2701_bank_data,
},
},
.msr = mt2701_msr,
.adcpnp = mt2701_adcpnp,
.sensor_mux_values = mt2701_mux_values,
.version = MTK_THERMAL_V1,
};
/*
* The MT8365 thermal controller has one bank, which can read up to
* four temperature sensors simultaneously. The MT8365 has a total of 3
* temperature sensors.
*
* The thermal core only gets the maximum temperature of this one bank,
* so the bank concept wouldn't be necessary here. However, the SVS (Smart
* Voltage Scaling) unit makes its decisions based on the same bank
* data.
*/
static const struct mtk_thermal_data mt8365_thermal_data = {
.auxadc_channel = MT8365_TEMP_AUXADC_CHANNEL,
.num_banks = MT8365_NUM_BANKS,
.num_sensors = MT8365_NUM_SENSORS,
.vts_index = mt8365_vts_index,
.cali_val = MT8365_CALIBRATION,
.num_controller = MT8365_NUM_CONTROLLER,
.controller_offset = mt8365_tc_offset,
.need_switch_bank = false,
.bank_data = {
{
.num_sensors = MT8365_NUM_SENSORS,
.sensors = mt8365_bank_data
},
},
.msr = mt8365_msr,
.adcpnp = mt8365_adcpnp,
.sensor_mux_values = mt8365_mux_values,
.version = MTK_THERMAL_V1,
.apmixed_buffer_ctl_reg = APMIXED_SYS_TS_CON0,
.apmixed_buffer_ctl_mask = (u32) ~GENMASK(29, 28),
.apmixed_buffer_ctl_set = 0,
};
/*
* The MT2712 thermal controller has one bank, which can read up to
* four temperature sensors simultaneously. The MT2712 has a total of 4
* temperature sensors.
*
* The thermal core only gets the maximum temperature of this one bank,
* so the bank concept wouldn't be necessary here. However, the SVS (Smart
* Voltage Scaling) unit makes its decisions based on the same bank
* data.
*/
static const struct mtk_thermal_data mt2712_thermal_data = {
.auxadc_channel = MT2712_TEMP_AUXADC_CHANNEL,
.num_banks = 1,
.num_sensors = MT2712_NUM_SENSORS,
.vts_index = mt2712_vts_index,
.cali_val = MT2712_CALIBRATION,
.num_controller = MT2712_NUM_CONTROLLER,
.controller_offset = mt2712_tc_offset,
.need_switch_bank = true,
.bank_data = {
{
.num_sensors = 4,
.sensors = mt2712_bank_data,
},
},
.msr = mt2712_msr,
.adcpnp = mt2712_adcpnp,
.sensor_mux_values = mt2712_mux_values,
.version = MTK_THERMAL_V1,
};
/*
 * The MT7622 has only one sensing point, which uses AUXADC channel 11
 * for raw data access.
*/
static const struct mtk_thermal_data mt7622_thermal_data = {
.auxadc_channel = MT7622_TEMP_AUXADC_CHANNEL,
.num_banks = MT7622_NUM_ZONES,
.num_sensors = MT7622_NUM_SENSORS,
.vts_index = mt7622_vts_index,
.cali_val = MT7622_CALIBRATION,
.num_controller = MT7622_NUM_CONTROLLER,
.controller_offset = mt7622_tc_offset,
.need_switch_bank = true,
.bank_data = {
{
.num_sensors = 1,
.sensors = mt7622_bank_data,
},
},
.msr = mt7622_msr,
.adcpnp = mt7622_adcpnp,
.sensor_mux_values = mt7622_mux_values,
.version = MTK_THERMAL_V2,
.apmixed_buffer_ctl_reg = APMIXED_SYS_TS_CON1,
.apmixed_buffer_ctl_mask = GENMASK(31, 6) | BIT(3),
.apmixed_buffer_ctl_set = BIT(0),
};
/*
* The MT8183 thermal controller has one bank for the current SW framework.
* The MT8183 has a total of 6 temperature sensors.
 * There are two thermal controllers managing the six sensors:
 * the first binds two sensors and the other binds four.
 * The thermal core only gets the maximum temperature of all sensors, so
* the bank concept wouldn't be necessary here. However, the SVS (Smart
* Voltage Scaling) unit makes its decisions based on the same bank
* data, and this indeed needs the temperatures of the individual banks
* for making better decisions.
*/
static const struct mtk_thermal_data mt8183_thermal_data = {
.auxadc_channel = MT8183_TEMP_AUXADC_CHANNEL,
.num_banks = MT8183_NUM_ZONES,
.num_sensors = MT8183_NUM_SENSORS,
.vts_index = mt8183_vts_index,
.cali_val = MT8183_CALIBRATION,
.num_controller = MT8183_NUM_CONTROLLER,
.controller_offset = mt8183_tc_offset,
.need_switch_bank = false,
.bank_data = {
{
.num_sensors = 6,
.sensors = mt8183_bank_data,
},
},
.msr = mt8183_msr,
.adcpnp = mt8183_adcpnp,
.sensor_mux_values = mt8183_mux_values,
.version = MTK_THERMAL_V1,
};
/*
* MT7986 uses AUXADC Channel 11 for raw data access.
*/
static const struct mtk_thermal_data mt7986_thermal_data = {
.auxadc_channel = MT7986_TEMP_AUXADC_CHANNEL,
.num_banks = MT7986_NUM_ZONES,
.num_sensors = MT7986_NUM_SENSORS,
.vts_index = mt7986_vts_index,
.cali_val = MT7986_CALIBRATION,
.num_controller = MT7986_NUM_CONTROLLER,
.controller_offset = mt7986_tc_offset,
.need_switch_bank = true,
.bank_data = {
{
.num_sensors = 1,
.sensors = mt7986_bank_data,
},
},
.msr = mt7986_msr,
.adcpnp = mt7986_adcpnp,
.sensor_mux_values = mt7986_mux_values,
.version = MTK_THERMAL_V3,
};
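/*
 * Note: the MT8173 temperature bounds are used as the plausibility
 * window for every supported SoC; readings outside of them are
 * treated as invalid.
 */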
static bool mtk_thermal_temp_is_valid(int temp)
{
return (temp >= MT8173_TEMP_MIN) && (temp <= MT8173_TEMP_MAX);
}
/**
* raw_to_mcelsius_v1 - convert a raw ADC value to mcelsius
* @mt: The thermal controller
* @sensno: sensor number
* @raw: raw ADC value
*
* This converts the raw ADC value to mcelsius using the SoC specific
* calibration constants
*/
static int raw_to_mcelsius_v1(struct mtk_thermal *mt, int sensno, s32 raw)
{
s32 tmp;
raw &= 0xfff;
tmp = 203450520 << 3;
tmp /= mt->conf->cali_val + mt->o_slope;
tmp /= 10000 + mt->adc_ge;
tmp *= raw - mt->vts[sensno] - 3350;
tmp >>= 3;
return mt->degc_cali * 500 - tmp;
}
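/*
 * A sketch of the conversion above, in plain math (the default
 * calibration values set in mtk_thermal_get_calibration_data() are
 * adc_ge = 512, vts[sensno] = 260, degc_cali = 40, o_slope = 0):
 *
 *   T(raw) = degc_cali * 500 - K * (raw - vts[sensno] - 3350)
 *
 * where K = 203450520 / ((cali_val + o_slope) * (10000 + adc_ge)) is
 * the slope in mcelsius per ADC step. The << 3 / >> 3 pair carries
 * three extra bits of precision through the two integer divisions.
 */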
static int raw_to_mcelsius_v2(struct mtk_thermal *mt, int sensno, s32 raw)
{
s32 format_1;
s32 format_2;
s32 g_oe;
s32 g_gain;
s32 g_x_roomt;
s32 tmp;
if (raw == 0)
return 0;
raw &= 0xfff;
g_gain = 10000 + (((mt->adc_ge - 512) * 10000) >> 12);
g_oe = mt->adc_oe - 512;
format_1 = mt->vts[VTS2] + 3105 - g_oe;
format_2 = (mt->degc_cali * 10) >> 1;
g_x_roomt = (((format_1 * 10000) >> 12) * 10000) / g_gain;
tmp = (((((raw - g_oe) * 10000) >> 12) * 10000) / g_gain) - g_x_roomt;
tmp = tmp * 10 * 100 / 11;
if (mt->o_slope_sign == 0)
tmp = tmp / (165 - mt->o_slope);
else
tmp = tmp / (165 + mt->o_slope);
return (format_2 - tmp) * 100;
}
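/*
 * A rough reading of the v2 model above: the raw value is first
 * corrected for the ADC offset (g_oe) and gain (g_gain) errors taken
 * from the efuse, then referenced against the room-temperature reading
 * of VTS2 (g_x_roomt); the remaining delta is divided by the per-part
 * slope (165 +/- o_slope) and subtracted from the calibration
 * temperature (degc_cali is in units of 0.5 degrees Celsius).
 */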
static int raw_to_mcelsius_v3(struct mtk_thermal *mt, int sensno, s32 raw)
{
s32 tmp;
if (raw == 0)
return 0;
raw &= 0xfff;
tmp = 100000 * 15 / 16 * 10000;
tmp /= 4096 - 512 + mt->adc_ge;
tmp /= 1490;
tmp *= raw - mt->vts[sensno] - 2900;
return mt->degc_cali * 500 - tmp;
}
/**
* mtk_thermal_get_bank - get bank
* @bank: The bank
*
 * The bank registers are banked; we have to select a bank in the
 * PTPCORESEL register before accessing them.
*/
static void mtk_thermal_get_bank(struct mtk_thermal_bank *bank)
{
struct mtk_thermal *mt = bank->mt;
u32 val;
if (mt->conf->need_switch_bank) {
mutex_lock(&mt->lock);
val = readl(mt->thermal_base + PTPCORESEL);
val &= ~0xf;
val |= bank->id;
writel(val, mt->thermal_base + PTPCORESEL);
}
}
/**
* mtk_thermal_put_bank - release bank
* @bank: The bank
*
 * Release a bank previously taken with mtk_thermal_get_bank().
*/
static void mtk_thermal_put_bank(struct mtk_thermal_bank *bank)
{
struct mtk_thermal *mt = bank->mt;
if (mt->conf->need_switch_bank)
mutex_unlock(&mt->lock);
}
/**
* mtk_thermal_bank_temperature - get the temperature of a bank
* @bank: The bank
*
* The temperature of a bank is considered the maximum temperature of
* the sensors associated to the bank.
*/
static int mtk_thermal_bank_temperature(struct mtk_thermal_bank *bank)
{
struct mtk_thermal *mt = bank->mt;
const struct mtk_thermal_data *conf = mt->conf;
int i, temp = INT_MIN, max = INT_MIN;
u32 raw;
for (i = 0; i < conf->bank_data[bank->id].num_sensors; i++) {
raw = readl(mt->thermal_base + conf->msr[i]);
temp = mt->raw_to_mcelsius(
mt, conf->bank_data[bank->id].sensors[i], raw);
/*
* Depending on the filt/sen intervals and ADC polling time,
* we may need up to 60 milliseconds after initialization: this
* will result in the first reading containing an out of range
* temperature value.
		 * Validate the reading both to address the aforementioned issue
		 * and to avoid bogus readings at runtime, should the AUXADC
		 * become unstable due to high EMI, etc.
*/
if (!mtk_thermal_temp_is_valid(temp))
temp = THERMAL_TEMP_INVALID;
if (temp > max)
max = temp;
}
return max;
}
static int mtk_read_temp(struct thermal_zone_device *tz, int *temperature)
{
struct mtk_thermal *mt = thermal_zone_device_priv(tz);
int i;
int tempmax = INT_MIN;
for (i = 0; i < mt->conf->num_banks; i++) {
struct mtk_thermal_bank *bank = &mt->banks[i];
mtk_thermal_get_bank(bank);
tempmax = max(tempmax, mtk_thermal_bank_temperature(bank));
mtk_thermal_put_bank(bank);
}
*temperature = tempmax;
return 0;
}
static const struct thermal_zone_device_ops mtk_thermal_ops = {
.get_temp = mtk_read_temp,
};
static void mtk_thermal_init_bank(struct mtk_thermal *mt, int num,
u32 apmixed_phys_base, u32 auxadc_phys_base,
int ctrl_id)
{
struct mtk_thermal_bank *bank = &mt->banks[num];
const struct mtk_thermal_data *conf = mt->conf;
int i;
int offset = mt->conf->controller_offset[ctrl_id];
void __iomem *controller_base = mt->thermal_base + offset;
bank->id = num;
bank->mt = mt;
mtk_thermal_get_bank(bank);
/* bus clock 66M counting unit is 12 * 15.15ns * 256 = 46.540us */
writel(TEMP_MONCTL1_PERIOD_UNIT(12), controller_base + TEMP_MONCTL1);
/*
* filt interval is 1 * 46.540us = 46.54us,
* sen interval is 429 * 46.540us = 19.96ms
*/
writel(TEMP_MONCTL2_FILTER_INTERVAL(1) |
TEMP_MONCTL2_SENSOR_INTERVAL(429),
controller_base + TEMP_MONCTL2);
	/* the ADC poll interval is set to roughly 10us */
writel(TEMP_AHBPOLL_ADC_POLL_INTERVAL(768),
controller_base + TEMP_AHBPOLL);
/* temperature sampling control, 1 sample */
writel(0x0, controller_base + TEMP_MSRCTL0);
	/* if this polling time is exceeded, an IRQ is raised */
writel(0xffffffff, controller_base + TEMP_AHBTO);
/* number of interrupts per event, 1 is enough */
writel(0x0, controller_base + TEMP_MONIDET0);
writel(0x0, controller_base + TEMP_MONIDET1);
/*
* The MT8173 thermal controller does not have its own ADC. Instead it
* uses AHB bus accesses to control the AUXADC. To do this the thermal
* controller has to be programmed with the physical addresses of the
* AUXADC registers and with the various bit positions in the AUXADC.
* Also the thermal controller controls a mux in the APMIXEDSYS register
* space.
*/
/*
* this value will be stored to TEMP_PNPMUXADDR (TEMP_SPARE0)
* automatically by hw
*/
writel(BIT(conf->auxadc_channel), controller_base + TEMP_ADCMUX);
/* AHB address for auxadc mux selection */
writel(auxadc_phys_base + AUXADC_CON1_CLR_V,
controller_base + TEMP_ADCMUXADDR);
if (mt->conf->version == MTK_THERMAL_V1) {
/* AHB address for pnp sensor mux selection */
writel(apmixed_phys_base + APMIXED_SYS_TS_CON1,
controller_base + TEMP_PNPMUXADDR);
}
/* AHB value for auxadc enable */
writel(BIT(conf->auxadc_channel), controller_base + TEMP_ADCEN);
/* AHB address for auxadc enable (channel 0 immediate mode selected) */
writel(auxadc_phys_base + AUXADC_CON1_SET_V,
controller_base + TEMP_ADCENADDR);
/* AHB address for auxadc valid bit */
writel(auxadc_phys_base + AUXADC_DATA(conf->auxadc_channel),
controller_base + TEMP_ADCVALIDADDR);
/* AHB address for auxadc voltage output */
writel(auxadc_phys_base + AUXADC_DATA(conf->auxadc_channel),
controller_base + TEMP_ADCVOLTADDR);
/* read valid & voltage are at the same register */
writel(0x0, controller_base + TEMP_RDCTRL);
/* indicate where the valid bit is */
writel(TEMP_ADCVALIDMASK_VALID_HIGH | TEMP_ADCVALIDMASK_VALID_POS(12),
controller_base + TEMP_ADCVALIDMASK);
/* no shift */
writel(0x0, controller_base + TEMP_ADCVOLTAGESHIFT);
/* enable auxadc mux write transaction */
writel(TEMP_ADCWRITECTRL_ADC_MUX_WRITE,
controller_base + TEMP_ADCWRITECTRL);
for (i = 0; i < conf->bank_data[num].num_sensors; i++)
writel(conf->sensor_mux_values[conf->bank_data[num].sensors[i]],
mt->thermal_base + conf->adcpnp[i]);
writel((1 << conf->bank_data[num].num_sensors) - 1,
controller_base + TEMP_MONCTL0);
writel(TEMP_ADCWRITECTRL_ADC_PNP_WRITE |
TEMP_ADCWRITECTRL_ADC_MUX_WRITE,
controller_base + TEMP_ADCWRITECTRL);
mtk_thermal_put_bank(bank);
}
static u64 of_get_phys_base(struct device_node *np)
{
struct resource res;
if (of_address_to_resource(np, 0, &res))
return OF_BAD_ADDR;
return res.start;
}
static int mtk_thermal_extract_efuse_v1(struct mtk_thermal *mt, u32 *buf)
{
int i;
if (!(buf[0] & CALIB_BUF0_VALID_V1))
return -EINVAL;
mt->adc_ge = CALIB_BUF1_ADC_GE_V1(buf[1]);
for (i = 0; i < mt->conf->num_sensors; i++) {
switch (mt->conf->vts_index[i]) {
case VTS1:
mt->vts[VTS1] = CALIB_BUF0_VTS_TS1_V1(buf[0]);
break;
case VTS2:
mt->vts[VTS2] = CALIB_BUF0_VTS_TS2_V1(buf[0]);
break;
case VTS3:
mt->vts[VTS3] = CALIB_BUF1_VTS_TS3_V1(buf[1]);
break;
case VTS4:
mt->vts[VTS4] = CALIB_BUF2_VTS_TS4_V1(buf[2]);
break;
case VTS5:
mt->vts[VTS5] = CALIB_BUF2_VTS_TS5_V1(buf[2]);
break;
case VTSABB:
mt->vts[VTSABB] =
CALIB_BUF2_VTS_TSABB_V1(buf[2]);
break;
default:
break;
}
}
mt->degc_cali = CALIB_BUF0_DEGC_CALI_V1(buf[0]);
if (CALIB_BUF1_ID_V1(buf[1]) &
CALIB_BUF0_O_SLOPE_SIGN_V1(buf[0]))
mt->o_slope = -CALIB_BUF0_O_SLOPE_V1(buf[0]);
else
mt->o_slope = CALIB_BUF0_O_SLOPE_V1(buf[0]);
return 0;
}
static int mtk_thermal_extract_efuse_v2(struct mtk_thermal *mt, u32 *buf)
{
if (!CALIB_BUF1_VALID_V2(buf[1]))
return -EINVAL;
mt->adc_oe = CALIB_BUF0_ADC_OE_V2(buf[0]);
mt->adc_ge = CALIB_BUF0_ADC_GE_V2(buf[0]);
mt->degc_cali = CALIB_BUF0_DEGC_CALI_V2(buf[0]);
mt->o_slope = CALIB_BUF0_O_SLOPE_V2(buf[0]);
mt->vts[VTS1] = CALIB_BUF1_VTS_TS1_V2(buf[1]);
mt->vts[VTS2] = CALIB_BUF1_VTS_TS2_V2(buf[1]);
mt->vts[VTSABB] = CALIB_BUF1_VTS_TSABB_V2(buf[1]);
mt->o_slope_sign = CALIB_BUF1_O_SLOPE_SIGN_V2(buf[1]);
return 0;
}
static int mtk_thermal_extract_efuse_v3(struct mtk_thermal *mt, u32 *buf)
{
if (!CALIB_BUF1_VALID_V3(buf[1]))
return -EINVAL;
mt->adc_ge = CALIB_BUF0_ADC_GE_V3(buf[0]);
mt->degc_cali = CALIB_BUF0_DEGC_CALI_V3(buf[0]);
mt->o_slope = CALIB_BUF0_O_SLOPE_V3(buf[0]);
mt->vts[VTS1] = CALIB_BUF1_VTS_TS1_V3(buf[1]);
mt->vts[VTS2] = CALIB_BUF1_VTS_TS2_V3(buf[1]);
mt->vts[VTSABB] = CALIB_BUF1_VTS_TSABB_V3(buf[1]);
mt->o_slope_sign = CALIB_BUF1_O_SLOPE_SIGN_V3(buf[1]);
if (CALIB_BUF1_ID_V3(buf[1]) == 0)
mt->o_slope = 0;
return 0;
}
static int mtk_thermal_get_calibration_data(struct device *dev,
struct mtk_thermal *mt)
{
struct nvmem_cell *cell;
u32 *buf;
size_t len;
int i, ret = 0;
/* Start with default values */
mt->adc_ge = 512;
mt->adc_oe = 512;
for (i = 0; i < mt->conf->num_sensors; i++)
mt->vts[i] = 260;
mt->degc_cali = 40;
mt->o_slope = 0;
cell = nvmem_cell_get(dev, "calibration-data");
if (IS_ERR(cell)) {
if (PTR_ERR(cell) == -EPROBE_DEFER)
return PTR_ERR(cell);
return 0;
}
buf = (u32 *)nvmem_cell_read(cell, &len);
nvmem_cell_put(cell);
if (IS_ERR(buf))
return PTR_ERR(buf);
if (len < 3 * sizeof(u32)) {
dev_warn(dev, "invalid calibration data\n");
ret = -EINVAL;
goto out;
}
switch (mt->conf->version) {
case MTK_THERMAL_V1:
ret = mtk_thermal_extract_efuse_v1(mt, buf);
break;
case MTK_THERMAL_V2:
ret = mtk_thermal_extract_efuse_v2(mt, buf);
break;
case MTK_THERMAL_V3:
ret = mtk_thermal_extract_efuse_v3(mt, buf);
break;
default:
ret = -EINVAL;
break;
}
if (ret) {
dev_info(dev, "Device not calibrated, using default calibration values\n");
ret = 0;
}
out:
kfree(buf);
return ret;
}
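/*
 * A minimal device tree sketch for the cell consumed above (the node
 * names and the efuse layout are illustrative assumptions, not taken
 * from a real board file; only the "calibration-data" cell name is
 * fixed by the lookup in mtk_thermal_get_calibration_data()):
 *
 *	thermal: thermal@1100b000 {
 *		compatible = "mediatek,mt8173-thermal";
 *		...
 *		nvmem-cells = <&thermal_calibration>;
 *		nvmem-cell-names = "calibration-data";
 *	};
 */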
static const struct of_device_id mtk_thermal_of_match[] = {
{
.compatible = "mediatek,mt8173-thermal",
.data = (void *)&mt8173_thermal_data,
},
{
.compatible = "mediatek,mt2701-thermal",
.data = (void *)&mt2701_thermal_data,
},
{
.compatible = "mediatek,mt2712-thermal",
.data = (void *)&mt2712_thermal_data,
},
{
.compatible = "mediatek,mt7622-thermal",
.data = (void *)&mt7622_thermal_data,
},
{
.compatible = "mediatek,mt7986-thermal",
.data = (void *)&mt7986_thermal_data,
},
{
.compatible = "mediatek,mt8183-thermal",
.data = (void *)&mt8183_thermal_data,
},
{
.compatible = "mediatek,mt8365-thermal",
.data = (void *)&mt8365_thermal_data,
	},
	{},
};
MODULE_DEVICE_TABLE(of, mtk_thermal_of_match);
static void mtk_thermal_turn_on_buffer(struct mtk_thermal *mt,
void __iomem *apmixed_base)
{
u32 tmp;
if (!mt->conf->apmixed_buffer_ctl_reg)
return;
tmp = readl(apmixed_base + mt->conf->apmixed_buffer_ctl_reg);
tmp &= mt->conf->apmixed_buffer_ctl_mask;
tmp |= mt->conf->apmixed_buffer_ctl_set;
writel(tmp, apmixed_base + mt->conf->apmixed_buffer_ctl_reg);
udelay(200);
}
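/*
 * Stop the AUXADC periodic temperature sampling (intent inferred from
 * the register usage below rather than from documentation): request
 * AUXADC channel 11 through the CON1 "set" register, keep only sensing
 * point 0 enabled in TEMP_MONCTL0 and clear the periodic measurement
 * control bits (0x10e) in TEMP_MSRCTL1.
 */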
static void mtk_thermal_release_periodic_ts(struct mtk_thermal *mt,
void __iomem *auxadc_base)
{
int tmp;
writel(0x800, auxadc_base + AUXADC_CON1_SET_V);
writel(0x1, mt->thermal_base + TEMP_MONCTL0);
tmp = readl(mt->thermal_base + TEMP_MSRCTL1);
writel((tmp & (~0x10e)), mt->thermal_base + TEMP_MSRCTL1);
}
static int mtk_thermal_probe(struct platform_device *pdev)
{
int ret, i, ctrl_id;
struct device_node *auxadc, *apmixedsys, *np = pdev->dev.of_node;
struct mtk_thermal *mt;
u64 auxadc_phys_base, apmixed_phys_base;
struct thermal_zone_device *tzdev;
void __iomem *apmixed_base, *auxadc_base;
mt = devm_kzalloc(&pdev->dev, sizeof(*mt), GFP_KERNEL);
if (!mt)
return -ENOMEM;
mt->conf = of_device_get_match_data(&pdev->dev);
mt->thermal_base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(mt->thermal_base))
return PTR_ERR(mt->thermal_base);
ret = mtk_thermal_get_calibration_data(&pdev->dev, mt);
if (ret)
return ret;
mutex_init(&mt->lock);
mt->dev = &pdev->dev;
auxadc = of_parse_phandle(np, "mediatek,auxadc", 0);
if (!auxadc) {
dev_err(&pdev->dev, "missing auxadc node\n");
return -ENODEV;
}
auxadc_base = of_iomap(auxadc, 0);
auxadc_phys_base = of_get_phys_base(auxadc);
of_node_put(auxadc);
if (auxadc_phys_base == OF_BAD_ADDR) {
dev_err(&pdev->dev, "Can't get auxadc phys address\n");
return -EINVAL;
}
apmixedsys = of_parse_phandle(np, "mediatek,apmixedsys", 0);
if (!apmixedsys) {
dev_err(&pdev->dev, "missing apmixedsys node\n");
return -ENODEV;
}
apmixed_base = of_iomap(apmixedsys, 0);
apmixed_phys_base = of_get_phys_base(apmixedsys);
of_node_put(apmixedsys);
if (apmixed_phys_base == OF_BAD_ADDR) {
		dev_err(&pdev->dev, "Can't get apmixed phys address\n");
return -EINVAL;
}
ret = device_reset_optional(&pdev->dev);
if (ret)
return ret;
mt->clk_auxadc = devm_clk_get_enabled(&pdev->dev, "auxadc");
if (IS_ERR(mt->clk_auxadc)) {
ret = PTR_ERR(mt->clk_auxadc);
dev_err(&pdev->dev, "Can't enable auxadc clk: %d\n", ret);
return ret;
}
mt->clk_peri_therm = devm_clk_get_enabled(&pdev->dev, "therm");
if (IS_ERR(mt->clk_peri_therm)) {
ret = PTR_ERR(mt->clk_peri_therm);
dev_err(&pdev->dev, "Can't enable peri clk: %d\n", ret);
return ret;
}
mtk_thermal_turn_on_buffer(mt, apmixed_base);
if (mt->conf->version != MTK_THERMAL_V2)
mtk_thermal_release_periodic_ts(mt, auxadc_base);
if (mt->conf->version == MTK_THERMAL_V1)
mt->raw_to_mcelsius = raw_to_mcelsius_v1;
else if (mt->conf->version == MTK_THERMAL_V2)
mt->raw_to_mcelsius = raw_to_mcelsius_v2;
else
mt->raw_to_mcelsius = raw_to_mcelsius_v3;
for (ctrl_id = 0; ctrl_id < mt->conf->num_controller ; ctrl_id++)
for (i = 0; i < mt->conf->num_banks; i++)
mtk_thermal_init_bank(mt, i, apmixed_phys_base,
auxadc_phys_base, ctrl_id);
tzdev = devm_thermal_of_zone_register(&pdev->dev, 0, mt,
&mtk_thermal_ops);
if (IS_ERR(tzdev))
return PTR_ERR(tzdev);
ret = devm_thermal_add_hwmon_sysfs(&pdev->dev, tzdev);
if (ret)
		dev_warn(&pdev->dev, "error in thermal_add_hwmon_sysfs\n");
return 0;
}
static struct platform_driver mtk_thermal_driver = {
.probe = mtk_thermal_probe,
.driver = {
.name = "mtk-thermal",
.of_match_table = mtk_thermal_of_match,
},
};
module_platform_driver(mtk_thermal_driver);
MODULE_AUTHOR("Michael Kao <[email protected]>");
MODULE_AUTHOR("Louis Yu <[email protected]>");
MODULE_AUTHOR("Dawei Chien <[email protected]>");
MODULE_AUTHOR("Sascha Hauer <[email protected]>");
MODULE_AUTHOR("Hanyi Wu <[email protected]>");
MODULE_DESCRIPTION("Mediatek thermal driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/thermal/mediatek/auxadc_thermal.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2023 MediaTek Inc.
* Author: Balsam CHIHI <[email protected]>
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/nvmem-consumer.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/thermal.h>
#include <dt-bindings/thermal/mediatek,lvts-thermal.h>
#include "../thermal_hwmon.h"
#define LVTS_MONCTL0(__base) (__base + 0x0000)
#define LVTS_MONCTL1(__base) (__base + 0x0004)
#define LVTS_MONCTL2(__base) (__base + 0x0008)
#define LVTS_MONINT(__base) (__base + 0x000C)
#define LVTS_MONINTSTS(__base) (__base + 0x0010)
#define LVTS_MONIDET0(__base) (__base + 0x0014)
#define LVTS_MONIDET1(__base) (__base + 0x0018)
#define LVTS_MONIDET2(__base) (__base + 0x001C)
#define LVTS_MONIDET3(__base) (__base + 0x0020)
#define LVTS_H2NTHRE(__base) (__base + 0x0024)
#define LVTS_HTHRE(__base) (__base + 0x0028)
#define LVTS_OFFSETH(__base) (__base + 0x0030)
#define LVTS_OFFSETL(__base) (__base + 0x0034)
#define LVTS_MSRCTL0(__base) (__base + 0x0038)
#define LVTS_MSRCTL1(__base) (__base + 0x003C)
#define LVTS_TSSEL(__base) (__base + 0x0040)
#define LVTS_CALSCALE(__base) (__base + 0x0048)
#define LVTS_ID(__base) (__base + 0x004C)
#define LVTS_CONFIG(__base) (__base + 0x0050)
#define LVTS_EDATA00(__base) (__base + 0x0054)
#define LVTS_EDATA01(__base) (__base + 0x0058)
#define LVTS_EDATA02(__base) (__base + 0x005C)
#define LVTS_EDATA03(__base) (__base + 0x0060)
#define LVTS_MSR0(__base) (__base + 0x0090)
#define LVTS_MSR1(__base) (__base + 0x0094)
#define LVTS_MSR2(__base) (__base + 0x0098)
#define LVTS_MSR3(__base) (__base + 0x009C)
#define LVTS_IMMD0(__base) (__base + 0x00A0)
#define LVTS_IMMD1(__base) (__base + 0x00A4)
#define LVTS_IMMD2(__base) (__base + 0x00A8)
#define LVTS_IMMD3(__base) (__base + 0x00AC)
#define LVTS_PROTCTL(__base) (__base + 0x00C0)
#define LVTS_PROTTA(__base) (__base + 0x00C4)
#define LVTS_PROTTB(__base) (__base + 0x00C8)
#define LVTS_PROTTC(__base) (__base + 0x00CC)
#define LVTS_CLKEN(__base) (__base + 0x00E4)
#define LVTS_PERIOD_UNIT 0
#define LVTS_GROUP_INTERVAL 0
#define LVTS_FILTER_INTERVAL 0
#define LVTS_SENSOR_INTERVAL 0
#define LVTS_HW_FILTER 0x0
#define LVTS_TSSEL_CONF 0x13121110
#define LVTS_CALSCALE_CONF 0x300
#define LVTS_MONINT_CONF 0x8300318C
#define LVTS_MONINT_OFFSET_SENSOR0 0xC
#define LVTS_MONINT_OFFSET_SENSOR1 0x180
#define LVTS_MONINT_OFFSET_SENSOR2 0x3000
#define LVTS_MONINT_OFFSET_SENSOR3 0x3000000
#define LVTS_INT_SENSOR0 0x0009001F
#define LVTS_INT_SENSOR1 0x001203E0
#define LVTS_INT_SENSOR2 0x00247C00
#define LVTS_INT_SENSOR3 0x1FC00000
#define LVTS_SENSOR_MAX 4
#define LVTS_GOLDEN_TEMP_MAX 62
#define LVTS_GOLDEN_TEMP_DEFAULT 50
#define LVTS_COEFF_A -250460
#define LVTS_COEFF_B 250460
#define LVTS_MSR_IMMEDIATE_MODE 0
#define LVTS_MSR_FILTERED_MODE 1
#define LVTS_MSR_READ_TIMEOUT_US 400
#define LVTS_MSR_READ_WAIT_US (LVTS_MSR_READ_TIMEOUT_US / 2)
#define LVTS_HW_SHUTDOWN_MT8195 105000
#define LVTS_MINIMUM_THRESHOLD 20000
static int golden_temp = LVTS_GOLDEN_TEMP_DEFAULT;
static int coeff_b = LVTS_COEFF_B;
struct lvts_sensor_data {
int dt_id;
};
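/*
 * Static per-controller description: which sensors are wired to the
 * controller, the offset of each sensor's calibration word within the
 * efuse stream, the controller's register offset within the domain,
 * the measurement mode (filtered or immediate) and the hardware
 * thermal shutdown temperature.
 */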
struct lvts_ctrl_data {
struct lvts_sensor_data lvts_sensor[LVTS_SENSOR_MAX];
int cal_offset[LVTS_SENSOR_MAX];
int hw_tshut_temp;
int num_lvts_sensor;
int offset;
int mode;
};
struct lvts_data {
const struct lvts_ctrl_data *lvts_ctrl;
int num_lvts_ctrl;
};
struct lvts_sensor {
struct thermal_zone_device *tz;
void __iomem *msr;
void __iomem *base;
int id;
int dt_id;
int low_thresh;
int high_thresh;
};
struct lvts_ctrl {
struct lvts_sensor sensors[LVTS_SENSOR_MAX];
u32 calibration[LVTS_SENSOR_MAX];
u32 hw_tshut_raw_temp;
int num_lvts_sensor;
int mode;
void __iomem *base;
int low_thresh;
int high_thresh;
};
struct lvts_domain {
struct lvts_ctrl *lvts_ctrl;
struct reset_control *reset;
struct clk *clk;
int num_lvts_ctrl;
void __iomem *base;
size_t calib_len;
u8 *calib;
#ifdef CONFIG_DEBUG_FS
struct dentry *dom_dentry;
#endif
};
#ifdef CONFIG_MTK_LVTS_THERMAL_DEBUGFS
#define LVTS_DEBUG_FS_REGS(__reg) \
{ \
.name = __stringify(__reg), \
.offset = __reg(0), \
}
static const struct debugfs_reg32 lvts_regs[] = {
LVTS_DEBUG_FS_REGS(LVTS_MONCTL0),
LVTS_DEBUG_FS_REGS(LVTS_MONCTL1),
LVTS_DEBUG_FS_REGS(LVTS_MONCTL2),
LVTS_DEBUG_FS_REGS(LVTS_MONINT),
LVTS_DEBUG_FS_REGS(LVTS_MONINTSTS),
LVTS_DEBUG_FS_REGS(LVTS_MONIDET0),
LVTS_DEBUG_FS_REGS(LVTS_MONIDET1),
LVTS_DEBUG_FS_REGS(LVTS_MONIDET2),
LVTS_DEBUG_FS_REGS(LVTS_MONIDET3),
LVTS_DEBUG_FS_REGS(LVTS_H2NTHRE),
LVTS_DEBUG_FS_REGS(LVTS_HTHRE),
LVTS_DEBUG_FS_REGS(LVTS_OFFSETH),
LVTS_DEBUG_FS_REGS(LVTS_OFFSETL),
LVTS_DEBUG_FS_REGS(LVTS_MSRCTL0),
LVTS_DEBUG_FS_REGS(LVTS_MSRCTL1),
LVTS_DEBUG_FS_REGS(LVTS_TSSEL),
LVTS_DEBUG_FS_REGS(LVTS_CALSCALE),
LVTS_DEBUG_FS_REGS(LVTS_ID),
LVTS_DEBUG_FS_REGS(LVTS_CONFIG),
LVTS_DEBUG_FS_REGS(LVTS_EDATA00),
LVTS_DEBUG_FS_REGS(LVTS_EDATA01),
LVTS_DEBUG_FS_REGS(LVTS_EDATA02),
LVTS_DEBUG_FS_REGS(LVTS_EDATA03),
LVTS_DEBUG_FS_REGS(LVTS_MSR0),
LVTS_DEBUG_FS_REGS(LVTS_MSR1),
LVTS_DEBUG_FS_REGS(LVTS_MSR2),
LVTS_DEBUG_FS_REGS(LVTS_MSR3),
LVTS_DEBUG_FS_REGS(LVTS_IMMD0),
LVTS_DEBUG_FS_REGS(LVTS_IMMD1),
LVTS_DEBUG_FS_REGS(LVTS_IMMD2),
LVTS_DEBUG_FS_REGS(LVTS_IMMD3),
LVTS_DEBUG_FS_REGS(LVTS_PROTCTL),
LVTS_DEBUG_FS_REGS(LVTS_PROTTA),
LVTS_DEBUG_FS_REGS(LVTS_PROTTB),
LVTS_DEBUG_FS_REGS(LVTS_PROTTC),
LVTS_DEBUG_FS_REGS(LVTS_CLKEN),
};
static int lvts_debugfs_init(struct device *dev, struct lvts_domain *lvts_td)
{
struct debugfs_regset32 *regset;
struct lvts_ctrl *lvts_ctrl;
struct dentry *dentry;
char name[64];
int i;
lvts_td->dom_dentry = debugfs_create_dir(dev_name(dev), NULL);
if (IS_ERR(lvts_td->dom_dentry))
return 0;
for (i = 0; i < lvts_td->num_lvts_ctrl; i++) {
lvts_ctrl = &lvts_td->lvts_ctrl[i];
sprintf(name, "controller%d", i);
dentry = debugfs_create_dir(name, lvts_td->dom_dentry);
if (!dentry)
continue;
regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
if (!regset)
continue;
regset->base = lvts_ctrl->base;
regset->regs = lvts_regs;
regset->nregs = ARRAY_SIZE(lvts_regs);
debugfs_create_regset32("registers", 0400, dentry, regset);
}
return 0;
}
static void lvts_debugfs_exit(struct lvts_domain *lvts_td)
{
debugfs_remove_recursive(lvts_td->dom_dentry);
}
#else
static inline int lvts_debugfs_init(struct device *dev,
struct lvts_domain *lvts_td)
{
return 0;
}
static void lvts_debugfs_exit(struct lvts_domain *lvts_td) { }
#endif
static int lvts_raw_to_temp(u32 raw_temp)
{
int temperature;
temperature = ((s64)(raw_temp & 0xFFFF) * LVTS_COEFF_A) >> 14;
temperature += coeff_b;
return temperature;
}
static u32 lvts_temp_to_raw(int temperature)
{
	s64 raw_temp = ((s64)(coeff_b - temperature)) << 14;
	return div_s64(raw_temp, -LVTS_COEFF_A);
}
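/*
 * Both helpers above implement the same linear model, assuming the
 * MT8195 coefficients defined at the top of the file:
 *
 *   temp_mc = (raw * LVTS_COEFF_A) / 2^14 + coeff_b
 *
 * with coeff_b = golden_temp * 500 + LVTS_COEFF_B once the golden
 * temperature has been read from the efuse in lvts_golden_temp_init().
 */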
static int lvts_get_temp(struct thermal_zone_device *tz, int *temp)
{
struct lvts_sensor *lvts_sensor = thermal_zone_device_priv(tz);
void __iomem *msr = lvts_sensor->msr;
u32 value;
int rc;
/*
* Measurement registers:
*
* LVTS_MSR[0-3] / LVTS_IMMD[0-3]
*
* Bits:
*
* 32-17: Unused
* 16 : Valid temperature
* 15-0 : Raw temperature
*/
rc = readl_poll_timeout(msr, value, value & BIT(16),
LVTS_MSR_READ_WAIT_US, LVTS_MSR_READ_TIMEOUT_US);
/*
	 * As the thermal zone temperature may be read before the
	 * hardware sensor is fully initialized, we have to check the
	 * validity of the temperature returned when reading the
	 * measurement register. The thermal controller sets the
	 * temperature valid bit only when it is fully initialized.
	 *
	 * Otherwise, we may end up with garbage values outside the
	 * functioning temperature range and jump directly to a system
	 * shutdown.
*/
if (rc)
return -EAGAIN;
*temp = lvts_raw_to_temp(value & 0xFFFF);
return 0;
}
static void lvts_update_irq_mask(struct lvts_ctrl *lvts_ctrl)
{
u32 masks[] = {
LVTS_MONINT_OFFSET_SENSOR0,
LVTS_MONINT_OFFSET_SENSOR1,
LVTS_MONINT_OFFSET_SENSOR2,
LVTS_MONINT_OFFSET_SENSOR3,
};
u32 value = 0;
int i;
value = readl(LVTS_MONINT(lvts_ctrl->base));
for (i = 0; i < ARRAY_SIZE(masks); i++) {
if (lvts_ctrl->sensors[i].high_thresh == lvts_ctrl->high_thresh
&& lvts_ctrl->sensors[i].low_thresh == lvts_ctrl->low_thresh)
value |= masks[i];
else
value &= ~masks[i];
}
writel(value, LVTS_MONINT(lvts_ctrl->base));
}
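/*
 * Each controller has a single pair of programmed high/low thresholds
 * shared by all of its sensors. A zone's new thresholds only need to
 * be written out if they raise the controller-wide high limit, or if
 * no sensor still owns the currently programmed pair.
 */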
static bool lvts_should_update_thresh(struct lvts_ctrl *lvts_ctrl, int high)
{
int i;
if (high > lvts_ctrl->high_thresh)
return true;
for (i = 0; i < lvts_ctrl->num_lvts_sensor; i++)
if (lvts_ctrl->sensors[i].high_thresh == lvts_ctrl->high_thresh
&& lvts_ctrl->sensors[i].low_thresh == lvts_ctrl->low_thresh)
return false;
return true;
}
static int lvts_set_trips(struct thermal_zone_device *tz, int low, int high)
{
struct lvts_sensor *lvts_sensor = thermal_zone_device_priv(tz);
struct lvts_ctrl *lvts_ctrl = container_of(lvts_sensor, struct lvts_ctrl, sensors[lvts_sensor->id]);
void __iomem *base = lvts_sensor->base;
u32 raw_low = lvts_temp_to_raw(low != -INT_MAX ? low : LVTS_MINIMUM_THRESHOLD);
u32 raw_high = lvts_temp_to_raw(high);
bool should_update_thresh;
lvts_sensor->low_thresh = low;
lvts_sensor->high_thresh = high;
should_update_thresh = lvts_should_update_thresh(lvts_ctrl, high);
if (should_update_thresh) {
lvts_ctrl->high_thresh = high;
lvts_ctrl->low_thresh = low;
}
lvts_update_irq_mask(lvts_ctrl);
if (!should_update_thresh)
return 0;
/*
* Low offset temperature threshold
*
* LVTS_OFFSETL
*
* Bits:
*
* 14-0 : Raw temperature for threshold
*/
pr_debug("%s: Setting low limit temperature interrupt: %d\n",
thermal_zone_device_type(tz), low);
writel(raw_low, LVTS_OFFSETL(base));
/*
* High offset temperature threshold
*
* LVTS_OFFSETH
*
* Bits:
*
* 14-0 : Raw temperature for threshold
*/
pr_debug("%s: Setting high limit temperature interrupt: %d\n",
thermal_zone_device_type(tz), high);
writel(raw_high, LVTS_OFFSETH(base));
return 0;
}
static irqreturn_t lvts_ctrl_irq_handler(struct lvts_ctrl *lvts_ctrl)
{
irqreturn_t iret = IRQ_NONE;
u32 value;
u32 masks[] = {
LVTS_INT_SENSOR0,
LVTS_INT_SENSOR1,
LVTS_INT_SENSOR2,
LVTS_INT_SENSOR3
};
int i;
/*
* Interrupt monitoring status
*
* LVTS_MONINTST
*
* Bits:
*
* 31 : Interrupt for stage 3
* 30 : Interrupt for stage 2
	 * 29 : Interrupt for stage 1
* 28 : Interrupt using filter on sensor 3
*
* 27 : Interrupt using immediate on sensor 3
* 26 : Interrupt normal to hot on sensor 3
* 25 : Interrupt high offset on sensor 3
* 24 : Interrupt low offset on sensor 3
*
* 23 : Interrupt hot threshold on sensor 3
* 22 : Interrupt cold threshold on sensor 3
* 21 : Interrupt using filter on sensor 2
* 20 : Interrupt using filter on sensor 1
*
* 19 : Interrupt using filter on sensor 0
* 18 : Interrupt using immediate on sensor 2
* 17 : Interrupt using immediate on sensor 1
* 16 : Interrupt using immediate on sensor 0
*
* 15 : Interrupt device access timeout interrupt
* 14 : Interrupt normal to hot on sensor 2
* 13 : Interrupt high offset interrupt on sensor 2
* 12 : Interrupt low offset interrupt on sensor 2
*
* 11 : Interrupt hot threshold on sensor 2
* 10 : Interrupt cold threshold on sensor 2
* 9 : Interrupt normal to hot on sensor 1
* 8 : Interrupt high offset interrupt on sensor 1
*
* 7 : Interrupt low offset interrupt on sensor 1
* 6 : Interrupt hot threshold on sensor 1
* 5 : Interrupt cold threshold on sensor 1
* 4 : Interrupt normal to hot on sensor 0
*
* 3 : Interrupt high offset interrupt on sensor 0
* 2 : Interrupt low offset interrupt on sensor 0
* 1 : Interrupt hot threshold on sensor 0
* 0 : Interrupt cold threshold on sensor 0
*
	 * We are interested in the sensor(s) responsible for the
* interrupt event. We update the thermal framework with the
* thermal zone associated with the sensor. The framework will
* take care of the rest whatever the kind of interrupt, we
* are only interested in which sensor raised the interrupt.
*
* sensor 3 interrupt: 0001 1111 1100 0000 0000 0000 0000 0000
* => 0x1FC00000
* sensor 2 interrupt: 0000 0000 0010 0100 0111 1100 0000 0000
* => 0x00247C00
* sensor 1 interrupt: 0000 0000 0001 0010 0000 0011 1110 0000
* => 0X001203E0
* sensor 0 interrupt: 0000 0000 0000 1001 0000 0000 0001 1111
* => 0x0009001F
*/
value = readl(LVTS_MONINTSTS(lvts_ctrl->base));
/*
* Let's figure out which sensors raised the interrupt
*
* NOTE: the masks array must be ordered with the index
* corresponding to the sensor id eg. index=0, mask for
* sensor0.
*/
for (i = 0; i < ARRAY_SIZE(masks); i++) {
if (!(value & masks[i]))
continue;
thermal_zone_device_update(lvts_ctrl->sensors[i].tz,
THERMAL_TRIP_VIOLATED);
iret = IRQ_HANDLED;
}
/*
* Write back to clear the interrupt status (W1C)
*/
writel(value, LVTS_MONINTSTS(lvts_ctrl->base));
return iret;
}
/*
* Temperature interrupt handler. Even if the driver supports more
* interrupt modes, we use the interrupt when the temperature crosses
 * the hot threshold on the way up and on the way down (modulo the
* hysteresis).
*
* Each thermal domain has a couple of interrupts, one for hardware
* reset and another one for all the thermal events happening on the
* different sensors.
*
* The interrupt is configured for thermal events when crossing the
* hot temperature limit. At each interrupt, we check in every
* controller if there is an interrupt pending.
*/
static irqreturn_t lvts_irq_handler(int irq, void *data)
{
struct lvts_domain *lvts_td = data;
irqreturn_t aux, iret = IRQ_NONE;
int i;
for (i = 0; i < lvts_td->num_lvts_ctrl; i++) {
aux = lvts_ctrl_irq_handler(&lvts_td->lvts_ctrl[i]);
if (aux != IRQ_HANDLED)
continue;
iret = IRQ_HANDLED;
}
return iret;
}
static const struct thermal_zone_device_ops lvts_ops = {
.get_temp = lvts_get_temp,
.set_trips = lvts_set_trips,
};
static int lvts_sensor_init(struct device *dev, struct lvts_ctrl *lvts_ctrl,
const struct lvts_ctrl_data *lvts_ctrl_data)
{
struct lvts_sensor *lvts_sensor = lvts_ctrl->sensors;
void __iomem *msr_regs[] = {
LVTS_MSR0(lvts_ctrl->base),
LVTS_MSR1(lvts_ctrl->base),
LVTS_MSR2(lvts_ctrl->base),
LVTS_MSR3(lvts_ctrl->base)
};
void __iomem *imm_regs[] = {
LVTS_IMMD0(lvts_ctrl->base),
LVTS_IMMD1(lvts_ctrl->base),
LVTS_IMMD2(lvts_ctrl->base),
LVTS_IMMD3(lvts_ctrl->base)
};
int i;
for (i = 0; i < lvts_ctrl_data->num_lvts_sensor; i++) {
int dt_id = lvts_ctrl_data->lvts_sensor[i].dt_id;
/*
* At this point, we don't know which id matches which
		 * sensor. Let's arbitrarily set the id from the index.
*/
lvts_sensor[i].id = i;
/*
* The thermal zone registration will set the trip
* point interrupt in the thermal controller
		 * register. But this will be reset by the subsequent
		 * initialization, so we need to postpone the thermal
		 * zone creation until after the controller is set up.
		 * For this reason, we store the device tree node id
		 * from the data in the sensor structure.
		 */
lvts_sensor[i].dt_id = dt_id;
/*
* We assign the base address of the thermal
* controller as a back pointer. So it will be
* accessible from the different thermal framework ops
* as we pass the lvts_sensor pointer as thermal zone
* private data.
*/
lvts_sensor[i].base = lvts_ctrl->base;
/*
* Each sensor has its own register address to read from.
*/
lvts_sensor[i].msr = lvts_ctrl_data->mode == LVTS_MSR_IMMEDIATE_MODE ?
imm_regs[i] : msr_regs[i];
lvts_sensor[i].low_thresh = INT_MIN;
lvts_sensor[i].high_thresh = INT_MIN;
	}
lvts_ctrl->num_lvts_sensor = lvts_ctrl_data->num_lvts_sensor;
return 0;
}
/*
 * The efuse blob values follow the sensor enumeration per thermal
 * controller. The decoding of the stream is as follows:
*
* stream index map for MCU Domain :
*
* <-----mcu-tc#0-----> <-----sensor#0-----> <-----sensor#1----->
* 0x01 | 0x02 | 0x03 | 0x04 | 0x05 | 0x06 | 0x07 | 0x08 | 0x09
*
* <-----mcu-tc#1-----> <-----sensor#2-----> <-----sensor#3----->
* 0x0A | 0x0B | 0x0C | 0x0D | 0x0E | 0x0F | 0x10 | 0x11 | 0x12
*
* <-----mcu-tc#2-----> <-----sensor#4-----> <-----sensor#5-----> <-----sensor#6-----> <-----sensor#7----->
* 0x13 | 0x14 | 0x15 | 0x16 | 0x17 | 0x18 | 0x19 | 0x1A | 0x1B | 0x1C | 0x1D | 0x1E | 0x1F | 0x20 | 0x21
*
* stream index map for AP Domain :
*
* <-----ap--tc#0-----> <-----sensor#0-----> <-----sensor#1----->
* 0x22 | 0x23 | 0x24 | 0x25 | 0x26 | 0x27 | 0x28 | 0x29 | 0x2A
*
* <-----ap--tc#1-----> <-----sensor#2-----> <-----sensor#3----->
* 0x2B | 0x2C | 0x2D | 0x2E | 0x2F | 0x30 | 0x31 | 0x32 | 0x33
*
* <-----ap--tc#2-----> <-----sensor#4-----> <-----sensor#5-----> <-----sensor#6----->
* 0x34 | 0x35 | 0x36 | 0x37 | 0x38 | 0x39 | 0x3A | 0x3B | 0x3C | 0x3D | 0x3E | 0x3F
*
* <-----ap--tc#3-----> <-----sensor#7-----> <-----sensor#8----->
* 0x40 | 0x41 | 0x42 | 0x43 | 0x44 | 0x45 | 0x46 | 0x47 | 0x48
*
* The data description gives the offset of the calibration data in
 * this byte stream for each sensor.
*/
static int lvts_calibration_init(struct device *dev, struct lvts_ctrl *lvts_ctrl,
const struct lvts_ctrl_data *lvts_ctrl_data,
u8 *efuse_calibration)
{
int i;
for (i = 0; i < lvts_ctrl_data->num_lvts_sensor; i++)
memcpy(&lvts_ctrl->calibration[i],
efuse_calibration + lvts_ctrl_data->cal_offset[i], 2);
return 0;
}
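/*
 * The two-byte copy above fills the low half of a zero-initialized
 * u32, which assumes a little-endian host; that holds for the arm64
 * platforms this driver targets.
 */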
/*
 * The efuse byte stream can be split into several nvmem cell
 * chunks. This function reads and concatenates those into a single
* buffer so it can be read sequentially when initializing the
* calibration data.
*/
static int lvts_calibration_read(struct device *dev, struct lvts_domain *lvts_td,
const struct lvts_data *lvts_data)
{
struct device_node *np = dev_of_node(dev);
struct nvmem_cell *cell;
struct property *prop;
const char *cell_name;
of_property_for_each_string(np, "nvmem-cell-names", prop, cell_name) {
size_t len;
u8 *efuse;
cell = of_nvmem_cell_get(np, cell_name);
if (IS_ERR(cell)) {
dev_err(dev, "Failed to get cell '%s'\n", cell_name);
return PTR_ERR(cell);
}
efuse = nvmem_cell_read(cell, &len);
nvmem_cell_put(cell);
if (IS_ERR(efuse)) {
dev_err(dev, "Failed to read cell '%s'\n", cell_name);
return PTR_ERR(efuse);
}
lvts_td->calib = devm_krealloc(dev, lvts_td->calib,
lvts_td->calib_len + len, GFP_KERNEL);
if (!lvts_td->calib)
return -ENOMEM;
memcpy(lvts_td->calib + lvts_td->calib_len, efuse, len);
lvts_td->calib_len += len;
kfree(efuse);
}
return 0;
}
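/*
 * The golden (calibration) temperature is stored in the most
 * significant byte of the first 32-bit efuse word. A value of zero, or
 * one at or above LVTS_GOLDEN_TEMP_MAX, leaves
 * LVTS_GOLDEN_TEMP_DEFAULT in place.
 */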
static int lvts_golden_temp_init(struct device *dev, u32 *value)
{
u32 gt;
gt = (*value) >> 24;
if (gt && gt < LVTS_GOLDEN_TEMP_MAX)
golden_temp = gt;
coeff_b = golden_temp * 500 + LVTS_COEFF_B;
return 0;
}
static int lvts_ctrl_init(struct device *dev, struct lvts_domain *lvts_td,
const struct lvts_data *lvts_data)
{
size_t size = sizeof(*lvts_td->lvts_ctrl) * lvts_data->num_lvts_ctrl;
struct lvts_ctrl *lvts_ctrl;
int i, ret;
/*
	 * Create the calibration byte stream from the efuse data
*/
ret = lvts_calibration_read(dev, lvts_td, lvts_data);
if (ret)
return ret;
/*
* The golden temp information is contained in the first chunk
* of efuse data.
*/
ret = lvts_golden_temp_init(dev, (u32 *)lvts_td->calib);
if (ret)
return ret;
lvts_ctrl = devm_kzalloc(dev, size, GFP_KERNEL);
if (!lvts_ctrl)
return -ENOMEM;
for (i = 0; i < lvts_data->num_lvts_ctrl; i++) {
lvts_ctrl[i].base = lvts_td->base + lvts_data->lvts_ctrl[i].offset;
ret = lvts_sensor_init(dev, &lvts_ctrl[i],
&lvts_data->lvts_ctrl[i]);
if (ret)
return ret;
ret = lvts_calibration_init(dev, &lvts_ctrl[i],
&lvts_data->lvts_ctrl[i],
lvts_td->calib);
if (ret)
return ret;
/*
* The mode the ctrl will use to read the temperature
* (filtered or immediate)
*/
lvts_ctrl[i].mode = lvts_data->lvts_ctrl[i].mode;
/*
* The temperature to raw temperature must be done
* after initializing the calibration.
*/
lvts_ctrl[i].hw_tshut_raw_temp =
lvts_temp_to_raw(lvts_data->lvts_ctrl[i].hw_tshut_temp);
lvts_ctrl[i].low_thresh = INT_MIN;
lvts_ctrl[i].high_thresh = INT_MIN;
}
/*
	 * We no longer need the efuse byte stream, let's free it
*/
devm_kfree(dev, lvts_td->calib);
lvts_td->lvts_ctrl = lvts_ctrl;
lvts_td->num_lvts_ctrl = lvts_data->num_lvts_ctrl;
return 0;
}
/*
* At this point the configuration register is the only place in the
* driver where we write multiple values. Per hardware constraint,
* each write in the configuration register must be separated by a
* delay of 2 us.
*/
static void lvts_write_config(struct lvts_ctrl *lvts_ctrl, u32 *cmds, int nr_cmds)
{
int i;
/*
* Configuration register
*/
for (i = 0; i < nr_cmds; i++) {
writel(cmds[i], LVTS_CONFIG(lvts_ctrl->base));
usleep_range(2, 4);
}
}
static int lvts_irq_init(struct lvts_ctrl *lvts_ctrl)
{
/*
* LVTS_PROTCTL : Thermal Protection Sensor Selection
*
* Bits:
*
* 19-18 : Sensor to base the protection on
* 17-16 : Strategy:
* 00 : Average of 4 sensors
* 01 : Max of 4 sensors
* 10 : Selected sensor with bits 19-18
* 11 : Reserved
*/
writel(BIT(16), LVTS_PROTCTL(lvts_ctrl->base));
/*
* LVTS_PROTTA : Stage 1 temperature threshold
* LVTS_PROTTB : Stage 2 temperature threshold
* LVTS_PROTTC : Stage 3 temperature threshold
*
* Bits:
*
* 14-0: Raw temperature threshold
*
* writel(0x0, LVTS_PROTTA(lvts_ctrl->base));
* writel(0x0, LVTS_PROTTB(lvts_ctrl->base));
*/
writel(lvts_ctrl->hw_tshut_raw_temp, LVTS_PROTTC(lvts_ctrl->base));
/*
* LVTS_MONINT : Interrupt configuration register
*
* The LVTS_MONINT register layout is the same as the LVTS_MONINTSTS
* register, except we set the bits to enable the interrupt.
*/
writel(LVTS_MONINT_CONF, LVTS_MONINT(lvts_ctrl->base));
return 0;
}
static int lvts_domain_reset(struct device *dev, struct reset_control *reset)
{
int ret;
ret = reset_control_assert(reset);
if (ret)
return ret;
return reset_control_deassert(reset);
}
/*
* Enable or disable the clocks of a specified thermal controller
*/
static int lvts_ctrl_set_enable(struct lvts_ctrl *lvts_ctrl, int enable)
{
/*
* LVTS_CLKEN : Internal LVTS clock
*
* Bits:
*
* 0 : enable / disable clock
*/
writel(enable, LVTS_CLKEN(lvts_ctrl->base));
return 0;
}
static int lvts_ctrl_connect(struct device *dev, struct lvts_ctrl *lvts_ctrl)
{
u32 id, cmds[] = { 0xC103FFFF, 0xC502FF55 };
lvts_write_config(lvts_ctrl, cmds, ARRAY_SIZE(cmds));
/*
* LVTS_ID : Get ID and status of the thermal controller
*
* Bits:
*
* 0-5 : thermal controller id
* 7 : thermal controller connection is valid
*/
id = readl(LVTS_ID(lvts_ctrl->base));
if (!(id & BIT(7)))
return -EIO;
return 0;
}
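/*
 * The configuration words written in lvts_ctrl_connect() above and in
 * lvts_ctrl_initialize() below are opaque vendor initialization
 * values; the only documented feedback is the LVTS_ID validity bit
 * (BIT(7)) checked above.
 */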
static int lvts_ctrl_initialize(struct device *dev, struct lvts_ctrl *lvts_ctrl)
{
/*
* Write device mask: 0xC1030000
*/
u32 cmds[] = {
0xC1030E01, 0xC1030CFC, 0xC1030A8C, 0xC103098D, 0xC10308F1,
0xC10307A6, 0xC10306B8, 0xC1030500, 0xC1030420, 0xC1030300,
0xC1030030, 0xC10300F6, 0xC1030050, 0xC1030060, 0xC10300AC,
0xC10300FC, 0xC103009D, 0xC10300F1, 0xC10300E1
};
lvts_write_config(lvts_ctrl, cmds, ARRAY_SIZE(cmds));
return 0;
}
static int lvts_ctrl_calibrate(struct device *dev, struct lvts_ctrl *lvts_ctrl)
{
int i;
void __iomem *lvts_edata[] = {
LVTS_EDATA00(lvts_ctrl->base),
LVTS_EDATA01(lvts_ctrl->base),
LVTS_EDATA02(lvts_ctrl->base),
LVTS_EDATA03(lvts_ctrl->base)
};
/*
* LVTS_EDATA0X : Efuse calibration reference value for sensor X
*
* Bits:
*
* 20-0 : Efuse value for normalization data
*/
for (i = 0; i < LVTS_SENSOR_MAX; i++)
writel(lvts_ctrl->calibration[i], lvts_edata[i]);
return 0;
}
static int lvts_ctrl_configure(struct device *dev, struct lvts_ctrl *lvts_ctrl)
{
u32 value;
/*
* LVTS_TSSEL : Sensing point index numbering
*
* Bits:
*
* 31-24: ADC Sense 3
* 23-16: ADC Sense 2
* 15-8 : ADC Sense 1
* 7-0 : ADC Sense 0
*/
value = LVTS_TSSEL_CONF;
writel(value, LVTS_TSSEL(lvts_ctrl->base));
/*
* LVTS_CALSCALE : ADC voltage round
*/
	value = LVTS_CALSCALE_CONF;
	writel(value, LVTS_CALSCALE(lvts_ctrl->base));
/*
* LVTS_MSRCTL0 : Sensor filtering strategy
*
* Filters:
*
* 000 : One sample
* 001 : Avg 2 samples
* 010 : 4 samples, drop min and max, avg 2 samples
* 011 : 6 samples, drop min and max, avg 4 samples
* 100 : 10 samples, drop min and max, avg 8 samples
* 101 : 18 samples, drop min and max, avg 16 samples
*
* Bits:
*
* 0-2 : Sensor0 filter
* 3-5 : Sensor1 filter
* 6-8 : Sensor2 filter
* 9-11 : Sensor3 filter
*/
value = LVTS_HW_FILTER << 9 | LVTS_HW_FILTER << 6 |
LVTS_HW_FILTER << 3 | LVTS_HW_FILTER;
writel(value, LVTS_MSRCTL0(lvts_ctrl->base));
/*
* LVTS_MONCTL1 : Period unit and group interval configuration
*
* The clock source of LVTS thermal controller is 26MHz.
*
* The period unit is a time base for all the interval delays
* specified in the registers. By default we use 12. The time
* conversion is done by multiplying by 256 and 1/26.10^6
*
* An interval delay multiplied by the period unit gives the
* duration in seconds.
*
* - Filter interval delay is a delay between two samples of
* the same sensor.
*
* - Sensor interval delay is a delay between two samples of
* different sensors.
*
* - Group interval delay is a delay between different rounds.
*
* For example:
	 *   If Period unit = 0xC (12), filter delay = 1, sensor delay = 2, group delay = 1,
* and two sensors, TS1 and TS2, are in a LVTS thermal controller
* and then
* Period unit time = C * 1/26M * 256 = 12 * 38.46ns * 256 = 118.149us
* Filter interval delay = 1 * Period unit = 118.149us
* Sensor interval delay = 2 * Period unit = 236.298us
* Group interval delay = 1 * Period unit = 118.149us
*
* TS1 TS1 ... TS1 TS2 TS2 ... TS2 TS1...
* <--> Filter interval delay
* <--> Sensor interval delay
* <--> Group interval delay
* Bits:
* 29 - 20 : Group interval
* 16 - 13 : Send a single interrupt when crossing the hot threshold (1)
	 *                or an interrupt every time the hot threshold is crossed (0)
* 9 - 0 : Period unit
*
*/
value = LVTS_GROUP_INTERVAL << 20 | LVTS_PERIOD_UNIT;
writel(value, LVTS_MONCTL1(lvts_ctrl->base));
/*
* LVTS_MONCTL2 : Filtering and sensor interval
*
* Bits:
*
* 25-16 : Interval unit in PERIOD_UNIT between sample on
* the same sensor, filter interval
* 9-0 : Interval unit in PERIOD_UNIT between each sensor
*
*/
value = LVTS_FILTER_INTERVAL << 16 | LVTS_SENSOR_INTERVAL;
writel(value, LVTS_MONCTL2(lvts_ctrl->base));
return lvts_irq_init(lvts_ctrl);
}
static int lvts_ctrl_start(struct device *dev, struct lvts_ctrl *lvts_ctrl)
{
struct lvts_sensor *lvts_sensors = lvts_ctrl->sensors;
struct thermal_zone_device *tz;
u32 sensor_map = 0;
int i;
/*
* Bitmaps to enable each sensor on immediate and filtered modes, as
* described in MSRCTL1 and MONCTL0 registers below, respectively.
*/
u32 sensor_imm_bitmap[] = { BIT(4), BIT(5), BIT(6), BIT(9) };
u32 sensor_filt_bitmap[] = { BIT(0), BIT(1), BIT(2), BIT(3) };
u32 *sensor_bitmap = lvts_ctrl->mode == LVTS_MSR_IMMEDIATE_MODE ?
sensor_imm_bitmap : sensor_filt_bitmap;
for (i = 0; i < lvts_ctrl->num_lvts_sensor; i++) {
int dt_id = lvts_sensors[i].dt_id;
tz = devm_thermal_of_zone_register(dev, dt_id, &lvts_sensors[i],
&lvts_ops);
if (IS_ERR(tz)) {
/*
* This thermal zone is not described in the
* device tree. It is not an error from the
			 * thermal OF code's point of view; we just continue.
*/
if (PTR_ERR(tz) == -ENODEV)
continue;
return PTR_ERR(tz);
}
devm_thermal_add_hwmon_sysfs(dev, tz);
/*
* The thermal zone pointer will be needed in the
* interrupt handler, we store it in the sensor
* structure. The thermal domain structure will be
* passed to the interrupt handler private data as the
* interrupt is shared for all the controller
* belonging to the thermal domain.
*/
lvts_sensors[i].tz = tz;
/*
* This sensor was correctly associated with a thermal
* zone, let's set the corresponding bit in the sensor
* map, so we can enable the temperature monitoring in
* the hardware thermal controller.
*/
sensor_map |= sensor_bitmap[i];
}
/*
	 * The initialization of the thermal zones gives us
	 * which sensing points to enable. If any thermal zone
* was not described in the device tree, it won't be
* enabled here in the sensor map.
*/
if (lvts_ctrl->mode == LVTS_MSR_IMMEDIATE_MODE) {
/*
* LVTS_MSRCTL1 : Measurement control
*
* Bits:
*
* 9: Ignore MSRCTL0 config and do immediate measurement on sensor3
* 6: Ignore MSRCTL0 config and do immediate measurement on sensor2
* 5: Ignore MSRCTL0 config and do immediate measurement on sensor1
* 4: Ignore MSRCTL0 config and do immediate measurement on sensor0
*
* That configuration will ignore the filtering and the delays
* introduced in MONCTL1 and MONCTL2
*/
writel(sensor_map, LVTS_MSRCTL1(lvts_ctrl->base));
} else {
/*
* Bits:
* 9: Single point access flow
* 0-3: Enable sensing point 0-3
*/
writel(sensor_map | BIT(9), LVTS_MONCTL0(lvts_ctrl->base));
}
return 0;
}
static int lvts_domain_init(struct device *dev, struct lvts_domain *lvts_td,
const struct lvts_data *lvts_data)
{
struct lvts_ctrl *lvts_ctrl;
int i, ret;
ret = lvts_ctrl_init(dev, lvts_td, lvts_data);
if (ret)
return ret;
ret = lvts_domain_reset(dev, lvts_td->reset);
if (ret) {
dev_dbg(dev, "Failed to reset domain");
return ret;
}
for (i = 0; i < lvts_td->num_lvts_ctrl; i++) {
lvts_ctrl = &lvts_td->lvts_ctrl[i];
/*
* Initialization steps:
*
* - Enable the clock
* - Connect to the LVTS
* - Initialize the LVTS
* - Prepare the calibration data
* - Select monitored sensors
* [ Configure sampling ]
* [ Configure the interrupt ]
* - Start measurement
*/
ret = lvts_ctrl_set_enable(lvts_ctrl, true);
if (ret) {
dev_dbg(dev, "Failed to enable LVTS clock");
return ret;
}
ret = lvts_ctrl_connect(dev, lvts_ctrl);
if (ret) {
dev_dbg(dev, "Failed to connect to LVTS controller");
return ret;
}
ret = lvts_ctrl_initialize(dev, lvts_ctrl);
if (ret) {
dev_dbg(dev, "Failed to initialize controller");
return ret;
}
ret = lvts_ctrl_calibrate(dev, lvts_ctrl);
if (ret) {
dev_dbg(dev, "Failed to calibrate controller");
return ret;
}
ret = lvts_ctrl_configure(dev, lvts_ctrl);
if (ret) {
dev_dbg(dev, "Failed to configure controller");
return ret;
}
ret = lvts_ctrl_start(dev, lvts_ctrl);
if (ret) {
dev_dbg(dev, "Failed to start controller");
return ret;
}
}
return lvts_debugfs_init(dev, lvts_td);
}
static int lvts_probe(struct platform_device *pdev)
{
const struct lvts_data *lvts_data;
struct lvts_domain *lvts_td;
struct device *dev = &pdev->dev;
struct resource *res;
int irq, ret;
lvts_td = devm_kzalloc(dev, sizeof(*lvts_td), GFP_KERNEL);
if (!lvts_td)
return -ENOMEM;
lvts_data = of_device_get_match_data(dev);
lvts_td->clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(lvts_td->clk))
return dev_err_probe(dev, PTR_ERR(lvts_td->clk), "Failed to retrieve clock\n");
res = platform_get_mem_or_io(pdev, 0);
if (!res)
		return dev_err_probe(dev, -ENXIO, "No IO resource\n");
lvts_td->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(lvts_td->base))
return dev_err_probe(dev, PTR_ERR(lvts_td->base), "Failed to map io resource\n");
lvts_td->reset = devm_reset_control_get_by_index(dev, 0);
if (IS_ERR(lvts_td->reset))
return dev_err_probe(dev, PTR_ERR(lvts_td->reset), "Failed to get reset control\n");
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
ret = lvts_domain_init(dev, lvts_td, lvts_data);
if (ret)
return dev_err_probe(dev, ret, "Failed to initialize the lvts domain\n");
/*
* At this point the LVTS is initialized and enabled. We can
* safely enable the interrupt.
*/
ret = devm_request_threaded_irq(dev, irq, NULL, lvts_irq_handler,
IRQF_ONESHOT, dev_name(dev), lvts_td);
if (ret)
return dev_err_probe(dev, ret, "Failed to request interrupt\n");
platform_set_drvdata(pdev, lvts_td);
return 0;
}
static int lvts_remove(struct platform_device *pdev)
{
struct lvts_domain *lvts_td;
int i;
lvts_td = platform_get_drvdata(pdev);
for (i = 0; i < lvts_td->num_lvts_ctrl; i++)
lvts_ctrl_set_enable(&lvts_td->lvts_ctrl[i], false);
lvts_debugfs_exit(lvts_td);
return 0;
}
static const struct lvts_ctrl_data mt8195_lvts_mcu_data_ctrl[] = {
{
.cal_offset = { 0x04, 0x07 },
.lvts_sensor = {
{ .dt_id = MT8195_MCU_BIG_CPU0 },
{ .dt_id = MT8195_MCU_BIG_CPU1 }
},
.num_lvts_sensor = 2,
.offset = 0x0,
.hw_tshut_temp = LVTS_HW_SHUTDOWN_MT8195,
},
{
.cal_offset = { 0x0d, 0x10 },
.lvts_sensor = {
{ .dt_id = MT8195_MCU_BIG_CPU2 },
{ .dt_id = MT8195_MCU_BIG_CPU3 }
},
.num_lvts_sensor = 2,
.offset = 0x100,
.hw_tshut_temp = LVTS_HW_SHUTDOWN_MT8195,
},
{
.cal_offset = { 0x16, 0x19, 0x1c, 0x1f },
.lvts_sensor = {
{ .dt_id = MT8195_MCU_LITTLE_CPU0 },
{ .dt_id = MT8195_MCU_LITTLE_CPU1 },
{ .dt_id = MT8195_MCU_LITTLE_CPU2 },
{ .dt_id = MT8195_MCU_LITTLE_CPU3 }
},
.num_lvts_sensor = 4,
.offset = 0x200,
.hw_tshut_temp = LVTS_HW_SHUTDOWN_MT8195,
}
};
static const struct lvts_ctrl_data mt8195_lvts_ap_data_ctrl[] = {
{
.cal_offset = { 0x25, 0x28 },
.lvts_sensor = {
{ .dt_id = MT8195_AP_VPU0 },
{ .dt_id = MT8195_AP_VPU1 }
},
.num_lvts_sensor = 2,
.offset = 0x0,
.hw_tshut_temp = LVTS_HW_SHUTDOWN_MT8195,
},
{
.cal_offset = { 0x2e, 0x31 },
.lvts_sensor = {
{ .dt_id = MT8195_AP_GPU0 },
{ .dt_id = MT8195_AP_GPU1 }
},
.num_lvts_sensor = 2,
.offset = 0x100,
.hw_tshut_temp = LVTS_HW_SHUTDOWN_MT8195,
},
{
.cal_offset = { 0x37, 0x3a, 0x3d },
.lvts_sensor = {
{ .dt_id = MT8195_AP_VDEC },
{ .dt_id = MT8195_AP_IMG },
{ .dt_id = MT8195_AP_INFRA },
},
.num_lvts_sensor = 3,
.offset = 0x200,
.hw_tshut_temp = LVTS_HW_SHUTDOWN_MT8195,
},
{
.cal_offset = { 0x43, 0x46 },
.lvts_sensor = {
{ .dt_id = MT8195_AP_CAM0 },
{ .dt_id = MT8195_AP_CAM1 }
},
.num_lvts_sensor = 2,
.offset = 0x300,
.hw_tshut_temp = LVTS_HW_SHUTDOWN_MT8195,
}
};
static const struct lvts_data mt8195_lvts_mcu_data = {
.lvts_ctrl = mt8195_lvts_mcu_data_ctrl,
.num_lvts_ctrl = ARRAY_SIZE(mt8195_lvts_mcu_data_ctrl),
};
static const struct lvts_data mt8195_lvts_ap_data = {
.lvts_ctrl = mt8195_lvts_ap_data_ctrl,
.num_lvts_ctrl = ARRAY_SIZE(mt8195_lvts_ap_data_ctrl),
};
static const struct of_device_id lvts_of_match[] = {
{ .compatible = "mediatek,mt8195-lvts-mcu", .data = &mt8195_lvts_mcu_data },
{ .compatible = "mediatek,mt8195-lvts-ap", .data = &mt8195_lvts_ap_data },
{},
};
MODULE_DEVICE_TABLE(of, lvts_of_match);
static struct platform_driver lvts_driver = {
.probe = lvts_probe,
.remove = lvts_remove,
.driver = {
.name = "mtk-lvts-thermal",
.of_match_table = lvts_of_match,
},
};
module_platform_driver(lvts_driver);
MODULE_AUTHOR("Balsam CHIHI <[email protected]>");
MODULE_DESCRIPTION("MediaTek LVTS Thermal Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/thermal/mediatek/lvts_thermal.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <dt-bindings/thermal/tegra124-soctherm.h>
#include "soctherm.h"
#define TEGRA132_THERMTRIP_ANY_EN_MASK (0x1 << 28)
#define TEGRA132_THERMTRIP_MEM_EN_MASK (0x1 << 27)
#define TEGRA132_THERMTRIP_GPU_EN_MASK (0x1 << 26)
#define TEGRA132_THERMTRIP_CPU_EN_MASK (0x1 << 25)
#define TEGRA132_THERMTRIP_TSENSE_EN_MASK (0x1 << 24)
#define TEGRA132_THERMTRIP_GPUMEM_THRESH_MASK (0xff << 16)
#define TEGRA132_THERMTRIP_CPU_THRESH_MASK (0xff << 8)
#define TEGRA132_THERMTRIP_TSENSE_THRESH_MASK 0xff
#define TEGRA132_THERMCTL_LVL0_UP_THRESH_MASK (0xff << 17)
#define TEGRA132_THERMCTL_LVL0_DN_THRESH_MASK (0xff << 9)
#define TEGRA132_THRESH_GRAIN 1000
#define TEGRA132_BPTT 8
static const struct tegra_tsensor_configuration tegra132_tsensor_config = {
.tall = 16300,
.tiddq_en = 1,
.ten_count = 1,
.tsample = 120,
.tsample_ate = 480,
};
static const struct tegra_tsensor_group tegra132_tsensor_group_cpu = {
.id = TEGRA124_SOCTHERM_SENSOR_CPU,
.name = "cpu",
.sensor_temp_offset = SENSOR_TEMP1,
.sensor_temp_mask = SENSOR_TEMP1_CPU_TEMP_MASK,
.pdiv = 8,
.pdiv_ate = 8,
.pdiv_mask = SENSOR_PDIV_CPU_MASK,
.pllx_hotspot_diff = 10,
.pllx_hotspot_mask = SENSOR_HOTSPOT_CPU_MASK,
.thermtrip_any_en_mask = TEGRA132_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA132_THERMTRIP_CPU_EN_MASK,
.thermtrip_threshold_mask = TEGRA132_THERMTRIP_CPU_THRESH_MASK,
.thermctl_isr_mask = THERM_IRQ_CPU_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_CPU,
.thermctl_lvl0_up_thresh_mask = TEGRA132_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA132_THERMCTL_LVL0_DN_THRESH_MASK,
};
static const struct tegra_tsensor_group tegra132_tsensor_group_gpu = {
.id = TEGRA124_SOCTHERM_SENSOR_GPU,
.name = "gpu",
.sensor_temp_offset = SENSOR_TEMP1,
.sensor_temp_mask = SENSOR_TEMP1_GPU_TEMP_MASK,
.pdiv = 8,
.pdiv_ate = 8,
.pdiv_mask = SENSOR_PDIV_GPU_MASK,
.pllx_hotspot_diff = 5,
.pllx_hotspot_mask = SENSOR_HOTSPOT_GPU_MASK,
.thermtrip_any_en_mask = TEGRA132_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA132_THERMTRIP_GPU_EN_MASK,
.thermtrip_threshold_mask = TEGRA132_THERMTRIP_GPUMEM_THRESH_MASK,
.thermctl_isr_mask = THERM_IRQ_GPU_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_GPU,
.thermctl_lvl0_up_thresh_mask = TEGRA132_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA132_THERMCTL_LVL0_DN_THRESH_MASK,
};
static const struct tegra_tsensor_group tegra132_tsensor_group_pll = {
.id = TEGRA124_SOCTHERM_SENSOR_PLLX,
.name = "pll",
.sensor_temp_offset = SENSOR_TEMP2,
.sensor_temp_mask = SENSOR_TEMP2_PLLX_TEMP_MASK,
.pdiv = 8,
.pdiv_ate = 8,
.pdiv_mask = SENSOR_PDIV_PLLX_MASK,
.thermtrip_any_en_mask = TEGRA132_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA132_THERMTRIP_TSENSE_EN_MASK,
.thermtrip_threshold_mask = TEGRA132_THERMTRIP_TSENSE_THRESH_MASK,
.thermctl_isr_mask = THERM_IRQ_TSENSE_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_TSENSE,
.thermctl_lvl0_up_thresh_mask = TEGRA132_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA132_THERMCTL_LVL0_DN_THRESH_MASK,
};
static const struct tegra_tsensor_group tegra132_tsensor_group_mem = {
.id = TEGRA124_SOCTHERM_SENSOR_MEM,
.name = "mem",
.sensor_temp_offset = SENSOR_TEMP2,
.sensor_temp_mask = SENSOR_TEMP2_MEM_TEMP_MASK,
.pdiv = 8,
.pdiv_ate = 8,
.pdiv_mask = SENSOR_PDIV_MEM_MASK,
.pllx_hotspot_diff = 0,
.pllx_hotspot_mask = SENSOR_HOTSPOT_MEM_MASK,
.thermtrip_any_en_mask = TEGRA132_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA132_THERMTRIP_MEM_EN_MASK,
.thermtrip_threshold_mask = TEGRA132_THERMTRIP_GPUMEM_THRESH_MASK,
.thermctl_isr_mask = THERM_IRQ_MEM_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_MEM,
.thermctl_lvl0_up_thresh_mask = TEGRA132_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA132_THERMCTL_LVL0_DN_THRESH_MASK,
};
static const struct tegra_tsensor_group *tegra132_tsensor_groups[] = {
&tegra132_tsensor_group_cpu,
&tegra132_tsensor_group_gpu,
&tegra132_tsensor_group_pll,
&tegra132_tsensor_group_mem,
};
static struct tegra_tsensor tegra132_tsensors[] = {
{
.name = "cpu0",
.base = 0xc0,
.config = &tegra132_tsensor_config,
.calib_fuse_offset = 0x098,
.fuse_corr_alpha = 1126600,
.fuse_corr_beta = -9433500,
.group = &tegra132_tsensor_group_cpu,
}, {
.name = "cpu1",
.base = 0xe0,
.config = &tegra132_tsensor_config,
.calib_fuse_offset = 0x084,
.fuse_corr_alpha = 1110800,
.fuse_corr_beta = -7383000,
.group = &tegra132_tsensor_group_cpu,
}, {
.name = "cpu2",
.base = 0x100,
.config = &tegra132_tsensor_config,
.calib_fuse_offset = 0x088,
.fuse_corr_alpha = 1113800,
.fuse_corr_beta = -6215200,
.group = &tegra132_tsensor_group_cpu,
}, {
.name = "cpu3",
.base = 0x120,
.config = &tegra132_tsensor_config,
.calib_fuse_offset = 0x12c,
.fuse_corr_alpha = 1129600,
.fuse_corr_beta = -8196100,
.group = &tegra132_tsensor_group_cpu,
}, {
.name = "mem0",
.base = 0x140,
.config = &tegra132_tsensor_config,
.calib_fuse_offset = 0x158,
.fuse_corr_alpha = 1132900,
.fuse_corr_beta = -6755300,
.group = &tegra132_tsensor_group_mem,
}, {
.name = "mem1",
.base = 0x160,
.config = &tegra132_tsensor_config,
.calib_fuse_offset = 0x15c,
.fuse_corr_alpha = 1142300,
.fuse_corr_beta = -7374200,
.group = &tegra132_tsensor_group_mem,
}, {
.name = "gpu",
.base = 0x180,
.config = &tegra132_tsensor_config,
.calib_fuse_offset = 0x154,
.fuse_corr_alpha = 1125100,
.fuse_corr_beta = -6350400,
.group = &tegra132_tsensor_group_gpu,
}, {
.name = "pllx",
.base = 0x1a0,
.config = &tegra132_tsensor_config,
.calib_fuse_offset = 0x160,
.fuse_corr_alpha = 1118100,
.fuse_corr_beta = -8208800,
.group = &tegra132_tsensor_group_pll,
},
};
/*
* Mask/shift bits in FUSE_TSENSOR_COMMON and
 * FUSE_SPARE_REALIGNMENT_REG, which are described in
* tegra_soctherm_fuse.c
*/
static const struct tegra_soctherm_fuse tegra132_soctherm_fuse = {
.fuse_base_cp_mask = 0x3ff,
.fuse_base_cp_shift = 0,
.fuse_base_ft_mask = 0x7ff << 10,
.fuse_base_ft_shift = 10,
.fuse_shift_ft_mask = 0x1f << 21,
.fuse_shift_ft_shift = 21,
.fuse_spare_realignment = 0x1fc,
};
const struct tegra_soctherm_soc tegra132_soctherm = {
.tsensors = tegra132_tsensors,
.num_tsensors = ARRAY_SIZE(tegra132_tsensors),
.ttgs = tegra132_tsensor_groups,
.num_ttgs = ARRAY_SIZE(tegra132_tsensor_groups),
.tfuse = &tegra132_soctherm_fuse,
.thresh_grain = TEGRA132_THRESH_GRAIN,
.bptt = TEGRA132_BPTT,
.use_ccroc = true,
};
| linux-master | drivers/thermal/tegra/tegra132-soctherm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <soc/tegra/fuse.h>
#include "soctherm.h"
#define NOMINAL_CALIB_FT 105
#define NOMINAL_CALIB_CP 25
#define FUSE_TSENSOR_CALIB_CP_TS_BASE_MASK 0x1fff
#define FUSE_TSENSOR_CALIB_FT_TS_BASE_MASK (0x1fff << 13)
#define FUSE_TSENSOR_CALIB_FT_TS_BASE_SHIFT 13
#define FUSE_TSENSOR_COMMON 0x180
/*
* Tegra210: Layout of bits in FUSE_TSENSOR_COMMON:
* 3 2 1 0
* 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | BASE_FT | BASE_CP | SHFT_FT | SHIFT_CP |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* Tegra12x, etc:
* In chips prior to Tegra210, this fuse was incorrectly sized as 26 bits,
* and didn't hold SHIFT_CP in [31:26]. Therefore these missing six bits
* were obtained via the FUSE_SPARE_REALIGNMENT_REG register [5:0].
*
* FUSE_TSENSOR_COMMON:
* 3 2 1 0
* 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* |-----------| SHFT_FT | BASE_FT | BASE_CP |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* FUSE_SPARE_REALIGNMENT_REG:
* 3 2 1 0
* 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* |---------------------------------------------------| SHIFT_CP |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
#define CALIB_COEFFICIENT 1000000LL
/**
* div64_s64_precise() - wrapper for div64_s64()
* @a: the dividend
* @b: the divisor
*
* Implements division with fairly accurate rounding instead of truncation by
* shifting the dividend to the left by 16 so that the quotient has a
* much higher precision.
*
* Return: the quotient of a / b.
*/
static s64 div64_s64_precise(s64 a, s32 b)
{
s64 r, al;
/* Scale up for increased precision division */
al = a << 16;
r = div64_s64(al * 2 + 1, 2 * b);
return r >> 16;
}
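/*
 * Illustrative behavior (worked example): the << 16 carries 16 fractional
 * bits through the division, and the final arithmetic shift floors the
 * scaled quotient. One visible effect versus plain div64_s64(), which
 * truncates toward zero: div64_s64_precise(-3, 2) evaluates to -2, where
 * div64_s64(-3, 2) would return -1.
 */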
int tegra_calc_shared_calib(const struct tegra_soctherm_fuse *tfuse,
struct tsensor_shared_calib *shared)
{
u32 val;
s32 shifted_cp, shifted_ft;
int err;
err = tegra_fuse_readl(FUSE_TSENSOR_COMMON, &val);
if (err)
return err;
shared->base_cp = (val & tfuse->fuse_base_cp_mask) >>
tfuse->fuse_base_cp_shift;
shared->base_ft = (val & tfuse->fuse_base_ft_mask) >>
tfuse->fuse_base_ft_shift;
shifted_ft = (val & tfuse->fuse_shift_ft_mask) >>
tfuse->fuse_shift_ft_shift;
shifted_ft = sign_extend32(shifted_ft, 4);
if (tfuse->fuse_spare_realignment) {
err = tegra_fuse_readl(tfuse->fuse_spare_realignment, &val);
if (err)
return err;
}
shifted_cp = sign_extend32(val, 5);
shared->actual_temp_cp = 2 * NOMINAL_CALIB_CP + shifted_cp;
shared->actual_temp_ft = 2 * NOMINAL_CALIB_FT + shifted_ft;
return 0;
}
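/*
 * Illustrative decode, assuming hypothetical fuse values: with SHIFT_CP
 * and SHFT_FT both fused as -1, the code above yields
 * actual_temp_cp = 2 * 25 - 1 = 49 and actual_temp_ft = 2 * 105 - 1 = 209.
 * The doubled nominal temperatures appear to keep these values in units
 * of 0.5 degrees Celsius, so half-degree fuse offsets stay integral.
 */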
int tegra_calc_tsensor_calib(const struct tegra_tsensor *sensor,
const struct tsensor_shared_calib *shared,
u32 *calibration)
{
const struct tegra_tsensor_group *sensor_group;
u32 val, calib;
s32 actual_tsensor_ft, actual_tsensor_cp;
s32 delta_sens, delta_temp;
s32 mult, div;
s16 therma, thermb;
s64 temp;
int err;
sensor_group = sensor->group;
err = tegra_fuse_readl(sensor->calib_fuse_offset, &val);
if (err)
return err;
actual_tsensor_cp = (shared->base_cp * 64) + sign_extend32(val, 12);
val = (val & FUSE_TSENSOR_CALIB_FT_TS_BASE_MASK) >>
FUSE_TSENSOR_CALIB_FT_TS_BASE_SHIFT;
actual_tsensor_ft = (shared->base_ft * 32) + sign_extend32(val, 12);
delta_sens = actual_tsensor_ft - actual_tsensor_cp;
delta_temp = shared->actual_temp_ft - shared->actual_temp_cp;
mult = sensor_group->pdiv * sensor->config->tsample_ate;
div = sensor->config->tsample * sensor_group->pdiv_ate;
temp = (s64)delta_temp * (1LL << 13) * mult;
therma = div64_s64_precise(temp, (s64)delta_sens * div);
temp = ((s64)actual_tsensor_ft * shared->actual_temp_cp) -
((s64)actual_tsensor_cp * shared->actual_temp_ft);
thermb = div64_s64_precise(temp, delta_sens);
temp = (s64)therma * sensor->fuse_corr_alpha;
therma = div64_s64_precise(temp, CALIB_COEFFICIENT);
temp = (s64)thermb * sensor->fuse_corr_alpha + sensor->fuse_corr_beta;
thermb = div64_s64_precise(temp, CALIB_COEFFICIENT);
calib = ((u16)therma << SENSOR_CONFIG2_THERMA_SHIFT) |
((u16)thermb << SENSOR_CONFIG2_THERMB_SHIFT);
*calibration = calib;
return 0;
}
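/*
 * In short: the CP (~25C) and FT (~105C) fuse points define a straight
 * line from sensor counts to temperature; therma is its slope scaled by
 * 2^13 and thermb its intercept, each then adjusted by the per-sensor
 * fuse_corr_alpha/fuse_corr_beta corrections, which are expressed in
 * millionths (CALIB_COEFFICIENT).
 */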
MODULE_AUTHOR("Wei Ni <[email protected]>");
MODULE_DESCRIPTION("Tegra SOCTHERM fuse management");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/thermal/tegra/soctherm-fuse.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <soc/tegra/fuse.h>
#include <dt-bindings/thermal/tegra124-soctherm.h>
#include "soctherm.h"
#define TEGRA210_THERMTRIP_ANY_EN_MASK (0x1 << 31)
#define TEGRA210_THERMTRIP_MEM_EN_MASK (0x1 << 30)
#define TEGRA210_THERMTRIP_GPU_EN_MASK (0x1 << 29)
#define TEGRA210_THERMTRIP_CPU_EN_MASK (0x1 << 28)
#define TEGRA210_THERMTRIP_TSENSE_EN_MASK (0x1 << 27)
#define TEGRA210_THERMTRIP_GPUMEM_THRESH_MASK (0x1ff << 18)
#define TEGRA210_THERMTRIP_CPU_THRESH_MASK (0x1ff << 9)
#define TEGRA210_THERMTRIP_TSENSE_THRESH_MASK 0x1ff
#define TEGRA210_THERMCTL_LVL0_UP_THRESH_MASK (0x1ff << 18)
#define TEGRA210_THERMCTL_LVL0_DN_THRESH_MASK (0x1ff << 9)
#define TEGRA210_THRESH_GRAIN 500
#define TEGRA210_BPTT 9
static const struct tegra_tsensor_configuration tegra210_tsensor_config = {
.tall = 16300,
.tiddq_en = 1,
.ten_count = 1,
.tsample = 120,
.tsample_ate = 480,
};
static const struct tegra_tsensor_group tegra210_tsensor_group_cpu = {
.id = TEGRA124_SOCTHERM_SENSOR_CPU,
.name = "cpu",
.sensor_temp_offset = SENSOR_TEMP1,
.sensor_temp_mask = SENSOR_TEMP1_CPU_TEMP_MASK,
.pdiv = 8,
.pdiv_ate = 8,
.pdiv_mask = SENSOR_PDIV_CPU_MASK,
.pllx_hotspot_diff = 10,
.pllx_hotspot_mask = SENSOR_HOTSPOT_CPU_MASK,
.thermtrip_any_en_mask = TEGRA210_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA210_THERMTRIP_CPU_EN_MASK,
.thermtrip_threshold_mask = TEGRA210_THERMTRIP_CPU_THRESH_MASK,
.thermctl_isr_mask = THERM_IRQ_CPU_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_CPU,
.thermctl_lvl0_up_thresh_mask = TEGRA210_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA210_THERMCTL_LVL0_DN_THRESH_MASK,
};
static const struct tegra_tsensor_group tegra210_tsensor_group_gpu = {
.id = TEGRA124_SOCTHERM_SENSOR_GPU,
.name = "gpu",
.sensor_temp_offset = SENSOR_TEMP1,
.sensor_temp_mask = SENSOR_TEMP1_GPU_TEMP_MASK,
.pdiv = 8,
.pdiv_ate = 8,
.pdiv_mask = SENSOR_PDIV_GPU_MASK,
.pllx_hotspot_diff = 5,
.pllx_hotspot_mask = SENSOR_HOTSPOT_GPU_MASK,
.thermtrip_any_en_mask = TEGRA210_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA210_THERMTRIP_GPU_EN_MASK,
.thermtrip_threshold_mask = TEGRA210_THERMTRIP_GPUMEM_THRESH_MASK,
.thermctl_isr_mask = THERM_IRQ_GPU_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_GPU,
.thermctl_lvl0_up_thresh_mask = TEGRA210_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA210_THERMCTL_LVL0_DN_THRESH_MASK,
};
static const struct tegra_tsensor_group tegra210_tsensor_group_pll = {
.id = TEGRA124_SOCTHERM_SENSOR_PLLX,
.name = "pll",
.sensor_temp_offset = SENSOR_TEMP2,
.sensor_temp_mask = SENSOR_TEMP2_PLLX_TEMP_MASK,
.pdiv = 8,
.pdiv_ate = 8,
.pdiv_mask = SENSOR_PDIV_PLLX_MASK,
.thermtrip_any_en_mask = TEGRA210_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA210_THERMTRIP_TSENSE_EN_MASK,
.thermtrip_threshold_mask = TEGRA210_THERMTRIP_TSENSE_THRESH_MASK,
.thermctl_isr_mask = THERM_IRQ_TSENSE_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_TSENSE,
.thermctl_lvl0_up_thresh_mask = TEGRA210_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA210_THERMCTL_LVL0_DN_THRESH_MASK,
};
static const struct tegra_tsensor_group tegra210_tsensor_group_mem = {
.id = TEGRA124_SOCTHERM_SENSOR_MEM,
.name = "mem",
.sensor_temp_offset = SENSOR_TEMP2,
.sensor_temp_mask = SENSOR_TEMP2_MEM_TEMP_MASK,
.pdiv = 8,
.pdiv_ate = 8,
.pdiv_mask = SENSOR_PDIV_MEM_MASK,
.pllx_hotspot_diff = 0,
.pllx_hotspot_mask = SENSOR_HOTSPOT_MEM_MASK,
.thermtrip_any_en_mask = TEGRA210_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA210_THERMTRIP_MEM_EN_MASK,
.thermtrip_threshold_mask = TEGRA210_THERMTRIP_GPUMEM_THRESH_MASK,
.thermctl_isr_mask = THERM_IRQ_MEM_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_MEM,
.thermctl_lvl0_up_thresh_mask = TEGRA210_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA210_THERMCTL_LVL0_DN_THRESH_MASK,
};
static const struct tegra_tsensor_group *tegra210_tsensor_groups[] = {
&tegra210_tsensor_group_cpu,
&tegra210_tsensor_group_gpu,
&tegra210_tsensor_group_pll,
&tegra210_tsensor_group_mem,
};
static const struct tegra_tsensor tegra210_tsensors[] = {
{
.name = "cpu0",
.base = 0xc0,
.config = &tegra210_tsensor_config,
.calib_fuse_offset = 0x098,
.fuse_corr_alpha = 1085000,
.fuse_corr_beta = 3244200,
.group = &tegra210_tsensor_group_cpu,
}, {
.name = "cpu1",
.base = 0xe0,
.config = &tegra210_tsensor_config,
.calib_fuse_offset = 0x084,
.fuse_corr_alpha = 1126200,
.fuse_corr_beta = -67500,
.group = &tegra210_tsensor_group_cpu,
}, {
.name = "cpu2",
.base = 0x100,
.config = &tegra210_tsensor_config,
.calib_fuse_offset = 0x088,
.fuse_corr_alpha = 1098400,
.fuse_corr_beta = 2251100,
.group = &tegra210_tsensor_group_cpu,
}, {
.name = "cpu3",
.base = 0x120,
.config = &tegra210_tsensor_config,
.calib_fuse_offset = 0x12c,
.fuse_corr_alpha = 1108000,
.fuse_corr_beta = 602700,
.group = &tegra210_tsensor_group_cpu,
}, {
.name = "mem0",
.base = 0x140,
.config = &tegra210_tsensor_config,
.calib_fuse_offset = 0x158,
.fuse_corr_alpha = 1069200,
.fuse_corr_beta = 3549900,
.group = &tegra210_tsensor_group_mem,
}, {
.name = "mem1",
.base = 0x160,
.config = &tegra210_tsensor_config,
.calib_fuse_offset = 0x15c,
.fuse_corr_alpha = 1173700,
.fuse_corr_beta = -6263600,
.group = &tegra210_tsensor_group_mem,
}, {
.name = "gpu",
.base = 0x180,
.config = &tegra210_tsensor_config,
.calib_fuse_offset = 0x154,
.fuse_corr_alpha = 1074300,
.fuse_corr_beta = 2734900,
.group = &tegra210_tsensor_group_gpu,
}, {
.name = "pllx",
.base = 0x1a0,
.config = &tegra210_tsensor_config,
.calib_fuse_offset = 0x160,
.fuse_corr_alpha = 1039700,
.fuse_corr_beta = 6829100,
.group = &tegra210_tsensor_group_pll,
},
};
/*
* Mask/shift bits in FUSE_TSENSOR_COMMON and
 * FUSE_SPARE_REALIGNMENT_REG, which are described in
* tegra_soctherm_fuse.c
*/
static const struct tegra_soctherm_fuse tegra210_soctherm_fuse = {
.fuse_base_cp_mask = 0x3ff << 11,
.fuse_base_cp_shift = 11,
.fuse_base_ft_mask = 0x7ff << 21,
.fuse_base_ft_shift = 21,
.fuse_shift_ft_mask = 0x1f << 6,
.fuse_shift_ft_shift = 6,
.fuse_spare_realignment = 0,
};
static struct tsensor_group_thermtrips tegra210_tsensor_thermtrips[] = {
{.id = TEGRA124_SOCTHERM_SENSOR_NUM},
{.id = TEGRA124_SOCTHERM_SENSOR_NUM},
{.id = TEGRA124_SOCTHERM_SENSOR_NUM},
{.id = TEGRA124_SOCTHERM_SENSOR_NUM},
};
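/*
 * The sentinel ids above mean "no thermtrip temperature known yet"; the
 * common soctherm probe code is expected to fill these entries in from
 * the device tree ("nvidia,thermtrips" property) when it is present.
 */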
const struct tegra_soctherm_soc tegra210_soctherm = {
.tsensors = tegra210_tsensors,
.num_tsensors = ARRAY_SIZE(tegra210_tsensors),
.ttgs = tegra210_tsensor_groups,
.num_ttgs = ARRAY_SIZE(tegra210_tsensor_groups),
.tfuse = &tegra210_soctherm_fuse,
.thresh_grain = TEGRA210_THRESH_GRAIN,
.bptt = TEGRA210_BPTT,
.use_ccroc = false,
.thermtrips = tegra210_tsensor_thermtrips,
};
| linux-master | drivers/thermal/tegra/tegra210-soctherm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
*
* Author:
* Mikko Perttunen <[email protected]>
* Aapo Vienamo <[email protected]>
*/
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/thermal.h>
#include <linux/workqueue.h>
#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>
struct tegra_bpmp_thermal_zone {
struct tegra_bpmp_thermal *tegra;
struct thermal_zone_device *tzd;
struct work_struct tz_device_update_work;
unsigned int idx;
};
struct tegra_bpmp_thermal {
struct device *dev;
struct tegra_bpmp *bpmp;
unsigned int num_zones;
struct tegra_bpmp_thermal_zone **zones;
};
static int __tegra_bpmp_thermal_get_temp(struct tegra_bpmp_thermal_zone *zone,
int *out_temp)
{
struct mrq_thermal_host_to_bpmp_request req;
union mrq_thermal_bpmp_to_host_response reply;
struct tegra_bpmp_message msg;
int err;
memset(&req, 0, sizeof(req));
req.type = CMD_THERMAL_GET_TEMP;
req.get_temp.zone = zone->idx;
memset(&msg, 0, sizeof(msg));
msg.mrq = MRQ_THERMAL;
msg.tx.data = &req;
msg.tx.size = sizeof(req);
msg.rx.data = &reply;
msg.rx.size = sizeof(reply);
err = tegra_bpmp_transfer(zone->tegra->bpmp, &msg);
if (err)
return err;
if (msg.rx.ret == -BPMP_EFAULT)
return -EAGAIN;
if (msg.rx.ret)
return -EINVAL;
*out_temp = reply.get_temp.temp;
return 0;
}
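/*
 * Note: -BPMP_EFAULT is mapped to -EAGAIN above because sensors in
 * power-gated domains can transiently fail to read; the probe path
 * below tolerates -EAGAIN for the same reason.
 */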
static int tegra_bpmp_thermal_get_temp(struct thermal_zone_device *tz, int *out_temp)
{
struct tegra_bpmp_thermal_zone *zone = thermal_zone_device_priv(tz);
return __tegra_bpmp_thermal_get_temp(zone, out_temp);
}
static int tegra_bpmp_thermal_set_trips(struct thermal_zone_device *tz, int low, int high)
{
struct tegra_bpmp_thermal_zone *zone = thermal_zone_device_priv(tz);
struct mrq_thermal_host_to_bpmp_request req;
struct tegra_bpmp_message msg;
int err;
memset(&req, 0, sizeof(req));
req.type = CMD_THERMAL_SET_TRIP;
req.set_trip.zone = zone->idx;
req.set_trip.enabled = true;
req.set_trip.low = low;
req.set_trip.high = high;
memset(&msg, 0, sizeof(msg));
msg.mrq = MRQ_THERMAL;
msg.tx.data = &req;
msg.tx.size = sizeof(req);
err = tegra_bpmp_transfer(zone->tegra->bpmp, &msg);
if (err)
return err;
if (msg.rx.ret)
return -EINVAL;
return 0;
}
static void tz_device_update_work_fn(struct work_struct *work)
{
struct tegra_bpmp_thermal_zone *zone;
zone = container_of(work, struct tegra_bpmp_thermal_zone,
tz_device_update_work);
thermal_zone_device_update(zone->tzd, THERMAL_TRIP_VIOLATED);
}
static void bpmp_mrq_thermal(unsigned int mrq, struct tegra_bpmp_channel *ch,
void *data)
{
struct mrq_thermal_bpmp_to_host_request req;
struct tegra_bpmp_thermal *tegra = data;
size_t offset;
int i;
offset = offsetof(struct tegra_bpmp_mb_data, data);
iosys_map_memcpy_from(&req, &ch->ib, offset, sizeof(req));
if (req.type != CMD_THERMAL_HOST_TRIP_REACHED) {
dev_err(tegra->dev, "%s: invalid request type: %d\n", __func__, req.type);
tegra_bpmp_mrq_return(ch, -EINVAL, NULL, 0);
return;
}
for (i = 0; i < tegra->num_zones; ++i) {
if (tegra->zones[i]->idx != req.host_trip_reached.zone)
continue;
schedule_work(&tegra->zones[i]->tz_device_update_work);
tegra_bpmp_mrq_return(ch, 0, NULL, 0);
return;
}
dev_err(tegra->dev, "%s: invalid thermal zone: %d\n", __func__,
req.host_trip_reached.zone);
tegra_bpmp_mrq_return(ch, -EINVAL, NULL, 0);
}
static int tegra_bpmp_thermal_get_num_zones(struct tegra_bpmp *bpmp,
					    unsigned int *num_zones)
{
struct mrq_thermal_host_to_bpmp_request req;
union mrq_thermal_bpmp_to_host_response reply;
struct tegra_bpmp_message msg;
int err;
memset(&req, 0, sizeof(req));
req.type = CMD_THERMAL_GET_NUM_ZONES;
memset(&msg, 0, sizeof(msg));
msg.mrq = MRQ_THERMAL;
msg.tx.data = &req;
msg.tx.size = sizeof(req);
msg.rx.data = &reply;
msg.rx.size = sizeof(reply);
err = tegra_bpmp_transfer(bpmp, &msg);
if (err)
return err;
if (msg.rx.ret)
return -EINVAL;
*num_zones = reply.get_num_zones.num;
return 0;
}
static int tegra_bpmp_thermal_trips_supported(struct tegra_bpmp *bpmp, bool *supported)
{
struct mrq_thermal_host_to_bpmp_request req;
union mrq_thermal_bpmp_to_host_response reply;
struct tegra_bpmp_message msg;
int err;
memset(&req, 0, sizeof(req));
req.type = CMD_THERMAL_QUERY_ABI;
req.query_abi.type = CMD_THERMAL_SET_TRIP;
memset(&msg, 0, sizeof(msg));
msg.mrq = MRQ_THERMAL;
msg.tx.data = &req;
msg.tx.size = sizeof(req);
msg.rx.data = &reply;
msg.rx.size = sizeof(reply);
err = tegra_bpmp_transfer(bpmp, &msg);
if (err)
return err;
if (msg.rx.ret == 0) {
*supported = true;
return 0;
} else if (msg.rx.ret == -BPMP_ENODEV) {
*supported = false;
return 0;
} else {
return -EINVAL;
}
}
static const struct thermal_zone_device_ops tegra_bpmp_of_thermal_ops = {
.get_temp = tegra_bpmp_thermal_get_temp,
.set_trips = tegra_bpmp_thermal_set_trips,
};
static const struct thermal_zone_device_ops tegra_bpmp_of_thermal_ops_notrips = {
.get_temp = tegra_bpmp_thermal_get_temp,
};
static int tegra_bpmp_thermal_probe(struct platform_device *pdev)
{
struct tegra_bpmp *bpmp = dev_get_drvdata(pdev->dev.parent);
const struct thermal_zone_device_ops *thermal_ops;
struct tegra_bpmp_thermal *tegra;
struct thermal_zone_device *tzd;
unsigned int i, max_num_zones;
bool supported;
int err;
err = tegra_bpmp_thermal_trips_supported(bpmp, &supported);
if (err) {
dev_err(&pdev->dev, "failed to determine if trip points are supported\n");
return err;
}
if (supported)
thermal_ops = &tegra_bpmp_of_thermal_ops;
else
thermal_ops = &tegra_bpmp_of_thermal_ops_notrips;
tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
if (!tegra)
return -ENOMEM;
tegra->dev = &pdev->dev;
tegra->bpmp = bpmp;
err = tegra_bpmp_thermal_get_num_zones(bpmp, &max_num_zones);
if (err) {
dev_err(&pdev->dev, "failed to get the number of zones: %d\n",
err);
return err;
}
tegra->zones = devm_kcalloc(&pdev->dev, max_num_zones,
sizeof(*tegra->zones), GFP_KERNEL);
if (!tegra->zones)
return -ENOMEM;
for (i = 0; i < max_num_zones; ++i) {
struct tegra_bpmp_thermal_zone *zone;
int temp;
zone = devm_kzalloc(&pdev->dev, sizeof(*zone), GFP_KERNEL);
if (!zone)
return -ENOMEM;
zone->idx = i;
zone->tegra = tegra;
err = __tegra_bpmp_thermal_get_temp(zone, &temp);
/*
* Sensors in powergated domains may temporarily fail to be read
* (-EAGAIN), but will become accessible when the domain is powered on.
*/
if (err < 0 && err != -EAGAIN) {
devm_kfree(&pdev->dev, zone);
continue;
}
tzd = devm_thermal_of_zone_register(
&pdev->dev, i, zone, thermal_ops);
if (IS_ERR(tzd)) {
if (PTR_ERR(tzd) == -EPROBE_DEFER)
return -EPROBE_DEFER;
devm_kfree(&pdev->dev, zone);
continue;
}
zone->tzd = tzd;
INIT_WORK(&zone->tz_device_update_work,
tz_device_update_work_fn);
tegra->zones[tegra->num_zones++] = zone;
}
err = tegra_bpmp_request_mrq(bpmp, MRQ_THERMAL, bpmp_mrq_thermal,
tegra);
if (err) {
dev_err(&pdev->dev, "failed to register mrq handler: %d\n",
err);
return err;
}
platform_set_drvdata(pdev, tegra);
return 0;
}
static int tegra_bpmp_thermal_remove(struct platform_device *pdev)
{
struct tegra_bpmp_thermal *tegra = platform_get_drvdata(pdev);
tegra_bpmp_free_mrq(tegra->bpmp, MRQ_THERMAL, tegra);
return 0;
}
static const struct of_device_id tegra_bpmp_thermal_of_match[] = {
{ .compatible = "nvidia,tegra186-bpmp-thermal" },
{ },
};
MODULE_DEVICE_TABLE(of, tegra_bpmp_thermal_of_match);
static struct platform_driver tegra_bpmp_thermal_driver = {
.probe = tegra_bpmp_thermal_probe,
.remove = tegra_bpmp_thermal_remove,
.driver = {
.name = "tegra-bpmp-thermal",
.of_match_table = tegra_bpmp_thermal_of_match,
},
};
module_platform_driver(tegra_bpmp_thermal_driver);
MODULE_AUTHOR("Mikko Perttunen <[email protected]>");
MODULE_DESCRIPTION("NVIDIA Tegra BPMP thermal sensor driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/thermal/tegra/tegra-bpmp-thermal.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <dt-bindings/thermal/tegra124-soctherm.h>
#include "soctherm.h"
#define TEGRA124_THERMTRIP_ANY_EN_MASK (0x1 << 28)
#define TEGRA124_THERMTRIP_MEM_EN_MASK (0x1 << 27)
#define TEGRA124_THERMTRIP_GPU_EN_MASK (0x1 << 26)
#define TEGRA124_THERMTRIP_CPU_EN_MASK (0x1 << 25)
#define TEGRA124_THERMTRIP_TSENSE_EN_MASK (0x1 << 24)
#define TEGRA124_THERMTRIP_GPUMEM_THRESH_MASK (0xff << 16)
#define TEGRA124_THERMTRIP_CPU_THRESH_MASK (0xff << 8)
#define TEGRA124_THERMTRIP_TSENSE_THRESH_MASK 0xff
#define TEGRA124_THERMCTL_LVL0_UP_THRESH_MASK (0xff << 17)
#define TEGRA124_THERMCTL_LVL0_DN_THRESH_MASK (0xff << 9)
#define TEGRA124_THRESH_GRAIN 1000
#define TEGRA124_BPTT 8
static const struct tegra_tsensor_configuration tegra124_tsensor_config = {
.tall = 16300,
.tiddq_en = 1,
.ten_count = 1,
.tsample = 120,
.tsample_ate = 480,
};
static const struct tegra_tsensor_group tegra124_tsensor_group_cpu = {
.id = TEGRA124_SOCTHERM_SENSOR_CPU,
.name = "cpu",
.sensor_temp_offset = SENSOR_TEMP1,
.sensor_temp_mask = SENSOR_TEMP1_CPU_TEMP_MASK,
.pdiv = 8,
.pdiv_ate = 8,
.pdiv_mask = SENSOR_PDIV_CPU_MASK,
.pllx_hotspot_diff = 10,
.pllx_hotspot_mask = SENSOR_HOTSPOT_CPU_MASK,
.thermtrip_any_en_mask = TEGRA124_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA124_THERMTRIP_CPU_EN_MASK,
.thermtrip_threshold_mask = TEGRA124_THERMTRIP_CPU_THRESH_MASK,
.thermctl_isr_mask = THERM_IRQ_CPU_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_CPU,
.thermctl_lvl0_up_thresh_mask = TEGRA124_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA124_THERMCTL_LVL0_DN_THRESH_MASK,
};
static const struct tegra_tsensor_group tegra124_tsensor_group_gpu = {
.id = TEGRA124_SOCTHERM_SENSOR_GPU,
.name = "gpu",
.sensor_temp_offset = SENSOR_TEMP1,
.sensor_temp_mask = SENSOR_TEMP1_GPU_TEMP_MASK,
.pdiv = 8,
.pdiv_ate = 8,
.pdiv_mask = SENSOR_PDIV_GPU_MASK,
.pllx_hotspot_diff = 5,
.pllx_hotspot_mask = SENSOR_HOTSPOT_GPU_MASK,
.thermtrip_any_en_mask = TEGRA124_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA124_THERMTRIP_GPU_EN_MASK,
.thermtrip_threshold_mask = TEGRA124_THERMTRIP_GPUMEM_THRESH_MASK,
.thermctl_isr_mask = THERM_IRQ_GPU_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_GPU,
.thermctl_lvl0_up_thresh_mask = TEGRA124_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA124_THERMCTL_LVL0_DN_THRESH_MASK,
};
static const struct tegra_tsensor_group tegra124_tsensor_group_pll = {
.id = TEGRA124_SOCTHERM_SENSOR_PLLX,
.name = "pll",
.sensor_temp_offset = SENSOR_TEMP2,
.sensor_temp_mask = SENSOR_TEMP2_PLLX_TEMP_MASK,
.pdiv = 8,
.pdiv_ate = 8,
.pdiv_mask = SENSOR_PDIV_PLLX_MASK,
.thermtrip_any_en_mask = TEGRA124_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA124_THERMTRIP_TSENSE_EN_MASK,
.thermtrip_threshold_mask = TEGRA124_THERMTRIP_TSENSE_THRESH_MASK,
.thermctl_isr_mask = THERM_IRQ_TSENSE_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_TSENSE,
.thermctl_lvl0_up_thresh_mask = TEGRA124_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA124_THERMCTL_LVL0_DN_THRESH_MASK,
};
static const struct tegra_tsensor_group tegra124_tsensor_group_mem = {
.id = TEGRA124_SOCTHERM_SENSOR_MEM,
.name = "mem",
.sensor_temp_offset = SENSOR_TEMP2,
.sensor_temp_mask = SENSOR_TEMP2_MEM_TEMP_MASK,
.pdiv = 8,
.pdiv_ate = 8,
.pdiv_mask = SENSOR_PDIV_MEM_MASK,
.pllx_hotspot_diff = 0,
.pllx_hotspot_mask = SENSOR_HOTSPOT_MEM_MASK,
.thermtrip_any_en_mask = TEGRA124_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA124_THERMTRIP_MEM_EN_MASK,
.thermtrip_threshold_mask = TEGRA124_THERMTRIP_GPUMEM_THRESH_MASK,
.thermctl_isr_mask = THERM_IRQ_MEM_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_MEM,
.thermctl_lvl0_up_thresh_mask = TEGRA124_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA124_THERMCTL_LVL0_DN_THRESH_MASK,
};
static const struct tegra_tsensor_group *tegra124_tsensor_groups[] = {
&tegra124_tsensor_group_cpu,
&tegra124_tsensor_group_gpu,
&tegra124_tsensor_group_pll,
&tegra124_tsensor_group_mem,
};
static const struct tegra_tsensor tegra124_tsensors[] = {
{
.name = "cpu0",
.base = 0xc0,
.config = &tegra124_tsensor_config,
.calib_fuse_offset = 0x098,
.fuse_corr_alpha = 1135400,
.fuse_corr_beta = -6266900,
.group = &tegra124_tsensor_group_cpu,
}, {
.name = "cpu1",
.base = 0xe0,
.config = &tegra124_tsensor_config,
.calib_fuse_offset = 0x084,
.fuse_corr_alpha = 1122220,
.fuse_corr_beta = -5700700,
.group = &tegra124_tsensor_group_cpu,
}, {
.name = "cpu2",
.base = 0x100,
.config = &tegra124_tsensor_config,
.calib_fuse_offset = 0x088,
.fuse_corr_alpha = 1127000,
.fuse_corr_beta = -6768200,
.group = &tegra124_tsensor_group_cpu,
}, {
.name = "cpu3",
.base = 0x120,
.config = &tegra124_tsensor_config,
.calib_fuse_offset = 0x12c,
.fuse_corr_alpha = 1110900,
.fuse_corr_beta = -6232000,
.group = &tegra124_tsensor_group_cpu,
}, {
.name = "mem0",
.base = 0x140,
.config = &tegra124_tsensor_config,
.calib_fuse_offset = 0x158,
.fuse_corr_alpha = 1122300,
.fuse_corr_beta = -5936400,
.group = &tegra124_tsensor_group_mem,
}, {
.name = "mem1",
.base = 0x160,
.config = &tegra124_tsensor_config,
.calib_fuse_offset = 0x15c,
.fuse_corr_alpha = 1145700,
.fuse_corr_beta = -7124600,
.group = &tegra124_tsensor_group_mem,
}, {
.name = "gpu",
.base = 0x180,
.config = &tegra124_tsensor_config,
.calib_fuse_offset = 0x154,
.fuse_corr_alpha = 1120100,
.fuse_corr_beta = -6000500,
.group = &tegra124_tsensor_group_gpu,
}, {
.name = "pllx",
.base = 0x1a0,
.config = &tegra124_tsensor_config,
.calib_fuse_offset = 0x160,
.fuse_corr_alpha = 1106500,
.fuse_corr_beta = -6729300,
.group = &tegra124_tsensor_group_pll,
},
};
/*
* Mask/shift bits in FUSE_TSENSOR_COMMON and
 * FUSE_SPARE_REALIGNMENT_REG, which are described in
* tegra_soctherm_fuse.c
*/
static const struct tegra_soctherm_fuse tegra124_soctherm_fuse = {
.fuse_base_cp_mask = 0x3ff,
.fuse_base_cp_shift = 0,
.fuse_base_ft_mask = 0x7ff << 10,
.fuse_base_ft_shift = 10,
.fuse_shift_ft_mask = 0x1f << 21,
.fuse_shift_ft_shift = 21,
.fuse_spare_realignment = 0x1fc,
};
const struct tegra_soctherm_soc tegra124_soctherm = {
.tsensors = tegra124_tsensors,
.num_tsensors = ARRAY_SIZE(tegra124_tsensors),
.ttgs = tegra124_tsensor_groups,
.num_ttgs = ARRAY_SIZE(tegra124_tsensor_groups),
.tfuse = &tegra124_soctherm_fuse,
.thresh_grain = TEGRA124_THRESH_GRAIN,
.bptt = TEGRA124_BPTT,
.use_ccroc = false,
};
| linux-master | drivers/thermal/tegra/tegra124-soctherm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2014 - 2018, NVIDIA CORPORATION. All rights reserved.
*
* Author:
* Mikko Perttunen <[email protected]>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/debugfs.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/thermal.h>
#include <dt-bindings/thermal/tegra124-soctherm.h>
#include "../thermal_core.h"
#include "soctherm.h"
#define SENSOR_CONFIG0 0
#define SENSOR_CONFIG0_STOP BIT(0)
#define SENSOR_CONFIG0_CPTR_OVER BIT(2)
#define SENSOR_CONFIG0_OVER BIT(3)
#define SENSOR_CONFIG0_TCALC_OVER BIT(4)
#define SENSOR_CONFIG0_TALL_MASK (0xfffff << 8)
#define SENSOR_CONFIG0_TALL_SHIFT 8
#define SENSOR_CONFIG1 4
#define SENSOR_CONFIG1_TSAMPLE_MASK 0x3ff
#define SENSOR_CONFIG1_TSAMPLE_SHIFT 0
#define SENSOR_CONFIG1_TIDDQ_EN_MASK (0x3f << 15)
#define SENSOR_CONFIG1_TIDDQ_EN_SHIFT 15
#define SENSOR_CONFIG1_TEN_COUNT_MASK (0x3f << 24)
#define SENSOR_CONFIG1_TEN_COUNT_SHIFT 24
#define SENSOR_CONFIG1_TEMP_ENABLE BIT(31)
/*
* SENSOR_CONFIG2 is defined in soctherm.h
* because, it will be used by tegra_soctherm_fuse.c
*/
#define SENSOR_STATUS0 0xc
#define SENSOR_STATUS0_VALID_MASK BIT(31)
#define SENSOR_STATUS0_CAPTURE_MASK 0xffff
#define SENSOR_STATUS1 0x10
#define SENSOR_STATUS1_TEMP_VALID_MASK BIT(31)
#define SENSOR_STATUS1_TEMP_MASK 0xffff
#define READBACK_VALUE_MASK 0xff00
#define READBACK_VALUE_SHIFT 8
#define READBACK_ADD_HALF BIT(7)
#define READBACK_NEGATE BIT(0)
/*
* THERMCTL_LEVEL0_GROUP_CPU is defined in soctherm.h
* because it will be used by tegraxxx_soctherm.c
*/
#define THERMCTL_LVL0_CPU0_EN_MASK BIT(8)
#define THERMCTL_LVL0_CPU0_CPU_THROT_MASK (0x3 << 5)
#define THERMCTL_LVL0_CPU0_CPU_THROT_LIGHT 0x1
#define THERMCTL_LVL0_CPU0_CPU_THROT_HEAVY 0x2
#define THERMCTL_LVL0_CPU0_GPU_THROT_MASK (0x3 << 3)
#define THERMCTL_LVL0_CPU0_GPU_THROT_LIGHT 0x1
#define THERMCTL_LVL0_CPU0_GPU_THROT_HEAVY 0x2
#define THERMCTL_LVL0_CPU0_MEM_THROT_MASK BIT(2)
#define THERMCTL_LVL0_CPU0_STATUS_MASK 0x3
#define THERMCTL_LVL0_UP_STATS 0x10
#define THERMCTL_LVL0_DN_STATS 0x14
#define THERMCTL_INTR_STATUS 0x84
#define TH_INTR_MD0_MASK BIT(25)
#define TH_INTR_MU0_MASK BIT(24)
#define TH_INTR_GD0_MASK BIT(17)
#define TH_INTR_GU0_MASK BIT(16)
#define TH_INTR_CD0_MASK BIT(9)
#define TH_INTR_CU0_MASK BIT(8)
#define TH_INTR_PD0_MASK BIT(1)
#define TH_INTR_PU0_MASK BIT(0)
#define TH_INTR_IGNORE_MASK 0xFCFCFCFC
#define THERMCTL_STATS_CTL 0x94
#define STATS_CTL_CLR_DN 0x8
#define STATS_CTL_EN_DN 0x4
#define STATS_CTL_CLR_UP 0x2
#define STATS_CTL_EN_UP 0x1
#define OC1_CFG 0x310
#define OC1_CFG_LONG_LATENCY_MASK BIT(6)
#define OC1_CFG_HW_RESTORE_MASK BIT(5)
#define OC1_CFG_PWR_GOOD_MASK_MASK BIT(4)
#define OC1_CFG_THROTTLE_MODE_MASK (0x3 << 2)
#define OC1_CFG_ALARM_POLARITY_MASK BIT(1)
#define OC1_CFG_EN_THROTTLE_MASK BIT(0)
#define OC1_CNT_THRESHOLD 0x314
#define OC1_THROTTLE_PERIOD 0x318
#define OC1_ALARM_COUNT 0x31c
#define OC1_FILTER 0x320
#define OC1_STATS 0x3a8
#define OC_INTR_STATUS 0x39c
#define OC_INTR_ENABLE 0x3a0
#define OC_INTR_DISABLE 0x3a4
#define OC_STATS_CTL 0x3c4
#define OC_STATS_CTL_CLR_ALL 0x2
#define OC_STATS_CTL_EN_ALL 0x1
#define OC_INTR_OC1_MASK BIT(0)
#define OC_INTR_OC2_MASK BIT(1)
#define OC_INTR_OC3_MASK BIT(2)
#define OC_INTR_OC4_MASK BIT(3)
#define OC_INTR_OC5_MASK BIT(4)
#define THROT_GLOBAL_CFG 0x400
#define THROT_GLOBAL_ENB_MASK BIT(0)
#define CPU_PSKIP_STATUS 0x418
#define XPU_PSKIP_STATUS_M_MASK (0xff << 12)
#define XPU_PSKIP_STATUS_N_MASK (0xff << 4)
#define XPU_PSKIP_STATUS_SW_OVERRIDE_MASK BIT(1)
#define XPU_PSKIP_STATUS_ENABLED_MASK BIT(0)
#define THROT_PRIORITY_LOCK 0x424
#define THROT_PRIORITY_LOCK_PRIORITY_MASK 0xff
#define THROT_STATUS 0x428
#define THROT_STATUS_BREACH_MASK BIT(12)
#define THROT_STATUS_STATE_MASK (0xff << 4)
#define THROT_STATUS_ENABLED_MASK BIT(0)
#define THROT_PSKIP_CTRL_LITE_CPU 0x430
#define THROT_PSKIP_CTRL_ENABLE_MASK BIT(31)
#define THROT_PSKIP_CTRL_DIVIDEND_MASK (0xff << 8)
#define THROT_PSKIP_CTRL_DIVISOR_MASK 0xff
#define THROT_PSKIP_CTRL_VECT_GPU_MASK (0x7 << 16)
#define THROT_PSKIP_CTRL_VECT_CPU_MASK (0x7 << 8)
#define THROT_PSKIP_CTRL_VECT2_CPU_MASK 0x7
#define THROT_VECT_NONE 0x0 /* 3'b000 */
#define THROT_VECT_LOW 0x1 /* 3'b001 */
#define THROT_VECT_MED 0x3 /* 3'b011 */
#define THROT_VECT_HIGH 0x7 /* 3'b111 */
#define THROT_PSKIP_RAMP_LITE_CPU 0x434
#define THROT_PSKIP_RAMP_SEQ_BYPASS_MODE_MASK BIT(31)
#define THROT_PSKIP_RAMP_DURATION_MASK (0xffff << 8)
#define THROT_PSKIP_RAMP_STEP_MASK 0xff
#define THROT_PRIORITY_LITE 0x444
#define THROT_PRIORITY_LITE_PRIO_MASK 0xff
#define THROT_DELAY_LITE 0x448
#define THROT_DELAY_LITE_DELAY_MASK 0xff
/* car register offsets needed for enabling HW throttling */
#define CAR_SUPER_CCLKG_DIVIDER 0x36c
#define CDIVG_USE_THERM_CONTROLS_MASK BIT(30)
/* ccroc register offsets needed for enabling HW throttling for Tegra132 */
#define CCROC_SUPER_CCLKG_DIVIDER 0x024
#define CCROC_GLOBAL_CFG 0x148
#define CCROC_THROT_PSKIP_RAMP_CPU 0x150
#define CCROC_THROT_PSKIP_RAMP_SEQ_BYPASS_MODE_MASK BIT(31)
#define CCROC_THROT_PSKIP_RAMP_DURATION_MASK (0xffff << 8)
#define CCROC_THROT_PSKIP_RAMP_STEP_MASK 0xff
#define CCROC_THROT_PSKIP_CTRL_CPU 0x154
#define CCROC_THROT_PSKIP_CTRL_ENB_MASK BIT(31)
#define CCROC_THROT_PSKIP_CTRL_DIVIDEND_MASK (0xff << 8)
#define CCROC_THROT_PSKIP_CTRL_DIVISOR_MASK 0xff
/* get val from register(r) mask bits(m) */
#define REG_GET_MASK(r, m) (((r) & (m)) >> (ffs(m) - 1))
/* set val(v) to mask bits(m) of register(r) */
#define REG_SET_MASK(r, m, v) (((r) & ~(m)) | \
(((v) & (m >> (ffs(m) - 1))) << (ffs(m) - 1)))
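/*
 * For illustration: with r = 0x00012345 and m = 0xff00 (ffs(m) - 1 == 8),
 * REG_GET_MASK(r, m) extracts 0x23, and REG_SET_MASK(r, m, 0xab)
 * produces 0x0001ab45.
 */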
/* get dividend from the depth */
#define THROT_DEPTH_DIVIDEND(depth) ((256 * (100 - (depth)) / 100) - 1)
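/* e.g. a 50% throttle depth gives a dividend of (256 * 50 / 100) - 1 = 127 */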
/* gk20a nv_therm interface N:3 Mapping. Levels defined in tegra124-soctherm.h
* level vector
* NONE 3'b000
* LOW 3'b001
* MED 3'b011
* HIGH 3'b111
*/
#define THROT_LEVEL_TO_DEPTH(level) ((0x1 << (level)) - 1)
/* get THROT_PSKIP_xxx offset per LIGHT/HEAVY throt and CPU/GPU dev */
#define THROT_OFFSET 0x30
#define THROT_PSKIP_CTRL(throt, dev) (THROT_PSKIP_CTRL_LITE_CPU + \
(THROT_OFFSET * throt) + (8 * dev))
#define THROT_PSKIP_RAMP(throt, dev) (THROT_PSKIP_RAMP_LITE_CPU + \
(THROT_OFFSET * throt) + (8 * dev))
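/*
 * For illustration: THROT_PSKIP_CTRL(THROTTLE_HEAVY, THROTTLE_DEV_GPU)
 * resolves to 0x430 + 0x30 * 1 + 8 * 1 = 0x468.
 */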
/* get THROT_xxx_CTRL offset per LIGHT/HEAVY throt */
#define THROT_PRIORITY_CTRL(throt) (THROT_PRIORITY_LITE + \
(THROT_OFFSET * throt))
#define THROT_DELAY_CTRL(throt) (THROT_DELAY_LITE + \
(THROT_OFFSET * throt))
#define ALARM_OFFSET 0x14
#define ALARM_CFG(throt) (OC1_CFG + \
(ALARM_OFFSET * (throt - THROTTLE_OC1)))
#define ALARM_CNT_THRESHOLD(throt) (OC1_CNT_THRESHOLD + \
(ALARM_OFFSET * (throt - THROTTLE_OC1)))
#define ALARM_THROTTLE_PERIOD(throt) (OC1_THROTTLE_PERIOD + \
(ALARM_OFFSET * (throt - THROTTLE_OC1)))
#define ALARM_ALARM_COUNT(throt) (OC1_ALARM_COUNT + \
(ALARM_OFFSET * (throt - THROTTLE_OC1)))
#define ALARM_FILTER(throt) (OC1_FILTER + \
(ALARM_OFFSET * (throt - THROTTLE_OC1)))
#define ALARM_STATS(throt) (OC1_STATS + \
(4 * (throt - THROTTLE_OC1)))
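/*
 * For illustration: ALARM_CFG(THROTTLE_OC2) resolves to
 * 0x310 + 0x14 * (3 - 2) = 0x324; each OC alarm's register bank sits
 * ALARM_OFFSET bytes after the previous one.
 */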
/* get CCROC_THROT_PSKIP_xxx offset per HIGH/MED/LOW vect*/
#define CCROC_THROT_OFFSET 0x0c
#define CCROC_THROT_PSKIP_CTRL_CPU_REG(vect) (CCROC_THROT_PSKIP_CTRL_CPU + \
(CCROC_THROT_OFFSET * vect))
#define CCROC_THROT_PSKIP_RAMP_CPU_REG(vect) (CCROC_THROT_PSKIP_RAMP_CPU + \
(CCROC_THROT_OFFSET * vect))
/* get THERMCTL_LEVELx offset per CPU/GPU/MEM/TSENSE rg and LEVEL0~3 lv */
#define THERMCTL_LVL_REGS_SIZE 0x20
#define THERMCTL_LVL_REG(rg, lv) ((rg) + ((lv) * THERMCTL_LVL_REGS_SIZE))
#define OC_THROTTLE_MODE_DISABLED 0
#define OC_THROTTLE_MODE_BRIEF 2
static const int min_low_temp = -127000;
static const int max_high_temp = 127000;
enum soctherm_throttle_id {
THROTTLE_LIGHT = 0,
THROTTLE_HEAVY,
THROTTLE_OC1,
THROTTLE_OC2,
THROTTLE_OC3,
THROTTLE_OC4,
THROTTLE_OC5, /* OC5 is reserved */
THROTTLE_SIZE,
};
enum soctherm_oc_irq_id {
TEGRA_SOC_OC_IRQ_1,
TEGRA_SOC_OC_IRQ_2,
TEGRA_SOC_OC_IRQ_3,
TEGRA_SOC_OC_IRQ_4,
TEGRA_SOC_OC_IRQ_5,
TEGRA_SOC_OC_IRQ_MAX,
};
enum soctherm_throttle_dev_id {
THROTTLE_DEV_CPU = 0,
THROTTLE_DEV_GPU,
THROTTLE_DEV_SIZE,
};
static const char *const throt_names[] = {
[THROTTLE_LIGHT] = "light",
[THROTTLE_HEAVY] = "heavy",
[THROTTLE_OC1] = "oc1",
[THROTTLE_OC2] = "oc2",
[THROTTLE_OC3] = "oc3",
[THROTTLE_OC4] = "oc4",
[THROTTLE_OC5] = "oc5",
};
struct tegra_soctherm;
struct tegra_thermctl_zone {
void __iomem *reg;
struct device *dev;
struct tegra_soctherm *ts;
struct thermal_zone_device *tz;
const struct tegra_tsensor_group *sg;
};
struct soctherm_oc_cfg {
u32 active_low;
u32 throt_period;
u32 alarm_cnt_thresh;
u32 alarm_filter;
u32 mode;
bool intr_en;
};
struct soctherm_throt_cfg {
const char *name;
unsigned int id;
u8 priority;
u8 cpu_throt_level;
u32 cpu_throt_depth;
u32 gpu_throt_level;
struct soctherm_oc_cfg oc_cfg;
struct thermal_cooling_device *cdev;
bool init;
};
struct tegra_soctherm {
struct reset_control *reset;
struct clk *clock_tsensor;
struct clk *clock_soctherm;
void __iomem *regs;
void __iomem *clk_regs;
void __iomem *ccroc_regs;
int thermal_irq;
int edp_irq;
u32 *calib;
struct thermal_zone_device **thermctl_tzs;
struct tegra_soctherm_soc *soc;
struct soctherm_throt_cfg throt_cfgs[THROTTLE_SIZE];
struct dentry *debugfs_dir;
struct mutex thermctl_lock;
};
struct soctherm_oc_irq_chip_data {
struct mutex irq_lock; /* serialize OC IRQs */
struct irq_chip irq_chip;
struct irq_domain *domain;
int irq_enable;
};
static struct soctherm_oc_irq_chip_data soc_irq_cdata;
/**
* ccroc_writel() - writes a value to a CCROC register
* @ts: pointer to a struct tegra_soctherm
* @value: the value to write
* @reg: the register offset
*
 * Writes @value to @reg. No return value.
*/
static inline void ccroc_writel(struct tegra_soctherm *ts, u32 value, u32 reg)
{
writel(value, (ts->ccroc_regs + reg));
}
/**
* ccroc_readl() - reads specified register from CCROC IP block
* @ts: pointer to a struct tegra_soctherm
* @reg: register address to be read
*
* Return: the value of the register
*/
static inline u32 ccroc_readl(struct tegra_soctherm *ts, u32 reg)
{
return readl(ts->ccroc_regs + reg);
}
static void enable_tsensor(struct tegra_soctherm *tegra, unsigned int i)
{
const struct tegra_tsensor *sensor = &tegra->soc->tsensors[i];
void __iomem *base = tegra->regs + sensor->base;
unsigned int val;
val = sensor->config->tall << SENSOR_CONFIG0_TALL_SHIFT;
writel(val, base + SENSOR_CONFIG0);
val = (sensor->config->tsample - 1) << SENSOR_CONFIG1_TSAMPLE_SHIFT;
val |= sensor->config->tiddq_en << SENSOR_CONFIG1_TIDDQ_EN_SHIFT;
val |= sensor->config->ten_count << SENSOR_CONFIG1_TEN_COUNT_SHIFT;
val |= SENSOR_CONFIG1_TEMP_ENABLE;
writel(val, base + SENSOR_CONFIG1);
writel(tegra->calib[i], base + SENSOR_CONFIG2);
}
/*
* Translate from soctherm readback format to millicelsius.
* The soctherm readback format in bits is as follows:
* TTTTTTTT H______N
* where T's contain the temperature in Celsius,
* H denotes an addition of 0.5 Celsius and N denotes negation
* of the final value.
*/
static int translate_temp(u16 val)
{
int t;
t = ((val & READBACK_VALUE_MASK) >> READBACK_VALUE_SHIFT) * 1000;
if (val & READBACK_ADD_HALF)
t += 500;
if (val & READBACK_NEGATE)
t *= -1;
return t;
}
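/*
 * For illustration, decoding a raw readback value of 0x2e81 with the
 * scheme above: T = 0x2e = 46 -> 46000, H set -> +500, N set -> negate,
 * giving -46500 millicelsius (-46.5 degrees Celsius).
 */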
static int tegra_thermctl_get_temp(struct thermal_zone_device *tz, int *out_temp)
{
struct tegra_thermctl_zone *zone = thermal_zone_device_priv(tz);
u32 val;
val = readl(zone->reg);
val = REG_GET_MASK(val, zone->sg->sensor_temp_mask);
*out_temp = translate_temp(val);
return 0;
}
/**
* enforce_temp_range() - check and enforce temperature range [min, max]
* @dev: struct device * of the SOC_THERM instance
* @trip_temp: the trip temperature to check
*
* Checks and enforces the permitted temperature range that SOC_THERM
 * HW can support. This is done while taking care of precision.
*
* Return: The precision adjusted capped temperature in millicelsius.
*/
static int enforce_temp_range(struct device *dev, int trip_temp)
{
int temp;
temp = clamp_val(trip_temp, min_low_temp, max_high_temp);
if (temp != trip_temp)
dev_dbg(dev, "soctherm: trip temperature %d forced to %d\n",
trip_temp, temp);
return temp;
}
/**
* thermtrip_program() - Configures the hardware to shut down the
* system if a given sensor group reaches a given temperature
* @dev: ptr to the struct device for the SOC_THERM IP block
* @sg: pointer to the sensor group to set the thermtrip temperature for
* @trip_temp: the temperature in millicelsius to trigger the thermal trip at
*
* Sets the thermal trip threshold of the given sensor group to be the
* @trip_temp. If this threshold is crossed, the hardware will shut
* down.
*
* Note that, although @trip_temp is specified in millicelsius, the
* hardware is programmed in degrees Celsius.
*
* Return: 0 upon success, or %-EINVAL upon failure.
*/
static int thermtrip_program(struct device *dev,
const struct tegra_tsensor_group *sg,
int trip_temp)
{
struct tegra_soctherm *ts = dev_get_drvdata(dev);
int temp;
u32 r;
if (!sg || !sg->thermtrip_threshold_mask)
return -EINVAL;
temp = enforce_temp_range(dev, trip_temp) / ts->soc->thresh_grain;
r = readl(ts->regs + THERMCTL_THERMTRIP_CTL);
r = REG_SET_MASK(r, sg->thermtrip_threshold_mask, temp);
r = REG_SET_MASK(r, sg->thermtrip_enable_mask, 1);
r = REG_SET_MASK(r, sg->thermtrip_any_en_mask, 0);
writel(r, ts->regs + THERMCTL_THERMTRIP_CTL);
return 0;
}
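/*
 * For illustration: a 102000 millicelsius trip programmed through the
 * division above becomes a raw threshold of 102 on Tegra124
 * (thresh_grain 1000, 8-bit threshold fields) and 204 on Tegra210
 * (thresh_grain 500, 9-bit threshold fields).
 */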
/**
* throttrip_program() - Configures the hardware to throttle the
* pulse if a given sensor group reaches a given temperature
* @dev: ptr to the struct device for the SOC_THERM IP block
* @sg: pointer to the sensor group to set the thermtrip temperature for
* @stc: pointer to the throttle need to be triggered
* @trip_temp: the temperature in millicelsius to trigger the thermal trip at
*
* Sets the thermal trip threshold and throttle event of the given sensor
* group. If this threshold is crossed, the hardware will trigger the
* throttle.
*
* Note that, although @trip_temp is specified in millicelsius, the
* hardware is programmed in degrees Celsius.
*
* Return: 0 upon success, or %-EINVAL upon failure.
*/
static int throttrip_program(struct device *dev,
const struct tegra_tsensor_group *sg,
struct soctherm_throt_cfg *stc,
int trip_temp)
{
struct tegra_soctherm *ts = dev_get_drvdata(dev);
int temp, cpu_throt, gpu_throt;
unsigned int throt;
u32 r, reg_off;
if (!sg || !stc || !stc->init)
return -EINVAL;
temp = enforce_temp_range(dev, trip_temp) / ts->soc->thresh_grain;
/* Hardcode LIGHT on LEVEL1 and HEAVY on LEVEL2 */
throt = stc->id;
reg_off = THERMCTL_LVL_REG(sg->thermctl_lvl0_offset, throt + 1);
if (throt == THROTTLE_LIGHT) {
cpu_throt = THERMCTL_LVL0_CPU0_CPU_THROT_LIGHT;
gpu_throt = THERMCTL_LVL0_CPU0_GPU_THROT_LIGHT;
} else {
cpu_throt = THERMCTL_LVL0_CPU0_CPU_THROT_HEAVY;
gpu_throt = THERMCTL_LVL0_CPU0_GPU_THROT_HEAVY;
if (throt != THROTTLE_HEAVY)
dev_warn(dev,
"invalid throt id %d - assuming HEAVY",
throt);
}
r = readl(ts->regs + reg_off);
r = REG_SET_MASK(r, sg->thermctl_lvl0_up_thresh_mask, temp);
r = REG_SET_MASK(r, sg->thermctl_lvl0_dn_thresh_mask, temp);
r = REG_SET_MASK(r, THERMCTL_LVL0_CPU0_CPU_THROT_MASK, cpu_throt);
r = REG_SET_MASK(r, THERMCTL_LVL0_CPU0_GPU_THROT_MASK, gpu_throt);
r = REG_SET_MASK(r, THERMCTL_LVL0_CPU0_EN_MASK, 1);
writel(r, ts->regs + reg_off);
return 0;
}
static struct soctherm_throt_cfg *
find_throttle_cfg_by_name(struct tegra_soctherm *ts, const char *name)
{
unsigned int i;
for (i = 0; ts->throt_cfgs[i].name; i++)
if (!strcmp(ts->throt_cfgs[i].name, name))
return &ts->throt_cfgs[i];
return NULL;
}
static int tsensor_group_thermtrip_get(struct tegra_soctherm *ts, int id)
{
int i, temp = min_low_temp;
struct tsensor_group_thermtrips *tt = ts->soc->thermtrips;
if (id >= TEGRA124_SOCTHERM_SENSOR_NUM)
return temp;
if (tt) {
for (i = 0; i < ts->soc->num_ttgs; i++) {
if (tt[i].id == id)
return tt[i].temp;
}
}
return temp;
}
static int tegra_thermctl_set_trip_temp(struct thermal_zone_device *tz, int trip_id, int temp)
{
struct tegra_thermctl_zone *zone = thermal_zone_device_priv(tz);
struct tegra_soctherm *ts = zone->ts;
struct thermal_trip trip;
const struct tegra_tsensor_group *sg = zone->sg;
struct device *dev = zone->dev;
int ret;
ret = __thermal_zone_get_trip(tz, trip_id, &trip);
if (ret)
return ret;
if (trip.type == THERMAL_TRIP_CRITICAL) {
/*
* If thermtrips property is set in DT,
* doesn't need to program critical type trip to HW,
* if not, program critical trip to HW.
*/
if (min_low_temp == tsensor_group_thermtrip_get(ts, sg->id))
return thermtrip_program(dev, sg, temp);
else
return 0;
} else if (trip.type == THERMAL_TRIP_HOT) {
int i;
for (i = 0; i < THROTTLE_SIZE; i++) {
struct thermal_cooling_device *cdev;
struct soctherm_throt_cfg *stc;
if (!ts->throt_cfgs[i].init)
continue;
cdev = ts->throt_cfgs[i].cdev;
if (get_thermal_instance(tz, cdev, trip_id))
stc = find_throttle_cfg_by_name(ts, cdev->type);
else
continue;
return throttrip_program(dev, sg, stc, temp);
}
}
return 0;
}
static void thermal_irq_enable(struct tegra_thermctl_zone *zn)
{
u32 r;
/* multiple zones could be handling and setting trips at once */
mutex_lock(&zn->ts->thermctl_lock);
r = readl(zn->ts->regs + THERMCTL_INTR_ENABLE);
r = REG_SET_MASK(r, zn->sg->thermctl_isr_mask, TH_INTR_UP_DN_EN);
writel(r, zn->ts->regs + THERMCTL_INTR_ENABLE);
mutex_unlock(&zn->ts->thermctl_lock);
}
static void thermal_irq_disable(struct tegra_thermctl_zone *zn)
{
u32 r;
/* multiple zones could be handling and setting trips at once */
mutex_lock(&zn->ts->thermctl_lock);
r = readl(zn->ts->regs + THERMCTL_INTR_DISABLE);
r = REG_SET_MASK(r, zn->sg->thermctl_isr_mask, 0);
writel(r, zn->ts->regs + THERMCTL_INTR_DISABLE);
mutex_unlock(&zn->ts->thermctl_lock);
}
static int tegra_thermctl_set_trips(struct thermal_zone_device *tz, int lo, int hi)
{
struct tegra_thermctl_zone *zone = thermal_zone_device_priv(tz);
u32 r;
thermal_irq_disable(zone);
r = readl(zone->ts->regs + zone->sg->thermctl_lvl0_offset);
r = REG_SET_MASK(r, THERMCTL_LVL0_CPU0_EN_MASK, 0);
writel(r, zone->ts->regs + zone->sg->thermctl_lvl0_offset);
lo = enforce_temp_range(zone->dev, lo) / zone->ts->soc->thresh_grain;
hi = enforce_temp_range(zone->dev, hi) / zone->ts->soc->thresh_grain;
dev_dbg(zone->dev, "%s hi:%d, lo:%d\n", __func__, hi, lo);
r = REG_SET_MASK(r, zone->sg->thermctl_lvl0_up_thresh_mask, hi);
r = REG_SET_MASK(r, zone->sg->thermctl_lvl0_dn_thresh_mask, lo);
r = REG_SET_MASK(r, THERMCTL_LVL0_CPU0_EN_MASK, 1);
writel(r, zone->ts->regs + zone->sg->thermctl_lvl0_offset);
thermal_irq_enable(zone);
return 0;
}
static const struct thermal_zone_device_ops tegra_of_thermal_ops = {
.get_temp = tegra_thermctl_get_temp,
.set_trip_temp = tegra_thermctl_set_trip_temp,
.set_trips = tegra_thermctl_set_trips,
};
static int get_hot_temp(struct thermal_zone_device *tz, int *trip_id, int *temp)
{
int i, ret;
struct thermal_trip trip;
for (i = 0; i < thermal_zone_get_num_trips(tz); i++) {
ret = thermal_zone_get_trip(tz, i, &trip);
if (ret)
return -EINVAL;
if (trip.type == THERMAL_TRIP_HOT) {
*trip_id = i;
return 0;
}
}
return -EINVAL;
}
/**
* tegra_soctherm_set_hwtrips() - set HW trip point from DT data
* @dev: struct device * of the SOC_THERM instance
* @sg: pointer to the sensor group to set the thermtrip temperature for
* @tz: struct thermal_zone_device *
*
 * Configure the SOC_THERM HW trip points, setting the "THERMTRIP" and
 * "THROTTLE" trip points, using the "thermtrips", "critical" or "hot"
 * type trip temperatures from the thermal zone.
* After they have been configured, THERMTRIP or THROTTLE will take
* action when the configured SoC thermal sensor group reaches a
* certain temperature.
*
* Return: 0 upon success, or a negative error code on failure.
* "Success" does not mean that trips was enabled; it could also
* mean that no node was found in DT.
* THERMTRIP has been enabled successfully when a message similar to
* this one appears on the serial console:
* "thermtrip: will shut down when sensor group XXX reaches YYYYYY mC"
* THROTTLE has been enabled successfully when a message similar to
* this one appears on the serial console:
* ""throttrip: will throttle when sensor group XXX reaches YYYYYY mC"
*/
static int tegra_soctherm_set_hwtrips(struct device *dev,
const struct tegra_tsensor_group *sg,
struct thermal_zone_device *tz)
{
struct tegra_soctherm *ts = dev_get_drvdata(dev);
struct soctherm_throt_cfg *stc;
int i, trip, temperature, ret;
/* Get thermtrips. If missing, try to get critical trips. */
temperature = tsensor_group_thermtrip_get(ts, sg->id);
if (min_low_temp == temperature)
if (thermal_zone_get_crit_temp(tz, &temperature))
temperature = max_high_temp;
ret = thermtrip_program(dev, sg, temperature);
if (ret) {
dev_err(dev, "thermtrip: %s: error during enable\n", sg->name);
return ret;
}
dev_info(dev, "thermtrip: will shut down when %s reaches %d mC\n",
sg->name, temperature);
ret = get_hot_temp(tz, &trip, &temperature);
if (ret) {
dev_info(dev, "throttrip: %s: missing hot temperature\n",
sg->name);
return 0;
}
for (i = 0; i < THROTTLE_OC1; i++) {
struct thermal_cooling_device *cdev;
if (!ts->throt_cfgs[i].init)
continue;
cdev = ts->throt_cfgs[i].cdev;
if (get_thermal_instance(tz, cdev, trip))
stc = find_throttle_cfg_by_name(ts, cdev->type);
else
continue;
ret = throttrip_program(dev, sg, stc, temperature);
if (ret) {
dev_err(dev, "throttrip: %s: error during enable\n",
sg->name);
return ret;
}
dev_info(dev,
"throttrip: will throttle when %s reaches %d mC\n",
sg->name, temperature);
break;
}
	if (i == THROTTLE_OC1)
dev_info(dev, "throttrip: %s: missing throttle cdev\n",
sg->name);
return 0;
}
static irqreturn_t soctherm_thermal_isr(int irq, void *dev_id)
{
struct tegra_soctherm *ts = dev_id;
u32 r;
/* Case for no lock:
* Although interrupts are enabled in set_trips, there is still no need
* to lock here because the interrupts are disabled before programming
	 * new trip points. Hence there can't be an interrupt on the same sensor.
	 * An interrupt can however occur on a sensor while trips are being
	 * programmed on a different one. This being a LEVEL interrupt won't
* cause a new interrupt but this is taken care of by the re-reading of
* the STATUS register in the thread function.
*/
r = readl(ts->regs + THERMCTL_INTR_STATUS);
writel(r, ts->regs + THERMCTL_INTR_DISABLE);
return IRQ_WAKE_THREAD;
}
/**
* soctherm_thermal_isr_thread() - Handles a thermal interrupt request
* @irq: The interrupt number being requested; not used
* @dev_id: Opaque pointer to tegra_soctherm;
*
* Clears the interrupt status register if there are expected
* interrupt bits set.
* The interrupt(s) are then handled by updating the corresponding
* thermal zones.
*
* An error is logged if any unexpected interrupt bits are set.
*
* Disabled interrupts are re-enabled.
*
* Return: %IRQ_HANDLED. Interrupt was handled and no further processing
* is needed.
*/
static irqreturn_t soctherm_thermal_isr_thread(int irq, void *dev_id)
{
struct tegra_soctherm *ts = dev_id;
struct thermal_zone_device *tz;
u32 st, ex = 0, cp = 0, gp = 0, pl = 0, me = 0;
st = readl(ts->regs + THERMCTL_INTR_STATUS);
/* deliberately clear expected interrupts handled in SW */
cp |= st & TH_INTR_CD0_MASK;
cp |= st & TH_INTR_CU0_MASK;
gp |= st & TH_INTR_GD0_MASK;
gp |= st & TH_INTR_GU0_MASK;
pl |= st & TH_INTR_PD0_MASK;
pl |= st & TH_INTR_PU0_MASK;
me |= st & TH_INTR_MD0_MASK;
me |= st & TH_INTR_MU0_MASK;
ex |= cp | gp | pl | me;
if (ex) {
writel(ex, ts->regs + THERMCTL_INTR_STATUS);
st &= ~ex;
if (cp) {
tz = ts->thermctl_tzs[TEGRA124_SOCTHERM_SENSOR_CPU];
thermal_zone_device_update(tz,
THERMAL_EVENT_UNSPECIFIED);
}
if (gp) {
tz = ts->thermctl_tzs[TEGRA124_SOCTHERM_SENSOR_GPU];
thermal_zone_device_update(tz,
THERMAL_EVENT_UNSPECIFIED);
}
if (pl) {
tz = ts->thermctl_tzs[TEGRA124_SOCTHERM_SENSOR_PLLX];
thermal_zone_device_update(tz,
THERMAL_EVENT_UNSPECIFIED);
}
if (me) {
tz = ts->thermctl_tzs[TEGRA124_SOCTHERM_SENSOR_MEM];
thermal_zone_device_update(tz,
THERMAL_EVENT_UNSPECIFIED);
}
}
/* deliberately ignore expected interrupts NOT handled in SW */
ex |= TH_INTR_IGNORE_MASK;
st &= ~ex;
if (st) {
/* Whine about any other unexpected INTR bits still set */
pr_err("soctherm: Ignored unexpected INTRs 0x%08x\n", st);
writel(st, ts->regs + THERMCTL_INTR_STATUS);
}
return IRQ_HANDLED;
}
/**
* soctherm_oc_intr_enable() - Enables the soctherm over-current interrupt
* @ts: pointer to a struct tegra_soctherm
* @alarm: The soctherm throttle id
* @enable: Flag indicating enable the soctherm over-current
* interrupt or disable it
*
 * Enables the over-current interrupt for pin @alarm if @enable is set and
 * the alarm corresponds to OC1, OC2, OC3, or OC4.
*/
static void soctherm_oc_intr_enable(struct tegra_soctherm *ts,
enum soctherm_throttle_id alarm,
bool enable)
{
u32 r;
if (!enable)
return;
r = readl(ts->regs + OC_INTR_ENABLE);
switch (alarm) {
case THROTTLE_OC1:
r = REG_SET_MASK(r, OC_INTR_OC1_MASK, 1);
break;
case THROTTLE_OC2:
r = REG_SET_MASK(r, OC_INTR_OC2_MASK, 1);
break;
case THROTTLE_OC3:
r = REG_SET_MASK(r, OC_INTR_OC3_MASK, 1);
break;
case THROTTLE_OC4:
r = REG_SET_MASK(r, OC_INTR_OC4_MASK, 1);
break;
default:
r = 0;
break;
}
writel(r, ts->regs + OC_INTR_ENABLE);
}
/**
* soctherm_handle_alarm() - Handles soctherm alarms
* @alarm: The soctherm throttle id
*
* "Handles" over-current alarms (OC1, OC2, OC3, and OC4) by printing
* a warning or informative message.
*
 * Return: 0 on success; -EINVAL if @alarm is not a recognized over-current alarm.
*/
static int soctherm_handle_alarm(enum soctherm_throttle_id alarm)
{
int rv = -EINVAL;
switch (alarm) {
case THROTTLE_OC1:
pr_debug("soctherm: Successfully handled OC1 alarm\n");
rv = 0;
break;
case THROTTLE_OC2:
pr_debug("soctherm: Successfully handled OC2 alarm\n");
rv = 0;
break;
case THROTTLE_OC3:
pr_debug("soctherm: Successfully handled OC3 alarm\n");
rv = 0;
break;
case THROTTLE_OC4:
pr_debug("soctherm: Successfully handled OC4 alarm\n");
rv = 0;
break;
default:
break;
}
if (rv)
pr_err("soctherm: ERROR in handling %s alarm\n",
throt_names[alarm]);
return rv;
}
/**
* soctherm_edp_isr_thread() - log an over-current interrupt request
* @irq: OC irq number. Currently not being used. See description
* @arg: a void pointer for callback, currently not being used
*
* Over-current events are handled in hardware. This function is called to log
 * and handle any OC events that happened. Additionally, it checks the
 * over-current interrupt status for any bits that are set but were not
 * expected (i.e. any discrepancy in interrupt status); any such
 * discrepancy is logged.
*
* Return: %IRQ_HANDLED
*/
static irqreturn_t soctherm_edp_isr_thread(int irq, void *arg)
{
struct tegra_soctherm *ts = arg;
u32 st, ex, oc1, oc2, oc3, oc4;
st = readl(ts->regs + OC_INTR_STATUS);
/* deliberately clear expected interrupts handled in SW */
oc1 = st & OC_INTR_OC1_MASK;
oc2 = st & OC_INTR_OC2_MASK;
oc3 = st & OC_INTR_OC3_MASK;
oc4 = st & OC_INTR_OC4_MASK;
ex = oc1 | oc2 | oc3 | oc4;
pr_err("soctherm: OC ALARM 0x%08x\n", ex);
if (ex) {
writel(st, ts->regs + OC_INTR_STATUS);
st &= ~ex;
if (oc1 && !soctherm_handle_alarm(THROTTLE_OC1))
soctherm_oc_intr_enable(ts, THROTTLE_OC1, true);
if (oc2 && !soctherm_handle_alarm(THROTTLE_OC2))
soctherm_oc_intr_enable(ts, THROTTLE_OC2, true);
if (oc3 && !soctherm_handle_alarm(THROTTLE_OC3))
soctherm_oc_intr_enable(ts, THROTTLE_OC3, true);
if (oc4 && !soctherm_handle_alarm(THROTTLE_OC4))
soctherm_oc_intr_enable(ts, THROTTLE_OC4, true);
if (oc1 && soc_irq_cdata.irq_enable & BIT(0))
handle_nested_irq(
irq_find_mapping(soc_irq_cdata.domain, 0));
if (oc2 && soc_irq_cdata.irq_enable & BIT(1))
handle_nested_irq(
irq_find_mapping(soc_irq_cdata.domain, 1));
if (oc3 && soc_irq_cdata.irq_enable & BIT(2))
handle_nested_irq(
irq_find_mapping(soc_irq_cdata.domain, 2));
if (oc4 && soc_irq_cdata.irq_enable & BIT(3))
handle_nested_irq(
irq_find_mapping(soc_irq_cdata.domain, 3));
}
if (st) {
pr_err("soctherm: Ignored unexpected OC ALARM 0x%08x\n", st);
writel(st, ts->regs + OC_INTR_STATUS);
}
return IRQ_HANDLED;
}
/**
* soctherm_edp_isr() - Disables any active interrupts
* @irq: The interrupt request number
* @arg: Opaque pointer to an argument
*
* Writes to the OC_INTR_DISABLE register the over current interrupt status,
* masking any asserted interrupts. Doing this prevents the same interrupts
* from triggering this isr repeatedly. The thread woken by this isr will
* handle asserted interrupts and subsequently unmask/re-enable them.
*
* The OC_INTR_DISABLE register indicates which OC interrupts
* have been disabled.
*
* Return: %IRQ_WAKE_THREAD, handler requests to wake the handler thread
*/
static irqreturn_t soctherm_edp_isr(int irq, void *arg)
{
struct tegra_soctherm *ts = arg;
u32 r;
if (!ts)
return IRQ_NONE;
r = readl(ts->regs + OC_INTR_STATUS);
writel(r, ts->regs + OC_INTR_DISABLE);
return IRQ_WAKE_THREAD;
}
/**
* soctherm_oc_irq_lock() - locks the over-current interrupt request
* @data: Interrupt request data
*
* Looks up the chip data from @data and locks the mutex associated with
* a particular over-current interrupt request.
*/
static void soctherm_oc_irq_lock(struct irq_data *data)
{
struct soctherm_oc_irq_chip_data *d = irq_data_get_irq_chip_data(data);
mutex_lock(&d->irq_lock);
}
/**
* soctherm_oc_irq_sync_unlock() - Unlocks the OC interrupt request
* @data: Interrupt request data
*
* Looks up the interrupt request data @data and unlocks the mutex associated
* with a particular over-current interrupt request.
*/
static void soctherm_oc_irq_sync_unlock(struct irq_data *data)
{
struct soctherm_oc_irq_chip_data *d = irq_data_get_irq_chip_data(data);
mutex_unlock(&d->irq_lock);
}
/**
* soctherm_oc_irq_enable() - Enables the SOC_THERM over-current interrupt queue
* @data: irq_data structure of the chip
*
* Sets the irq_enable bit of SOC_THERM allowing SOC_THERM
* to respond to over-current interrupts.
*
*/
static void soctherm_oc_irq_enable(struct irq_data *data)
{
struct soctherm_oc_irq_chip_data *d = irq_data_get_irq_chip_data(data);
d->irq_enable |= BIT(data->hwirq);
}
/**
* soctherm_oc_irq_disable() - Disables overcurrent interrupt requests
* @data: The interrupt request information
*
* Clears the interrupt request enable bit of the overcurrent
* interrupt request chip data.
*
* Return: Nothing is returned (void)
*/
static void soctherm_oc_irq_disable(struct irq_data *data)
{
struct soctherm_oc_irq_chip_data *d = irq_data_get_irq_chip_data(data);
d->irq_enable &= ~BIT(data->hwirq);
}
static int soctherm_oc_irq_set_type(struct irq_data *data, unsigned int type)
{
return 0;
}
/**
* soctherm_oc_irq_map() - SOC_THERM interrupt request domain mapper
* @h: Interrupt request domain
* @virq: Virtual interrupt request number
* @hw: Hardware interrupt request number
*
* Mapping callback function for SOC_THERM's irq_domain. When a SOC_THERM
* interrupt request is called, the irq_domain takes the request's virtual
* request number (much like a virtual memory address) and maps it to a
* physical hardware request number.
*
* When a mapping doesn't already exist for a virtual request number, the
* irq_domain calls this function to associate the virtual request number with
* a hardware request number.
*
* Return: 0
*/
static int soctherm_oc_irq_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
struct soctherm_oc_irq_chip_data *data = h->host_data;
irq_set_chip_data(virq, data);
irq_set_chip(virq, &data->irq_chip);
irq_set_nested_thread(virq, 1);
return 0;
}
/**
* soctherm_irq_domain_xlate_twocell() - xlate for soctherm interrupts
* @d: Interrupt request domain
* @ctrlr: Controller device tree node
* @intspec: Array of u32s from DTs "interrupt" property
* @intsize: Number of values inside the intspec array
* @out_hwirq: HW IRQ value associated with this interrupt
* @out_type: The IRQ SENSE type for this interrupt.
*
* This Device Tree IRQ specifier translation function will translate a
* specific "interrupt" as defined by 2 DT values where the cell values map
* the hwirq number + 1 and linux irq flags. Since the output is the hwirq
* number, this function will subtract 1 from the value listed in DT.
*
* Return: 0
*/
static int soctherm_irq_domain_xlate_twocell(struct irq_domain *d,
struct device_node *ctrlr, const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_type)
{
if (WARN_ON(intsize < 2))
return -EINVAL;
/*
* The HW value is 1 index less than the DT IRQ values.
* i.e. OC4 goes to HW index 3.
*/
*out_hwirq = intspec[0] - 1;
*out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
return 0;
}
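/*
 * Worked example (illustrative only, not driver code): a device-tree
 * consumer specifying "interrupts = <3 IRQ_TYPE_LEVEL_HIGH>" asks for
 * OC3; the translation above yields *out_hwirq = 2 and
 * *out_type = IRQ_TYPE_LEVEL_HIGH, matching the "OC4 goes to HW
 * index 3" convention described in the comment.
 */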
static const struct irq_domain_ops soctherm_oc_domain_ops = {
.map = soctherm_oc_irq_map,
.xlate = soctherm_irq_domain_xlate_twocell,
};
/**
* soctherm_oc_int_init() - Initial enabling of the over
* current interrupts
* @np: The devicetree node for soctherm
* @num_irqs: The number of new interrupt requests
*
* Sets the over current interrupt request chip data
*
 * Return: 0 on success or if over-current interrupts are not enabled,
 * -ENOMEM if the IRQ domain could not be created
*/
static int soctherm_oc_int_init(struct device_node *np, int num_irqs)
{
if (!num_irqs) {
pr_info("%s(): OC interrupts are not enabled\n", __func__);
return 0;
}
mutex_init(&soc_irq_cdata.irq_lock);
soc_irq_cdata.irq_enable = 0;
soc_irq_cdata.irq_chip.name = "soc_therm_oc";
soc_irq_cdata.irq_chip.irq_bus_lock = soctherm_oc_irq_lock;
soc_irq_cdata.irq_chip.irq_bus_sync_unlock =
soctherm_oc_irq_sync_unlock;
soc_irq_cdata.irq_chip.irq_disable = soctherm_oc_irq_disable;
soc_irq_cdata.irq_chip.irq_enable = soctherm_oc_irq_enable;
soc_irq_cdata.irq_chip.irq_set_type = soctherm_oc_irq_set_type;
soc_irq_cdata.irq_chip.irq_set_wake = NULL;
soc_irq_cdata.domain = irq_domain_add_linear(np, num_irqs,
&soctherm_oc_domain_ops,
&soc_irq_cdata);
if (!soc_irq_cdata.domain) {
pr_err("%s: Failed to create IRQ domain\n", __func__);
return -ENOMEM;
}
pr_debug("%s(): OC interrupts enabled successful\n", __func__);
return 0;
}
#ifdef CONFIG_DEBUG_FS
static int regs_show(struct seq_file *s, void *data)
{
struct platform_device *pdev = s->private;
struct tegra_soctherm *ts = platform_get_drvdata(pdev);
const struct tegra_tsensor *tsensors = ts->soc->tsensors;
const struct tegra_tsensor_group **ttgs = ts->soc->ttgs;
u32 r, state;
int i, level;
seq_puts(s, "-----TSENSE (convert HW)-----\n");
for (i = 0; i < ts->soc->num_tsensors; i++) {
r = readl(ts->regs + tsensors[i].base + SENSOR_CONFIG1);
state = REG_GET_MASK(r, SENSOR_CONFIG1_TEMP_ENABLE);
seq_printf(s, "%s: ", tsensors[i].name);
seq_printf(s, "En(%d) ", state);
if (!state) {
seq_puts(s, "\n");
continue;
}
state = REG_GET_MASK(r, SENSOR_CONFIG1_TIDDQ_EN_MASK);
seq_printf(s, "tiddq(%d) ", state);
state = REG_GET_MASK(r, SENSOR_CONFIG1_TEN_COUNT_MASK);
seq_printf(s, "ten_count(%d) ", state);
state = REG_GET_MASK(r, SENSOR_CONFIG1_TSAMPLE_MASK);
seq_printf(s, "tsample(%d) ", state + 1);
r = readl(ts->regs + tsensors[i].base + SENSOR_STATUS1);
state = REG_GET_MASK(r, SENSOR_STATUS1_TEMP_VALID_MASK);
seq_printf(s, "Temp(%d/", state);
state = REG_GET_MASK(r, SENSOR_STATUS1_TEMP_MASK);
seq_printf(s, "%d) ", translate_temp(state));
r = readl(ts->regs + tsensors[i].base + SENSOR_STATUS0);
state = REG_GET_MASK(r, SENSOR_STATUS0_VALID_MASK);
seq_printf(s, "Capture(%d/", state);
state = REG_GET_MASK(r, SENSOR_STATUS0_CAPTURE_MASK);
seq_printf(s, "%d) ", state);
r = readl(ts->regs + tsensors[i].base + SENSOR_CONFIG0);
state = REG_GET_MASK(r, SENSOR_CONFIG0_STOP);
seq_printf(s, "Stop(%d) ", state);
state = REG_GET_MASK(r, SENSOR_CONFIG0_TALL_MASK);
seq_printf(s, "Tall(%d) ", state);
state = REG_GET_MASK(r, SENSOR_CONFIG0_TCALC_OVER);
seq_printf(s, "Over(%d/", state);
state = REG_GET_MASK(r, SENSOR_CONFIG0_OVER);
seq_printf(s, "%d/", state);
state = REG_GET_MASK(r, SENSOR_CONFIG0_CPTR_OVER);
seq_printf(s, "%d) ", state);
r = readl(ts->regs + tsensors[i].base + SENSOR_CONFIG2);
state = REG_GET_MASK(r, SENSOR_CONFIG2_THERMA_MASK);
seq_printf(s, "Therm_A/B(%d/", state);
state = REG_GET_MASK(r, SENSOR_CONFIG2_THERMB_MASK);
seq_printf(s, "%d)\n", (s16)state);
}
r = readl(ts->regs + SENSOR_PDIV);
seq_printf(s, "PDIV: 0x%x\n", r);
r = readl(ts->regs + SENSOR_HOTSPOT_OFF);
seq_printf(s, "HOTSPOT: 0x%x\n", r);
seq_puts(s, "\n");
seq_puts(s, "-----SOC_THERM-----\n");
r = readl(ts->regs + SENSOR_TEMP1);
state = REG_GET_MASK(r, SENSOR_TEMP1_CPU_TEMP_MASK);
seq_printf(s, "Temperatures: CPU(%d) ", translate_temp(state));
state = REG_GET_MASK(r, SENSOR_TEMP1_GPU_TEMP_MASK);
seq_printf(s, " GPU(%d) ", translate_temp(state));
r = readl(ts->regs + SENSOR_TEMP2);
state = REG_GET_MASK(r, SENSOR_TEMP2_PLLX_TEMP_MASK);
seq_printf(s, " PLLX(%d) ", translate_temp(state));
state = REG_GET_MASK(r, SENSOR_TEMP2_MEM_TEMP_MASK);
seq_printf(s, " MEM(%d)\n", translate_temp(state));
for (i = 0; i < ts->soc->num_ttgs; i++) {
seq_printf(s, "%s:\n", ttgs[i]->name);
for (level = 0; level < 4; level++) {
s32 v;
u32 mask;
u16 off = ttgs[i]->thermctl_lvl0_offset;
r = readl(ts->regs + THERMCTL_LVL_REG(off, level));
mask = ttgs[i]->thermctl_lvl0_up_thresh_mask;
state = REG_GET_MASK(r, mask);
v = sign_extend32(state, ts->soc->bptt - 1);
v *= ts->soc->thresh_grain;
seq_printf(s, " %d: Up/Dn(%d /", level, v);
mask = ttgs[i]->thermctl_lvl0_dn_thresh_mask;
state = REG_GET_MASK(r, mask);
v = sign_extend32(state, ts->soc->bptt - 1);
v *= ts->soc->thresh_grain;
seq_printf(s, "%d ) ", v);
mask = THERMCTL_LVL0_CPU0_EN_MASK;
state = REG_GET_MASK(r, mask);
seq_printf(s, "En(%d) ", state);
mask = THERMCTL_LVL0_CPU0_CPU_THROT_MASK;
state = REG_GET_MASK(r, mask);
seq_puts(s, "CPU Throt");
if (!state)
seq_printf(s, "(%s) ", "none");
else if (state == THERMCTL_LVL0_CPU0_CPU_THROT_LIGHT)
seq_printf(s, "(%s) ", "L");
else if (state == THERMCTL_LVL0_CPU0_CPU_THROT_HEAVY)
seq_printf(s, "(%s) ", "H");
else
seq_printf(s, "(%s) ", "H+L");
mask = THERMCTL_LVL0_CPU0_GPU_THROT_MASK;
state = REG_GET_MASK(r, mask);
seq_puts(s, "GPU Throt");
if (!state)
seq_printf(s, "(%s) ", "none");
else if (state == THERMCTL_LVL0_CPU0_GPU_THROT_LIGHT)
seq_printf(s, "(%s) ", "L");
else if (state == THERMCTL_LVL0_CPU0_GPU_THROT_HEAVY)
seq_printf(s, "(%s) ", "H");
else
seq_printf(s, "(%s) ", "H+L");
mask = THERMCTL_LVL0_CPU0_STATUS_MASK;
state = REG_GET_MASK(r, mask);
seq_printf(s, "Status(%s)\n",
state == 0 ? "LO" :
state == 1 ? "In" :
state == 2 ? "Res" : "HI");
}
}
r = readl(ts->regs + THERMCTL_STATS_CTL);
seq_printf(s, "STATS: Up(%s) Dn(%s)\n",
r & STATS_CTL_EN_UP ? "En" : "--",
r & STATS_CTL_EN_DN ? "En" : "--");
for (level = 0; level < 4; level++) {
u16 off;
off = THERMCTL_LVL0_UP_STATS;
r = readl(ts->regs + THERMCTL_LVL_REG(off, level));
seq_printf(s, " Level_%d Up(%d) ", level, r);
off = THERMCTL_LVL0_DN_STATS;
r = readl(ts->regs + THERMCTL_LVL_REG(off, level));
seq_printf(s, "Dn(%d)\n", r);
}
r = readl(ts->regs + THERMCTL_THERMTRIP_CTL);
state = REG_GET_MASK(r, ttgs[0]->thermtrip_any_en_mask);
seq_printf(s, "Thermtrip Any En(%d)\n", state);
for (i = 0; i < ts->soc->num_ttgs; i++) {
state = REG_GET_MASK(r, ttgs[i]->thermtrip_enable_mask);
seq_printf(s, " %s En(%d) ", ttgs[i]->name, state);
state = REG_GET_MASK(r, ttgs[i]->thermtrip_threshold_mask);
state *= ts->soc->thresh_grain;
seq_printf(s, "Thresh(%d)\n", state);
}
r = readl(ts->regs + THROT_GLOBAL_CFG);
seq_puts(s, "\n");
seq_printf(s, "GLOBAL THROTTLE CONFIG: 0x%08x\n", r);
seq_puts(s, "---------------------------------------------------\n");
r = readl(ts->regs + THROT_STATUS);
state = REG_GET_MASK(r, THROT_STATUS_BREACH_MASK);
seq_printf(s, "THROT STATUS: breach(%d) ", state);
state = REG_GET_MASK(r, THROT_STATUS_STATE_MASK);
seq_printf(s, "state(%d) ", state);
state = REG_GET_MASK(r, THROT_STATUS_ENABLED_MASK);
seq_printf(s, "enabled(%d)\n", state);
r = readl(ts->regs + CPU_PSKIP_STATUS);
if (ts->soc->use_ccroc) {
state = REG_GET_MASK(r, XPU_PSKIP_STATUS_ENABLED_MASK);
seq_printf(s, "CPU PSKIP STATUS: enabled(%d)\n", state);
} else {
state = REG_GET_MASK(r, XPU_PSKIP_STATUS_M_MASK);
seq_printf(s, "CPU PSKIP STATUS: M(%d) ", state);
state = REG_GET_MASK(r, XPU_PSKIP_STATUS_N_MASK);
seq_printf(s, "N(%d) ", state);
state = REG_GET_MASK(r, XPU_PSKIP_STATUS_ENABLED_MASK);
seq_printf(s, "enabled(%d)\n", state);
}
return 0;
}
DEFINE_SHOW_ATTRIBUTE(regs);
static void soctherm_debug_init(struct platform_device *pdev)
{
struct tegra_soctherm *tegra = platform_get_drvdata(pdev);
struct dentry *root;
root = debugfs_create_dir("soctherm", NULL);
tegra->debugfs_dir = root;
	debugfs_create_file("reg_contents", 0644, root, pdev, &regs_fops);
}
#else
static inline void soctherm_debug_init(struct platform_device *pdev) {}
#endif
static int soctherm_clk_enable(struct platform_device *pdev, bool enable)
{
struct tegra_soctherm *tegra = platform_get_drvdata(pdev);
int err;
if (!tegra->clock_soctherm || !tegra->clock_tsensor)
return -EINVAL;
reset_control_assert(tegra->reset);
if (enable) {
err = clk_prepare_enable(tegra->clock_soctherm);
if (err) {
reset_control_deassert(tegra->reset);
return err;
}
err = clk_prepare_enable(tegra->clock_tsensor);
if (err) {
clk_disable_unprepare(tegra->clock_soctherm);
reset_control_deassert(tegra->reset);
return err;
}
} else {
clk_disable_unprepare(tegra->clock_tsensor);
clk_disable_unprepare(tegra->clock_soctherm);
}
reset_control_deassert(tegra->reset);
return 0;
}
static int throt_get_cdev_max_state(struct thermal_cooling_device *cdev,
unsigned long *max_state)
{
*max_state = 1;
return 0;
}
static int throt_get_cdev_cur_state(struct thermal_cooling_device *cdev,
unsigned long *cur_state)
{
struct tegra_soctherm *ts = cdev->devdata;
u32 r;
r = readl(ts->regs + THROT_STATUS);
if (REG_GET_MASK(r, THROT_STATUS_STATE_MASK))
*cur_state = 1;
else
*cur_state = 0;
return 0;
}
static int throt_set_cdev_state(struct thermal_cooling_device *cdev,
unsigned long cur_state)
{
return 0;
}
static const struct thermal_cooling_device_ops throt_cooling_ops = {
.get_max_state = throt_get_cdev_max_state,
.get_cur_state = throt_get_cdev_cur_state,
.set_cur_state = throt_set_cdev_state,
};
static int soctherm_thermtrips_parse(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct tegra_soctherm *ts = dev_get_drvdata(dev);
struct tsensor_group_thermtrips *tt = ts->soc->thermtrips;
const int max_num_prop = ts->soc->num_ttgs * 2;
u32 *tlb;
int i, j, n, ret;
if (!tt)
return -ENOMEM;
n = of_property_count_u32_elems(dev->of_node, "nvidia,thermtrips");
if (n <= 0) {
dev_info(dev,
"missing thermtrips, will use critical trips as shut down temp\n");
return n;
}
n = min(max_num_prop, n);
tlb = devm_kcalloc(&pdev->dev, max_num_prop, sizeof(u32), GFP_KERNEL);
if (!tlb)
return -ENOMEM;
ret = of_property_read_u32_array(dev->of_node, "nvidia,thermtrips",
tlb, n);
if (ret) {
dev_err(dev, "invalid num ele: thermtrips:%d\n", ret);
return ret;
}
i = 0;
for (j = 0; j < n; j = j + 2) {
if (tlb[j] >= TEGRA124_SOCTHERM_SENSOR_NUM)
continue;
tt[i].id = tlb[j];
tt[i].temp = tlb[j + 1];
i++;
}
return 0;
}
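/*
 * Hedged illustration of the property parsed above (values are
 * hypothetical): a device-tree entry such as
 *
 *	nvidia,thermtrips = <TEGRA124_SOCTHERM_SENSOR_CPU 102500
 *			     TEGRA124_SOCTHERM_SENSOR_GPU 103000>;
 *
 * is read as (sensor id, trip temperature in millicelsius) pairs;
 * pairs with an out-of-range sensor id are skipped.
 */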
static void soctherm_oc_cfg_parse(struct device *dev,
struct device_node *np_oc,
struct soctherm_throt_cfg *stc)
{
u32 val;
if (of_property_read_bool(np_oc, "nvidia,polarity-active-low"))
stc->oc_cfg.active_low = 1;
else
stc->oc_cfg.active_low = 0;
if (!of_property_read_u32(np_oc, "nvidia,count-threshold", &val)) {
stc->oc_cfg.intr_en = 1;
stc->oc_cfg.alarm_cnt_thresh = val;
}
if (!of_property_read_u32(np_oc, "nvidia,throttle-period-us", &val))
stc->oc_cfg.throt_period = val;
if (!of_property_read_u32(np_oc, "nvidia,alarm-filter", &val))
stc->oc_cfg.alarm_filter = val;
/* BRIEF throttling by default, do not support STICKY */
stc->oc_cfg.mode = OC_THROTTLE_MODE_BRIEF;
}
static int soctherm_throt_cfg_parse(struct device *dev,
struct device_node *np,
struct soctherm_throt_cfg *stc)
{
struct tegra_soctherm *ts = dev_get_drvdata(dev);
int ret;
u32 val;
ret = of_property_read_u32(np, "nvidia,priority", &val);
if (ret) {
dev_err(dev, "throttle-cfg: %s: invalid priority\n", stc->name);
return -EINVAL;
}
stc->priority = val;
ret = of_property_read_u32(np, ts->soc->use_ccroc ?
"nvidia,cpu-throt-level" :
"nvidia,cpu-throt-percent", &val);
if (!ret) {
if (ts->soc->use_ccroc &&
val <= TEGRA_SOCTHERM_THROT_LEVEL_HIGH)
stc->cpu_throt_level = val;
else if (!ts->soc->use_ccroc && val <= 100)
stc->cpu_throt_depth = val;
else
goto err;
} else {
goto err;
}
ret = of_property_read_u32(np, "nvidia,gpu-throt-level", &val);
if (!ret && val <= TEGRA_SOCTHERM_THROT_LEVEL_HIGH)
stc->gpu_throt_level = val;
else
goto err;
return 0;
err:
dev_err(dev, "throttle-cfg: %s: no throt prop or invalid prop\n",
stc->name);
return -EINVAL;
}
/**
* soctherm_init_hw_throt_cdev() - Parse the HW throttle configurations
* and register them as cooling devices.
* @pdev: Pointer to platform_device struct
*/
static void soctherm_init_hw_throt_cdev(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct tegra_soctherm *ts = dev_get_drvdata(dev);
struct device_node *np_stc, *np_stcc;
const char *name;
int i;
for (i = 0; i < THROTTLE_SIZE; i++) {
ts->throt_cfgs[i].name = throt_names[i];
ts->throt_cfgs[i].id = i;
ts->throt_cfgs[i].init = false;
}
np_stc = of_get_child_by_name(dev->of_node, "throttle-cfgs");
if (!np_stc) {
dev_info(dev,
"throttle-cfg: no throttle-cfgs - not enabling\n");
return;
}
for_each_child_of_node(np_stc, np_stcc) {
struct soctherm_throt_cfg *stc;
struct thermal_cooling_device *tcd;
int err;
name = np_stcc->name;
stc = find_throttle_cfg_by_name(ts, name);
if (!stc) {
dev_err(dev,
"throttle-cfg: could not find %s\n", name);
continue;
}
if (stc->init) {
dev_err(dev, "throttle-cfg: %s: redefined!\n", name);
of_node_put(np_stcc);
break;
}
err = soctherm_throt_cfg_parse(dev, np_stcc, stc);
if (err)
continue;
if (stc->id >= THROTTLE_OC1) {
soctherm_oc_cfg_parse(dev, np_stcc, stc);
stc->init = true;
} else {
tcd = thermal_of_cooling_device_register(np_stcc,
(char *)name, ts,
&throt_cooling_ops);
if (IS_ERR_OR_NULL(tcd)) {
dev_err(dev,
"throttle-cfg: %s: failed to register cooling device\n",
name);
continue;
}
stc->cdev = tcd;
stc->init = true;
}
}
of_node_put(np_stc);
}
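/*
 * Hedged illustration of the "throttle-cfgs" layout parsed above
 * (child-node names must match the driver's throt_names[] table, e.g.
 * "heavy" or "oc1"; the values here are hypothetical):
 *
 *	throttle-cfgs {
 *		heavy {
 *			nvidia,priority = <100>;
 *			nvidia,cpu-throt-percent = <85>;
 *			nvidia,gpu-throt-level = <2>;
 *		};
 *		oc1 {
 *			nvidia,priority = <50>;
 *			nvidia,polarity-active-low;
 *			nvidia,count-threshold = <100>;
 *			nvidia,alarm-filter = <5100000>;
 *			nvidia,throttle-period-us = <0>;
 *			nvidia,cpu-throt-percent = <75>;
 *			nvidia,gpu-throt-level = <1>;
 *		};
 *	};
 */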
/**
* throttlectl_cpu_level_cfg() - programs CCROC NV_THERM level config
* @ts: pointer to a struct tegra_soctherm
* @level: describing the level LOW/MED/HIGH of throttling
*
* It's necessary to set up the CPU-local CCROC NV_THERM instance with
* the M/N values desired for each level. This function does this.
*
* This function pre-programs the CCROC NV_THERM levels in terms of
* pre-configured "Low", "Medium" or "Heavy" throttle levels which are
* mapped to THROT_LEVEL_LOW, THROT_LEVEL_MED and THROT_LEVEL_HVY.
*/
static void throttlectl_cpu_level_cfg(struct tegra_soctherm *ts, int level)
{
u8 depth, dividend;
u32 r;
switch (level) {
case TEGRA_SOCTHERM_THROT_LEVEL_LOW:
depth = 50;
break;
case TEGRA_SOCTHERM_THROT_LEVEL_MED:
depth = 75;
break;
case TEGRA_SOCTHERM_THROT_LEVEL_HIGH:
depth = 80;
break;
case TEGRA_SOCTHERM_THROT_LEVEL_NONE:
return;
default:
return;
}
dividend = THROT_DEPTH_DIVIDEND(depth);
/* setup PSKIP in ccroc nv_therm registers */
r = ccroc_readl(ts, CCROC_THROT_PSKIP_RAMP_CPU_REG(level));
r = REG_SET_MASK(r, CCROC_THROT_PSKIP_RAMP_DURATION_MASK, 0xff);
r = REG_SET_MASK(r, CCROC_THROT_PSKIP_RAMP_STEP_MASK, 0xf);
ccroc_writel(ts, r, CCROC_THROT_PSKIP_RAMP_CPU_REG(level));
r = ccroc_readl(ts, CCROC_THROT_PSKIP_CTRL_CPU_REG(level));
r = REG_SET_MASK(r, CCROC_THROT_PSKIP_CTRL_ENB_MASK, 1);
r = REG_SET_MASK(r, CCROC_THROT_PSKIP_CTRL_DIVIDEND_MASK, dividend);
r = REG_SET_MASK(r, CCROC_THROT_PSKIP_CTRL_DIVISOR_MASK, 0xff);
ccroc_writel(ts, r, CCROC_THROT_PSKIP_CTRL_CPU_REG(level));
}
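/*
 * Worked example for the depth -> dividend mapping above, assuming
 * THROT_DEPTH_DIVIDEND(depth) expands (elsewhere in this driver) to
 * ((256 * (100 - (depth)) / 100) - 1): a HIGH level of 80% depth gives
 * dividend = 256 * 20 / 100 - 1 = 50, so with a divisor of 0xff the
 * pulse skipper passes roughly 51 of every 256 CPU clock pulses.
 */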
/**
* throttlectl_cpu_level_select() - program CPU pulse skipper config
* @ts: pointer to a struct tegra_soctherm
* @throt: the LIGHT/HEAVY of throttle event id
*
* Pulse skippers are used to throttle clock frequencies. This
* function programs the pulse skippers based on @throt and platform
* data. This function is used on SoCs which have CPU-local pulse
* skipper control, such as T13x. It programs soctherm's interface to
* Denver:CCROC NV_THERM in terms of Low, Medium and HIGH throttling
* vectors. PSKIP_BYPASS mode is set as required per HW spec.
*/
static void throttlectl_cpu_level_select(struct tegra_soctherm *ts,
enum soctherm_throttle_id throt)
{
u32 r, throt_vect;
/* Denver:CCROC NV_THERM interface N:3 Mapping */
switch (ts->throt_cfgs[throt].cpu_throt_level) {
case TEGRA_SOCTHERM_THROT_LEVEL_LOW:
throt_vect = THROT_VECT_LOW;
break;
case TEGRA_SOCTHERM_THROT_LEVEL_MED:
throt_vect = THROT_VECT_MED;
break;
case TEGRA_SOCTHERM_THROT_LEVEL_HIGH:
throt_vect = THROT_VECT_HIGH;
break;
default:
throt_vect = THROT_VECT_NONE;
break;
}
r = readl(ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_CPU));
r = REG_SET_MASK(r, THROT_PSKIP_CTRL_ENABLE_MASK, 1);
r = REG_SET_MASK(r, THROT_PSKIP_CTRL_VECT_CPU_MASK, throt_vect);
r = REG_SET_MASK(r, THROT_PSKIP_CTRL_VECT2_CPU_MASK, throt_vect);
writel(r, ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_CPU));
/* bypass sequencer in soc_therm as it is programmed in ccroc */
r = REG_SET_MASK(0, THROT_PSKIP_RAMP_SEQ_BYPASS_MODE_MASK, 1);
writel(r, ts->regs + THROT_PSKIP_RAMP(throt, THROTTLE_DEV_CPU));
}
/**
* throttlectl_cpu_mn() - program CPU pulse skipper configuration
* @ts: pointer to a struct tegra_soctherm
* @throt: the LIGHT/HEAVY of throttle event id
*
* Pulse skippers are used to throttle clock frequencies. This
* function programs the pulse skippers based on @throt and platform
* data. This function is used for CPUs that have "remote" pulse
* skipper control, e.g., the CPU pulse skipper is controlled by the
* SOC_THERM IP block. (SOC_THERM is located outside the CPU
* complex.)
*/
static void throttlectl_cpu_mn(struct tegra_soctherm *ts,
enum soctherm_throttle_id throt)
{
u32 r;
int depth;
u8 dividend;
depth = ts->throt_cfgs[throt].cpu_throt_depth;
dividend = THROT_DEPTH_DIVIDEND(depth);
r = readl(ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_CPU));
r = REG_SET_MASK(r, THROT_PSKIP_CTRL_ENABLE_MASK, 1);
r = REG_SET_MASK(r, THROT_PSKIP_CTRL_DIVIDEND_MASK, dividend);
r = REG_SET_MASK(r, THROT_PSKIP_CTRL_DIVISOR_MASK, 0xff);
writel(r, ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_CPU));
r = readl(ts->regs + THROT_PSKIP_RAMP(throt, THROTTLE_DEV_CPU));
r = REG_SET_MASK(r, THROT_PSKIP_RAMP_DURATION_MASK, 0xff);
r = REG_SET_MASK(r, THROT_PSKIP_RAMP_STEP_MASK, 0xf);
writel(r, ts->regs + THROT_PSKIP_RAMP(throt, THROTTLE_DEV_CPU));
}
/**
* throttlectl_gpu_level_select() - selects throttling level for GPU
* @ts: pointer to a struct tegra_soctherm
* @throt: the LIGHT/HEAVY of throttle event id
*
* This function programs soctherm's interface to GK20a NV_THERM to select
* pre-configured "Low", "Medium" or "Heavy" throttle levels.
*/
static void throttlectl_gpu_level_select(struct tegra_soctherm *ts,
enum soctherm_throttle_id throt)
{
u32 r, level, throt_vect;
level = ts->throt_cfgs[throt].gpu_throt_level;
throt_vect = THROT_LEVEL_TO_DEPTH(level);
r = readl(ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_GPU));
r = REG_SET_MASK(r, THROT_PSKIP_CTRL_ENABLE_MASK, 1);
r = REG_SET_MASK(r, THROT_PSKIP_CTRL_VECT_GPU_MASK, throt_vect);
writel(r, ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_GPU));
}
static int soctherm_oc_cfg_program(struct tegra_soctherm *ts,
enum soctherm_throttle_id throt)
{
u32 r;
struct soctherm_oc_cfg *oc = &ts->throt_cfgs[throt].oc_cfg;
if (oc->mode == OC_THROTTLE_MODE_DISABLED)
return -EINVAL;
r = REG_SET_MASK(0, OC1_CFG_HW_RESTORE_MASK, 1);
r = REG_SET_MASK(r, OC1_CFG_THROTTLE_MODE_MASK, oc->mode);
r = REG_SET_MASK(r, OC1_CFG_ALARM_POLARITY_MASK, oc->active_low);
r = REG_SET_MASK(r, OC1_CFG_EN_THROTTLE_MASK, 1);
writel(r, ts->regs + ALARM_CFG(throt));
writel(oc->throt_period, ts->regs + ALARM_THROTTLE_PERIOD(throt));
writel(oc->alarm_cnt_thresh, ts->regs + ALARM_CNT_THRESHOLD(throt));
writel(oc->alarm_filter, ts->regs + ALARM_FILTER(throt));
soctherm_oc_intr_enable(ts, throt, oc->intr_en);
return 0;
}
/**
* soctherm_throttle_program() - programs pulse skippers' configuration
* @ts: pointer to a struct tegra_soctherm
* @throt: the LIGHT/HEAVY of the throttle event id.
*
* Pulse skippers are used to throttle clock frequencies.
* This function programs the pulse skippers.
*/
static void soctherm_throttle_program(struct tegra_soctherm *ts,
enum soctherm_throttle_id throt)
{
u32 r;
struct soctherm_throt_cfg stc = ts->throt_cfgs[throt];
if (!stc.init)
return;
if ((throt >= THROTTLE_OC1) && (soctherm_oc_cfg_program(ts, throt)))
return;
/* Setup PSKIP parameters */
if (ts->soc->use_ccroc)
throttlectl_cpu_level_select(ts, throt);
else
throttlectl_cpu_mn(ts, throt);
throttlectl_gpu_level_select(ts, throt);
r = REG_SET_MASK(0, THROT_PRIORITY_LITE_PRIO_MASK, stc.priority);
writel(r, ts->regs + THROT_PRIORITY_CTRL(throt));
r = REG_SET_MASK(0, THROT_DELAY_LITE_DELAY_MASK, 0);
writel(r, ts->regs + THROT_DELAY_CTRL(throt));
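	/*
	 * The global priority lock is only ever raised: if another
	 * throttle configuration already holds an equal or higher
	 * priority, leave the lock untouched and return.
	 */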
r = readl(ts->regs + THROT_PRIORITY_LOCK);
r = REG_GET_MASK(r, THROT_PRIORITY_LOCK_PRIORITY_MASK);
if (r >= stc.priority)
return;
r = REG_SET_MASK(0, THROT_PRIORITY_LOCK_PRIORITY_MASK,
stc.priority);
writel(r, ts->regs + THROT_PRIORITY_LOCK);
}
static void tegra_soctherm_throttle(struct device *dev)
{
struct tegra_soctherm *ts = dev_get_drvdata(dev);
u32 v;
int i;
/* configure LOW, MED and HIGH levels for CCROC NV_THERM */
if (ts->soc->use_ccroc) {
throttlectl_cpu_level_cfg(ts, TEGRA_SOCTHERM_THROT_LEVEL_LOW);
throttlectl_cpu_level_cfg(ts, TEGRA_SOCTHERM_THROT_LEVEL_MED);
throttlectl_cpu_level_cfg(ts, TEGRA_SOCTHERM_THROT_LEVEL_HIGH);
}
/* Thermal HW throttle programming */
for (i = 0; i < THROTTLE_SIZE; i++)
soctherm_throttle_program(ts, i);
v = REG_SET_MASK(0, THROT_GLOBAL_ENB_MASK, 1);
if (ts->soc->use_ccroc) {
ccroc_writel(ts, v, CCROC_GLOBAL_CFG);
v = ccroc_readl(ts, CCROC_SUPER_CCLKG_DIVIDER);
v = REG_SET_MASK(v, CDIVG_USE_THERM_CONTROLS_MASK, 1);
ccroc_writel(ts, v, CCROC_SUPER_CCLKG_DIVIDER);
} else {
writel(v, ts->regs + THROT_GLOBAL_CFG);
v = readl(ts->clk_regs + CAR_SUPER_CCLKG_DIVIDER);
v = REG_SET_MASK(v, CDIVG_USE_THERM_CONTROLS_MASK, 1);
writel(v, ts->clk_regs + CAR_SUPER_CCLKG_DIVIDER);
}
/* initialize stats collection */
v = STATS_CTL_CLR_DN | STATS_CTL_EN_DN |
STATS_CTL_CLR_UP | STATS_CTL_EN_UP;
writel(v, ts->regs + THERMCTL_STATS_CTL);
}
static int soctherm_interrupts_init(struct platform_device *pdev,
struct tegra_soctherm *tegra)
{
struct device_node *np = pdev->dev.of_node;
int ret;
ret = soctherm_oc_int_init(np, TEGRA_SOC_OC_IRQ_MAX);
if (ret < 0) {
dev_err(&pdev->dev, "soctherm_oc_int_init failed\n");
return ret;
}
tegra->thermal_irq = platform_get_irq(pdev, 0);
if (tegra->thermal_irq < 0) {
dev_dbg(&pdev->dev, "get 'thermal_irq' failed.\n");
return 0;
}
tegra->edp_irq = platform_get_irq(pdev, 1);
if (tegra->edp_irq < 0) {
dev_dbg(&pdev->dev, "get 'edp_irq' failed.\n");
return 0;
}
ret = devm_request_threaded_irq(&pdev->dev,
tegra->thermal_irq,
soctherm_thermal_isr,
soctherm_thermal_isr_thread,
IRQF_ONESHOT,
dev_name(&pdev->dev),
tegra);
if (ret < 0) {
dev_err(&pdev->dev, "request_irq 'thermal_irq' failed.\n");
return ret;
}
ret = devm_request_threaded_irq(&pdev->dev,
tegra->edp_irq,
soctherm_edp_isr,
soctherm_edp_isr_thread,
IRQF_ONESHOT,
"soctherm_edp",
tegra);
if (ret < 0) {
dev_err(&pdev->dev, "request_irq 'edp_irq' failed.\n");
return ret;
}
return 0;
}
static void soctherm_init(struct platform_device *pdev)
{
struct tegra_soctherm *tegra = platform_get_drvdata(pdev);
const struct tegra_tsensor_group **ttgs = tegra->soc->ttgs;
int i;
u32 pdiv, hotspot;
/* Initialize raw sensors */
for (i = 0; i < tegra->soc->num_tsensors; ++i)
enable_tsensor(tegra, i);
/* program pdiv and hotspot offsets per THERM */
pdiv = readl(tegra->regs + SENSOR_PDIV);
hotspot = readl(tegra->regs + SENSOR_HOTSPOT_OFF);
for (i = 0; i < tegra->soc->num_ttgs; ++i) {
pdiv = REG_SET_MASK(pdiv, ttgs[i]->pdiv_mask,
ttgs[i]->pdiv);
/* hotspot offset from PLLX, doesn't need to configure PLLX */
if (ttgs[i]->id == TEGRA124_SOCTHERM_SENSOR_PLLX)
continue;
hotspot = REG_SET_MASK(hotspot,
ttgs[i]->pllx_hotspot_mask,
ttgs[i]->pllx_hotspot_diff);
}
writel(pdiv, tegra->regs + SENSOR_PDIV);
writel(hotspot, tegra->regs + SENSOR_HOTSPOT_OFF);
/* Configure hw throttle */
tegra_soctherm_throttle(&pdev->dev);
}
static const struct of_device_id tegra_soctherm_of_match[] = {
#ifdef CONFIG_ARCH_TEGRA_124_SOC
{
.compatible = "nvidia,tegra124-soctherm",
.data = &tegra124_soctherm,
},
#endif
#ifdef CONFIG_ARCH_TEGRA_132_SOC
{
.compatible = "nvidia,tegra132-soctherm",
.data = &tegra132_soctherm,
},
#endif
#ifdef CONFIG_ARCH_TEGRA_210_SOC
{
.compatible = "nvidia,tegra210-soctherm",
.data = &tegra210_soctherm,
},
#endif
{ },
};
MODULE_DEVICE_TABLE(of, tegra_soctherm_of_match);
static int tegra_soctherm_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
struct tegra_soctherm *tegra;
struct thermal_zone_device *z;
struct tsensor_shared_calib shared_calib;
struct tegra_soctherm_soc *soc;
unsigned int i;
int err;
match = of_match_node(tegra_soctherm_of_match, pdev->dev.of_node);
if (!match)
return -ENODEV;
soc = (struct tegra_soctherm_soc *)match->data;
if (soc->num_ttgs > TEGRA124_SOCTHERM_SENSOR_NUM)
return -EINVAL;
tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
if (!tegra)
return -ENOMEM;
mutex_init(&tegra->thermctl_lock);
dev_set_drvdata(&pdev->dev, tegra);
tegra->soc = soc;
tegra->regs = devm_platform_ioremap_resource_byname(pdev, "soctherm-reg");
if (IS_ERR(tegra->regs)) {
dev_err(&pdev->dev, "can't get soctherm registers");
return PTR_ERR(tegra->regs);
}
if (!tegra->soc->use_ccroc) {
tegra->clk_regs = devm_platform_ioremap_resource_byname(pdev, "car-reg");
if (IS_ERR(tegra->clk_regs)) {
dev_err(&pdev->dev, "can't get car clk registers");
return PTR_ERR(tegra->clk_regs);
}
} else {
tegra->ccroc_regs = devm_platform_ioremap_resource_byname(pdev, "ccroc-reg");
if (IS_ERR(tegra->ccroc_regs)) {
dev_err(&pdev->dev, "can't get ccroc registers");
return PTR_ERR(tegra->ccroc_regs);
}
}
tegra->reset = devm_reset_control_get(&pdev->dev, "soctherm");
if (IS_ERR(tegra->reset)) {
dev_err(&pdev->dev, "can't get soctherm reset\n");
return PTR_ERR(tegra->reset);
}
tegra->clock_tsensor = devm_clk_get(&pdev->dev, "tsensor");
if (IS_ERR(tegra->clock_tsensor)) {
dev_err(&pdev->dev, "can't get tsensor clock\n");
return PTR_ERR(tegra->clock_tsensor);
}
tegra->clock_soctherm = devm_clk_get(&pdev->dev, "soctherm");
if (IS_ERR(tegra->clock_soctherm)) {
dev_err(&pdev->dev, "can't get soctherm clock\n");
return PTR_ERR(tegra->clock_soctherm);
}
tegra->calib = devm_kcalloc(&pdev->dev,
soc->num_tsensors, sizeof(u32),
GFP_KERNEL);
if (!tegra->calib)
return -ENOMEM;
/* calculate shared calibration data */
err = tegra_calc_shared_calib(soc->tfuse, &shared_calib);
if (err)
return err;
/* calculate tsensor calibration data */
for (i = 0; i < soc->num_tsensors; ++i) {
err = tegra_calc_tsensor_calib(&soc->tsensors[i],
&shared_calib,
&tegra->calib[i]);
if (err)
return err;
}
tegra->thermctl_tzs = devm_kcalloc(&pdev->dev,
soc->num_ttgs, sizeof(z),
GFP_KERNEL);
if (!tegra->thermctl_tzs)
return -ENOMEM;
err = soctherm_clk_enable(pdev, true);
if (err)
return err;
soctherm_thermtrips_parse(pdev);
soctherm_init_hw_throt_cdev(pdev);
soctherm_init(pdev);
for (i = 0; i < soc->num_ttgs; ++i) {
struct tegra_thermctl_zone *zone =
devm_kzalloc(&pdev->dev, sizeof(*zone), GFP_KERNEL);
if (!zone) {
err = -ENOMEM;
goto disable_clocks;
}
zone->reg = tegra->regs + soc->ttgs[i]->sensor_temp_offset;
zone->dev = &pdev->dev;
zone->sg = soc->ttgs[i];
zone->ts = tegra;
z = devm_thermal_of_zone_register(&pdev->dev,
soc->ttgs[i]->id, zone,
&tegra_of_thermal_ops);
if (IS_ERR(z)) {
err = PTR_ERR(z);
dev_err(&pdev->dev, "failed to register sensor: %d\n",
err);
goto disable_clocks;
}
zone->tz = z;
tegra->thermctl_tzs[soc->ttgs[i]->id] = z;
/* Configure hw trip points */
err = tegra_soctherm_set_hwtrips(&pdev->dev, soc->ttgs[i], z);
if (err)
goto disable_clocks;
}
err = soctherm_interrupts_init(pdev, tegra);
soctherm_debug_init(pdev);
return 0;
disable_clocks:
soctherm_clk_enable(pdev, false);
return err;
}
static int tegra_soctherm_remove(struct platform_device *pdev)
{
struct tegra_soctherm *tegra = platform_get_drvdata(pdev);
debugfs_remove_recursive(tegra->debugfs_dir);
soctherm_clk_enable(pdev, false);
return 0;
}
static int __maybe_unused soctherm_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
soctherm_clk_enable(pdev, false);
return 0;
}
static int __maybe_unused soctherm_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct tegra_soctherm *tegra = platform_get_drvdata(pdev);
struct tegra_soctherm_soc *soc = tegra->soc;
int err, i;
err = soctherm_clk_enable(pdev, true);
if (err) {
dev_err(&pdev->dev,
"Resume failed: enable clocks failed\n");
return err;
}
soctherm_init(pdev);
for (i = 0; i < soc->num_ttgs; ++i) {
struct thermal_zone_device *tz;
tz = tegra->thermctl_tzs[soc->ttgs[i]->id];
err = tegra_soctherm_set_hwtrips(dev, soc->ttgs[i], tz);
if (err) {
dev_err(&pdev->dev,
"Resume failed: set hwtrips failed\n");
return err;
}
}
return 0;
}
static SIMPLE_DEV_PM_OPS(tegra_soctherm_pm, soctherm_suspend, soctherm_resume);
static struct platform_driver tegra_soctherm_driver = {
.probe = tegra_soctherm_probe,
.remove = tegra_soctherm_remove,
.driver = {
.name = "tegra_soctherm",
.pm = &tegra_soctherm_pm,
.of_match_table = tegra_soctherm_of_match,
},
};
module_platform_driver(tegra_soctherm_driver);
MODULE_AUTHOR("Mikko Perttunen <[email protected]>");
MODULE_DESCRIPTION("NVIDIA Tegra SOCTHERM thermal management driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/thermal/tegra/soctherm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Tegra30 SoC Thermal Sensor driver
*
* Based on downstream HWMON driver from NVIDIA.
* Copyright (C) 2011 NVIDIA Corporation
*
* Author: Dmitry Osipenko <[email protected]>
* Copyright (C) 2021 GRATE-DRIVER project
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/math.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/thermal.h>
#include <linux/types.h>
#include <soc/tegra/fuse.h>
#include "../thermal_hwmon.h"
#define TSENSOR_SENSOR0_CONFIG0 0x0
#define TSENSOR_SENSOR0_CONFIG0_SENSOR_STOP BIT(0)
#define TSENSOR_SENSOR0_CONFIG0_HW_FREQ_DIV_EN BIT(1)
#define TSENSOR_SENSOR0_CONFIG0_THERMAL_RST_EN BIT(2)
#define TSENSOR_SENSOR0_CONFIG0_DVFS_EN BIT(3)
#define TSENSOR_SENSOR0_CONFIG0_INTR_OVERFLOW_EN BIT(4)
#define TSENSOR_SENSOR0_CONFIG0_INTR_HW_FREQ_DIV_EN BIT(5)
#define TSENSOR_SENSOR0_CONFIG0_INTR_THERMAL_RST_EN BIT(6)
#define TSENSOR_SENSOR0_CONFIG0_M GENMASK(23, 8)
#define TSENSOR_SENSOR0_CONFIG0_N GENMASK(31, 24)
#define TSENSOR_SENSOR0_CONFIG1 0x8
#define TSENSOR_SENSOR0_CONFIG1_TH1 GENMASK(15, 0)
#define TSENSOR_SENSOR0_CONFIG1_TH2 GENMASK(31, 16)
#define TSENSOR_SENSOR0_CONFIG2 0xc
#define TSENSOR_SENSOR0_CONFIG2_TH3 GENMASK(15, 0)
#define TSENSOR_SENSOR0_STATUS0 0x18
#define TSENSOR_SENSOR0_STATUS0_STATE GENMASK(2, 0)
#define TSENSOR_SENSOR0_STATUS0_INTR BIT(8)
#define TSENSOR_SENSOR0_STATUS0_CURRENT_VALID BIT(9)
#define TSENSOR_SENSOR0_TS_STATUS1 0x1c
#define TSENSOR_SENSOR0_TS_STATUS1_CURRENT_COUNT GENMASK(31, 16)
#define TEGRA30_FUSE_TEST_PROG_VER 0x28
#define TEGRA30_FUSE_TSENSOR_CALIB 0x98
#define TEGRA30_FUSE_TSENSOR_CALIB_LOW GENMASK(15, 0)
#define TEGRA30_FUSE_TSENSOR_CALIB_HIGH GENMASK(31, 16)
#define TEGRA30_FUSE_SPARE_BIT 0x144
struct tegra_tsensor;
struct tegra_tsensor_calibration_data {
int a, b, m, n, p, r;
};
struct tegra_tsensor_channel {
void __iomem *regs;
unsigned int id;
struct tegra_tsensor *ts;
struct thermal_zone_device *tzd;
};
struct tegra_tsensor {
void __iomem *regs;
bool swap_channels;
struct clk *clk;
struct device *dev;
struct reset_control *rst;
struct tegra_tsensor_channel ch[2];
struct tegra_tsensor_calibration_data calib;
};
static int tegra_tsensor_hw_enable(const struct tegra_tsensor *ts)
{
u32 val;
int err;
err = reset_control_assert(ts->rst);
if (err) {
dev_err(ts->dev, "failed to assert hardware reset: %d\n", err);
return err;
}
err = clk_prepare_enable(ts->clk);
if (err) {
dev_err(ts->dev, "failed to enable clock: %d\n", err);
return err;
}
fsleep(1000);
err = reset_control_deassert(ts->rst);
if (err) {
dev_err(ts->dev, "failed to deassert hardware reset: %d\n", err);
goto disable_clk;
}
/*
	 * Sensors are enabled after reset by default, but do not start
	 * gauging until the clock counter is programmed.
*
* M: number of reference clock pulses after which every
* temperature / voltage measurement is made
*
* N: number of reference clock counts for which the counter runs
*/
val = FIELD_PREP(TSENSOR_SENSOR0_CONFIG0_M, 12500);
val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG0_N, 255);
/* apply the same configuration to both channels */
writel_relaxed(val, ts->regs + 0x40 + TSENSOR_SENSOR0_CONFIG0);
writel_relaxed(val, ts->regs + 0x80 + TSENSOR_SENSOR0_CONFIG0);
return 0;
disable_clk:
clk_disable_unprepare(ts->clk);
return err;
}
static int tegra_tsensor_hw_disable(const struct tegra_tsensor *ts)
{
int err;
err = reset_control_assert(ts->rst);
if (err) {
dev_err(ts->dev, "failed to assert hardware reset: %d\n", err);
return err;
}
clk_disable_unprepare(ts->clk);
return 0;
}
static void devm_tegra_tsensor_hw_disable(void *data)
{
const struct tegra_tsensor *ts = data;
tegra_tsensor_hw_disable(ts);
}
static int tegra_tsensor_get_temp(struct thermal_zone_device *tz, int *temp)
{
const struct tegra_tsensor_channel *tsc = thermal_zone_device_priv(tz);
const struct tegra_tsensor *ts = tsc->ts;
int err, c1, c2, c3, c4, counter;
u32 val;
/*
	 * The counter will be invalid if the hardware is misprogrammed or not
	 * enough time has passed since the sensor was enabled.
*/
err = readl_relaxed_poll_timeout(tsc->regs + TSENSOR_SENSOR0_STATUS0, val,
val & TSENSOR_SENSOR0_STATUS0_CURRENT_VALID,
21 * USEC_PER_MSEC,
21 * USEC_PER_MSEC * 50);
if (err) {
dev_err_once(ts->dev, "ch%u: counter invalid\n", tsc->id);
return err;
}
val = readl_relaxed(tsc->regs + TSENSOR_SENSOR0_TS_STATUS1);
counter = FIELD_GET(TSENSOR_SENSOR0_TS_STATUS1_CURRENT_COUNT, val);
/*
* This shouldn't happen with a valid counter status, nevertheless
* lets verify the value since it's in a separate (from status)
* register.
*/
if (counter == 0xffff) {
dev_err_once(ts->dev, "ch%u: counter overflow\n", tsc->id);
return -EINVAL;
}
/*
* temperature = a * counter + b
* temperature = m * (temperature ^ 2) + n * temperature + p
*/
c1 = DIV_ROUND_CLOSEST(ts->calib.a * counter + ts->calib.b, 1000000);
c1 = c1 ?: 1;
c2 = DIV_ROUND_CLOSEST(ts->calib.p, c1);
c3 = c1 * ts->calib.m;
c4 = ts->calib.n;
*temp = DIV_ROUND_CLOSEST(c1 * (c2 + c3 + c4), 1000);
return 0;
}
static int tegra_tsensor_temp_to_counter(const struct tegra_tsensor *ts, int temp)
{
int c1, c2;
c1 = DIV_ROUND_CLOSEST(ts->calib.p - temp * 1000, ts->calib.m);
c2 = -ts->calib.r - int_sqrt(ts->calib.r * ts->calib.r - c1);
return DIV_ROUND_CLOSEST(c2 * 1000000 - ts->calib.b, ts->calib.a);
}
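/*
 * Math sketch for the two helpers above (coefficients are the
 * premultiplied values set up in tegra_tsensor_nvmem_setup()):
 *
 *	T_lin = (a * counter + b) / 1000000	(degrees, c1 above)
 *	T     = m * T_lin^2 + n * T_lin + p	(microdegrees)
 *
 * tegra_tsensor_get_temp() factors this as c1 * (p / c1 + m * c1 + n)
 * to stay within 32-bit integer range, then divides by 1000 to get
 * millidegrees. tegra_tsensor_temp_to_counter() inverts the quadratic
 * using the reduced coefficient r = n / (2 * m):
 *
 *	T_lin = -r - sqrt(r^2 - (p - T) / m)
 *
 * and maps T_lin back to a counter via (T_lin * 1000000 - b) / a.
 */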
static int tegra_tsensor_set_trips(struct thermal_zone_device *tz, int low, int high)
{
const struct tegra_tsensor_channel *tsc = thermal_zone_device_priv(tz);
const struct tegra_tsensor *ts = tsc->ts;
u32 val;
/*
* TSENSOR doesn't trigger interrupt on the "low" temperature breach,
* hence bail out if high temperature is unspecified.
*/
if (high == INT_MAX)
return 0;
val = readl_relaxed(tsc->regs + TSENSOR_SENSOR0_CONFIG1);
val &= ~TSENSOR_SENSOR0_CONFIG1_TH1;
high = tegra_tsensor_temp_to_counter(ts, high);
val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG1_TH1, high);
writel_relaxed(val, tsc->regs + TSENSOR_SENSOR0_CONFIG1);
return 0;
}
static const struct thermal_zone_device_ops ops = {
.get_temp = tegra_tsensor_get_temp,
.set_trips = tegra_tsensor_set_trips,
};
static bool
tegra_tsensor_handle_channel_interrupt(const struct tegra_tsensor *ts,
unsigned int id)
{
const struct tegra_tsensor_channel *tsc = &ts->ch[id];
u32 val;
val = readl_relaxed(tsc->regs + TSENSOR_SENSOR0_STATUS0);
writel_relaxed(val, tsc->regs + TSENSOR_SENSOR0_STATUS0);
if (FIELD_GET(TSENSOR_SENSOR0_STATUS0_STATE, val) == 5)
dev_err_ratelimited(ts->dev, "ch%u: counter overflowed\n", id);
if (!FIELD_GET(TSENSOR_SENSOR0_STATUS0_INTR, val))
return false;
thermal_zone_device_update(tsc->tzd, THERMAL_EVENT_UNSPECIFIED);
return true;
}
static irqreturn_t tegra_tsensor_isr(int irq, void *data)
{
const struct tegra_tsensor *ts = data;
bool handled = false;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(ts->ch); i++)
handled |= tegra_tsensor_handle_channel_interrupt(ts, i);
return handled ? IRQ_HANDLED : IRQ_NONE;
}
static int tegra_tsensor_disable_hw_channel(const struct tegra_tsensor *ts,
unsigned int id)
{
const struct tegra_tsensor_channel *tsc = &ts->ch[id];
struct thermal_zone_device *tzd = tsc->tzd;
u32 val;
int err;
if (!tzd)
goto stop_channel;
err = thermal_zone_device_disable(tzd);
if (err) {
dev_err(ts->dev, "ch%u: failed to disable zone: %d\n", id, err);
return err;
}
stop_channel:
/* stop channel gracefully */
val = readl_relaxed(tsc->regs + TSENSOR_SENSOR0_CONFIG0);
val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG0_SENSOR_STOP, 1);
writel_relaxed(val, tsc->regs + TSENSOR_SENSOR0_CONFIG0);
return 0;
}
static void tegra_tsensor_get_hw_channel_trips(struct thermal_zone_device *tzd,
int *hot_trip, int *crit_trip)
{
unsigned int i;
/*
* 90C is the maximal critical temperature of all Tegra30 SoC variants,
	 * so use it for the default trips if unspecified in the device-tree.
*/
*hot_trip = 85000;
*crit_trip = 90000;
for (i = 0; i < thermal_zone_get_num_trips(tzd); i++) {
struct thermal_trip trip;
thermal_zone_get_trip(tzd, i, &trip);
if (trip.type == THERMAL_TRIP_HOT)
*hot_trip = trip.temperature;
if (trip.type == THERMAL_TRIP_CRITICAL)
*crit_trip = trip.temperature;
}
/* clamp hardware trips to the calibration limits */
*hot_trip = clamp(*hot_trip, 25000, 90000);
/*
	 * The kernel will perform a normal system shutdown once it sees
	 * that the critical temperature is breached, hence set the
	 * hardware limit 5C higher in order to allow the system to
	 * shut down gracefully before the signal is sent to the Power
	 * Management controller.
*/
*crit_trip = clamp(*crit_trip + 5000, 25000, 90000);
}
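/*
 * Worked example: with a device-tree critical trip of 88000 mC the PMC
 * threshold above becomes clamp(93000, 25000, 90000) = 90000 mC, i.e.
 * the 5C headroom is capped by the 90C calibration limit.
 */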
static int tegra_tsensor_enable_hw_channel(const struct tegra_tsensor *ts,
unsigned int id)
{
const struct tegra_tsensor_channel *tsc = &ts->ch[id];
struct thermal_zone_device *tzd = tsc->tzd;
int err, hot_trip = 0, crit_trip = 0;
u32 val;
if (!tzd) {
val = readl_relaxed(tsc->regs + TSENSOR_SENSOR0_CONFIG0);
val &= ~TSENSOR_SENSOR0_CONFIG0_SENSOR_STOP;
writel_relaxed(val, tsc->regs + TSENSOR_SENSOR0_CONFIG0);
return 0;
}
tegra_tsensor_get_hw_channel_trips(tzd, &hot_trip, &crit_trip);
dev_info_once(ts->dev, "ch%u: PMC emergency shutdown trip set to %dC\n",
id, DIV_ROUND_CLOSEST(crit_trip, 1000));
hot_trip = tegra_tsensor_temp_to_counter(ts, hot_trip);
crit_trip = tegra_tsensor_temp_to_counter(ts, crit_trip);
/* program LEVEL2 counter threshold */
val = readl_relaxed(tsc->regs + TSENSOR_SENSOR0_CONFIG1);
val &= ~TSENSOR_SENSOR0_CONFIG1_TH2;
val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG1_TH2, hot_trip);
writel_relaxed(val, tsc->regs + TSENSOR_SENSOR0_CONFIG1);
/* program LEVEL3 counter threshold */
val = readl_relaxed(tsc->regs + TSENSOR_SENSOR0_CONFIG2);
val &= ~TSENSOR_SENSOR0_CONFIG2_TH3;
val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG2_TH3, crit_trip);
writel_relaxed(val, tsc->regs + TSENSOR_SENSOR0_CONFIG2);
/*
* Enable sensor, emergency shutdown, interrupts for level 1/2/3
* breaches and counter overflow condition.
*
* Disable DIV2 throttle for now since we need to figure out how
* to integrate it properly with the thermal framework.
*
* Thermal levels supported by hardware:
*
* Level 0 = cold
* Level 1 = passive cooling (cpufreq DVFS)
* Level 2 = passive cooling assisted by hardware (DIV2)
* Level 3 = emergency shutdown assisted by hardware (PMC)
*/
val = readl_relaxed(tsc->regs + TSENSOR_SENSOR0_CONFIG0);
val &= ~TSENSOR_SENSOR0_CONFIG0_SENSOR_STOP;
val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG0_DVFS_EN, 1);
val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG0_HW_FREQ_DIV_EN, 0);
val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG0_THERMAL_RST_EN, 1);
val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG0_INTR_OVERFLOW_EN, 1);
val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG0_INTR_HW_FREQ_DIV_EN, 1);
val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG0_INTR_THERMAL_RST_EN, 1);
writel_relaxed(val, tsc->regs + TSENSOR_SENSOR0_CONFIG0);
err = thermal_zone_device_enable(tzd);
if (err) {
dev_err(ts->dev, "ch%u: failed to enable zone: %d\n", id, err);
return err;
}
return 0;
}
static bool tegra_tsensor_fuse_read_spare(unsigned int spare)
{
u32 val = 0;
tegra_fuse_readl(TEGRA30_FUSE_SPARE_BIT + spare * 4, &val);
return !!val;
}
static int tegra_tsensor_nvmem_setup(struct tegra_tsensor *ts)
{
u32 i, ate_ver = 0, cal = 0, t1_25C = 0, t2_90C = 0;
int err, c1_25C, c2_90C;
err = tegra_fuse_readl(TEGRA30_FUSE_TEST_PROG_VER, &ate_ver);
if (err) {
dev_err_probe(ts->dev, err, "failed to get ATE version\n");
return err;
}
if (ate_ver < 8) {
dev_info(ts->dev, "unsupported ATE version: %u\n", ate_ver);
return -ENODEV;
}
/*
	 * We have two TSENSOR channels in two different spots on the SoC.
	 * The second channel provides more accurate data on older SoC
	 * versions, so use it as the primary channel.
*/
if (ate_ver <= 21) {
dev_info_once(ts->dev,
"older ATE version detected, channels remapped\n");
ts->swap_channels = true;
}
err = tegra_fuse_readl(TEGRA30_FUSE_TSENSOR_CALIB, &cal);
if (err) {
dev_err(ts->dev, "failed to get calibration data: %d\n", err);
return err;
}
/* get calibrated counter values for 25C/90C thresholds */
c1_25C = FIELD_GET(TEGRA30_FUSE_TSENSOR_CALIB_LOW, cal);
c2_90C = FIELD_GET(TEGRA30_FUSE_TSENSOR_CALIB_HIGH, cal);
/* and calibrated temperatures corresponding to the counter values */
for (i = 0; i < 7; i++) {
t1_25C |= tegra_tsensor_fuse_read_spare(14 + i) << i;
t1_25C |= tegra_tsensor_fuse_read_spare(21 + i) << i;
t2_90C |= tegra_tsensor_fuse_read_spare(0 + i) << i;
t2_90C |= tegra_tsensor_fuse_read_spare(7 + i) << i;
}
if (c2_90C - c1_25C <= t2_90C - t1_25C) {
dev_err(ts->dev, "invalid calibration data: %d %d %u %u\n",
c2_90C, c1_25C, t2_90C, t1_25C);
return -EINVAL;
}
/* all calibration coefficients are premultiplied by 1000000 */
ts->calib.a = DIV_ROUND_CLOSEST((t2_90C - t1_25C) * 1000000,
(c2_90C - c1_25C));
ts->calib.b = t1_25C * 1000000 - ts->calib.a * c1_25C;
if (tegra_sku_info.revision == TEGRA_REVISION_A01) {
ts->calib.m = -2775;
ts->calib.n = 1338811;
ts->calib.p = -7300000;
} else {
ts->calib.m = -3512;
ts->calib.n = 1528943;
ts->calib.p = -11100000;
}
/* except the coefficient of a reduced quadratic equation */
ts->calib.r = DIV_ROUND_CLOSEST(ts->calib.n, ts->calib.m * 2);
dev_info_once(ts->dev,
"calibration: %d %d %u %u ATE ver: %u SoC rev: %u\n",
c2_90C, c1_25C, t2_90C, t1_25C, ate_ver,
tegra_sku_info.revision);
return 0;
}
static int tegra_tsensor_register_channel(struct tegra_tsensor *ts,
unsigned int id)
{
struct tegra_tsensor_channel *tsc = &ts->ch[id];
unsigned int hw_id = ts->swap_channels ? !id : id;
tsc->ts = ts;
tsc->id = id;
tsc->regs = ts->regs + 0x40 * (hw_id + 1);
tsc->tzd = devm_thermal_of_zone_register(ts->dev, id, tsc, &ops);
if (IS_ERR(tsc->tzd)) {
if (PTR_ERR(tsc->tzd) != -ENODEV)
return dev_err_probe(ts->dev, PTR_ERR(tsc->tzd),
"failed to register thermal zone\n");
/*
* It's okay if sensor isn't assigned to any thermal zone
* in a device-tree.
*/
tsc->tzd = NULL;
return 0;
}
devm_thermal_add_hwmon_sysfs(ts->dev, tsc->tzd);
return 0;
}
static int tegra_tsensor_probe(struct platform_device *pdev)
{
struct tegra_tsensor *ts;
unsigned int i;
int err, irq;
ts = devm_kzalloc(&pdev->dev, sizeof(*ts), GFP_KERNEL);
if (!ts)
return -ENOMEM;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
ts->dev = &pdev->dev;
platform_set_drvdata(pdev, ts);
ts->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ts->regs))
return PTR_ERR(ts->regs);
ts->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(ts->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(ts->clk),
"failed to get clock\n");
ts->rst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (IS_ERR(ts->rst))
return dev_err_probe(&pdev->dev, PTR_ERR(ts->rst),
"failed to get reset control\n");
err = tegra_tsensor_nvmem_setup(ts);
if (err)
return err;
err = tegra_tsensor_hw_enable(ts);
if (err)
return err;
err = devm_add_action_or_reset(&pdev->dev,
devm_tegra_tsensor_hw_disable,
ts);
if (err)
return err;
for (i = 0; i < ARRAY_SIZE(ts->ch); i++) {
err = tegra_tsensor_register_channel(ts, i);
if (err)
return err;
}
/*
	 * Enable the channels before requesting the interrupt so that
	 * set_trips() cannot be called while we are setting up the
	 * TSENSOR_SENSOR0_CONFIG1 register. This closes a potential
	 * race window where we are setting up TH2 and the temperature
	 * hits TH1, resulting in an update of the
	 * TSENSOR_SENSOR0_CONFIG1 register in the ISR.
*/
for (i = 0; i < ARRAY_SIZE(ts->ch); i++) {
err = tegra_tsensor_enable_hw_channel(ts, i);
if (err)
return err;
}
err = devm_request_threaded_irq(&pdev->dev, irq, NULL,
tegra_tsensor_isr, IRQF_ONESHOT,
"tegra_tsensor", ts);
if (err)
return dev_err_probe(&pdev->dev, err,
"failed to request interrupt\n");
return 0;
}
static int __maybe_unused tegra_tsensor_suspend(struct device *dev)
{
struct tegra_tsensor *ts = dev_get_drvdata(dev);
unsigned int i;
int err;
for (i = 0; i < ARRAY_SIZE(ts->ch); i++) {
err = tegra_tsensor_disable_hw_channel(ts, i);
if (err)
goto enable_channel;
}
err = tegra_tsensor_hw_disable(ts);
if (err)
goto enable_channel;
return 0;
enable_channel:
while (i--)
tegra_tsensor_enable_hw_channel(ts, i);
return err;
}
static int __maybe_unused tegra_tsensor_resume(struct device *dev)
{
struct tegra_tsensor *ts = dev_get_drvdata(dev);
unsigned int i;
int err;
err = tegra_tsensor_hw_enable(ts);
if (err)
return err;
for (i = 0; i < ARRAY_SIZE(ts->ch); i++) {
err = tegra_tsensor_enable_hw_channel(ts, i);
if (err)
return err;
}
return 0;
}
static const struct dev_pm_ops tegra_tsensor_pm_ops = {
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_tsensor_suspend,
tegra_tsensor_resume)
};
static const struct of_device_id tegra_tsensor_of_match[] = {
{ .compatible = "nvidia,tegra30-tsensor", },
{},
};
MODULE_DEVICE_TABLE(of, tegra_tsensor_of_match);
static struct platform_driver tegra_tsensor_driver = {
.probe = tegra_tsensor_probe,
.driver = {
.name = "tegra30-tsensor",
.of_match_table = tegra_tsensor_of_match,
.pm = &tegra_tsensor_pm_ops,
},
};
module_platform_driver(tegra_tsensor_driver);
MODULE_DESCRIPTION("NVIDIA Tegra30 Thermal Sensor driver");
MODULE_AUTHOR("Dmitry Osipenko <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/thermal/tegra/tegra30-tsensor.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics 2018 - All Rights Reserved
* Author: David Hernandez Sanchez <[email protected]> for
* STMicroelectronics.
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/thermal.h>
#include "../thermal_hwmon.h"
/* DTS register offsets */
#define DTS_CFGR1_OFFSET 0x0
#define DTS_T0VALR1_OFFSET 0x8
#define DTS_RAMPVALR_OFFSET 0X10
#define DTS_ITR1_OFFSET 0x14
#define DTS_DR_OFFSET 0x1C
#define DTS_SR_OFFSET 0x20
#define DTS_ITENR_OFFSET 0x24
#define DTS_ICIFR_OFFSET 0x28
/* DTS_CFGR1 register mask definitions */
#define HSREF_CLK_DIV_MASK GENMASK(30, 24)
#define TS1_SMP_TIME_MASK GENMASK(19, 16)
#define TS1_INTRIG_SEL_MASK GENMASK(11, 8)
/* DTS_T0VALR1 register mask definitions */
#define TS1_T0_MASK GENMASK(17, 16)
#define TS1_FMT0_MASK GENMASK(15, 0)
/* DTS_RAMPVALR register mask definitions */
#define TS1_RAMP_COEFF_MASK GENMASK(15, 0)
/* DTS_ITR1 register mask definitions */
#define TS1_HITTHD_MASK GENMASK(31, 16)
#define TS1_LITTHD_MASK GENMASK(15, 0)
/* DTS_DR register mask definitions */
#define TS1_MFREQ_MASK GENMASK(15, 0)
/* DTS_ITENR register mask definitions */
#define ITENR_MASK (GENMASK(2, 0) | GENMASK(6, 4))
/* DTS_ICIFR register mask definitions */
#define ICIFR_MASK (GENMASK(2, 0) | GENMASK(6, 4))
/* Less significant bit position definitions */
#define TS1_T0_POS 16
#define TS1_HITTHD_POS 16
#define TS1_LITTHD_POS 0
#define HSREF_CLK_DIV_POS 24
/* DTS_CFGR1 bit definitions */
#define TS1_EN BIT(0)
#define TS1_START BIT(4)
#define REFCLK_SEL BIT(20)
#define REFCLK_LSE REFCLK_SEL
#define Q_MEAS_OPT BIT(21)
#define CALIBRATION_CONTROL Q_MEAS_OPT
/* DTS_SR bit definitions */
#define TS_RDY BIT(15)
/* Bit definitions below are common for DTS_SR, DTS_ITENR and DTS_CIFR */
#define HIGH_THRESHOLD BIT(2)
#define LOW_THRESHOLD BIT(1)
/* Constants */
#define ADJUST 100
#define ONE_MHZ 1000000
#define POLL_TIMEOUT 5000
#define STARTUP_TIME 40
#define TS1_T0_VAL0 30000 /* 30 celsius */
#define TS1_T0_VAL1 130000 /* 130 celsius */
#define NO_HW_TRIG 0
#define SAMPLING_TIME 15
struct stm_thermal_sensor {
struct device *dev;
struct thermal_zone_device *th_dev;
enum thermal_device_mode mode;
struct clk *clk;
unsigned int low_temp_enabled;
unsigned int high_temp_enabled;
int irq;
void __iomem *base;
int t0, fmt0, ramp_coeff;
};
static int stm_enable_irq(struct stm_thermal_sensor *sensor)
{
u32 value;
dev_dbg(sensor->dev, "low:%d high:%d\n", sensor->low_temp_enabled,
sensor->high_temp_enabled);
/* Disable IT generation for low and high thresholds */
value = readl_relaxed(sensor->base + DTS_ITENR_OFFSET);
value &= ~(LOW_THRESHOLD | HIGH_THRESHOLD);
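	/*
	 * The comparators operate on the sampled period count, which is
	 * inversely proportional to temperature (see
	 * stm_thermal_calculate_threshold()), so the low-temperature trip
	 * maps onto the HIGH threshold and the high-temperature trip onto
	 * the LOW one.
	 */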
if (sensor->low_temp_enabled)
value |= HIGH_THRESHOLD;
if (sensor->high_temp_enabled)
value |= LOW_THRESHOLD;
/* Enable interrupts */
writel_relaxed(value, sensor->base + DTS_ITENR_OFFSET);
return 0;
}
static irqreturn_t stm_thermal_irq_handler(int irq, void *sdata)
{
struct stm_thermal_sensor *sensor = sdata;
dev_dbg(sensor->dev, "sr:%d\n",
readl_relaxed(sensor->base + DTS_SR_OFFSET));
thermal_zone_device_update(sensor->th_dev, THERMAL_EVENT_UNSPECIFIED);
stm_enable_irq(sensor);
/* Acknowledge all DTS irqs */
writel_relaxed(ICIFR_MASK, sensor->base + DTS_ICIFR_OFFSET);
return IRQ_HANDLED;
}
static int stm_sensor_power_on(struct stm_thermal_sensor *sensor)
{
int ret;
u32 value;
/* Enable sensor */
value = readl_relaxed(sensor->base + DTS_CFGR1_OFFSET);
value |= TS1_EN;
writel_relaxed(value, sensor->base + DTS_CFGR1_OFFSET);
/*
* The DTS block can be enabled by setting TSx_EN bit in
* DTS_CFGRx register. It requires a startup time of
* 40μs. Use 5 ms as arbitrary timeout.
*/
ret = readl_poll_timeout(sensor->base + DTS_SR_OFFSET,
value, (value & TS_RDY),
STARTUP_TIME, POLL_TIMEOUT);
if (ret)
return ret;
/* Start continuous measuring */
value = readl_relaxed(sensor->base +
DTS_CFGR1_OFFSET);
value |= TS1_START;
writel_relaxed(value, sensor->base +
DTS_CFGR1_OFFSET);
sensor->mode = THERMAL_DEVICE_ENABLED;
return 0;
}
static int stm_sensor_power_off(struct stm_thermal_sensor *sensor)
{
u32 value;
sensor->mode = THERMAL_DEVICE_DISABLED;
/* Stop measuring */
value = readl_relaxed(sensor->base + DTS_CFGR1_OFFSET);
value &= ~TS1_START;
writel_relaxed(value, sensor->base + DTS_CFGR1_OFFSET);
/* Ensure stop is taken into account */
usleep_range(STARTUP_TIME, POLL_TIMEOUT);
/* Disable sensor */
value = readl_relaxed(sensor->base + DTS_CFGR1_OFFSET);
value &= ~TS1_EN;
writel_relaxed(value, sensor->base + DTS_CFGR1_OFFSET);
/* Ensure disable is taken into account */
return readl_poll_timeout(sensor->base + DTS_SR_OFFSET, value,
!(value & TS_RDY),
STARTUP_TIME, POLL_TIMEOUT);
}
static int stm_thermal_calibration(struct stm_thermal_sensor *sensor)
{
u32 value, clk_freq;
u32 prescaler;
/* Figure out prescaler value for PCLK during calibration */
clk_freq = clk_get_rate(sensor->clk);
if (!clk_freq)
return -EINVAL;
prescaler = 0;
clk_freq /= ONE_MHZ;
if (clk_freq) {
while (prescaler <= clk_freq)
prescaler++;
}
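	/*
	 * Illustrative example (hypothetical pclk rate): with pclk at
	 * 100 MHz, clk_freq becomes 100 and the loop exits with
	 * prescaler = 101, so the divided reference runs at
	 * 100 MHz / 101 ~= 990 kHz, just under the required 1 MHz.
	 */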
value = readl_relaxed(sensor->base + DTS_CFGR1_OFFSET);
/* Clear prescaler */
value &= ~HSREF_CLK_DIV_MASK;
/* Set prescaler. pclk_freq/prescaler < 1MHz */
value |= (prescaler << HSREF_CLK_DIV_POS);
/* Select PCLK as reference clock */
value &= ~REFCLK_SEL;
/* Set maximal sampling time for better precision */
value |= TS1_SMP_TIME_MASK;
/* Measure with calibration */
value &= ~CALIBRATION_CONTROL;
/* select trigger */
value &= ~TS1_INTRIG_SEL_MASK;
value |= NO_HW_TRIG;
writel_relaxed(value, sensor->base + DTS_CFGR1_OFFSET);
return 0;
}
/* Fill in DTS structure with factory sensor values */
static int stm_thermal_read_factory_settings(struct stm_thermal_sensor *sensor)
{
/* Retrieve engineering calibration temperature */
sensor->t0 = readl_relaxed(sensor->base + DTS_T0VALR1_OFFSET) &
TS1_T0_MASK;
if (!sensor->t0)
sensor->t0 = TS1_T0_VAL0;
else
sensor->t0 = TS1_T0_VAL1;
/* Retrieve fmt0 and put it on Hz */
sensor->fmt0 = ADJUST * (readl_relaxed(sensor->base +
DTS_T0VALR1_OFFSET) & TS1_FMT0_MASK);
/* Retrieve ramp coefficient */
sensor->ramp_coeff = readl_relaxed(sensor->base + DTS_RAMPVALR_OFFSET) &
TS1_RAMP_COEFF_MASK;
if (!sensor->fmt0 || !sensor->ramp_coeff) {
dev_err(sensor->dev, "%s: wrong setting\n", __func__);
return -EINVAL;
}
dev_dbg(sensor->dev, "%s: T0 = %doC, FMT0 = %dHz, RAMP_COEFF = %dHz/oC",
__func__, sensor->t0, sensor->fmt0, sensor->ramp_coeff);
return 0;
}
static int stm_thermal_calculate_threshold(struct stm_thermal_sensor *sensor,
int temp, u32 *th)
{
int freqM;
/* Figure out the CLK_PTAT frequency for a given temperature */
freqM = ((temp - sensor->t0) * sensor->ramp_coeff) / 1000 +
sensor->fmt0;
/* Figure out the threshold sample number */
*th = clk_get_rate(sensor->clk) * SAMPLING_TIME / freqM;
if (!*th)
return -EINVAL;
dev_dbg(sensor->dev, "freqM=%d Hz, threshold=0x%x", freqM, *th);
return 0;
}
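/*
 * Worked example for the threshold math above (all numbers are purely
 * illustrative, not taken from a real calibration): with t0 = 30000 m°C,
 * ramp_coeff = 700 Hz/°C, fmt0 = 6000000 Hz and a 100 MHz pclk, a trip
 * point of 60000 m°C gives freqM = (30000 * 700) / 1000 + 6000000 =
 * 6021000 Hz, and th = 100000000 * 15 / 6021000 ~= 249 samples.
 */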
/* Disable temperature interrupt */
static int stm_disable_irq(struct stm_thermal_sensor *sensor)
{
u32 value;
/* Disable IT generation */
value = readl_relaxed(sensor->base + DTS_ITENR_OFFSET);
value &= ~ITENR_MASK;
writel_relaxed(value, sensor->base + DTS_ITENR_OFFSET);
return 0;
}
static int stm_thermal_set_trips(struct thermal_zone_device *tz, int low, int high)
{
struct stm_thermal_sensor *sensor = thermal_zone_device_priv(tz);
u32 itr1, th;
int ret;
dev_dbg(sensor->dev, "set trips %d <--> %d\n", low, high);
/* Erase threshold content */
itr1 = readl_relaxed(sensor->base + DTS_ITR1_OFFSET);
itr1 &= ~(TS1_LITTHD_MASK | TS1_HITTHD_MASK);
/*
* Disable low-temp if "low" is too small. As per thermal framework
* API, we use -INT_MAX rather than INT_MIN.
*/
if (low > -INT_MAX) {
sensor->low_temp_enabled = 1;
/* add 0.5 degrees C (500 m°C) of hysteresis due to measurement error */
ret = stm_thermal_calculate_threshold(sensor, low - 500, &th);
if (ret)
return ret;
itr1 |= (TS1_HITTHD_MASK & (th << TS1_HITTHD_POS));
} else {
sensor->low_temp_enabled = 0;
}
/* Disable high-temp if "high" is too big. */
if (high < INT_MAX) {
sensor->high_temp_enabled = 1;
ret = stm_thermal_calculate_threshold(sensor, high, &th);
if (ret)
return ret;
itr1 |= (TS1_LITTHD_MASK & (th << TS1_LITTHD_POS));
} else {
sensor->high_temp_enabled = 0;
}
/* Write new threshold values */
writel_relaxed(itr1, sensor->base + DTS_ITR1_OFFSET);
return 0;
}
/* Callback to get temperature from HW */
static int stm_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
{
struct stm_thermal_sensor *sensor = thermal_zone_device_priv(tz);
u32 periods;
int freqM, ret;
if (sensor->mode != THERMAL_DEVICE_ENABLED)
return -EAGAIN;
/* Retrieve the number of periods sampled */
ret = readl_relaxed_poll_timeout(sensor->base + DTS_DR_OFFSET, periods,
(periods & TS1_MFREQ_MASK),
STARTUP_TIME, POLL_TIMEOUT);
if (ret)
return ret;
/* Figure out the CLK_PTAT frequency */
freqM = (clk_get_rate(sensor->clk) * SAMPLING_TIME) / periods;
if (!freqM)
return -EINVAL;
/* Figure out the temperature in millicelsius */
*temp = (freqM - sensor->fmt0) * 1000 / sensor->ramp_coeff + sensor->t0;
return 0;
}
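/*
 * Continuing the illustrative numbers used for the threshold example
 * above (t0 = 30000 m°C, ramp_coeff = 700 Hz/°C, fmt0 = 6000000 Hz,
 * pclk = 100 MHz): a readout of periods = 250 gives freqM =
 * 100000000 * 15 / 250 = 6000000 Hz = fmt0, so *temp comes out as
 * t0 = 30000 m°C - the inverse of the threshold computation.
 */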
/* Registers DTS irq to be visible by GIC */
static int stm_register_irq(struct stm_thermal_sensor *sensor)
{
struct device *dev = sensor->dev;
struct platform_device *pdev = to_platform_device(dev);
int ret;
sensor->irq = platform_get_irq(pdev, 0);
if (sensor->irq < 0)
return sensor->irq;
ret = devm_request_threaded_irq(dev, sensor->irq,
NULL,
stm_thermal_irq_handler,
IRQF_ONESHOT,
dev->driver->name, sensor);
if (ret) {
dev_err(dev, "%s: Failed to register IRQ %d\n", __func__,
sensor->irq);
return ret;
}
dev_dbg(dev, "%s: thermal IRQ registered", __func__);
return 0;
}
static int stm_thermal_sensor_off(struct stm_thermal_sensor *sensor)
{
int ret;
stm_disable_irq(sensor);
ret = stm_sensor_power_off(sensor);
if (ret)
return ret;
clk_disable_unprepare(sensor->clk);
return 0;
}
static int stm_thermal_prepare(struct stm_thermal_sensor *sensor)
{
int ret;
ret = clk_prepare_enable(sensor->clk);
if (ret)
return ret;
ret = stm_thermal_read_factory_settings(sensor);
if (ret)
goto thermal_unprepare;
ret = stm_thermal_calibration(sensor);
if (ret)
goto thermal_unprepare;
return 0;
thermal_unprepare:
clk_disable_unprepare(sensor->clk);
return ret;
}
#ifdef CONFIG_PM_SLEEP
static int stm_thermal_suspend(struct device *dev)
{
struct stm_thermal_sensor *sensor = dev_get_drvdata(dev);
return stm_thermal_sensor_off(sensor);
}
static int stm_thermal_resume(struct device *dev)
{
int ret;
struct stm_thermal_sensor *sensor = dev_get_drvdata(dev);
ret = stm_thermal_prepare(sensor);
if (ret)
return ret;
ret = stm_sensor_power_on(sensor);
if (ret)
return ret;
thermal_zone_device_update(sensor->th_dev, THERMAL_EVENT_UNSPECIFIED);
stm_enable_irq(sensor);
return 0;
}
#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(stm_thermal_pm_ops,
stm_thermal_suspend, stm_thermal_resume);
static const struct thermal_zone_device_ops stm_tz_ops = {
.get_temp = stm_thermal_get_temp,
.set_trips = stm_thermal_set_trips,
};
static const struct of_device_id stm_thermal_of_match[] = {
{ .compatible = "st,stm32-thermal"},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, stm_thermal_of_match);
static int stm_thermal_probe(struct platform_device *pdev)
{
struct stm_thermal_sensor *sensor;
void __iomem *base;
int ret;
if (!pdev->dev.of_node) {
dev_err(&pdev->dev, "%s: device tree node not found\n",
__func__);
return -EINVAL;
}
sensor = devm_kzalloc(&pdev->dev, sizeof(*sensor), GFP_KERNEL);
if (!sensor)
return -ENOMEM;
platform_set_drvdata(pdev, sensor);
sensor->dev = &pdev->dev;
base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(base))
return PTR_ERR(base);
/* Populate sensor */
sensor->base = base;
sensor->clk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(sensor->clk)) {
dev_err(&pdev->dev, "%s: failed to fetch PCLK clock\n",
__func__);
return PTR_ERR(sensor->clk);
}
stm_disable_irq(sensor);
/* Clear irq flags */
writel_relaxed(ICIFR_MASK, sensor->base + DTS_ICIFR_OFFSET);
/* Configure and enable HW sensor */
ret = stm_thermal_prepare(sensor);
if (ret) {
dev_err(&pdev->dev, "Error prepare sensor: %d\n", ret);
return ret;
}
ret = stm_sensor_power_on(sensor);
if (ret) {
dev_err(&pdev->dev, "Error power on sensor: %d\n", ret);
return ret;
}
sensor->th_dev = devm_thermal_of_zone_register(&pdev->dev, 0,
sensor,
&stm_tz_ops);
if (IS_ERR(sensor->th_dev)) {
dev_err(&pdev->dev, "%s: thermal zone sensor registering KO\n",
__func__);
ret = PTR_ERR(sensor->th_dev);
return ret;
}
/* Register IRQ into GIC */
ret = stm_register_irq(sensor);
if (ret)
goto err_tz;
stm_enable_irq(sensor);
	/*
	 * The thermal core does not enable the hwmon interface by
	 * default, so enable it here.
	 */
ret = thermal_add_hwmon_sysfs(sensor->th_dev);
if (ret)
goto err_tz;
dev_info(&pdev->dev, "%s: Driver initialized successfully\n",
__func__);
return 0;
err_tz:
return ret;
}
static int stm_thermal_remove(struct platform_device *pdev)
{
struct stm_thermal_sensor *sensor = platform_get_drvdata(pdev);
stm_thermal_sensor_off(sensor);
thermal_remove_hwmon_sysfs(sensor->th_dev);
return 0;
}
static struct platform_driver stm_thermal_driver = {
.driver = {
.name = "stm_thermal",
.pm = &stm_thermal_pm_ops,
.of_match_table = stm_thermal_of_match,
},
.probe = stm_thermal_probe,
.remove = stm_thermal_remove,
};
module_platform_driver(stm_thermal_driver);
MODULE_DESCRIPTION("STMicroelectronics STM32 Thermal Sensor Driver");
MODULE_AUTHOR("David Hernandez Sanchez <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:stm_thermal");
| linux-master | drivers/thermal/st/stm_thermal.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ST Thermal Sensor Driver for memory mapped sensors.
* Author: Ajit Pal Singh <[email protected]>
*
* Copyright (C) 2003-2014 STMicroelectronics (R&D) Limited
*/
#include <linux/of.h>
#include <linux/module.h>
#include "st_thermal.h"
#define STIH416_MPE_CONF 0x0
#define STIH416_MPE_STATUS 0x4
#define STIH416_MPE_INT_THRESH 0x8
#define STIH416_MPE_INT_EN 0xC
/* Power control bits for the memory mapped thermal sensor */
#define THERMAL_PDN BIT(4)
#define THERMAL_SRSTN BIT(10)
static const struct reg_field st_mmap_thermal_regfields[MAX_REGFIELDS] = {
/*
* According to the STIH416 MPE temp sensor data sheet -
* the PDN (Power Down Bit) and SRSTN (Soft Reset Bit) need to be
* written simultaneously for powering on and off the temperature
* sensor. regmap_update_bits() will be used to update the register.
*/
[INT_THRESH_HI] = REG_FIELD(STIH416_MPE_INT_THRESH, 0, 7),
[DCORRECT] = REG_FIELD(STIH416_MPE_CONF, 5, 9),
[OVERFLOW] = REG_FIELD(STIH416_MPE_STATUS, 9, 9),
[DATA] = REG_FIELD(STIH416_MPE_STATUS, 11, 18),
[INT_ENABLE] = REG_FIELD(STIH416_MPE_INT_EN, 0, 0),
};
static irqreturn_t st_mmap_thermal_trip_handler(int irq, void *sdata)
{
struct st_thermal_sensor *sensor = sdata;
thermal_zone_device_update(sensor->thermal_dev,
THERMAL_EVENT_UNSPECIFIED);
return IRQ_HANDLED;
}
/* Private ops for the Memory Mapped based thermal sensors */
static int st_mmap_power_ctrl(struct st_thermal_sensor *sensor,
enum st_thermal_power_state power_state)
{
const unsigned int mask = (THERMAL_PDN | THERMAL_SRSTN);
const unsigned int val = power_state ? mask : 0;
return regmap_update_bits(sensor->regmap, STIH416_MPE_CONF, mask, val);
}
static int st_mmap_alloc_regfields(struct st_thermal_sensor *sensor)
{
struct device *dev = sensor->dev;
struct regmap *regmap = sensor->regmap;
const struct reg_field *reg_fields = sensor->cdata->reg_fields;
sensor->int_thresh_hi = devm_regmap_field_alloc(dev, regmap,
reg_fields[INT_THRESH_HI]);
sensor->int_enable = devm_regmap_field_alloc(dev, regmap,
reg_fields[INT_ENABLE]);
if (IS_ERR(sensor->int_thresh_hi) || IS_ERR(sensor->int_enable)) {
dev_err(dev, "failed to alloc mmap regfields\n");
return -EINVAL;
}
return 0;
}
static int st_mmap_enable_irq(struct st_thermal_sensor *sensor)
{
int ret;
/* Set upper critical threshold */
ret = regmap_field_write(sensor->int_thresh_hi,
sensor->cdata->crit_temp -
sensor->cdata->temp_adjust_val);
if (ret)
return ret;
return regmap_field_write(sensor->int_enable, 1);
}
static int st_mmap_register_enable_irq(struct st_thermal_sensor *sensor)
{
struct device *dev = sensor->dev;
struct platform_device *pdev = to_platform_device(dev);
int ret;
sensor->irq = platform_get_irq(pdev, 0);
if (sensor->irq < 0)
return sensor->irq;
ret = devm_request_threaded_irq(dev, sensor->irq,
NULL, st_mmap_thermal_trip_handler,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
dev->driver->name, sensor);
if (ret) {
dev_err(dev, "failed to register IRQ %d\n", sensor->irq);
return ret;
}
return st_mmap_enable_irq(sensor);
}
static const struct regmap_config st_416mpe_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
};
static int st_mmap_regmap_init(struct st_thermal_sensor *sensor)
{
struct device *dev = sensor->dev;
struct platform_device *pdev = to_platform_device(dev);
sensor->mmio_base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(sensor->mmio_base))
return PTR_ERR(sensor->mmio_base);
sensor->regmap = devm_regmap_init_mmio(dev, sensor->mmio_base,
&st_416mpe_regmap_config);
if (IS_ERR(sensor->regmap)) {
dev_err(dev, "failed to initialise regmap\n");
return PTR_ERR(sensor->regmap);
}
return 0;
}
static const struct st_thermal_sensor_ops st_mmap_sensor_ops = {
.power_ctrl = st_mmap_power_ctrl,
.alloc_regfields = st_mmap_alloc_regfields,
.regmap_init = st_mmap_regmap_init,
.register_enable_irq = st_mmap_register_enable_irq,
.enable_irq = st_mmap_enable_irq,
};
/* Compatible device data stih416 mpe thermal sensor */
static const struct st_thermal_compat_data st_416mpe_cdata = {
.reg_fields = st_mmap_thermal_regfields,
.ops = &st_mmap_sensor_ops,
.calibration_val = 14,
.temp_adjust_val = -95,
.crit_temp = 120,
};
/* Compatible device data stih407 thermal sensor */
static const struct st_thermal_compat_data st_407_cdata = {
.reg_fields = st_mmap_thermal_regfields,
.ops = &st_mmap_sensor_ops,
.calibration_val = 16,
.temp_adjust_val = -95,
.crit_temp = 120,
};
static const struct of_device_id st_mmap_thermal_of_match[] = {
{ .compatible = "st,stih416-mpe-thermal", .data = &st_416mpe_cdata },
{ .compatible = "st,stih407-thermal", .data = &st_407_cdata },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, st_mmap_thermal_of_match);
static int st_mmap_probe(struct platform_device *pdev)
{
return st_thermal_register(pdev, st_mmap_thermal_of_match);
}
static void st_mmap_remove(struct platform_device *pdev)
{
st_thermal_unregister(pdev);
}
static struct platform_driver st_mmap_thermal_driver = {
.driver = {
.name = "st_thermal_mmap",
.pm = &st_thermal_pm_ops,
.of_match_table = st_mmap_thermal_of_match,
},
.probe = st_mmap_probe,
.remove_new = st_mmap_remove,
};
module_platform_driver(st_mmap_thermal_driver);
MODULE_AUTHOR("STMicroelectronics (R&D) Limited <[email protected]>");
MODULE_DESCRIPTION("STMicroelectronics STi SoC Thermal Sensor Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/thermal/st/st_thermal_memmap.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ST Thermal Sensor Driver core routines
* Author: Ajit Pal Singh <[email protected]>
*
* Copyright (C) 2003-2014 STMicroelectronics (R&D) Limited
*/
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include "st_thermal.h"
/* The Thermal Framework expects millidegrees */
#define mcelsius(temp) ((temp) * 1000)
/*
* Function to allocate regfields which are common
* between syscfg and memory mapped based sensors
*/
static int st_thermal_alloc_regfields(struct st_thermal_sensor *sensor)
{
struct device *dev = sensor->dev;
struct regmap *regmap = sensor->regmap;
const struct reg_field *reg_fields = sensor->cdata->reg_fields;
sensor->dcorrect = devm_regmap_field_alloc(dev, regmap,
reg_fields[DCORRECT]);
sensor->overflow = devm_regmap_field_alloc(dev, regmap,
reg_fields[OVERFLOW]);
sensor->temp_data = devm_regmap_field_alloc(dev, regmap,
reg_fields[DATA]);
if (IS_ERR(sensor->dcorrect) ||
IS_ERR(sensor->overflow) ||
IS_ERR(sensor->temp_data)) {
dev_err(dev, "failed to allocate common regfields\n");
return -EINVAL;
}
return sensor->ops->alloc_regfields(sensor);
}
static int st_thermal_sensor_on(struct st_thermal_sensor *sensor)
{
int ret;
struct device *dev = sensor->dev;
ret = clk_prepare_enable(sensor->clk);
if (ret) {
dev_err(dev, "failed to enable clk\n");
return ret;
}
ret = sensor->ops->power_ctrl(sensor, POWER_ON);
if (ret) {
dev_err(dev, "failed to power on sensor\n");
clk_disable_unprepare(sensor->clk);
}
return ret;
}
static int st_thermal_sensor_off(struct st_thermal_sensor *sensor)
{
int ret;
ret = sensor->ops->power_ctrl(sensor, POWER_OFF);
if (ret)
return ret;
clk_disable_unprepare(sensor->clk);
return 0;
}
static int st_thermal_calibration(struct st_thermal_sensor *sensor)
{
int ret;
unsigned int val;
struct device *dev = sensor->dev;
/* Check if sensor calibration data is already written */
ret = regmap_field_read(sensor->dcorrect, &val);
if (ret) {
dev_err(dev, "failed to read calibration data\n");
return ret;
}
if (!val) {
/*
* Sensor calibration value not set by bootloader,
* default calibration data to be used
*/
ret = regmap_field_write(sensor->dcorrect,
sensor->cdata->calibration_val);
if (ret)
dev_err(dev, "failed to set calibration data\n");
}
return ret;
}
/* Callback to get temperature from HW */
static int st_thermal_get_temp(struct thermal_zone_device *th, int *temperature)
{
struct st_thermal_sensor *sensor = thermal_zone_device_priv(th);
unsigned int temp;
unsigned int overflow;
int ret;
ret = regmap_field_read(sensor->overflow, &overflow);
if (ret)
return ret;
if (overflow)
return -EIO;
ret = regmap_field_read(sensor->temp_data, &temp);
if (ret)
return ret;
temp += sensor->cdata->temp_adjust_val;
temp = mcelsius(temp);
*temperature = temp;
return 0;
}
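/*
 * Illustrative example (for instance with temp_adjust_val = -95, as used
 * by the memory-mapped sensors): a raw DATA field of 125 gives
 * 125 + (-95) = 30, i.e. *temperature = 30000 m°C.
 */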
static struct thermal_zone_device_ops st_tz_ops = {
.get_temp = st_thermal_get_temp,
};
static struct thermal_trip trip;
int st_thermal_register(struct platform_device *pdev,
const struct of_device_id *st_thermal_of_match)
{
struct st_thermal_sensor *sensor;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
const struct of_device_id *match;
int polling_delay;
int ret;
if (!np) {
dev_err(dev, "device tree node not found\n");
return -EINVAL;
}
sensor = devm_kzalloc(dev, sizeof(*sensor), GFP_KERNEL);
if (!sensor)
return -ENOMEM;
sensor->dev = dev;
match = of_match_device(st_thermal_of_match, dev);
if (!(match && match->data))
return -EINVAL;
sensor->cdata = match->data;
if (!sensor->cdata->ops)
return -EINVAL;
sensor->ops = sensor->cdata->ops;
ret = (sensor->ops->regmap_init)(sensor);
if (ret)
return ret;
ret = st_thermal_alloc_regfields(sensor);
if (ret)
return ret;
sensor->clk = devm_clk_get(dev, "thermal");
if (IS_ERR(sensor->clk)) {
dev_err(dev, "failed to fetch clock\n");
return PTR_ERR(sensor->clk);
}
if (sensor->ops->register_enable_irq) {
ret = sensor->ops->register_enable_irq(sensor);
if (ret)
return ret;
}
ret = st_thermal_sensor_on(sensor);
if (ret)
return ret;
ret = st_thermal_calibration(sensor);
if (ret)
goto sensor_off;
polling_delay = sensor->ops->register_enable_irq ? 0 : 1000;
trip.temperature = sensor->cdata->crit_temp;
trip.type = THERMAL_TRIP_CRITICAL;
sensor->thermal_dev =
thermal_zone_device_register_with_trips(dev_name(dev), &trip, 1, 0, sensor,
&st_tz_ops, NULL, 0, polling_delay);
if (IS_ERR(sensor->thermal_dev)) {
dev_err(dev, "failed to register thermal zone device\n");
ret = PTR_ERR(sensor->thermal_dev);
goto sensor_off;
}
ret = thermal_zone_device_enable(sensor->thermal_dev);
if (ret)
goto tzd_unregister;
platform_set_drvdata(pdev, sensor);
return 0;
tzd_unregister:
thermal_zone_device_unregister(sensor->thermal_dev);
sensor_off:
st_thermal_sensor_off(sensor);
return ret;
}
EXPORT_SYMBOL_GPL(st_thermal_register);
void st_thermal_unregister(struct platform_device *pdev)
{
struct st_thermal_sensor *sensor = platform_get_drvdata(pdev);
st_thermal_sensor_off(sensor);
thermal_zone_device_unregister(sensor->thermal_dev);
}
EXPORT_SYMBOL_GPL(st_thermal_unregister);
#ifdef CONFIG_PM_SLEEP
static int st_thermal_suspend(struct device *dev)
{
struct st_thermal_sensor *sensor = dev_get_drvdata(dev);
return st_thermal_sensor_off(sensor);
}
static int st_thermal_resume(struct device *dev)
{
int ret;
struct st_thermal_sensor *sensor = dev_get_drvdata(dev);
ret = st_thermal_sensor_on(sensor);
if (ret)
return ret;
ret = st_thermal_calibration(sensor);
if (ret)
return ret;
if (sensor->ops->enable_irq) {
ret = sensor->ops->enable_irq(sensor);
if (ret)
return ret;
}
return 0;
}
#endif
SIMPLE_DEV_PM_OPS(st_thermal_pm_ops, st_thermal_suspend, st_thermal_resume);
EXPORT_SYMBOL_GPL(st_thermal_pm_ops);
MODULE_AUTHOR("STMicroelectronics (R&D) Limited <[email protected]>");
MODULE_DESCRIPTION("STMicroelectronics STi SoC Thermal Sensor Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/thermal/st/st_thermal.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2014-2021 Broadcom
*/
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/panic_notifier.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/sysfs.h>
#include <linux/io.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/bitops.h>
#include <linux/pm.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/notifier.h>
#ifdef CONFIG_MIPS
#include <asm/traps.h>
#endif
#define ARB_ERR_CAP_CLEAR (1 << 0)
#define ARB_ERR_CAP_STATUS_TIMEOUT (1 << 12)
#define ARB_ERR_CAP_STATUS_TEA (1 << 11)
#define ARB_ERR_CAP_STATUS_WRITE (1 << 1)
#define ARB_ERR_CAP_STATUS_VALID (1 << 0)
#define ARB_BP_CAP_CLEAR (1 << 0)
#define ARB_BP_CAP_STATUS_PROT_SHIFT 14
#define ARB_BP_CAP_STATUS_TYPE (1 << 13)
#define ARB_BP_CAP_STATUS_RSP_SHIFT 10
#define ARB_BP_CAP_STATUS_MASK GENMASK(1, 0)
#define ARB_BP_CAP_STATUS_BS_SHIFT 2
#define ARB_BP_CAP_STATUS_WRITE (1 << 1)
#define ARB_BP_CAP_STATUS_VALID (1 << 0)
enum {
ARB_TIMER,
ARB_BP_CAP_CLR,
ARB_BP_CAP_HI_ADDR,
ARB_BP_CAP_ADDR,
ARB_BP_CAP_STATUS,
ARB_BP_CAP_MASTER,
ARB_ERR_CAP_CLR,
ARB_ERR_CAP_HI_ADDR,
ARB_ERR_CAP_ADDR,
ARB_ERR_CAP_STATUS,
ARB_ERR_CAP_MASTER,
};
static const int gisb_offsets_bcm7038[] = {
[ARB_TIMER] = 0x00c,
[ARB_BP_CAP_CLR] = 0x014,
[ARB_BP_CAP_HI_ADDR] = -1,
[ARB_BP_CAP_ADDR] = 0x0b8,
[ARB_BP_CAP_STATUS] = 0x0c0,
[ARB_BP_CAP_MASTER] = -1,
[ARB_ERR_CAP_CLR] = 0x0c4,
[ARB_ERR_CAP_HI_ADDR] = -1,
[ARB_ERR_CAP_ADDR] = 0x0c8,
[ARB_ERR_CAP_STATUS] = 0x0d0,
[ARB_ERR_CAP_MASTER] = -1,
};
static const int gisb_offsets_bcm7278[] = {
[ARB_TIMER] = 0x008,
[ARB_BP_CAP_CLR] = 0x01c,
[ARB_BP_CAP_HI_ADDR] = -1,
[ARB_BP_CAP_ADDR] = 0x220,
[ARB_BP_CAP_STATUS] = 0x230,
[ARB_BP_CAP_MASTER] = 0x234,
[ARB_ERR_CAP_CLR] = 0x7f8,
[ARB_ERR_CAP_HI_ADDR] = -1,
[ARB_ERR_CAP_ADDR] = 0x7e0,
[ARB_ERR_CAP_STATUS] = 0x7f0,
[ARB_ERR_CAP_MASTER] = 0x7f4,
};
static const int gisb_offsets_bcm7400[] = {
[ARB_TIMER] = 0x00c,
[ARB_BP_CAP_CLR] = 0x014,
[ARB_BP_CAP_HI_ADDR] = -1,
[ARB_BP_CAP_ADDR] = 0x0b8,
[ARB_BP_CAP_STATUS] = 0x0c0,
[ARB_BP_CAP_MASTER] = 0x0c4,
[ARB_ERR_CAP_CLR] = 0x0c8,
[ARB_ERR_CAP_HI_ADDR] = -1,
[ARB_ERR_CAP_ADDR] = 0x0cc,
[ARB_ERR_CAP_STATUS] = 0x0d4,
[ARB_ERR_CAP_MASTER] = 0x0d8,
};
static const int gisb_offsets_bcm7435[] = {
[ARB_TIMER] = 0x00c,
[ARB_BP_CAP_CLR] = 0x014,
[ARB_BP_CAP_HI_ADDR] = -1,
[ARB_BP_CAP_ADDR] = 0x158,
[ARB_BP_CAP_STATUS] = 0x160,
[ARB_BP_CAP_MASTER] = 0x164,
[ARB_ERR_CAP_CLR] = 0x168,
[ARB_ERR_CAP_HI_ADDR] = -1,
[ARB_ERR_CAP_ADDR] = 0x16c,
[ARB_ERR_CAP_STATUS] = 0x174,
[ARB_ERR_CAP_MASTER] = 0x178,
};
static const int gisb_offsets_bcm7445[] = {
[ARB_TIMER] = 0x008,
[ARB_BP_CAP_CLR] = 0x010,
[ARB_BP_CAP_HI_ADDR] = -1,
[ARB_BP_CAP_ADDR] = 0x1d8,
[ARB_BP_CAP_STATUS] = 0x1e0,
[ARB_BP_CAP_MASTER] = 0x1e4,
[ARB_ERR_CAP_CLR] = 0x7e4,
[ARB_ERR_CAP_HI_ADDR] = 0x7e8,
[ARB_ERR_CAP_ADDR] = 0x7ec,
[ARB_ERR_CAP_STATUS] = 0x7f4,
[ARB_ERR_CAP_MASTER] = 0x7f8,
};
struct brcmstb_gisb_arb_device {
void __iomem *base;
const int *gisb_offsets;
bool big_endian;
struct mutex lock;
struct list_head next;
u32 valid_mask;
const char *master_names[sizeof(u32) * BITS_PER_BYTE];
u32 saved_timeout;
};
static LIST_HEAD(brcmstb_gisb_arb_device_list);
static u32 gisb_read(struct brcmstb_gisb_arb_device *gdev, int reg)
{
int offset = gdev->gisb_offsets[reg];
if (offset < 0) {
/* return 1 if the hardware doesn't have ARB_ERR_CAP_MASTER */
if (reg == ARB_ERR_CAP_MASTER)
return 1;
else
return 0;
}
if (gdev->big_endian)
return ioread32be(gdev->base + offset);
else
return ioread32(gdev->base + offset);
}
static u64 gisb_read_address(struct brcmstb_gisb_arb_device *gdev)
{
u64 value;
value = gisb_read(gdev, ARB_ERR_CAP_ADDR);
value |= (u64)gisb_read(gdev, ARB_ERR_CAP_HI_ADDR) << 32;
return value;
}
static u64 gisb_read_bp_address(struct brcmstb_gisb_arb_device *gdev)
{
u64 value;
value = gisb_read(gdev, ARB_BP_CAP_ADDR);
value |= (u64)gisb_read(gdev, ARB_BP_CAP_HI_ADDR) << 32;
return value;
}
static void gisb_write(struct brcmstb_gisb_arb_device *gdev, u32 val, int reg)
{
int offset = gdev->gisb_offsets[reg];
if (offset == -1)
return;
if (gdev->big_endian)
iowrite32be(val, gdev->base + offset);
else
iowrite32(val, gdev->base + offset);
}
static ssize_t gisb_arb_get_timeout(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct brcmstb_gisb_arb_device *gdev = dev_get_drvdata(dev);
u32 timeout;
mutex_lock(&gdev->lock);
timeout = gisb_read(gdev, ARB_TIMER);
mutex_unlock(&gdev->lock);
	return sprintf(buf, "%d\n", timeout);
}
static ssize_t gisb_arb_set_timeout(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct brcmstb_gisb_arb_device *gdev = dev_get_drvdata(dev);
int val, ret;
ret = kstrtoint(buf, 10, &val);
if (ret < 0)
return ret;
if (val == 0 || val >= 0xffffffff)
return -EINVAL;
mutex_lock(&gdev->lock);
gisb_write(gdev, val, ARB_TIMER);
mutex_unlock(&gdev->lock);
return count;
}
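/*
 * Example usage from user space (the exact device path depends on how
 * the arbiter node is named in the device tree):
 *
 *   echo 1024 > /sys/devices/platform/<gisb-node>/gisb_arb_timeout
 *   cat /sys/devices/platform/<gisb-node>/gisb_arb_timeout
 *
 * The value is written unmodified to the ARB_TIMER register; 0 and
 * 0xffffffff are rejected above.
 */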
static const char *
brcmstb_gisb_master_to_str(struct brcmstb_gisb_arb_device *gdev,
u32 masters)
{
u32 mask = gdev->valid_mask & masters;
if (hweight_long(mask) != 1)
return NULL;
return gdev->master_names[ffs(mask) - 1];
}
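/*
 * Example: with valid_mask = 0x7 and a captured masters value of 0x4,
 * mask = 0x4 has exactly one bit set, ffs(0x4) = 3, and the name at
 * master_names[2] is returned; a multi-bit or out-of-mask value yields
 * NULL and the caller falls back to printing the raw hex value.
 */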
static int brcmstb_gisb_arb_decode_addr(struct brcmstb_gisb_arb_device *gdev,
const char *reason)
{
u32 cap_status;
u64 arb_addr;
u32 master;
const char *m_name;
char m_fmt[11];
cap_status = gisb_read(gdev, ARB_ERR_CAP_STATUS);
/* Invalid captured address, bail out */
if (!(cap_status & ARB_ERR_CAP_STATUS_VALID))
return 1;
/* Read the address and master */
arb_addr = gisb_read_address(gdev);
master = gisb_read(gdev, ARB_ERR_CAP_MASTER);
m_name = brcmstb_gisb_master_to_str(gdev, master);
if (!m_name) {
snprintf(m_fmt, sizeof(m_fmt), "0x%08x", master);
m_name = m_fmt;
}
pr_crit("GISB: %s at 0x%llx [%c %s], core: %s\n",
reason, arb_addr,
cap_status & ARB_ERR_CAP_STATUS_WRITE ? 'W' : 'R',
cap_status & ARB_ERR_CAP_STATUS_TIMEOUT ? "timeout" : "",
m_name);
/* clear the GISB error */
gisb_write(gdev, ARB_ERR_CAP_CLEAR, ARB_ERR_CAP_CLR);
return 0;
}
#ifdef CONFIG_MIPS
static int brcmstb_bus_error_handler(struct pt_regs *regs, int is_fixup)
{
int ret = 0;
struct brcmstb_gisb_arb_device *gdev;
u32 cap_status;
list_for_each_entry(gdev, &brcmstb_gisb_arb_device_list, next) {
cap_status = gisb_read(gdev, ARB_ERR_CAP_STATUS);
/* Invalid captured address, bail out */
if (!(cap_status & ARB_ERR_CAP_STATUS_VALID)) {
is_fixup = 1;
goto out;
}
ret |= brcmstb_gisb_arb_decode_addr(gdev, "bus error");
}
out:
return is_fixup ? MIPS_BE_FIXUP : MIPS_BE_FATAL;
}
#endif
static irqreturn_t brcmstb_gisb_timeout_handler(int irq, void *dev_id)
{
brcmstb_gisb_arb_decode_addr(dev_id, "timeout");
return IRQ_HANDLED;
}
static irqreturn_t brcmstb_gisb_tea_handler(int irq, void *dev_id)
{
brcmstb_gisb_arb_decode_addr(dev_id, "target abort");
return IRQ_HANDLED;
}
static irqreturn_t brcmstb_gisb_bp_handler(int irq, void *dev_id)
{
struct brcmstb_gisb_arb_device *gdev = dev_id;
const char *m_name;
u32 bp_status;
u64 arb_addr;
u32 master;
char m_fmt[11];
bp_status = gisb_read(gdev, ARB_BP_CAP_STATUS);
/* Invalid captured address, bail out */
if (!(bp_status & ARB_BP_CAP_STATUS_VALID))
return IRQ_HANDLED;
/* Read the address and master */
arb_addr = gisb_read_bp_address(gdev);
master = gisb_read(gdev, ARB_BP_CAP_MASTER);
m_name = brcmstb_gisb_master_to_str(gdev, master);
if (!m_name) {
snprintf(m_fmt, sizeof(m_fmt), "0x%08x", master);
m_name = m_fmt;
}
pr_crit("GISB: breakpoint at 0x%llx [%c], core: %s\n",
arb_addr, bp_status & ARB_BP_CAP_STATUS_WRITE ? 'W' : 'R',
m_name);
/* clear the GISB error */
gisb_write(gdev, ARB_ERR_CAP_CLEAR, ARB_ERR_CAP_CLR);
return IRQ_HANDLED;
}
/*
* Dump out gisb errors on die or panic.
*/
static int dump_gisb_error(struct notifier_block *self, unsigned long v,
void *p);
static struct notifier_block gisb_die_notifier = {
.notifier_call = dump_gisb_error,
};
static struct notifier_block gisb_panic_notifier = {
.notifier_call = dump_gisb_error,
};
static int dump_gisb_error(struct notifier_block *self, unsigned long v,
void *p)
{
struct brcmstb_gisb_arb_device *gdev;
const char *reason = "panic";
if (self == &gisb_die_notifier)
reason = "die";
/* iterate over each GISB arb registered handlers */
list_for_each_entry(gdev, &brcmstb_gisb_arb_device_list, next)
brcmstb_gisb_arb_decode_addr(gdev, reason);
return NOTIFY_DONE;
}
static DEVICE_ATTR(gisb_arb_timeout, S_IWUSR | S_IRUGO,
gisb_arb_get_timeout, gisb_arb_set_timeout);
static struct attribute *gisb_arb_sysfs_attrs[] = {
&dev_attr_gisb_arb_timeout.attr,
NULL,
};
static struct attribute_group gisb_arb_sysfs_attr_group = {
.attrs = gisb_arb_sysfs_attrs,
};
static const struct of_device_id brcmstb_gisb_arb_of_match[] = {
{ .compatible = "brcm,gisb-arb", .data = gisb_offsets_bcm7445 },
{ .compatible = "brcm,bcm7445-gisb-arb", .data = gisb_offsets_bcm7445 },
{ .compatible = "brcm,bcm7435-gisb-arb", .data = gisb_offsets_bcm7435 },
{ .compatible = "brcm,bcm7400-gisb-arb", .data = gisb_offsets_bcm7400 },
{ .compatible = "brcm,bcm7278-gisb-arb", .data = gisb_offsets_bcm7278 },
{ .compatible = "brcm,bcm7038-gisb-arb", .data = gisb_offsets_bcm7038 },
{ },
};
static int __init brcmstb_gisb_arb_probe(struct platform_device *pdev)
{
struct device_node *dn = pdev->dev.of_node;
struct brcmstb_gisb_arb_device *gdev;
const struct of_device_id *of_id;
int err, timeout_irq, tea_irq, bp_irq;
unsigned int num_masters, j = 0;
int i, first, last;
timeout_irq = platform_get_irq(pdev, 0);
tea_irq = platform_get_irq(pdev, 1);
bp_irq = platform_get_irq(pdev, 2);
gdev = devm_kzalloc(&pdev->dev, sizeof(*gdev), GFP_KERNEL);
if (!gdev)
return -ENOMEM;
mutex_init(&gdev->lock);
INIT_LIST_HEAD(&gdev->next);
gdev->base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(gdev->base))
return PTR_ERR(gdev->base);
of_id = of_match_node(brcmstb_gisb_arb_of_match, dn);
if (!of_id) {
pr_err("failed to look up compatible string\n");
return -EINVAL;
}
gdev->gisb_offsets = of_id->data;
gdev->big_endian = of_device_is_big_endian(dn);
err = devm_request_irq(&pdev->dev, timeout_irq,
brcmstb_gisb_timeout_handler, 0, pdev->name,
gdev);
if (err < 0)
return err;
err = devm_request_irq(&pdev->dev, tea_irq,
brcmstb_gisb_tea_handler, 0, pdev->name,
gdev);
if (err < 0)
return err;
/* Interrupt is optional */
if (bp_irq > 0) {
err = devm_request_irq(&pdev->dev, bp_irq,
brcmstb_gisb_bp_handler, 0, pdev->name,
gdev);
if (err < 0)
return err;
}
/* If we do not have a valid mask, assume all masters are enabled */
if (of_property_read_u32(dn, "brcm,gisb-arb-master-mask",
&gdev->valid_mask))
gdev->valid_mask = 0xffffffff;
	/* Proceed with reading the literal names if we agree on the
	 * number of masters
	 */
num_masters = of_property_count_strings(dn,
"brcm,gisb-arb-master-names");
if (hweight_long(gdev->valid_mask) == num_masters) {
first = ffs(gdev->valid_mask) - 1;
last = fls(gdev->valid_mask) - 1;
		for (i = first; i <= last; i++) {
if (!(gdev->valid_mask & BIT(i)))
continue;
of_property_read_string_index(dn,
"brcm,gisb-arb-master-names", j,
&gdev->master_names[i]);
j++;
}
}
err = sysfs_create_group(&pdev->dev.kobj, &gisb_arb_sysfs_attr_group);
if (err)
return err;
platform_set_drvdata(pdev, gdev);
list_add_tail(&gdev->next, &brcmstb_gisb_arb_device_list);
#ifdef CONFIG_MIPS
mips_set_be_handler(brcmstb_bus_error_handler);
#endif
if (list_is_singular(&brcmstb_gisb_arb_device_list)) {
register_die_notifier(&gisb_die_notifier);
atomic_notifier_chain_register(&panic_notifier_list,
&gisb_panic_notifier);
}
dev_info(&pdev->dev, "registered irqs: %d, %d\n",
timeout_irq, tea_irq);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int brcmstb_gisb_arb_suspend(struct device *dev)
{
struct brcmstb_gisb_arb_device *gdev = dev_get_drvdata(dev);
gdev->saved_timeout = gisb_read(gdev, ARB_TIMER);
return 0;
}
/* Make sure we provide the same timeout value that was configured before, and
* do this before the GISB timeout interrupt handler has any chance to run.
*/
static int brcmstb_gisb_arb_resume_noirq(struct device *dev)
{
struct brcmstb_gisb_arb_device *gdev = dev_get_drvdata(dev);
gisb_write(gdev, gdev->saved_timeout, ARB_TIMER);
return 0;
}
#else
#define brcmstb_gisb_arb_suspend NULL
#define brcmstb_gisb_arb_resume_noirq NULL
#endif
static const struct dev_pm_ops brcmstb_gisb_arb_pm_ops = {
.suspend = brcmstb_gisb_arb_suspend,
.resume_noirq = brcmstb_gisb_arb_resume_noirq,
};
static struct platform_driver brcmstb_gisb_arb_driver = {
.driver = {
.name = "brcm-gisb-arb",
.of_match_table = brcmstb_gisb_arb_of_match,
.pm = &brcmstb_gisb_arb_pm_ops,
},
};
static int __init brcm_gisb_driver_init(void)
{
return platform_driver_probe(&brcmstb_gisb_arb_driver,
brcmstb_gisb_arb_probe);
}
module_init(brcm_gisb_driver_init);
MODULE_AUTHOR("Broadcom");
MODULE_DESCRIPTION("Broadcom STB GISB arbiter driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/bus/brcmstb_gisb.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
*
* Authors:
* Serge Semin <[email protected]>
*
* Baikal-T1 APB-bus driver
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/atomic.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/nmi.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/reset.h>
#include <linux/time64.h>
#include <linux/sysfs.h>
#define APB_EHB_ISR 0x00
#define APB_EHB_ISR_PENDING BIT(0)
#define APB_EHB_ISR_MASK BIT(1)
#define APB_EHB_ADDR 0x04
#define APB_EHB_TIMEOUT 0x08
#define APB_EHB_TIMEOUT_MIN 0x000003FFU
#define APB_EHB_TIMEOUT_MAX 0xFFFFFFFFU
/*
* struct bt1_apb - Baikal-T1 APB EHB private data
* @dev: Pointer to the device structure.
* @regs: APB EHB registers map.
* @res: No-device error injection memory region.
* @irq: Errors IRQ number.
* @rate: APB-bus reference clock rate.
* @pclk: APB-reference clock.
* @prst: APB domain reset line.
* @count: Number of errors detected.
*/
struct bt1_apb {
struct device *dev;
struct regmap *regs;
void __iomem *res;
int irq;
unsigned long rate;
struct clk *pclk;
struct reset_control *prst;
atomic_t count;
};
static const struct regmap_config bt1_apb_regmap_cfg = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.max_register = APB_EHB_TIMEOUT,
.fast_io = true
};
static inline unsigned long bt1_apb_n_to_timeout_us(struct bt1_apb *apb, u32 n)
{
u64 timeout = (u64)n * USEC_PER_SEC;
do_div(timeout, apb->rate);
return timeout;
}
static inline unsigned long bt1_apb_timeout_to_n_us(struct bt1_apb *apb,
unsigned long timeout)
{
u64 n = (u64)timeout * apb->rate;
do_div(n, USEC_PER_SEC);
return n;
}
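/*
 * Illustrative round trip (hypothetical 50 MHz APB clock): n = 0x3FF
 * (APB_EHB_TIMEOUT_MIN, i.e. 1023 cycles) converts to
 * 1023 * 1000000 / 50000000 = 20 us, and a requested timeout of 20 us
 * converts back to n = 20 * 50000000 / 1000000 = 1000 cycles (which
 * the sysfs store below then clamps up to APB_EHB_TIMEOUT_MIN).
 */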
static irqreturn_t bt1_apb_isr(int irq, void *data)
{
struct bt1_apb *apb = data;
u32 addr = 0;
regmap_read(apb->regs, APB_EHB_ADDR, &addr);
dev_crit_ratelimited(apb->dev,
"APB-bus fault %d: Slave access timeout at 0x%08x\n",
atomic_inc_return(&apb->count),
addr);
/*
* Print backtrace on each CPU. This might be pointless if the fault
* has happened on the same CPU as the IRQ handler is executed or
* the other core proceeded further execution despite the error.
* But if it's not, by looking at the trace we would get straight to
* the cause of the problem.
*/
trigger_all_cpu_backtrace();
regmap_update_bits(apb->regs, APB_EHB_ISR, APB_EHB_ISR_PENDING, 0);
return IRQ_HANDLED;
}
static void bt1_apb_clear_data(void *data)
{
struct bt1_apb *apb = data;
struct platform_device *pdev = to_platform_device(apb->dev);
platform_set_drvdata(pdev, NULL);
}
static struct bt1_apb *bt1_apb_create_data(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct bt1_apb *apb;
int ret;
apb = devm_kzalloc(dev, sizeof(*apb), GFP_KERNEL);
if (!apb)
return ERR_PTR(-ENOMEM);
ret = devm_add_action(dev, bt1_apb_clear_data, apb);
if (ret) {
dev_err(dev, "Can't add APB EHB data clear action\n");
return ERR_PTR(ret);
}
apb->dev = dev;
atomic_set(&apb->count, 0);
platform_set_drvdata(pdev, apb);
return apb;
}
static int bt1_apb_request_regs(struct bt1_apb *apb)
{
struct platform_device *pdev = to_platform_device(apb->dev);
void __iomem *regs;
regs = devm_platform_ioremap_resource_byname(pdev, "ehb");
if (IS_ERR(regs)) {
dev_err(apb->dev, "Couldn't map APB EHB registers\n");
return PTR_ERR(regs);
}
apb->regs = devm_regmap_init_mmio(apb->dev, regs, &bt1_apb_regmap_cfg);
if (IS_ERR(apb->regs)) {
dev_err(apb->dev, "Couldn't create APB EHB regmap\n");
return PTR_ERR(apb->regs);
}
apb->res = devm_platform_ioremap_resource_byname(pdev, "nodev");
if (IS_ERR(apb->res))
dev_err(apb->dev, "Couldn't map reserved region\n");
return PTR_ERR_OR_ZERO(apb->res);
}
static int bt1_apb_request_rst(struct bt1_apb *apb)
{
int ret;
apb->prst = devm_reset_control_get_optional_exclusive(apb->dev, "prst");
if (IS_ERR(apb->prst))
return dev_err_probe(apb->dev, PTR_ERR(apb->prst),
"Couldn't get reset control line\n");
ret = reset_control_deassert(apb->prst);
if (ret)
dev_err(apb->dev, "Failed to deassert the reset line\n");
return ret;
}
static void bt1_apb_disable_clk(void *data)
{
struct bt1_apb *apb = data;
clk_disable_unprepare(apb->pclk);
}
static int bt1_apb_request_clk(struct bt1_apb *apb)
{
int ret;
apb->pclk = devm_clk_get(apb->dev, "pclk");
if (IS_ERR(apb->pclk))
return dev_err_probe(apb->dev, PTR_ERR(apb->pclk),
"Couldn't get APB clock descriptor\n");
ret = clk_prepare_enable(apb->pclk);
if (ret) {
dev_err(apb->dev, "Couldn't enable the APB clock\n");
return ret;
}
ret = devm_add_action_or_reset(apb->dev, bt1_apb_disable_clk, apb);
if (ret) {
dev_err(apb->dev, "Can't add APB EHB clocks disable action\n");
return ret;
}
apb->rate = clk_get_rate(apb->pclk);
if (!apb->rate) {
dev_err(apb->dev, "Invalid clock rate\n");
return -EINVAL;
}
return 0;
}
static void bt1_apb_clear_irq(void *data)
{
struct bt1_apb *apb = data;
regmap_update_bits(apb->regs, APB_EHB_ISR, APB_EHB_ISR_MASK, 0);
}
static int bt1_apb_request_irq(struct bt1_apb *apb)
{
struct platform_device *pdev = to_platform_device(apb->dev);
int ret;
apb->irq = platform_get_irq(pdev, 0);
if (apb->irq < 0)
return apb->irq;
ret = devm_request_irq(apb->dev, apb->irq, bt1_apb_isr, IRQF_SHARED,
"bt1-apb", apb);
if (ret) {
dev_err(apb->dev, "Couldn't request APB EHB IRQ\n");
return ret;
}
ret = devm_add_action(apb->dev, bt1_apb_clear_irq, apb);
if (ret) {
dev_err(apb->dev, "Can't add APB EHB IRQs clear action\n");
return ret;
}
	/* Unmask IRQ and clear its pending flag. */
regmap_update_bits(apb->regs, APB_EHB_ISR,
APB_EHB_ISR_PENDING | APB_EHB_ISR_MASK,
APB_EHB_ISR_MASK);
return 0;
}
static ssize_t count_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct bt1_apb *apb = dev_get_drvdata(dev);
return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&apb->count));
}
static DEVICE_ATTR_RO(count);
static ssize_t timeout_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct bt1_apb *apb = dev_get_drvdata(dev);
unsigned long timeout;
int ret;
u32 n;
ret = regmap_read(apb->regs, APB_EHB_TIMEOUT, &n);
if (ret)
return ret;
timeout = bt1_apb_n_to_timeout_us(apb, n);
return scnprintf(buf, PAGE_SIZE, "%lu\n", timeout);
}
static ssize_t timeout_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct bt1_apb *apb = dev_get_drvdata(dev);
unsigned long timeout;
int ret;
u32 n;
if (kstrtoul(buf, 0, &timeout) < 0)
return -EINVAL;
n = bt1_apb_timeout_to_n_us(apb, timeout);
n = clamp(n, APB_EHB_TIMEOUT_MIN, APB_EHB_TIMEOUT_MAX);
ret = regmap_write(apb->regs, APB_EHB_TIMEOUT, n);
return ret ?: count;
}
static DEVICE_ATTR_RW(timeout);
static ssize_t inject_error_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "Error injection: nodev irq\n");
}
static ssize_t inject_error_store(struct device *dev,
struct device_attribute *attr,
const char *data, size_t count)
{
struct bt1_apb *apb = dev_get_drvdata(dev);
/*
* Either dummy read from the unmapped address in the APB IO area
* or manually set the IRQ status.
*/
if (sysfs_streq(data, "nodev"))
readl(apb->res);
else if (sysfs_streq(data, "irq"))
regmap_update_bits(apb->regs, APB_EHB_ISR, APB_EHB_ISR_PENDING,
APB_EHB_ISR_PENDING);
else
return -EINVAL;
return count;
}
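/*
 * Example usage (the exact path depends on the platform device name):
 *
 *   echo nodev > /sys/devices/platform/<bt1-apb-node>/inject_error
 *   echo irq   > /sys/devices/platform/<bt1-apb-node>/inject_error
 *
 * "nodev" performs a dummy read from the reserved, unmapped APB region
 * so the EHB raises a genuine timeout; "irq" merely sets the pending
 * flag so only the interrupt path is exercised.
 */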
static DEVICE_ATTR_RW(inject_error);
static struct attribute *bt1_apb_sysfs_attrs[] = {
&dev_attr_count.attr,
&dev_attr_timeout.attr,
&dev_attr_inject_error.attr,
NULL
};
ATTRIBUTE_GROUPS(bt1_apb_sysfs);
static void bt1_apb_remove_sysfs(void *data)
{
struct bt1_apb *apb = data;
device_remove_groups(apb->dev, bt1_apb_sysfs_groups);
}
static int bt1_apb_init_sysfs(struct bt1_apb *apb)
{
int ret;
ret = device_add_groups(apb->dev, bt1_apb_sysfs_groups);
if (ret) {
dev_err(apb->dev, "Failed to create EHB APB sysfs nodes\n");
return ret;
}
ret = devm_add_action_or_reset(apb->dev, bt1_apb_remove_sysfs, apb);
if (ret)
dev_err(apb->dev, "Can't add APB EHB sysfs remove action\n");
return ret;
}
static int bt1_apb_probe(struct platform_device *pdev)
{
struct bt1_apb *apb;
int ret;
apb = bt1_apb_create_data(pdev);
if (IS_ERR(apb))
return PTR_ERR(apb);
ret = bt1_apb_request_regs(apb);
if (ret)
return ret;
ret = bt1_apb_request_rst(apb);
if (ret)
return ret;
ret = bt1_apb_request_clk(apb);
if (ret)
return ret;
ret = bt1_apb_request_irq(apb);
if (ret)
return ret;
ret = bt1_apb_init_sysfs(apb);
if (ret)
return ret;
return 0;
}
static const struct of_device_id bt1_apb_of_match[] = {
{ .compatible = "baikal,bt1-apb" },
{ }
};
MODULE_DEVICE_TABLE(of, bt1_apb_of_match);
static struct platform_driver bt1_apb_driver = {
.probe = bt1_apb_probe,
.driver = {
.name = "bt1-apb",
.of_match_table = bt1_apb_of_match
}
};
module_platform_driver(bt1_apb_driver);
MODULE_AUTHOR("Serge Semin <[email protected]>");
MODULE_DESCRIPTION("Baikal-T1 APB-bus driver");
| linux-master | drivers/bus/bt1-apb.c |
/*
* EIM driver for Freescale's i.MX chips
*
* Copyright (C) 2013 Freescale Semiconductor, Inc.
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/regmap.h>
struct imx_weim_devtype {
unsigned int cs_count;
unsigned int cs_regs_count;
unsigned int cs_stride;
unsigned int wcr_offset;
unsigned int wcr_bcm;
unsigned int wcr_cont_bclk;
};
static const struct imx_weim_devtype imx1_weim_devtype = {
.cs_count = 6,
.cs_regs_count = 2,
.cs_stride = 0x08,
};
static const struct imx_weim_devtype imx27_weim_devtype = {
.cs_count = 6,
.cs_regs_count = 3,
.cs_stride = 0x10,
};
static const struct imx_weim_devtype imx50_weim_devtype = {
.cs_count = 4,
.cs_regs_count = 6,
.cs_stride = 0x18,
.wcr_offset = 0x90,
.wcr_bcm = BIT(0),
.wcr_cont_bclk = BIT(3),
};
static const struct imx_weim_devtype imx51_weim_devtype = {
.cs_count = 6,
.cs_regs_count = 6,
.cs_stride = 0x18,
};
#define MAX_CS_REGS_COUNT 6
#define MAX_CS_COUNT 6
#define OF_REG_SIZE 3
struct cs_timing {
bool is_applied;
u32 regs[MAX_CS_REGS_COUNT];
};
struct cs_timing_state {
struct cs_timing cs[MAX_CS_COUNT];
};
struct weim_priv {
void __iomem *base;
struct cs_timing_state timing_state;
};
static const struct of_device_id weim_id_table[] = {
/* i.MX1/21 */
{ .compatible = "fsl,imx1-weim", .data = &imx1_weim_devtype, },
/* i.MX25/27/31/35 */
{ .compatible = "fsl,imx27-weim", .data = &imx27_weim_devtype, },
/* i.MX50/53/6Q */
{ .compatible = "fsl,imx50-weim", .data = &imx50_weim_devtype, },
{ .compatible = "fsl,imx6q-weim", .data = &imx50_weim_devtype, },
/* i.MX51 */
{ .compatible = "fsl,imx51-weim", .data = &imx51_weim_devtype, },
{ }
};
MODULE_DEVICE_TABLE(of, weim_id_table);
static int imx_weim_gpr_setup(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct of_range_parser parser;
struct of_range range;
struct regmap *gpr;
u32 gprvals[4] = {
05, /* CS0(128M) CS1(0M) CS2(0M) CS3(0M) */
033, /* CS0(64M) CS1(64M) CS2(0M) CS3(0M) */
0113, /* CS0(64M) CS1(32M) CS2(32M) CS3(0M) */
01111, /* CS0(32M) CS1(32M) CS2(32M) CS3(32M) */
};
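	/*
	 * Each chip select occupies 3 bits of IOMUXC_GPR1[11:0] and is
	 * encoded as (window size / 32M) | 1, so 128M -> 0b101, 64M ->
	 * 0b011 and 32M -> 0b001. For example, two 64M windows on CS0
	 * and CS1 pack to the octal constant 033 above.
	 */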
u32 gprval = 0;
u32 val;
int cs = 0;
int i = 0;
gpr = syscon_regmap_lookup_by_phandle(np, "fsl,weim-cs-gpr");
if (IS_ERR(gpr)) {
dev_dbg(&pdev->dev, "failed to find weim-cs-gpr\n");
return 0;
}
if (of_range_parser_init(&parser, np))
goto err;
for_each_of_range(&parser, &range) {
cs = range.bus_addr >> 32;
val = (range.size / SZ_32M) | 1;
gprval |= val << cs * 3;
i++;
}
if (i == 0 || i % 4)
goto err;
for (i = 0; i < ARRAY_SIZE(gprvals); i++) {
if (gprval == gprvals[i]) {
/* Found it. Set up IOMUXC_GPR1[11:0] with it. */
regmap_update_bits(gpr, IOMUXC_GPR1, 0xfff, gprval);
return 0;
}
}
err:
dev_err(&pdev->dev, "Invalid 'ranges' configuration\n");
return -EINVAL;
}
/* Parse and set the timing for this device. */
static int weim_timing_setup(struct device *dev, struct device_node *np,
const struct imx_weim_devtype *devtype)
{
u32 cs_idx, value[MAX_CS_REGS_COUNT];
int i, ret;
int reg_idx, num_regs;
struct cs_timing *cst;
struct weim_priv *priv;
struct cs_timing_state *ts;
void __iomem *base;
if (WARN_ON(devtype->cs_regs_count > MAX_CS_REGS_COUNT))
return -EINVAL;
if (WARN_ON(devtype->cs_count > MAX_CS_COUNT))
return -EINVAL;
priv = dev_get_drvdata(dev);
base = priv->base;
ts = &priv->timing_state;
ret = of_property_read_u32_array(np, "fsl,weim-cs-timing",
value, devtype->cs_regs_count);
if (ret)
return ret;
/*
* the child node's "reg" property may contain multiple address ranges,
* extract the chip select for each.
*/
num_regs = of_property_count_elems_of_size(np, "reg", OF_REG_SIZE);
if (num_regs < 0)
return num_regs;
if (!num_regs)
return -EINVAL;
for (reg_idx = 0; reg_idx < num_regs; reg_idx++) {
/* get the CS index from this child node's "reg" property. */
ret = of_property_read_u32_index(np, "reg",
reg_idx * OF_REG_SIZE, &cs_idx);
if (ret)
break;
if (cs_idx >= devtype->cs_count)
return -EINVAL;
/* prevent re-configuring a CS that's already been configured */
cst = &ts->cs[cs_idx];
if (cst->is_applied && memcmp(value, cst->regs,
devtype->cs_regs_count * sizeof(u32))) {
dev_err(dev, "fsl,weim-cs-timing conflict on %pOF", np);
return -EINVAL;
}
/* set the timing for WEIM */
for (i = 0; i < devtype->cs_regs_count; i++)
writel(value[i],
base + cs_idx * devtype->cs_stride + i * 4);
if (!cst->is_applied) {
cst->is_applied = true;
memcpy(cst->regs, value,
devtype->cs_regs_count * sizeof(u32));
}
}
return 0;
}
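/*
 * Illustrative device tree fragment (the timing values are made up; the
 * number of cells in fsl,weim-cs-timing must equal cs_regs_count for the
 * SoC, e.g. 6 on i.MX50/6Q):
 *
 *	flash@0,0 {
 *		compatible = "cfi-flash";
 *		reg = <0 0 0x02000000>;
 *		fsl,weim-cs-timing = <0x00620081 0x00000001 0x1c022000
 *				      0x0000c000 0x1404a38e 0x00000000>;
 *	};
 */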
static int weim_parse_dt(struct platform_device *pdev)
{
const struct of_device_id *of_id = of_match_device(weim_id_table,
&pdev->dev);
const struct imx_weim_devtype *devtype = of_id->data;
int ret = 0, have_child = 0;
struct device_node *child;
struct weim_priv *priv;
void __iomem *base;
u32 reg;
if (devtype == &imx50_weim_devtype) {
ret = imx_weim_gpr_setup(pdev);
if (ret)
return ret;
}
priv = dev_get_drvdata(&pdev->dev);
base = priv->base;
if (of_property_read_bool(pdev->dev.of_node, "fsl,burst-clk-enable")) {
if (devtype->wcr_bcm) {
reg = readl(base + devtype->wcr_offset);
reg |= devtype->wcr_bcm;
if (of_property_read_bool(pdev->dev.of_node,
"fsl,continuous-burst-clk")) {
if (devtype->wcr_cont_bclk) {
reg |= devtype->wcr_cont_bclk;
} else {
dev_err(&pdev->dev,
"continuous burst clk not supported.\n");
return -EINVAL;
}
}
writel(reg, base + devtype->wcr_offset);
} else {
dev_err(&pdev->dev, "burst clk mode not supported.\n");
return -EINVAL;
}
}
for_each_available_child_of_node(pdev->dev.of_node, child) {
ret = weim_timing_setup(&pdev->dev, child, devtype);
if (ret)
dev_warn(&pdev->dev, "%pOF set timing failed.\n",
child);
else
have_child = 1;
}
if (have_child)
ret = of_platform_default_populate(pdev->dev.of_node,
NULL, &pdev->dev);
if (ret)
dev_err(&pdev->dev, "%pOF fail to create devices.\n",
pdev->dev.of_node);
return ret;
}
static int weim_probe(struct platform_device *pdev)
{
struct weim_priv *priv;
struct clk *clk;
void __iomem *base;
int ret;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
/* get the resource */
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
priv->base = base;
dev_set_drvdata(&pdev->dev, priv);
/* get the clock */
clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(clk))
return PTR_ERR(clk);
ret = clk_prepare_enable(clk);
if (ret)
return ret;
/* parse the device node */
ret = weim_parse_dt(pdev);
if (ret)
clk_disable_unprepare(clk);
else
dev_info(&pdev->dev, "Driver registered.\n");
return ret;
}
#if IS_ENABLED(CONFIG_OF_DYNAMIC)
static int of_weim_notify(struct notifier_block *nb, unsigned long action,
void *arg)
{
const struct imx_weim_devtype *devtype;
struct of_reconfig_data *rd = arg;
const struct of_device_id *of_id;
struct platform_device *pdev;
int ret = NOTIFY_OK;
switch (of_reconfig_get_state_change(action, rd)) {
case OF_RECONFIG_CHANGE_ADD:
of_id = of_match_node(weim_id_table, rd->dn->parent);
if (!of_id)
return NOTIFY_OK; /* not for us */
devtype = of_id->data;
pdev = of_find_device_by_node(rd->dn->parent);
if (!pdev) {
pr_err("%s: could not find platform device for '%pOF'\n",
__func__, rd->dn->parent);
return notifier_from_errno(-EINVAL);
}
if (weim_timing_setup(&pdev->dev, rd->dn, devtype))
dev_warn(&pdev->dev,
"Failed to setup timing for '%pOF'\n", rd->dn);
if (!of_node_check_flag(rd->dn, OF_POPULATED)) {
/*
* Clear the flag before adding the device so that
* fw_devlink doesn't skip adding consumers to this
* device.
*/
rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
if (!of_platform_device_create(rd->dn, NULL, &pdev->dev)) {
dev_err(&pdev->dev,
"Failed to create child device '%pOF'\n",
rd->dn);
ret = notifier_from_errno(-EINVAL);
}
}
platform_device_put(pdev);
break;
case OF_RECONFIG_CHANGE_REMOVE:
if (!of_node_check_flag(rd->dn, OF_POPULATED))
return NOTIFY_OK; /* device already destroyed */
of_id = of_match_node(weim_id_table, rd->dn->parent);
if (!of_id)
return NOTIFY_OK; /* not for us */
pdev = of_find_device_by_node(rd->dn);
if (!pdev) {
pr_err("Could not find platform device for '%pOF'\n",
rd->dn);
ret = notifier_from_errno(-EINVAL);
} else {
of_platform_device_destroy(&pdev->dev, NULL);
platform_device_put(pdev);
}
break;
default:
break;
}
return ret;
}
static struct notifier_block weim_of_notifier = {
.notifier_call = of_weim_notify,
};
#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
static struct platform_driver weim_driver = {
.driver = {
.name = "imx-weim",
.of_match_table = weim_id_table,
},
.probe = weim_probe,
};
static int __init weim_init(void)
{
#if IS_ENABLED(CONFIG_OF_DYNAMIC)
WARN_ON(of_reconfig_notifier_register(&weim_of_notifier));
#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
return platform_driver_register(&weim_driver);
}
module_init(weim_init);
static void __exit weim_exit(void)
{
#if IS_ENABLED(CONFIG_OF_DYNAMIC)
of_reconfig_notifier_unregister(&weim_of_notifier);
#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
return platform_driver_unregister(&weim_driver);
}
module_exit(weim_exit);
MODULE_AUTHOR("Freescale Semiconductor Inc.");
MODULE_DESCRIPTION("i.MX EIM Controller Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/bus/imx-weim.c |
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (c) 2021, Michael Srba
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>
/* AXI Halt Register Offsets */
#define AXI_HALTREQ_REG 0x0
#define AXI_HALTACK_REG 0x4
#define AXI_IDLE_REG 0x8
#define SSCAON_CONFIG0_CLAMP_EN_OVRD BIT(4)
#define SSCAON_CONFIG0_CLAMP_EN_OVRD_VAL BIT(5)
static const char *const qcom_ssc_block_pd_names[] = {
"ssc_cx",
"ssc_mx"
};
struct qcom_ssc_block_bus_data {
const char *const *pd_names;
struct device *pds[ARRAY_SIZE(qcom_ssc_block_pd_names)];
char __iomem *reg_mpm_sscaon_config0;
char __iomem *reg_mpm_sscaon_config1;
struct regmap *halt_map;
struct clk *xo_clk;
struct clk *aggre2_clk;
struct clk *gcc_im_sleep_clk;
struct clk *aggre2_north_clk;
struct clk *ssc_xo_clk;
struct clk *ssc_ahbs_clk;
struct reset_control *ssc_bcr;
struct reset_control *ssc_reset;
u32 ssc_axi_halt;
int num_pds;
};
static void reg32_set_bits(char __iomem *reg, u32 value)
{
u32 tmp = ioread32(reg);
iowrite32(tmp | value, reg);
}
static void reg32_clear_bits(char __iomem *reg, u32 value)
{
u32 tmp = ioread32(reg);
iowrite32(tmp & (~value), reg);
}
static int qcom_ssc_block_bus_init(struct device *dev)
{
int ret;
struct qcom_ssc_block_bus_data *data = dev_get_drvdata(dev);
ret = clk_prepare_enable(data->xo_clk);
if (ret) {
dev_err(dev, "error enabling xo_clk: %d\n", ret);
goto err_xo_clk;
}
ret = clk_prepare_enable(data->aggre2_clk);
if (ret) {
dev_err(dev, "error enabling aggre2_clk: %d\n", ret);
goto err_aggre2_clk;
}
ret = clk_prepare_enable(data->gcc_im_sleep_clk);
if (ret) {
dev_err(dev, "error enabling gcc_im_sleep_clk: %d\n", ret);
goto err_gcc_im_sleep_clk;
}
/*
* We need to intervene here because the HW logic driving these signals cannot handle
* initialization after power collapse by itself.
*/
reg32_clear_bits(data->reg_mpm_sscaon_config0,
SSCAON_CONFIG0_CLAMP_EN_OVRD | SSCAON_CONFIG0_CLAMP_EN_OVRD_VAL);
/* override few_ack/rest_ack */
reg32_clear_bits(data->reg_mpm_sscaon_config1, BIT(31));
ret = clk_prepare_enable(data->aggre2_north_clk);
if (ret) {
dev_err(dev, "error enabling aggre2_north_clk: %d\n", ret);
goto err_aggre2_north_clk;
}
ret = reset_control_deassert(data->ssc_reset);
if (ret) {
dev_err(dev, "error deasserting ssc_reset: %d\n", ret);
goto err_ssc_reset;
}
ret = reset_control_deassert(data->ssc_bcr);
if (ret) {
dev_err(dev, "error deasserting ssc_bcr: %d\n", ret);
goto err_ssc_bcr;
}
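	/*
	 * Writing 0 releases the AXI halt request so SSC-side bus
	 * transactions can flow; the error and teardown paths re-assert
	 * it by writing 1.
	 */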
regmap_write(data->halt_map, data->ssc_axi_halt + AXI_HALTREQ_REG, 0);
ret = clk_prepare_enable(data->ssc_xo_clk);
if (ret) {
dev_err(dev, "error deasserting ssc_xo_clk: %d\n", ret);
goto err_ssc_xo_clk;
}
ret = clk_prepare_enable(data->ssc_ahbs_clk);
if (ret) {
dev_err(dev, "error deasserting ssc_ahbs_clk: %d\n", ret);
goto err_ssc_ahbs_clk;
}
return 0;
err_ssc_ahbs_clk:
clk_disable_unprepare(data->ssc_xo_clk);
err_ssc_xo_clk:
regmap_write(data->halt_map, data->ssc_axi_halt + AXI_HALTREQ_REG, 1);
reset_control_assert(data->ssc_bcr);
err_ssc_bcr:
reset_control_assert(data->ssc_reset);
err_ssc_reset:
clk_disable_unprepare(data->aggre2_north_clk);
err_aggre2_north_clk:
reg32_set_bits(data->reg_mpm_sscaon_config0,
SSCAON_CONFIG0_CLAMP_EN_OVRD | SSCAON_CONFIG0_CLAMP_EN_OVRD_VAL);
reg32_set_bits(data->reg_mpm_sscaon_config1, BIT(31));
clk_disable_unprepare(data->gcc_im_sleep_clk);
err_gcc_im_sleep_clk:
clk_disable_unprepare(data->aggre2_clk);
err_aggre2_clk:
clk_disable_unprepare(data->xo_clk);
err_xo_clk:
return ret;
}
static void qcom_ssc_block_bus_deinit(struct device *dev)
{
int ret;
struct qcom_ssc_block_bus_data *data = dev_get_drvdata(dev);
clk_disable_unprepare(data->ssc_ahbs_clk);
clk_disable_unprepare(data->ssc_xo_clk);
ret = reset_control_assert(data->ssc_bcr);
if (ret)
dev_err(dev, "error asserting ssc_bcr: %d\n", ret);
regmap_write(data->halt_map, data->ssc_axi_halt + AXI_HALTREQ_REG, 1);
reg32_set_bits(data->reg_mpm_sscaon_config1, BIT(31));
reg32_set_bits(data->reg_mpm_sscaon_config0,
SSCAON_CONFIG0_CLAMP_EN_OVRD | SSCAON_CONFIG0_CLAMP_EN_OVRD_VAL);
ret = reset_control_assert(data->ssc_reset);
if (ret)
dev_err(dev, "error asserting ssc_reset: %d\n", ret);
clk_disable_unprepare(data->gcc_im_sleep_clk);
clk_disable_unprepare(data->aggre2_north_clk);
clk_disable_unprepare(data->aggre2_clk);
clk_disable_unprepare(data->xo_clk);
}
static int qcom_ssc_block_bus_pds_attach(struct device *dev, struct device **pds,
const char *const *pd_names, size_t num_pds)
{
int ret;
int i;
for (i = 0; i < num_pds; i++) {
pds[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
if (IS_ERR_OR_NULL(pds[i])) {
ret = PTR_ERR(pds[i]) ? : -ENODATA;
goto unroll_attach;
}
}
return num_pds;
unroll_attach:
for (i--; i >= 0; i--)
dev_pm_domain_detach(pds[i], false);
return ret;
}
static void qcom_ssc_block_bus_pds_detach(struct device *dev, struct device **pds, size_t num_pds)
{
int i;
for (i = 0; i < num_pds; i++)
dev_pm_domain_detach(pds[i], false);
}
static int qcom_ssc_block_bus_pds_enable(struct device **pds, size_t num_pds)
{
int ret;
int i;
for (i = 0; i < num_pds; i++) {
dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
ret = pm_runtime_get_sync(pds[i]);
if (ret < 0)
goto unroll_pd_votes;
}
return 0;
unroll_pd_votes:
for (i--; i >= 0; i--) {
dev_pm_genpd_set_performance_state(pds[i], 0);
pm_runtime_put(pds[i]);
}
return ret;
}
static void qcom_ssc_block_bus_pds_disable(struct device **pds, size_t num_pds)
{
int i;
for (i = 0; i < num_pds; i++) {
dev_pm_genpd_set_performance_state(pds[i], 0);
pm_runtime_put(pds[i]);
}
}
static int qcom_ssc_block_bus_probe(struct platform_device *pdev)
{
struct qcom_ssc_block_bus_data *data;
struct device_node *np = pdev->dev.of_node;
struct of_phandle_args halt_args;
struct resource *res;
int ret;
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
platform_set_drvdata(pdev, data);
data->pd_names = qcom_ssc_block_pd_names;
data->num_pds = ARRAY_SIZE(qcom_ssc_block_pd_names);
/* power domains */
ret = qcom_ssc_block_bus_pds_attach(&pdev->dev, data->pds, data->pd_names, data->num_pds);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret, "error when attaching power domains\n");
ret = qcom_ssc_block_bus_pds_enable(data->pds, data->num_pds);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret, "error when enabling power domains\n");
/* low level overrides for when the HW logic doesn't "just work" */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mpm_sscaon_config0");
data->reg_mpm_sscaon_config0 = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(data->reg_mpm_sscaon_config0))
return dev_err_probe(&pdev->dev, PTR_ERR(data->reg_mpm_sscaon_config0),
"Failed to ioremap mpm_sscaon_config0\n");
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mpm_sscaon_config1");
data->reg_mpm_sscaon_config1 = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(data->reg_mpm_sscaon_config1))
return dev_err_probe(&pdev->dev, PTR_ERR(data->reg_mpm_sscaon_config1),
"Failed to ioremap mpm_sscaon_config1\n");
/* resets */
data->ssc_bcr = devm_reset_control_get_exclusive(&pdev->dev, "ssc_bcr");
if (IS_ERR(data->ssc_bcr))
return dev_err_probe(&pdev->dev, PTR_ERR(data->ssc_bcr),
"Failed to acquire reset: scc_bcr\n");
data->ssc_reset = devm_reset_control_get_exclusive(&pdev->dev, "ssc_reset");
if (IS_ERR(data->ssc_reset))
return dev_err_probe(&pdev->dev, PTR_ERR(data->ssc_reset),
"Failed to acquire reset: ssc_reset:\n");
/* clocks */
data->xo_clk = devm_clk_get(&pdev->dev, "xo");
if (IS_ERR(data->xo_clk))
return dev_err_probe(&pdev->dev, PTR_ERR(data->xo_clk),
"Failed to get clock: xo\n");
data->aggre2_clk = devm_clk_get(&pdev->dev, "aggre2");
if (IS_ERR(data->aggre2_clk))
return dev_err_probe(&pdev->dev, PTR_ERR(data->aggre2_clk),
"Failed to get clock: aggre2\n");
data->gcc_im_sleep_clk = devm_clk_get(&pdev->dev, "gcc_im_sleep");
if (IS_ERR(data->gcc_im_sleep_clk))
return dev_err_probe(&pdev->dev, PTR_ERR(data->gcc_im_sleep_clk),
"Failed to get clock: gcc_im_sleep\n");
data->aggre2_north_clk = devm_clk_get(&pdev->dev, "aggre2_north");
if (IS_ERR(data->aggre2_north_clk))
return dev_err_probe(&pdev->dev, PTR_ERR(data->aggre2_north_clk),
"Failed to get clock: aggre2_north\n");
data->ssc_xo_clk = devm_clk_get(&pdev->dev, "ssc_xo");
if (IS_ERR(data->ssc_xo_clk))
return dev_err_probe(&pdev->dev, PTR_ERR(data->ssc_xo_clk),
"Failed to get clock: ssc_xo\n");
data->ssc_ahbs_clk = devm_clk_get(&pdev->dev, "ssc_ahbs");
if (IS_ERR(data->ssc_ahbs_clk))
return dev_err_probe(&pdev->dev, PTR_ERR(data->ssc_ahbs_clk),
"Failed to get clock: ssc_ahbs\n");
ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node, "qcom,halt-regs", 1, 0,
&halt_args);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret, "Failed to parse qcom,halt-regs\n");
data->halt_map = syscon_node_to_regmap(halt_args.np);
of_node_put(halt_args.np);
if (IS_ERR(data->halt_map))
return PTR_ERR(data->halt_map);
data->ssc_axi_halt = halt_args.args[0];
qcom_ssc_block_bus_init(&pdev->dev);
of_platform_populate(np, NULL, NULL, &pdev->dev);
return 0;
}
static int qcom_ssc_block_bus_remove(struct platform_device *pdev)
{
struct qcom_ssc_block_bus_data *data = platform_get_drvdata(pdev);
qcom_ssc_block_bus_deinit(&pdev->dev);
qcom_ssc_block_bus_pds_disable(data->pds, data->num_pds);
qcom_ssc_block_bus_pds_detach(&pdev->dev, data->pds, data->num_pds);
pm_runtime_disable(&pdev->dev);
pm_clk_destroy(&pdev->dev);
return 0;
}
static const struct of_device_id qcom_ssc_block_bus_of_match[] = {
{ .compatible = "qcom,ssc-block-bus", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, qcom_ssc_block_bus_of_match);
static struct platform_driver qcom_ssc_block_bus_driver = {
.probe = qcom_ssc_block_bus_probe,
.remove = qcom_ssc_block_bus_remove,
.driver = {
.name = "qcom-ssc-block-bus",
.of_match_table = qcom_ssc_block_bus_of_match,
},
};
module_platform_driver(qcom_ssc_block_bus_driver);
MODULE_DESCRIPTION("A driver for handling the init sequence needed for accessing the SSC block on (some) qcom SoCs over AHB");
MODULE_AUTHOR("Michael Srba <[email protected]>");
| linux-master | drivers/bus/qcom-ssc-block-bus.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Intel IXP4xx Expansion Bus Controller
* Copyright (C) 2021 Linaro Ltd.
*
* Author: Linus Walleij <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#define IXP4XX_EXP_NUM_CS 8
#define IXP4XX_EXP_TIMING_CS0 0x00
#define IXP4XX_EXP_TIMING_CS1 0x04
#define IXP4XX_EXP_TIMING_CS2 0x08
#define IXP4XX_EXP_TIMING_CS3 0x0c
#define IXP4XX_EXP_TIMING_CS4 0x10
#define IXP4XX_EXP_TIMING_CS5 0x14
#define IXP4XX_EXP_TIMING_CS6 0x18
#define IXP4XX_EXP_TIMING_CS7 0x1c
/* Bits inside each CS timing register */
#define IXP4XX_EXP_TIMING_STRIDE 0x04
#define IXP4XX_EXP_CS_EN BIT(31)
#define IXP456_EXP_PAR_EN BIT(30) /* Only on IXP45x and IXP46x */
#define IXP4XX_EXP_T1_MASK GENMASK(29, 28)
#define IXP4XX_EXP_T1_SHIFT 28
#define IXP4XX_EXP_T2_MASK GENMASK(27, 26)
#define IXP4XX_EXP_T2_SHIFT 26
#define IXP4XX_EXP_T3_MASK GENMASK(25, 22)
#define IXP4XX_EXP_T3_SHIFT 22
#define IXP4XX_EXP_T4_MASK GENMASK(21, 20)
#define IXP4XX_EXP_T4_SHIFT 20
#define IXP4XX_EXP_T5_MASK GENMASK(19, 16)
#define IXP4XX_EXP_T5_SHIFT 16
#define IXP4XX_EXP_CYC_TYPE_MASK GENMASK(15, 14)
#define IXP4XX_EXP_CYC_TYPE_SHIFT 14
#define IXP4XX_EXP_SIZE_MASK GENMASK(13, 10)
#define IXP4XX_EXP_SIZE_SHIFT 10
#define IXP4XX_EXP_CNFG_0 BIT(9) /* Always zero */
#define IXP43X_EXP_SYNC_INTEL BIT(8) /* Only on IXP43x */
#define IXP43X_EXP_EXP_CHIP BIT(7) /* Only on IXP43x, dangerous to touch on IXP42x */
#define IXP4XX_EXP_BYTE_RD16 BIT(6)
#define IXP4XX_EXP_HRDY_POL BIT(5) /* Only on IXP42x */
#define IXP4XX_EXP_MUX_EN BIT(4)
#define IXP4XX_EXP_SPLT_EN BIT(3)
#define IXP4XX_EXP_WORD BIT(2) /* Always zero */
#define IXP4XX_EXP_WR_EN BIT(1)
#define IXP4XX_EXP_BYTE_EN BIT(0)
#define IXP4XX_EXP_CNFG0 0x20
#define IXP4XX_EXP_CNFG0_MEM_MAP BIT(31)
#define IXP4XX_EXP_CNFG1 0x24
#define IXP4XX_EXP_BOOT_BASE 0x00000000
#define IXP4XX_EXP_NORMAL_BASE 0x50000000
#define IXP4XX_EXP_STRIDE 0x01000000
/* Fuses on the IXP43x */
#define IXP43X_EXP_UNIT_FUSE_RESET 0x28
#define IXP43x_EXP_FUSE_SPEED_MASK GENMASK(23, 22)
/* Number of device tree values in "reg" */
#define IXP4XX_OF_REG_SIZE 3
struct ixp4xx_eb {
struct device *dev;
struct regmap *rmap;
u32 bus_base;
bool is_42x;
bool is_43x;
};
struct ixp4xx_exp_tim_prop {
const char *prop;
u32 max;
u32 mask;
u16 shift;
};
static const struct ixp4xx_exp_tim_prop ixp4xx_exp_tim_props[] = {
{
.prop = "intel,ixp4xx-eb-t1",
.max = 3,
.mask = IXP4XX_EXP_T1_MASK,
.shift = IXP4XX_EXP_T1_SHIFT,
},
{
.prop = "intel,ixp4xx-eb-t2",
.max = 3,
.mask = IXP4XX_EXP_T2_MASK,
.shift = IXP4XX_EXP_T2_SHIFT,
},
{
.prop = "intel,ixp4xx-eb-t3",
.max = 15,
.mask = IXP4XX_EXP_T3_MASK,
.shift = IXP4XX_EXP_T3_SHIFT,
},
{
.prop = "intel,ixp4xx-eb-t4",
.max = 3,
.mask = IXP4XX_EXP_T4_MASK,
.shift = IXP4XX_EXP_T4_SHIFT,
},
{
.prop = "intel,ixp4xx-eb-t5",
.max = 15,
.mask = IXP4XX_EXP_T5_MASK,
.shift = IXP4XX_EXP_T5_SHIFT,
},
{
.prop = "intel,ixp4xx-eb-byte-access-on-halfword",
.max = 1,
.mask = IXP4XX_EXP_BYTE_RD16,
},
{
.prop = "intel,ixp4xx-eb-hpi-hrdy-pol-high",
.max = 1,
.mask = IXP4XX_EXP_HRDY_POL,
},
{
.prop = "intel,ixp4xx-eb-mux-address-and-data",
.max = 1,
.mask = IXP4XX_EXP_MUX_EN,
},
{
.prop = "intel,ixp4xx-eb-ahb-split-transfers",
.max = 1,
.mask = IXP4XX_EXP_SPLT_EN,
},
{
.prop = "intel,ixp4xx-eb-write-enable",
.max = 1,
.mask = IXP4XX_EXP_WR_EN,
},
{
.prop = "intel,ixp4xx-eb-byte-access",
.max = 1,
.mask = IXP4XX_EXP_BYTE_EN,
},
};
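/*
* Illustrative (made-up) device tree fragment exercising the timing
* properties above; the node name, chip select, offset, size and
* values are examples only:
*
* flash@0,0 {
* compatible = "cfi-flash";
* reg = <0 0x0 0x1000000>;
* intel,ixp4xx-eb-t3 = <3>;
* intel,ixp4xx-eb-write-enable = <1>;
* };
*/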
static void ixp4xx_exp_setup_chipselect(struct ixp4xx_eb *eb,
struct device_node *np,
u32 cs_index,
u32 cs_size)
{
u32 cs_cfg;
u32 val;
u32 cur_cssize;
u32 cs_order;
int ret;
int i;
if (eb->is_42x && (cs_index > 7)) {
dev_err(eb->dev,
"invalid chipselect %u, we only support 0-7\n",
cs_index);
return;
}
if (eb->is_43x && (cs_index > 3)) {
dev_err(eb->dev,
"invalid chipselect %u, we only support 0-3\n",
cs_index);
return;
}
/* Several chip selects can be joined into one device */
if (cs_size > IXP4XX_EXP_STRIDE)
cur_cssize = IXP4XX_EXP_STRIDE;
else
cur_cssize = cs_size;
/*
* The following will read/modify/write the configuration for one
* chipselect, attempting to leave the boot defaults in place unless
* something is explicitly defined.
*/
regmap_read(eb->rmap, IXP4XX_EXP_TIMING_CS0 +
IXP4XX_EXP_TIMING_STRIDE * cs_index, &cs_cfg);
dev_info(eb->dev, "CS%d at %#08x, size %#08x, config before: %#08x\n",
cs_index, eb->bus_base + IXP4XX_EXP_STRIDE * cs_index,
cur_cssize, cs_cfg);
/* Set up the size first, aligned to the 2^9 .. 2^24 range */
cur_cssize = roundup_pow_of_two(cur_cssize);
if (cur_cssize < 512)
cur_cssize = 512;
cs_order = ilog2(cur_cssize);
if (cs_order < 9 || cs_order > 24) {
dev_err(eb->dev, "illegal size order %d\n", cs_order);
return;
}
dev_dbg(eb->dev, "CS%d size order: %d\n", cs_index, cs_order);
cs_cfg &= ~(IXP4XX_EXP_SIZE_MASK);
cs_cfg |= ((cs_order - 9) << IXP4XX_EXP_SIZE_SHIFT);
for (i = 0; i < ARRAY_SIZE(ixp4xx_exp_tim_props); i++) {
const struct ixp4xx_exp_tim_prop *ip = &ixp4xx_exp_tim_props[i];
/* All are regular u32 values */
ret = of_property_read_u32(np, ip->prop, &val);
if (ret)
continue;
/* Handle bools (single bits) first */
if (ip->max == 1) {
if (val)
cs_cfg |= ip->mask;
else
cs_cfg &= ~ip->mask;
dev_info(eb->dev, "CS%d %s %s\n", cs_index,
val ? "enabled" : "disabled",
ip->prop);
continue;
}
if (val > ip->max) {
dev_err(eb->dev,
"CS%d too high value for %s: %u, capped at %u\n",
cs_index, ip->prop, val, ip->max);
val = ip->max;
}
/* This assumes max value fills all the assigned bits (and it does) */
cs_cfg &= ~ip->mask;
cs_cfg |= (val << ip->shift);
dev_info(eb->dev, "CS%d set %s to %u\n", cs_index, ip->prop, val);
}
ret = of_property_read_u32(np, "intel,ixp4xx-eb-cycle-type", &val);
if (!ret) {
if (val > 3) {
dev_err(eb->dev, "illegal cycle type %d\n", val);
return;
}
dev_info(eb->dev, "CS%d set cycle type %d\n", cs_index, val);
cs_cfg &= ~IXP4XX_EXP_CYC_TYPE_MASK;
cs_cfg |= val << IXP4XX_EXP_CYC_TYPE_SHIFT;
}
if (eb->is_43x) {
/* Should always be zero */
cs_cfg &= ~IXP4XX_EXP_WORD;
/*
* This bit for Intel strata flash is currently unused, but let's
* report it if we find one.
*/
if (cs_cfg & IXP43X_EXP_SYNC_INTEL)
dev_info(eb->dev, "claims to be Intel strata flash\n");
}
cs_cfg |= IXP4XX_EXP_CS_EN;
regmap_write(eb->rmap,
IXP4XX_EXP_TIMING_CS0 + IXP4XX_EXP_TIMING_STRIDE * cs_index,
cs_cfg);
dev_info(eb->dev, "CS%d wrote %#08x into CS config\n", cs_index, cs_cfg);
/*
* If several chip selects are joined together into one big
* device area, we call ourselves recursively for each successive
* chip select. For a 32MB flash chip this results in two calls
* for example.
*/
if (cs_size > IXP4XX_EXP_STRIDE)
ixp4xx_exp_setup_chipselect(eb, np,
cs_index + 1,
cs_size - IXP4XX_EXP_STRIDE);
}
static void ixp4xx_exp_setup_child(struct ixp4xx_eb *eb,
struct device_node *np)
{
u32 cs_sizes[IXP4XX_EXP_NUM_CS];
int num_regs;
u32 csindex;
u32 cssize;
int ret;
int i;
num_regs = of_property_count_elems_of_size(np, "reg", IXP4XX_OF_REG_SIZE);
if (num_regs <= 0)
return;
dev_dbg(eb->dev, "child %s has %d register sets\n",
of_node_full_name(np), num_regs);
for (csindex = 0; csindex < IXP4XX_EXP_NUM_CS; csindex++)
cs_sizes[csindex] = 0;
for (i = 0; i < num_regs; i++) {
u32 rbase, rsize;
ret = of_property_read_u32_index(np, "reg",
i * IXP4XX_OF_REG_SIZE, &csindex);
if (ret)
break;
ret = of_property_read_u32_index(np, "reg",
i * IXP4XX_OF_REG_SIZE + 1, &rbase);
if (ret)
break;
ret = of_property_read_u32_index(np, "reg",
i * IXP4XX_OF_REG_SIZE + 2, &rsize);
if (ret)
break;
if (csindex >= IXP4XX_EXP_NUM_CS) {
dev_err(eb->dev, "illegal CS %d\n", csindex);
continue;
}
/*
* The memory window always starts from the CS base, so we need to add
* the start and size to get the size from the start of the CS base.
* For example, if CS0 is at 0x50000000 and the reg is
* <0 0xe40000 0x40000>, the size is 0xe80000.
*
* Take the maximum if several regs set the same CS.
*/
cssize = rbase + rsize;
dev_dbg(eb->dev, "CS%d size %#08x\n", csindex, cssize);
if (cs_sizes[csindex] < cssize)
cs_sizes[csindex] = cssize;
}
for (csindex = 0; csindex < IXP4XX_EXP_NUM_CS; csindex++) {
cssize = cs_sizes[csindex];
if (!cssize)
continue;
/* Just this one, so set it up and return */
ixp4xx_exp_setup_chipselect(eb, np, csindex, cssize);
}
}
static int ixp4xx_exp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct ixp4xx_eb *eb;
struct device_node *child;
bool have_children = false;
u32 val;
int ret;
eb = devm_kzalloc(dev, sizeof(*eb), GFP_KERNEL);
if (!eb)
return -ENOMEM;
eb->dev = dev;
eb->is_42x = of_device_is_compatible(np, "intel,ixp42x-expansion-bus-controller");
eb->is_43x = of_device_is_compatible(np, "intel,ixp43x-expansion-bus-controller");
eb->rmap = syscon_node_to_regmap(np);
if (IS_ERR(eb->rmap))
return dev_err_probe(dev, PTR_ERR(eb->rmap), "no regmap\n");
/* We only check that the regmap works on this first read */
ret = regmap_read(eb->rmap, IXP4XX_EXP_CNFG0, &val);
if (ret)
return dev_err_probe(dev, ret, "cannot read regmap\n");
if (val & IXP4XX_EXP_CNFG0_MEM_MAP)
eb->bus_base = IXP4XX_EXP_BOOT_BASE;
else
eb->bus_base = IXP4XX_EXP_NORMAL_BASE;
dev_info(dev, "expansion bus at %08x\n", eb->bus_base);
if (eb->is_43x) {
/* Check some fuses */
regmap_read(eb->rmap, IXP43X_EXP_UNIT_FUSE_RESET, &val);
switch (FIELD_GET(IXP43x_EXP_FUSE_SPEED_MASK, val)) {
case 0:
dev_info(dev, "IXP43x at 533 MHz\n");
break;
case 1:
dev_info(dev, "IXP43x at 400 MHz\n");
break;
case 2:
dev_info(dev, "IXP43x at 667 MHz\n");
break;
default:
dev_info(dev, "IXP43x unknown speed\n");
break;
}
}
/* Walk over the child nodes and see what chipselects we use */
for_each_available_child_of_node(np, child) {
ixp4xx_exp_setup_child(eb, child);
/* We have at least one child */
have_children = true;
}
if (have_children)
return of_platform_default_populate(np, NULL, dev);
return 0;
}
static const struct of_device_id ixp4xx_exp_of_match[] = {
{ .compatible = "intel,ixp42x-expansion-bus-controller", },
{ .compatible = "intel,ixp43x-expansion-bus-controller", },
{ .compatible = "intel,ixp45x-expansion-bus-controller", },
{ .compatible = "intel,ixp46x-expansion-bus-controller", },
{ }
};
static struct platform_driver ixp4xx_exp_driver = {
.probe = ixp4xx_exp_probe,
.driver = {
.name = "intel-extbus",
.of_match_table = ixp4xx_exp_of_match,
},
};
module_platform_driver(ixp4xx_exp_driver);
MODULE_AUTHOR("Linus Walleij <[email protected]>");
MODULE_DESCRIPTION("Intel IXP4xx external bus driver");
| linux-master | drivers/bus/intel-ixp4xx-eb.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* TI PWM Subsystem driver
*
* Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/pm_runtime.h>
#include <linux/of_platform.h>
static const struct of_device_id pwmss_of_match[] = {
{ .compatible = "ti,am33xx-pwmss" },
{},
};
MODULE_DEVICE_TABLE(of, pwmss_of_match);
static int pwmss_probe(struct platform_device *pdev)
{
int ret;
struct device_node *node = pdev->dev.of_node;
pm_runtime_enable(&pdev->dev);
/* Populate all the child nodes here... */
ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
if (ret)
dev_err(&pdev->dev, "no child node found\n");
return ret;
}
static int pwmss_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
return 0;
}
static struct platform_driver pwmss_driver = {
.driver = {
.name = "pwmss",
.of_match_table = pwmss_of_match,
},
.probe = pwmss_probe,
.remove = pwmss_remove,
};
module_platform_driver(pwmss_driver);
MODULE_DESCRIPTION("PWM Subsystem driver");
MODULE_AUTHOR("Texas Instruments");
MODULE_LICENSE("GPL");
| linux-master | drivers/bus/ti-pwmss.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Turris Mox module configuration bus driver
*
* Copyright (C) 2019 Marek Behún <[email protected]>
*/
#include <dt-bindings/bus/moxtet.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/moxtet.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/spi/spi.h>
/*
* @name: module name for sysfs
* @hwirq_base: base index for IRQ for this module (-1 if no IRQs)
* @nirqs: how many interrupts does the shift register provide
* @desc: module description for kernel log
*/
static const struct {
const char *name;
int hwirq_base;
int nirqs;
const char *desc;
} mox_module_table[] = {
/* do not change order of this array! */
{ NULL, 0, 0, NULL },
{ "sfp", -1, 0, "MOX D (SFP cage)" },
{ "pci", MOXTET_IRQ_PCI, 1, "MOX B (Mini-PCIe)" },
{ "topaz", MOXTET_IRQ_TOPAZ, 1, "MOX C (4 port switch)" },
{ "peridot", MOXTET_IRQ_PERIDOT(0), 1, "MOX E (8 port switch)" },
{ "usb3", MOXTET_IRQ_USB3, 2, "MOX F (USB 3.0)" },
{ "pci-bridge", -1, 0, "MOX G (Mini-PCIe bridge)" },
};
static inline bool mox_module_known(unsigned int id)
{
return id >= TURRIS_MOX_MODULE_FIRST && id <= TURRIS_MOX_MODULE_LAST;
}
static inline const char *mox_module_name(unsigned int id)
{
if (mox_module_known(id))
return mox_module_table[id].name;
else
return "unknown";
}
#define DEF_MODULE_ATTR(name, fmt, ...) \
static ssize_t \
module_##name##_show(struct device *dev, struct device_attribute *a, \
char *buf) \
{ \
struct moxtet_device *mdev = to_moxtet_device(dev); \
return sprintf(buf, (fmt), __VA_ARGS__); \
} \
static DEVICE_ATTR_RO(module_##name)
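/*
* For reference: DEF_MODULE_ATTR(id, "0x%x\n", mdev->id) below expands
* into a module_id_show() accessor plus a read-only dev_attr_module_id
* device attribute.
*/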
DEF_MODULE_ATTR(id, "0x%x\n", mdev->id);
DEF_MODULE_ATTR(name, "%s\n", mox_module_name(mdev->id));
DEF_MODULE_ATTR(description, "%s\n",
mox_module_known(mdev->id) ? mox_module_table[mdev->id].desc
: "");
static struct attribute *moxtet_dev_attrs[] = {
&dev_attr_module_id.attr,
&dev_attr_module_name.attr,
&dev_attr_module_description.attr,
NULL,
};
static const struct attribute_group moxtet_dev_group = {
.attrs = moxtet_dev_attrs,
};
static const struct attribute_group *moxtet_dev_groups[] = {
&moxtet_dev_group,
NULL,
};
static int moxtet_match(struct device *dev, struct device_driver *drv)
{
struct moxtet_device *mdev = to_moxtet_device(dev);
struct moxtet_driver *tdrv = to_moxtet_driver(drv);
const enum turris_mox_module_id *t;
if (of_driver_match_device(dev, drv))
return 1;
if (!tdrv->id_table)
return 0;
for (t = tdrv->id_table; *t; ++t)
if (*t == mdev->id)
return 1;
return 0;
}
static struct bus_type moxtet_bus_type = {
.name = "moxtet",
.dev_groups = moxtet_dev_groups,
.match = moxtet_match,
};
int __moxtet_register_driver(struct module *owner,
struct moxtet_driver *mdrv)
{
mdrv->driver.owner = owner;
mdrv->driver.bus = &moxtet_bus_type;
return driver_register(&mdrv->driver);
}
EXPORT_SYMBOL_GPL(__moxtet_register_driver);
static int moxtet_dev_check(struct device *dev, void *data)
{
struct moxtet_device *mdev = to_moxtet_device(dev);
struct moxtet_device *new_dev = data;
if (mdev->moxtet == new_dev->moxtet && mdev->id == new_dev->id &&
mdev->idx == new_dev->idx)
return -EBUSY;
return 0;
}
static void moxtet_dev_release(struct device *dev)
{
struct moxtet_device *mdev = to_moxtet_device(dev);
put_device(mdev->moxtet->dev);
kfree(mdev);
}
static struct moxtet_device *
moxtet_alloc_device(struct moxtet *moxtet)
{
struct moxtet_device *dev;
if (!get_device(moxtet->dev))
return NULL;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev) {
put_device(moxtet->dev);
return NULL;
}
dev->moxtet = moxtet;
dev->dev.parent = moxtet->dev;
dev->dev.bus = &moxtet_bus_type;
dev->dev.release = moxtet_dev_release;
device_initialize(&dev->dev);
return dev;
}
static int moxtet_add_device(struct moxtet_device *dev)
{
static DEFINE_MUTEX(add_mutex);
int ret;
if (dev->idx >= TURRIS_MOX_MAX_MODULES || dev->id > 0xf)
return -EINVAL;
dev_set_name(&dev->dev, "moxtet-%s.%u", mox_module_name(dev->id),
dev->idx);
mutex_lock(&add_mutex);
ret = bus_for_each_dev(&moxtet_bus_type, NULL, dev,
moxtet_dev_check);
if (ret)
goto done;
ret = device_add(&dev->dev);
if (ret < 0)
dev_err(dev->moxtet->dev, "can't add %s, status %d\n",
dev_name(&dev->dev), ret);
done:
mutex_unlock(&add_mutex);
return ret;
}
static int __unregister(struct device *dev, void *null)
{
if (dev->of_node) {
of_node_clear_flag(dev->of_node, OF_POPULATED);
of_node_put(dev->of_node);
}
device_unregister(dev);
return 0;
}
static struct moxtet_device *
of_register_moxtet_device(struct moxtet *moxtet, struct device_node *nc)
{
struct moxtet_device *dev;
u32 val;
int ret;
dev = moxtet_alloc_device(moxtet);
if (!dev) {
dev_err(moxtet->dev,
"Moxtet device alloc error for %pOF\n", nc);
return ERR_PTR(-ENOMEM);
}
ret = of_property_read_u32(nc, "reg", &val);
if (ret) {
dev_err(moxtet->dev, "%pOF has no valid 'reg' property (%d)\n",
nc, ret);
goto err_put;
}
dev->idx = val;
if (dev->idx >= TURRIS_MOX_MAX_MODULES) {
dev_err(moxtet->dev, "%pOF Moxtet address 0x%x out of range\n",
nc, dev->idx);
ret = -EINVAL;
goto err_put;
}
dev->id = moxtet->modules[dev->idx];
if (!dev->id) {
dev_err(moxtet->dev, "%pOF Moxtet address 0x%x is empty\n", nc,
dev->idx);
ret = -ENODEV;
goto err_put;
}
of_node_get(nc);
dev->dev.of_node = nc;
ret = moxtet_add_device(dev);
if (ret) {
dev_err(moxtet->dev,
"Moxtet device register error for %pOF\n", nc);
of_node_put(nc);
goto err_put;
}
return dev;
err_put:
put_device(&dev->dev);
return ERR_PTR(ret);
}
static void of_register_moxtet_devices(struct moxtet *moxtet)
{
struct moxtet_device *dev;
struct device_node *nc;
if (!moxtet->dev->of_node)
return;
for_each_available_child_of_node(moxtet->dev->of_node, nc) {
if (of_node_test_and_set_flag(nc, OF_POPULATED))
continue;
dev = of_register_moxtet_device(moxtet, nc);
if (IS_ERR(dev)) {
dev_warn(moxtet->dev,
"Failed to create Moxtet device for %pOF\n",
nc);
of_node_clear_flag(nc, OF_POPULATED);
}
}
}
static void
moxtet_register_devices_from_topology(struct moxtet *moxtet)
{
struct moxtet_device *dev;
int i, ret;
for (i = 0; i < moxtet->count; ++i) {
dev = moxtet_alloc_device(moxtet);
if (!dev) {
dev_err(moxtet->dev, "Moxtet device %u alloc error\n",
i);
continue;
}
dev->idx = i;
dev->id = moxtet->modules[i];
ret = moxtet_add_device(dev);
if (ret && ret != -EBUSY) {
put_device(&dev->dev);
dev_err(moxtet->dev,
"Moxtet device %u register error: %i\n", i,
ret);
}
}
}
/*
* @nsame: how many modules with same id are already in moxtet->modules
*/
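/*
* Worked example (values from mox_module_table above): the first "usb3"
* module found (nirqs = 2, nsame = 0) claims hwirqs MOXTET_IRQ_USB3 and
* MOXTET_IRQ_USB3 + 1; a hypothetical second one would start at
* MOXTET_IRQ_USB3 + 2 thanks to the nsame * nirqs offset.
*/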
static int moxtet_set_irq(struct moxtet *moxtet, int idx, int id, int nsame)
{
int i, first;
struct moxtet_irqpos *pos;
first = mox_module_table[id].hwirq_base +
nsame * mox_module_table[id].nirqs;
if (first + mox_module_table[id].nirqs > MOXTET_NIRQS)
return -EINVAL;
for (i = 0; i < mox_module_table[id].nirqs; ++i) {
pos = &moxtet->irq.position[first + i];
pos->idx = idx;
pos->bit = i;
moxtet->irq.exists |= BIT(first + i);
}
return 0;
}
static int moxtet_find_topology(struct moxtet *moxtet)
{
u8 buf[TURRIS_MOX_MAX_MODULES];
int cnts[TURRIS_MOX_MODULE_LAST];
int i, ret;
memset(cnts, 0, sizeof(cnts));
ret = spi_read(to_spi_device(moxtet->dev), buf, TURRIS_MOX_MAX_MODULES);
if (ret < 0)
return ret;
if (buf[0] == TURRIS_MOX_CPU_ID_EMMC) {
dev_info(moxtet->dev, "Found MOX A (eMMC CPU) module\n");
} else if (buf[0] == TURRIS_MOX_CPU_ID_SD) {
dev_info(moxtet->dev, "Found MOX A (CPU) module\n");
} else {
dev_err(moxtet->dev, "Invalid Turris MOX A CPU module 0x%02x\n",
buf[0]);
return -ENODEV;
}
moxtet->count = 0;
for (i = 1; i < TURRIS_MOX_MAX_MODULES; ++i) {
int id;
if (buf[i] == 0xff)
break;
id = buf[i] & 0xf;
moxtet->modules[i-1] = id;
++moxtet->count;
if (mox_module_known(id)) {
dev_info(moxtet->dev, "Found %s module\n",
mox_module_table[id].desc);
if (moxtet_set_irq(moxtet, i-1, id, cnts[id]++) < 0)
dev_err(moxtet->dev,
" Cannot set IRQ for module %s\n",
mox_module_table[id].desc);
} else {
dev_warn(moxtet->dev,
"Unknown Moxtet module found (ID 0x%02x)\n",
id);
}
}
return 0;
}
static int moxtet_spi_read(struct moxtet *moxtet, u8 *buf)
{
struct spi_transfer xfer = {
.rx_buf = buf,
.tx_buf = moxtet->tx,
.len = moxtet->count + 1
};
int ret;
mutex_lock(&moxtet->lock);
ret = spi_sync_transfer(to_spi_device(moxtet->dev), &xfer, 1);
mutex_unlock(&moxtet->lock);
return ret;
}
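/*
* Wire format, as can be inferred from the accessors below and from
* moxtet_find_topology(): received byte idx + 1 carries module idx's ID
* in its low nibble and that module's input/interrupt lines in the high
* nibble, while the tx buffer holds output nibbles in reverse module
* order (tx[count - idx]).
*/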
int moxtet_device_read(struct device *dev)
{
struct moxtet_device *mdev = to_moxtet_device(dev);
struct moxtet *moxtet = mdev->moxtet;
u8 buf[TURRIS_MOX_MAX_MODULES];
int ret;
if (mdev->idx >= moxtet->count)
return -EINVAL;
ret = moxtet_spi_read(moxtet, buf);
if (ret < 0)
return ret;
return buf[mdev->idx + 1] >> 4;
}
EXPORT_SYMBOL_GPL(moxtet_device_read);
int moxtet_device_write(struct device *dev, u8 val)
{
struct moxtet_device *mdev = to_moxtet_device(dev);
struct moxtet *moxtet = mdev->moxtet;
int ret;
if (mdev->idx >= moxtet->count)
return -EINVAL;
mutex_lock(&moxtet->lock);
moxtet->tx[moxtet->count - mdev->idx] = val;
ret = spi_write(to_spi_device(moxtet->dev), moxtet->tx,
moxtet->count + 1);
mutex_unlock(&moxtet->lock);
return ret;
}
EXPORT_SYMBOL_GPL(moxtet_device_write);
int moxtet_device_written(struct device *dev)
{
struct moxtet_device *mdev = to_moxtet_device(dev);
struct moxtet *moxtet = mdev->moxtet;
if (mdev->idx >= moxtet->count)
return -EINVAL;
return moxtet->tx[moxtet->count - mdev->idx];
}
EXPORT_SYMBOL_GPL(moxtet_device_written);
#ifdef CONFIG_DEBUG_FS
static int moxtet_debug_open(struct inode *inode, struct file *file)
{
file->private_data = inode->i_private;
return nonseekable_open(inode, file);
}
static ssize_t input_read(struct file *file, char __user *buf, size_t len,
loff_t *ppos)
{
struct moxtet *moxtet = file->private_data;
u8 bin[TURRIS_MOX_MAX_MODULES];
u8 hex[sizeof(bin) * 2 + 1];
int ret, n;
ret = moxtet_spi_read(moxtet, bin);
if (ret < 0)
return ret;
n = moxtet->count + 1;
bin2hex(hex, bin, n);
hex[2*n] = '\n';
return simple_read_from_buffer(buf, len, ppos, hex, 2*n + 1);
}
static const struct file_operations input_fops = {
.owner = THIS_MODULE,
.open = moxtet_debug_open,
.read = input_read,
.llseek = no_llseek,
};
static ssize_t output_read(struct file *file, char __user *buf, size_t len,
loff_t *ppos)
{
struct moxtet *moxtet = file->private_data;
u8 hex[TURRIS_MOX_MAX_MODULES * 2 + 1];
u8 *p = hex;
int i;
mutex_lock(&moxtet->lock);
for (i = 0; i < moxtet->count; ++i)
p = hex_byte_pack(p, moxtet->tx[moxtet->count - i]);
mutex_unlock(&moxtet->lock);
*p++ = '\n';
return simple_read_from_buffer(buf, len, ppos, hex, p - hex);
}
static ssize_t output_write(struct file *file, const char __user *buf,
size_t len, loff_t *ppos)
{
struct moxtet *moxtet = file->private_data;
u8 bin[TURRIS_MOX_MAX_MODULES];
u8 hex[sizeof(bin) * 2 + 1];
ssize_t res;
loff_t dummy = 0;
int err, i;
if (len > 2 * moxtet->count + 1 || len < 2 * moxtet->count)
return -EINVAL;
res = simple_write_to_buffer(hex, sizeof(hex), &dummy, buf, len);
if (res < 0)
return res;
if (len % 2 == 1 && hex[len - 1] != '\n')
return -EINVAL;
err = hex2bin(bin, hex, moxtet->count);
if (err < 0)
return -EINVAL;
mutex_lock(&moxtet->lock);
for (i = 0; i < moxtet->count; ++i)
moxtet->tx[moxtet->count - i] = bin[i];
err = spi_write(to_spi_device(moxtet->dev), moxtet->tx,
moxtet->count + 1);
mutex_unlock(&moxtet->lock);
return err < 0 ? err : len;
}
static const struct file_operations output_fops = {
.owner = THIS_MODULE,
.open = moxtet_debug_open,
.read = output_read,
.write = output_write,
.llseek = no_llseek,
};
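/*
* Example (hypothetical shell session, two modules attached): writing
* "a1b2" to the debugfs "output" file is accepted (2 * count hex
* digits, optionally followed by a newline); any other length is
* rejected with -EINVAL.
*/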
static int moxtet_register_debugfs(struct moxtet *moxtet)
{
struct dentry *root, *entry;
root = debugfs_create_dir("moxtet", NULL);
if (IS_ERR(root))
return PTR_ERR(root);
entry = debugfs_create_file_unsafe("input", 0444, root, moxtet,
&input_fops);
if (IS_ERR(entry))
goto err_remove;
entry = debugfs_create_file_unsafe("output", 0644, root, moxtet,
&output_fops);
if (IS_ERR(entry))
goto err_remove;
moxtet->debugfs_root = root;
return 0;
err_remove:
debugfs_remove_recursive(root);
return PTR_ERR(entry);
}
static void moxtet_unregister_debugfs(struct moxtet *moxtet)
{
debugfs_remove_recursive(moxtet->debugfs_root);
}
#else
static inline int moxtet_register_debugfs(struct moxtet *moxtet)
{
return 0;
}
static inline void moxtet_unregister_debugfs(struct moxtet *moxtet)
{
}
#endif
static int moxtet_irq_domain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hw)
{
struct moxtet *moxtet = d->host_data;
if (hw >= MOXTET_NIRQS || !(moxtet->irq.exists & BIT(hw))) {
dev_err(moxtet->dev, "Invalid hw irq number\n");
return -EINVAL;
}
irq_set_chip_data(irq, d->host_data);
irq_set_chip_and_handler(irq, &moxtet->irq.chip, handle_level_irq);
return 0;
}
static int moxtet_irq_domain_xlate(struct irq_domain *d,
struct device_node *ctrlr,
const u32 *intspec, unsigned int intsize,
unsigned long *out_hwirq,
unsigned int *out_type)
{
struct moxtet *moxtet = d->host_data;
int irq;
if (WARN_ON(intsize < 1))
return -EINVAL;
irq = intspec[0];
if (irq >= MOXTET_NIRQS || !(moxtet->irq.exists & BIT(irq)))
return -EINVAL;
*out_hwirq = irq;
*out_type = IRQ_TYPE_NONE;
return 0;
}
static const struct irq_domain_ops moxtet_irq_domain = {
.map = moxtet_irq_domain_map,
.xlate = moxtet_irq_domain_xlate,
};
static void moxtet_irq_mask(struct irq_data *d)
{
struct moxtet *moxtet = irq_data_get_irq_chip_data(d);
moxtet->irq.masked |= BIT(d->hwirq);
}
static void moxtet_irq_unmask(struct irq_data *d)
{
struct moxtet *moxtet = irq_data_get_irq_chip_data(d);
moxtet->irq.masked &= ~BIT(d->hwirq);
}
static void moxtet_irq_print_chip(struct irq_data *d, struct seq_file *p)
{
struct moxtet *moxtet = irq_data_get_irq_chip_data(d);
struct moxtet_irqpos *pos = &moxtet->irq.position[d->hwirq];
int id;
id = moxtet->modules[pos->idx];
seq_printf(p, " moxtet-%s.%i#%i", mox_module_name(id), pos->idx,
pos->bit);
}
static const struct irq_chip moxtet_irq_chip = {
.name = "moxtet",
.irq_mask = moxtet_irq_mask,
.irq_unmask = moxtet_irq_unmask,
.irq_print_chip = moxtet_irq_print_chip,
};
static int moxtet_irq_read(struct moxtet *moxtet, unsigned long *map)
{
struct moxtet_irqpos *pos = moxtet->irq.position;
u8 buf[TURRIS_MOX_MAX_MODULES];
int i, ret;
ret = moxtet_spi_read(moxtet, buf);
if (ret < 0)
return ret;
*map = 0;
for_each_set_bit(i, &moxtet->irq.exists, MOXTET_NIRQS) {
if (!(buf[pos[i].idx + 1] & BIT(4 + pos[i].bit)))
set_bit(i, map);
}
return 0;
}
static irqreturn_t moxtet_irq_thread_fn(int irq, void *data)
{
struct moxtet *moxtet = data;
unsigned long set;
int nhandled = 0, i, sub_irq, ret;
ret = moxtet_irq_read(moxtet, &set);
if (ret < 0)
goto out;
set &= ~moxtet->irq.masked;
do {
for_each_set_bit(i, &set, MOXTET_NIRQS) {
sub_irq = irq_find_mapping(moxtet->irq.domain, i);
handle_nested_irq(sub_irq);
dev_dbg(moxtet->dev, "%i irq\n", i);
++nhandled;
}
ret = moxtet_irq_read(moxtet, &set);
if (ret < 0)
goto out;
set &= ~moxtet->irq.masked;
} while (set);
out:
return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
}
static void moxtet_irq_free(struct moxtet *moxtet)
{
int i, irq;
for (i = 0; i < MOXTET_NIRQS; ++i) {
if (moxtet->irq.exists & BIT(i)) {
irq = irq_find_mapping(moxtet->irq.domain, i);
irq_dispose_mapping(irq);
}
}
irq_domain_remove(moxtet->irq.domain);
}
static int moxtet_irq_setup(struct moxtet *moxtet)
{
int i, ret;
moxtet->irq.domain = irq_domain_add_simple(moxtet->dev->of_node,
MOXTET_NIRQS, 0,
&moxtet_irq_domain, moxtet);
if (moxtet->irq.domain == NULL) {
dev_err(moxtet->dev, "Could not add IRQ domain\n");
return -ENOMEM;
}
for (i = 0; i < MOXTET_NIRQS; ++i)
if (moxtet->irq.exists & BIT(i))
irq_create_mapping(moxtet->irq.domain, i);
moxtet->irq.chip = moxtet_irq_chip;
moxtet->irq.masked = ~0;
ret = request_threaded_irq(moxtet->dev_irq, NULL, moxtet_irq_thread_fn,
IRQF_ONESHOT, "moxtet", moxtet);
if (ret < 0)
goto err_free;
return 0;
err_free:
moxtet_irq_free(moxtet);
return ret;
}
static int moxtet_probe(struct spi_device *spi)
{
struct moxtet *moxtet;
int ret;
ret = spi_setup(spi);
if (ret < 0)
return ret;
moxtet = devm_kzalloc(&spi->dev, sizeof(struct moxtet),
GFP_KERNEL);
if (!moxtet)
return -ENOMEM;
moxtet->dev = &spi->dev;
spi_set_drvdata(spi, moxtet);
mutex_init(&moxtet->lock);
moxtet->dev_irq = of_irq_get(moxtet->dev->of_node, 0);
if (moxtet->dev_irq == -EPROBE_DEFER)
return -EPROBE_DEFER;
if (moxtet->dev_irq <= 0) {
dev_err(moxtet->dev, "No IRQ resource found\n");
return -ENXIO;
}
ret = moxtet_find_topology(moxtet);
if (ret < 0)
return ret;
if (moxtet->irq.exists) {
ret = moxtet_irq_setup(moxtet);
if (ret < 0)
return ret;
}
of_register_moxtet_devices(moxtet);
moxtet_register_devices_from_topology(moxtet);
ret = moxtet_register_debugfs(moxtet);
if (ret < 0)
dev_warn(moxtet->dev, "Failed creating debugfs entries: %i\n",
ret);
return 0;
}
static void moxtet_remove(struct spi_device *spi)
{
struct moxtet *moxtet = spi_get_drvdata(spi);
if (moxtet->irq.domain) {
free_irq(moxtet->dev_irq, moxtet);
moxtet_irq_free(moxtet);
}
moxtet_unregister_debugfs(moxtet);
device_for_each_child(moxtet->dev, NULL, __unregister);
mutex_destroy(&moxtet->lock);
}
static const struct of_device_id moxtet_dt_ids[] = {
{ .compatible = "cznic,moxtet" },
{},
};
MODULE_DEVICE_TABLE(of, moxtet_dt_ids);
static struct spi_driver moxtet_spi_driver = {
.driver = {
.name = "moxtet",
.of_match_table = moxtet_dt_ids,
},
.probe = moxtet_probe,
.remove = moxtet_remove,
};
static int __init moxtet_init(void)
{
int ret;
ret = bus_register(&moxtet_bus_type);
if (ret < 0) {
pr_err("moxtet bus registration failed: %d\n", ret);
goto error;
}
ret = spi_register_driver(&moxtet_spi_driver);
if (ret < 0) {
pr_err("moxtet spi driver registration failed: %d\n", ret);
goto error_bus;
}
return 0;
error_bus:
bus_unregister(&moxtet_bus_type);
error:
return ret;
}
postcore_initcall_sync(moxtet_init);
static void __exit moxtet_exit(void)
{
spi_unregister_driver(&moxtet_spi_driver);
bus_unregister(&moxtet_bus_type);
}
module_exit(moxtet_exit);
MODULE_AUTHOR("Marek Behun <[email protected]>");
MODULE_DESCRIPTION("CZ.NIC's Turris Mox module configuration bus");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/bus/moxtet.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* TI da8xx master peripheral priority driver
*
* Copyright (C) 2016 BayLibre SAS
*
* Author:
* Bartosz Golaszewski <[email protected]>
*/
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/regmap.h>
/*
* REVISIT: Linux doesn't have a good framework for the kind of performance
* knobs this driver controls. We can't use device tree properties as it deals
* with hardware configuration rather than description. We also don't want to
* commit to maintaining some random sysfs attributes.
*
* For now we just hardcode the register values for the boards that need
* some changes (as is the case for the LCD controller on da850-lcdk - the
* first board we support here). When linux gets an appropriate framework,
* we'll easily convert the driver to it.
*/
#define DA8XX_MSTPRI0_OFFSET 0
#define DA8XX_MSTPRI1_OFFSET 4
#define DA8XX_MSTPRI2_OFFSET 8
enum {
DA8XX_MSTPRI_ARM_I = 0,
DA8XX_MSTPRI_ARM_D,
DA8XX_MSTPRI_UPP,
DA8XX_MSTPRI_SATA,
DA8XX_MSTPRI_PRU0,
DA8XX_MSTPRI_PRU1,
DA8XX_MSTPRI_EDMA30TC0,
DA8XX_MSTPRI_EDMA30TC1,
DA8XX_MSTPRI_EDMA31TC0,
DA8XX_MSTPRI_VPIF_DMA_0,
DA8XX_MSTPRI_VPIF_DMA_1,
DA8XX_MSTPRI_EMAC,
DA8XX_MSTPRI_USB0CFG,
DA8XX_MSTPRI_USB0CDMA,
DA8XX_MSTPRI_UHPI,
DA8XX_MSTPRI_USB1,
DA8XX_MSTPRI_LCDC,
};
struct da8xx_mstpri_descr {
int reg;
int shift;
int mask;
};
static const struct da8xx_mstpri_descr da8xx_mstpri_priority_list[] = {
[DA8XX_MSTPRI_ARM_I] = {
.reg = DA8XX_MSTPRI0_OFFSET,
.shift = 0,
.mask = 0x0000000f,
},
[DA8XX_MSTPRI_ARM_D] = {
.reg = DA8XX_MSTPRI0_OFFSET,
.shift = 4,
.mask = 0x000000f0,
},
[DA8XX_MSTPRI_UPP] = {
.reg = DA8XX_MSTPRI0_OFFSET,
.shift = 16,
.mask = 0x000f0000,
},
[DA8XX_MSTPRI_SATA] = {
.reg = DA8XX_MSTPRI0_OFFSET,
.shift = 20,
.mask = 0x00f00000,
},
[DA8XX_MSTPRI_PRU0] = {
.reg = DA8XX_MSTPRI1_OFFSET,
.shift = 0,
.mask = 0x0000000f,
},
[DA8XX_MSTPRI_PRU1] = {
.reg = DA8XX_MSTPRI1_OFFSET,
.shift = 4,
.mask = 0x000000f0,
},
[DA8XX_MSTPRI_EDMA30TC0] = {
.reg = DA8XX_MSTPRI1_OFFSET,
.shift = 8,
.mask = 0x00000f00,
},
[DA8XX_MSTPRI_EDMA30TC1] = {
.reg = DA8XX_MSTPRI1_OFFSET,
.shift = 12,
.mask = 0x0000f000,
},
[DA8XX_MSTPRI_EDMA31TC0] = {
.reg = DA8XX_MSTPRI1_OFFSET,
.shift = 16,
.mask = 0x000f0000,
},
[DA8XX_MSTPRI_VPIF_DMA_0] = {
.reg = DA8XX_MSTPRI1_OFFSET,
.shift = 24,
.mask = 0x0f000000,
},
[DA8XX_MSTPRI_VPIF_DMA_1] = {
.reg = DA8XX_MSTPRI1_OFFSET,
.shift = 28,
.mask = 0xf0000000,
},
[DA8XX_MSTPRI_EMAC] = {
.reg = DA8XX_MSTPRI2_OFFSET,
.shift = 0,
.mask = 0x0000000f,
},
[DA8XX_MSTPRI_USB0CFG] = {
.reg = DA8XX_MSTPRI2_OFFSET,
.shift = 8,
.mask = 0x00000f00,
},
[DA8XX_MSTPRI_USB0CDMA] = {
.reg = DA8XX_MSTPRI2_OFFSET,
.shift = 12,
.mask = 0x0000f000,
},
[DA8XX_MSTPRI_UHPI] = {
.reg = DA8XX_MSTPRI2_OFFSET,
.shift = 20,
.mask = 0x00f00000,
},
[DA8XX_MSTPRI_USB1] = {
.reg = DA8XX_MSTPRI2_OFFSET,
.shift = 24,
.mask = 0x0f000000,
},
[DA8XX_MSTPRI_LCDC] = {
.reg = DA8XX_MSTPRI2_OFFSET,
.shift = 28,
.mask = 0xf0000000,
},
};
struct da8xx_mstpri_priority {
int which;
u32 val;
};
struct da8xx_mstpri_board_priorities {
const char *board;
const struct da8xx_mstpri_priority *priorities;
size_t numprio;
};
/*
* Default memory settings of da850 do not meet the throughput/latency
* requirements of tilcdc. This results in the image displayed being
* incorrect and the following warning being displayed by the LCDC
* drm driver:
*
* tilcdc da8xx_lcdc.0: tilcdc_crtc_irq(0x00000020): FIFO underfow
*/
static const struct da8xx_mstpri_priority da850_lcdk_priorities[] = {
{
.which = DA8XX_MSTPRI_LCDC,
.val = 0,
},
{
.which = DA8XX_MSTPRI_EDMA30TC1,
.val = 0,
},
{
.which = DA8XX_MSTPRI_EDMA30TC0,
.val = 1,
},
};
static const struct da8xx_mstpri_board_priorities da8xx_mstpri_board_confs[] = {
{
.board = "ti,da850-lcdk",
.priorities = da850_lcdk_priorities,
.numprio = ARRAY_SIZE(da850_lcdk_priorities),
},
};
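/*
* Supporting another board is a matter of adding an entry above, e.g.
* (hypothetical priorities table for the da850-evm):
*
* {
* .board = "ti,da850-evm",
* .priorities = da850_evm_priorities,
* .numprio = ARRAY_SIZE(da850_evm_priorities),
* },
*/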
static const struct da8xx_mstpri_board_priorities *
da8xx_mstpri_get_board_prio(void)
{
const struct da8xx_mstpri_board_priorities *board_prio;
int i;
for (i = 0; i < ARRAY_SIZE(da8xx_mstpri_board_confs); i++) {
board_prio = &da8xx_mstpri_board_confs[i];
if (of_machine_is_compatible(board_prio->board))
return board_prio;
}
return NULL;
}
static int da8xx_mstpri_probe(struct platform_device *pdev)
{
const struct da8xx_mstpri_board_priorities *prio_list;
const struct da8xx_mstpri_descr *prio_descr;
const struct da8xx_mstpri_priority *prio;
struct device *dev = &pdev->dev;
struct resource *res;
void __iomem *mstpri;
u32 reg;
int i;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mstpri = devm_ioremap_resource(dev, res);
if (IS_ERR(mstpri)) {
dev_err(dev, "unable to map MSTPRI registers\n");
return PTR_ERR(mstpri);
}
prio_list = da8xx_mstpri_get_board_prio();
if (!prio_list) {
dev_err(dev, "no master priorities defined for this board\n");
return -EINVAL;
}
for (i = 0; i < prio_list->numprio; i++) {
prio = &prio_list->priorities[i];
prio_descr = &da8xx_mstpri_priority_list[prio->which];
if (prio_descr->reg + sizeof(u32) > resource_size(res)) {
dev_warn(dev, "register offset out of range\n");
continue;
}
reg = readl(mstpri + prio_descr->reg);
reg &= ~prio_descr->mask;
reg |= prio->val << prio_descr->shift;
writel(reg, mstpri + prio_descr->reg);
}
return 0;
}
static const struct of_device_id da8xx_mstpri_of_match[] = {
{ .compatible = "ti,da850-mstpri", },
{ },
};
static struct platform_driver da8xx_mstpri_driver = {
.probe = da8xx_mstpri_probe,
.driver = {
.name = "da8xx-mstpri",
.of_match_table = da8xx_mstpri_of_match,
},
};
module_platform_driver(da8xx_mstpri_driver);
MODULE_AUTHOR("Bartosz Golaszewski <[email protected]>");
MODULE_DESCRIPTION("TI da8xx master peripheral priority driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/bus/da8xx-mstpri.c |
/*
* Tegra ACONNECT Bus Driver
*
* Copyright (C) 2016, NVIDIA CORPORATION. All rights reserved.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
struct tegra_aconnect {
struct clk *ape_clk;
struct clk *apb2ape_clk;
};
static int tegra_aconnect_probe(struct platform_device *pdev)
{
struct tegra_aconnect *aconnect;
if (!pdev->dev.of_node)
return -EINVAL;
aconnect = devm_kzalloc(&pdev->dev, sizeof(struct tegra_aconnect),
GFP_KERNEL);
if (!aconnect)
return -ENOMEM;
aconnect->ape_clk = devm_clk_get(&pdev->dev, "ape");
if (IS_ERR(aconnect->ape_clk)) {
dev_err(&pdev->dev, "Can't retrieve ape clock\n");
return PTR_ERR(aconnect->ape_clk);
}
aconnect->apb2ape_clk = devm_clk_get(&pdev->dev, "apb2ape");
if (IS_ERR(aconnect->apb2ape_clk)) {
dev_err(&pdev->dev, "Can't retrieve apb2ape clock\n");
return PTR_ERR(aconnect->apb2ape_clk);
}
dev_set_drvdata(&pdev->dev, aconnect);
pm_runtime_enable(&pdev->dev);
of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
dev_info(&pdev->dev, "Tegra ACONNECT bus registered\n");
return 0;
}
static int tegra_aconnect_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
return 0;
}
static int tegra_aconnect_runtime_resume(struct device *dev)
{
struct tegra_aconnect *aconnect = dev_get_drvdata(dev);
int ret;
ret = clk_prepare_enable(aconnect->ape_clk);
if (ret) {
dev_err(dev, "ape clk_enable failed: %d\n", ret);
return ret;
}
ret = clk_prepare_enable(aconnect->apb2ape_clk);
if (ret) {
clk_disable_unprepare(aconnect->ape_clk);
dev_err(dev, "apb2ape clk_enable failed: %d\n", ret);
return ret;
}
return 0;
}
static int tegra_aconnect_runtime_suspend(struct device *dev)
{
struct tegra_aconnect *aconnect = dev_get_drvdata(dev);
clk_disable_unprepare(aconnect->ape_clk);
clk_disable_unprepare(aconnect->apb2ape_clk);
return 0;
}
static const struct dev_pm_ops tegra_aconnect_pm_ops = {
SET_RUNTIME_PM_OPS(tegra_aconnect_runtime_suspend,
tegra_aconnect_runtime_resume, NULL)
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
};
static const struct of_device_id tegra_aconnect_of_match[] = {
{ .compatible = "nvidia,tegra210-aconnect", },
{ }
};
MODULE_DEVICE_TABLE(of, tegra_aconnect_of_match);
static struct platform_driver tegra_aconnect_driver = {
.probe = tegra_aconnect_probe,
.remove = tegra_aconnect_remove,
.driver = {
.name = "tegra-aconnect",
.of_match_table = tegra_aconnect_of_match,
.pm = &tegra_aconnect_pm_ops,
},
};
module_platform_driver(tegra_aconnect_driver);
MODULE_DESCRIPTION("NVIDIA Tegra ACONNECT Bus Driver");
MODULE_AUTHOR("Jon Hunter <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/bus/tegra-aconnect.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
*
* Authors:
* Serge Semin <[email protected]>
*
* Baikal-T1 AXI-bus driver
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/atomic.h>
#include <linux/regmap.h>
#include <linux/platform_device.h>
#include <linux/mfd/syscon.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/nmi.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/reset.h>
#include <linux/sysfs.h>
#define BT1_AXI_WERRL 0x110
#define BT1_AXI_WERRH 0x114
#define BT1_AXI_WERRH_TYPE BIT(23)
#define BT1_AXI_WERRH_ADDR_FLD 24
#define BT1_AXI_WERRH_ADDR_MASK GENMASK(31, BT1_AXI_WERRH_ADDR_FLD)
/*
* struct bt1_axi - Baikal-T1 AXI-bus private data
* @dev: Pointer to the device structure.
* @qos_regs: AXI Interconnect QoS tuning registers.
* @sys_regs: Baikal-T1 System Controller registers map.
* @irq: Errors IRQ number.
* @aclk: AXI reference clock.
* @arst: AXI Interconnect reset line.
* @count: Number of errors detected.
*/
struct bt1_axi {
struct device *dev;
void __iomem *qos_regs;
struct regmap *sys_regs;
int irq;
struct clk *aclk;
struct reset_control *arst;
atomic_t count;
};
static irqreturn_t bt1_axi_isr(int irq, void *data)
{
struct bt1_axi *axi = data;
u32 low = 0, high = 0;
regmap_read(axi->sys_regs, BT1_AXI_WERRL, &low);
regmap_read(axi->sys_regs, BT1_AXI_WERRH, &high);
dev_crit_ratelimited(axi->dev,
"AXI-bus fault %d: %s at 0x%x%08x\n",
atomic_inc_return(&axi->count),
high & BT1_AXI_WERRH_TYPE ? "no slave" : "slave protocol error",
high, low);
/*
* Print backtrace on each CPU. This might be pointless if the fault
* has happened on the same CPU as the IRQ handler is executed or
* the other core proceeded further execution despite the error.
* But if it's not, by looking at the trace we would get straight to
* the cause of the problem.
*/
trigger_all_cpu_backtrace();
return IRQ_HANDLED;
}
static void bt1_axi_clear_data(void *data)
{
struct bt1_axi *axi = data;
struct platform_device *pdev = to_platform_device(axi->dev);
platform_set_drvdata(pdev, NULL);
}
static struct bt1_axi *bt1_axi_create_data(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct bt1_axi *axi;
int ret;
axi = devm_kzalloc(dev, sizeof(*axi), GFP_KERNEL);
if (!axi)
return ERR_PTR(-ENOMEM);
ret = devm_add_action(dev, bt1_axi_clear_data, axi);
if (ret) {
dev_err(dev, "Can't add AXI EHB data clear action\n");
return ERR_PTR(ret);
}
axi->dev = dev;
atomic_set(&axi->count, 0);
platform_set_drvdata(pdev, axi);
return axi;
}
static int bt1_axi_request_regs(struct bt1_axi *axi)
{
struct platform_device *pdev = to_platform_device(axi->dev);
struct device *dev = axi->dev;
axi->sys_regs = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
if (IS_ERR(axi->sys_regs)) {
dev_err(dev, "Couldn't find syscon registers\n");
return PTR_ERR(axi->sys_regs);
}
axi->qos_regs = devm_platform_ioremap_resource_byname(pdev, "qos");
if (IS_ERR(axi->qos_regs))
dev_err(dev, "Couldn't map AXI-bus QoS registers\n");
return PTR_ERR_OR_ZERO(axi->qos_regs);
}
static int bt1_axi_request_rst(struct bt1_axi *axi)
{
int ret;
axi->arst = devm_reset_control_get_optional_exclusive(axi->dev, "arst");
if (IS_ERR(axi->arst))
return dev_err_probe(axi->dev, PTR_ERR(axi->arst),
"Couldn't get reset control line\n");
ret = reset_control_deassert(axi->arst);
if (ret)
dev_err(axi->dev, "Failed to deassert the reset line\n");
return ret;
}
static void bt1_axi_disable_clk(void *data)
{
struct bt1_axi *axi = data;
clk_disable_unprepare(axi->aclk);
}
static int bt1_axi_request_clk(struct bt1_axi *axi)
{
int ret;
axi->aclk = devm_clk_get(axi->dev, "aclk");
if (IS_ERR(axi->aclk))
return dev_err_probe(axi->dev, PTR_ERR(axi->aclk),
"Couldn't get AXI Interconnect clock\n");
ret = clk_prepare_enable(axi->aclk);
if (ret) {
dev_err(axi->dev, "Couldn't enable the AXI clock\n");
return ret;
}
ret = devm_add_action_or_reset(axi->dev, bt1_axi_disable_clk, axi);
if (ret)
dev_err(axi->dev, "Can't add AXI clock disable action\n");
return ret;
}
static int bt1_axi_request_irq(struct bt1_axi *axi)
{
struct platform_device *pdev = to_platform_device(axi->dev);
int ret;
axi->irq = platform_get_irq(pdev, 0);
if (axi->irq < 0)
return axi->irq;
ret = devm_request_irq(axi->dev, axi->irq, bt1_axi_isr, IRQF_SHARED,
"bt1-axi", axi);
if (ret)
dev_err(axi->dev, "Couldn't request AXI EHB IRQ\n");
return ret;
}
static ssize_t count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct bt1_axi *axi = dev_get_drvdata(dev);
return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&axi->count));
}
static DEVICE_ATTR_RO(count);
static ssize_t inject_error_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "Error injection: bus unaligned\n");
}
static ssize_t inject_error_store(struct device *dev,
struct device_attribute *attr,
const char *data, size_t count)
{
struct bt1_axi *axi = dev_get_drvdata(dev);
/*
* Performing an unaligned (byte-wide) read from this memory causes a
* CM2 bus error, while an unaligned write causes the AXI-bus write
* error that is handled by this driver.
*/
if (sysfs_streq(data, "bus"))
readb(axi->qos_regs);
else if (sysfs_streq(data, "unaligned"))
writeb(0, axi->qos_regs);
else
return -EINVAL;
return count;
}
static DEVICE_ATTR_RW(inject_error);
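/*
* Example usage (hypothetical sysfs path):
*
* echo bus > /sys/devices/platform/.../inject_error # CM2 bus error
* echo unaligned > /sys/devices/platform/.../inject_error # AXI error
*
* The AXI write error shows up in the "count" attribute above.
*/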
static struct attribute *bt1_axi_sysfs_attrs[] = {
&dev_attr_count.attr,
&dev_attr_inject_error.attr,
NULL
};
ATTRIBUTE_GROUPS(bt1_axi_sysfs);
static void bt1_axi_remove_sysfs(void *data)
{
struct bt1_axi *axi = data;
device_remove_groups(axi->dev, bt1_axi_sysfs_groups);
}
static int bt1_axi_init_sysfs(struct bt1_axi *axi)
{
int ret;
ret = device_add_groups(axi->dev, bt1_axi_sysfs_groups);
if (ret) {
dev_err(axi->dev, "Failed to add sysfs files group\n");
return ret;
}
ret = devm_add_action_or_reset(axi->dev, bt1_axi_remove_sysfs, axi);
if (ret)
dev_err(axi->dev, "Can't add AXI EHB sysfs remove action\n");
return ret;
}
static int bt1_axi_probe(struct platform_device *pdev)
{
struct bt1_axi *axi;
int ret;
axi = bt1_axi_create_data(pdev);
if (IS_ERR(axi))
return PTR_ERR(axi);
ret = bt1_axi_request_regs(axi);
if (ret)
return ret;
ret = bt1_axi_request_rst(axi);
if (ret)
return ret;
ret = bt1_axi_request_clk(axi);
if (ret)
return ret;
ret = bt1_axi_request_irq(axi);
if (ret)
return ret;
ret = bt1_axi_init_sysfs(axi);
if (ret)
return ret;
return 0;
}
static const struct of_device_id bt1_axi_of_match[] = {
{ .compatible = "baikal,bt1-axi" },
{ }
};
MODULE_DEVICE_TABLE(of, bt1_axi_of_match);
static struct platform_driver bt1_axi_driver = {
.probe = bt1_axi_probe,
.driver = {
.name = "bt1-axi",
.of_match_table = bt1_axi_of_match
}
};
module_platform_driver(bt1_axi_driver);
MODULE_AUTHOR("Serge Semin <[email protected]>");
MODULE_DESCRIPTION("Baikal-T1 AXI-bus driver");
| linux-master | drivers/bus/bt1-axi.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Allwinner A64 Display Engine 2.0 Bus Driver
*
* Copyright (C) 2018 Icenowy Zheng <[email protected]>
*/
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/soc/sunxi/sunxi_sram.h>
static int sun50i_de2_bus_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
int ret;
ret = sunxi_sram_claim(&pdev->dev);
if (ret)
return dev_err_probe(&pdev->dev, ret,
"Couldn't map SRAM to device\n");
of_platform_populate(np, NULL, NULL, &pdev->dev);
return 0;
}
static int sun50i_de2_bus_remove(struct platform_device *pdev)
{
sunxi_sram_release(&pdev->dev);
return 0;
}
static const struct of_device_id sun50i_de2_bus_of_match[] = {
{ .compatible = "allwinner,sun50i-a64-de2", },
{ /* sentinel */ }
};
static struct platform_driver sun50i_de2_bus_driver = {
.probe = sun50i_de2_bus_probe,
.remove = sun50i_de2_bus_remove,
.driver = {
.name = "sun50i-de2-bus",
.of_match_table = sun50i_de2_bus_of_match,
},
};
builtin_platform_driver(sun50i_de2_bus_driver);
| linux-master | drivers/bus/sun50i-de2.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Qualcomm External Bus Interface 2 (EBI2) driver
* an older version of the Qualcomm Parallel Interface Controller (QPIC)
*
* Copyright (C) 2016 Linaro Ltd.
*
* Author: Linus Walleij <[email protected]>
*
* See the device tree bindings for this block for more details on the
* hardware.
*/
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/bitops.h>
/*
* CS0, CS1, CS4 and CS5 are two bits wide, CS2 and CS3 are one bit.
*/
#define EBI2_CS0_ENABLE_MASK (BIT(0)|BIT(1))
#define EBI2_CS1_ENABLE_MASK (BIT(2)|BIT(3))
#define EBI2_CS2_ENABLE_MASK BIT(4)
#define EBI2_CS3_ENABLE_MASK BIT(5)
#define EBI2_CS4_ENABLE_MASK (BIT(6)|BIT(7))
#define EBI2_CS5_ENABLE_MASK (BIT(8)|BIT(9))
#define EBI2_CSN_MASK GENMASK(9, 0)
#define EBI2_XMEM_CFG 0x0000 /* Power management etc */
/*
* SLOW CSn CFG
*
* Bits 31-28: RECOVERY recovery cycles (0 = 1, 1 = 2 etc) this is the time the
* memory continues to drive the data bus after OE is de-asserted.
* Inserted when reading one CS and switching to another CS or read
* followed by write on the same CS. Valid values 0 thru 15.
* Bits 27-24: WR_HOLD write hold cycles, these are extra cycles inserted after
* every write minimum 1. The data out is driven from the time WE is
* asserted until CS is asserted. With a hold of 1, the CS stays
* active for 1 extra cycle etc. Valid values 0 thru 15.
* Bits 23-16: WR_DELTA initial latency for write cycles inserted for the first
* write to a page or burst memory
* Bits 15-8: RD_DELTA initial latency for read cycles inserted for the first
* read to a page or burst memory
* Bits 7-4: WR_WAIT number of wait cycles for every write access, 0=1 cycle
* so 1 thru 16 cycles.
* Bits 3-0: RD_WAIT number of wait cycles for every read access, 0=1 cycle
* so 1 thru 16 cycles.
*/
#define EBI2_XMEM_CS0_SLOW_CFG 0x0008
#define EBI2_XMEM_CS1_SLOW_CFG 0x000C
#define EBI2_XMEM_CS2_SLOW_CFG 0x0010
#define EBI2_XMEM_CS3_SLOW_CFG 0x0014
#define EBI2_XMEM_CS4_SLOW_CFG 0x0018
#define EBI2_XMEM_CS5_SLOW_CFG 0x001C
#define EBI2_XMEM_RECOVERY_SHIFT 28
#define EBI2_XMEM_WR_HOLD_SHIFT 24
#define EBI2_XMEM_WR_DELTA_SHIFT 16
#define EBI2_XMEM_RD_DELTA_SHIFT 8
#define EBI2_XMEM_WR_WAIT_SHIFT 4
#define EBI2_XMEM_RD_WAIT_SHIFT 0
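/*
* Illustrative only: a SLOW config word for, say, 2 recovery cycles,
* 1 write hold cycle and 3 read wait states would be composed as
*
* val = (2 << EBI2_XMEM_RECOVERY_SHIFT) |
* (1 << EBI2_XMEM_WR_HOLD_SHIFT) |
* (3 << EBI2_XMEM_RD_WAIT_SHIFT);
*
* which is effectively what the xmem_props table further down drives
* from device tree values.
*/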
/*
* FAST CSn CFG
* Bits 31-28: ?
* Bits 27-24: RD_HOLD: the length in cycles of the first segment of a read
* transfer. For a single read transfer this will be the time
* from CS assertion to OE assertion.
* Bits 18-24: ?
* Bits 17-16: ADV_OE_RECOVERY, the number of cycles elapsed before an OE
* assertion, with respect to the cycle where ADV is asserted.
* 2 means 2 cycles between ADV and OE. Values 0, 1, 2 or 3.
* Bits 5: ADDR_HOLD_ENA, The address is held for an extra cycle to meet
* hold time requirements with ADV assertion.
*
* The manual mentions "write precharge cycles" and "precharge cycles".
* We have not been able to figure out which bit fields these correspond to
* in the hardware, or what valid values exist. The current hypothesis is that
* this is something just used on the FAST chip selects. There is also a "byte
* device enable" flag somewhere for 8bit memories.
*/
#define EBI2_XMEM_CS0_FAST_CFG 0x0028
#define EBI2_XMEM_CS1_FAST_CFG 0x002C
#define EBI2_XMEM_CS2_FAST_CFG 0x0030
#define EBI2_XMEM_CS3_FAST_CFG 0x0034
#define EBI2_XMEM_CS4_FAST_CFG 0x0038
#define EBI2_XMEM_CS5_FAST_CFG 0x003C
#define EBI2_XMEM_RD_HOLD_SHIFT 24
#define EBI2_XMEM_ADV_OE_RECOVERY_SHIFT 16
#define EBI2_XMEM_ADDR_HOLD_ENA_SHIFT 5
/**
* struct cs_data - struct with info on a chipselect setting
* @enable_mask: mask to enable the chipselect in the EBI2 config
* @slow_cfg: offset to XMEMC slow CS config
* @fast_cfg: offset to XMEMC fast CS config
*/
struct cs_data {
u32 enable_mask;
u16 slow_cfg;
u16 fast_cfg;
};
static const struct cs_data cs_info[] = {
{
/* CS0 */
.enable_mask = EBI2_CS0_ENABLE_MASK,
.slow_cfg = EBI2_XMEM_CS0_SLOW_CFG,
.fast_cfg = EBI2_XMEM_CS0_FAST_CFG,
},
{
/* CS1 */
.enable_mask = EBI2_CS1_ENABLE_MASK,
.slow_cfg = EBI2_XMEM_CS1_SLOW_CFG,
.fast_cfg = EBI2_XMEM_CS1_FAST_CFG,
},
{
/* CS2 */
.enable_mask = EBI2_CS2_ENABLE_MASK,
.slow_cfg = EBI2_XMEM_CS2_SLOW_CFG,
.fast_cfg = EBI2_XMEM_CS2_FAST_CFG,
},
{
/* CS3 */
.enable_mask = EBI2_CS3_ENABLE_MASK,
.slow_cfg = EBI2_XMEM_CS3_SLOW_CFG,
.fast_cfg = EBI2_XMEM_CS3_FAST_CFG,
},
{
/* CS4 */
.enable_mask = EBI2_CS4_ENABLE_MASK,
.slow_cfg = EBI2_XMEM_CS4_SLOW_CFG,
.fast_cfg = EBI2_XMEM_CS4_FAST_CFG,
},
{
/* CS5 */
.enable_mask = EBI2_CS5_ENABLE_MASK,
.slow_cfg = EBI2_XMEM_CS5_SLOW_CFG,
.fast_cfg = EBI2_XMEM_CS5_FAST_CFG,
},
};
/**
* struct ebi2_xmem_prop - describes an XMEM config property
* @prop: the device tree binding name
* @max: maximum value for the property
* @slowreg: true if this property is in the SLOW CS config register
* else it is assumed to be in the FAST config register
* @shift: the bit field start in the SLOW or FAST register for this
* property
*/
struct ebi2_xmem_prop {
const char *prop;
u32 max;
bool slowreg;
u16 shift;
};
static const struct ebi2_xmem_prop xmem_props[] = {
{
.prop = "qcom,xmem-recovery-cycles",
.max = 15,
.slowreg = true,
.shift = EBI2_XMEM_RECOVERY_SHIFT,
},
{
.prop = "qcom,xmem-write-hold-cycles",
.max = 15,
.slowreg = true,
.shift = EBI2_XMEM_WR_HOLD_SHIFT,
},
{
.prop = "qcom,xmem-write-delta-cycles",
.max = 255,
.slowreg = true,
.shift = EBI2_XMEM_WR_DELTA_SHIFT,
},
{
.prop = "qcom,xmem-read-delta-cycles",
.max = 255,
.slowreg = true,
.shift = EBI2_XMEM_RD_DELTA_SHIFT,
},
{
.prop = "qcom,xmem-write-wait-cycles",
.max = 15,
.slowreg = true,
.shift = EBI2_XMEM_WR_WAIT_SHIFT,
},
{
.prop = "qcom,xmem-read-wait-cycles",
.max = 15,
.slowreg = true,
.shift = EBI2_XMEM_RD_WAIT_SHIFT,
},
{
.prop = "qcom,xmem-address-hold-enable",
.max = 1, /* boolean prop */
.slowreg = false,
.shift = EBI2_XMEM_ADDR_HOLD_ENA_SHIFT,
},
{
.prop = "qcom,xmem-adv-to-oe-recovery-cycles",
.max = 3,
.slowreg = false,
.shift = EBI2_XMEM_ADV_OE_RECOVERY_SHIFT,
},
{
.prop = "qcom,xmem-read-hold-cycles",
.max = 15,
.slowreg = false,
.shift = EBI2_XMEM_RD_HOLD_SHIFT,
},
};
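/*
 * Illustrative sketch, not part of the original driver: a hypothetical
 * child node consuming the properties above. The node name, compatible
 * string, chip select, address range and cycle counts are all invented
 * for illustration.
 *
 *	flash@2,0 {
 *		compatible = "acme,ebi2-peripheral";
 *		reg = <2 0x0 0x100>;
 *		qcom,xmem-recovery-cycles = <0>;
 *		qcom,xmem-write-hold-cycles = <3>;
 *		qcom,xmem-write-delta-cycles = <31>;
 *		qcom,xmem-read-delta-cycles = <28>;
 *		qcom,xmem-write-wait-cycles = <9>;
 *		qcom,xmem-read-wait-cycles = <9>;
 *	};
 */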
static void qcom_ebi2_setup_chipselect(struct device_node *np,
struct device *dev,
void __iomem *ebi2_base,
void __iomem *ebi2_xmem,
u32 csindex)
{
const struct cs_data *csd;
u32 slowcfg, fastcfg;
u32 val;
int ret;
int i;
csd = &cs_info[csindex];
val = readl(ebi2_base);
val |= csd->enable_mask;
writel(val, ebi2_base);
dev_dbg(dev, "enabled CS%u\n", csindex);
/* Next set up the XMEMC */
slowcfg = 0;
fastcfg = 0;
for (i = 0; i < ARRAY_SIZE(xmem_props); i++) {
const struct ebi2_xmem_prop *xp = &xmem_props[i];
/* All are regular u32 values */
ret = of_property_read_u32(np, xp->prop, &val);
if (ret) {
dev_dbg(dev, "could not read %s for CS%d\n",
xp->prop, csindex);
continue;
}
/* First check boolean props */
if (xp->max == 1 && val) {
if (xp->slowreg)
slowcfg |= BIT(xp->shift);
else
fastcfg |= BIT(xp->shift);
dev_dbg(dev, "set %s flag\n", xp->prop);
continue;
}
/* We're dealing with an u32 */
if (val > xp->max) {
dev_err(dev,
"too high value for %s: %u, capped at %u\n",
xp->prop, val, xp->max);
val = xp->max;
}
if (xp->slowreg)
slowcfg |= (val << xp->shift);
else
fastcfg |= (val << xp->shift);
dev_dbg(dev, "set %s to %u\n", xp->prop, val);
}
dev_info(dev, "CS%u: SLOW CFG 0x%08x, FAST CFG 0x%08x\n",
csindex, slowcfg, fastcfg);
if (slowcfg)
writel(slowcfg, ebi2_xmem + csd->slow_cfg);
if (fastcfg)
writel(fastcfg, ebi2_xmem + csd->fast_cfg);
}
static int qcom_ebi2_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct device_node *child;
struct device *dev = &pdev->dev;
struct resource *res;
void __iomem *ebi2_base;
void __iomem *ebi2_xmem;
struct clk *ebi2xclk;
struct clk *ebi2clk;
bool have_children = false;
u32 val;
int ret;
ebi2xclk = devm_clk_get(dev, "ebi2x");
if (IS_ERR(ebi2xclk))
return PTR_ERR(ebi2xclk);
ret = clk_prepare_enable(ebi2xclk);
if (ret) {
dev_err(dev, "could not enable EBI2X clk (%d)\n", ret);
return ret;
}
ebi2clk = devm_clk_get(dev, "ebi2");
if (IS_ERR(ebi2clk)) {
ret = PTR_ERR(ebi2clk);
goto err_disable_2x_clk;
}
ret = clk_prepare_enable(ebi2clk);
if (ret) {
dev_err(dev, "could not enable EBI2 clk\n");
goto err_disable_2x_clk;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ebi2_base = devm_ioremap_resource(dev, res);
if (IS_ERR(ebi2_base)) {
ret = PTR_ERR(ebi2_base);
goto err_disable_clk;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
ebi2_xmem = devm_ioremap_resource(dev, res);
if (IS_ERR(ebi2_xmem)) {
ret = PTR_ERR(ebi2_xmem);
goto err_disable_clk;
}
/* Allegedly this turns the power save mode off */
writel(0UL, ebi2_xmem + EBI2_XMEM_CFG);
/* Disable all chipselects */
val = readl(ebi2_base);
val &= ~EBI2_CSN_MASK;
writel(val, ebi2_base);
/* Walk over the child nodes and see what chipselects we use */
for_each_available_child_of_node(np, child) {
u32 csindex;
/* Figure out the chipselect */
ret = of_property_read_u32(child, "reg", &csindex);
if (ret) {
of_node_put(child);
return ret;
}
if (csindex > 5) {
dev_err(dev,
"invalid chipselect %u, we only support 0-5\n",
csindex);
continue;
}
qcom_ebi2_setup_chipselect(child,
dev,
ebi2_base,
ebi2_xmem,
csindex);
/* We have at least one child */
have_children = true;
}
if (have_children)
return of_platform_default_populate(np, NULL, dev);
return 0;
err_disable_clk:
clk_disable_unprepare(ebi2clk);
err_disable_2x_clk:
clk_disable_unprepare(ebi2xclk);
return ret;
}
static const struct of_device_id qcom_ebi2_of_match[] = {
{ .compatible = "qcom,msm8660-ebi2", },
{ .compatible = "qcom,apq8060-ebi2", },
{ }
};
static struct platform_driver qcom_ebi2_driver = {
.probe = qcom_ebi2_probe,
.driver = {
.name = "qcom-ebi2",
.of_match_table = qcom_ebi2_of_match,
},
};
module_platform_driver(qcom_ebi2_driver);
MODULE_AUTHOR("Linus Walleij <[email protected]>");
MODULE_DESCRIPTION("Qualcomm EBI2 driver");
| linux-master | drivers/bus/qcom-ebi2.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Driver for NVIDIA Generic Memory Interface
*
* Copyright (C) 2016 Host Mobility AB. All rights reserved.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <soc/tegra/common.h>
#define TEGRA_GMI_CONFIG 0x00
#define TEGRA_GMI_CONFIG_GO BIT(31)
#define TEGRA_GMI_BUS_WIDTH_32BIT BIT(30)
#define TEGRA_GMI_MUX_MODE BIT(28)
#define TEGRA_GMI_RDY_BEFORE_DATA BIT(24)
#define TEGRA_GMI_RDY_ACTIVE_HIGH BIT(23)
#define TEGRA_GMI_ADV_ACTIVE_HIGH BIT(22)
#define TEGRA_GMI_OE_ACTIVE_HIGH BIT(21)
#define TEGRA_GMI_CS_ACTIVE_HIGH BIT(20)
#define TEGRA_GMI_CS_SELECT(x)		(((x) & 0x7) << 4)
#define TEGRA_GMI_TIMING0 0x10
#define TEGRA_GMI_MUXED_WIDTH(x)	(((x) & 0xf) << 12)
#define TEGRA_GMI_HOLD_WIDTH(x)		(((x) & 0xf) << 8)
#define TEGRA_GMI_ADV_WIDTH(x)		(((x) & 0xf) << 4)
#define TEGRA_GMI_CE_WIDTH(x)		((x) & 0xf)
#define TEGRA_GMI_TIMING1 0x14
#define TEGRA_GMI_WE_WIDTH(x)		(((x) & 0xff) << 16)
#define TEGRA_GMI_OE_WIDTH(x)		(((x) & 0xff) << 8)
#define TEGRA_GMI_WAIT_WIDTH(x)		((x) & 0xff)
#define TEGRA_GMI_MAX_CHIP_SELECT 8
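/*
 * Illustrative sketch, not part of the original driver: how a TIMING0
 * word is composed from the field macros above. The function name is
 * hypothetical and the field values are the reset defaults used by
 * tegra_gmi_parse_dt() when no device tree property overrides them.
 */
static inline u32 tegra_gmi_example_timing0(void)
{
	return TEGRA_GMI_MUXED_WIDTH(1) | TEGRA_GMI_HOLD_WIDTH(1) |
	       TEGRA_GMI_ADV_WIDTH(1) | TEGRA_GMI_CE_WIDTH(4);
}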
struct tegra_gmi {
struct device *dev;
void __iomem *base;
struct clk *clk;
struct reset_control *rst;
u32 snor_config;
u32 snor_timing0;
u32 snor_timing1;
};
static int tegra_gmi_enable(struct tegra_gmi *gmi)
{
int err;
pm_runtime_enable(gmi->dev);
err = pm_runtime_resume_and_get(gmi->dev);
if (err) {
pm_runtime_disable(gmi->dev);
return err;
}
reset_control_assert(gmi->rst);
usleep_range(2000, 4000);
reset_control_deassert(gmi->rst);
writel(gmi->snor_timing0, gmi->base + TEGRA_GMI_TIMING0);
writel(gmi->snor_timing1, gmi->base + TEGRA_GMI_TIMING1);
gmi->snor_config |= TEGRA_GMI_CONFIG_GO;
writel(gmi->snor_config, gmi->base + TEGRA_GMI_CONFIG);
return 0;
}
static void tegra_gmi_disable(struct tegra_gmi *gmi)
{
u32 config;
/* stop GMI operation */
config = readl(gmi->base + TEGRA_GMI_CONFIG);
config &= ~TEGRA_GMI_CONFIG_GO;
writel(config, gmi->base + TEGRA_GMI_CONFIG);
reset_control_assert(gmi->rst);
pm_runtime_put_sync_suspend(gmi->dev);
pm_runtime_force_suspend(gmi->dev);
}
static int tegra_gmi_parse_dt(struct tegra_gmi *gmi)
{
struct device_node *child;
u32 property, ranges[4];
int err;
child = of_get_next_available_child(gmi->dev->of_node, NULL);
if (!child) {
dev_err(gmi->dev, "no child nodes found\n");
return -ENODEV;
}
/*
* We currently only support one child device due to lack of
* chip-select address decoding. Which means that we only have one
* chip-select line from the GMI controller.
*/
if (of_get_child_count(gmi->dev->of_node) > 1)
		dev_warn(gmi->dev, "only one child device is supported\n");
if (of_property_read_bool(child, "nvidia,snor-data-width-32bit"))
gmi->snor_config |= TEGRA_GMI_BUS_WIDTH_32BIT;
if (of_property_read_bool(child, "nvidia,snor-mux-mode"))
gmi->snor_config |= TEGRA_GMI_MUX_MODE;
if (of_property_read_bool(child, "nvidia,snor-rdy-active-before-data"))
gmi->snor_config |= TEGRA_GMI_RDY_BEFORE_DATA;
if (of_property_read_bool(child, "nvidia,snor-rdy-active-high"))
gmi->snor_config |= TEGRA_GMI_RDY_ACTIVE_HIGH;
if (of_property_read_bool(child, "nvidia,snor-adv-active-high"))
gmi->snor_config |= TEGRA_GMI_ADV_ACTIVE_HIGH;
if (of_property_read_bool(child, "nvidia,snor-oe-active-high"))
gmi->snor_config |= TEGRA_GMI_OE_ACTIVE_HIGH;
if (of_property_read_bool(child, "nvidia,snor-cs-active-high"))
gmi->snor_config |= TEGRA_GMI_CS_ACTIVE_HIGH;
/* Decode the CS# */
err = of_property_read_u32_array(child, "ranges", ranges, 4);
if (err < 0) {
/* Invalid binding */
if (err == -EOVERFLOW) {
dev_err(gmi->dev,
"failed to decode CS: invalid ranges length\n");
goto error_cs;
}
/*
* If we reach here it means that the child node has an empty
* ranges or it does not exist at all. Attempt to decode the
* CS# from the reg property instead.
*/
err = of_property_read_u32(child, "reg", &property);
if (err < 0) {
dev_err(gmi->dev,
"failed to decode CS: no reg property found\n");
goto error_cs;
}
} else {
property = ranges[1];
}
/* Valid chip selects are CS0-CS7 */
if (property >= TEGRA_GMI_MAX_CHIP_SELECT) {
dev_err(gmi->dev, "invalid chip select: %d", property);
err = -EINVAL;
goto error_cs;
}
gmi->snor_config |= TEGRA_GMI_CS_SELECT(property);
/* The default values that are provided below are reset values */
if (!of_property_read_u32(child, "nvidia,snor-muxed-width", &property))
gmi->snor_timing0 |= TEGRA_GMI_MUXED_WIDTH(property);
else
gmi->snor_timing0 |= TEGRA_GMI_MUXED_WIDTH(1);
if (!of_property_read_u32(child, "nvidia,snor-hold-width", &property))
gmi->snor_timing0 |= TEGRA_GMI_HOLD_WIDTH(property);
else
gmi->snor_timing0 |= TEGRA_GMI_HOLD_WIDTH(1);
if (!of_property_read_u32(child, "nvidia,snor-adv-width", &property))
gmi->snor_timing0 |= TEGRA_GMI_ADV_WIDTH(property);
else
gmi->snor_timing0 |= TEGRA_GMI_ADV_WIDTH(1);
if (!of_property_read_u32(child, "nvidia,snor-ce-width", &property))
gmi->snor_timing0 |= TEGRA_GMI_CE_WIDTH(property);
else
gmi->snor_timing0 |= TEGRA_GMI_CE_WIDTH(4);
if (!of_property_read_u32(child, "nvidia,snor-we-width", &property))
gmi->snor_timing1 |= TEGRA_GMI_WE_WIDTH(property);
else
gmi->snor_timing1 |= TEGRA_GMI_WE_WIDTH(1);
if (!of_property_read_u32(child, "nvidia,snor-oe-width", &property))
gmi->snor_timing1 |= TEGRA_GMI_OE_WIDTH(property);
else
gmi->snor_timing1 |= TEGRA_GMI_OE_WIDTH(1);
if (!of_property_read_u32(child, "nvidia,snor-wait-width", &property))
gmi->snor_timing1 |= TEGRA_GMI_WAIT_WIDTH(property);
else
gmi->snor_timing1 |= TEGRA_GMI_WAIT_WIDTH(3);
error_cs:
of_node_put(child);
return err;
}
static int tegra_gmi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct tegra_gmi *gmi;
int err;
gmi = devm_kzalloc(dev, sizeof(*gmi), GFP_KERNEL);
if (!gmi)
return -ENOMEM;
platform_set_drvdata(pdev, gmi);
gmi->dev = dev;
gmi->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(gmi->base))
return PTR_ERR(gmi->base);
gmi->clk = devm_clk_get(dev, "gmi");
if (IS_ERR(gmi->clk)) {
dev_err(dev, "can not get clock\n");
return PTR_ERR(gmi->clk);
}
gmi->rst = devm_reset_control_get(dev, "gmi");
if (IS_ERR(gmi->rst)) {
dev_err(dev, "can not get reset\n");
return PTR_ERR(gmi->rst);
}
err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
if (err)
return err;
err = tegra_gmi_parse_dt(gmi);
if (err)
return err;
err = tegra_gmi_enable(gmi);
if (err < 0)
return err;
err = of_platform_default_populate(dev->of_node, NULL, dev);
if (err < 0) {
dev_err(dev, "fail to create devices.\n");
tegra_gmi_disable(gmi);
return err;
}
return 0;
}
static int tegra_gmi_remove(struct platform_device *pdev)
{
struct tegra_gmi *gmi = platform_get_drvdata(pdev);
of_platform_depopulate(gmi->dev);
tegra_gmi_disable(gmi);
return 0;
}
static int __maybe_unused tegra_gmi_runtime_resume(struct device *dev)
{
struct tegra_gmi *gmi = dev_get_drvdata(dev);
int err;
err = clk_prepare_enable(gmi->clk);
if (err < 0) {
dev_err(gmi->dev, "failed to enable clock: %d\n", err);
return err;
}
return 0;
}
static int __maybe_unused tegra_gmi_runtime_suspend(struct device *dev)
{
struct tegra_gmi *gmi = dev_get_drvdata(dev);
clk_disable_unprepare(gmi->clk);
return 0;
}
static const struct dev_pm_ops tegra_gmi_pm = {
SET_RUNTIME_PM_OPS(tegra_gmi_runtime_suspend, tegra_gmi_runtime_resume,
NULL)
};
static const struct of_device_id tegra_gmi_id_table[] = {
{ .compatible = "nvidia,tegra20-gmi", },
{ .compatible = "nvidia,tegra30-gmi", },
{ }
};
MODULE_DEVICE_TABLE(of, tegra_gmi_id_table);
static struct platform_driver tegra_gmi_driver = {
.probe = tegra_gmi_probe,
.remove = tegra_gmi_remove,
.driver = {
.name = "tegra-gmi",
.of_match_table = tegra_gmi_id_table,
.pm = &tegra_gmi_pm,
},
};
module_platform_driver(tegra_gmi_driver);
MODULE_AUTHOR("Mirza Krak <[email protected]");
MODULE_DESCRIPTION("NVIDIA Tegra GMI Bus Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/bus/tegra-gmi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* RSB (Reduced Serial Bus) driver.
*
* Author: Chen-Yu Tsai <[email protected]>
*
* The RSB controller looks like an SMBus controller which only supports
* byte and word data transfers. But, it differs from standard SMBus
* protocol on several aspects:
* - it uses addresses set at runtime to address slaves. Runtime addresses
* are sent to slaves using their 12bit hardware addresses. Up to 15
* runtime addresses are available.
* - it adds a parity bit every 8bits of data and address for read and
* write accesses; this replaces the ack bit
* - only one read access is required to read a byte (instead of a write
* followed by a read access in standard SMBus protocol)
* - there's no Ack bit after each read access
*
* This means this bus cannot be used to interface with standard SMBus
* devices. Devices known to support this interface include the AXP223,
* AXP809, and AXP806 PMICs, and the AC100 audio codec, all from X-Powers.
*
* A description of the operation and wire protocol can be found in the
* RSB section of Allwinner's A80 user manual, which can be found at
*
* https://github.com/allwinner-zh/documents/tree/master/A80
*
* This document is officially released by Allwinner.
*
* This driver is based on i2c-sun6i-p2wi.c, the P2WI bus driver.
*/
#include <linux/clk.h>
#include <linux/clk/clk-conf.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/sunxi-rsb.h>
#include <linux/types.h>
/* RSB registers */
#define RSB_CTRL 0x0 /* Global control */
#define RSB_CCR 0x4 /* Clock control */
#define RSB_INTE 0x8 /* Interrupt controls */
#define RSB_INTS 0xc /* Interrupt status */
#define RSB_ADDR 0x10 /* Address to send with read/write command */
#define RSB_DATA 0x1c /* Data to read/write */
#define RSB_LCR 0x24 /* Line control */
#define RSB_DMCR 0x28 /* Device mode (init) control */
#define RSB_CMD 0x2c /* RSB Command */
#define RSB_DAR 0x30 /* Device address / runtime address */
/* CTRL fields */
#define RSB_CTRL_START_TRANS BIT(7)
#define RSB_CTRL_ABORT_TRANS BIT(6)
#define RSB_CTRL_GLOBAL_INT_ENB BIT(1)
#define RSB_CTRL_SOFT_RST BIT(0)
/* CLK CTRL fields */
#define RSB_CCR_SDA_OUT_DELAY(v) (((v) & 0x7) << 8)
#define RSB_CCR_MAX_CLK_DIV 0xff
#define RSB_CCR_CLK_DIV(v) ((v) & RSB_CCR_MAX_CLK_DIV)
/* STATUS fields */
#define RSB_INTS_TRANS_ERR_ACK BIT(16)
#define RSB_INTS_TRANS_ERR_DATA_BIT(v) (((v) >> 8) & 0xf)
#define RSB_INTS_TRANS_ERR_DATA GENMASK(11, 8)
#define RSB_INTS_LOAD_BSY BIT(2)
#define RSB_INTS_TRANS_ERR BIT(1)
#define RSB_INTS_TRANS_OVER BIT(0)
/* LINE CTRL fields*/
#define RSB_LCR_SCL_STATE BIT(5)
#define RSB_LCR_SDA_STATE BIT(4)
#define RSB_LCR_SCL_CTL BIT(3)
#define RSB_LCR_SCL_CTL_EN BIT(2)
#define RSB_LCR_SDA_CTL BIT(1)
#define RSB_LCR_SDA_CTL_EN BIT(0)
/* DEVICE MODE CTRL field values */
#define RSB_DMCR_DEVICE_START BIT(31)
#define RSB_DMCR_MODE_DATA (0x7c << 16)
#define RSB_DMCR_MODE_REG (0x3e << 8)
#define RSB_DMCR_DEV_ADDR 0x00
/* CMD values */
#define RSB_CMD_RD8 0x8b
#define RSB_CMD_RD16 0x9c
#define RSB_CMD_RD32 0xa6
#define RSB_CMD_WR8 0x4e
#define RSB_CMD_WR16 0x59
#define RSB_CMD_WR32 0x63
#define RSB_CMD_STRA 0xe8
/* DAR fields */
#define RSB_DAR_RTA(v) (((v) & 0xff) << 16)
#define RSB_DAR_DA(v) ((v) & 0xffff)
#define RSB_MAX_FREQ 20000000
#define RSB_CTRL_NAME "sunxi-rsb"
struct sunxi_rsb_addr_map {
u16 hwaddr;
u8 rtaddr;
};
struct sunxi_rsb {
struct device *dev;
void __iomem *regs;
struct clk *clk;
struct reset_control *rstc;
struct completion complete;
struct mutex lock;
unsigned int status;
u32 clk_freq;
};
/* bus / slave device related functions */
static struct bus_type sunxi_rsb_bus;
static int sunxi_rsb_device_match(struct device *dev, struct device_driver *drv)
{
return of_driver_match_device(dev, drv);
}
static int sunxi_rsb_device_probe(struct device *dev)
{
const struct sunxi_rsb_driver *drv = to_sunxi_rsb_driver(dev->driver);
struct sunxi_rsb_device *rdev = to_sunxi_rsb_device(dev);
int ret;
if (!drv->probe)
return -ENODEV;
if (!rdev->irq) {
int irq = -ENOENT;
if (dev->of_node)
irq = of_irq_get(dev->of_node, 0);
if (irq == -EPROBE_DEFER)
return irq;
if (irq < 0)
irq = 0;
rdev->irq = irq;
}
ret = of_clk_set_defaults(dev->of_node, false);
if (ret < 0)
return ret;
return drv->probe(rdev);
}
static void sunxi_rsb_device_remove(struct device *dev)
{
const struct sunxi_rsb_driver *drv = to_sunxi_rsb_driver(dev->driver);
drv->remove(to_sunxi_rsb_device(dev));
}
static int sunxi_rsb_device_modalias(const struct device *dev, struct kobj_uevent_env *env)
{
return of_device_uevent_modalias(dev, env);
}
static struct bus_type sunxi_rsb_bus = {
.name = RSB_CTRL_NAME,
.match = sunxi_rsb_device_match,
.probe = sunxi_rsb_device_probe,
.remove = sunxi_rsb_device_remove,
.uevent = sunxi_rsb_device_modalias,
};
static void sunxi_rsb_dev_release(struct device *dev)
{
struct sunxi_rsb_device *rdev = to_sunxi_rsb_device(dev);
kfree(rdev);
}
/**
* sunxi_rsb_device_create() - allocate and add an RSB device
* @rsb: RSB controller
* @node: RSB slave device node
* @hwaddr: RSB slave hardware address
* @rtaddr: RSB slave runtime address
*/
static struct sunxi_rsb_device *sunxi_rsb_device_create(struct sunxi_rsb *rsb,
struct device_node *node, u16 hwaddr, u8 rtaddr)
{
int err;
struct sunxi_rsb_device *rdev;
rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
if (!rdev)
return ERR_PTR(-ENOMEM);
rdev->rsb = rsb;
rdev->hwaddr = hwaddr;
rdev->rtaddr = rtaddr;
rdev->dev.bus = &sunxi_rsb_bus;
rdev->dev.parent = rsb->dev;
rdev->dev.of_node = node;
rdev->dev.release = sunxi_rsb_dev_release;
dev_set_name(&rdev->dev, "%s-%x", RSB_CTRL_NAME, hwaddr);
err = device_register(&rdev->dev);
if (err < 0) {
dev_err(&rdev->dev, "Can't add %s, status %d\n",
dev_name(&rdev->dev), err);
goto err_device_add;
}
dev_dbg(&rdev->dev, "device %s registered\n", dev_name(&rdev->dev));
return rdev;
err_device_add:
put_device(&rdev->dev);
return ERR_PTR(err);
}
/**
* sunxi_rsb_device_unregister(): unregister an RSB device
* @rdev: rsb_device to be removed
*/
static void sunxi_rsb_device_unregister(struct sunxi_rsb_device *rdev)
{
device_unregister(&rdev->dev);
}
static int sunxi_rsb_remove_devices(struct device *dev, void *data)
{
struct sunxi_rsb_device *rdev = to_sunxi_rsb_device(dev);
if (dev->bus == &sunxi_rsb_bus)
sunxi_rsb_device_unregister(rdev);
return 0;
}
/**
* sunxi_rsb_driver_register() - Register device driver with RSB core
* @rdrv: device driver to be associated with slave-device.
*
* This API will register the client driver with the RSB framework.
* It is typically called from the driver's module-init function.
*/
int sunxi_rsb_driver_register(struct sunxi_rsb_driver *rdrv)
{
rdrv->driver.bus = &sunxi_rsb_bus;
return driver_register(&rdrv->driver);
}
EXPORT_SYMBOL_GPL(sunxi_rsb_driver_register);
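/*
 * Illustrative sketch, not part of this file: the shape of a minimal
 * client driver for this bus. The names and the probe body are
 * hypothetical; a real driver would also set an of_match_table on
 * .driver and call sunxi_rsb_driver_register() from its module init.
 */
static int example_rsb_probe(struct sunxi_rsb_device *rdev)
{
	dev_info(&rdev->dev, "probed at runtime address 0x%02x\n",
		 rdev->rtaddr);
	return 0;
}

static struct sunxi_rsb_driver example_rsb_driver __maybe_unused = {
	.probe	= example_rsb_probe,
	.driver	= {
		.name = "example-rsb-client",
	},
};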
/* common code that starts a transfer */
static int _sunxi_rsb_run_xfer(struct sunxi_rsb *rsb)
{
u32 int_mask, status;
bool timeout;
if (readl(rsb->regs + RSB_CTRL) & RSB_CTRL_START_TRANS) {
dev_dbg(rsb->dev, "RSB transfer still in progress\n");
return -EBUSY;
}
reinit_completion(&rsb->complete);
int_mask = RSB_INTS_LOAD_BSY | RSB_INTS_TRANS_ERR | RSB_INTS_TRANS_OVER;
writel(int_mask, rsb->regs + RSB_INTE);
writel(RSB_CTRL_START_TRANS | RSB_CTRL_GLOBAL_INT_ENB,
rsb->regs + RSB_CTRL);
if (irqs_disabled()) {
timeout = readl_poll_timeout_atomic(rsb->regs + RSB_INTS,
status, (status & int_mask),
10, 100000);
writel(status, rsb->regs + RSB_INTS);
} else {
timeout = !wait_for_completion_io_timeout(&rsb->complete,
msecs_to_jiffies(100));
status = rsb->status;
}
if (timeout) {
dev_dbg(rsb->dev, "RSB timeout\n");
/* abort the transfer */
writel(RSB_CTRL_ABORT_TRANS, rsb->regs + RSB_CTRL);
/* clear any interrupt flags */
writel(readl(rsb->regs + RSB_INTS), rsb->regs + RSB_INTS);
return -ETIMEDOUT;
}
if (status & RSB_INTS_LOAD_BSY) {
dev_dbg(rsb->dev, "RSB busy\n");
return -EBUSY;
}
if (status & RSB_INTS_TRANS_ERR) {
if (status & RSB_INTS_TRANS_ERR_ACK) {
dev_dbg(rsb->dev, "RSB slave nack\n");
return -EINVAL;
}
if (status & RSB_INTS_TRANS_ERR_DATA) {
dev_dbg(rsb->dev, "RSB transfer data error\n");
return -EIO;
}
}
return 0;
}
static int sunxi_rsb_read(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr,
u32 *buf, size_t len)
{
u32 cmd;
int ret;
if (!buf)
return -EINVAL;
switch (len) {
case 1:
cmd = RSB_CMD_RD8;
break;
case 2:
cmd = RSB_CMD_RD16;
break;
case 4:
cmd = RSB_CMD_RD32;
break;
default:
dev_err(rsb->dev, "Invalid access width: %zd\n", len);
return -EINVAL;
}
ret = pm_runtime_resume_and_get(rsb->dev);
if (ret)
return ret;
mutex_lock(&rsb->lock);
writel(addr, rsb->regs + RSB_ADDR);
writel(RSB_DAR_RTA(rtaddr), rsb->regs + RSB_DAR);
writel(cmd, rsb->regs + RSB_CMD);
ret = _sunxi_rsb_run_xfer(rsb);
if (ret)
goto unlock;
*buf = readl(rsb->regs + RSB_DATA) & GENMASK(len * 8 - 1, 0);
unlock:
mutex_unlock(&rsb->lock);
pm_runtime_mark_last_busy(rsb->dev);
pm_runtime_put_autosuspend(rsb->dev);
return ret;
}
static int sunxi_rsb_write(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr,
const u32 *buf, size_t len)
{
u32 cmd;
int ret;
if (!buf)
return -EINVAL;
switch (len) {
case 1:
cmd = RSB_CMD_WR8;
break;
case 2:
cmd = RSB_CMD_WR16;
break;
case 4:
cmd = RSB_CMD_WR32;
break;
default:
dev_err(rsb->dev, "Invalid access width: %zd\n", len);
return -EINVAL;
}
ret = pm_runtime_resume_and_get(rsb->dev);
if (ret)
return ret;
mutex_lock(&rsb->lock);
writel(addr, rsb->regs + RSB_ADDR);
writel(RSB_DAR_RTA(rtaddr), rsb->regs + RSB_DAR);
writel(*buf, rsb->regs + RSB_DATA);
writel(cmd, rsb->regs + RSB_CMD);
ret = _sunxi_rsb_run_xfer(rsb);
mutex_unlock(&rsb->lock);
pm_runtime_mark_last_busy(rsb->dev);
pm_runtime_put_autosuspend(rsb->dev);
return ret;
}
/* RSB regmap functions */
struct sunxi_rsb_ctx {
struct sunxi_rsb_device *rdev;
int size;
};
static int regmap_sunxi_rsb_reg_read(void *context, unsigned int reg,
unsigned int *val)
{
struct sunxi_rsb_ctx *ctx = context;
struct sunxi_rsb_device *rdev = ctx->rdev;
if (reg > 0xff)
return -EINVAL;
return sunxi_rsb_read(rdev->rsb, rdev->rtaddr, reg, val, ctx->size);
}
static int regmap_sunxi_rsb_reg_write(void *context, unsigned int reg,
unsigned int val)
{
struct sunxi_rsb_ctx *ctx = context;
struct sunxi_rsb_device *rdev = ctx->rdev;
return sunxi_rsb_write(rdev->rsb, rdev->rtaddr, reg, &val, ctx->size);
}
static void regmap_sunxi_rsb_free_ctx(void *context)
{
struct sunxi_rsb_ctx *ctx = context;
kfree(ctx);
}
static struct regmap_bus regmap_sunxi_rsb = {
.reg_write = regmap_sunxi_rsb_reg_write,
.reg_read = regmap_sunxi_rsb_reg_read,
.free_context = regmap_sunxi_rsb_free_ctx,
.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
};
static struct sunxi_rsb_ctx *regmap_sunxi_rsb_init_ctx(struct sunxi_rsb_device *rdev,
const struct regmap_config *config)
{
struct sunxi_rsb_ctx *ctx;
switch (config->val_bits) {
case 8:
case 16:
case 32:
break;
default:
return ERR_PTR(-EINVAL);
}
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return ERR_PTR(-ENOMEM);
ctx->rdev = rdev;
ctx->size = config->val_bits / 8;
return ctx;
}
struct regmap *__devm_regmap_init_sunxi_rsb(struct sunxi_rsb_device *rdev,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name)
{
struct sunxi_rsb_ctx *ctx = regmap_sunxi_rsb_init_ctx(rdev, config);
if (IS_ERR(ctx))
return ERR_CAST(ctx);
return __devm_regmap_init(&rdev->dev, ®map_sunxi_rsb, ctx, config,
lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_sunxi_rsb);
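/*
 * Illustrative sketch, not part of this file: how a client driver would
 * obtain a regmap over RSB, assuming the devm_regmap_init_sunxi_rsb()
 * wrapper declared in <linux/sunxi-rsb.h>. The function name, the config
 * values and the register written are hypothetical.
 */
static int __maybe_unused example_rsb_regmap(struct sunxi_rsb_device *rdev)
{
	static const struct regmap_config example_cfg = {
		.reg_bits = 8,
		.val_bits = 8,
		.max_register = 0xff,
	};
	struct regmap *map;

	map = devm_regmap_init_sunxi_rsb(rdev, &example_cfg);
	if (IS_ERR(map))
		return PTR_ERR(map);

	return regmap_write(map, 0x00, 0x55); /* hypothetical register */
}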
/* RSB controller driver functions */
static irqreturn_t sunxi_rsb_irq(int irq, void *dev_id)
{
struct sunxi_rsb *rsb = dev_id;
u32 status;
status = readl(rsb->regs + RSB_INTS);
rsb->status = status;
/* Clear interrupts */
status &= (RSB_INTS_LOAD_BSY | RSB_INTS_TRANS_ERR |
RSB_INTS_TRANS_OVER);
writel(status, rsb->regs + RSB_INTS);
complete(&rsb->complete);
return IRQ_HANDLED;
}
static int sunxi_rsb_init_device_mode(struct sunxi_rsb *rsb)
{
int ret = 0;
u32 reg;
/* send init sequence */
writel(RSB_DMCR_DEVICE_START | RSB_DMCR_MODE_DATA |
RSB_DMCR_MODE_REG | RSB_DMCR_DEV_ADDR, rsb->regs + RSB_DMCR);
readl_poll_timeout(rsb->regs + RSB_DMCR, reg,
!(reg & RSB_DMCR_DEVICE_START), 100, 250000);
if (reg & RSB_DMCR_DEVICE_START)
ret = -ETIMEDOUT;
/* clear interrupt status bits */
writel(readl(rsb->regs + RSB_INTS), rsb->regs + RSB_INTS);
return ret;
}
/*
* There are 15 valid runtime addresses, though Allwinner typically
* skips the first, for unknown reasons, and uses the following three.
*
* 0x17, 0x2d, 0x3a, 0x4e, 0x59, 0x63, 0x74, 0x8b,
* 0x9c, 0xa6, 0xb1, 0xc5, 0xd2, 0xe8, 0xff
*
* No designs with 2 RSB slave devices sharing identical hardware
* addresses on the same bus have been seen in the wild. All designs
* use 0x2d for the primary PMIC, 0x3a for the secondary PMIC if
 * there is one, and 0x4e for peripheral ICs.
*
* The hardware does not seem to support re-setting runtime addresses.
* Attempts to do so result in the slave devices returning a NACK.
* Hence we just hardcode the mapping here, like Allwinner does.
*/
static const struct sunxi_rsb_addr_map sunxi_rsb_addr_maps[] = {
{ 0x3a3, 0x2d }, /* Primary PMIC: AXP223, AXP809, AXP81X, ... */
{ 0x745, 0x3a }, /* Secondary PMIC: AXP806, ... */
{ 0xe89, 0x4e }, /* Peripheral IC: AC100, ... */
};
static u8 sunxi_rsb_get_rtaddr(u16 hwaddr)
{
int i;
for (i = 0; i < ARRAY_SIZE(sunxi_rsb_addr_maps); i++)
if (hwaddr == sunxi_rsb_addr_maps[i].hwaddr)
return sunxi_rsb_addr_maps[i].rtaddr;
return 0; /* 0 is an invalid runtime address */
}
static int of_rsb_register_devices(struct sunxi_rsb *rsb)
{
struct device *dev = rsb->dev;
struct device_node *child, *np = dev->of_node;
u32 hwaddr;
u8 rtaddr;
int ret;
if (!np)
return -EINVAL;
/* Runtime addresses for all slaves should be set first */
for_each_available_child_of_node(np, child) {
dev_dbg(dev, "setting child %pOF runtime address\n",
child);
ret = of_property_read_u32(child, "reg", &hwaddr);
if (ret) {
dev_err(dev, "%pOF: invalid 'reg' property: %d\n",
child, ret);
continue;
}
rtaddr = sunxi_rsb_get_rtaddr(hwaddr);
if (!rtaddr) {
dev_err(dev, "%pOF: unknown hardware device address\n",
child);
continue;
}
/*
* Since no devices have been registered yet, we are the
* only ones using the bus, we can skip locking the bus.
*/
/* setup command parameters */
writel(RSB_CMD_STRA, rsb->regs + RSB_CMD);
writel(RSB_DAR_RTA(rtaddr) | RSB_DAR_DA(hwaddr),
rsb->regs + RSB_DAR);
/* send command */
ret = _sunxi_rsb_run_xfer(rsb);
if (ret)
dev_warn(dev, "%pOF: set runtime address failed: %d\n",
child, ret);
}
/* Then we start adding devices and probing them */
for_each_available_child_of_node(np, child) {
struct sunxi_rsb_device *rdev;
dev_dbg(dev, "adding child %pOF\n", child);
ret = of_property_read_u32(child, "reg", &hwaddr);
if (ret)
continue;
rtaddr = sunxi_rsb_get_rtaddr(hwaddr);
if (!rtaddr)
continue;
rdev = sunxi_rsb_device_create(rsb, child, hwaddr, rtaddr);
if (IS_ERR(rdev))
dev_err(dev, "failed to add child device %pOF: %ld\n",
child, PTR_ERR(rdev));
}
return 0;
}
static int sunxi_rsb_hw_init(struct sunxi_rsb *rsb)
{
struct device *dev = rsb->dev;
unsigned long p_clk_freq;
u32 clk_delay, reg;
int clk_div, ret;
ret = clk_prepare_enable(rsb->clk);
if (ret) {
dev_err(dev, "failed to enable clk: %d\n", ret);
return ret;
}
ret = reset_control_deassert(rsb->rstc);
if (ret) {
dev_err(dev, "failed to deassert reset line: %d\n", ret);
goto err_clk_disable;
}
/* reset the controller */
writel(RSB_CTRL_SOFT_RST, rsb->regs + RSB_CTRL);
readl_poll_timeout(rsb->regs + RSB_CTRL, reg,
!(reg & RSB_CTRL_SOFT_RST), 1000, 100000);
/*
* Clock frequency and delay calculation code is from
* Allwinner U-boot sources.
*
* From A83 user manual:
* bus clock frequency = parent clock frequency / (2 * (divider + 1))
*/
p_clk_freq = clk_get_rate(rsb->clk);
clk_div = p_clk_freq / rsb->clk_freq / 2;
if (!clk_div)
clk_div = 1;
else if (clk_div > RSB_CCR_MAX_CLK_DIV + 1)
clk_div = RSB_CCR_MAX_CLK_DIV + 1;
clk_delay = clk_div >> 1;
if (!clk_delay)
clk_delay = 1;
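	/*
	 * Worked example: with a hypothetical 24 MHz parent clock and the
	 * default 3 MHz bus frequency, clk_div = 24000000 / 3000000 / 2 = 4,
	 * CLK_DIV is programmed as 3, and the resulting bus clock is
	 * 24 MHz / (2 * (3 + 1)) = 3 MHz.
	 */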
dev_info(dev, "RSB running at %lu Hz\n", p_clk_freq / clk_div / 2);
writel(RSB_CCR_SDA_OUT_DELAY(clk_delay) | RSB_CCR_CLK_DIV(clk_div - 1),
rsb->regs + RSB_CCR);
return 0;
err_clk_disable:
clk_disable_unprepare(rsb->clk);
return ret;
}
static void sunxi_rsb_hw_exit(struct sunxi_rsb *rsb)
{
reset_control_assert(rsb->rstc);
/* Keep the clock and PM reference counts consistent. */
if (!pm_runtime_status_suspended(rsb->dev))
clk_disable_unprepare(rsb->clk);
}
static int __maybe_unused sunxi_rsb_runtime_suspend(struct device *dev)
{
struct sunxi_rsb *rsb = dev_get_drvdata(dev);
clk_disable_unprepare(rsb->clk);
return 0;
}
static int __maybe_unused sunxi_rsb_runtime_resume(struct device *dev)
{
struct sunxi_rsb *rsb = dev_get_drvdata(dev);
return clk_prepare_enable(rsb->clk);
}
static int __maybe_unused sunxi_rsb_suspend(struct device *dev)
{
struct sunxi_rsb *rsb = dev_get_drvdata(dev);
sunxi_rsb_hw_exit(rsb);
return 0;
}
static int __maybe_unused sunxi_rsb_resume(struct device *dev)
{
struct sunxi_rsb *rsb = dev_get_drvdata(dev);
return sunxi_rsb_hw_init(rsb);
}
static int sunxi_rsb_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct sunxi_rsb *rsb;
u32 clk_freq = 3000000;
int irq, ret;
of_property_read_u32(np, "clock-frequency", &clk_freq);
if (clk_freq > RSB_MAX_FREQ) {
dev_err(dev,
"clock-frequency (%u Hz) is too high (max = 20MHz)\n",
clk_freq);
return -EINVAL;
}
rsb = devm_kzalloc(dev, sizeof(*rsb), GFP_KERNEL);
if (!rsb)
return -ENOMEM;
rsb->dev = dev;
rsb->clk_freq = clk_freq;
platform_set_drvdata(pdev, rsb);
rsb->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rsb->regs))
return PTR_ERR(rsb->regs);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
rsb->clk = devm_clk_get(dev, NULL);
if (IS_ERR(rsb->clk)) {
ret = PTR_ERR(rsb->clk);
dev_err(dev, "failed to retrieve clk: %d\n", ret);
return ret;
}
rsb->rstc = devm_reset_control_get(dev, NULL);
if (IS_ERR(rsb->rstc)) {
ret = PTR_ERR(rsb->rstc);
dev_err(dev, "failed to retrieve reset controller: %d\n", ret);
return ret;
}
init_completion(&rsb->complete);
mutex_init(&rsb->lock);
ret = devm_request_irq(dev, irq, sunxi_rsb_irq, 0, RSB_CTRL_NAME, rsb);
if (ret) {
dev_err(dev, "can't register interrupt handler irq %d: %d\n",
irq, ret);
return ret;
}
ret = sunxi_rsb_hw_init(rsb);
if (ret)
return ret;
/* initialize all devices on the bus into RSB mode */
ret = sunxi_rsb_init_device_mode(rsb);
if (ret)
dev_warn(dev, "Initialize device mode failed: %d\n", ret);
pm_suspend_ignore_children(dev, true);
pm_runtime_set_active(dev);
pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
pm_runtime_use_autosuspend(dev);
pm_runtime_enable(dev);
of_rsb_register_devices(rsb);
return 0;
}
static int sunxi_rsb_remove(struct platform_device *pdev)
{
struct sunxi_rsb *rsb = platform_get_drvdata(pdev);
device_for_each_child(rsb->dev, NULL, sunxi_rsb_remove_devices);
pm_runtime_disable(&pdev->dev);
sunxi_rsb_hw_exit(rsb);
return 0;
}
static const struct dev_pm_ops sunxi_rsb_dev_pm_ops = {
SET_RUNTIME_PM_OPS(sunxi_rsb_runtime_suspend,
sunxi_rsb_runtime_resume, NULL)
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(sunxi_rsb_suspend, sunxi_rsb_resume)
};
static const struct of_device_id sunxi_rsb_of_match_table[] = {
{ .compatible = "allwinner,sun8i-a23-rsb" },
{}
};
MODULE_DEVICE_TABLE(of, sunxi_rsb_of_match_table);
static struct platform_driver sunxi_rsb_driver = {
.probe = sunxi_rsb_probe,
.remove = sunxi_rsb_remove,
.driver = {
.name = RSB_CTRL_NAME,
.of_match_table = sunxi_rsb_of_match_table,
.pm = &sunxi_rsb_dev_pm_ops,
},
};
static int __init sunxi_rsb_init(void)
{
int ret;
ret = bus_register(&sunxi_rsb_bus);
if (ret) {
pr_err("failed to register sunxi sunxi_rsb bus: %d\n", ret);
return ret;
}
ret = platform_driver_register(&sunxi_rsb_driver);
if (ret) {
bus_unregister(&sunxi_rsb_bus);
return ret;
}
return 0;
}
module_init(sunxi_rsb_init);
static void __exit sunxi_rsb_exit(void)
{
platform_driver_unregister(&sunxi_rsb_driver);
bus_unregister(&sunxi_rsb_bus);
}
module_exit(sunxi_rsb_exit);
MODULE_AUTHOR("Chen-Yu Tsai <[email protected]>");
MODULE_DESCRIPTION("Allwinner sunXi Reduced Serial Bus controller driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/bus/sunxi-rsb.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2017 Hisilicon Limited, All Rights Reserved.
* Author: Zhichang Yuan <[email protected]>
* Author: Zou Rongrong <[email protected]>
* Author: John Garry <[email protected]>
*/
#include <linux/acpi.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/logic_pio.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/serial_8250.h>
#include <linux/slab.h>
#define DRV_NAME "hisi-lpc"
/*
* Setting this bit means each IO operation will target a different port
* address; 0 means repeated IO operations will use the same port,
* such as BT.
*/
#define FG_INCRADDR_LPC 0x02
struct lpc_cycle_para {
unsigned int opflags;
unsigned int csize; /* data length of each operation */
};
struct hisi_lpc_dev {
spinlock_t cycle_lock;
void __iomem *membase;
struct logic_pio_hwaddr *io_host;
};
/* The max IO cycle counts supported is four per operation at maximum */
#define LPC_MAX_DWIDTH 4
#define LPC_REG_STARTUP_SIGNAL 0x00
#define LPC_REG_STARTUP_SIGNAL_START BIT(0)
#define LPC_REG_OP_STATUS 0x04
#define LPC_REG_OP_STATUS_IDLE BIT(0)
#define LPC_REG_OP_STATUS_FINISHED BIT(1)
#define LPC_REG_OP_LEN 0x10 /* LPC cycles count per start */
#define LPC_REG_CMD 0x14
#define LPC_REG_CMD_OP BIT(0) /* 0: read, 1: write */
#define LPC_REG_CMD_SAMEADDR BIT(3)
#define LPC_REG_ADDR 0x20 /* target address */
#define LPC_REG_WDATA 0x24 /* write FIFO */
#define LPC_REG_RDATA 0x28 /* read FIFO */
/* The minimal nanosecond interval for each query on LPC cycle status */
#define LPC_NSEC_PERWAIT 100
/*
* The maximum waiting time is about 128us. It is specific for stream I/O,
* such as ins.
*
* The fastest IO cycle time is about 390ns, but the worst case will wait
* for extra 256 lpc clocks, so (256 + 13) * 30ns = 8 us. The maximum burst
* cycles is 16. So, the maximum waiting time is about 128us under worst
* case.
*
* Choose 1300 as the maximum.
*/
#define LPC_MAX_WAITCNT 1300
/* About 10us. This is specific for single IO operations, such as inb */
#define LPC_PEROP_WAITCNT 100
static int wait_lpc_idle(void __iomem *mbase, unsigned int waitcnt)
{
u32 status;
do {
status = readl(mbase + LPC_REG_OP_STATUS);
if (status & LPC_REG_OP_STATUS_IDLE)
return (status & LPC_REG_OP_STATUS_FINISHED) ? 0 : -EIO;
ndelay(LPC_NSEC_PERWAIT);
} while (--waitcnt);
return -ETIMEDOUT;
}
/*
* hisi_lpc_target_in - trigger a series of LPC cycles for read operation
* @lpcdev: pointer to hisi lpc device
* @para: some parameters used to control the lpc I/O operations
* @addr: the lpc I/O target port address
* @buf: where the read back data is stored
* @opcnt: how many I/O operations required, i.e. data width
*
* Returns 0 on success, non-zero on fail.
*/
static int hisi_lpc_target_in(struct hisi_lpc_dev *lpcdev,
struct lpc_cycle_para *para, unsigned long addr,
unsigned char *buf, unsigned long opcnt)
{
unsigned int cmd_word;
unsigned int waitcnt;
unsigned long flags;
int ret;
if (!buf || !opcnt || !para || !para->csize || !lpcdev)
return -EINVAL;
cmd_word = 0; /* IO mode, Read */
waitcnt = LPC_PEROP_WAITCNT;
if (!(para->opflags & FG_INCRADDR_LPC)) {
cmd_word |= LPC_REG_CMD_SAMEADDR;
waitcnt = LPC_MAX_WAITCNT;
}
/* whole operation must be atomic */
spin_lock_irqsave(&lpcdev->cycle_lock, flags);
writel_relaxed(opcnt, lpcdev->membase + LPC_REG_OP_LEN);
writel_relaxed(cmd_word, lpcdev->membase + LPC_REG_CMD);
writel_relaxed(addr, lpcdev->membase + LPC_REG_ADDR);
writel(LPC_REG_STARTUP_SIGNAL_START,
lpcdev->membase + LPC_REG_STARTUP_SIGNAL);
/* whether the operation is finished */
ret = wait_lpc_idle(lpcdev->membase, waitcnt);
if (ret) {
spin_unlock_irqrestore(&lpcdev->cycle_lock, flags);
return ret;
}
readsb(lpcdev->membase + LPC_REG_RDATA, buf, opcnt);
spin_unlock_irqrestore(&lpcdev->cycle_lock, flags);
return 0;
}
/*
* hisi_lpc_target_out - trigger a series of LPC cycles for write operation
* @lpcdev: pointer to hisi lpc device
* @para: some parameters used to control the lpc I/O operations
* @addr: the lpc I/O target port address
* @buf: where the data to be written is stored
* @opcnt: how many I/O operations required, i.e. data width
*
* Returns 0 on success, non-zero on fail.
*/
static int hisi_lpc_target_out(struct hisi_lpc_dev *lpcdev,
struct lpc_cycle_para *para, unsigned long addr,
const unsigned char *buf, unsigned long opcnt)
{
unsigned int waitcnt;
unsigned long flags;
u32 cmd_word;
int ret;
if (!buf || !opcnt || !para || !lpcdev)
return -EINVAL;
/* default is increasing address */
cmd_word = LPC_REG_CMD_OP; /* IO mode, write */
waitcnt = LPC_PEROP_WAITCNT;
if (!(para->opflags & FG_INCRADDR_LPC)) {
cmd_word |= LPC_REG_CMD_SAMEADDR;
waitcnt = LPC_MAX_WAITCNT;
}
spin_lock_irqsave(&lpcdev->cycle_lock, flags);
writel_relaxed(opcnt, lpcdev->membase + LPC_REG_OP_LEN);
writel_relaxed(cmd_word, lpcdev->membase + LPC_REG_CMD);
writel_relaxed(addr, lpcdev->membase + LPC_REG_ADDR);
writesb(lpcdev->membase + LPC_REG_WDATA, buf, opcnt);
writel(LPC_REG_STARTUP_SIGNAL_START,
lpcdev->membase + LPC_REG_STARTUP_SIGNAL);
/* whether the operation is finished */
ret = wait_lpc_idle(lpcdev->membase, waitcnt);
spin_unlock_irqrestore(&lpcdev->cycle_lock, flags);
return ret;
}
static unsigned long hisi_lpc_pio_to_addr(struct hisi_lpc_dev *lpcdev,
unsigned long pio)
{
return pio - lpcdev->io_host->io_start + lpcdev->io_host->hw_start;
}
/*
* hisi_lpc_comm_in - input the data in a single operation
* @hostdata: pointer to the device information relevant to LPC controller
* @pio: the target I/O port address
* @dwidth: the data length required to read from the target I/O port
*
 * On success, the data read is returned. Otherwise, ~0 is returned.
*/
static u32 hisi_lpc_comm_in(void *hostdata, unsigned long pio, size_t dwidth)
{
struct hisi_lpc_dev *lpcdev = hostdata;
struct lpc_cycle_para iopara;
unsigned long addr;
__le32 rd_data = 0;
int ret;
if (!lpcdev || !dwidth || dwidth > LPC_MAX_DWIDTH)
return ~0;
addr = hisi_lpc_pio_to_addr(lpcdev, pio);
iopara.opflags = FG_INCRADDR_LPC;
iopara.csize = dwidth;
ret = hisi_lpc_target_in(lpcdev, &iopara, addr,
(unsigned char *)&rd_data, dwidth);
if (ret)
return ~0;
return le32_to_cpu(rd_data);
}
/*
* hisi_lpc_comm_out - output the data in a single operation
* @hostdata: pointer to the device information relevant to LPC controller
* @pio: the target I/O port address
* @val: a value to be output from caller, maximum is four bytes
* @dwidth: the data width required writing to the target I/O port
*
* This function corresponds to out(b,w,l) only.
*/
static void hisi_lpc_comm_out(void *hostdata, unsigned long pio,
u32 val, size_t dwidth)
{
struct hisi_lpc_dev *lpcdev = hostdata;
struct lpc_cycle_para iopara;
const unsigned char *buf;
unsigned long addr;
__le32 _val = cpu_to_le32(val);
if (!lpcdev || !dwidth || dwidth > LPC_MAX_DWIDTH)
return;
buf = (const unsigned char *)&_val;
addr = hisi_lpc_pio_to_addr(lpcdev, pio);
iopara.opflags = FG_INCRADDR_LPC;
iopara.csize = dwidth;
hisi_lpc_target_out(lpcdev, &iopara, addr, buf, dwidth);
}
/*
* hisi_lpc_comm_ins - input the data in the buffer in multiple operations
* @hostdata: pointer to the device information relevant to LPC controller
* @pio: the target I/O port address
* @buffer: a buffer where read/input data bytes are stored
 * @dwidth: the data width required reading from the target I/O port
* @count: how many data units whose length is dwidth will be read
*
 * On success, the data read back is stored in the buffer pointed to by @buffer.
* Returns 0 on success, -errno otherwise.
*/
static u32 hisi_lpc_comm_ins(void *hostdata, unsigned long pio, void *buffer,
size_t dwidth, unsigned int count)
{
struct hisi_lpc_dev *lpcdev = hostdata;
unsigned char *buf = buffer;
struct lpc_cycle_para iopara;
unsigned long addr;
if (!lpcdev || !buf || !count || !dwidth || dwidth > LPC_MAX_DWIDTH)
return -EINVAL;
iopara.opflags = 0;
if (dwidth > 1)
iopara.opflags |= FG_INCRADDR_LPC;
iopara.csize = dwidth;
addr = hisi_lpc_pio_to_addr(lpcdev, pio);
do {
int ret;
ret = hisi_lpc_target_in(lpcdev, &iopara, addr, buf, dwidth);
if (ret)
return ret;
buf += dwidth;
} while (--count);
return 0;
}
/*
* hisi_lpc_comm_outs - output the data in the buffer in multiple operations
* @hostdata: pointer to the device information relevant to LPC controller
* @pio: the target I/O port address
* @buffer: a buffer where write/output data bytes are stored
* @dwidth: the data width required writing to the target I/O port
* @count: how many data units whose length is dwidth will be written
*/
static void hisi_lpc_comm_outs(void *hostdata, unsigned long pio,
const void *buffer, size_t dwidth,
unsigned int count)
{
struct hisi_lpc_dev *lpcdev = hostdata;
struct lpc_cycle_para iopara;
const unsigned char *buf = buffer;
unsigned long addr;
if (!lpcdev || !buf || !count || !dwidth || dwidth > LPC_MAX_DWIDTH)
return;
iopara.opflags = 0;
if (dwidth > 1)
iopara.opflags |= FG_INCRADDR_LPC;
iopara.csize = dwidth;
addr = hisi_lpc_pio_to_addr(lpcdev, pio);
do {
if (hisi_lpc_target_out(lpcdev, &iopara, addr, buf, dwidth))
break;
buf += dwidth;
} while (--count);
}
static const struct logic_pio_host_ops hisi_lpc_ops = {
.in = hisi_lpc_comm_in,
.out = hisi_lpc_comm_out,
.ins = hisi_lpc_comm_ins,
.outs = hisi_lpc_comm_outs,
};
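/*
 * Illustrative sketch, not part of the original driver: once the indirect
 * range below is registered, ordinary port I/O issued by a child driver is
 * redirected by the logic_pio layer to the accessors above. The function
 * name and the port value are hypothetical.
 */
static u8 __maybe_unused hisi_lpc_example_inb(unsigned long pio)
{
	return inb(pio);	/* dispatched to hisi_lpc_comm_in() */
}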
#ifdef CONFIG_ACPI
static int hisi_lpc_acpi_xlat_io_res(struct acpi_device *adev,
struct acpi_device *host,
struct resource *res)
{
unsigned long sys_port;
resource_size_t len = resource_size(res);
sys_port = logic_pio_trans_hwaddr(acpi_fwnode_handle(host), res->start, len);
if (sys_port == ~0UL)
return -EFAULT;
res->start = sys_port;
	res->end = sys_port + len - 1; /* resource ranges are inclusive */
return 0;
}
/*
* Released firmware describes the IO port max address as 0x3fff, which is
* the max host bus address. Fixup to a proper range. This will probably
* never be fixed in firmware.
*/
static void hisi_lpc_acpi_fixup_child_resource(struct device *hostdev,
struct resource *r)
{
if (r->end != 0x3fff)
return;
if (r->start == 0xe4)
r->end = 0xe4 + 0x04 - 1;
else if (r->start == 0x2f8)
r->end = 0x2f8 + 0x08 - 1;
else
dev_warn(hostdev, "unrecognised resource %pR to fixup, ignoring\n",
r);
}
/*
* hisi_lpc_acpi_set_io_res - set the resources for a child
* @adev: ACPI companion of the device node to be updated the I/O resource
* @hostdev: the device node associated with host controller
* @res: double pointer to be set to the address of translated resources
* @num_res: pointer to variable to hold the number of translated resources
*
* Returns 0 when successful, and a negative value for failure.
*
* For a given host controller, each child device will have an associated
* host-relative address resource. This function will return the translated
 * logical PIO addresses for each child device's resources.
*/
static int hisi_lpc_acpi_set_io_res(struct acpi_device *adev,
struct device *hostdev,
const struct resource **res, int *num_res)
{
struct acpi_device *host = to_acpi_device(adev->dev.parent);
struct resource_entry *rentry;
LIST_HEAD(resource_list);
struct resource *resources;
int count;
int i;
if (!adev->status.present) {
dev_dbg(&adev->dev, "device is not present\n");
return -EIO;
}
if (acpi_device_enumerated(adev)) {
dev_dbg(&adev->dev, "has been enumerated\n");
return -EIO;
}
/*
* The following code segment to retrieve the resources is common to
* acpi_create_platform_device(), so consider a common helper function
* in future.
*/
count = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
if (count <= 0) {
dev_dbg(&adev->dev, "failed to get resources\n");
return count ? count : -EIO;
}
resources = devm_kcalloc(hostdev, count, sizeof(*resources),
GFP_KERNEL);
if (!resources) {
dev_warn(hostdev, "could not allocate memory for %d resources\n",
count);
acpi_dev_free_resource_list(&resource_list);
return -ENOMEM;
}
count = 0;
list_for_each_entry(rentry, &resource_list, node) {
resources[count] = *rentry->res;
hisi_lpc_acpi_fixup_child_resource(hostdev, &resources[count]);
count++;
}
acpi_dev_free_resource_list(&resource_list);
/* translate the I/O resources */
for (i = 0; i < count; i++) {
int ret;
if (!(resources[i].flags & IORESOURCE_IO))
continue;
ret = hisi_lpc_acpi_xlat_io_res(adev, host, &resources[i]);
if (ret) {
dev_err(&adev->dev, "translate IO range %pR failed (%d)\n",
&resources[i], ret);
return ret;
}
}
*res = resources;
*num_res = count;
return 0;
}
static int hisi_lpc_acpi_remove_subdev(struct device *dev, void *unused)
{
platform_device_unregister(to_platform_device(dev));
return 0;
}
static int hisi_lpc_acpi_clear_enumerated(struct acpi_device *adev, void *not_used)
{
acpi_device_clear_enumerated(adev);
return 0;
}
struct hisi_lpc_acpi_cell {
const char *hid;
const struct platform_device_info *pdevinfo;
};
static void hisi_lpc_acpi_remove(struct device *hostdev)
{
device_for_each_child(hostdev, NULL, hisi_lpc_acpi_remove_subdev);
acpi_dev_for_each_child(ACPI_COMPANION(hostdev),
hisi_lpc_acpi_clear_enumerated, NULL);
}
static int hisi_lpc_acpi_add_child(struct acpi_device *child, void *data)
{
const char *hid = acpi_device_hid(child);
struct device *hostdev = data;
const struct hisi_lpc_acpi_cell *cell;
struct platform_device *pdev;
const struct resource *res;
bool found = false;
int num_res;
int ret;
ret = hisi_lpc_acpi_set_io_res(child, hostdev, &res, &num_res);
if (ret) {
dev_warn(hostdev, "set resource fail (%d)\n", ret);
return ret;
}
cell = (struct hisi_lpc_acpi_cell []){
/* ipmi */
{
.hid = "IPI0001",
.pdevinfo = (struct platform_device_info []) {
{
.parent = hostdev,
.fwnode = acpi_fwnode_handle(child),
.name = "hisi-lpc-ipmi",
.id = PLATFORM_DEVID_AUTO,
.res = res,
.num_res = num_res,
},
},
},
/* 8250-compatible uart */
{
.hid = "HISI1031",
.pdevinfo = (struct platform_device_info []) {
{
.parent = hostdev,
.fwnode = acpi_fwnode_handle(child),
.name = "serial8250",
.id = PLATFORM_DEVID_AUTO,
.res = res,
.num_res = num_res,
.data = (struct plat_serial8250_port []) {
{
.iobase = res->start,
.uartclk = 1843200,
.iotype = UPIO_PORT,
.flags = UPF_BOOT_AUTOCONF,
},
{}
},
.size_data = 2 * sizeof(struct plat_serial8250_port),
},
},
},
{}
};
for (; cell && cell->hid; cell++) {
if (!strcmp(cell->hid, hid)) {
found = true;
break;
}
}
if (!found) {
dev_warn(hostdev,
"could not find cell for child device (%s), discarding\n",
hid);
return 0;
}
pdev = platform_device_register_full(cell->pdevinfo);
if (IS_ERR(pdev))
return PTR_ERR(pdev);
acpi_device_set_enumerated(child);
return 0;
}
/*
* hisi_lpc_acpi_probe - probe children for ACPI FW
* @hostdev: LPC host device pointer
*
* Returns 0 when successful, and a negative value for failure.
*
* Create a platform device per child, fixing up the resources
* from bus addresses to Logical PIO addresses.
*
*/
static int hisi_lpc_acpi_probe(struct device *hostdev)
{
int ret;
/* Only consider the children of the host */
ret = acpi_dev_for_each_child(ACPI_COMPANION(hostdev),
hisi_lpc_acpi_add_child, hostdev);
if (ret)
hisi_lpc_acpi_remove(hostdev);
return ret;
}
#else
static int hisi_lpc_acpi_probe(struct device *dev)
{
return -ENODEV;
}
static void hisi_lpc_acpi_remove(struct device *hostdev)
{
}
#endif // CONFIG_ACPI
/*
* hisi_lpc_probe - the probe callback function for hisi lpc host,
* will finish all the initialization.
* @pdev: the platform device corresponding to hisi lpc host
*
* Returns 0 on success, non-zero on fail.
*/
static int hisi_lpc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct logic_pio_hwaddr *range;
struct hisi_lpc_dev *lpcdev;
resource_size_t io_end;
int ret;
lpcdev = devm_kzalloc(dev, sizeof(*lpcdev), GFP_KERNEL);
if (!lpcdev)
return -ENOMEM;
spin_lock_init(&lpcdev->cycle_lock);
lpcdev->membase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(lpcdev->membase))
return PTR_ERR(lpcdev->membase);
range = devm_kzalloc(dev, sizeof(*range), GFP_KERNEL);
if (!range)
return -ENOMEM;
range->fwnode = dev_fwnode(dev);
range->flags = LOGIC_PIO_INDIRECT;
range->size = PIO_INDIRECT_SIZE;
range->hostdata = lpcdev;
range->ops = &hisi_lpc_ops;
lpcdev->io_host = range;
ret = logic_pio_register_range(range);
if (ret) {
dev_err(dev, "register IO range failed (%d)!\n", ret);
return ret;
}
/* register the LPC host PIO resources */
if (is_acpi_device_node(range->fwnode))
ret = hisi_lpc_acpi_probe(dev);
else
ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
if (ret) {
logic_pio_unregister_range(range);
return ret;
}
dev_set_drvdata(dev, lpcdev);
io_end = lpcdev->io_host->io_start + lpcdev->io_host->size;
dev_info(dev, "registered range [%pa - %pa]\n",
&lpcdev->io_host->io_start, &io_end);
return ret;
}
static int hisi_lpc_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct hisi_lpc_dev *lpcdev = dev_get_drvdata(dev);
struct logic_pio_hwaddr *range = lpcdev->io_host;
if (is_acpi_device_node(range->fwnode))
hisi_lpc_acpi_remove(dev);
else
of_platform_depopulate(dev);
logic_pio_unregister_range(range);
return 0;
}
static const struct of_device_id hisi_lpc_of_match[] = {
{ .compatible = "hisilicon,hip06-lpc", },
{ .compatible = "hisilicon,hip07-lpc", },
{}
};
static const struct acpi_device_id hisi_lpc_acpi_match[] = {
{"HISI0191"},
{}
};
static struct platform_driver hisi_lpc_driver = {
.driver = {
.name = DRV_NAME,
.of_match_table = hisi_lpc_of_match,
.acpi_match_table = hisi_lpc_acpi_match,
},
.probe = hisi_lpc_probe,
.remove = hisi_lpc_remove,
};
builtin_platform_driver(hisi_lpc_driver);
| linux-master | drivers/bus/hisi_lpc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* NBUS driver for TS-4600 based boards
*
* Copyright (c) 2016 - Savoir-faire Linux
* Author: Sebastien Bourdelin <[email protected]>
*
 * This driver implements a GPIO bit-banged bus, called the NBUS by Technologic
* Systems. It is used to communicate with the peripherals in the FPGA on the
* TS-4600 SoM.
*/
#include <linux/bitops.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/ts-nbus.h>
#define TS_NBUS_DIRECTION_IN 0
#define TS_NBUS_DIRECTION_OUT 1
#define TS_NBUS_WRITE_ADR 0
#define TS_NBUS_WRITE_VAL 1
struct ts_nbus {
struct pwm_device *pwm;
struct gpio_descs *data;
struct gpio_desc *csn;
struct gpio_desc *txrx;
struct gpio_desc *strobe;
struct gpio_desc *ale;
struct gpio_desc *rdy;
struct mutex lock;
};
/*
* request all gpios required by the bus.
*/
static int ts_nbus_init_pdata(struct platform_device *pdev, struct ts_nbus
*ts_nbus)
{
ts_nbus->data = devm_gpiod_get_array(&pdev->dev, "ts,data",
GPIOD_OUT_HIGH);
if (IS_ERR(ts_nbus->data)) {
dev_err(&pdev->dev, "failed to retrieve ts,data-gpio from dts\n");
return PTR_ERR(ts_nbus->data);
}
ts_nbus->csn = devm_gpiod_get(&pdev->dev, "ts,csn", GPIOD_OUT_HIGH);
if (IS_ERR(ts_nbus->csn)) {
dev_err(&pdev->dev, "failed to retrieve ts,csn-gpio from dts\n");
return PTR_ERR(ts_nbus->csn);
}
ts_nbus->txrx = devm_gpiod_get(&pdev->dev, "ts,txrx", GPIOD_OUT_HIGH);
if (IS_ERR(ts_nbus->txrx)) {
dev_err(&pdev->dev, "failed to retrieve ts,txrx-gpio from dts\n");
return PTR_ERR(ts_nbus->txrx);
}
ts_nbus->strobe = devm_gpiod_get(&pdev->dev, "ts,strobe", GPIOD_OUT_HIGH);
if (IS_ERR(ts_nbus->strobe)) {
dev_err(&pdev->dev, "failed to retrieve ts,strobe-gpio from dts\n");
return PTR_ERR(ts_nbus->strobe);
}
ts_nbus->ale = devm_gpiod_get(&pdev->dev, "ts,ale", GPIOD_OUT_HIGH);
if (IS_ERR(ts_nbus->ale)) {
dev_err(&pdev->dev, "failed to retrieve ts,ale-gpio from dts\n");
return PTR_ERR(ts_nbus->ale);
}
ts_nbus->rdy = devm_gpiod_get(&pdev->dev, "ts,rdy", GPIOD_IN);
if (IS_ERR(ts_nbus->rdy)) {
dev_err(&pdev->dev, "failed to retrieve ts,rdy-gpio from dts\n");
return PTR_ERR(ts_nbus->rdy);
}
return 0;
}
/*
* the data gpios are used for reading and writing values, their directions
* should be adjusted accordingly.
*/
static void ts_nbus_set_direction(struct ts_nbus *ts_nbus, int direction)
{
int i;
for (i = 0; i < 8; i++) {
if (direction == TS_NBUS_DIRECTION_IN)
gpiod_direction_input(ts_nbus->data->desc[i]);
else
			/* when used as output the default state of the data
			 * lines is set to high */
gpiod_direction_output(ts_nbus->data->desc[i], 1);
}
}
/*
 * reset the bus to its initial state.
 * The data, csn, strobe and ale lines must be zeroed to let the FPGA know
 * a new transaction can be processed.
*/
static void ts_nbus_reset_bus(struct ts_nbus *ts_nbus)
{
DECLARE_BITMAP(values, 8);
values[0] = 0;
gpiod_set_array_value_cansleep(8, ts_nbus->data->desc,
ts_nbus->data->info, values);
gpiod_set_value_cansleep(ts_nbus->csn, 0);
gpiod_set_value_cansleep(ts_nbus->strobe, 0);
gpiod_set_value_cansleep(ts_nbus->ale, 0);
}
/*
 * let the FPGA know it can start processing the transaction.
*/
static void ts_nbus_start_transaction(struct ts_nbus *ts_nbus)
{
gpiod_set_value_cansleep(ts_nbus->strobe, 1);
}
/*
* read a byte value from the data gpios.
* return 0 on success or negative errno on failure.
*/
static int ts_nbus_read_byte(struct ts_nbus *ts_nbus, u8 *val)
{
struct gpio_descs *gpios = ts_nbus->data;
int ret, i;
*val = 0;
for (i = 0; i < 8; i++) {
ret = gpiod_get_value_cansleep(gpios->desc[i]);
if (ret < 0)
return ret;
if (ret)
*val |= BIT(i);
}
return 0;
}
/*
* set the data gpios accordingly to the byte value.
*/
static void ts_nbus_write_byte(struct ts_nbus *ts_nbus, u8 byte)
{
struct gpio_descs *gpios = ts_nbus->data;
DECLARE_BITMAP(values, 8);
values[0] = byte;
gpiod_set_array_value_cansleep(8, gpios->desc, gpios->info, values);
}
/*
* reading the bus consists of resetting the bus, notifying the FPGA to put
* the data on the data gpios, then reading the value back.
* return 0 on success or negative errno on failure.
*/
static int ts_nbus_read_bus(struct ts_nbus *ts_nbus, u8 *val)
{
ts_nbus_reset_bus(ts_nbus);
ts_nbus_start_transaction(ts_nbus);
return ts_nbus_read_byte(ts_nbus, val);
}
/*
* writing to the bus consists of resetting the bus, defining the type of
* command (address/value), writing the data and notifying the FPGA to latch
* the value from the data gpios.
*/
static void ts_nbus_write_bus(struct ts_nbus *ts_nbus, int cmd, u8 val)
{
ts_nbus_reset_bus(ts_nbus);
if (cmd == TS_NBUS_WRITE_ADR)
gpiod_set_value_cansleep(ts_nbus->ale, 1);
ts_nbus_write_byte(ts_nbus, val);
ts_nbus_start_transaction(ts_nbus);
}
/*
* read the value in the FPGA register at the given address.
* return 0 on success or negative errno on failure.
*/
int ts_nbus_read(struct ts_nbus *ts_nbus, u8 adr, u16 *val)
{
int ret, i;
u8 byte;
/* bus access must be atomic */
mutex_lock(&ts_nbus->lock);
/* set the bus in read mode */
gpiod_set_value_cansleep(ts_nbus->txrx, 0);
/* write address */
ts_nbus_write_bus(ts_nbus, TS_NBUS_WRITE_ADR, adr);
/* set the data gpios direction as input before reading */
ts_nbus_set_direction(ts_nbus, TS_NBUS_DIRECTION_IN);
/* reading value MSB first */
do {
*val = 0;
byte = 0;
for (i = 1; i >= 0; i--) {
/* read a byte from the bus, leave on error */
ret = ts_nbus_read_bus(ts_nbus, &byte);
if (ret < 0)
goto err;
/* append the byte read to the final value */
*val |= byte << (i * 8);
}
gpiod_set_value_cansleep(ts_nbus->csn, 1);
ret = gpiod_get_value_cansleep(ts_nbus->rdy);
} while (ret);
err:
/* restore the data gpios direction as output after reading */
ts_nbus_set_direction(ts_nbus, TS_NBUS_DIRECTION_OUT);
mutex_unlock(&ts_nbus->lock);
return ret;
}
EXPORT_SYMBOL_GPL(ts_nbus_read);
/*
* write the desired value in the FPGA register at the given address.
*/
int ts_nbus_write(struct ts_nbus *ts_nbus, u8 adr, u16 val)
{
int i;
/* bus access must be atomic */
mutex_lock(&ts_nbus->lock);
/* set the bus in write mode */
gpiod_set_value_cansleep(ts_nbus->txrx, 1);
/* write address */
ts_nbus_write_bus(ts_nbus, TS_NBUS_WRITE_ADR, adr);
/* writing value MSB first */
for (i = 1; i >= 0; i--)
ts_nbus_write_bus(ts_nbus, TS_NBUS_WRITE_VAL, (u8)(val >> (i * 8)));
/* wait for completion */
gpiod_set_value_cansleep(ts_nbus->csn, 1);
while (gpiod_get_value_cansleep(ts_nbus->rdy) != 0) {
gpiod_set_value_cansleep(ts_nbus->csn, 0);
gpiod_set_value_cansleep(ts_nbus->csn, 1);
}
mutex_unlock(&ts_nbus->lock);
return 0;
}
EXPORT_SYMBOL_GPL(ts_nbus_write);
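/*
* Illustrative usage (not part of this file): a peripheral driver sitting
* below the ts-nbus node could access a hypothetical FPGA register at
* address 0x12 like this:
*
*	struct ts_nbus *nbus = dev_get_drvdata(dev->parent);
*	u16 val;
*	int err = ts_nbus_read(nbus, 0x12, &val);
*	if (!err)
*		err = ts_nbus_write(nbus, 0x12, val | BIT(0));
*/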
static int ts_nbus_probe(struct platform_device *pdev)
{
struct pwm_device *pwm;
struct pwm_args pargs;
struct device *dev = &pdev->dev;
struct ts_nbus *ts_nbus;
int ret;
ts_nbus = devm_kzalloc(dev, sizeof(*ts_nbus), GFP_KERNEL);
if (!ts_nbus)
return -ENOMEM;
mutex_init(&ts_nbus->lock);
ret = ts_nbus_init_pdata(pdev, ts_nbus);
if (ret < 0)
return ret;
pwm = devm_pwm_get(dev, NULL);
if (IS_ERR(pwm)) {
ret = PTR_ERR(pwm);
if (ret != -EPROBE_DEFER)
dev_err(dev, "unable to request PWM\n");
return ret;
}
pwm_get_args(pwm, &pargs);
if (!pargs.period) {
dev_err(&pdev->dev, "invalid PWM period\n");
return -EINVAL;
}
/*
* FIXME: pwm_apply_args() should be removed when switching to
* the atomic PWM API.
*/
pwm_apply_args(pwm);
ret = pwm_config(pwm, pargs.period, pargs.period);
if (ret < 0)
return ret;
/*
* we can now start the FPGA and populate the peripherals.
*/
pwm_enable(pwm);
ts_nbus->pwm = pwm;
/*
* let the child nodes retrieve this instance of the ts-nbus.
*/
dev_set_drvdata(dev, ts_nbus);
ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
if (ret < 0)
return ret;
dev_info(dev, "initialized\n");
return 0;
}
static int ts_nbus_remove(struct platform_device *pdev)
{
struct ts_nbus *ts_nbus = dev_get_drvdata(&pdev->dev);
/* shutdown the FPGA */
mutex_lock(&ts_nbus->lock);
pwm_disable(ts_nbus->pwm);
mutex_unlock(&ts_nbus->lock);
return 0;
}
static const struct of_device_id ts_nbus_of_match[] = {
{ .compatible = "technologic,ts-nbus", },
{ },
};
MODULE_DEVICE_TABLE(of, ts_nbus_of_match);
static struct platform_driver ts_nbus_driver = {
.probe = ts_nbus_probe,
.remove = ts_nbus_remove,
.driver = {
.name = "ts_nbus",
.of_match_table = ts_nbus_of_match,
},
};
module_platform_driver(ts_nbus_driver);
MODULE_ALIAS("platform:ts_nbus");
MODULE_AUTHOR("Sebastien Bourdelin <[email protected]>");
MODULE_DESCRIPTION("Technologic Systems NBUS");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/bus/ts-nbus.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Address map functions for Marvell EBU SoCs (Kirkwood, Armada
* 370/XP, Dove, Orion5x and MV78xx0)
*
* The Marvell EBU SoCs have a configurable physical address space:
* the physical address at which certain devices (PCIe, NOR, NAND,
* etc.) sit can be configured. The configuration takes place through
* two sets of registers:
*
* - One to configure the access of the CPU to the devices. Depending
* on the families, there are between 8 and 20 configurable windows,
* each of which can be used to create a physical memory window that maps
* specific device. Devices are identified by a tuple (target,
* attribute).
*
* - One to configure the access of the CPU to the SDRAM. There are
* either 2 (for Dove) or 4 (for other families) windows to map the
* SDRAM into the physical address space.
*
* This driver:
*
* - Reads out the SDRAM address decoding windows at initialization
* time, and fills the mvebu_mbus_dram_info structure with this
* information. The exported function mv_mbus_dram_info() allows
* device drivers to get the information related to the SDRAM
* address decoding windows. This is because devices also have their
* own windows (configured through registers that are part of each
* device register space), and therefore the drivers for Marvell
* devices have to configure those device -> SDRAM windows to ensure
* that DMA works properly.
*
* - Provides an API for platform code or device drivers to
* dynamically add or remove address decoding windows for the CPU ->
* device accesses. This API is mvebu_mbus_add_window_by_id(),
* mvebu_mbus_add_window_remap_by_id() and
* mvebu_mbus_del_window().
*
* - Provides a debugfs interface in /sys/kernel/debug/mvebu-mbus/ to
* see the list of CPU -> SDRAM windows and their configuration
* (file 'sdram') and the list of CPU -> devices windows and their
* configuration (file 'devices').
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mbus.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/debugfs.h>
#include <linux/log2.h>
#include <linux/memblock.h>
#include <linux/syscore_ops.h>
/*
* DDR target is the same on all platforms.
*/
#define TARGET_DDR 0
/*
* CPU Address Decode Windows registers
*/
#define WIN_CTRL_OFF 0x0000
#define WIN_CTRL_ENABLE BIT(0)
/* Only on HW I/O coherency capable platforms */
#define WIN_CTRL_SYNCBARRIER BIT(1)
#define WIN_CTRL_TGT_MASK 0xf0
#define WIN_CTRL_TGT_SHIFT 4
#define WIN_CTRL_ATTR_MASK 0xff00
#define WIN_CTRL_ATTR_SHIFT 8
#define WIN_CTRL_SIZE_MASK 0xffff0000
#define WIN_CTRL_SIZE_SHIFT 16
#define WIN_BASE_OFF 0x0004
#define WIN_BASE_LOW 0xffff0000
#define WIN_BASE_HIGH 0xf
#define WIN_REMAP_LO_OFF 0x0008
#define WIN_REMAP_LOW 0xffff0000
#define WIN_REMAP_HI_OFF 0x000c
#define UNIT_SYNC_BARRIER_OFF 0x84
#define UNIT_SYNC_BARRIER_ALL 0xFFFF
#define ATTR_HW_COHERENCY (0x1 << 4)
#define DDR_BASE_CS_OFF(n) (0x0000 + ((n) << 3))
#define DDR_BASE_CS_HIGH_MASK 0xf
#define DDR_BASE_CS_LOW_MASK 0xff000000
#define DDR_SIZE_CS_OFF(n) (0x0004 + ((n) << 3))
#define DDR_SIZE_ENABLED BIT(0)
#define DDR_SIZE_CS_MASK 0x1c
#define DDR_SIZE_CS_SHIFT 2
#define DDR_SIZE_MASK 0xff000000
#define DOVE_DDR_BASE_CS_OFF(n) ((n) << 4)
/* Relative to mbusbridge_base */
#define MBUS_BRIDGE_CTRL_OFF 0x0
#define MBUS_BRIDGE_BASE_OFF 0x4
/* Maximum number of windows, for all known platforms */
#define MBUS_WINS_MAX 20
struct mvebu_mbus_state;
struct mvebu_mbus_soc_data {
unsigned int num_wins;
bool has_mbus_bridge;
unsigned int (*win_cfg_offset)(const int win);
unsigned int (*win_remap_offset)(const int win);
void (*setup_cpu_target)(struct mvebu_mbus_state *s);
int (*save_cpu_target)(struct mvebu_mbus_state *s,
u32 __iomem *store_addr);
int (*show_cpu_target)(struct mvebu_mbus_state *s,
struct seq_file *seq, void *v);
};
/*
* Used to store the state of one MBus window across suspend/resume.
*/
struct mvebu_mbus_win_data {
u32 ctrl;
u32 base;
u32 remap_lo;
u32 remap_hi;
};
struct mvebu_mbus_state {
void __iomem *mbuswins_base;
void __iomem *sdramwins_base;
void __iomem *mbusbridge_base;
phys_addr_t sdramwins_phys_base;
struct dentry *debugfs_root;
struct dentry *debugfs_sdram;
struct dentry *debugfs_devs;
struct resource pcie_mem_aperture;
struct resource pcie_io_aperture;
const struct mvebu_mbus_soc_data *soc;
int hw_io_coherency;
/* Used during suspend/resume */
u32 mbus_bridge_ctrl;
u32 mbus_bridge_base;
struct mvebu_mbus_win_data wins[MBUS_WINS_MAX];
};
static struct mvebu_mbus_state mbus_state;
/*
* We provide two variants of the mv_mbus_dram_info() function:
*
* - The normal one, where the described DRAM ranges may overlap with
* the I/O windows, but for which the DRAM ranges are guaranteed to
* have a power of two size. Such ranges are suitable for the DMA
* masters that only DMA between the RAM and the device, which is
* actually all devices except the crypto engines.
*
* - The 'nooverlap' one, where the described DRAM ranges are
* guaranteed to not overlap with the I/O windows, but for which the
* DRAM ranges will not have power of two sizes. They will only be
* aligned on a 64 KB boundary, and have a size multiple of 64
* KB. Such ranges are suitable for the DMA masters that DMA between
* the crypto SRAM (which is mapped through an I/O window) and a
* device. This is the case for the crypto engines.
*/
static struct mbus_dram_target_info mvebu_mbus_dram_info;
static struct mbus_dram_target_info mvebu_mbus_dram_info_nooverlap;
const struct mbus_dram_target_info *mv_mbus_dram_info(void)
{
return &mvebu_mbus_dram_info;
}
EXPORT_SYMBOL_GPL(mv_mbus_dram_info);
const struct mbus_dram_target_info *mv_mbus_dram_info_nooverlap(void)
{
return &mvebu_mbus_dram_info_nooverlap;
}
EXPORT_SYMBOL_GPL(mv_mbus_dram_info_nooverlap);
/* Checks whether the given window has remap capability */
static bool mvebu_mbus_window_is_remappable(struct mvebu_mbus_state *mbus,
const int win)
{
return mbus->soc->win_remap_offset(win) != MVEBU_MBUS_NO_REMAP;
}
/*
* Functions to manipulate the address decoding windows
*/
static void mvebu_mbus_read_window(struct mvebu_mbus_state *mbus,
int win, int *enabled, u64 *base,
u32 *size, u8 *target, u8 *attr,
u64 *remap)
{
void __iomem *addr = mbus->mbuswins_base +
mbus->soc->win_cfg_offset(win);
u32 basereg = readl(addr + WIN_BASE_OFF);
u32 ctrlreg = readl(addr + WIN_CTRL_OFF);
if (!(ctrlreg & WIN_CTRL_ENABLE)) {
*enabled = 0;
return;
}
*enabled = 1;
*base = ((u64)basereg & WIN_BASE_HIGH) << 32;
*base |= (basereg & WIN_BASE_LOW);
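/*
* The size field stores (size - 1) with the low 16 bits implied to be all
* ones (64 KB granularity); restore them and add one to get the window
* size in bytes.
*/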
*size = (ctrlreg | ~WIN_CTRL_SIZE_MASK) + 1;
if (target)
*target = (ctrlreg & WIN_CTRL_TGT_MASK) >> WIN_CTRL_TGT_SHIFT;
if (attr)
*attr = (ctrlreg & WIN_CTRL_ATTR_MASK) >> WIN_CTRL_ATTR_SHIFT;
if (remap) {
if (mvebu_mbus_window_is_remappable(mbus, win)) {
u32 remap_low, remap_hi;
void __iomem *addr_rmp = mbus->mbuswins_base +
mbus->soc->win_remap_offset(win);
remap_low = readl(addr_rmp + WIN_REMAP_LO_OFF);
remap_hi = readl(addr_rmp + WIN_REMAP_HI_OFF);
*remap = ((u64)remap_hi << 32) | remap_low;
} else
*remap = 0;
}
}
static void mvebu_mbus_disable_window(struct mvebu_mbus_state *mbus,
int win)
{
void __iomem *addr;
addr = mbus->mbuswins_base + mbus->soc->win_cfg_offset(win);
writel(0, addr + WIN_BASE_OFF);
writel(0, addr + WIN_CTRL_OFF);
if (mvebu_mbus_window_is_remappable(mbus, win)) {
addr = mbus->mbuswins_base + mbus->soc->win_remap_offset(win);
writel(0, addr + WIN_REMAP_LO_OFF);
writel(0, addr + WIN_REMAP_HI_OFF);
}
}
/* Checks whether the given window number is available */
static int mvebu_mbus_window_is_free(struct mvebu_mbus_state *mbus,
const int win)
{
void __iomem *addr = mbus->mbuswins_base +
mbus->soc->win_cfg_offset(win);
u32 ctrl = readl(addr + WIN_CTRL_OFF);
return !(ctrl & WIN_CTRL_ENABLE);
}
/*
* Checks whether the given (base, base+size) area overlaps an existing
* window: returns 1 if there is no conflict, 0 otherwise
*/
static int mvebu_mbus_window_conflicts(struct mvebu_mbus_state *mbus,
phys_addr_t base, size_t size,
u8 target, u8 attr)
{
u64 end = (u64)base + size;
int win;
for (win = 0; win < mbus->soc->num_wins; win++) {
u64 wbase, wend;
u32 wsize;
u8 wtarget, wattr;
int enabled;
mvebu_mbus_read_window(mbus, win,
&enabled, &wbase, &wsize,
&wtarget, &wattr, NULL);
if (!enabled)
continue;
wend = wbase + wsize;
/*
* Check if the current window overlaps with the
* proposed physical range
*/
if ((u64)base < wend && end > wbase)
return 0;
}
return 1;
}
static int mvebu_mbus_find_window(struct mvebu_mbus_state *mbus,
phys_addr_t base, size_t size)
{
int win;
for (win = 0; win < mbus->soc->num_wins; win++) {
u64 wbase;
u32 wsize;
int enabled;
mvebu_mbus_read_window(mbus, win,
&enabled, &wbase, &wsize,
NULL, NULL, NULL);
if (!enabled)
continue;
if (base == wbase && size == wsize)
return win;
}
return -ENODEV;
}
static int mvebu_mbus_setup_window(struct mvebu_mbus_state *mbus,
int win, phys_addr_t base, size_t size,
phys_addr_t remap, u8 target,
u8 attr)
{
void __iomem *addr = mbus->mbuswins_base +
mbus->soc->win_cfg_offset(win);
u32 ctrl, remap_addr;
if (!is_power_of_2(size)) {
WARN(true, "Invalid MBus window size: 0x%zx\n", size);
return -EINVAL;
}
if ((base & (phys_addr_t)(size - 1)) != 0) {
WARN(true, "Invalid MBus base/size: %pa len 0x%zx\n", &base,
size);
return -EINVAL;
}
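/*
* Encode (size - 1) in the upper 16 bits of the control register: a
* 1 MiB window, for instance, yields a size field of 0x000f0000.
*/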
ctrl = ((size - 1) & WIN_CTRL_SIZE_MASK) |
(attr << WIN_CTRL_ATTR_SHIFT) |
(target << WIN_CTRL_TGT_SHIFT) |
WIN_CTRL_ENABLE;
if (mbus->hw_io_coherency)
ctrl |= WIN_CTRL_SYNCBARRIER;
writel(base & WIN_BASE_LOW, addr + WIN_BASE_OFF);
writel(ctrl, addr + WIN_CTRL_OFF);
if (mvebu_mbus_window_is_remappable(mbus, win)) {
void __iomem *addr_rmp = mbus->mbuswins_base +
mbus->soc->win_remap_offset(win);
if (remap == MVEBU_MBUS_NO_REMAP)
remap_addr = base;
else
remap_addr = remap;
writel(remap_addr & WIN_REMAP_LOW, addr_rmp + WIN_REMAP_LO_OFF);
writel(0, addr_rmp + WIN_REMAP_HI_OFF);
}
return 0;
}
static int mvebu_mbus_alloc_window(struct mvebu_mbus_state *mbus,
phys_addr_t base, size_t size,
phys_addr_t remap, u8 target,
u8 attr)
{
int win;
if (remap == MVEBU_MBUS_NO_REMAP) {
for (win = 0; win < mbus->soc->num_wins; win++) {
if (mvebu_mbus_window_is_remappable(mbus, win))
continue;
if (mvebu_mbus_window_is_free(mbus, win))
return mvebu_mbus_setup_window(mbus, win, base,
size, remap,
target, attr);
}
}
for (win = 0; win < mbus->soc->num_wins; win++) {
/* Skip window if need remap but is not supported */
if ((remap != MVEBU_MBUS_NO_REMAP) &&
!mvebu_mbus_window_is_remappable(mbus, win))
continue;
if (mvebu_mbus_window_is_free(mbus, win))
return mvebu_mbus_setup_window(mbus, win, base, size,
remap, target, attr);
}
return -ENOMEM;
}
/*
* Debugfs debugging
*/
/* Common function used for Dove, Kirkwood, Armada 370/XP and Orion 5x */
static int mvebu_sdram_debug_show_orion(struct mvebu_mbus_state *mbus,
struct seq_file *seq, void *v)
{
int i;
for (i = 0; i < 4; i++) {
u32 basereg = readl(mbus->sdramwins_base + DDR_BASE_CS_OFF(i));
u32 sizereg = readl(mbus->sdramwins_base + DDR_SIZE_CS_OFF(i));
u64 base;
u32 size;
if (!(sizereg & DDR_SIZE_ENABLED)) {
seq_printf(seq, "[%d] disabled\n", i);
continue;
}
base = ((u64)basereg & DDR_BASE_CS_HIGH_MASK) << 32;
base |= basereg & DDR_BASE_CS_LOW_MASK;
size = (sizereg | ~DDR_SIZE_MASK);
seq_printf(seq, "[%d] %016llx - %016llx : cs%d\n",
i, (unsigned long long)base,
(unsigned long long)base + size + 1,
(sizereg & DDR_SIZE_CS_MASK) >> DDR_SIZE_CS_SHIFT);
}
return 0;
}
/* Special function for Dove */
static int mvebu_sdram_debug_show_dove(struct mvebu_mbus_state *mbus,
struct seq_file *seq, void *v)
{
int i;
for (i = 0; i < 2; i++) {
u32 map = readl(mbus->sdramwins_base + DOVE_DDR_BASE_CS_OFF(i));
u64 base;
u32 size;
if (!(map & 1)) {
seq_printf(seq, "[%d] disabled\n", i);
continue;
}
base = map & 0xff800000;
size = 0x100000 << (((map & 0x000f0000) >> 16) - 4);
seq_printf(seq, "[%d] %016llx - %016llx : cs%d\n",
i, (unsigned long long)base,
(unsigned long long)base + size, i);
}
return 0;
}
static int mvebu_sdram_debug_show(struct seq_file *seq, void *v)
{
struct mvebu_mbus_state *mbus = &mbus_state;
return mbus->soc->show_cpu_target(mbus, seq, v);
}
DEFINE_SHOW_ATTRIBUTE(mvebu_sdram_debug);
static int mvebu_devs_debug_show(struct seq_file *seq, void *v)
{
struct mvebu_mbus_state *mbus = &mbus_state;
int win;
for (win = 0; win < mbus->soc->num_wins; win++) {
u64 wbase, wremap;
u32 wsize;
u8 wtarget, wattr;
int enabled;
mvebu_mbus_read_window(mbus, win,
&enabled, &wbase, &wsize,
&wtarget, &wattr, &wremap);
if (!enabled) {
seq_printf(seq, "[%02d] disabled\n", win);
continue;
}
seq_printf(seq, "[%02d] %016llx - %016llx : %04x:%04x",
win, (unsigned long long)wbase,
(unsigned long long)(wbase + wsize), wtarget, wattr);
if (!is_power_of_2(wsize) ||
((wbase & (u64)(wsize - 1)) != 0))
seq_puts(seq, " (Invalid base/size!!)");
if (mvebu_mbus_window_is_remappable(mbus, win)) {
seq_printf(seq, " (remap %016llx)\n",
(unsigned long long)wremap);
} else
seq_printf(seq, "\n");
}
return 0;
}
DEFINE_SHOW_ATTRIBUTE(mvebu_devs_debug);
/*
* SoC-specific functions and definitions
*/
static unsigned int generic_mbus_win_cfg_offset(int win)
{
return win << 4;
}
static unsigned int armada_370_xp_mbus_win_cfg_offset(int win)
{
/* The register layout is a bit annoying and the below code
* tries to cope with it.
* - At offset 0x0, there are the registers for the first 8
* windows, with 4 registers of 32 bits per window (ctrl,
* base, remap low, remap high)
* - Then at offset 0x80, there is a hole of 0x10 bytes for
* the internal registers base address and internal units
* sync barrier register.
* - Then at offset 0x90, there are the registers for the remaining 12
* windows, with only 2 registers of 32 bits per window
* (ctrl, base).
*/
if (win < 8)
return win << 4;
else
return 0x90 + ((win - 8) << 3);
}
static unsigned int mv78xx0_mbus_win_cfg_offset(int win)
{
if (win < 8)
return win << 4;
else
return 0x900 + ((win - 8) << 4);
}
static unsigned int generic_mbus_win_remap_2_offset(int win)
{
if (win < 2)
return generic_mbus_win_cfg_offset(win);
else
return MVEBU_MBUS_NO_REMAP;
}
static unsigned int generic_mbus_win_remap_4_offset(int win)
{
if (win < 4)
return generic_mbus_win_cfg_offset(win);
else
return MVEBU_MBUS_NO_REMAP;
}
static unsigned int generic_mbus_win_remap_8_offset(int win)
{
if (win < 8)
return generic_mbus_win_cfg_offset(win);
else
return MVEBU_MBUS_NO_REMAP;
}
static unsigned int armada_xp_mbus_win_remap_offset(int win)
{
if (win < 8)
return generic_mbus_win_cfg_offset(win);
else if (win == 13)
return 0xF0 - WIN_REMAP_LO_OFF;
else
return MVEBU_MBUS_NO_REMAP;
}
/*
* Use the memblock information to find the MBus bridge hole in the
* physical address space.
*/
static void __init
mvebu_mbus_find_bridge_hole(uint64_t *start, uint64_t *end)
{
phys_addr_t reg_start, reg_end;
uint64_t i, s = 0;
for_each_mem_range(i, &reg_start, &reg_end) {
/*
* This part of the memory is above 4 GB, so we don't
* care about the MBus bridge hole.
*/
if ((u64)reg_start >= 0x100000000ULL)
continue;
/*
* The MBus bridge hole is at the end of the RAM under
* the 4 GB limit.
*/
if (reg_end > s)
s = reg_end;
}
*start = s;
*end = 0x100000000ULL;
}
/*
* This function fills in the mvebu_mbus_dram_info_nooverlap data
* structure, by looking at the mvebu_mbus_dram_info data, and
* removing the parts of it that overlap with I/O windows.
*/
static void __init
mvebu_mbus_setup_cpu_target_nooverlap(struct mvebu_mbus_state *mbus)
{
uint64_t mbus_bridge_base, mbus_bridge_end;
int cs_nooverlap = 0;
int i;
mvebu_mbus_find_bridge_hole(&mbus_bridge_base, &mbus_bridge_end);
for (i = 0; i < mvebu_mbus_dram_info.num_cs; i++) {
struct mbus_dram_window *w;
u64 base, size, end;
w = &mvebu_mbus_dram_info.cs[i];
base = w->base;
size = w->size;
end = base + size;
/*
* The CS is fully enclosed inside the MBus bridge
* area, so ignore it.
*/
if (base >= mbus_bridge_base && end <= mbus_bridge_end)
continue;
/*
* Beginning of CS overlaps with end of MBus, raise CS
* base address, and shrink its size.
*/
if (base >= mbus_bridge_base && end > mbus_bridge_end) {
size -= mbus_bridge_end - base;
base = mbus_bridge_end;
}
/*
* End of CS overlaps with beginning of MBus, shrink
* CS size.
*/
if (base < mbus_bridge_base && end > mbus_bridge_base)
size -= end - mbus_bridge_base;
w = &mvebu_mbus_dram_info_nooverlap.cs[cs_nooverlap++];
w->cs_index = i;
w->mbus_attr = 0xf & ~(1 << i);
if (mbus->hw_io_coherency)
w->mbus_attr |= ATTR_HW_COHERENCY;
w->base = base;
w->size = size;
}
mvebu_mbus_dram_info_nooverlap.mbus_dram_target_id = TARGET_DDR;
mvebu_mbus_dram_info_nooverlap.num_cs = cs_nooverlap;
}
static void __init
mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus)
{
int i;
int cs;
mvebu_mbus_dram_info.mbus_dram_target_id = TARGET_DDR;
for (i = 0, cs = 0; i < 4; i++) {
u32 base = readl(mbus->sdramwins_base + DDR_BASE_CS_OFF(i));
u32 size = readl(mbus->sdramwins_base + DDR_SIZE_CS_OFF(i));
/*
* We only take care of entries for which the chip
* select is enabled, and that don't have high base
* address bits set (devices can only access the first
* 32 bits of the memory).
*/
if ((size & DDR_SIZE_ENABLED) &&
!(base & DDR_BASE_CS_HIGH_MASK)) {
struct mbus_dram_window *w;
w = &mvebu_mbus_dram_info.cs[cs++];
w->cs_index = i;
w->mbus_attr = 0xf & ~(1 << i);
if (mbus->hw_io_coherency)
w->mbus_attr |= ATTR_HW_COHERENCY;
w->base = base & DDR_BASE_CS_LOW_MASK;
w->size = (u64)(size | ~DDR_SIZE_MASK) + 1;
}
}
mvebu_mbus_dram_info.num_cs = cs;
}
static int
mvebu_mbus_default_save_cpu_target(struct mvebu_mbus_state *mbus,
u32 __iomem *store_addr)
{
int i;
for (i = 0; i < 4; i++) {
u32 base = readl(mbus->sdramwins_base + DDR_BASE_CS_OFF(i));
u32 size = readl(mbus->sdramwins_base + DDR_SIZE_CS_OFF(i));
writel(mbus->sdramwins_phys_base + DDR_BASE_CS_OFF(i),
store_addr++);
writel(base, store_addr++);
writel(mbus->sdramwins_phys_base + DDR_SIZE_CS_OFF(i),
store_addr++);
writel(size, store_addr++);
}
/* We've written 16 words to the store address */
return 16;
}
static void __init
mvebu_mbus_dove_setup_cpu_target(struct mvebu_mbus_state *mbus)
{
int i;
int cs;
mvebu_mbus_dram_info.mbus_dram_target_id = TARGET_DDR;
for (i = 0, cs = 0; i < 2; i++) {
u32 map = readl(mbus->sdramwins_base + DOVE_DDR_BASE_CS_OFF(i));
/*
* Chip select enabled?
*/
if (map & 1) {
struct mbus_dram_window *w;
w = &mvebu_mbus_dram_info.cs[cs++];
w->cs_index = i;
w->mbus_attr = 0; /* CS address decoding done inside */
/* the DDR controller, no need to */
/* provide attributes */
w->base = map & 0xff800000;
w->size = 0x100000 << (((map & 0x000f0000) >> 16) - 4);
}
}
mvebu_mbus_dram_info.num_cs = cs;
}
static int
mvebu_mbus_dove_save_cpu_target(struct mvebu_mbus_state *mbus,
u32 __iomem *store_addr)
{
int i;
for (i = 0; i < 2; i++) {
u32 map = readl(mbus->sdramwins_base + DOVE_DDR_BASE_CS_OFF(i));
writel(mbus->sdramwins_phys_base + DOVE_DDR_BASE_CS_OFF(i),
store_addr++);
writel(map, store_addr++);
}
/* We've written 4 words to the store address */
return 4;
}
int mvebu_mbus_save_cpu_target(u32 __iomem *store_addr)
{
return mbus_state.soc->save_cpu_target(&mbus_state, store_addr);
}
static const struct mvebu_mbus_soc_data armada_370_mbus_data = {
.num_wins = 20,
.has_mbus_bridge = true,
.win_cfg_offset = armada_370_xp_mbus_win_cfg_offset,
.win_remap_offset = generic_mbus_win_remap_8_offset,
.setup_cpu_target = mvebu_mbus_default_setup_cpu_target,
.show_cpu_target = mvebu_sdram_debug_show_orion,
.save_cpu_target = mvebu_mbus_default_save_cpu_target,
};
static const struct mvebu_mbus_soc_data armada_xp_mbus_data = {
.num_wins = 20,
.has_mbus_bridge = true,
.win_cfg_offset = armada_370_xp_mbus_win_cfg_offset,
.win_remap_offset = armada_xp_mbus_win_remap_offset,
.setup_cpu_target = mvebu_mbus_default_setup_cpu_target,
.show_cpu_target = mvebu_sdram_debug_show_orion,
.save_cpu_target = mvebu_mbus_default_save_cpu_target,
};
static const struct mvebu_mbus_soc_data kirkwood_mbus_data = {
.num_wins = 8,
.win_cfg_offset = generic_mbus_win_cfg_offset,
.save_cpu_target = mvebu_mbus_default_save_cpu_target,
.win_remap_offset = generic_mbus_win_remap_4_offset,
.setup_cpu_target = mvebu_mbus_default_setup_cpu_target,
.show_cpu_target = mvebu_sdram_debug_show_orion,
};
static const struct mvebu_mbus_soc_data dove_mbus_data = {
.num_wins = 8,
.win_cfg_offset = generic_mbus_win_cfg_offset,
.save_cpu_target = mvebu_mbus_dove_save_cpu_target,
.win_remap_offset = generic_mbus_win_remap_4_offset,
.setup_cpu_target = mvebu_mbus_dove_setup_cpu_target,
.show_cpu_target = mvebu_sdram_debug_show_dove,
};
/*
* Some variants of Orion5x have 4 remappable windows, some other have
* only two of them.
*/
static const struct mvebu_mbus_soc_data orion5x_4win_mbus_data = {
.num_wins = 8,
.win_cfg_offset = generic_mbus_win_cfg_offset,
.save_cpu_target = mvebu_mbus_default_save_cpu_target,
.win_remap_offset = generic_mbus_win_remap_4_offset,
.setup_cpu_target = mvebu_mbus_default_setup_cpu_target,
.show_cpu_target = mvebu_sdram_debug_show_orion,
};
static const struct mvebu_mbus_soc_data orion5x_2win_mbus_data = {
.num_wins = 8,
.win_cfg_offset = generic_mbus_win_cfg_offset,
.save_cpu_target = mvebu_mbus_default_save_cpu_target,
.win_remap_offset = generic_mbus_win_remap_2_offset,
.setup_cpu_target = mvebu_mbus_default_setup_cpu_target,
.show_cpu_target = mvebu_sdram_debug_show_orion,
};
static const struct mvebu_mbus_soc_data mv78xx0_mbus_data = {
.num_wins = 14,
.win_cfg_offset = mv78xx0_mbus_win_cfg_offset,
.save_cpu_target = mvebu_mbus_default_save_cpu_target,
.win_remap_offset = generic_mbus_win_remap_8_offset,
.setup_cpu_target = mvebu_mbus_default_setup_cpu_target,
.show_cpu_target = mvebu_sdram_debug_show_orion,
};
static const struct of_device_id of_mvebu_mbus_ids[] = {
{ .compatible = "marvell,armada370-mbus",
.data = &armada_370_mbus_data, },
{ .compatible = "marvell,armada375-mbus",
.data = &armada_xp_mbus_data, },
{ .compatible = "marvell,armada380-mbus",
.data = &armada_xp_mbus_data, },
{ .compatible = "marvell,armadaxp-mbus",
.data = &armada_xp_mbus_data, },
{ .compatible = "marvell,kirkwood-mbus",
.data = &kirkwood_mbus_data, },
{ .compatible = "marvell,dove-mbus",
.data = &dove_mbus_data, },
{ .compatible = "marvell,orion5x-88f5281-mbus",
.data = &orion5x_4win_mbus_data, },
{ .compatible = "marvell,orion5x-88f5182-mbus",
.data = &orion5x_2win_mbus_data, },
{ .compatible = "marvell,orion5x-88f5181-mbus",
.data = &orion5x_2win_mbus_data, },
{ .compatible = "marvell,orion5x-88f6183-mbus",
.data = &orion5x_4win_mbus_data, },
{ .compatible = "marvell,mv78xx0-mbus",
.data = &mv78xx0_mbus_data, },
{ },
};
/*
* Public API of the driver
*/
int mvebu_mbus_add_window_remap_by_id(unsigned int target,
unsigned int attribute,
phys_addr_t base, size_t size,
phys_addr_t remap)
{
struct mvebu_mbus_state *s = &mbus_state;
if (!mvebu_mbus_window_conflicts(s, base, size, target, attribute)) {
pr_err("cannot add window '%x:%x', conflicts with another window\n",
target, attribute);
return -EINVAL;
}
return mvebu_mbus_alloc_window(s, base, size, remap, target, attribute);
}
EXPORT_SYMBOL_GPL(mvebu_mbus_add_window_remap_by_id);
int mvebu_mbus_add_window_by_id(unsigned int target, unsigned int attribute,
phys_addr_t base, size_t size)
{
return mvebu_mbus_add_window_remap_by_id(target, attribute, base,
size, MVEBU_MBUS_NO_REMAP);
}
EXPORT_SYMBOL_GPL(mvebu_mbus_add_window_by_id);
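/*
* Illustrative example (the target/attribute values below are made up and
* depend on the SoC and the device being mapped): platform code could map
* a 16 MiB device window and later remove it with:
*
*	ret = mvebu_mbus_add_window_by_id(0x01, 0x2f, 0xf0000000, SZ_16M);
*	...
*	mvebu_mbus_del_window(0xf0000000, SZ_16M);
*/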
int mvebu_mbus_del_window(phys_addr_t base, size_t size)
{
int win;
win = mvebu_mbus_find_window(&mbus_state, base, size);
if (win < 0)
return win;
mvebu_mbus_disable_window(&mbus_state, win);
return 0;
}
EXPORT_SYMBOL_GPL(mvebu_mbus_del_window);
void mvebu_mbus_get_pcie_mem_aperture(struct resource *res)
{
if (!res)
return;
*res = mbus_state.pcie_mem_aperture;
}
EXPORT_SYMBOL_GPL(mvebu_mbus_get_pcie_mem_aperture);
void mvebu_mbus_get_pcie_io_aperture(struct resource *res)
{
if (!res)
return;
*res = mbus_state.pcie_io_aperture;
}
EXPORT_SYMBOL_GPL(mvebu_mbus_get_pcie_io_aperture);
int mvebu_mbus_get_dram_win_info(phys_addr_t phyaddr, u8 *target, u8 *attr)
{
const struct mbus_dram_target_info *dram;
int i;
/* Get dram info */
dram = mv_mbus_dram_info();
if (!dram) {
pr_err("missing DRAM information\n");
return -ENODEV;
}
/* Try to find matching DRAM window for phyaddr */
for (i = 0; i < dram->num_cs; i++) {
const struct mbus_dram_window *cs = dram->cs + i;
if (cs->base <= phyaddr &&
phyaddr <= (cs->base + cs->size - 1)) {
*target = dram->mbus_dram_target_id;
*attr = cs->mbus_attr;
return 0;
}
}
pr_err("invalid dram address %pa\n", &phyaddr);
return -EINVAL;
}
EXPORT_SYMBOL_GPL(mvebu_mbus_get_dram_win_info);
int mvebu_mbus_get_io_win_info(phys_addr_t phyaddr, u32 *size, u8 *target,
u8 *attr)
{
int win;
for (win = 0; win < mbus_state.soc->num_wins; win++) {
u64 wbase;
int enabled;
mvebu_mbus_read_window(&mbus_state, win, &enabled, &wbase,
size, target, attr, NULL);
if (!enabled)
continue;
if (wbase <= phyaddr && phyaddr <= wbase + *size)
return win;
}
return -EINVAL;
}
EXPORT_SYMBOL_GPL(mvebu_mbus_get_io_win_info);
static __init int mvebu_mbus_debugfs_init(void)
{
struct mvebu_mbus_state *s = &mbus_state;
/*
* If no base has been initialized, it doesn't make sense to
* register the debugfs entries. We may be on a multiplatform
* kernel that isn't running a Marvell EBU SoC.
*/
if (!s->mbuswins_base)
return 0;
s->debugfs_root = debugfs_create_dir("mvebu-mbus", NULL);
if (s->debugfs_root) {
s->debugfs_sdram = debugfs_create_file("sdram", S_IRUGO,
s->debugfs_root, NULL,
&mvebu_sdram_debug_fops);
s->debugfs_devs = debugfs_create_file("devices", S_IRUGO,
s->debugfs_root, NULL,
&mvebu_devs_debug_fops);
}
return 0;
}
fs_initcall(mvebu_mbus_debugfs_init);
static int mvebu_mbus_suspend(void)
{
struct mvebu_mbus_state *s = &mbus_state;
int win;
if (!s->mbusbridge_base)
return -ENODEV;
for (win = 0; win < s->soc->num_wins; win++) {
void __iomem *addr = s->mbuswins_base +
s->soc->win_cfg_offset(win);
void __iomem *addr_rmp;
s->wins[win].base = readl(addr + WIN_BASE_OFF);
s->wins[win].ctrl = readl(addr + WIN_CTRL_OFF);
if (!mvebu_mbus_window_is_remappable(s, win))
continue;
addr_rmp = s->mbuswins_base +
s->soc->win_remap_offset(win);
s->wins[win].remap_lo = readl(addr_rmp + WIN_REMAP_LO_OFF);
s->wins[win].remap_hi = readl(addr_rmp + WIN_REMAP_HI_OFF);
}
s->mbus_bridge_ctrl = readl(s->mbusbridge_base +
MBUS_BRIDGE_CTRL_OFF);
s->mbus_bridge_base = readl(s->mbusbridge_base +
MBUS_BRIDGE_BASE_OFF);
return 0;
}
static void mvebu_mbus_resume(void)
{
struct mvebu_mbus_state *s = &mbus_state;
int win;
writel(s->mbus_bridge_ctrl,
s->mbusbridge_base + MBUS_BRIDGE_CTRL_OFF);
writel(s->mbus_bridge_base,
s->mbusbridge_base + MBUS_BRIDGE_BASE_OFF);
for (win = 0; win < s->soc->num_wins; win++) {
void __iomem *addr = s->mbuswins_base +
s->soc->win_cfg_offset(win);
void __iomem *addr_rmp;
writel(s->wins[win].base, addr + WIN_BASE_OFF);
writel(s->wins[win].ctrl, addr + WIN_CTRL_OFF);
if (!mvebu_mbus_window_is_remappable(s, win))
continue;
addr_rmp = s->mbuswins_base +
s->soc->win_remap_offset(win);
writel(s->wins[win].remap_lo, addr_rmp + WIN_REMAP_LO_OFF);
writel(s->wins[win].remap_hi, addr_rmp + WIN_REMAP_HI_OFF);
}
}
static struct syscore_ops mvebu_mbus_syscore_ops = {
.suspend = mvebu_mbus_suspend,
.resume = mvebu_mbus_resume,
};
static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus,
phys_addr_t mbuswins_phys_base,
size_t mbuswins_size,
phys_addr_t sdramwins_phys_base,
size_t sdramwins_size,
phys_addr_t mbusbridge_phys_base,
size_t mbusbridge_size,
bool is_coherent)
{
int win;
mbus->mbuswins_base = ioremap(mbuswins_phys_base, mbuswins_size);
if (!mbus->mbuswins_base)
return -ENOMEM;
mbus->sdramwins_base = ioremap(sdramwins_phys_base, sdramwins_size);
if (!mbus->sdramwins_base) {
iounmap(mbus->mbuswins_base);
return -ENOMEM;
}
mbus->sdramwins_phys_base = sdramwins_phys_base;
if (mbusbridge_phys_base) {
mbus->mbusbridge_base = ioremap(mbusbridge_phys_base,
mbusbridge_size);
if (!mbus->mbusbridge_base) {
iounmap(mbus->sdramwins_base);
iounmap(mbus->mbuswins_base);
return -ENOMEM;
}
} else
mbus->mbusbridge_base = NULL;
for (win = 0; win < mbus->soc->num_wins; win++)
mvebu_mbus_disable_window(mbus, win);
mbus->soc->setup_cpu_target(mbus);
mvebu_mbus_setup_cpu_target_nooverlap(mbus);
if (is_coherent)
writel(UNIT_SYNC_BARRIER_ALL,
mbus->mbuswins_base + UNIT_SYNC_BARRIER_OFF);
register_syscore_ops(&mvebu_mbus_syscore_ops);
return 0;
}
int __init mvebu_mbus_init(const char *soc, phys_addr_t mbuswins_phys_base,
size_t mbuswins_size,
phys_addr_t sdramwins_phys_base,
size_t sdramwins_size)
{
const struct of_device_id *of_id;
for (of_id = of_mvebu_mbus_ids; of_id->compatible[0]; of_id++)
if (!strcmp(of_id->compatible, soc))
break;
if (!of_id->compatible[0]) {
pr_err("could not find a matching SoC family\n");
return -ENODEV;
}
mbus_state.soc = of_id->data;
return mvebu_mbus_common_init(&mbus_state,
mbuswins_phys_base,
mbuswins_size,
sdramwins_phys_base,
sdramwins_size, 0, 0, false);
}
#ifdef CONFIG_OF
/*
* The window IDs in the ranges DT property have the following format:
* - bits 28 to 31: MBus custom field
* - bits 24 to 27: window target ID
* - bits 16 to 23: window attribute ID
* - bits 0 to 15: unused
*/
#define CUSTOM(id) (((id) & 0xF0000000) >> 24)
#define TARGET(id) (((id) & 0x0F000000) >> 24)
#define ATTR(id) (((id) & 0x00FF0000) >> 16)
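/*
* For example, a ranges entry with a bus address of 0x01e0000000000000 has
* windowid 0x01e00000, which decodes to custom field 0, target 0x1 and
* attribute 0xe0.
*/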
static int __init mbus_dt_setup_win(struct mvebu_mbus_state *mbus,
u32 base, u32 size,
u8 target, u8 attr)
{
if (!mvebu_mbus_window_conflicts(mbus, base, size, target, attr)) {
pr_err("cannot add window '%04x:%04x', conflicts with another window\n",
target, attr);
return -EBUSY;
}
if (mvebu_mbus_alloc_window(mbus, base, size, MVEBU_MBUS_NO_REMAP,
target, attr)) {
pr_err("cannot add window '%04x:%04x', too many windows\n",
target, attr);
return -ENOMEM;
}
return 0;
}
static int __init mbus_dt_setup(struct mvebu_mbus_state *mbus,
struct device_node *np)
{
int ret;
struct of_range_parser parser;
struct of_range range;
ret = of_range_parser_init(&parser, np);
if (ret < 0)
return 0;
for_each_of_range(&parser, &range) {
u32 windowid = upper_32_bits(range.bus_addr);
u8 target, attr;
/*
* An entry with a non-zero custom field does not
* correspond to a static window, so skip it.
*/
if (CUSTOM(windowid))
continue;
target = TARGET(windowid);
attr = ATTR(windowid);
ret = mbus_dt_setup_win(mbus, range.cpu_addr, range.size, target, attr);
if (ret < 0)
return ret;
}
return 0;
}
static void __init mvebu_mbus_get_pcie_resources(struct device_node *np,
struct resource *mem,
struct resource *io)
{
u32 reg[2];
int ret;
/*
* These are optional, so we make sure that resource_size(x) will
* return 0.
*/
memset(mem, 0, sizeof(struct resource));
mem->end = -1;
memset(io, 0, sizeof(struct resource));
io->end = -1;
ret = of_property_read_u32_array(np, "pcie-mem-aperture", reg, ARRAY_SIZE(reg));
if (!ret) {
mem->start = reg[0];
mem->end = mem->start + reg[1] - 1;
mem->flags = IORESOURCE_MEM;
}
ret = of_property_read_u32_array(np, "pcie-io-aperture", reg, ARRAY_SIZE(reg));
if (!ret) {
io->start = reg[0];
io->end = io->start + reg[1] - 1;
io->flags = IORESOURCE_IO;
}
}
int __init mvebu_mbus_dt_init(bool is_coherent)
{
struct resource mbuswins_res, sdramwins_res, mbusbridge_res;
struct device_node *np, *controller;
const struct of_device_id *of_id;
const __be32 *prop;
int ret;
np = of_find_matching_node_and_match(NULL, of_mvebu_mbus_ids, &of_id);
if (!np) {
pr_err("could not find a matching SoC family\n");
return -ENODEV;
}
mbus_state.soc = of_id->data;
prop = of_get_property(np, "controller", NULL);
if (!prop) {
pr_err("required 'controller' property missing\n");
return -EINVAL;
}
controller = of_find_node_by_phandle(be32_to_cpup(prop));
if (!controller) {
pr_err("could not find an 'mbus-controller' node\n");
return -ENODEV;
}
if (of_address_to_resource(controller, 0, &mbuswins_res)) {
pr_err("cannot get MBUS register address\n");
return -EINVAL;
}
if (of_address_to_resource(controller, 1, &sdramwins_res)) {
pr_err("cannot get SDRAM register address\n");
return -EINVAL;
}
/*
* Set the resource to 0 so that it can be left unmapped by
* mvebu_mbus_common_init() if the DT doesn't carry the
* necessary information. This is needed to preserve backward
* compatibility.
*/
memset(&mbusbridge_res, 0, sizeof(mbusbridge_res));
if (mbus_state.soc->has_mbus_bridge) {
if (of_address_to_resource(controller, 2, &mbusbridge_res))
pr_warn(FW_WARN "deprecated mbus-mvebu Device Tree, suspend/resume will not work\n");
}
mbus_state.hw_io_coherency = is_coherent;
/* Get optional pcie-{mem,io}-aperture properties */
mvebu_mbus_get_pcie_resources(np, &mbus_state.pcie_mem_aperture,
&mbus_state.pcie_io_aperture);
ret = mvebu_mbus_common_init(&mbus_state,
mbuswins_res.start,
resource_size(&mbuswins_res),
sdramwins_res.start,
resource_size(&sdramwins_res),
mbusbridge_res.start,
resource_size(&mbusbridge_res),
is_coherent);
if (ret)
return ret;
/* Setup statically declared windows in the DT */
return mbus_dt_setup(&mbus_state, np);
}
#endif
| linux-master | drivers/bus/mvebu-mbus.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2015 Masahiro Yamada <[email protected]>
*/
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
/* System Bus Controller registers */
#define UNIPHIER_SBC_BASE 0x100 /* base address of bank0 space */
#define UNIPHIER_SBC_BASE_BE BIT(0) /* bank_enable */
#define UNIPHIER_SBC_CTRL0 0x200 /* timing parameter 0 of bank0 */
#define UNIPHIER_SBC_CTRL1 0x204 /* timing parameter 1 of bank0 */
#define UNIPHIER_SBC_CTRL2 0x208 /* timing parameter 2 of bank0 */
#define UNIPHIER_SBC_CTRL3 0x20c /* timing parameter 3 of bank0 */
#define UNIPHIER_SBC_CTRL4 0x300 /* timing parameter 4 of bank0 */
#define UNIPHIER_SBC_STRIDE 0x10 /* register stride to next bank */
#define UNIPHIER_SBC_NR_BANKS 8 /* number of banks (chip select) */
#define UNIPHIER_SBC_BASE_DUMMY 0xffffffff /* data to squash bank 0, 1 */
struct uniphier_system_bus_bank {
u32 base;
u32 end;
};
struct uniphier_system_bus_priv {
struct device *dev;
void __iomem *membase;
struct uniphier_system_bus_bank bank[UNIPHIER_SBC_NR_BANKS];
};
static int uniphier_system_bus_add_bank(struct uniphier_system_bus_priv *priv,
int bank, u32 addr, u64 paddr, u32 size)
{
u64 end, mask;
dev_dbg(priv->dev,
"range found: bank = %d, addr = %08x, paddr = %08llx, size = %08x\n",
bank, addr, paddr, size);
if (bank >= ARRAY_SIZE(priv->bank)) {
dev_err(priv->dev, "unsupported bank number %d\n", bank);
return -EINVAL;
}
if (priv->bank[bank].base || priv->bank[bank].end) {
dev_err(priv->dev,
"range for bank %d has already been specified\n", bank);
return -EINVAL;
}
if (paddr > U32_MAX) {
dev_err(priv->dev, "base address %llx is too high\n", paddr);
return -EINVAL;
}
end = paddr + size;
if (addr > paddr) {
dev_err(priv->dev,
"base %08x cannot be mapped to %08llx of parent\n",
addr, paddr);
return -EINVAL;
}
paddr -= addr;
paddr = round_down(paddr, 0x00020000);
end = round_up(end, 0x00020000);
if (end > U32_MAX) {
dev_err(priv->dev, "end address %08llx is too high\n", end);
return -EINVAL;
}
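/*
* Grow the range to the smallest naturally-aligned power-of-two region
* that covers it. For example, paddr = 0x42000000 and end = 0x43020000
* give mask = 0x02000000, i.e. the bank 0x42000000 - 0x44000000.
*/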
mask = paddr ^ (end - 1);
mask = roundup_pow_of_two(mask);
paddr = round_down(paddr, mask);
end = round_up(end, mask);
priv->bank[bank].base = paddr;
priv->bank[bank].end = end;
dev_dbg(priv->dev, "range added: bank = %d, addr = %08x, end = %08x\n",
bank, priv->bank[bank].base, priv->bank[bank].end);
return 0;
}
static int uniphier_system_bus_check_overlap(
const struct uniphier_system_bus_priv *priv)
{
int i, j;
for (i = 0; i < ARRAY_SIZE(priv->bank); i++) {
for (j = i + 1; j < ARRAY_SIZE(priv->bank); j++) {
if (priv->bank[i].end > priv->bank[j].base &&
priv->bank[i].base < priv->bank[j].end) {
dev_err(priv->dev,
"region overlap between bank%d and bank%d\n",
i, j);
return -EINVAL;
}
}
}
return 0;
}
static void uniphier_system_bus_check_boot_swap(
struct uniphier_system_bus_priv *priv)
{
void __iomem *base_reg = priv->membase + UNIPHIER_SBC_BASE;
int is_swapped;
is_swapped = !(readl(base_reg) & UNIPHIER_SBC_BASE_BE);
dev_dbg(priv->dev, "Boot Swap: %s\n", is_swapped ? "on" : "off");
/*
* If BOOT_SWAP was asserted on power-on-reset, CS0 and CS1 are
* swapped. In this case, bank0 and bank1 should be swapped as well.
*/
if (is_swapped)
swap(priv->bank[0], priv->bank[1]);
}
static void uniphier_system_bus_set_reg(
const struct uniphier_system_bus_priv *priv)
{
void __iomem *base_reg = priv->membase + UNIPHIER_SBC_BASE;
u32 base, end, mask, val;
int i;
for (i = 0; i < ARRAY_SIZE(priv->bank); i++) {
base = priv->bank[i].base;
end = priv->bank[i].end;
if (base == end) {
/*
* If SBC_BASE0 or SBC_BASE1 is set to zero, any access
* to the system bus space is routed to bank 0 (if boot
* swap is off) or bank 1 (if boot swap is on). This
* means that CPUs cannot get access to bank 2 or
* later. In other words, bank 0/1 cannot be disabled
* even if their bank_enable bit is cleared. This
* seems odd, but it is how this hardware behaves.
* As a workaround, dummy data (0xffffffff) should be
* set when bank 0/1 is unused. Bank 2 and later can
* simply be disabled by clearing the bank_enable bit.
*/
if (i < 2)
val = UNIPHIER_SBC_BASE_DUMMY;
else
val = 0;
} else {
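/*
* Encode the bank window: bits [31:17] hold the base address,
* bits [15:1] hold the inverted region mask (128 KB granularity)
* and bit 0 is the bank_enable bit.
*/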
mask = base ^ (end - 1);
val = base & 0xfffe0000;
val |= (~mask >> 16) & 0xfffe;
val |= UNIPHIER_SBC_BASE_BE;
}
dev_dbg(priv->dev, "SBC_BASE[%d] = 0x%08x\n", i, val);
writel(val, base_reg + UNIPHIER_SBC_STRIDE * i);
}
}
static int uniphier_system_bus_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct uniphier_system_bus_priv *priv;
struct of_range_parser parser;
struct of_range range;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->membase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->membase))
return PTR_ERR(priv->membase);
priv->dev = dev;
ret = of_range_parser_init(&parser, dev->of_node);
if (ret)
return ret;
for_each_of_range(&parser, &range) {
if (range.cpu_addr == OF_BAD_ADDR)
return -EINVAL;
ret = uniphier_system_bus_add_bank(priv,
upper_32_bits(range.bus_addr),
lower_32_bits(range.bus_addr),
range.cpu_addr, range.size);
if (ret)
return ret;
}
ret = uniphier_system_bus_check_overlap(priv);
if (ret)
return ret;
uniphier_system_bus_check_boot_swap(priv);
uniphier_system_bus_set_reg(priv);
platform_set_drvdata(pdev, priv);
/* Now, the bus is configured. Populate platform_devices below it */
return of_platform_default_populate(dev->of_node, NULL, dev);
}
static int __maybe_unused uniphier_system_bus_resume(struct device *dev)
{
uniphier_system_bus_set_reg(dev_get_drvdata(dev));
return 0;
}
static const struct dev_pm_ops uniphier_system_bus_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(NULL, uniphier_system_bus_resume)
};
static const struct of_device_id uniphier_system_bus_match[] = {
{ .compatible = "socionext,uniphier-system-bus" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, uniphier_system_bus_match);
static struct platform_driver uniphier_system_bus_driver = {
.probe = uniphier_system_bus_probe,
.driver = {
.name = "uniphier-system-bus",
.of_match_table = uniphier_system_bus_match,
.pm = &uniphier_system_bus_pm_ops,
},
};
module_platform_driver(uniphier_system_bus_driver);
MODULE_AUTHOR("Masahiro Yamada <[email protected]>");
MODULE_DESCRIPTION("UniPhier System Bus driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/bus/uniphier-system-bus.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* OMAP L3 Interconnect error handling driver
*
* Copyright (C) 2011-2015 Texas Instruments Incorporated - http://www.ti.com/
* Santosh Shilimkar <[email protected]>
* Sricharan <[email protected]>
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "omap_l3_noc.h"
/**
* l3_handle_target() - Handle Target specific parse and reporting
* @l3: pointer to l3 struct
* @base: base address of clkdm
* @flag_mux: flagmux corresponding to the event
* @err_src: error source index of the slave (target)
*
* This does the second part of the error interrupt handling:
* 3) Parse in the slave information
* 4) Print the logged information.
* 5) Add dump stack to provide kernel trace.
* 6) Clear the source if known.
*
* This handles two types of errors:
* 1) Custom errors in L3 :
* Target like DMM/FW/EMIF generates SRESP=ERR error
* 2) Standard L3 error:
* - Unsupported CMD.
* L3 tries to access target while it is idle
* - OCP disconnect.
* - Address hole error:
* If DSS/ISS/FDIF/USBHOSTFS access a target where they
* do not have connectivity, the error is logged in
* their default target which is DMM2.
*
* On High Secure devices, firewall errors are possible and those
* can be trapped as well. But the trapping is implemented as part
* of the secure software and hence need not be implemented here.
*/
static int l3_handle_target(struct omap_l3 *l3, void __iomem *base,
struct l3_flagmux_data *flag_mux, int err_src)
{
int k;
u32 std_err_main, clear, masterid;
u8 op_code, m_req_info;
void __iomem *l3_targ_base;
void __iomem *l3_targ_stderr, *l3_targ_slvofslsb, *l3_targ_mstaddr;
void __iomem *l3_targ_hdr, *l3_targ_info;
struct l3_target_data *l3_targ_inst;
struct l3_masters_data *master;
char *target_name, *master_name = "UN IDENTIFIED";
char *err_description;
char err_string[30] = { 0 };
char info_string[60] = { 0 };
/* We DO NOT expect err_src to go out of bounds */
BUG_ON(err_src > MAX_CLKDM_TARGETS);
if (err_src < flag_mux->num_targ_data) {
l3_targ_inst = &flag_mux->l3_targ[err_src];
target_name = l3_targ_inst->name;
l3_targ_base = base + l3_targ_inst->offset;
} else {
target_name = L3_TARGET_NOT_SUPPORTED;
}
if (target_name == L3_TARGET_NOT_SUPPORTED)
return -ENODEV;
/* Read the stderrlog_main_source from clk domain */
l3_targ_stderr = l3_targ_base + L3_TARG_STDERRLOG_MAIN;
l3_targ_slvofslsb = l3_targ_base + L3_TARG_STDERRLOG_SLVOFSLSB;
std_err_main = readl_relaxed(l3_targ_stderr);
switch (std_err_main & CUSTOM_ERROR) {
case STANDARD_ERROR:
err_description = "Standard";
snprintf(err_string, sizeof(err_string),
": At Address: 0x%08X ",
readl_relaxed(l3_targ_slvofslsb));
l3_targ_mstaddr = l3_targ_base + L3_TARG_STDERRLOG_MSTADDR;
l3_targ_hdr = l3_targ_base + L3_TARG_STDERRLOG_HDR;
l3_targ_info = l3_targ_base + L3_TARG_STDERRLOG_INFO;
break;
case CUSTOM_ERROR:
err_description = "Custom";
l3_targ_mstaddr = l3_targ_base +
L3_TARG_STDERRLOG_CINFO_MSTADDR;
l3_targ_hdr = l3_targ_base + L3_TARG_STDERRLOG_CINFO_OPCODE;
l3_targ_info = l3_targ_base + L3_TARG_STDERRLOG_CINFO_INFO;
break;
default:
/* Nothing to be handled here as of now */
return 0;
}
/* STDERRLOG_MSTADDR Stores the NTTP master address. */
masterid = (readl_relaxed(l3_targ_mstaddr) &
l3->mst_addr_mask) >> __ffs(l3->mst_addr_mask);
for (k = 0, master = l3->l3_masters; k < l3->num_masters;
k++, master++) {
if (masterid == master->id) {
master_name = master->name;
break;
}
}
op_code = readl_relaxed(l3_targ_hdr) & 0x7;
m_req_info = readl_relaxed(l3_targ_info) & 0xF;
snprintf(info_string, sizeof(info_string),
": %s in %s mode during %s access",
(m_req_info & BIT(0)) ? "Opcode Fetch" : "Data Access",
(m_req_info & BIT(1)) ? "Supervisor" : "User",
(m_req_info & BIT(3)) ? "Debug" : "Functional");
WARN(true,
"%s:L3 %s Error: MASTER %s TARGET %s (%s)%s%s\n",
dev_name(l3->dev),
err_description,
master_name, target_name,
l3_transaction_type[op_code],
err_string, info_string);
/* clear the std error log*/
clear = std_err_main | CLEAR_STDERR_LOG;
writel_relaxed(clear, l3_targ_stderr);
return 0;
}
/**
* l3_interrupt_handler() - interrupt handler for l3 events
* @irq: irq number
* @_l3: pointer to l3 structure
*
* Interrupt Handler for L3 error detection.
* 1) Identify the L3 clockdomain partition to which the error belongs.
* 2) Identify the slave where the error information is logged
* ... handle the slave event..
* 7) if the slave is unknown, mask out the slave.
*/
static irqreturn_t l3_interrupt_handler(int irq, void *_l3)
{
struct omap_l3 *l3 = _l3;
int inttype, i, ret;
int err_src = 0;
u32 err_reg, mask_val;
void __iomem *base, *mask_reg;
struct l3_flagmux_data *flag_mux;
/* Get the Type of interrupt */
inttype = irq == l3->app_irq ? L3_APPLICATION_ERROR : L3_DEBUG_ERROR;
for (i = 0; i < l3->num_modules; i++) {
/*
* Read the regerr register of the clock domain
* to determine the source
*/
base = l3->l3_base[i];
flag_mux = l3->l3_flagmux[i];
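/*
* The application and debug flavors of the REGERR0 (and MASK0)
* registers are 8 bytes apart, hence the (inttype << 3) offset.
*/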
err_reg = readl_relaxed(base + flag_mux->offset +
L3_FLAGMUX_REGERR0 + (inttype << 3));
err_reg &= ~(inttype ? flag_mux->mask_app_bits :
flag_mux->mask_dbg_bits);
/* Get the corresponding error and analyse */
if (err_reg) {
/* Identify the source from control status register */
err_src = __ffs(err_reg);
ret = l3_handle_target(l3, base, flag_mux, err_src);
/*
* Certain platforms may have "undocumented" status
* pending on boot. So don't generate a severe warning
* here. Just mask it off to prevent the error from
* recurring and locking up the system.
*/
if (ret) {
dev_err(l3->dev,
"L3 %s error: target %d mod:%d %s\n",
inttype ? "debug" : "application",
err_src, i, "(unclearable)");
mask_reg = base + flag_mux->offset +
L3_FLAGMUX_MASK0 + (inttype << 3);
mask_val = readl_relaxed(mask_reg);
mask_val &= ~(1 << err_src);
writel_relaxed(mask_val, mask_reg);
/* Mark these bits as to be ignored */
if (inttype)
flag_mux->mask_app_bits |= 1 << err_src;
else
flag_mux->mask_dbg_bits |= 1 << err_src;
}
/* Error found so break the for loop */
return IRQ_HANDLED;
}
}
dev_err(l3->dev, "L3 %s IRQ not handled!!\n",
inttype ? "debug" : "application");
return IRQ_NONE;
}
static const struct of_device_id l3_noc_match[] = {
{.compatible = "ti,omap4-l3-noc", .data = &omap4_l3_data},
{.compatible = "ti,omap5-l3-noc", .data = &omap5_l3_data},
{.compatible = "ti,dra7-l3-noc", .data = &dra_l3_data},
{.compatible = "ti,am4372-l3-noc", .data = &am4372_l3_data},
{},
};
MODULE_DEVICE_TABLE(of, l3_noc_match);
static int omap_l3_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id;
static struct omap_l3 *l3;
int ret, i, res_idx;
of_id = of_match_device(l3_noc_match, &pdev->dev);
if (!of_id) {
dev_err(&pdev->dev, "OF data missing\n");
return -EINVAL;
}
l3 = devm_kzalloc(&pdev->dev, sizeof(*l3), GFP_KERNEL);
if (!l3)
return -ENOMEM;
memcpy(l3, of_id->data, sizeof(*l3));
l3->dev = &pdev->dev;
platform_set_drvdata(pdev, l3);
/* Get mem resources */
for (i = 0, res_idx = 0; i < l3->num_modules; i++) {
struct resource *res;
if (l3->l3_base[i] == L3_BASE_IS_SUBMODULE) {
/* First entry cannot be submodule */
BUG_ON(i == 0);
l3->l3_base[i] = l3->l3_base[i - 1];
continue;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx);
l3->l3_base[i] = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(l3->l3_base[i])) {
dev_err(l3->dev, "ioremap %d failed\n", i);
return PTR_ERR(l3->l3_base[i]);
}
res_idx++;
}
/*
* Setup interrupt Handlers
*/
l3->debug_irq = platform_get_irq(pdev, 0);
ret = devm_request_irq(l3->dev, l3->debug_irq, l3_interrupt_handler,
IRQF_NO_THREAD, "l3-dbg-irq", l3);
if (ret) {
dev_err(l3->dev, "request_irq failed for %d\n",
l3->debug_irq);
return ret;
}
l3->app_irq = platform_get_irq(pdev, 1);
ret = devm_request_irq(l3->dev, l3->app_irq, l3_interrupt_handler,
IRQF_NO_THREAD, "l3-app-irq", l3);
if (ret)
dev_err(l3->dev, "request_irq failed for %d\n", l3->app_irq);
return ret;
}
#ifdef CONFIG_PM_SLEEP
/**
* l3_resume_noirq() - resume function for l3_noc
* @dev: pointer to l3_noc device structure
*
* We only have the resume handler since we
* have already maintained the delta register
* configuration as part of configuring the system
*/
static int l3_resume_noirq(struct device *dev)
{
struct omap_l3 *l3 = dev_get_drvdata(dev);
int i;
struct l3_flagmux_data *flag_mux;
void __iomem *base, *mask_regx = NULL;
u32 mask_val;
for (i = 0; i < l3->num_modules; i++) {
base = l3->l3_base[i];
flag_mux = l3->l3_flagmux[i];
if (!flag_mux->mask_app_bits && !flag_mux->mask_dbg_bits)
continue;
mask_regx = base + flag_mux->offset + L3_FLAGMUX_MASK0 +
(L3_APPLICATION_ERROR << 3);
mask_val = readl_relaxed(mask_regx);
mask_val &= ~(flag_mux->mask_app_bits);
writel_relaxed(mask_val, mask_regx);
mask_regx = base + flag_mux->offset + L3_FLAGMUX_MASK0 +
(L3_DEBUG_ERROR << 3);
mask_val = readl_relaxed(mask_regx);
mask_val &= ~(flag_mux->mask_dbg_bits);
writel_relaxed(mask_val, mask_regx);
}
/* Dummy read to force OCP barrier */
if (mask_regx)
(void)readl(mask_regx);
return 0;
}
static const struct dev_pm_ops l3_dev_pm_ops = {
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, l3_resume_noirq)
};
#define L3_DEV_PM_OPS (&l3_dev_pm_ops)
#else
#define L3_DEV_PM_OPS NULL
#endif
static struct platform_driver omap_l3_driver = {
.probe = omap_l3_probe,
.driver = {
.name = "omap_l3_noc",
.pm = L3_DEV_PM_OPS,
.of_match_table = of_match_ptr(l3_noc_match),
},
};
static int __init omap_l3_init(void)
{
return platform_driver_register(&omap_l3_driver);
}
postcore_initcall_sync(omap_l3_init);
static void __exit omap_l3_exit(void)
{
platform_driver_unregister(&omap_l3_driver);
}
module_exit(omap_l3_exit);
MODULE_AUTHOR("Santosh Shilimkar");
MODULE_AUTHOR("Sricharan R");
MODULE_DESCRIPTION("OMAP L3 Interconnect error handling driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/bus/omap_l3_noc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ti-sysc.c - Texas Instruments sysc interconnect target driver
*/
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>
#include <linux/timekeeping.h>
#include <linux/iopoll.h>
#include <linux/platform_data/ti-sysc.h>
#include <dt-bindings/bus/ti-sysc.h>
#define DIS_ISP BIT(2)
#define DIS_IVA BIT(1)
#define DIS_SGX BIT(0)
#define SOC_FLAG(match, flag) { .machine = match, .data = (void *)(flag), }
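/* SOC_FLAG() is presumably used to build soc_device_attribute match tables later in this driver */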
#define MAX_MODULE_SOFTRESET_WAIT 10000
enum sysc_soc {
SOC_UNKNOWN,
SOC_2420,
SOC_2430,
SOC_3430,
SOC_3630,
SOC_4430,
SOC_4460,
SOC_4470,
SOC_5430,
SOC_AM3,
SOC_AM4,
SOC_DRA7,
};
struct sysc_address {
unsigned long base;
struct list_head node;
};
struct sysc_module {
struct sysc *ddata;
struct list_head node;
};
struct sysc_soc_info {
unsigned long general_purpose:1;
enum sysc_soc soc;
struct mutex list_lock; /* disabled and restored modules list lock */
struct list_head disabled_modules;
struct list_head restored_modules;
struct notifier_block nb;
};
enum sysc_clocks {
SYSC_FCK,
SYSC_ICK,
SYSC_OPTFCK0,
SYSC_OPTFCK1,
SYSC_OPTFCK2,
SYSC_OPTFCK3,
SYSC_OPTFCK4,
SYSC_OPTFCK5,
SYSC_OPTFCK6,
SYSC_OPTFCK7,
SYSC_MAX_CLOCKS,
};
static struct sysc_soc_info *sysc_soc;
static const char * const reg_names[] = { "rev", "sysc", "syss", };
static const char * const clock_names[SYSC_MAX_CLOCKS] = {
"fck", "ick", "opt0", "opt1", "opt2", "opt3", "opt4",
"opt5", "opt6", "opt7",
};
#define SYSC_IDLEMODE_MASK 3
#define SYSC_CLOCKACTIVITY_MASK 3
/**
* struct sysc - TI sysc interconnect target module registers and capabilities
* @dev: struct device pointer
* @module_pa: physical address of the interconnect target module
* @module_size: size of the interconnect target module
* @module_va: virtual address of the interconnect target module
* @offsets: register offsets from module base
* @mdata: ti-sysc to hwmod translation data for a module
* @clocks: clocks used by the interconnect target module
* @clock_roles: clock role names for the found clocks
* @nr_clocks: number of clocks used by the interconnect target module
* @rsts: resets used by the interconnect target module
* @legacy_mode: configured for legacy mode if set
* @cap: interconnect target module capabilities
* @cfg: interconnect target module configuration
* @cookie: data used by legacy platform callbacks
* @name: name if available
* @revision: interconnect target module revision
* @sysconfig: saved sysconfig register value
* @reserved: target module is reserved and already in use
* @enabled: sysc runtime enabled status
* @needs_resume: runtime resume needed on resume from suspend
* @child_needs_resume: runtime resume needed for child on resume from suspend
* @idle_work: work structure used to perform delayed idle on a module
* @pre_reset_quirk: module specific pre-reset quirk
* @post_reset_quirk: module specific post-reset quirk
* @reset_done_quirk: module specific reset done quirk
* @module_enable_quirk: module specific enable quirk
* @module_disable_quirk: module specific disable quirk
* @module_unlock_quirk: module specific sysconfig unlock quirk
* @module_lock_quirk: module specific sysconfig lock quirk
*/
struct sysc {
struct device *dev;
u64 module_pa;
u32 module_size;
void __iomem *module_va;
int offsets[SYSC_MAX_REGS];
struct ti_sysc_module_data *mdata;
struct clk **clocks;
const char **clock_roles;
int nr_clocks;
struct reset_control *rsts;
const char *legacy_mode;
const struct sysc_capabilities *cap;
struct sysc_config cfg;
struct ti_sysc_cookie cookie;
const char *name;
u32 revision;
u32 sysconfig;
unsigned int reserved:1;
unsigned int enabled:1;
unsigned int needs_resume:1;
unsigned int child_needs_resume:1;
struct delayed_work idle_work;
void (*pre_reset_quirk)(struct sysc *sysc);
void (*post_reset_quirk)(struct sysc *sysc);
void (*reset_done_quirk)(struct sysc *sysc);
void (*module_enable_quirk)(struct sysc *sysc);
void (*module_disable_quirk)(struct sysc *sysc);
void (*module_unlock_quirk)(struct sysc *sysc);
void (*module_lock_quirk)(struct sysc *sysc);
};
static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
bool is_child);
static int sysc_reset(struct sysc *ddata);
static void sysc_write(struct sysc *ddata, int offset, u32 value)
{
if (ddata->cfg.quirks & SYSC_QUIRK_16BIT) {
writew_relaxed(value & 0xffff, ddata->module_va + offset);
		/* Only the i2c revision register has LO and HI halves, stride 4 */
if (ddata->offsets[SYSC_REVISION] >= 0 &&
offset == ddata->offsets[SYSC_REVISION]) {
u16 hi = value >> 16;
writew_relaxed(hi, ddata->module_va + offset + 4);
}
return;
}
writel_relaxed(value, ddata->module_va + offset);
}
static u32 sysc_read(struct sysc *ddata, int offset)
{
if (ddata->cfg.quirks & SYSC_QUIRK_16BIT) {
u32 val;
val = readw_relaxed(ddata->module_va + offset);
		/* Only the i2c revision register has LO and HI halves, stride 4 */
if (ddata->offsets[SYSC_REVISION] >= 0 &&
offset == ddata->offsets[SYSC_REVISION]) {
u16 tmp = readw_relaxed(ddata->module_va + offset + 4);
val |= tmp << 16;
}
return val;
}
return readl_relaxed(ddata->module_va + offset);
}
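/*
 * For illustration only, a hypothetical sketch of how the SYSC_QUIRK_16BIT
 * access above splits the 32-bit revision value across two 16-bit registers
 * with a stride of 4. The offset and revision values below are made up:
 */
#if 0
static void sysc_example_16bit_revision(struct sysc *ddata)
{
	const int offset = 0x20;	/* hypothetical revision offset */
	u32 rev = 0x0000003c;		/* hypothetical revision value */

	/* Write path: LO half at offset, HI half at offset + 4 */
	writew_relaxed(rev & 0xffff, ddata->module_va + offset);
	writew_relaxed(rev >> 16, ddata->module_va + offset + 4);

	/* Read path recombines the halves: LO | HI << 16 */
	rev = readw_relaxed(ddata->module_va + offset);
	rev |= (u32)readw_relaxed(ddata->module_va + offset + 4) << 16;
}
#endif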
static bool sysc_opt_clks_needed(struct sysc *ddata)
{
return !!(ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_NEEDED);
}
static u32 sysc_read_revision(struct sysc *ddata)
{
int offset = ddata->offsets[SYSC_REVISION];
if (offset < 0)
return 0;
return sysc_read(ddata, offset);
}
static u32 sysc_read_sysconfig(struct sysc *ddata)
{
int offset = ddata->offsets[SYSC_SYSCONFIG];
if (offset < 0)
return 0;
return sysc_read(ddata, offset);
}
static u32 sysc_read_sysstatus(struct sysc *ddata)
{
int offset = ddata->offsets[SYSC_SYSSTATUS];
if (offset < 0)
return 0;
return sysc_read(ddata, offset);
}
static int sysc_poll_reset_sysstatus(struct sysc *ddata)
{
int error, retries;
u32 syss_done, rstval;
if (ddata->cfg.quirks & SYSS_QUIRK_RESETDONE_INVERTED)
syss_done = 0;
else
syss_done = ddata->cfg.syss_mask;
if (likely(!timekeeping_suspended)) {
error = readx_poll_timeout_atomic(sysc_read_sysstatus, ddata,
rstval, (rstval & ddata->cfg.syss_mask) ==
syss_done, 100, MAX_MODULE_SOFTRESET_WAIT);
} else {
retries = MAX_MODULE_SOFTRESET_WAIT;
while (retries--) {
rstval = sysc_read_sysstatus(ddata);
if ((rstval & ddata->cfg.syss_mask) == syss_done)
return 0;
			udelay(2); /* Account for udelay flakiness */
}
error = -ETIMEDOUT;
}
return error;
}
static int sysc_poll_reset_sysconfig(struct sysc *ddata)
{
int error, retries;
u32 sysc_mask, rstval;
sysc_mask = BIT(ddata->cap->regbits->srst_shift);
if (likely(!timekeeping_suspended)) {
error = readx_poll_timeout_atomic(sysc_read_sysconfig, ddata,
rstval, !(rstval & sysc_mask),
100, MAX_MODULE_SOFTRESET_WAIT);
} else {
retries = MAX_MODULE_SOFTRESET_WAIT;
while (retries--) {
rstval = sysc_read_sysconfig(ddata);
if (!(rstval & sysc_mask))
return 0;
			udelay(2); /* Account for udelay flakiness */
}
error = -ETIMEDOUT;
}
return error;
}
/* Poll on reset status */
static int sysc_wait_softreset(struct sysc *ddata)
{
int syss_offset, error = 0;
if (ddata->cap->regbits->srst_shift < 0)
return 0;
syss_offset = ddata->offsets[SYSC_SYSSTATUS];
if (syss_offset >= 0)
error = sysc_poll_reset_sysstatus(ddata);
else if (ddata->cfg.quirks & SYSC_QUIRK_RESET_STATUS)
error = sysc_poll_reset_sysconfig(ddata);
return error;
}
static int sysc_add_named_clock_from_child(struct sysc *ddata,
const char *name,
const char *optfck_name)
{
struct device_node *np = ddata->dev->of_node;
struct device_node *child;
struct clk_lookup *cl;
struct clk *clock;
const char *n;
if (name)
n = name;
else
n = optfck_name;
/* Does the clock alias already exist? */
clock = of_clk_get_by_name(np, n);
if (!IS_ERR(clock)) {
clk_put(clock);
return 0;
}
child = of_get_next_available_child(np, NULL);
if (!child)
return -ENODEV;
clock = devm_get_clk_from_child(ddata->dev, child, name);
if (IS_ERR(clock))
return PTR_ERR(clock);
/*
* Use clkdev_add() instead of clkdev_alloc() to avoid the MAX_DEV_ID
* limit for clk_get(). If cl ever needs to be freed, it should be done
* with clkdev_drop().
*/
cl = kzalloc(sizeof(*cl), GFP_KERNEL);
if (!cl)
return -ENOMEM;
cl->con_id = n;
cl->dev_id = dev_name(ddata->dev);
cl->clk = clock;
clkdev_add(cl);
clk_put(clock);
return 0;
}
static int sysc_init_ext_opt_clock(struct sysc *ddata, const char *name)
{
const char *optfck_name;
int error, index;
if (ddata->nr_clocks < SYSC_OPTFCK0)
index = SYSC_OPTFCK0;
else
index = ddata->nr_clocks;
if (name)
optfck_name = name;
else
optfck_name = clock_names[index];
error = sysc_add_named_clock_from_child(ddata, name, optfck_name);
if (error)
return error;
ddata->clock_roles[index] = optfck_name;
ddata->nr_clocks++;
return 0;
}
static int sysc_get_one_clock(struct sysc *ddata, const char *name)
{
int error, i, index = -ENODEV;
if (!strncmp(clock_names[SYSC_FCK], name, 3))
index = SYSC_FCK;
else if (!strncmp(clock_names[SYSC_ICK], name, 3))
index = SYSC_ICK;
if (index < 0) {
for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) {
if (!ddata->clocks[i]) {
index = i;
break;
}
}
}
if (index < 0) {
dev_err(ddata->dev, "clock %s not added\n", name);
return index;
}
ddata->clocks[index] = devm_clk_get(ddata->dev, name);
if (IS_ERR(ddata->clocks[index])) {
dev_err(ddata->dev, "clock get error for %s: %li\n",
name, PTR_ERR(ddata->clocks[index]));
return PTR_ERR(ddata->clocks[index]);
}
error = clk_prepare(ddata->clocks[index]);
if (error) {
dev_err(ddata->dev, "clock prepare error for %s: %i\n",
name, error);
return error;
}
return 0;
}
static int sysc_get_clocks(struct sysc *ddata)
{
struct device_node *np = ddata->dev->of_node;
struct property *prop;
const char *name;
int nr_fck = 0, nr_ick = 0, i, error = 0;
ddata->clock_roles = devm_kcalloc(ddata->dev,
SYSC_MAX_CLOCKS,
sizeof(*ddata->clock_roles),
GFP_KERNEL);
if (!ddata->clock_roles)
return -ENOMEM;
of_property_for_each_string(np, "clock-names", prop, name) {
if (!strncmp(clock_names[SYSC_FCK], name, 3))
nr_fck++;
if (!strncmp(clock_names[SYSC_ICK], name, 3))
nr_ick++;
ddata->clock_roles[ddata->nr_clocks] = name;
ddata->nr_clocks++;
}
if (ddata->nr_clocks < 1)
return 0;
if ((ddata->cfg.quirks & SYSC_QUIRK_EXT_OPT_CLOCK)) {
error = sysc_init_ext_opt_clock(ddata, NULL);
if (error)
return error;
}
if (ddata->nr_clocks > SYSC_MAX_CLOCKS) {
dev_err(ddata->dev, "too many clocks for %pOF\n", np);
return -EINVAL;
}
if (nr_fck > 1 || nr_ick > 1) {
dev_err(ddata->dev, "max one fck and ick for %pOF\n", np);
return -EINVAL;
}
/* Always add a slot for main clocks fck and ick even if unused */
if (!nr_fck)
ddata->nr_clocks++;
if (!nr_ick)
ddata->nr_clocks++;
ddata->clocks = devm_kcalloc(ddata->dev,
ddata->nr_clocks, sizeof(*ddata->clocks),
GFP_KERNEL);
if (!ddata->clocks)
return -ENOMEM;
for (i = 0; i < SYSC_MAX_CLOCKS; i++) {
const char *name = ddata->clock_roles[i];
if (!name)
continue;
error = sysc_get_one_clock(ddata, name);
if (error)
return error;
}
return 0;
}
static int sysc_enable_main_clocks(struct sysc *ddata)
{
struct clk *clock;
int i, error;
if (!ddata->clocks)
return 0;
for (i = 0; i < SYSC_OPTFCK0; i++) {
clock = ddata->clocks[i];
/* Main clocks may not have ick */
if (IS_ERR_OR_NULL(clock))
continue;
error = clk_enable(clock);
if (error)
goto err_disable;
}
return 0;
err_disable:
for (i--; i >= 0; i--) {
clock = ddata->clocks[i];
/* Main clocks may not have ick */
if (IS_ERR_OR_NULL(clock))
continue;
clk_disable(clock);
}
return error;
}
static void sysc_disable_main_clocks(struct sysc *ddata)
{
struct clk *clock;
int i;
if (!ddata->clocks)
return;
for (i = 0; i < SYSC_OPTFCK0; i++) {
clock = ddata->clocks[i];
if (IS_ERR_OR_NULL(clock))
continue;
clk_disable(clock);
}
}
static int sysc_enable_opt_clocks(struct sysc *ddata)
{
struct clk *clock;
int i, error;
if (!ddata->clocks || ddata->nr_clocks < SYSC_OPTFCK0 + 1)
return 0;
for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) {
clock = ddata->clocks[i];
/* Assume no holes for opt clocks */
if (IS_ERR_OR_NULL(clock))
return 0;
error = clk_enable(clock);
if (error)
goto err_disable;
}
return 0;
err_disable:
for (i--; i >= 0; i--) {
clock = ddata->clocks[i];
if (IS_ERR_OR_NULL(clock))
continue;
clk_disable(clock);
}
return error;
}
static void sysc_disable_opt_clocks(struct sysc *ddata)
{
struct clk *clock;
int i;
if (!ddata->clocks || ddata->nr_clocks < SYSC_OPTFCK0 + 1)
return;
for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) {
clock = ddata->clocks[i];
/* Assume no holes for opt clocks */
if (IS_ERR_OR_NULL(clock))
return;
clk_disable(clock);
}
}
static void sysc_clkdm_deny_idle(struct sysc *ddata)
{
struct ti_sysc_platform_data *pdata;
if (ddata->legacy_mode || (ddata->cfg.quirks & SYSC_QUIRK_CLKDM_NOAUTO))
return;
pdata = dev_get_platdata(ddata->dev);
if (pdata && pdata->clkdm_deny_idle)
pdata->clkdm_deny_idle(ddata->dev, &ddata->cookie);
}
static void sysc_clkdm_allow_idle(struct sysc *ddata)
{
struct ti_sysc_platform_data *pdata;
if (ddata->legacy_mode || (ddata->cfg.quirks & SYSC_QUIRK_CLKDM_NOAUTO))
return;
pdata = dev_get_platdata(ddata->dev);
if (pdata && pdata->clkdm_allow_idle)
pdata->clkdm_allow_idle(ddata->dev, &ddata->cookie);
}
/**
* sysc_init_resets - init rstctrl reset line if configured
* @ddata: device driver data
*
* See sysc_rstctrl_reset_deassert().
*/
static int sysc_init_resets(struct sysc *ddata)
{
ddata->rsts =
devm_reset_control_get_optional_shared(ddata->dev, "rstctrl");
return PTR_ERR_OR_ZERO(ddata->rsts);
}
/**
* sysc_parse_and_check_child_range - parses module IO region from ranges
* @ddata: device driver data
*
* In general we only need rev, syss, and sysc registers and not the whole
* module range. But we do want the offsets for these registers from the
* module base. This allows us to check them against the legacy hwmod
* platform data. Let's also check the ranges are configured properly.
*/
static int sysc_parse_and_check_child_range(struct sysc *ddata)
{
struct device_node *np = ddata->dev->of_node;
struct of_range_parser parser;
struct of_range range;
int error;
error = of_range_parser_init(&parser, np);
if (error)
return error;
for_each_of_range(&parser, &range) {
ddata->module_pa = range.cpu_addr;
ddata->module_size = range.size;
break;
}
return 0;
}
/* Interconnect instances to probe before l4_per instances */
static struct resource early_bus_ranges[] = {
/* am3/4 l4_wkup */
{ .start = 0x44c00000, .end = 0x44c00000 + 0x300000, },
/* omap4/5 and dra7 l4_cfg */
{ .start = 0x4a000000, .end = 0x4a000000 + 0x300000, },
/* omap4 l4_wkup */
{ .start = 0x4a300000, .end = 0x4a300000 + 0x30000, },
/* omap5 and dra7 l4_wkup without dra7 dcan segment */
{ .start = 0x4ae00000, .end = 0x4ae00000 + 0x30000, },
};
static atomic_t sysc_defer = ATOMIC_INIT(10);
/**
* sysc_defer_non_critical - defer non_critical interconnect probing
* @ddata: device driver data
*
* We want to probe l4_cfg and l4_wkup interconnect instances before any
* l4_per instances as l4_per instances depend on resources on l4_cfg and
* l4_wkup interconnects.
*/
static int sysc_defer_non_critical(struct sysc *ddata)
{
struct resource *res;
int i;
if (!atomic_read(&sysc_defer))
return 0;
for (i = 0; i < ARRAY_SIZE(early_bus_ranges); i++) {
res = &early_bus_ranges[i];
if (ddata->module_pa >= res->start &&
ddata->module_pa <= res->end) {
atomic_set(&sysc_defer, 0);
return 0;
}
}
atomic_dec_if_positive(&sysc_defer);
return -EPROBE_DEFER;
}
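/*
 * For example (addresses are illustrative): an l4_cfg module at 0x4a002000
 * falls inside the early ranges above, so it probes immediately and clears
 * sysc_defer for everything that follows, while an l4_per module at
 * 0x48020000 returns -EPROBE_DEFER until either a critical instance has
 * probed or the initial budget of 10 deferrals, shared across all
 * non-critical modules, runs out.
 */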
static struct device_node *stdout_path;
static void sysc_init_stdout_path(struct sysc *ddata)
{
struct device_node *np = NULL;
const char *uart;
if (IS_ERR(stdout_path))
return;
if (stdout_path)
return;
np = of_find_node_by_path("/chosen");
if (!np)
goto err;
uart = of_get_property(np, "stdout-path", NULL);
if (!uart)
goto err;
np = of_find_node_by_path(uart);
if (!np)
goto err;
stdout_path = np;
return;
err:
stdout_path = ERR_PTR(-ENODEV);
}
static void sysc_check_quirk_stdout(struct sysc *ddata,
struct device_node *np)
{
sysc_init_stdout_path(ddata);
if (np != stdout_path)
return;
ddata->cfg.quirks |= SYSC_QUIRK_NO_IDLE_ON_INIT |
SYSC_QUIRK_NO_RESET_ON_INIT;
}
/**
* sysc_check_one_child - check child configuration
* @ddata: device driver data
* @np: child device node
*
* Let's avoid messy situations where we have new interconnect target
* node but children have "ti,hwmods". These belong to the interconnect
* target node and are managed by this driver.
*/
static void sysc_check_one_child(struct sysc *ddata,
struct device_node *np)
{
const char *name;
name = of_get_property(np, "ti,hwmods", NULL);
if (name && !of_device_is_compatible(np, "ti,sysc"))
dev_warn(ddata->dev, "really a child ti,hwmods property?");
sysc_check_quirk_stdout(ddata, np);
sysc_parse_dts_quirks(ddata, np, true);
}
static void sysc_check_children(struct sysc *ddata)
{
struct device_node *child;
for_each_child_of_node(ddata->dev->of_node, child)
sysc_check_one_child(ddata, child);
}
/*
 * So far only I2C uses 16-bit register access with clockactivity, with the
 * revision split across two registers with a stride of 4. We can detect
 * this based on the rev register size to configure things far enough to be
 * able to properly read the revision register.
*/
static void sysc_check_quirk_16bit(struct sysc *ddata, struct resource *res)
{
if (resource_size(res) == 8)
ddata->cfg.quirks |= SYSC_QUIRK_16BIT | SYSC_QUIRK_USE_CLOCKACT;
}
/**
* sysc_parse_one - parses the interconnect target module registers
* @ddata: device driver data
* @reg: register to parse
*/
static int sysc_parse_one(struct sysc *ddata, enum sysc_registers reg)
{
struct resource *res;
const char *name;
switch (reg) {
case SYSC_REVISION:
case SYSC_SYSCONFIG:
case SYSC_SYSSTATUS:
name = reg_names[reg];
break;
default:
return -EINVAL;
}
res = platform_get_resource_byname(to_platform_device(ddata->dev),
IORESOURCE_MEM, name);
if (!res) {
ddata->offsets[reg] = -ENODEV;
return 0;
}
ddata->offsets[reg] = res->start - ddata->module_pa;
if (reg == SYSC_REVISION)
sysc_check_quirk_16bit(ddata, res);
return 0;
}
static int sysc_parse_registers(struct sysc *ddata)
{
int i, error;
for (i = 0; i < SYSC_MAX_REGS; i++) {
error = sysc_parse_one(ddata, i);
if (error)
return error;
}
return 0;
}
/**
* sysc_check_registers - check for misconfigured register overlaps
* @ddata: device driver data
*/
static int sysc_check_registers(struct sysc *ddata)
{
int i, j, nr_regs = 0, nr_matches = 0;
for (i = 0; i < SYSC_MAX_REGS; i++) {
if (ddata->offsets[i] < 0)
continue;
if (ddata->offsets[i] > (ddata->module_size - 4)) {
dev_err(ddata->dev, "register outside module range");
return -EINVAL;
}
for (j = 0; j < SYSC_MAX_REGS; j++) {
if (ddata->offsets[j] < 0)
continue;
if (ddata->offsets[i] == ddata->offsets[j])
nr_matches++;
}
nr_regs++;
}
if (nr_matches > nr_regs) {
dev_err(ddata->dev, "overlapping registers: (%i/%i)",
nr_regs, nr_matches);
return -EINVAL;
}
return 0;
}
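/*
 * Worked example for the overlap check above: every valid offset matches at
 * least itself in the inner loop, so nr_matches == nr_regs when all offsets
 * are distinct. With offsets { 0x0, 0x10, 0x14 } we get nr_regs = 3 and
 * nr_matches = 3; if syss were misconfigured to 0x10 as well, nr_matches
 * would become 5 > 3 and the check fails.
 */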
/**
* sysc_ioremap - ioremap register space for the interconnect target module
* @ddata: device driver data
*
* Note that the interconnect target module registers can be anywhere
* within the interconnect target module range. For example, SGX has
* them at offset 0x1fc00 in the 32MB module address space. And cpsw
* has them at offset 0x1200 in the CPSW_WR child. Usually the
* interconnect target module registers are at the beginning of
* the module range though.
*/
static int sysc_ioremap(struct sysc *ddata)
{
int size;
if (ddata->offsets[SYSC_REVISION] < 0 &&
ddata->offsets[SYSC_SYSCONFIG] < 0 &&
ddata->offsets[SYSC_SYSSTATUS] < 0) {
size = ddata->module_size;
} else {
size = max3(ddata->offsets[SYSC_REVISION],
ddata->offsets[SYSC_SYSCONFIG],
ddata->offsets[SYSC_SYSSTATUS]);
if (size < SZ_1K)
size = SZ_1K;
if ((size + sizeof(u32)) > ddata->module_size)
size = ddata->module_size;
}
ddata->module_va = devm_ioremap(ddata->dev,
ddata->module_pa,
size + sizeof(u32));
if (!ddata->module_va)
return -EIO;
return 0;
}
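/*
 * Sizing example for sysc_ioremap(): with rev/sysc/syss at 0x0/0x10/0x14,
 * max3() yields 0x14, which is rounded up to SZ_1K, so SZ_1K + 4 bytes get
 * mapped. For the 36xx SGX case with rev at 0x1fc00 and sysc at 0x1fc10,
 * max3() yields 0x1fc10 and 0x1fc10 + 4 bytes of the 32MB module range are
 * mapped instead.
 */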
/**
* sysc_map_and_check_registers - ioremap and check device registers
* @ddata: device driver data
*/
static int sysc_map_and_check_registers(struct sysc *ddata)
{
struct device_node *np = ddata->dev->of_node;
int error;
error = sysc_parse_and_check_child_range(ddata);
if (error)
return error;
error = sysc_defer_non_critical(ddata);
if (error)
return error;
sysc_check_children(ddata);
if (!of_property_present(np, "reg"))
return 0;
error = sysc_parse_registers(ddata);
if (error)
return error;
error = sysc_ioremap(ddata);
if (error)
return error;
error = sysc_check_registers(ddata);
if (error)
return error;
return 0;
}
/**
* sysc_show_rev - read and show interconnect target module revision
* @bufp: buffer to print the information to
* @ddata: device driver data
*/
static int sysc_show_rev(char *bufp, struct sysc *ddata)
{
int len;
if (ddata->offsets[SYSC_REVISION] < 0)
return sprintf(bufp, ":NA");
len = sprintf(bufp, ":%08x", ddata->revision);
return len;
}
static int sysc_show_reg(struct sysc *ddata,
char *bufp, enum sysc_registers reg)
{
if (ddata->offsets[reg] < 0)
return sprintf(bufp, ":NA");
return sprintf(bufp, ":%x", ddata->offsets[reg]);
}
static int sysc_show_name(char *bufp, struct sysc *ddata)
{
if (!ddata->name)
return 0;
return sprintf(bufp, ":%s", ddata->name);
}
/**
* sysc_show_registers - show information about interconnect target module
* @ddata: device driver data
*/
static void sysc_show_registers(struct sysc *ddata)
{
char buf[128];
char *bufp = buf;
int i;
for (i = 0; i < SYSC_MAX_REGS; i++)
bufp += sysc_show_reg(ddata, bufp, i);
bufp += sysc_show_rev(bufp, ddata);
bufp += sysc_show_name(bufp, ddata);
dev_dbg(ddata->dev, "%llx:%x%s\n",
ddata->module_pa, ddata->module_size,
buf);
}
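/*
 * With hypothetical values, the dev_dbg() above produces a line like:
 *
 *   48030000:1000:0:10:14:00000040:timer
 *
 * i.e. module physical address and size, the rev/sysc/syss offsets, the
 * revision and the detected name, with ":NA" printed for any register the
 * module does not have.
 */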
/**
* sysc_write_sysconfig - handle sysconfig quirks for register write
* @ddata: device driver data
* @value: register value
*/
static void sysc_write_sysconfig(struct sysc *ddata, u32 value)
{
if (ddata->module_unlock_quirk)
ddata->module_unlock_quirk(ddata);
sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], value);
if (ddata->module_lock_quirk)
ddata->module_lock_quirk(ddata);
}
#define SYSC_IDLE_MASK (SYSC_NR_IDLEMODES - 1)
#define SYSC_CLOCKACT_ICK	2
/* Caller needs to manage sysc_clkdm_deny_idle() and sysc_clkdm_allow_idle() */
static int sysc_enable_module(struct device *dev)
{
struct sysc *ddata;
const struct sysc_regbits *regbits;
u32 reg, idlemodes, best_mode;
int error;
ddata = dev_get_drvdata(dev);
/*
* Some modules like DSS reset automatically on idle. Enable optional
* reset clocks and wait for OCP softreset to complete.
*/
if (ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_IN_RESET) {
error = sysc_enable_opt_clocks(ddata);
if (error) {
dev_err(ddata->dev,
"Optional clocks failed for enable: %i\n",
error);
return error;
}
}
/*
* Some modules like i2c and hdq1w have unusable reset status unless
* the module reset quirk is enabled. Skip status check on enable.
*/
if (!(ddata->cfg.quirks & SYSC_MODULE_QUIRK_ENA_RESETDONE)) {
error = sysc_wait_softreset(ddata);
if (error)
dev_warn(ddata->dev, "OCP softreset timed out\n");
}
if (ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_IN_RESET)
sysc_disable_opt_clocks(ddata);
/*
* Some subsystem private interconnects, like DSS top level module,
* need only the automatic OCP softreset handling with no sysconfig
* register bits to configure.
*/
if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV)
return 0;
regbits = ddata->cap->regbits;
reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
/*
* Set CLOCKACTIVITY, we only use it for ick. And we only configure it
* based on the SYSC_QUIRK_USE_CLOCKACT flag, not based on the hardware
* capabilities. See the old HWMOD_SET_DEFAULT_CLOCKACT flag.
*/
if (regbits->clkact_shift >= 0 &&
(ddata->cfg.quirks & SYSC_QUIRK_USE_CLOCKACT))
		reg |= SYSC_CLOCKACT_ICK << regbits->clkact_shift;
/* Set SIDLE mode */
idlemodes = ddata->cfg.sidlemodes;
if (!idlemodes || regbits->sidle_shift < 0)
goto set_midle;
if (ddata->cfg.quirks & (SYSC_QUIRK_SWSUP_SIDLE |
SYSC_QUIRK_SWSUP_SIDLE_ACT)) {
best_mode = SYSC_IDLE_NO;
} else {
best_mode = fls(ddata->cfg.sidlemodes) - 1;
if (best_mode > SYSC_IDLE_MASK) {
dev_err(dev, "%s: invalid sidlemode\n", __func__);
return -EINVAL;
}
/* Set WAKEUP */
if (regbits->enwkup_shift >= 0 &&
ddata->cfg.sysc_val & BIT(regbits->enwkup_shift))
reg |= BIT(regbits->enwkup_shift);
}
reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift);
reg |= best_mode << regbits->sidle_shift;
sysc_write_sysconfig(ddata, reg);
set_midle:
/* Set MIDLE mode */
idlemodes = ddata->cfg.midlemodes;
if (!idlemodes || regbits->midle_shift < 0)
goto set_autoidle;
best_mode = fls(ddata->cfg.midlemodes) - 1;
if (best_mode > SYSC_IDLE_MASK) {
dev_err(dev, "%s: invalid midlemode\n", __func__);
error = -EINVAL;
goto save_context;
}
if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_MSTANDBY)
best_mode = SYSC_IDLE_NO;
reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift);
reg |= best_mode << regbits->midle_shift;
sysc_write_sysconfig(ddata, reg);
set_autoidle:
	/* Autoidle bit must be enabled separately if available */
if (regbits->autoidle_shift >= 0 &&
ddata->cfg.sysc_val & BIT(regbits->autoidle_shift)) {
reg |= 1 << regbits->autoidle_shift;
sysc_write_sysconfig(ddata, reg);
}
error = 0;
save_context:
/* Save context and flush posted write */
ddata->sysconfig = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
if (ddata->module_enable_quirk)
ddata->module_enable_quirk(ddata);
return error;
}
static int sysc_best_idle_mode(u32 idlemodes, u32 *best_mode)
{
if (idlemodes & BIT(SYSC_IDLE_SMART_WKUP))
*best_mode = SYSC_IDLE_SMART_WKUP;
else if (idlemodes & BIT(SYSC_IDLE_SMART))
*best_mode = SYSC_IDLE_SMART;
else if (idlemodes & BIT(SYSC_IDLE_FORCE))
*best_mode = SYSC_IDLE_FORCE;
else
return -EINVAL;
return 0;
}
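/*
 * Usage sketch with a hypothetical idlemodes mask, showing the preference
 * order above (smart-idle with wakeup, then smart-idle, then force-idle):
 */
#if 0
	u32 best_mode;
	int error;

	/* SMART_WKUP is not in the mask, so SMART wins over FORCE */
	error = sysc_best_idle_mode(BIT(SYSC_IDLE_SMART) | BIT(SYSC_IDLE_FORCE),
				    &best_mode);
	/* error == 0 and best_mode == SYSC_IDLE_SMART here */
#endif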
/* Caller needs to manage sysc_clkdm_deny_idle() and sysc_clkdm_allow_idle() */
static int sysc_disable_module(struct device *dev)
{
struct sysc *ddata;
const struct sysc_regbits *regbits;
u32 reg, idlemodes, best_mode;
int ret;
ddata = dev_get_drvdata(dev);
if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV)
return 0;
if (ddata->module_disable_quirk)
ddata->module_disable_quirk(ddata);
regbits = ddata->cap->regbits;
reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
/* Set MIDLE mode */
idlemodes = ddata->cfg.midlemodes;
if (!idlemodes || regbits->midle_shift < 0)
goto set_sidle;
ret = sysc_best_idle_mode(idlemodes, &best_mode);
if (ret) {
dev_err(dev, "%s: invalid midlemode\n", __func__);
return ret;
}
if (ddata->cfg.quirks & (SYSC_QUIRK_SWSUP_MSTANDBY) ||
ddata->cfg.quirks & (SYSC_QUIRK_FORCE_MSTANDBY))
best_mode = SYSC_IDLE_FORCE;
reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift);
reg |= best_mode << regbits->midle_shift;
sysc_write_sysconfig(ddata, reg);
set_sidle:
/* Set SIDLE mode */
idlemodes = ddata->cfg.sidlemodes;
if (!idlemodes || regbits->sidle_shift < 0) {
ret = 0;
goto save_context;
}
if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_SIDLE) {
best_mode = SYSC_IDLE_FORCE;
} else {
ret = sysc_best_idle_mode(idlemodes, &best_mode);
if (ret) {
dev_err(dev, "%s: invalid sidlemode\n", __func__);
ret = -EINVAL;
goto save_context;
}
}
reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift);
reg |= best_mode << regbits->sidle_shift;
if (regbits->autoidle_shift >= 0 &&
ddata->cfg.sysc_val & BIT(regbits->autoidle_shift))
reg |= 1 << regbits->autoidle_shift;
sysc_write_sysconfig(ddata, reg);
ret = 0;
save_context:
/* Save context and flush posted write */
ddata->sysconfig = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
return ret;
}
static int __maybe_unused sysc_runtime_suspend_legacy(struct device *dev,
struct sysc *ddata)
{
struct ti_sysc_platform_data *pdata;
int error;
pdata = dev_get_platdata(ddata->dev);
if (!pdata)
return 0;
if (!pdata->idle_module)
return -ENODEV;
error = pdata->idle_module(dev, &ddata->cookie);
if (error)
dev_err(dev, "%s: could not idle: %i\n",
__func__, error);
reset_control_assert(ddata->rsts);
return 0;
}
static int __maybe_unused sysc_runtime_resume_legacy(struct device *dev,
struct sysc *ddata)
{
struct ti_sysc_platform_data *pdata;
int error;
pdata = dev_get_platdata(ddata->dev);
if (!pdata)
return 0;
if (!pdata->enable_module)
return -ENODEV;
error = pdata->enable_module(dev, &ddata->cookie);
if (error)
dev_err(dev, "%s: could not enable: %i\n",
__func__, error);
reset_control_deassert(ddata->rsts);
return 0;
}
static int __maybe_unused sysc_runtime_suspend(struct device *dev)
{
struct sysc *ddata;
int error = 0;
ddata = dev_get_drvdata(dev);
if (!ddata->enabled)
return 0;
sysc_clkdm_deny_idle(ddata);
if (ddata->legacy_mode) {
error = sysc_runtime_suspend_legacy(dev, ddata);
if (error)
goto err_allow_idle;
} else {
error = sysc_disable_module(dev);
if (error)
goto err_allow_idle;
}
sysc_disable_main_clocks(ddata);
if (sysc_opt_clks_needed(ddata))
sysc_disable_opt_clocks(ddata);
ddata->enabled = false;
err_allow_idle:
sysc_clkdm_allow_idle(ddata);
reset_control_assert(ddata->rsts);
return error;
}
static int __maybe_unused sysc_runtime_resume(struct device *dev)
{
struct sysc *ddata;
int error = 0;
ddata = dev_get_drvdata(dev);
if (ddata->enabled)
return 0;
sysc_clkdm_deny_idle(ddata);
if (sysc_opt_clks_needed(ddata)) {
error = sysc_enable_opt_clocks(ddata);
if (error)
goto err_allow_idle;
}
error = sysc_enable_main_clocks(ddata);
if (error)
goto err_opt_clocks;
reset_control_deassert(ddata->rsts);
if (ddata->legacy_mode) {
error = sysc_runtime_resume_legacy(dev, ddata);
if (error)
goto err_main_clocks;
} else {
error = sysc_enable_module(dev);
if (error)
goto err_main_clocks;
}
ddata->enabled = true;
sysc_clkdm_allow_idle(ddata);
return 0;
err_main_clocks:
sysc_disable_main_clocks(ddata);
err_opt_clocks:
if (sysc_opt_clks_needed(ddata))
sysc_disable_opt_clocks(ddata);
err_allow_idle:
sysc_clkdm_allow_idle(ddata);
return error;
}
/*
* Checks if device context was lost. Assumes the sysconfig register value
* after lost context is different from the configured value. Only works for
* enabled devices.
*
* Eventually we may want to also add support to using the context lost
* registers that some SoCs have.
*/
static int sysc_check_context(struct sysc *ddata)
{
u32 reg;
if (!ddata->enabled)
return -ENODATA;
reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
if (reg == ddata->sysconfig)
return 0;
return -EACCES;
}
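/*
 * For example (hypothetical values): if sysc_enable_module() saved
 * ddata->sysconfig as 0x00000108 and a power domain transition then reset
 * the module, the register typically reads back as its reset default such
 * as 0x00000000, so the comparison above fails with -EACCES and a reinit
 * gets triggered.
 */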
static int sysc_reinit_module(struct sysc *ddata, bool leave_enabled)
{
struct device *dev = ddata->dev;
int error;
if (ddata->enabled) {
/* Nothing to do if enabled and context not lost */
error = sysc_check_context(ddata);
if (!error)
return 0;
/* Disable target module if it is enabled */
error = sysc_runtime_suspend(dev);
if (error)
dev_warn(dev, "reinit suspend failed: %i\n", error);
}
/* Enable target module */
error = sysc_runtime_resume(dev);
if (error)
dev_warn(dev, "reinit resume failed: %i\n", error);
/* Some modules like am335x gpmc need reset and restore of sysconfig */
if (ddata->cfg.quirks & SYSC_QUIRK_RESET_ON_CTX_LOST) {
error = sysc_reset(ddata);
if (error)
dev_warn(dev, "reinit reset failed: %i\n", error);
sysc_write_sysconfig(ddata, ddata->sysconfig);
}
if (leave_enabled)
return error;
/* Disable target module if no leave_enabled was set */
error = sysc_runtime_suspend(dev);
if (error)
dev_warn(dev, "reinit suspend failed: %i\n", error);
return error;
}
static int __maybe_unused sysc_noirq_suspend(struct device *dev)
{
struct sysc *ddata;
ddata = dev_get_drvdata(dev);
if (ddata->cfg.quirks &
(SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
return 0;
if (!ddata->enabled)
return 0;
ddata->needs_resume = 1;
return sysc_runtime_suspend(dev);
}
static int __maybe_unused sysc_noirq_resume(struct device *dev)
{
struct sysc *ddata;
int error = 0;
ddata = dev_get_drvdata(dev);
if (ddata->cfg.quirks &
(SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
return 0;
if (ddata->cfg.quirks & SYSC_QUIRK_REINIT_ON_RESUME) {
error = sysc_reinit_module(ddata, ddata->needs_resume);
if (error)
dev_warn(dev, "noirq_resume failed: %i\n", error);
} else if (ddata->needs_resume) {
error = sysc_runtime_resume(dev);
if (error)
dev_warn(dev, "noirq_resume failed: %i\n", error);
}
ddata->needs_resume = 0;
return error;
}
static const struct dev_pm_ops sysc_pm_ops = {
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(sysc_noirq_suspend, sysc_noirq_resume)
SET_RUNTIME_PM_OPS(sysc_runtime_suspend,
sysc_runtime_resume,
NULL)
};
/* Module revision register based quirks */
struct sysc_revision_quirk {
const char *name;
u32 base;
int rev_offset;
int sysc_offset;
int syss_offset;
u32 revision;
u32 revision_mask;
u32 quirks;
};
#define SYSC_QUIRK(optname, optbase, optrev, optsysc, optsyss, \
optrev_val, optrevmask, optquirkmask) \
{ \
.name = (optname), \
.base = (optbase), \
.rev_offset = (optrev), \
.sysc_offset = (optsysc), \
.syss_offset = (optsyss), \
.revision = (optrev_val), \
.revision_mask = (optrevmask), \
.quirks = (optquirkmask), \
}
static const struct sysc_revision_quirk sysc_revision_quirks[] = {
/* These drivers need to be fixed to not use pm_runtime_irq_safe() */
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
/* Uarts on omap4 and later */
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47424e03, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
/* Quirks that need to be set based on the module address */
SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff,
SYSC_QUIRK_EXT_OPT_CLOCK | SYSC_QUIRK_NO_RESET_ON_INIT |
SYSC_QUIRK_SWSUP_SIDLE),
/* Quirks that need to be set based on detected module */
SYSC_QUIRK("aess", 0, 0, 0x10, -ENODEV, 0x40000000, 0xffffffff,
SYSC_MODULE_QUIRK_AESS),
/* Errata i893 handling for dra7 dcan1 and 2 */
SYSC_QUIRK("dcan", 0x4ae3c000, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff,
SYSC_QUIRK_CLKDM_NOAUTO),
SYSC_QUIRK("dcan", 0x48480000, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff,
SYSC_QUIRK_CLKDM_NOAUTO),
SYSC_QUIRK("dss", 0x4832a000, 0, 0x10, 0x14, 0x00000020, 0xffffffff,
SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET),
SYSC_QUIRK("dss", 0x58000000, 0, -ENODEV, 0x14, 0x00000040, 0xffffffff,
SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET),
SYSC_QUIRK("dss", 0x58000000, 0, -ENODEV, 0x14, 0x00000061, 0xffffffff,
SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET),
SYSC_QUIRK("dwc3", 0x48880000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff,
SYSC_QUIRK_CLKDM_NOAUTO),
SYSC_QUIRK("dwc3", 0x488c0000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff,
SYSC_QUIRK_CLKDM_NOAUTO),
SYSC_QUIRK("gpio", 0, 0, 0x10, 0x114, 0x50600801, 0xffff00ff,
SYSC_QUIRK_OPT_CLKS_IN_RESET),
SYSC_QUIRK("gpmc", 0, 0, 0x10, 0x14, 0x00000060, 0xffffffff,
SYSC_QUIRK_REINIT_ON_CTX_LOST | SYSC_QUIRK_RESET_ON_CTX_LOST |
SYSC_QUIRK_GPMC_DEBUG),
SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50030200, 0xffffffff,
SYSC_QUIRK_OPT_CLKS_NEEDED),
SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x00000006, 0xffffffff,
SYSC_MODULE_QUIRK_HDQ1W | SYSC_MODULE_QUIRK_ENA_RESETDONE),
SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x0000000a, 0xffffffff,
SYSC_MODULE_QUIRK_HDQ1W | SYSC_MODULE_QUIRK_ENA_RESETDONE),
SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x00000036, 0x000000ff,
SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x0000003c, 0x000000ff,
SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x00000040, 0x000000ff,
SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
SYSC_QUIRK("i2c", 0, 0, 0x10, 0x90, 0x5040000a, 0xfffff0f0,
SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
SYSC_QUIRK("gpu", 0x50000000, 0x14, -ENODEV, -ENODEV, 0x00010201, 0xffffffff, 0),
SYSC_QUIRK("gpu", 0x50000000, 0xfe00, 0xfe10, -ENODEV, 0x40000000 , 0xffffffff,
SYSC_MODULE_QUIRK_SGX),
SYSC_QUIRK("lcdc", 0, 0, 0x54, -ENODEV, 0x4f201000, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
SYSC_QUIRK("mcasp", 0, 0, 0x4, -ENODEV, 0x44306302, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE),
SYSC_QUIRK("rtc", 0, 0x74, 0x78, -ENODEV, 0x4eb01908, 0xffff00f0,
SYSC_MODULE_QUIRK_RTC_UNLOCK),
SYSC_QUIRK("tptc", 0, 0, 0x10, -ENODEV, 0x40006c00, 0xffffefff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
SYSC_QUIRK("tptc", 0, 0, -ENODEV, -ENODEV, 0x40007c00, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
SYSC_QUIRK("sata", 0, 0xfc, 0x1100, -ENODEV, 0x5e412000, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 0x50700100, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -ENODEV, 0x50700101, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000033,
0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY |
SYSC_MODULE_QUIRK_OTG),
SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000040,
0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY |
SYSC_MODULE_QUIRK_OTG),
SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050,
0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY |
SYSC_MODULE_QUIRK_OTG),
SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -ENODEV, 0x4ea2080d, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY |
SYSC_QUIRK_REINIT_ON_CTX_LOST),
SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
SYSC_MODULE_QUIRK_WDT),
/* PRUSS on am3, am4 and am5 */
SYSC_QUIRK("pruss", 0, 0x26000, 0x26004, -ENODEV, 0x47000000, 0xff000000,
SYSC_MODULE_QUIRK_PRUSS),
/* Watchdog on am3 and am4 */
SYSC_QUIRK("wdt", 0x44e35000, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
SYSC_MODULE_QUIRK_WDT | SYSC_QUIRK_SWSUP_SIDLE),
#ifdef DEBUG
SYSC_QUIRK("adc", 0, 0, 0x10, -ENODEV, 0x47300001, 0xffffffff, 0),
SYSC_QUIRK("atl", 0, 0, -ENODEV, -ENODEV, 0x0a070100, 0xffffffff, 0),
SYSC_QUIRK("cm", 0, 0, -ENODEV, -ENODEV, 0x40000301, 0xffffffff, 0),
SYSC_QUIRK("control", 0, 0, 0x10, -ENODEV, 0x40000900, 0xffffffff, 0),
SYSC_QUIRK("cpgmac", 0, 0x1200, 0x1208, 0x1204, 0x4edb1902,
0xffff00f0, 0),
SYSC_QUIRK("dcan", 0, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff, 0),
SYSC_QUIRK("dcan", 0, 0x20, -ENODEV, -ENODEV, 0x4edb1902, 0xffffffff, 0),
SYSC_QUIRK("dispc", 0x4832a400, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0),
SYSC_QUIRK("dispc", 0x58001000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0),
SYSC_QUIRK("dispc", 0x58001000, 0, 0x10, 0x14, 0x00000051, 0xffffffff, 0),
SYSC_QUIRK("dmic", 0, 0, 0x10, -ENODEV, 0x50010000, 0xffffffff, 0),
SYSC_QUIRK("dsi", 0x58004000, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0),
SYSC_QUIRK("dsi", 0x58005000, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0),
SYSC_QUIRK("dsi", 0x58005000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0),
SYSC_QUIRK("dsi", 0x58009000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0),
SYSC_QUIRK("dwc3", 0, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff, 0),
SYSC_QUIRK("d2d", 0x4a0b6000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
SYSC_QUIRK("d2d", 0x4a0cd000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
SYSC_QUIRK("elm", 0x48080000, 0, 0x10, 0x14, 0x00000020, 0xffffffff, 0),
SYSC_QUIRK("emif", 0, 0, -ENODEV, -ENODEV, 0x40441403, 0xffff0fff, 0),
SYSC_QUIRK("emif", 0, 0, -ENODEV, -ENODEV, 0x50440500, 0xffffffff, 0),
SYSC_QUIRK("epwmss", 0, 0, 0x4, -ENODEV, 0x47400001, 0xffffffff, 0),
SYSC_QUIRK("gpu", 0, 0x1fc00, 0x1fc10, -ENODEV, 0, 0, 0),
SYSC_QUIRK("gpu", 0, 0xfe00, 0xfe10, -ENODEV, 0x40000000 , 0xffffffff, 0),
SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50031d00, 0xffffffff, 0),
SYSC_QUIRK("hsi", 0, 0, 0x10, 0x14, 0x50043101, 0xffffffff, 0),
SYSC_QUIRK("iss", 0, 0, 0x10, -ENODEV, 0x40000101, 0xffffffff, 0),
SYSC_QUIRK("keypad", 0x4a31c000, 0, 0x10, 0x14, 0x00000020, 0xffffffff, 0),
SYSC_QUIRK("mcasp", 0, 0, 0x4, -ENODEV, 0x44307b02, 0xffffffff, 0),
SYSC_QUIRK("mcbsp", 0, -ENODEV, 0x8c, -ENODEV, 0, 0, 0),
SYSC_QUIRK("mcspi", 0, 0, 0x10, -ENODEV, 0x40300a0b, 0xffff00ff, 0),
SYSC_QUIRK("mcspi", 0, 0, 0x110, 0x114, 0x40300a0b, 0xffffffff, 0),
SYSC_QUIRK("mailbox", 0, 0, 0x10, -ENODEV, 0x00000400, 0xffffffff, 0),
SYSC_QUIRK("m3", 0, 0, -ENODEV, -ENODEV, 0x5f580105, 0x0fff0f00, 0),
SYSC_QUIRK("ocp2scp", 0, 0, 0x10, 0x14, 0x50060005, 0xfffffff0, 0),
SYSC_QUIRK("ocp2scp", 0, 0, -ENODEV, -ENODEV, 0x50060007, 0xffffffff, 0),
SYSC_QUIRK("padconf", 0, 0, 0x10, -ENODEV, 0x4fff0800, 0xffffffff, 0),
SYSC_QUIRK("padconf", 0, 0, -ENODEV, -ENODEV, 0x40001100, 0xffffffff, 0),
SYSC_QUIRK("pcie", 0x51000000, -ENODEV, -ENODEV, -ENODEV, 0, 0, 0),
SYSC_QUIRK("pcie", 0x51800000, -ENODEV, -ENODEV, -ENODEV, 0, 0, 0),
SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x40000100, 0xffffffff, 0),
SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x00004102, 0xffffffff, 0),
SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x40000400, 0xffffffff, 0),
SYSC_QUIRK("rfbi", 0x4832a800, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
SYSC_QUIRK("rfbi", 0x58002000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
SYSC_QUIRK("scm", 0, 0, 0x10, -ENODEV, 0x40000900, 0xffffffff, 0),
SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4e8b0100, 0xffffffff, 0),
SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4f000100, 0xffffffff, 0),
SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x40000900, 0xffffffff, 0),
SYSC_QUIRK("scrm", 0, 0, -ENODEV, -ENODEV, 0x00000010, 0xffffffff, 0),
SYSC_QUIRK("sdio", 0, 0, 0x10, -ENODEV, 0x40202301, 0xffff0ff0, 0),
SYSC_QUIRK("sdio", 0, 0x2fc, 0x110, 0x114, 0x31010000, 0xffffffff, 0),
SYSC_QUIRK("sdma", 0, 0, 0x2c, 0x28, 0x00010900, 0xffffffff, 0),
SYSC_QUIRK("sham", 0, 0x100, 0x110, 0x114, 0x40000c03, 0xffffffff, 0),
SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40000902, 0xffffffff, 0),
SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40002903, 0xffffffff, 0),
SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x24, -ENODEV, 0x00000000, 0xffffffff, 0),
SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x38, -ENODEV, 0x00000000, 0xffffffff, 0),
SYSC_QUIRK("spinlock", 0, 0, 0x10, -ENODEV, 0x50020000, 0xffffffff, 0),
SYSC_QUIRK("rng", 0, 0x1fe0, 0x1fe4, -ENODEV, 0x00000020, 0xffffffff, 0),
SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000013, 0xffffffff, 0),
SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff, 0),
/* Some timers on omap4 and later */
SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x50002100, 0xffffffff, 0),
SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x4fff1301, 0xffff00ff, 0),
SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000040, 0xffffffff, 0),
SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000011, 0xffffffff, 0),
SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000060, 0xffffffff, 0),
SYSC_QUIRK("tpcc", 0, 0, -ENODEV, -ENODEV, 0x40014c00, 0xffffffff, 0),
SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000004, 0xffffffff, 0),
SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000008, 0xffffffff, 0),
SYSC_QUIRK("venc", 0x58003000, 0, -ENODEV, -ENODEV, 0x00000002, 0xffffffff, 0),
SYSC_QUIRK("vfpe", 0, 0, 0x104, -ENODEV, 0x4d001200, 0xffffffff, 0),
#endif
};
/*
 * Early quirks based only on the module base and register offsets; these
 * are needed before the module revision can be read
*/
static void sysc_init_early_quirks(struct sysc *ddata)
{
const struct sysc_revision_quirk *q;
int i;
for (i = 0; i < ARRAY_SIZE(sysc_revision_quirks); i++) {
q = &sysc_revision_quirks[i];
if (!q->base)
continue;
if (q->base != ddata->module_pa)
continue;
if (q->rev_offset != ddata->offsets[SYSC_REVISION])
continue;
if (q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG])
continue;
if (q->syss_offset != ddata->offsets[SYSC_SYSSTATUS])
continue;
ddata->name = q->name;
ddata->cfg.quirks |= q->quirks;
}
}
/* Quirks that also consider the revision register value */
static void sysc_init_revision_quirks(struct sysc *ddata)
{
const struct sysc_revision_quirk *q;
int i;
for (i = 0; i < ARRAY_SIZE(sysc_revision_quirks); i++) {
q = &sysc_revision_quirks[i];
if (q->base && q->base != ddata->module_pa)
continue;
if (q->rev_offset != ddata->offsets[SYSC_REVISION])
continue;
if (q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG])
continue;
if (q->syss_offset != ddata->offsets[SYSC_SYSSTATUS])
continue;
if (q->revision == ddata->revision ||
(q->revision & q->revision_mask) ==
(ddata->revision & q->revision_mask)) {
ddata->name = q->name;
ddata->cfg.quirks |= q->quirks;
}
}
}
/*
* DSS needs dispc outputs disabled to reset modules. Returns mask of
* enabled DSS interrupts. Eventually we may be able to do this on
* dispc init rather than top-level DSS init.
*/
static u32 sysc_quirk_dispc(struct sysc *ddata, int dispc_offset,
bool disable)
{
bool lcd_en, digit_en, lcd2_en = false, lcd3_en = false;
const int lcd_en_mask = BIT(0), digit_en_mask = BIT(1);
int manager_count;
bool framedonetv_irq = true;
u32 val, irq_mask = 0;
switch (sysc_soc->soc) {
case SOC_2420 ... SOC_3630:
manager_count = 2;
framedonetv_irq = false;
break;
case SOC_4430 ... SOC_4470:
manager_count = 3;
break;
case SOC_5430:
case SOC_DRA7:
manager_count = 4;
break;
case SOC_AM4:
manager_count = 1;
framedonetv_irq = false;
break;
case SOC_UNKNOWN:
default:
return 0;
}
/* Remap the whole module range to be able to reset dispc outputs */
devm_iounmap(ddata->dev, ddata->module_va);
ddata->module_va = devm_ioremap(ddata->dev,
ddata->module_pa,
ddata->module_size);
if (!ddata->module_va)
		return 0;	/* u32 return, so no -EIO; treat as no enabled outputs */
	/* DISPC_CONTROL, shut down lcd and digit on disable if enabled */
val = sysc_read(ddata, dispc_offset + 0x40);
lcd_en = val & lcd_en_mask;
digit_en = val & digit_en_mask;
if (lcd_en)
irq_mask |= BIT(0); /* FRAMEDONE */
if (digit_en) {
if (framedonetv_irq)
irq_mask |= BIT(24); /* FRAMEDONETV */
else
irq_mask |= BIT(2) | BIT(3); /* EVSYNC bits */
}
if (disable && (lcd_en || digit_en))
sysc_write(ddata, dispc_offset + 0x40,
val & ~(lcd_en_mask | digit_en_mask));
if (manager_count <= 2)
return irq_mask;
/* DISPC_CONTROL2 */
val = sysc_read(ddata, dispc_offset + 0x238);
lcd2_en = val & lcd_en_mask;
if (lcd2_en)
irq_mask |= BIT(22); /* FRAMEDONE2 */
if (disable && lcd2_en)
sysc_write(ddata, dispc_offset + 0x238,
val & ~lcd_en_mask);
if (manager_count <= 3)
return irq_mask;
/* DISPC_CONTROL3 */
val = sysc_read(ddata, dispc_offset + 0x848);
lcd3_en = val & lcd_en_mask;
if (lcd3_en)
irq_mask |= BIT(30); /* FRAMEDONE3 */
if (disable && lcd3_en)
sysc_write(ddata, dispc_offset + 0x848,
val & ~lcd_en_mask);
return irq_mask;
}
/* DSS needs child outputs disabled and SDI registers cleared for reset */
static void sysc_pre_reset_quirk_dss(struct sysc *ddata)
{
const int dispc_offset = 0x1000;
int error;
u32 irq_mask, val;
/* Get enabled outputs */
irq_mask = sysc_quirk_dispc(ddata, dispc_offset, false);
if (!irq_mask)
return;
/* Clear IRQSTATUS */
sysc_write(ddata, dispc_offset + 0x18, irq_mask);
/* Disable outputs */
val = sysc_quirk_dispc(ddata, dispc_offset, true);
/* Poll IRQSTATUS */
error = readl_poll_timeout(ddata->module_va + dispc_offset + 0x18,
val, val != irq_mask, 100, 50);
if (error)
dev_warn(ddata->dev, "%s: timed out %08x !+ %08x\n",
__func__, val, irq_mask);
if (sysc_soc->soc == SOC_3430) {
/* Clear DSS_SDI_CONTROL */
sysc_write(ddata, 0x44, 0);
/* Clear DSS_PLL_CONTROL */
sysc_write(ddata, 0x48, 0);
}
	/* Clear DSS_CONTROL to switch DSS clock sources back to PRCM if needed */
sysc_write(ddata, 0x40, 0);
}
/* 1-wire needs module's internal clocks enabled for reset */
static void sysc_pre_reset_quirk_hdq1w(struct sysc *ddata)
{
int offset = 0x0c; /* HDQ_CTRL_STATUS */
u16 val;
val = sysc_read(ddata, offset);
val |= BIT(5);
sysc_write(ddata, offset, val);
}
/* AESS (Audio Engine SubSystem) needs autogating set after enable */
static void sysc_module_enable_quirk_aess(struct sysc *ddata)
{
int offset = 0x7c; /* AESS_AUTO_GATING_ENABLE */
sysc_write(ddata, offset, 1);
}
/* I2C needs to be disabled for reset */
static void sysc_clk_quirk_i2c(struct sysc *ddata, bool enable)
{
int offset;
u16 val;
/* I2C_CON, omap2/3 is different from omap4 and later */
if ((ddata->revision & 0xffffff00) == 0x001f0000)
offset = 0x24;
else
offset = 0xa4;
/* I2C_EN */
val = sysc_read(ddata, offset);
if (enable)
val |= BIT(15);
else
val &= ~BIT(15);
sysc_write(ddata, offset, val);
}
static void sysc_pre_reset_quirk_i2c(struct sysc *ddata)
{
sysc_clk_quirk_i2c(ddata, false);
}
static void sysc_post_reset_quirk_i2c(struct sysc *ddata)
{
sysc_clk_quirk_i2c(ddata, true);
}
/* RTC on am3 and 4 needs to be unlocked and locked for sysconfig */
static void sysc_quirk_rtc(struct sysc *ddata, bool lock)
{
u32 val, kick0_val = 0, kick1_val = 0;
unsigned long flags;
int error;
if (!lock) {
kick0_val = 0x83e70b13;
kick1_val = 0x95a4f1e0;
}
local_irq_save(flags);
/* RTC_STATUS BUSY bit may stay active for 1/32768 seconds (~30 usec) */
error = readl_poll_timeout_atomic(ddata->module_va + 0x44, val,
!(val & BIT(0)), 100, 50);
if (error)
dev_warn(ddata->dev, "rtc busy timeout\n");
/* Now we have ~15 microseconds to read/write various registers */
sysc_write(ddata, 0x6c, kick0_val);
sysc_write(ddata, 0x70, kick1_val);
local_irq_restore(flags);
}
static void sysc_module_unlock_quirk_rtc(struct sysc *ddata)
{
sysc_quirk_rtc(ddata, false);
}
static void sysc_module_lock_quirk_rtc(struct sysc *ddata)
{
sysc_quirk_rtc(ddata, true);
}
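/*
 * Sketch of how the RTC lock quirks above get used: sysc_write_sysconfig()
 * brackets the sysconfig write with the unlock and lock hooks, so for a
 * locked RTC the sequence effectively becomes:
 */
#if 0
	sysc_module_unlock_quirk_rtc(ddata);	/* write kick0/kick1 keys */
	sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], value);
	sysc_module_lock_quirk_rtc(ddata);	/* write zeroes to relock */
#endif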
/* OTG omap2430 glue layer up to omap4 needs OTG_FORCESTDBY configured */
static void sysc_module_enable_quirk_otg(struct sysc *ddata)
{
int offset = 0x414; /* OTG_FORCESTDBY */
sysc_write(ddata, offset, 0);
}
static void sysc_module_disable_quirk_otg(struct sysc *ddata)
{
int offset = 0x414; /* OTG_FORCESTDBY */
u32 val = BIT(0); /* ENABLEFORCE */
sysc_write(ddata, offset, val);
}
/* 36xx SGX needs a quirk to bypass OCP IPG interrupt logic */
static void sysc_module_enable_quirk_sgx(struct sysc *ddata)
{
int offset = 0xff08; /* OCP_DEBUG_CONFIG */
u32 val = BIT(31); /* THALIA_INT_BYPASS */
sysc_write(ddata, offset, val);
}
/* Watchdog timer needs a disable sequence after reset */
static void sysc_reset_done_quirk_wdt(struct sysc *ddata)
{
int wps, spr, error;
u32 val;
wps = 0x34;
spr = 0x48;
sysc_write(ddata, spr, 0xaaaa);
error = readl_poll_timeout(ddata->module_va + wps, val,
!(val & 0x10), 100,
MAX_MODULE_SOFTRESET_WAIT);
if (error)
dev_warn(ddata->dev, "wdt disable step1 failed\n");
sysc_write(ddata, spr, 0x5555);
error = readl_poll_timeout(ddata->module_va + wps, val,
!(val & 0x10), 100,
MAX_MODULE_SOFTRESET_WAIT);
if (error)
dev_warn(ddata->dev, "wdt disable step2 failed\n");
}
/* PRUSS needs to set MSTANDBY_INIT in order to idle properly */
static void sysc_module_disable_quirk_pruss(struct sysc *ddata)
{
u32 reg;
reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
reg |= SYSC_PRUSS_STANDBY_INIT;
sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
}
static void sysc_init_module_quirks(struct sysc *ddata)
{
if (ddata->legacy_mode || !ddata->name)
return;
if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_HDQ1W) {
ddata->pre_reset_quirk = sysc_pre_reset_quirk_hdq1w;
return;
}
#ifdef CONFIG_OMAP_GPMC_DEBUG
if (ddata->cfg.quirks & SYSC_QUIRK_GPMC_DEBUG) {
ddata->cfg.quirks |= SYSC_QUIRK_NO_RESET_ON_INIT;
return;
}
#endif
if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_I2C) {
ddata->pre_reset_quirk = sysc_pre_reset_quirk_i2c;
ddata->post_reset_quirk = sysc_post_reset_quirk_i2c;
return;
}
if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_AESS)
ddata->module_enable_quirk = sysc_module_enable_quirk_aess;
if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_DSS_RESET)
ddata->pre_reset_quirk = sysc_pre_reset_quirk_dss;
if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_RTC_UNLOCK) {
ddata->module_unlock_quirk = sysc_module_unlock_quirk_rtc;
ddata->module_lock_quirk = sysc_module_lock_quirk_rtc;
return;
}
if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_OTG) {
ddata->module_enable_quirk = sysc_module_enable_quirk_otg;
ddata->module_disable_quirk = sysc_module_disable_quirk_otg;
}
if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_SGX)
ddata->module_enable_quirk = sysc_module_enable_quirk_sgx;
if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_WDT) {
ddata->reset_done_quirk = sysc_reset_done_quirk_wdt;
ddata->module_disable_quirk = sysc_reset_done_quirk_wdt;
}
if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_PRUSS)
ddata->module_disable_quirk = sysc_module_disable_quirk_pruss;
}
static int sysc_clockdomain_init(struct sysc *ddata)
{
struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
struct clk *fck = NULL, *ick = NULL;
int error;
if (!pdata || !pdata->init_clockdomain)
return 0;
switch (ddata->nr_clocks) {
case 2:
ick = ddata->clocks[SYSC_ICK];
fallthrough;
case 1:
fck = ddata->clocks[SYSC_FCK];
break;
case 0:
return 0;
}
error = pdata->init_clockdomain(ddata->dev, fck, ick, &ddata->cookie);
if (!error || error == -ENODEV)
return 0;
return error;
}
/*
* Note that pdata->init_module() typically does a reset first. After
* pdata->init_module() is done, PM runtime can be used for the interconnect
* target module.
*/
static int sysc_legacy_init(struct sysc *ddata)
{
struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
int error;
if (!pdata || !pdata->init_module)
return 0;
error = pdata->init_module(ddata->dev, ddata->mdata, &ddata->cookie);
if (error == -EEXIST)
error = 0;
return error;
}
/*
* Note that the caller must ensure the interconnect target module is enabled
* before calling reset. Otherwise reset will not complete.
*/
static int sysc_reset(struct sysc *ddata)
{
int sysc_offset, sysc_val, error;
u32 sysc_mask;
sysc_offset = ddata->offsets[SYSC_SYSCONFIG];
if (ddata->legacy_mode ||
ddata->cap->regbits->srst_shift < 0 ||
ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)
return 0;
sysc_mask = BIT(ddata->cap->regbits->srst_shift);
if (ddata->pre_reset_quirk)
ddata->pre_reset_quirk(ddata);
if (sysc_offset >= 0) {
sysc_val = sysc_read_sysconfig(ddata);
sysc_val |= sysc_mask;
sysc_write(ddata, sysc_offset, sysc_val);
/* Flush posted write */
sysc_val = sysc_read_sysconfig(ddata);
}
if (ddata->cfg.srst_udelay)
usleep_range(ddata->cfg.srst_udelay,
ddata->cfg.srst_udelay * 2);
if (ddata->post_reset_quirk)
ddata->post_reset_quirk(ddata);
error = sysc_wait_softreset(ddata);
if (error)
dev_warn(ddata->dev, "OCP softreset timed out\n");
if (ddata->reset_done_quirk)
ddata->reset_done_quirk(ddata);
return error;
}
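/*
 * To summarize the sequence above: run the optional pre-reset quirk, set
 * the SOFTRESET bit in sysconfig and flush the posted write, honor any
 * srst_udelay and the optional post-reset quirk, poll for completion with
 * sysc_wait_softreset(), and finally run the optional reset done quirk
 * such as the watchdog disable sequence.
 */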
/*
* At this point the module is configured enough to read the revision but
* module may not be completely configured yet to use PM runtime. Enable
* all clocks directly during init to configure the quirks needed for PM
* runtime based on the revision register.
*/
static int sysc_init_module(struct sysc *ddata)
{
bool rstctrl_deasserted = false;
int error = 0;
error = sysc_clockdomain_init(ddata);
if (error)
return error;
sysc_clkdm_deny_idle(ddata);
/*
* Always enable clocks. The bootloader may or may not have enabled
* the related clocks.
*/
error = sysc_enable_opt_clocks(ddata);
if (error)
return error;
error = sysc_enable_main_clocks(ddata);
if (error)
goto err_opt_clocks;
if (!(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)) {
error = reset_control_deassert(ddata->rsts);
if (error)
goto err_main_clocks;
rstctrl_deasserted = true;
}
ddata->revision = sysc_read_revision(ddata);
sysc_init_revision_quirks(ddata);
sysc_init_module_quirks(ddata);
if (ddata->legacy_mode) {
error = sysc_legacy_init(ddata);
if (error)
goto err_main_clocks;
}
if (!ddata->legacy_mode) {
error = sysc_enable_module(ddata->dev);
if (error)
goto err_main_clocks;
}
error = sysc_reset(ddata);
if (error)
dev_err(ddata->dev, "Reset failed with %d\n", error);
if (error && !ddata->legacy_mode)
sysc_disable_module(ddata->dev);
err_main_clocks:
if (error)
sysc_disable_main_clocks(ddata);
err_opt_clocks:
/* No re-enable of clockdomain autoidle to prevent module autoidle */
if (error) {
sysc_disable_opt_clocks(ddata);
sysc_clkdm_allow_idle(ddata);
}
if (error && rstctrl_deasserted &&
!(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT))
reset_control_assert(ddata->rsts);
return error;
}
static int sysc_init_sysc_mask(struct sysc *ddata)
{
struct device_node *np = ddata->dev->of_node;
int error;
u32 val;
error = of_property_read_u32(np, "ti,sysc-mask", &val);
if (error)
return 0;
ddata->cfg.sysc_val = val & ddata->cap->sysc_mask;
return 0;
}
static int sysc_init_idlemode(struct sysc *ddata, u8 *idlemodes,
const char *name)
{
struct device_node *np = ddata->dev->of_node;
struct property *prop;
const __be32 *p;
u32 val;
of_property_for_each_u32(np, name, prop, p, val) {
if (val >= SYSC_NR_IDLEMODES) {
dev_err(ddata->dev, "invalid idlemode: %i\n", val);
return -EINVAL;
}
*idlemodes |= (1 << val);
}
return 0;
}
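/*
 * For example, a devicetree property using the dt-bindings/bus/ti-sysc.h
 * macros such as:
 *
 *   ti,sysc-sidle = <SYSC_IDLE_FORCE>, <SYSC_IDLE_NO>, <SYSC_IDLE_SMART>;
 *
 * parses to a sidlemodes bitmask of BIT(0) | BIT(1) | BIT(2) = 0x7.
 */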
static int sysc_init_idlemodes(struct sysc *ddata)
{
int error;
error = sysc_init_idlemode(ddata, &ddata->cfg.midlemodes,
"ti,sysc-midle");
if (error)
return error;
error = sysc_init_idlemode(ddata, &ddata->cfg.sidlemodes,
"ti,sysc-sidle");
if (error)
return error;
return 0;
}
/*
* Only some devices on omap4 and later have SYSCONFIG reset done
* bit. We can detect this if there is no SYSSTATUS at all, or the
 * SYSSTATUS bit 0 is not used. Note that some SYSSTATUS registers
* have multiple bits for the child devices like OHCI and EHCI.
* Depends on SYSC being parsed first.
*/
static int sysc_init_syss_mask(struct sysc *ddata)
{
struct device_node *np = ddata->dev->of_node;
int error;
u32 val;
error = of_property_read_u32(np, "ti,syss-mask", &val);
if (error) {
if ((ddata->cap->type == TI_SYSC_OMAP4 ||
ddata->cap->type == TI_SYSC_OMAP4_TIMER) &&
(ddata->cfg.sysc_val & SYSC_OMAP4_SOFTRESET))
ddata->cfg.quirks |= SYSC_QUIRK_RESET_STATUS;
return 0;
}
if (!(val & 1) && (ddata->cfg.sysc_val & SYSC_OMAP4_SOFTRESET))
ddata->cfg.quirks |= SYSC_QUIRK_RESET_STATUS;
ddata->cfg.syss_mask = val;
return 0;
}
/*
* Many child device drivers need to have fck and opt clocks available
* to get the clock rate for device internal configuration etc.
*/
static int sysc_child_add_named_clock(struct sysc *ddata,
struct device *child,
const char *name)
{
struct clk *clk;
struct clk_lookup *l;
int error = 0;
if (!name)
return 0;
clk = clk_get(child, name);
if (!IS_ERR(clk)) {
error = -EEXIST;
goto put_clk;
}
clk = clk_get(ddata->dev, name);
if (IS_ERR(clk))
return -ENODEV;
l = clkdev_create(clk, name, dev_name(child));
if (!l)
error = -ENOMEM;
put_clk:
clk_put(clk);
return error;
}
static int sysc_child_add_clocks(struct sysc *ddata,
struct device *child)
{
int i, error;
for (i = 0; i < ddata->nr_clocks; i++) {
error = sysc_child_add_named_clock(ddata,
child,
ddata->clock_roles[i]);
if (error && error != -EEXIST) {
dev_err(ddata->dev, "could not add child clock %s: %i\n",
ddata->clock_roles[i], error);
return error;
}
}
return 0;
}
static struct device_type sysc_device_type = {
};
static struct sysc *sysc_child_to_parent(struct device *dev)
{
struct device *parent = dev->parent;
if (!parent || parent->type != &sysc_device_type)
return NULL;
return dev_get_drvdata(parent);
}
static int __maybe_unused sysc_child_runtime_suspend(struct device *dev)
{
struct sysc *ddata;
int error;
ddata = sysc_child_to_parent(dev);
error = pm_generic_runtime_suspend(dev);
if (error)
return error;
if (!ddata->enabled)
return 0;
return sysc_runtime_suspend(ddata->dev);
}
static int __maybe_unused sysc_child_runtime_resume(struct device *dev)
{
struct sysc *ddata;
int error;
ddata = sysc_child_to_parent(dev);
if (!ddata->enabled) {
error = sysc_runtime_resume(ddata->dev);
if (error < 0)
dev_err(ddata->dev,
"%s error: %i\n", __func__, error);
}
return pm_generic_runtime_resume(dev);
}
#ifdef CONFIG_PM_SLEEP
static int sysc_child_suspend_noirq(struct device *dev)
{
struct sysc *ddata;
int error;
ddata = sysc_child_to_parent(dev);
dev_dbg(ddata->dev, "%s %s\n", __func__,
ddata->name ? ddata->name : "");
error = pm_generic_suspend_noirq(dev);
if (error) {
dev_err(dev, "%s error at %i: %i\n",
__func__, __LINE__, error);
return error;
}
if (!pm_runtime_status_suspended(dev)) {
error = pm_generic_runtime_suspend(dev);
if (error) {
dev_dbg(dev, "%s busy at %i: %i\n",
__func__, __LINE__, error);
return 0;
}
error = sysc_runtime_suspend(ddata->dev);
if (error) {
dev_err(dev, "%s error at %i: %i\n",
__func__, __LINE__, error);
return error;
}
ddata->child_needs_resume = true;
}
return 0;
}
static int sysc_child_resume_noirq(struct device *dev)
{
struct sysc *ddata;
int error;
ddata = sysc_child_to_parent(dev);
dev_dbg(ddata->dev, "%s %s\n", __func__,
ddata->name ? ddata->name : "");
if (ddata->child_needs_resume) {
ddata->child_needs_resume = false;
error = sysc_runtime_resume(ddata->dev);
if (error)
dev_err(ddata->dev,
"%s runtime resume error: %i\n",
__func__, error);
error = pm_generic_runtime_resume(dev);
if (error)
dev_err(ddata->dev,
"%s generic runtime resume: %i\n",
__func__, error);
}
return pm_generic_resume_noirq(dev);
}
#endif
static struct dev_pm_domain sysc_child_pm_domain = {
.ops = {
SET_RUNTIME_PM_OPS(sysc_child_runtime_suspend,
sysc_child_runtime_resume,
NULL)
USE_PLATFORM_PM_SLEEP_OPS
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(sysc_child_suspend_noirq,
sysc_child_resume_noirq)
}
};
/* Caller needs to take list_lock if ever used outside of cpu_pm */
static void sysc_reinit_modules(struct sysc_soc_info *soc)
{
struct sysc_module *module;
struct sysc *ddata;
list_for_each_entry(module, &sysc_soc->restored_modules, node) {
ddata = module->ddata;
sysc_reinit_module(ddata, ddata->enabled);
}
}
/**
* sysc_context_notifier - optionally reset and restore module after idle
* @nb: notifier block
* @cmd: unused
* @v: unused
*
* Some interconnect target modules need to be restored, or reset and restored
* on the CPU_PM CPU_CLUSTER_PM_EXIT notifier event. This is needed at least for am335x
* OTG and GPMC target modules even if the modules are unused.
*/
static int sysc_context_notifier(struct notifier_block *nb, unsigned long cmd,
void *v)
{
struct sysc_soc_info *soc;
soc = container_of(nb, struct sysc_soc_info, nb);
switch (cmd) {
case CPU_CLUSTER_PM_ENTER:
break;
case CPU_CLUSTER_PM_ENTER_FAILED: /* No need to restore context */
break;
case CPU_CLUSTER_PM_EXIT:
sysc_reinit_modules(soc);
break;
}
return NOTIFY_OK;
}
/**
* sysc_add_restored - optionally add reset and restore quirk handling
* @ddata: device data
*/
static void sysc_add_restored(struct sysc *ddata)
{
struct sysc_module *restored_module;
restored_module = kzalloc(sizeof(*restored_module), GFP_KERNEL);
if (!restored_module)
return;
restored_module->ddata = ddata;
mutex_lock(&sysc_soc->list_lock);
list_add(&restored_module->node, &sysc_soc->restored_modules);
if (sysc_soc->nb.notifier_call)
goto out_unlock;
sysc_soc->nb.notifier_call = sysc_context_notifier;
cpu_pm_register_notifier(&sysc_soc->nb);
out_unlock:
mutex_unlock(&sysc_soc->list_lock);
}
/**
* sysc_legacy_idle_quirk - handle children in omap_device compatible way
* @ddata: device driver data
* @child: child device driver
*
* Allow idle for child devices as done with _od_runtime_suspend().
* Otherwise many child devices will not idle because of the permanent
* parent usecount set in pm_runtime_irq_safe().
*
* Note that the long term solution is to just modify the child device
* drivers to not set pm_runtime_irq_safe() and then this can be just
* dropped.
*/
static void sysc_legacy_idle_quirk(struct sysc *ddata, struct device *child)
{
if (ddata->cfg.quirks & SYSC_QUIRK_LEGACY_IDLE)
dev_pm_domain_set(child, &sysc_child_pm_domain);
}
static int sysc_notifier_call(struct notifier_block *nb,
unsigned long event, void *device)
{
struct device *dev = device;
struct sysc *ddata;
int error;
ddata = sysc_child_to_parent(dev);
if (!ddata)
return NOTIFY_DONE;
switch (event) {
case BUS_NOTIFY_ADD_DEVICE:
error = sysc_child_add_clocks(ddata, dev);
if (error)
return error;
sysc_legacy_idle_quirk(ddata, dev);
break;
default:
break;
}
return NOTIFY_DONE;
}
static struct notifier_block sysc_nb = {
.notifier_call = sysc_notifier_call,
};
/* Device tree configured quirks */
struct sysc_dts_quirk {
const char *name;
u32 mask;
};
static const struct sysc_dts_quirk sysc_dts_quirks[] = {
{ .name = "ti,no-idle-on-init",
.mask = SYSC_QUIRK_NO_IDLE_ON_INIT, },
{ .name = "ti,no-reset-on-init",
.mask = SYSC_QUIRK_NO_RESET_ON_INIT, },
{ .name = "ti,no-idle",
.mask = SYSC_QUIRK_NO_IDLE, },
};
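/*
 * Illustrative only: a hypothetical node such as
 *
 *	target-module@48310000 {
 *		compatible = "ti,sysc-omap4";
 *		ti,no-reset-on-init;
 *	};
 *
 * would make sysc_parse_dts_quirks() below set
 * SYSC_QUIRK_NO_RESET_ON_INIT in ddata->cfg.quirks.
 */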
static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
bool is_child)
{
const struct property *prop;
int i, len;
for (i = 0; i < ARRAY_SIZE(sysc_dts_quirks); i++) {
const char *name = sysc_dts_quirks[i].name;
prop = of_get_property(np, name, &len);
if (!prop)
continue;
ddata->cfg.quirks |= sysc_dts_quirks[i].mask;
if (is_child) {
dev_warn(ddata->dev,
"dts flag should be at module level for %s\n",
name);
}
}
}
static int sysc_init_dts_quirks(struct sysc *ddata)
{
struct device_node *np = ddata->dev->of_node;
int error;
u32 val;
ddata->legacy_mode = of_get_property(np, "ti,hwmods", NULL);
sysc_parse_dts_quirks(ddata, np, false);
error = of_property_read_u32(np, "ti,sysc-delay-us", &val);
if (!error) {
if (val > 255) {
dev_warn(ddata->dev, "bad ti,sysc-delay-us: %i\n",
val);
}
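/* Out-of-range delays are only warned about and then truncated to u8 */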
ddata->cfg.srst_udelay = (u8)val;
}
return 0;
}
static void sysc_unprepare(struct sysc *ddata)
{
int i;
if (!ddata->clocks)
return;
for (i = 0; i < SYSC_MAX_CLOCKS; i++) {
if (!IS_ERR_OR_NULL(ddata->clocks[i]))
clk_unprepare(ddata->clocks[i]);
}
}
/*
* Common sysc register bits found on omap2, also known as type1
*/
static const struct sysc_regbits sysc_regbits_omap2 = {
.dmadisable_shift = -ENODEV,
.midle_shift = 12,
.sidle_shift = 3,
.clkact_shift = 8,
.emufree_shift = 5,
.enwkup_shift = 2,
.srst_shift = 1,
.autoidle_shift = 0,
};
static const struct sysc_capabilities sysc_omap2 = {
.type = TI_SYSC_OMAP2,
.sysc_mask = SYSC_OMAP2_CLOCKACTIVITY | SYSC_OMAP2_EMUFREE |
SYSC_OMAP2_ENAWAKEUP | SYSC_OMAP2_SOFTRESET |
SYSC_OMAP2_AUTOIDLE,
.regbits = &sysc_regbits_omap2,
};
/* All omap2 and 3 timers, and timers 1, 2 & 10 on omap 4 and 5 */
static const struct sysc_capabilities sysc_omap2_timer = {
.type = TI_SYSC_OMAP2_TIMER,
.sysc_mask = SYSC_OMAP2_CLOCKACTIVITY | SYSC_OMAP2_EMUFREE |
SYSC_OMAP2_ENAWAKEUP | SYSC_OMAP2_SOFTRESET |
SYSC_OMAP2_AUTOIDLE,
.regbits = &sysc_regbits_omap2,
.mod_quirks = SYSC_QUIRK_USE_CLOCKACT,
};
/*
* SHAM2 (SHA1/MD5) sysc found on omap3, a variant of sysc_regbits_omap2
* with different sidle position
*/
static const struct sysc_regbits sysc_regbits_omap3_sham = {
.dmadisable_shift = -ENODEV,
.midle_shift = -ENODEV,
.sidle_shift = 4,
.clkact_shift = -ENODEV,
.enwkup_shift = -ENODEV,
.srst_shift = 1,
.autoidle_shift = 0,
.emufree_shift = -ENODEV,
};
static const struct sysc_capabilities sysc_omap3_sham = {
.type = TI_SYSC_OMAP3_SHAM,
.sysc_mask = SYSC_OMAP2_SOFTRESET | SYSC_OMAP2_AUTOIDLE,
.regbits = &sysc_regbits_omap3_sham,
};
/*
* AES register bits found on omap3 and later, a variant of
* sysc_regbits_omap2 with different sidle position
*/
static const struct sysc_regbits sysc_regbits_omap3_aes = {
.dmadisable_shift = -ENODEV,
.midle_shift = -ENODEV,
.sidle_shift = 6,
.clkact_shift = -ENODEV,
.enwkup_shift = -ENODEV,
.srst_shift = 1,
.autoidle_shift = 0,
.emufree_shift = -ENODEV,
};
static const struct sysc_capabilities sysc_omap3_aes = {
.type = TI_SYSC_OMAP3_AES,
.sysc_mask = SYSC_OMAP2_SOFTRESET | SYSC_OMAP2_AUTOIDLE,
.regbits = &sysc_regbits_omap3_aes,
};
/*
* Common sysc register bits found on omap4, also known as type2
*/
static const struct sysc_regbits sysc_regbits_omap4 = {
.dmadisable_shift = 16,
.midle_shift = 4,
.sidle_shift = 2,
.clkact_shift = -ENODEV,
.enwkup_shift = -ENODEV,
.emufree_shift = 1,
.srst_shift = 0,
.autoidle_shift = -ENODEV,
};
static const struct sysc_capabilities sysc_omap4 = {
.type = TI_SYSC_OMAP4,
.sysc_mask = SYSC_OMAP4_DMADISABLE | SYSC_OMAP4_FREEEMU |
SYSC_OMAP4_SOFTRESET,
.regbits = &sysc_regbits_omap4,
};
static const struct sysc_capabilities sysc_omap4_timer = {
.type = TI_SYSC_OMAP4_TIMER,
.sysc_mask = SYSC_OMAP4_DMADISABLE | SYSC_OMAP4_FREEEMU |
SYSC_OMAP4_SOFTRESET,
.regbits = &sysc_regbits_omap4,
};
/*
* Common sysc register bits found on omap4, also known as type3
*/
static const struct sysc_regbits sysc_regbits_omap4_simple = {
.dmadisable_shift = -ENODEV,
.midle_shift = 2,
.sidle_shift = 0,
.clkact_shift = -ENODEV,
.enwkup_shift = -ENODEV,
.srst_shift = -ENODEV,
.emufree_shift = -ENODEV,
.autoidle_shift = -ENODEV,
};
static const struct sysc_capabilities sysc_omap4_simple = {
.type = TI_SYSC_OMAP4_SIMPLE,
.regbits = &sysc_regbits_omap4_simple,
};
/*
* SmartReflex sysc found on omap34xx
*/
static const struct sysc_regbits sysc_regbits_omap34xx_sr = {
.dmadisable_shift = -ENODEV,
.midle_shift = -ENODEV,
.sidle_shift = -ENODEV,
.clkact_shift = 20,
.enwkup_shift = -ENODEV,
.srst_shift = -ENODEV,
.emufree_shift = -ENODEV,
.autoidle_shift = -ENODEV,
};
static const struct sysc_capabilities sysc_34xx_sr = {
.type = TI_SYSC_OMAP34XX_SR,
.sysc_mask = SYSC_OMAP2_CLOCKACTIVITY,
.regbits = &sysc_regbits_omap34xx_sr,
.mod_quirks = SYSC_QUIRK_USE_CLOCKACT | SYSC_QUIRK_UNCACHED |
SYSC_QUIRK_LEGACY_IDLE,
};
/*
* SmartReflex sysc found on omap36xx and later
*/
static const struct sysc_regbits sysc_regbits_omap36xx_sr = {
.dmadisable_shift = -ENODEV,
.midle_shift = -ENODEV,
.sidle_shift = 24,
.clkact_shift = -ENODEV,
.enwkup_shift = 26,
.srst_shift = -ENODEV,
.emufree_shift = -ENODEV,
.autoidle_shift = -ENODEV,
};
static const struct sysc_capabilities sysc_36xx_sr = {
.type = TI_SYSC_OMAP36XX_SR,
.sysc_mask = SYSC_OMAP3_SR_ENAWAKEUP,
.regbits = &sysc_regbits_omap36xx_sr,
.mod_quirks = SYSC_QUIRK_UNCACHED | SYSC_QUIRK_LEGACY_IDLE,
};
static const struct sysc_capabilities sysc_omap4_sr = {
.type = TI_SYSC_OMAP4_SR,
.regbits = &sysc_regbits_omap36xx_sr,
.mod_quirks = SYSC_QUIRK_LEGACY_IDLE,
};
/*
* McASP register bits found on omap4 and later
*/
static const struct sysc_regbits sysc_regbits_omap4_mcasp = {
.dmadisable_shift = -ENODEV,
.midle_shift = -ENODEV,
.sidle_shift = 0,
.clkact_shift = -ENODEV,
.enwkup_shift = -ENODEV,
.srst_shift = -ENODEV,
.emufree_shift = -ENODEV,
.autoidle_shift = -ENODEV,
};
static const struct sysc_capabilities sysc_omap4_mcasp = {
.type = TI_SYSC_OMAP4_MCASP,
.regbits = &sysc_regbits_omap4_mcasp,
.mod_quirks = SYSC_QUIRK_OPT_CLKS_NEEDED,
};
/*
* McASP found on dra7 and later
*/
static const struct sysc_capabilities sysc_dra7_mcasp = {
.type = TI_SYSC_OMAP4_SIMPLE,
.regbits = &sysc_regbits_omap4_simple,
.mod_quirks = SYSC_QUIRK_OPT_CLKS_NEEDED,
};
/*
* FS USB host found on omap4 and later
*/
static const struct sysc_regbits sysc_regbits_omap4_usb_host_fs = {
.dmadisable_shift = -ENODEV,
.midle_shift = -ENODEV,
.sidle_shift = 24,
.clkact_shift = -ENODEV,
.enwkup_shift = 26,
.srst_shift = -ENODEV,
.emufree_shift = -ENODEV,
.autoidle_shift = -ENODEV,
};
static const struct sysc_capabilities sysc_omap4_usb_host_fs = {
.type = TI_SYSC_OMAP4_USB_HOST_FS,
.sysc_mask = SYSC_OMAP2_ENAWAKEUP,
.regbits = &sysc_regbits_omap4_usb_host_fs,
};
static const struct sysc_regbits sysc_regbits_dra7_mcan = {
.dmadisable_shift = -ENODEV,
.midle_shift = -ENODEV,
.sidle_shift = -ENODEV,
.clkact_shift = -ENODEV,
.enwkup_shift = 4,
.srst_shift = 0,
.emufree_shift = -ENODEV,
.autoidle_shift = -ENODEV,
};
static const struct sysc_capabilities sysc_dra7_mcan = {
.type = TI_SYSC_DRA7_MCAN,
.sysc_mask = SYSC_DRA7_MCAN_ENAWAKEUP | SYSC_OMAP4_SOFTRESET,
.regbits = &sysc_regbits_dra7_mcan,
.mod_quirks = SYSS_QUIRK_RESETDONE_INVERTED,
};
/*
* PRUSS found on some AM33xx, AM437x and AM57xx SoCs
*/
static const struct sysc_capabilities sysc_pruss = {
.type = TI_SYSC_PRUSS,
.sysc_mask = SYSC_PRUSS_STANDBY_INIT | SYSC_PRUSS_SUB_MWAIT,
.regbits = &sysc_regbits_omap4_simple,
.mod_quirks = SYSC_MODULE_QUIRK_PRUSS,
};
static int sysc_init_pdata(struct sysc *ddata)
{
struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
struct ti_sysc_module_data *mdata;
if (!pdata)
return 0;
mdata = devm_kzalloc(ddata->dev, sizeof(*mdata), GFP_KERNEL);
if (!mdata)
return -ENOMEM;
if (ddata->legacy_mode) {
mdata->name = ddata->legacy_mode;
mdata->module_pa = ddata->module_pa;
mdata->module_size = ddata->module_size;
mdata->offsets = ddata->offsets;
mdata->nr_offsets = SYSC_MAX_REGS;
mdata->cap = ddata->cap;
mdata->cfg = &ddata->cfg;
}
ddata->mdata = mdata;
return 0;
}
static int sysc_init_match(struct sysc *ddata)
{
const struct sysc_capabilities *cap;
cap = of_device_get_match_data(ddata->dev);
if (!cap)
return -EINVAL;
ddata->cap = cap;
if (ddata->cap)
ddata->cfg.quirks |= ddata->cap->mod_quirks;
return 0;
}
static void ti_sysc_idle(struct work_struct *work)
{
struct sysc *ddata;
ddata = container_of(work, struct sysc, idle_work.work);
/*
* One time decrement of clock usage counts if left on from init.
* Note that we disable opt clocks unconditionally in this case
* as they are enabled unconditionally during init without
* considering sysc_opt_clks_needed() at that point.
*/
if (ddata->cfg.quirks & (SYSC_QUIRK_NO_IDLE |
SYSC_QUIRK_NO_IDLE_ON_INIT)) {
sysc_disable_main_clocks(ddata);
sysc_disable_opt_clocks(ddata);
sysc_clkdm_allow_idle(ddata);
}
/* Keep permanent PM runtime usage count for SYSC_QUIRK_NO_IDLE */
if (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE)
return;
/*
* Decrement PM runtime usage count for SYSC_QUIRK_NO_IDLE_ON_INIT
* and SYSC_QUIRK_NO_RESET_ON_INIT
*/
if (pm_runtime_active(ddata->dev))
pm_runtime_put_sync(ddata->dev);
}
/*
* SoC model and features detection. Only needed for SoCs that need
* special handling for quirks; there is no need to list others.
*/
static const struct soc_device_attribute sysc_soc_match[] = {
SOC_FLAG("OMAP242*", SOC_2420),
SOC_FLAG("OMAP243*", SOC_2430),
SOC_FLAG("OMAP3[45]*", SOC_3430),
SOC_FLAG("OMAP3[67]*", SOC_3630),
SOC_FLAG("OMAP443*", SOC_4430),
SOC_FLAG("OMAP446*", SOC_4460),
SOC_FLAG("OMAP447*", SOC_4470),
SOC_FLAG("OMAP54*", SOC_5430),
SOC_FLAG("AM433", SOC_AM3),
SOC_FLAG("AM43*", SOC_AM4),
SOC_FLAG("DRA7*", SOC_DRA7),
{ /* sentinel */ }
};
/*
* List of SoCs variants with disabled features. By default we assume all
* devices in the device tree are available so no need to list those SoCs.
*/
static const struct soc_device_attribute sysc_soc_feat_match[] = {
/* OMAP3430/3530 and AM3517 variants with some accelerators disabled */
SOC_FLAG("AM3505", DIS_SGX),
SOC_FLAG("OMAP3525", DIS_SGX),
SOC_FLAG("OMAP3515", DIS_IVA | DIS_SGX),
SOC_FLAG("OMAP3503", DIS_ISP | DIS_IVA | DIS_SGX),
/* OMAP3630/DM3730 variants with some accelerators disabled */
SOC_FLAG("AM3703", DIS_IVA | DIS_SGX),
SOC_FLAG("DM3725", DIS_SGX),
SOC_FLAG("OMAP3611", DIS_ISP | DIS_IVA | DIS_SGX),
SOC_FLAG("OMAP3615/AM3715", DIS_IVA),
SOC_FLAG("OMAP3621", DIS_ISP),
{ /* sentinel */ }
};
static int sysc_add_disabled(unsigned long base)
{
struct sysc_address *disabled_module;
disabled_module = kzalloc(sizeof(*disabled_module), GFP_KERNEL);
if (!disabled_module)
return -ENOMEM;
disabled_module->base = base;
mutex_lock(&sysc_soc->list_lock);
list_add(&disabled_module->node, &sysc_soc->disabled_modules);
mutex_unlock(&sysc_soc->list_lock);
return 0;
}
/*
* One time init to detect the booted SoC, disable unavailable features
* and initialize list for optional cpu_pm notifier.
*
* Note that we initialize static data shared across all ti-sysc instances
* so ddata is only used for SoC type. This can be called from module_init
* once we no longer need to rely on platform data.
*/
static int sysc_init_static_data(struct sysc *ddata)
{
const struct soc_device_attribute *match;
struct ti_sysc_platform_data *pdata;
unsigned long features = 0;
struct device_node *np;
if (sysc_soc)
return 0;
sysc_soc = kzalloc(sizeof(*sysc_soc), GFP_KERNEL);
if (!sysc_soc)
return -ENOMEM;
mutex_init(&sysc_soc->list_lock);
INIT_LIST_HEAD(&sysc_soc->disabled_modules);
INIT_LIST_HEAD(&sysc_soc->restored_modules);
sysc_soc->general_purpose = true;
pdata = dev_get_platdata(ddata->dev);
if (pdata && pdata->soc_type_gp)
sysc_soc->general_purpose = pdata->soc_type_gp();
match = soc_device_match(sysc_soc_match);
if (match && match->data)
sysc_soc->soc = (enum sysc_soc)(uintptr_t)match->data;
/*
* Check and warn about possible old incomplete dtb. We now want to see
* simple-pm-bus instead of simple-bus in the dtb for genpd using SoCs.
*/
switch (sysc_soc->soc) {
case SOC_AM3:
case SOC_AM4:
case SOC_4430 ... SOC_4470:
case SOC_5430:
case SOC_DRA7:
np = of_find_node_by_path("/ocp");
WARN_ONCE(np && of_device_is_compatible(np, "simple-bus"),
"ti-sysc: Incomplete old dtb, please update\n");
break;
default:
break;
}
/* Ignore devices that are not available on HS and EMU SoCs */
if (!sysc_soc->general_purpose) {
switch (sysc_soc->soc) {
case SOC_3430 ... SOC_3630:
sysc_add_disabled(0x48304000); /* timer12 */
break;
case SOC_AM3:
sysc_add_disabled(0x48310000); /* rng */
break;
default:
break;
}
}
match = soc_device_match(sysc_soc_feat_match);
if (!match)
return 0;
if (match->data)
features = (unsigned long)match->data;
/*
* Add disabled devices to the list based on the module base.
* Note that this must be done before we attempt to access the
* device and have module revision checks working.
*/
if (features & DIS_ISP)
sysc_add_disabled(0x480bd400);
if (features & DIS_IVA)
sysc_add_disabled(0x5d000000);
if (features & DIS_SGX)
sysc_add_disabled(0x50000000);
return 0;
}
static void sysc_cleanup_static_data(void)
{
struct sysc_module *restored_module;
struct sysc_address *disabled_module;
struct list_head *pos, *tmp;
if (!sysc_soc)
return;
if (sysc_soc->nb.notifier_call)
cpu_pm_unregister_notifier(&sysc_soc->nb);
mutex_lock(&sysc_soc->list_lock);
list_for_each_safe(pos, tmp, &sysc_soc->restored_modules) {
restored_module = list_entry(pos, struct sysc_module, node);
list_del(pos);
kfree(restored_module);
}
list_for_each_safe(pos, tmp, &sysc_soc->disabled_modules) {
disabled_module = list_entry(pos, struct sysc_address, node);
list_del(pos);
kfree(disabled_module);
}
mutex_unlock(&sysc_soc->list_lock);
}
static int sysc_check_disabled_devices(struct sysc *ddata)
{
struct sysc_address *disabled_module;
int error = 0;
mutex_lock(&sysc_soc->list_lock);
list_for_each_entry(disabled_module, &sysc_soc->disabled_modules, node) {
if (ddata->module_pa == disabled_module->base) {
dev_dbg(ddata->dev, "module disabled for this SoC\n");
error = -ENODEV;
break;
}
}
mutex_unlock(&sysc_soc->list_lock);
return error;
}
/*
* Ignore timers tagged with no-reset and no-idle. These are likely in use,
* for example by drivers/clocksource/timer-ti-dm-systimer.c. If more checks
* are needed, we could also look at the timer register configuration.
*/
static int sysc_check_active_timer(struct sysc *ddata)
{
int error;
if (ddata->cap->type != TI_SYSC_OMAP2_TIMER &&
ddata->cap->type != TI_SYSC_OMAP4_TIMER)
return 0;
/*
* Quirk for omap3 beagleboard revision A to B4 to use gpt12.
* Revision C and later are fixed with commit 23885389dbbb ("ARM:
* dts: Fix timer regression for beagleboard revision c"). This all
* can be dropped if we stop supporting old beagleboard revisions
* A to B4 at some point.
*/
if (sysc_soc->soc == SOC_3430)
error = -ENXIO;
else
error = -EBUSY;
if ((ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT) &&
(ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE))
return error;
return 0;
}
static const struct of_device_id sysc_match_table[] = {
{ .compatible = "simple-bus", },
{ /* sentinel */ },
};
static int sysc_probe(struct platform_device *pdev)
{
struct ti_sysc_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct sysc *ddata;
int error;
ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
if (!ddata)
return -ENOMEM;
ddata->offsets[SYSC_REVISION] = -ENODEV;
ddata->offsets[SYSC_SYSCONFIG] = -ENODEV;
ddata->offsets[SYSC_SYSSTATUS] = -ENODEV;
ddata->dev = &pdev->dev;
platform_set_drvdata(pdev, ddata);
error = sysc_init_static_data(ddata);
if (error)
return error;
error = sysc_init_match(ddata);
if (error)
return error;
error = sysc_init_dts_quirks(ddata);
if (error)
return error;
error = sysc_map_and_check_registers(ddata);
if (error)
return error;
error = sysc_init_sysc_mask(ddata);
if (error)
return error;
error = sysc_init_idlemodes(ddata);
if (error)
return error;
error = sysc_init_syss_mask(ddata);
if (error)
return error;
error = sysc_init_pdata(ddata);
if (error)
return error;
sysc_init_early_quirks(ddata);
error = sysc_check_disabled_devices(ddata);
if (error)
return error;
error = sysc_check_active_timer(ddata);
if (error == -ENXIO)
ddata->reserved = true;
else if (error)
return error;
error = sysc_get_clocks(ddata);
if (error)
return error;
error = sysc_init_resets(ddata);
if (error)
goto unprepare;
error = sysc_init_module(ddata);
if (error)
goto unprepare;
pm_runtime_enable(ddata->dev);
error = pm_runtime_resume_and_get(ddata->dev);
if (error < 0) {
pm_runtime_disable(ddata->dev);
goto unprepare;
}
/* Balance use counts as PM runtime should have enabled these all */
if (!(ddata->cfg.quirks &
(SYSC_QUIRK_NO_IDLE | SYSC_QUIRK_NO_IDLE_ON_INIT))) {
sysc_disable_main_clocks(ddata);
sysc_disable_opt_clocks(ddata);
sysc_clkdm_allow_idle(ddata);
}
if (!(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT))
reset_control_assert(ddata->rsts);
sysc_show_registers(ddata);
ddata->dev->type = &sysc_device_type;
if (!ddata->reserved) {
error = of_platform_populate(ddata->dev->of_node,
sysc_match_table,
pdata ? pdata->auxdata : NULL,
ddata->dev);
if (error)
goto err;
}
INIT_DELAYED_WORK(&ddata->idle_work, ti_sysc_idle);
/* At least earlycon won't survive without deferred idle */
if (ddata->cfg.quirks & (SYSC_QUIRK_NO_IDLE |
SYSC_QUIRK_NO_IDLE_ON_INIT |
SYSC_QUIRK_NO_RESET_ON_INIT)) {
schedule_delayed_work(&ddata->idle_work, 3000);
} else {
pm_runtime_put(&pdev->dev);
}
if (ddata->cfg.quirks & SYSC_QUIRK_REINIT_ON_CTX_LOST)
sysc_add_restored(ddata);
return 0;
err:
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
unprepare:
sysc_unprepare(ddata);
return error;
}
static int sysc_remove(struct platform_device *pdev)
{
struct sysc *ddata = platform_get_drvdata(pdev);
int error;
/* Device can still be enabled, see deferred idle quirk in probe */
if (cancel_delayed_work_sync(&ddata->idle_work))
ti_sysc_idle(&ddata->idle_work.work);
error = pm_runtime_resume_and_get(ddata->dev);
if (error < 0) {
pm_runtime_disable(ddata->dev);
goto unprepare;
}
of_platform_depopulate(&pdev->dev);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
if (!reset_control_status(ddata->rsts))
reset_control_assert(ddata->rsts);
unprepare:
sysc_unprepare(ddata);
return 0;
}
static const struct of_device_id sysc_match[] = {
{ .compatible = "ti,sysc-omap2", .data = &sysc_omap2, },
{ .compatible = "ti,sysc-omap2-timer", .data = &sysc_omap2_timer, },
{ .compatible = "ti,sysc-omap4", .data = &sysc_omap4, },
{ .compatible = "ti,sysc-omap4-timer", .data = &sysc_omap4_timer, },
{ .compatible = "ti,sysc-omap4-simple", .data = &sysc_omap4_simple, },
{ .compatible = "ti,sysc-omap3430-sr", .data = &sysc_34xx_sr, },
{ .compatible = "ti,sysc-omap3630-sr", .data = &sysc_36xx_sr, },
{ .compatible = "ti,sysc-omap4-sr", .data = &sysc_omap4_sr, },
{ .compatible = "ti,sysc-omap3-sham", .data = &sysc_omap3_sham, },
{ .compatible = "ti,sysc-omap-aes", .data = &sysc_omap3_aes, },
{ .compatible = "ti,sysc-mcasp", .data = &sysc_omap4_mcasp, },
{ .compatible = "ti,sysc-dra7-mcasp", .data = &sysc_dra7_mcasp, },
{ .compatible = "ti,sysc-usb-host-fs",
.data = &sysc_omap4_usb_host_fs, },
{ .compatible = "ti,sysc-dra7-mcan", .data = &sysc_dra7_mcan, },
{ .compatible = "ti,sysc-pruss", .data = &sysc_pruss, },
{ },
};
MODULE_DEVICE_TABLE(of, sysc_match);
static struct platform_driver sysc_driver = {
.probe = sysc_probe,
.remove = sysc_remove,
.driver = {
.name = "ti-sysc",
.of_match_table = sysc_match,
.pm = &sysc_pm_ops,
},
};
static int __init sysc_init(void)
{
bus_register_notifier(&platform_bus_type, &sysc_nb);
return platform_driver_register(&sysc_driver);
}
module_init(sysc_init);
static void __exit sysc_exit(void)
{
bus_unregister_notifier(&platform_bus_type, &sysc_nb);
platform_driver_unregister(&sysc_driver);
sysc_cleanup_static_data();
}
module_exit(sysc_exit);
MODULE_DESCRIPTION("TI sysc interconnect target driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/bus/ti-sysc.c |
/*
* Bus driver for MIPS Common Device Memory Map (CDMM).
*
* Copyright (C) 2014-2015 Imagination Technologies Ltd.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <asm/cdmm.h>
#include <asm/hazards.h>
#include <asm/mipsregs.h>
/* Access control and status register fields */
#define CDMM_ACSR_DEVTYPE_SHIFT 24
#define CDMM_ACSR_DEVTYPE (255ul << CDMM_ACSR_DEVTYPE_SHIFT)
#define CDMM_ACSR_DEVSIZE_SHIFT 16
#define CDMM_ACSR_DEVSIZE (31ul << CDMM_ACSR_DEVSIZE_SHIFT)
#define CDMM_ACSR_DEVREV_SHIFT 12
#define CDMM_ACSR_DEVREV (15ul << CDMM_ACSR_DEVREV_SHIFT)
#define CDMM_ACSR_UW (1ul << 3)
#define CDMM_ACSR_UR (1ul << 2)
#define CDMM_ACSR_SW (1ul << 1)
#define CDMM_ACSR_SR (1ul << 0)
/* Each block of device registers is 64 bytes */
#define CDMM_DRB_SIZE 64
#define to_mips_cdmm_driver(d) container_of(d, struct mips_cdmm_driver, drv)
/* Default physical base address */
static phys_addr_t mips_cdmm_default_base;
/* Bus operations */
static const struct mips_cdmm_device_id *
mips_cdmm_lookup(const struct mips_cdmm_device_id *table,
struct mips_cdmm_device *dev)
{
int ret = 0;
for (; table->type; ++table) {
ret = (dev->type == table->type);
if (ret)
break;
}
return ret ? table : NULL;
}
static int mips_cdmm_match(struct device *dev, struct device_driver *drv)
{
struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev);
struct mips_cdmm_driver *cdrv = to_mips_cdmm_driver(drv);
return mips_cdmm_lookup(cdrv->id_table, cdev) != NULL;
}
static int mips_cdmm_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
const struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev);
int retval = 0;
retval = add_uevent_var(env, "CDMM_CPU=%u", cdev->cpu);
if (retval)
return retval;
retval = add_uevent_var(env, "CDMM_TYPE=0x%02x", cdev->type);
if (retval)
return retval;
retval = add_uevent_var(env, "CDMM_REV=%u", cdev->rev);
if (retval)
return retval;
retval = add_uevent_var(env, "MODALIAS=mipscdmm:t%02X", cdev->type);
return retval;
}
/* Device attributes */
#define CDMM_ATTR(name, fmt, arg...) \
static ssize_t name##_show(struct device *_dev, \
struct device_attribute *attr, char *buf) \
{ \
struct mips_cdmm_device *dev = to_mips_cdmm_device(_dev); \
return sprintf(buf, fmt, arg); \
} \
static DEVICE_ATTR_RO(name);
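/*
 * Illustrative only: CDMM_ATTR(cpu, "%u\n", dev->cpu) below expands to
 * roughly
 *
 *	static ssize_t cpu_show(struct device *_dev,
 *				struct device_attribute *attr, char *buf)
 *	{
 *		struct mips_cdmm_device *dev = to_mips_cdmm_device(_dev);
 *		return sprintf(buf, "%u\n", dev->cpu);
 *	}
 *	static DEVICE_ATTR_RO(cpu);
 *
 * i.e. one read-only sysfs attribute per invocation.
 */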
CDMM_ATTR(cpu, "%u\n", dev->cpu);
CDMM_ATTR(type, "0x%02x\n", dev->type);
CDMM_ATTR(revision, "%u\n", dev->rev);
CDMM_ATTR(modalias, "mipscdmm:t%02X\n", dev->type);
CDMM_ATTR(resource, "\t%016llx\t%016llx\t%016lx\n",
(unsigned long long)dev->res.start,
(unsigned long long)dev->res.end,
dev->res.flags);
static struct attribute *mips_cdmm_dev_attrs[] = {
&dev_attr_cpu.attr,
&dev_attr_type.attr,
&dev_attr_revision.attr,
&dev_attr_modalias.attr,
&dev_attr_resource.attr,
NULL,
};
ATTRIBUTE_GROUPS(mips_cdmm_dev);
struct bus_type mips_cdmm_bustype = {
.name = "cdmm",
.dev_groups = mips_cdmm_dev_groups,
.match = mips_cdmm_match,
.uevent = mips_cdmm_uevent,
};
EXPORT_SYMBOL_GPL(mips_cdmm_bustype);
/*
* Standard driver callback helpers.
*
* All the CDMM driver callbacks need to be executed on the appropriate CPU from
* workqueues. For the standard driver callbacks we need a work function
* (mips_cdmm_{void,int}_work()) to do the actual call from the right CPU, and a
* wrapper function (generated with BUILD_PERCPU_HELPER) to arrange for the work
* function to be called on that CPU.
*/
/**
* struct mips_cdmm_work_dev - Data for per-device call work.
* @fn: CDMM driver callback function to call for the device.
* @dev: CDMM device to pass to @fn.
*/
struct mips_cdmm_work_dev {
void *fn;
struct mips_cdmm_device *dev;
};
/**
* mips_cdmm_void_work() - Call a void returning CDMM driver callback.
* @data: struct mips_cdmm_work_dev pointer.
*
* A work_on_cpu() callback function to call an arbitrary CDMM driver callback
* function which doesn't return a value.
*/
static long mips_cdmm_void_work(void *data)
{
struct mips_cdmm_work_dev *work = data;
void (*fn)(struct mips_cdmm_device *) = work->fn;
fn(work->dev);
return 0;
}
/**
* mips_cdmm_int_work() - Call an int returning CDMM driver callback.
* @data: struct mips_cdmm_work_dev pointer.
*
* A work_on_cpu() callback function to call an arbitrary CDMM driver callback
* function which returns an int.
*/
static long mips_cdmm_int_work(void *data)
{
struct mips_cdmm_work_dev *work = data;
int (*fn)(struct mips_cdmm_device *) = work->fn;
return fn(work->dev);
}
#define _BUILD_RET_void
#define _BUILD_RET_int return
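/*
 * Token-pasting helpers for BUILD_PERCPU_HELPER() below: _BUILD_RET_##_ret
 * expands to nothing for void callbacks and to "return" for int callbacks,
 * so the generated wrapper either ignores or propagates the work_on_cpu()
 * result.
 */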
/**
* BUILD_PERCPU_HELPER() - Helper to call a CDMM driver callback on right CPU.
* @_ret: Return type (void or int).
* @_name: Name of CDMM driver callback function.
*
* Generates a specific device callback function to call a CDMM driver callback
* function on the appropriate CPU for the device, and if applicable return the
* result.
*/
#define BUILD_PERCPU_HELPER(_ret, _name) \
static _ret mips_cdmm_##_name(struct device *dev) \
{ \
struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev); \
struct mips_cdmm_driver *cdrv = to_mips_cdmm_driver(dev->driver); \
struct mips_cdmm_work_dev work = { \
.fn = cdrv->_name, \
.dev = cdev, \
}; \
\
_BUILD_RET_##_ret work_on_cpu(cdev->cpu, \
mips_cdmm_##_ret##_work, &work); \
}
/* Driver callback functions */
BUILD_PERCPU_HELPER(int, probe) /* int mips_cdmm_probe(struct device) */
BUILD_PERCPU_HELPER(int, remove) /* int mips_cdmm_remove(struct device) */
BUILD_PERCPU_HELPER(void, shutdown) /* void mips_cdmm_shutdown(struct device) */
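/*
 * Illustrative only: BUILD_PERCPU_HELPER(int, probe) above expands to
 * roughly
 *
 *	static int mips_cdmm_probe(struct device *dev)
 *	{
 *		...
 *		return work_on_cpu(cdev->cpu, mips_cdmm_int_work, &work);
 *	}
 *
 * so a driver's probe callback always runs on the device's own CPU.
 */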
/* Driver registration */
/**
* mips_cdmm_driver_register() - Register a CDMM driver.
* @drv: CDMM driver information.
*
* Register a CDMM driver with the CDMM subsystem. The driver will be informed
* of matching devices which are discovered.
*
* Returns: 0 on success.
*/
int mips_cdmm_driver_register(struct mips_cdmm_driver *drv)
{
drv->drv.bus = &mips_cdmm_bustype;
if (drv->probe)
drv->drv.probe = mips_cdmm_probe;
if (drv->remove)
drv->drv.remove = mips_cdmm_remove;
if (drv->shutdown)
drv->drv.shutdown = mips_cdmm_shutdown;
return driver_register(&drv->drv);
}
EXPORT_SYMBOL_GPL(mips_cdmm_driver_register);
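/*
 * Illustrative only: a minimal, hypothetical client of this API
 * (fdc_probe, fdc_remove and the 0xfd type are assumptions, not taken
 * from this file):
 *
 *	static const struct mips_cdmm_device_id fdc_ids[] = {
 *		{ .type = 0xfd },
 *		{ }
 *	};
 *
 *	static struct mips_cdmm_driver fdc_driver = {
 *		.drv		= { .name = "example-fdc" },
 *		.probe		= fdc_probe,
 *		.remove		= fdc_remove,
 *		.id_table	= fdc_ids,
 *	};
 *
 *	module_driver(fdc_driver, mips_cdmm_driver_register,
 *		      mips_cdmm_driver_unregister);
 */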
/**
* mips_cdmm_driver_unregister() - Unregister a CDMM driver.
* @drv: CDMM driver information.
*
* Unregister a CDMM driver from the CDMM subsystem.
*/
void mips_cdmm_driver_unregister(struct mips_cdmm_driver *drv)
{
driver_unregister(&drv->drv);
}
EXPORT_SYMBOL_GPL(mips_cdmm_driver_unregister);
/* CDMM initialisation and bus discovery */
/**
* struct mips_cdmm_bus - Info about CDMM bus.
* @phys: Physical address at which it is mapped.
* @regs: Virtual address where registers can be accessed.
* @drbs: Total number of DRBs.
* @drbs_reserved: Number of DRBs reserved.
* @discovered: Whether the devices on the bus have been discovered yet.
* @offline: Whether the CDMM bus is going offline (or very early
* coming back online), in which case it should be
* reconfigured each time.
*/
struct mips_cdmm_bus {
phys_addr_t phys;
void __iomem *regs;
unsigned int drbs;
unsigned int drbs_reserved;
bool discovered;
bool offline;
};
static struct mips_cdmm_bus mips_cdmm_boot_bus;
static DEFINE_PER_CPU(struct mips_cdmm_bus *, mips_cdmm_buses);
static atomic_t mips_cdmm_next_id = ATOMIC_INIT(-1);
/**
* mips_cdmm_get_bus() - Get the per-CPU CDMM bus information.
*
* Get information about the per-CPU CDMM bus, if the bus is present.
*
* The caller must prevent migration to another CPU, either by disabling
* pre-emption or by running from a pinned kernel thread.
*
* Returns: Pointer to CDMM bus information for the current CPU.
* May return ERR_PTR(-errno) in case of error, so check with
* IS_ERR().
*/
static struct mips_cdmm_bus *mips_cdmm_get_bus(void)
{
struct mips_cdmm_bus *bus, **bus_p;
unsigned long flags;
unsigned int cpu;
if (!cpu_has_cdmm)
return ERR_PTR(-ENODEV);
cpu = smp_processor_id();
/* Avoid early use of per-cpu primitives before initialised */
if (cpu == 0)
return &mips_cdmm_boot_bus;
/* Get bus pointer */
bus_p = per_cpu_ptr(&mips_cdmm_buses, cpu);
local_irq_save(flags);
bus = *bus_p;
/* Attempt allocation if NULL */
if (unlikely(!bus)) {
bus = kzalloc(sizeof(*bus), GFP_ATOMIC);
if (unlikely(!bus))
bus = ERR_PTR(-ENOMEM);
else
*bus_p = bus;
}
local_irq_restore(flags);
return bus;
}
/**
* mips_cdmm_cur_base() - Find current physical base address of CDMM region.
*
* Returns: Physical base address of CDMM region according to cdmmbase CP0
* register, or 0 if the CDMM region is disabled.
*/
static phys_addr_t mips_cdmm_cur_base(void)
{
unsigned long cdmmbase = read_c0_cdmmbase();
if (!(cdmmbase & MIPS_CDMMBASE_EN))
return 0;
return (cdmmbase >> MIPS_CDMMBASE_ADDR_SHIFT)
<< MIPS_CDMMBASE_ADDR_START;
}
/**
* mips_cdmm_phys_base() - Choose a physical base address for CDMM region.
*
* Picking a suitable physical address at which to map the CDMM region is
* platform specific, so this weak function can be overridden by platform
* code to pick a suitable value if none is configured by the bootloader.
* By default this method tries to find a CDMM-specific node in the system
* dtb. Note that this won't work for early serial console.
*/
phys_addr_t __weak mips_cdmm_phys_base(void)
{
struct device_node *np;
struct resource res;
int err;
np = of_find_compatible_node(NULL, NULL, "mti,mips-cdmm");
if (np) {
err = of_address_to_resource(np, 0, &res);
of_node_put(np);
if (!err)
return res.start;
}
return 0;
}
/**
* mips_cdmm_setup() - Ensure the CDMM bus is initialised and usable.
* @bus: Pointer to bus information for current CPU.
* IS_ERR(bus) is checked, so no need for caller to check.
*
* The caller must prevent migration to another CPU, either by disabling
* pre-emption or by running from a pinned kernel thread.
*
* Returns 0 on success, -errno on failure.
*/
static int mips_cdmm_setup(struct mips_cdmm_bus *bus)
{
unsigned long cdmmbase, flags;
int ret = 0;
if (IS_ERR(bus))
return PTR_ERR(bus);
local_irq_save(flags);
/* Don't set up bus a second time unless marked offline */
if (bus->offline) {
/* If CDMM region is still set up, nothing to do */
if (bus->phys == mips_cdmm_cur_base())
goto out;
/*
* The CDMM region isn't set up as expected, so it needs
* reconfiguring, but then we can stop checking it.
*/
bus->offline = false;
} else if (bus->phys > 1) {
goto out;
}
/* If the CDMM region is already configured, inherit that setup */
if (!bus->phys)
bus->phys = mips_cdmm_cur_base();
/* Otherwise, ask platform code for suggestions */
if (!bus->phys)
bus->phys = mips_cdmm_phys_base();
/* Otherwise, copy what other CPUs have done */
if (!bus->phys)
bus->phys = mips_cdmm_default_base;
/* Otherwise, complain once */
if (!bus->phys) {
bus->phys = 1;
/*
* If you hit this, either your bootloader needs to set up the
* CDMM on the boot CPU, or else you need to implement
* mips_cdmm_phys_base() for your platform (see asm/cdmm.h).
*/
pr_err("cdmm%u: Failed to choose a physical base\n",
smp_processor_id());
}
/* Already complained? */
if (bus->phys == 1) {
ret = -ENOMEM;
goto out;
}
/* Record our success for other CPUs to copy */
mips_cdmm_default_base = bus->phys;
pr_debug("cdmm%u: Enabling CDMM region at %pa\n",
smp_processor_id(), &bus->phys);
/* Enable CDMM */
cdmmbase = read_c0_cdmmbase();
cdmmbase &= (1ul << MIPS_CDMMBASE_ADDR_SHIFT) - 1;
cdmmbase |= (bus->phys >> MIPS_CDMMBASE_ADDR_START)
<< MIPS_CDMMBASE_ADDR_SHIFT;
cdmmbase |= MIPS_CDMMBASE_EN;
write_c0_cdmmbase(cdmmbase);
tlbw_use_hazard();
bus->regs = (void __iomem *)CKSEG1ADDR(bus->phys);
bus->drbs = 1 + ((cdmmbase & MIPS_CDMMBASE_SIZE) >>
MIPS_CDMMBASE_SIZE_SHIFT);
bus->drbs_reserved = !!(cdmmbase & MIPS_CDMMBASE_CI);
out:
local_irq_restore(flags);
return ret;
}
/**
* mips_cdmm_early_probe() - Minimally probe for a specific device on CDMM.
* @dev_type: CDMM type code to look for.
*
* Minimally configure the in-CPU Common Device Memory Map (CDMM) and look for a
* specific device. This can be used to find a device very early in boot for
* example to configure an early FDC console device.
*
* The caller must prevent migration to another CPU, either by disabling
* pre-emption or by running from a pinned kernel thread.
*
* Returns: MMIO pointer to device memory. The caller can read the ACSR
* register to find more information about the device (such as the
* version number or the number of blocks).
* May return IOMEM_ERR_PTR(-errno) in case of error, so check with
* IS_ERR().
*/
void __iomem *mips_cdmm_early_probe(unsigned int dev_type)
{
struct mips_cdmm_bus *bus;
void __iomem *cdmm;
u32 acsr;
unsigned int drb, type, size;
int err;
if (WARN_ON(!dev_type))
return IOMEM_ERR_PTR(-ENODEV);
bus = mips_cdmm_get_bus();
err = mips_cdmm_setup(bus);
if (err)
return IOMEM_ERR_PTR(err);
/* Skip the first block if it's reserved for more registers */
drb = bus->drbs_reserved;
cdmm = bus->regs;
/* Look for a specific device type */
for (; drb < bus->drbs; drb += size + 1) {
acsr = __raw_readl(cdmm + drb * CDMM_DRB_SIZE);
type = (acsr & CDMM_ACSR_DEVTYPE) >> CDMM_ACSR_DEVTYPE_SHIFT;
if (type == dev_type)
return cdmm + drb * CDMM_DRB_SIZE;
size = (acsr & CDMM_ACSR_DEVSIZE) >> CDMM_ACSR_DEVSIZE_SHIFT;
}
return IOMEM_ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(mips_cdmm_early_probe);
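/*
 * Illustrative only: early boot code could look for a hypothetical
 * device type (0xfd here is an assumed example value):
 *
 *	void __iomem *regs = mips_cdmm_early_probe(0xfd);
 *
 *	if (!IS_ERR(regs))
 *		acsr = __raw_readl(regs);
 *
 * The ACSR is the first register of the device's register block.
 */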
/**
* mips_cdmm_release() - Release a removed CDMM device.
* @dev: Device object
*
* Clean up the struct mips_cdmm_device for an unused CDMM device. This is
* called automatically by the driver core when a device is removed.
*/
static void mips_cdmm_release(struct device *dev)
{
struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev);
kfree(cdev);
}
/**
* mips_cdmm_bus_discover() - Discover the devices on the CDMM bus.
* @bus: CDMM bus information, must already be set up.
*/
static void mips_cdmm_bus_discover(struct mips_cdmm_bus *bus)
{
void __iomem *cdmm;
u32 acsr;
unsigned int drb, type, size, rev;
struct mips_cdmm_device *dev;
unsigned int cpu = smp_processor_id();
int ret = 0;
int id = 0;
/* Skip the first block if it's reserved for more registers */
drb = bus->drbs_reserved;
cdmm = bus->regs;
/* Discover devices */
bus->discovered = true;
pr_info("cdmm%u discovery (%u blocks)\n", cpu, bus->drbs);
for (; drb < bus->drbs; drb += size + 1) {
acsr = __raw_readl(cdmm + drb * CDMM_DRB_SIZE);
type = (acsr & CDMM_ACSR_DEVTYPE) >> CDMM_ACSR_DEVTYPE_SHIFT;
size = (acsr & CDMM_ACSR_DEVSIZE) >> CDMM_ACSR_DEVSIZE_SHIFT;
rev = (acsr & CDMM_ACSR_DEVREV) >> CDMM_ACSR_DEVREV_SHIFT;
if (!type)
continue;
pr_info("cdmm%u-%u: @%u (%#x..%#x), type 0x%02x, rev %u\n",
cpu, id, drb, drb * CDMM_DRB_SIZE,
(drb + size + 1) * CDMM_DRB_SIZE - 1,
type, rev);
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
break;
dev->cpu = cpu;
dev->res.start = bus->phys + drb * CDMM_DRB_SIZE;
dev->res.end = bus->phys +
(drb + size + 1) * CDMM_DRB_SIZE - 1;
dev->res.flags = IORESOURCE_MEM;
dev->type = type;
dev->rev = rev;
dev->dev.parent = get_cpu_device(cpu);
dev->dev.bus = &mips_cdmm_bustype;
dev->dev.id = atomic_inc_return(&mips_cdmm_next_id);
dev->dev.release = mips_cdmm_release;
dev_set_name(&dev->dev, "cdmm%u-%u", cpu, id);
++id;
ret = device_register(&dev->dev);
if (ret)
put_device(&dev->dev);
}
}
/*
* CPU hotplug and initialisation
*
* All the CDMM driver callbacks need to be executed on the appropriate CPU from
* workqueues. For the CPU callbacks, they need to be called for all devices on
* that CPU, so the work function calls bus_for_each_dev, using a helper
* (generated with BUILD_PERDEV_HELPER) to call the driver callback if the
* device's CPU matches.
*/
/**
* BUILD_PERDEV_HELPER() - Helper to call a CDMM driver callback if CPU matches.
* @_name: Name of CDMM driver callback function.
*
* Generates a bus_for_each_dev callback function to call a specific CDMM driver
* callback function for the device if the device's CPU matches that pointed to
* by the data argument.
*
* This is used for informing drivers for all devices on a given CPU of some
* event (such as the CPU going online/offline).
*
* It is expected to already be called from the appropriate CPU.
*/
#define BUILD_PERDEV_HELPER(_name) \
static int mips_cdmm_##_name##_helper(struct device *dev, void *data) \
{ \
struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev); \
struct mips_cdmm_driver *cdrv; \
unsigned int cpu = *(unsigned int *)data; \
\
if (cdev->cpu != cpu || !dev->driver) \
return 0; \
\
cdrv = to_mips_cdmm_driver(dev->driver); \
if (!cdrv->_name) \
return 0; \
return cdrv->_name(cdev); \
}
/* bus_for_each_dev callback helper functions */
BUILD_PERDEV_HELPER(cpu_down) /* int mips_cdmm_cpu_down_helper(...) */
BUILD_PERDEV_HELPER(cpu_up) /* int mips_cdmm_cpu_up_helper(...) */
/**
* mips_cdmm_cpu_down_prep() - Callback for CPUHP DOWN_PREP:
* Tear down the CDMM bus.
* @cpu: unsigned int CPU number.
*
* This function is executed on the hotplugged CPU and calls the CDMM
* driver cpu_down callback for all devices on that CPU.
*/
static int mips_cdmm_cpu_down_prep(unsigned int cpu)
{
struct mips_cdmm_bus *bus;
long ret;
/* Inform all the devices on the bus */
ret = bus_for_each_dev(&mips_cdmm_bustype, NULL, &cpu,
mips_cdmm_cpu_down_helper);
/*
* While bus is offline, each use of it should reconfigure it just in
* case it is first use when coming back online again.
*/
bus = mips_cdmm_get_bus();
if (!IS_ERR(bus))
bus->offline = true;
return ret;
}
/**
* mips_cdmm_cpu_online() - Callback for CPUHP ONLINE: Bring up the CDMM bus.
* @cpu: unsigned int CPU number.
*
* This work_on_cpu callback function is executed on a given CPU to discover
* CDMM devices on that CPU, or to call the CDMM driver cpu_up callback for all
* devices already discovered on that CPU.
*
* It is used as work_on_cpu callback function during
* initialisation. When CPUs are brought online the function is
* invoked directly on the hotplugged CPU.
*/
static int mips_cdmm_cpu_online(unsigned int cpu)
{
struct mips_cdmm_bus *bus;
long ret;
bus = mips_cdmm_get_bus();
ret = mips_cdmm_setup(bus);
if (ret)
return ret;
/* Bus now set up, so we can drop the offline flag if still set */
bus->offline = false;
if (!bus->discovered)
mips_cdmm_bus_discover(bus);
else
/* Inform all the devices on the bus */
ret = bus_for_each_dev(&mips_cdmm_bustype, NULL, &cpu,
mips_cdmm_cpu_up_helper);
return ret;
}
/**
* mips_cdmm_init() - Initialise CDMM bus.
*
* Initialise CDMM bus, discover CDMM devices for online CPUs, and arrange for
* hotplug notifications so the CDMM drivers can be kept up to date.
*/
static int __init mips_cdmm_init(void)
{
int ret;
/* Register the bus */
ret = bus_register(&mips_cdmm_bustype);
if (ret)
return ret;
/* We want to be notified about new CPUs */
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "bus/cdmm:online",
mips_cdmm_cpu_online, mips_cdmm_cpu_down_prep);
if (ret < 0)
pr_warn("cdmm: Failed to register CPU notifier\n");
return ret;
}
subsys_initcall(mips_cdmm_init);
| linux-master | drivers/bus/mips_cdmm.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* OMAP3XXX L3 Interconnect Driver
*
* Copyright (C) 2011 Texas Corporation
* Felipe Balbi <[email protected]>
* Santosh Shilimkar <[email protected]>
* Sricharan <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include "omap_l3_smx.h"
static inline u64 omap3_l3_readll(void __iomem *base, u16 reg)
{
return __raw_readll(base + reg);
}
static inline void omap3_l3_writell(void __iomem *base, u16 reg, u64 value)
{
__raw_writell(value, base + reg);
}
static inline enum omap3_l3_code omap3_l3_decode_error_code(u64 error)
{
return (error & 0x0f000000) >> L3_ERROR_LOG_CODE;
}
static inline u32 omap3_l3_decode_addr(u64 error_addr)
{
return error_addr & 0xffffffff;
}
static inline unsigned omap3_l3_decode_cmd(u64 error)
{
return (error & 0x07) >> L3_ERROR_LOG_CMD;
}
static inline enum omap3_l3_initiator_id omap3_l3_decode_initid(u64 error)
{
return (error & 0xff00) >> L3_ERROR_LOG_INITID;
}
static inline unsigned omap3_l3_decode_req_info(u64 error)
{
return (error >> 32) & 0xffff;
}
static char *omap3_l3_code_string(u8 code)
{
switch (code) {
case OMAP_L3_CODE_NOERROR:
return "No Error";
case OMAP_L3_CODE_UNSUP_CMD:
return "Unsupported Command";
case OMAP_L3_CODE_ADDR_HOLE:
return "Address Hole";
case OMAP_L3_CODE_PROTECT_VIOLATION:
return "Protection Violation";
case OMAP_L3_CODE_IN_BAND_ERR:
return "In-band Error";
case OMAP_L3_CODE_REQ_TOUT_NOT_ACCEPT:
return "Request Timeout Not Accepted";
case OMAP_L3_CODE_REQ_TOUT_NO_RESP:
return "Request Timeout, no response";
default:
return "UNKNOWN error";
}
}
static char *omap3_l3_initiator_string(u8 initid)
{
switch (initid) {
case OMAP_L3_LCD:
return "LCD";
case OMAP_L3_SAD2D:
return "SAD2D";
case OMAP_L3_IA_MPU_SS_1:
case OMAP_L3_IA_MPU_SS_2:
case OMAP_L3_IA_MPU_SS_3:
case OMAP_L3_IA_MPU_SS_4:
case OMAP_L3_IA_MPU_SS_5:
return "MPU";
case OMAP_L3_IA_IVA_SS_1:
case OMAP_L3_IA_IVA_SS_2:
case OMAP_L3_IA_IVA_SS_3:
return "IVA_SS";
case OMAP_L3_IA_IVA_SS_DMA_1:
case OMAP_L3_IA_IVA_SS_DMA_2:
case OMAP_L3_IA_IVA_SS_DMA_3:
case OMAP_L3_IA_IVA_SS_DMA_4:
case OMAP_L3_IA_IVA_SS_DMA_5:
case OMAP_L3_IA_IVA_SS_DMA_6:
return "IVA_SS_DMA";
case OMAP_L3_IA_SGX:
return "SGX";
case OMAP_L3_IA_CAM_1:
case OMAP_L3_IA_CAM_2:
case OMAP_L3_IA_CAM_3:
return "CAM";
case OMAP_L3_IA_DAP:
return "DAP";
case OMAP_L3_SDMA_WR_1:
case OMAP_L3_SDMA_WR_2:
return "SDMA_WR";
case OMAP_L3_SDMA_RD_1:
case OMAP_L3_SDMA_RD_2:
case OMAP_L3_SDMA_RD_3:
case OMAP_L3_SDMA_RD_4:
return "SDMA_RD";
case OMAP_L3_USBOTG:
return "USB_OTG";
case OMAP_L3_USBHOST:
return "USB_HOST";
default:
return "UNKNOWN Initiator";
}
}
/*
* omap3_l3_block_irq - handles a register block's irq
* @l3: struct omap3_l3 *
* @base: register block base address
* @error: L3_ERROR_LOG register of our block
*
* Called in hard-irq context. The caller should take care of locking.
*
* The OMAP36xx TRM gives, on page 2001, Figure 9-10, the Typical Error
* Analysis Sequence; we follow that sequence here. Please refer to that
* figure for more information on the subject.
*/
static irqreturn_t omap3_l3_block_irq(struct omap3_l3 *l3,
u64 error, int error_addr)
{
u8 code = omap3_l3_decode_error_code(error);
u8 initid = omap3_l3_decode_initid(error);
u8 multi = error & L3_ERROR_LOG_MULTI;
u32 address = omap3_l3_decode_addr(error_addr);
pr_err("%s seen by %s %s at address %x\n",
omap3_l3_code_string(code),
omap3_l3_initiator_string(initid),
multi ? "Multiple Errors" : "", address);
WARN_ON(1);
return IRQ_HANDLED;
}
static irqreturn_t omap3_l3_app_irq(int irq, void *_l3)
{
struct omap3_l3 *l3 = _l3;
u64 status, clear;
u64 error;
u64 error_addr;
u64 err_source = 0;
void __iomem *base;
int int_type;
irqreturn_t ret = IRQ_NONE;
int_type = irq == l3->app_irq ? L3_APPLICATION_ERROR : L3_DEBUG_ERROR;
if (!int_type)
status = omap3_l3_readll(l3->rt, L3_SI_FLAG_STATUS_0);
else
status = omap3_l3_readll(l3->rt, L3_SI_FLAG_STATUS_1);
/* identify the error source */
err_source = __ffs(status);
base = l3->rt + omap3_l3_bases[int_type][err_source];
error = omap3_l3_readll(base, L3_ERROR_LOG);
if (error) {
error_addr = omap3_l3_readll(base, L3_ERROR_LOG_ADDR);
ret |= omap3_l3_block_irq(l3, error, error_addr);
}
/*
* If we have a timeout error, there's nothing we can
* do besides rebooting the board, so BUG on any such
* error and handle the others. A timeout error is
* severe and not expected to occur.
*/
BUG_ON(!int_type && status & L3_STATUS_0_TIMEOUT_MASK);
/* Clear the status register */
clear = (L3_AGENT_STATUS_CLEAR_IA << int_type) |
L3_AGENT_STATUS_CLEAR_TA;
omap3_l3_writell(base, L3_AGENT_STATUS, clear);
/* clear the error log register */
omap3_l3_writell(base, L3_ERROR_LOG, error);
return ret;
}
#if IS_BUILTIN(CONFIG_OF)
static const struct of_device_id omap3_l3_match[] = {
{
.compatible = "ti,omap3-l3-smx",
},
{ },
};
MODULE_DEVICE_TABLE(of, omap3_l3_match);
#endif
static int omap3_l3_probe(struct platform_device *pdev)
{
struct omap3_l3 *l3;
struct resource *res;
int ret;
l3 = kzalloc(sizeof(*l3), GFP_KERNEL);
if (!l3)
return -ENOMEM;
platform_set_drvdata(pdev, l3);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev, "couldn't find resource\n");
ret = -ENODEV;
goto err0;
}
l3->rt = ioremap(res->start, resource_size(res));
if (!l3->rt) {
dev_err(&pdev->dev, "ioremap failed\n");
ret = -ENOMEM;
goto err0;
}
l3->debug_irq = platform_get_irq(pdev, 0);
ret = request_irq(l3->debug_irq, omap3_l3_app_irq, IRQF_TRIGGER_RISING,
"l3-debug-irq", l3);
if (ret) {
dev_err(&pdev->dev, "couldn't request debug irq\n");
goto err1;
}
l3->app_irq = platform_get_irq(pdev, 1);
ret = request_irq(l3->app_irq, omap3_l3_app_irq, IRQF_TRIGGER_RISING,
"l3-app-irq", l3);
if (ret) {
dev_err(&pdev->dev, "couldn't request app irq\n");
goto err2;
}
return 0;
err2:
free_irq(l3->debug_irq, l3);
err1:
iounmap(l3->rt);
err0:
kfree(l3);
return ret;
}
static int omap3_l3_remove(struct platform_device *pdev)
{
struct omap3_l3 *l3 = platform_get_drvdata(pdev);
free_irq(l3->app_irq, l3);
free_irq(l3->debug_irq, l3);
iounmap(l3->rt);
kfree(l3);
return 0;
}
static struct platform_driver omap3_l3_driver = {
.probe = omap3_l3_probe,
.remove = omap3_l3_remove,
.driver = {
.name = "omap_l3_smx",
.of_match_table = of_match_ptr(omap3_l3_match),
},
};
static int __init omap3_l3_init(void)
{
return platform_driver_register(&omap3_l3_driver);
}
postcore_initcall_sync(omap3_l3_init);
static void __exit omap3_l3_exit(void)
{
platform_driver_unregister(&omap3_l3_driver);
}
module_exit(omap3_l3_exit);
MODULE_AUTHOR("Felipe Balbi");
MODULE_AUTHOR("Santosh Shilimkar");
MODULE_AUTHOR("Sricharan R");
MODULE_DESCRIPTION("OMAP3XXX L3 Interconnect Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/bus/omap_l3_smx.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ARM Integrator Logical Module bus driver
* Copyright (C) 2020 Linaro Ltd.
* Author: Linus Walleij <[email protected]>
*
* See the device tree bindings for this block for more details on the
* hardware.
*/
#include <linux/module.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/bitops.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
/* All information about the connected logic modules are in here */
#define INTEGRATOR_SC_DEC_OFFSET 0x10
/* Base address for the expansion modules */
#define INTEGRATOR_AP_EXP_BASE 0xc0000000
#define INTEGRATOR_AP_EXP_STRIDE 0x10000000
static int integrator_lm_populate(int num, struct device *dev)
{
struct device_node *np = dev->of_node;
struct device_node *child;
u32 base;
int ret;
base = INTEGRATOR_AP_EXP_BASE + (num * INTEGRATOR_AP_EXP_STRIDE);
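/*
 * Illustrative only: with the constants above, module slot 1 maps
 * at 0xc0000000 + 1 * 0x10000000 = 0xd0000000.
 */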
/* Walk over the child nodes and see what chipselects we use */
for_each_available_child_of_node(np, child) {
struct resource res;
ret = of_address_to_resource(child, 0, &res);
if (ret) {
dev_info(dev, "no valid address on child\n");
continue;
}
/* First populate the syscon then any devices */
if (res.start == base) {
dev_info(dev, "populate module @0x%08x from DT\n",
base);
ret = of_platform_default_populate(child, NULL, dev);
if (ret) {
dev_err(dev, "failed to populate module\n");
of_node_put(child);
return ret;
}
}
}
return 0;
}
static const struct of_device_id integrator_ap_syscon_match[] = {
{ .compatible = "arm,integrator-ap-syscon"},
{ },
};
static int integrator_ap_lm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *syscon;
static struct regmap *map;
u32 val;
int ret;
int i;
/* Look up the system controller */
syscon = of_find_matching_node(NULL, integrator_ap_syscon_match);
if (!syscon) {
dev_err(dev,
"could not find Integrator/AP system controller\n");
return -ENODEV;
}
map = syscon_node_to_regmap(syscon);
if (IS_ERR(map)) {
dev_err(dev,
"could not find Integrator/AP system controller\n");
return PTR_ERR(map);
}
ret = regmap_read(map, INTEGRATOR_SC_DEC_OFFSET, &val);
if (ret) {
dev_err(dev, "could not read from Integrator/AP syscon\n");
return ret;
}
/* Loop over the connected modules */
for (i = 0; i < 4; i++) {
if (!(val & BIT(4 + i)))
continue;
dev_info(dev, "detected module in slot %d\n", i);
ret = integrator_lm_populate(i, dev);
if (ret)
return ret;
}
return 0;
}
static const struct of_device_id integrator_ap_lm_match[] = {
{ .compatible = "arm,integrator-ap-lm"},
{ },
};
static struct platform_driver integrator_ap_lm_driver = {
.probe = integrator_ap_lm_probe,
.driver = {
.name = "integratorap-lm",
.of_match_table = integrator_ap_lm_match,
},
};
module_platform_driver(integrator_ap_lm_driver);
MODULE_AUTHOR("Linus Walleij <[email protected]>");
MODULE_DESCRIPTION("Integrator AP Logical Module driver");
| linux-master | drivers/bus/arm-integrator-lm.c |
/*
* CCI cache coherent interconnect driver
*
* Copyright (C) 2013 ARM Ltd.
* Author: Lorenzo Pieralisi <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed "as is" WITHOUT ANY WARRANTY of any
* kind, whether express or implied; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/arm-cci.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/smp_plat.h>
static void __iomem *cci_ctrl_base __ro_after_init;
static unsigned long cci_ctrl_phys __ro_after_init;
#ifdef CONFIG_ARM_CCI400_PORT_CTRL
struct cci_nb_ports {
unsigned int nb_ace;
unsigned int nb_ace_lite;
};
static const struct cci_nb_ports cci400_ports = {
.nb_ace = 2,
.nb_ace_lite = 3
};
#define CCI400_PORTS_DATA (&cci400_ports)
#else
#define CCI400_PORTS_DATA (NULL)
#endif
static const struct of_device_id arm_cci_matches[] = {
#ifdef CONFIG_ARM_CCI400_COMMON
{.compatible = "arm,cci-400", .data = CCI400_PORTS_DATA },
#endif
#ifdef CONFIG_ARM_CCI5xx_PMU
{ .compatible = "arm,cci-500", },
{ .compatible = "arm,cci-550", },
#endif
{},
};
static const struct of_dev_auxdata arm_cci_auxdata[] = {
OF_DEV_AUXDATA("arm,cci-400-pmu", 0, NULL, &cci_ctrl_base),
OF_DEV_AUXDATA("arm,cci-400-pmu,r0", 0, NULL, &cci_ctrl_base),
OF_DEV_AUXDATA("arm,cci-400-pmu,r1", 0, NULL, &cci_ctrl_base),
OF_DEV_AUXDATA("arm,cci-500-pmu,r0", 0, NULL, &cci_ctrl_base),
OF_DEV_AUXDATA("arm,cci-550-pmu,r0", 0, NULL, &cci_ctrl_base),
{}
};
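/*
 * Note: the auxdata entries above hand the shared cci_ctrl_base pointer
 * to any matching CCI PMU devices as their platform data.
 */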
#define DRIVER_NAME "ARM-CCI"
static int cci_platform_probe(struct platform_device *pdev)
{
if (!cci_probed())
return -ENODEV;
return of_platform_populate(pdev->dev.of_node, NULL,
arm_cci_auxdata, &pdev->dev);
}
static struct platform_driver cci_platform_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = arm_cci_matches,
},
.probe = cci_platform_probe,
};
static int __init cci_platform_init(void)
{
return platform_driver_register(&cci_platform_driver);
}
#ifdef CONFIG_ARM_CCI400_PORT_CTRL
#define CCI_PORT_CTRL 0x0
#define CCI_CTRL_STATUS 0xc
#define CCI_ENABLE_SNOOP_REQ 0x1
#define CCI_ENABLE_DVM_REQ 0x2
#define CCI_ENABLE_REQ (CCI_ENABLE_SNOOP_REQ | CCI_ENABLE_DVM_REQ)
enum cci_ace_port_type {
ACE_INVALID_PORT = 0x0,
ACE_PORT,
ACE_LITE_PORT,
};
struct cci_ace_port {
void __iomem *base;
unsigned long phys;
enum cci_ace_port_type type;
struct device_node *dn;
};
static struct cci_ace_port *ports;
static unsigned int nb_cci_ports;
struct cpu_port {
u64 mpidr;
u32 port;
};
/*
* Use the port MSB as a valid flag; the shift could be made dynamic
* by computing the number of bits required for port indexes.
* Code disabling CCI cpu ports runs with the D-cache invalidated
* and the SCTLR C bit clear, so data accesses must be kept to a
* minimum to improve performance; for now the shift is left static
* to avoid one more data access while disabling the CCI port.
*/
#define PORT_VALID_SHIFT 31
#define PORT_VALID (0x1 << PORT_VALID_SHIFT)
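/* Illustrative only: a valid port index of 2 is stored as 0x80000002. */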
static inline void init_cpu_port(struct cpu_port *port, u32 index, u64 mpidr)
{
port->port = PORT_VALID | index;
port->mpidr = mpidr;
}
static inline bool cpu_port_is_valid(struct cpu_port *port)
{
return !!(port->port & PORT_VALID);
}
static inline bool cpu_port_match(struct cpu_port *port, u64 mpidr)
{
return port->mpidr == (mpidr & MPIDR_HWID_BITMASK);
}
static struct cpu_port cpu_port[NR_CPUS];
/**
* __cci_ace_get_port - Function to retrieve the port index connected to
* a cpu or device.
*
* @dn: device node of the device to look-up
* @type: port type
*
* Return value:
* - CCI port index if success
* - -ENODEV if failure
*/
static int __cci_ace_get_port(struct device_node *dn, int type)
{
int i;
bool ace_match;
struct device_node *cci_portn;
cci_portn = of_parse_phandle(dn, "cci-control-port", 0);
for (i = 0; i < nb_cci_ports; i++) {
ace_match = ports[i].type == type;
if (ace_match && cci_portn == ports[i].dn)
return i;
}
return -ENODEV;
}
int cci_ace_get_port(struct device_node *dn)
{
return __cci_ace_get_port(dn, ACE_LITE_PORT);
}
EXPORT_SYMBOL_GPL(cci_ace_get_port);
static void cci_ace_init_ports(void)
{
int port, cpu;
struct device_node *cpun;
/*
	 * The port index look-up speeds up disabling ports by CPU, since
	 * the logical-to-port index mapping is done once and does
	 * not change after system boot.
* The stashed index array is initialized for all possible CPUs
* at probe time.
*/
for_each_possible_cpu(cpu) {
/* too early to use cpu->of_node */
cpun = of_get_cpu_node(cpu, NULL);
if (WARN(!cpun, "Missing cpu device node\n"))
continue;
port = __cci_ace_get_port(cpun, ACE_PORT);
if (port < 0)
continue;
init_cpu_port(&cpu_port[cpu], port, cpu_logical_map(cpu));
}
for_each_possible_cpu(cpu) {
WARN(!cpu_port_is_valid(&cpu_port[cpu]),
"CPU %u does not have an associated CCI port\n",
cpu);
}
}
/*
* Functions to enable/disable a CCI interconnect slave port
*
* They are called by low-level power management code to disable slave
 * interface snoops and DVM broadcasts.
 * Since they may execute with cache data allocation disabled, after the
 * caches have been cleaned and invalidated, the functions provide no
 * explicit locking: normal cacheable kernel locks based on ldrex/strex
 * may not work while the D-cache is disabled.
 * Locking has to be provided by BSP implementations to ensure proper
 * operation.
*/
/**
* cci_port_control() - function to control a CCI port
*
* @port: index of the port to setup
* @enable: if true enables the port, if false disables it
*/
static void notrace cci_port_control(unsigned int port, bool enable)
{
void __iomem *base = ports[port].base;
writel_relaxed(enable ? CCI_ENABLE_REQ : 0, base + CCI_PORT_CTRL);
/*
* This function is called from power down procedures
* and must not execute any instruction that might
* cause the processor to be put in a quiescent state
	 * (e.g. wfi). Hence, cpu_relax() cannot be added to this
* read loop to optimize power, since it might hide possibly
* disruptive operations.
*/
while (readl_relaxed(cci_ctrl_base + CCI_CTRL_STATUS) & 0x1)
;
}
/**
* cci_disable_port_by_cpu() - function to disable a CCI port by CPU
* reference
*
* @mpidr: mpidr of the CPU whose CCI port should be disabled
*
* Disabling a CCI port for a CPU implies disabling the CCI port
* controlling that CPU cluster. Code disabling CPU CCI ports
* must make sure that the CPU running the code is the last active CPU
 * in the cluster, i.e. all other CPUs are quiescent in a low power state.
*
* Return:
* 0 on success
* -ENODEV on port look-up failure
*/
int notrace cci_disable_port_by_cpu(u64 mpidr)
{
int cpu;
bool is_valid;
for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
is_valid = cpu_port_is_valid(&cpu_port[cpu]);
if (is_valid && cpu_port_match(&cpu_port[cpu], mpidr)) {
cci_port_control(cpu_port[cpu].port, false);
return 0;
}
}
return -ENODEV;
}
EXPORT_SYMBOL_GPL(cci_disable_port_by_cpu);
/**
* cci_enable_port_for_self() - enable a CCI port for calling CPU
*
* Enabling a CCI port for the calling CPU implies enabling the CCI
* port controlling that CPU's cluster. Caller must make sure that the
* CPU running the code is the first active CPU in the cluster and all
* other CPUs are quiescent in a low power state or waiting for this CPU
* to complete the CCI initialization.
*
* Because this is called when the MMU is still off and with no stack,
* the code must be position independent and ideally rely on callee
* clobbered registers only. To achieve this we must code this function
* entirely in assembler.
*
* On success this returns with the proper CCI port enabled. In case of
* any failure this never returns as the inability to enable the CCI is
* fatal and there is no possible recovery at this stage.
*/
asmlinkage void __naked cci_enable_port_for_self(void)
{
asm volatile ("\n"
" .arch armv7-a\n"
" mrc p15, 0, r0, c0, c0, 5 @ get MPIDR value \n"
" and r0, r0, #"__stringify(MPIDR_HWID_BITMASK)" \n"
" adr r1, 5f \n"
" ldr r2, [r1] \n"
" add r1, r1, r2 @ &cpu_port \n"
" add ip, r1, %[sizeof_cpu_port] \n"
/* Loop over the cpu_port array looking for a matching MPIDR */
"1: ldr r2, [r1, %[offsetof_cpu_port_mpidr_lsb]] \n"
" cmp r2, r0 @ compare MPIDR \n"
" bne 2f \n"
/* Found a match, now test port validity */
" ldr r3, [r1, %[offsetof_cpu_port_port]] \n"
" tst r3, #"__stringify(PORT_VALID)" \n"
" bne 3f \n"
/* no match, loop with the next cpu_port entry */
"2: add r1, r1, %[sizeof_struct_cpu_port] \n"
" cmp r1, ip @ done? \n"
" blo 1b \n"
/* CCI port not found -- cheaply try to stall this CPU */
"cci_port_not_found: \n"
" wfi \n"
" wfe \n"
" b cci_port_not_found \n"
/* Use matched port index to look up the corresponding ports entry */
"3: bic r3, r3, #"__stringify(PORT_VALID)" \n"
" adr r0, 6f \n"
" ldmia r0, {r1, r2} \n"
" sub r1, r1, r0 @ virt - phys \n"
" ldr r0, [r0, r2] @ *(&ports) \n"
" mov r2, %[sizeof_struct_ace_port] \n"
" mla r0, r2, r3, r0 @ &ports[index] \n"
" sub r0, r0, r1 @ virt_to_phys() \n"
/* Enable the CCI port */
" ldr r0, [r0, %[offsetof_port_phys]] \n"
" mov r3, %[cci_enable_req]\n"
" str r3, [r0, #"__stringify(CCI_PORT_CTRL)"] \n"
/* poll the status reg for completion */
" adr r1, 7f \n"
" ldr r0, [r1] \n"
" ldr r0, [r0, r1] @ cci_ctrl_base \n"
"4: ldr r1, [r0, #"__stringify(CCI_CTRL_STATUS)"] \n"
" tst r1, %[cci_control_status_bits] \n"
" bne 4b \n"
" mov r0, #0 \n"
" bx lr \n"
" .align 2 \n"
"5: .word cpu_port - . \n"
"6: .word . \n"
" .word ports - 6b \n"
"7: .word cci_ctrl_phys - . \n"
: :
[sizeof_cpu_port] "i" (sizeof(cpu_port)),
[cci_enable_req] "i" cpu_to_le32(CCI_ENABLE_REQ),
[cci_control_status_bits] "i" cpu_to_le32(1),
#ifndef __ARMEB__
[offsetof_cpu_port_mpidr_lsb] "i" (offsetof(struct cpu_port, mpidr)),
#else
[offsetof_cpu_port_mpidr_lsb] "i" (offsetof(struct cpu_port, mpidr)+4),
#endif
[offsetof_cpu_port_port] "i" (offsetof(struct cpu_port, port)),
[sizeof_struct_cpu_port] "i" (sizeof(struct cpu_port)),
[sizeof_struct_ace_port] "i" (sizeof(struct cci_ace_port)),
[offsetof_port_phys] "i" (offsetof(struct cci_ace_port, phys)) );
}
/**
* __cci_control_port_by_device() - function to control a CCI port by device
* reference
*
* @dn: device node pointer of the device whose CCI port should be
* controlled
* @enable: if true enables the port, if false disables it
*
* Return:
* 0 on success
* -ENODEV on port look-up failure
*/
int notrace __cci_control_port_by_device(struct device_node *dn, bool enable)
{
int port;
if (!dn)
return -ENODEV;
port = __cci_ace_get_port(dn, ACE_LITE_PORT);
if (WARN_ONCE(port < 0, "node %pOF ACE lite port look-up failure\n",
dn))
return -ENODEV;
cci_port_control(port, enable);
return 0;
}
EXPORT_SYMBOL_GPL(__cci_control_port_by_device);
/**
* __cci_control_port_by_index() - function to control a CCI port by port index
*
* @port: port index previously retrieved with cci_ace_get_port()
* @enable: if true enables the port, if false disables it
*
* Return:
* 0 on success
* -ENODEV on port index out of range
* -EPERM if operation carried out on an ACE PORT
*/
int notrace __cci_control_port_by_index(u32 port, bool enable)
{
if (port >= nb_cci_ports || ports[port].type == ACE_INVALID_PORT)
return -ENODEV;
/*
	 * CCI control for ports connected to CPUs is extremely fragile
	 * and must be made to go through a specific and controlled
	 * interface (i.e. cci_disable_port_by_cpu()); control by general
	 * purpose indexing is therefore disabled for ACE ports.
*/
if (ports[port].type == ACE_PORT)
return -EPERM;
cci_port_control(port, enable);
return 0;
}
EXPORT_SYMBOL_GPL(__cci_control_port_by_index);
static const struct of_device_id arm_cci_ctrl_if_matches[] = {
{.compatible = "arm,cci-400-ctrl-if", },
{},
};
static int cci_probe_ports(struct device_node *np)
{
struct cci_nb_ports const *cci_config;
int ret, i, nb_ace = 0, nb_ace_lite = 0;
struct device_node *cp;
struct resource res;
const char *match_str;
bool is_ace;
cci_config = of_match_node(arm_cci_matches, np)->data;
if (!cci_config)
return -ENODEV;
nb_cci_ports = cci_config->nb_ace + cci_config->nb_ace_lite;
ports = kcalloc(nb_cci_ports, sizeof(*ports), GFP_KERNEL);
if (!ports)
return -ENOMEM;
for_each_available_child_of_node(np, cp) {
if (!of_match_node(arm_cci_ctrl_if_matches, cp))
continue;
i = nb_ace + nb_ace_lite;
if (i >= nb_cci_ports)
break;
if (of_property_read_string(cp, "interface-type",
&match_str)) {
WARN(1, "node %pOF missing interface-type property\n",
cp);
continue;
}
is_ace = strcmp(match_str, "ace") == 0;
if (!is_ace && strcmp(match_str, "ace-lite")) {
WARN(1, "node %pOF containing invalid interface-type property, skipping it\n",
cp);
continue;
}
ret = of_address_to_resource(cp, 0, &res);
if (!ret) {
ports[i].base = ioremap(res.start, resource_size(&res));
ports[i].phys = res.start;
}
if (ret || !ports[i].base) {
WARN(1, "unable to ioremap CCI port %d\n", i);
continue;
}
if (is_ace) {
if (WARN_ON(nb_ace >= cci_config->nb_ace))
continue;
ports[i].type = ACE_PORT;
++nb_ace;
} else {
if (WARN_ON(nb_ace_lite >= cci_config->nb_ace_lite))
continue;
ports[i].type = ACE_LITE_PORT;
++nb_ace_lite;
}
ports[i].dn = cp;
}
/*
	 * If there is no CCI port that is under kernel control,
	 * return early and report the probe status.
*/
if (!nb_ace && !nb_ace_lite)
return -ENODEV;
/* initialize a stashed array of ACE ports to speed-up look-up */
cci_ace_init_ports();
/*
* Multi-cluster systems may need this data when non-coherent, during
* cluster power-up/power-down. Make sure it reaches main memory.
*/
sync_cache_w(&cci_ctrl_base);
sync_cache_w(&cci_ctrl_phys);
sync_cache_w(&ports);
sync_cache_w(&cpu_port);
__sync_cache_range_w(ports, sizeof(*ports) * nb_cci_ports);
pr_info("ARM CCI driver probed\n");
return 0;
}
#else /* !CONFIG_ARM_CCI400_PORT_CTRL */
static inline int cci_probe_ports(struct device_node *np)
{
return 0;
}
#endif /* CONFIG_ARM_CCI400_PORT_CTRL */
static int cci_probe(void)
{
int ret;
struct device_node *np;
struct resource res;
np = of_find_matching_node(NULL, arm_cci_matches);
if (!of_device_is_available(np))
return -ENODEV;
ret = of_address_to_resource(np, 0, &res);
if (!ret) {
cci_ctrl_base = ioremap(res.start, resource_size(&res));
cci_ctrl_phys = res.start;
}
if (ret || !cci_ctrl_base) {
WARN(1, "unable to ioremap CCI ctrl\n");
return -ENXIO;
}
return cci_probe_ports(np);
}
static int cci_init_status = -EAGAIN;
static DEFINE_MUTEX(cci_probing);
static int cci_init(void)
{
if (cci_init_status != -EAGAIN)
return cci_init_status;
mutex_lock(&cci_probing);
if (cci_init_status == -EAGAIN)
cci_init_status = cci_probe();
mutex_unlock(&cci_probing);
return cci_init_status;
}
/*
 * To sort out early init call ordering, a helper function is provided to
 * check whether the CCI driver has been initialized. If it has not, the
 * helper calls the init function, which probes the driver and updates the
 * cached return value.
*/
bool cci_probed(void)
{
return cci_init() == 0;
}
EXPORT_SYMBOL_GPL(cci_probed);
early_initcall(cci_init);
core_initcall(cci_platform_init);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ARM CCI support");
| linux-master | drivers/bus/arm-cci.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014 ARM Limited
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/of_platform.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/vexpress.h>
#define SYS_MISC 0x0
#define SYS_MISC_MASTERSITE (1 << 14)
#define SYS_PROCID0 0x24
#define SYS_PROCID1 0x28
#define SYS_HBI_MASK 0xfff
#define SYS_PROCIDx_HBI_SHIFT 0
#define SYS_CFGDATA 0x40
#define SYS_CFGCTRL 0x44
#define SYS_CFGCTRL_START (1 << 31)
#define SYS_CFGCTRL_WRITE (1 << 30)
#define SYS_CFGCTRL_DCC(n) (((n) & 0xf) << 26)
#define SYS_CFGCTRL_FUNC(n) (((n) & 0x3f) << 20)
#define SYS_CFGCTRL_SITE(n) (((n) & 0x3) << 16)
#define SYS_CFGCTRL_POSITION(n) (((n) & 0xf) << 12)
#define SYS_CFGCTRL_DEVICE(n) (((n) & 0xfff) << 0)
#define SYS_CFGSTAT 0x48
#define SYS_CFGSTAT_ERR (1 << 1)
#define SYS_CFGSTAT_COMPLETE (1 << 0)
#define VEXPRESS_SITE_MB 0
#define VEXPRESS_SITE_DB1 1
#define VEXPRESS_SITE_DB2 2
#define VEXPRESS_SITE_MASTER 0xf
struct vexpress_syscfg {
struct device *dev;
void __iomem *base;
struct list_head funcs;
};
struct vexpress_syscfg_func {
struct list_head list;
struct vexpress_syscfg *syscfg;
struct regmap *regmap;
int num_templates;
u32 template[]; /* Keep it last! */
};
struct vexpress_config_bridge_ops {
struct regmap * (*regmap_init)(struct device *dev, void *context);
void (*regmap_exit)(struct regmap *regmap, void *context);
};
struct vexpress_config_bridge {
struct vexpress_config_bridge_ops *ops;
void *context;
};
static DEFINE_MUTEX(vexpress_config_mutex);
static u32 vexpress_config_site_master = VEXPRESS_SITE_MASTER;
static void vexpress_config_set_master(u32 site)
{
vexpress_config_site_master = site;
}
static void vexpress_config_lock(void *arg)
{
mutex_lock(&vexpress_config_mutex);
}
static void vexpress_config_unlock(void *arg)
{
mutex_unlock(&vexpress_config_mutex);
}
static void vexpress_config_find_prop(struct device_node *node,
const char *name, u32 *val)
{
/* Default value */
*val = 0;
of_node_get(node);
while (node) {
if (of_property_read_u32(node, name, val) == 0) {
of_node_put(node);
return;
}
node = of_get_next_parent(node);
}
}
static int vexpress_config_get_topo(struct device_node *node, u32 *site,
u32 *position, u32 *dcc)
{
vexpress_config_find_prop(node, "arm,vexpress,site", site);
if (*site == VEXPRESS_SITE_MASTER)
*site = vexpress_config_site_master;
if (WARN_ON(vexpress_config_site_master == VEXPRESS_SITE_MASTER))
return -EINVAL;
vexpress_config_find_prop(node, "arm,vexpress,position", position);
vexpress_config_find_prop(node, "arm,vexpress,dcc", dcc);
return 0;
}
static void vexpress_config_devres_release(struct device *dev, void *res)
{
struct vexpress_config_bridge *bridge = dev_get_drvdata(dev->parent);
struct regmap *regmap = res;
bridge->ops->regmap_exit(regmap, bridge->context);
}
struct regmap *devm_regmap_init_vexpress_config(struct device *dev)
{
struct vexpress_config_bridge *bridge;
struct regmap *regmap;
struct regmap **res;
bridge = dev_get_drvdata(dev->parent);
if (WARN_ON(!bridge))
return ERR_PTR(-EINVAL);
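	/*
	 * devres pattern: stash the regmap pointer in a managed allocation so
	 * that vexpress_config_devres_release() disposes of the regmap
	 * automatically when the consumer device is unbound.
	 */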
res = devres_alloc(vexpress_config_devres_release, sizeof(*res),
GFP_KERNEL);
if (!res)
return ERR_PTR(-ENOMEM);
regmap = (bridge->ops->regmap_init)(dev, bridge->context);
if (IS_ERR(regmap)) {
devres_free(res);
return regmap;
}
*res = regmap;
devres_add(dev, res);
return regmap;
}
EXPORT_SYMBOL_GPL(devm_regmap_init_vexpress_config);
static int vexpress_syscfg_exec(struct vexpress_syscfg_func *func,
int index, bool write, u32 *data)
{
struct vexpress_syscfg *syscfg = func->syscfg;
u32 command, status;
int tries;
long timeout;
if (WARN_ON(index >= func->num_templates))
return -EINVAL;
command = readl(syscfg->base + SYS_CFGCTRL);
if (WARN_ON(command & SYS_CFGCTRL_START))
return -EBUSY;
command = func->template[index];
command |= SYS_CFGCTRL_START;
command |= write ? SYS_CFGCTRL_WRITE : 0;
/* Use a canary for reads */
if (!write)
*data = 0xdeadbeef;
dev_dbg(syscfg->dev, "func %p, command %x, data %x\n",
func, command, *data);
writel(*data, syscfg->base + SYS_CFGDATA);
writel(0, syscfg->base + SYS_CFGSTAT);
writel(command, syscfg->base + SYS_CFGCTRL);
mb();
/* The operation can take ages... Go to sleep, 100us initially */
tries = 100;
timeout = 100;
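	/*
	 * Poll with back-off: 100us, 80us, ... down to a 20us floor, for at
	 * most 100 polls -- roughly 2.2ms of nominal worst-case wait (the
	 * sleeping path can take longer due to scheduler granularity).
	 */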
do {
if (!irqs_disabled()) {
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(usecs_to_jiffies(timeout));
if (signal_pending(current))
return -EINTR;
} else {
udelay(timeout);
}
status = readl(syscfg->base + SYS_CFGSTAT);
if (status & SYS_CFGSTAT_ERR)
return -EFAULT;
if (timeout > 20)
timeout -= 20;
} while (--tries && !(status & SYS_CFGSTAT_COMPLETE));
if (WARN_ON_ONCE(!tries))
return -ETIMEDOUT;
if (!write) {
*data = readl(syscfg->base + SYS_CFGDATA);
dev_dbg(syscfg->dev, "func %p, read data %x\n", func, *data);
}
return 0;
}
static int vexpress_syscfg_read(void *context, unsigned int index,
unsigned int *val)
{
struct vexpress_syscfg_func *func = context;
return vexpress_syscfg_exec(func, index, false, val);
}
static int vexpress_syscfg_write(void *context, unsigned int index,
unsigned int val)
{
struct vexpress_syscfg_func *func = context;
return vexpress_syscfg_exec(func, index, true, &val);
}
static struct regmap_config vexpress_syscfg_regmap_config = {
.lock = vexpress_config_lock,
.unlock = vexpress_config_unlock,
.reg_bits = 32,
.val_bits = 32,
.reg_read = vexpress_syscfg_read,
.reg_write = vexpress_syscfg_write,
.reg_format_endian = REGMAP_ENDIAN_LITTLE,
.val_format_endian = REGMAP_ENDIAN_LITTLE,
};
static struct regmap *vexpress_syscfg_regmap_init(struct device *dev,
void *context)
{
int err;
struct vexpress_syscfg *syscfg = context;
struct vexpress_syscfg_func *func;
struct property *prop;
const __be32 *val = NULL;
__be32 energy_quirk[4];
int num;
u32 site, position, dcc;
int i;
err = vexpress_config_get_topo(dev->of_node, &site,
&position, &dcc);
if (err)
return ERR_PTR(err);
prop = of_find_property(dev->of_node,
"arm,vexpress-sysreg,func", NULL);
if (!prop)
return ERR_PTR(-EINVAL);
num = prop->length / sizeof(u32) / 2;
val = prop->value;
/*
* "arm,vexpress-energy" function used to be described
* by its first device only, now it requires both
*/
if (num == 1 && of_device_is_compatible(dev->of_node,
"arm,vexpress-energy")) {
num = 2;
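		/*
		 * Build two (function, device) pairs from the single pair in
		 * DT: the second pair addresses the companion device at
		 * device id + 1.
		 */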
energy_quirk[0] = *val;
energy_quirk[2] = *val++;
energy_quirk[1] = *val;
energy_quirk[3] = cpu_to_be32(be32_to_cpup(val) + 1);
val = energy_quirk;
}
func = kzalloc(struct_size(func, template, num), GFP_KERNEL);
if (!func)
return ERR_PTR(-ENOMEM);
func->syscfg = syscfg;
func->num_templates = num;
for (i = 0; i < num; i++) {
u32 function, device;
function = be32_to_cpup(val++);
device = be32_to_cpup(val++);
dev_dbg(dev, "func %p: %u/%u/%u/%u/%u\n",
func, site, position, dcc,
function, device);
func->template[i] = SYS_CFGCTRL_DCC(dcc);
func->template[i] |= SYS_CFGCTRL_SITE(site);
func->template[i] |= SYS_CFGCTRL_POSITION(position);
func->template[i] |= SYS_CFGCTRL_FUNC(function);
func->template[i] |= SYS_CFGCTRL_DEVICE(device);
}
vexpress_syscfg_regmap_config.max_register = num - 1;
func->regmap = regmap_init(dev, NULL, func,
&vexpress_syscfg_regmap_config);
if (IS_ERR(func->regmap)) {
void *err = func->regmap;
kfree(func);
return err;
}
list_add(&func->list, &syscfg->funcs);
return func->regmap;
}
static void vexpress_syscfg_regmap_exit(struct regmap *regmap, void *context)
{
struct vexpress_syscfg *syscfg = context;
struct vexpress_syscfg_func *func, *tmp;
regmap_exit(regmap);
list_for_each_entry_safe(func, tmp, &syscfg->funcs, list) {
if (func->regmap == regmap) {
list_del(&syscfg->funcs);
kfree(func);
break;
}
}
}
static struct vexpress_config_bridge_ops vexpress_syscfg_bridge_ops = {
.regmap_init = vexpress_syscfg_regmap_init,
.regmap_exit = vexpress_syscfg_regmap_exit,
};
static int vexpress_syscfg_probe(struct platform_device *pdev)
{
struct vexpress_syscfg *syscfg;
struct vexpress_config_bridge *bridge;
struct device_node *node;
int master;
u32 dt_hbi;
syscfg = devm_kzalloc(&pdev->dev, sizeof(*syscfg), GFP_KERNEL);
if (!syscfg)
return -ENOMEM;
syscfg->dev = &pdev->dev;
INIT_LIST_HEAD(&syscfg->funcs);
syscfg->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(syscfg->base))
return PTR_ERR(syscfg->base);
bridge = devm_kmalloc(&pdev->dev, sizeof(*bridge), GFP_KERNEL);
if (!bridge)
return -ENOMEM;
bridge->ops = &vexpress_syscfg_bridge_ops;
bridge->context = syscfg;
dev_set_drvdata(&pdev->dev, bridge);
master = readl(syscfg->base + SYS_MISC) & SYS_MISC_MASTERSITE ?
VEXPRESS_SITE_DB2 : VEXPRESS_SITE_DB1;
vexpress_config_set_master(master);
/* Confirm board type against DT property, if available */
if (of_property_read_u32(of_root, "arm,hbi", &dt_hbi) == 0) {
u32 id = readl(syscfg->base + (master == VEXPRESS_SITE_DB1 ?
SYS_PROCID0 : SYS_PROCID1));
u32 hbi = (id >> SYS_PROCIDx_HBI_SHIFT) & SYS_HBI_MASK;
if (WARN_ON(dt_hbi != hbi))
dev_warn(&pdev->dev, "DT HBI (%x) is not matching hardware (%x)!\n",
dt_hbi, hbi);
}
for_each_compatible_node(node, NULL, "arm,vexpress,config-bus") {
struct device_node *bridge_np;
bridge_np = of_parse_phandle(node, "arm,vexpress,config-bridge", 0);
if (bridge_np != pdev->dev.parent->of_node)
continue;
of_platform_populate(node, NULL, NULL, &pdev->dev);
}
return 0;
}
static const struct platform_device_id vexpress_syscfg_id_table[] = {
{ "vexpress-syscfg", },
{},
};
MODULE_DEVICE_TABLE(platform, vexpress_syscfg_id_table);
static struct platform_driver vexpress_syscfg_driver = {
.driver.name = "vexpress-syscfg",
.id_table = vexpress_syscfg_id_table,
.probe = vexpress_syscfg_probe,
};
module_platform_driver(vexpress_syscfg_driver);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/bus/vexpress-config.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Simple Power-Managed Bus Driver
*
* Copyright (C) 2014-2015 Glider bvba
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
struct simple_pm_bus {
struct clk_bulk_data *clks;
int num_clks;
};
static int simple_pm_bus_probe(struct platform_device *pdev)
{
const struct device *dev = &pdev->dev;
const struct of_dev_auxdata *lookup = dev_get_platdata(dev);
struct device_node *np = dev->of_node;
const struct of_device_id *match;
struct simple_pm_bus *bus;
/*
* Allow user to use driver_override to bind this driver to a
* transparent bus device which has a different compatible string
* that's not listed in simple_pm_bus_of_match. We don't want to do any
* of the simple-pm-bus tasks for these devices, so return early.
*/
if (pdev->driver_override)
return 0;
match = of_match_device(dev->driver->of_match_table, dev);
/*
	 * These are transparent bus devices (not simple-pm-bus matches) that
	 * have their child nodes populated automatically, so nothing more
	 * needs to be done here. We only match the device if this driver is
	 * the most specific match, because we don't want to incorrectly bind
	 * to a device that has a more specific driver. Compatible index 0 is
	 * the most specific string, so matching there means no better driver
	 * can claim the device.
*/
if (match && match->data) {
if (of_property_match_string(np, "compatible", match->compatible) == 0)
return 0;
else
return -ENODEV;
}
bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL);
if (!bus)
return -ENOMEM;
bus->num_clks = devm_clk_bulk_get_all(&pdev->dev, &bus->clks);
if (bus->num_clks < 0)
return dev_err_probe(&pdev->dev, bus->num_clks, "failed to get clocks\n");
dev_set_drvdata(&pdev->dev, bus);
dev_dbg(&pdev->dev, "%s\n", __func__);
pm_runtime_enable(&pdev->dev);
if (np)
of_platform_populate(np, NULL, lookup, &pdev->dev);
return 0;
}
static int simple_pm_bus_remove(struct platform_device *pdev)
{
const void *data = of_device_get_match_data(&pdev->dev);
if (pdev->driver_override || data)
return 0;
dev_dbg(&pdev->dev, "%s\n", __func__);
pm_runtime_disable(&pdev->dev);
return 0;
}
static int simple_pm_bus_runtime_suspend(struct device *dev)
{
struct simple_pm_bus *bus = dev_get_drvdata(dev);
clk_bulk_disable_unprepare(bus->num_clks, bus->clks);
return 0;
}
static int simple_pm_bus_runtime_resume(struct device *dev)
{
struct simple_pm_bus *bus = dev_get_drvdata(dev);
int ret;
ret = clk_bulk_prepare_enable(bus->num_clks, bus->clks);
if (ret) {
dev_err(dev, "failed to enable clocks: %d\n", ret);
return ret;
}
return 0;
}
static const struct dev_pm_ops simple_pm_bus_pm_ops = {
RUNTIME_PM_OPS(simple_pm_bus_runtime_suspend, simple_pm_bus_runtime_resume, NULL)
NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
};
#define ONLY_BUS ((void *) 1) /* Match if the device is only a bus. */
static const struct of_device_id simple_pm_bus_of_match[] = {
{ .compatible = "simple-pm-bus", },
{ .compatible = "simple-bus", .data = ONLY_BUS },
{ .compatible = "simple-mfd", .data = ONLY_BUS },
{ .compatible = "isa", .data = ONLY_BUS },
{ .compatible = "arm,amba-bus", .data = ONLY_BUS },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, simple_pm_bus_of_match);
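/*
 * A hypothetical device tree node this driver would bind to; the address,
 * clock and power-domain phandles are illustrative, not from a real board:
 *
 *	bus@fd000000 {
 *		compatible = "simple-pm-bus";
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *		clocks = <&cpg 32>;
 *		power-domains = <&cpg>;
 *	};
 */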
static struct platform_driver simple_pm_bus_driver = {
.probe = simple_pm_bus_probe,
.remove = simple_pm_bus_remove,
.driver = {
.name = "simple-pm-bus",
.of_match_table = simple_pm_bus_of_match,
.pm = pm_ptr(&simple_pm_bus_pm_ops),
},
};
module_platform_driver(simple_pm_bus_driver);
MODULE_DESCRIPTION("Simple Power-Managed Bus Driver");
MODULE_AUTHOR("Geert Uytterhoeven <[email protected]>");
| linux-master | drivers/bus/simple-pm-bus.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* omap-ocp2scp.c - transform ocp interface protocol to scp protocol
*
* Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com
* Author: Kishon Vijay Abraham I <[email protected]>
*/
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#define OCP2SCP_TIMING 0x18
#define SYNC2_MASK 0xf
static int ocp2scp_remove_devices(struct device *dev, void *c)
{
struct platform_device *pdev = to_platform_device(dev);
platform_device_unregister(pdev);
return 0;
}
static int omap_ocp2scp_probe(struct platform_device *pdev)
{
int ret;
u32 reg;
void __iomem *regs;
struct resource *res;
struct device_node *np = pdev->dev.of_node;
if (np) {
ret = of_platform_populate(np, NULL, NULL, &pdev->dev);
if (ret) {
dev_err(&pdev->dev,
"failed to add resources for ocp2scp child\n");
goto err0;
}
}
pm_runtime_enable(&pdev->dev);
/*
* As per AM572x TRM: http://www.ti.com/lit/ug/spruhz6/spruhz6.pdf
* under section 26.3.2.2, table 26-26 OCP2SCP TIMING Caution;
* As per OMAP4430 TRM: http://www.ti.com/lit/ug/swpu231ap/swpu231ap.pdf
	 * under section 23.12.6.2.2, Table 23-1213 OCP2SCP TIMING Caution;
* As per OMAP4460 TRM: http://www.ti.com/lit/ug/swpu235ab/swpu235ab.pdf
* under section 23.12.6.2.2, Table 23-1213 OCP2SCP TIMING Caution;
* As per OMAP543x TRM http://www.ti.com/lit/pdf/swpu249
* under section 27.3.2.2, Table 27-27 OCP2SCP TIMING Caution;
*
	 * The OCP2SCP read path does not work properly due to the low reset
	 * value of the SYNC2 parameter in OCP2SCP. The suggested reset value
	 * is 0x6 or more.
*/
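	/* The AM437x variant is exempted below (assumption: it is unaffected) */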
if (!of_device_is_compatible(np, "ti,am437x-ocp2scp")) {
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(regs)) {
ret = PTR_ERR(regs);
goto err1;
}
pm_runtime_get_sync(&pdev->dev);
reg = readl_relaxed(regs + OCP2SCP_TIMING);
reg &= ~(SYNC2_MASK);
reg |= 0x6;
writel_relaxed(reg, regs + OCP2SCP_TIMING);
pm_runtime_put_sync(&pdev->dev);
}
return 0;
err1:
pm_runtime_disable(&pdev->dev);
err0:
device_for_each_child(&pdev->dev, NULL, ocp2scp_remove_devices);
return ret;
}
static int omap_ocp2scp_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
device_for_each_child(&pdev->dev, NULL, ocp2scp_remove_devices);
return 0;
}
#ifdef CONFIG_OF
static const struct of_device_id omap_ocp2scp_id_table[] = {
{ .compatible = "ti,omap-ocp2scp" },
{ .compatible = "ti,am437x-ocp2scp" },
{}
};
MODULE_DEVICE_TABLE(of, omap_ocp2scp_id_table);
#endif
static struct platform_driver omap_ocp2scp_driver = {
.probe = omap_ocp2scp_probe,
.remove = omap_ocp2scp_remove,
.driver = {
.name = "omap-ocp2scp",
.of_match_table = of_match_ptr(omap_ocp2scp_id_table),
},
};
module_platform_driver(omap_ocp2scp_driver);
MODULE_ALIAS("platform:omap-ocp2scp");
MODULE_AUTHOR("Texas Instruments Inc.");
MODULE_DESCRIPTION("OMAP OCP2SCP driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/bus/omap-ocp2scp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*
*/
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include "internal.h"
static int mhi_debugfs_states_show(struct seq_file *m, void *d)
{
struct mhi_controller *mhi_cntrl = m->private;
/* states */
seq_printf(m, "PM state: %s Device: %s MHI state: %s EE: %s wake: %s\n",
to_mhi_pm_state_str(mhi_cntrl->pm_state),
mhi_is_active(mhi_cntrl) ? "Active" : "Inactive",
mhi_state_str(mhi_cntrl->dev_state),
TO_MHI_EXEC_STR(mhi_cntrl->ee),
mhi_cntrl->wake_set ? "true" : "false");
/* counters */
seq_printf(m, "M0: %u M2: %u M3: %u", mhi_cntrl->M0, mhi_cntrl->M2,
mhi_cntrl->M3);
seq_printf(m, " device wake: %u pending packets: %u\n",
atomic_read(&mhi_cntrl->dev_wake),
atomic_read(&mhi_cntrl->pending_pkts));
return 0;
}
static int mhi_debugfs_events_show(struct seq_file *m, void *d)
{
struct mhi_controller *mhi_cntrl = m->private;
struct mhi_event *mhi_event;
struct mhi_event_ctxt *er_ctxt;
int i;
if (!mhi_is_active(mhi_cntrl)) {
seq_puts(m, "Device not ready\n");
return -ENODEV;
}
er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
mhi_event = mhi_cntrl->mhi_event;
for (i = 0; i < mhi_cntrl->total_ev_rings;
i++, er_ctxt++, mhi_event++) {
struct mhi_ring *ring = &mhi_event->ring;
if (mhi_event->offload_ev) {
seq_printf(m, "Index: %d is an offload event ring\n",
i);
continue;
}
seq_printf(m, "Index: %d intmod count: %lu time: %lu",
i, (le32_to_cpu(er_ctxt->intmod) & EV_CTX_INTMODC_MASK) >>
__ffs(EV_CTX_INTMODC_MASK),
(le32_to_cpu(er_ctxt->intmod) & EV_CTX_INTMODT_MASK) >>
__ffs(EV_CTX_INTMODT_MASK));
seq_printf(m, " base: 0x%0llx len: 0x%llx", le64_to_cpu(er_ctxt->rbase),
le64_to_cpu(er_ctxt->rlen));
seq_printf(m, " rp: 0x%llx wp: 0x%llx", le64_to_cpu(er_ctxt->rp),
le64_to_cpu(er_ctxt->wp));
seq_printf(m, " local rp: 0x%pK db: 0x%pad\n", ring->rp,
&mhi_event->db_cfg.db_val);
}
return 0;
}
static int mhi_debugfs_channels_show(struct seq_file *m, void *d)
{
struct mhi_controller *mhi_cntrl = m->private;
struct mhi_chan *mhi_chan;
struct mhi_chan_ctxt *chan_ctxt;
int i;
if (!mhi_is_active(mhi_cntrl)) {
seq_puts(m, "Device not ready\n");
return -ENODEV;
}
mhi_chan = mhi_cntrl->mhi_chan;
chan_ctxt = mhi_cntrl->mhi_ctxt->chan_ctxt;
for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) {
struct mhi_ring *ring = &mhi_chan->tre_ring;
if (mhi_chan->offload_ch) {
seq_printf(m, "%s(%u) is an offload channel\n",
mhi_chan->name, mhi_chan->chan);
continue;
}
if (!mhi_chan->mhi_dev)
continue;
seq_printf(m,
"%s(%u) state: 0x%lx brstmode: 0x%lx pollcfg: 0x%lx",
mhi_chan->name, mhi_chan->chan, (le32_to_cpu(chan_ctxt->chcfg) &
CHAN_CTX_CHSTATE_MASK) >> __ffs(CHAN_CTX_CHSTATE_MASK),
(le32_to_cpu(chan_ctxt->chcfg) & CHAN_CTX_BRSTMODE_MASK) >>
__ffs(CHAN_CTX_BRSTMODE_MASK), (le32_to_cpu(chan_ctxt->chcfg) &
CHAN_CTX_POLLCFG_MASK) >> __ffs(CHAN_CTX_POLLCFG_MASK));
seq_printf(m, " type: 0x%x event ring: %u", le32_to_cpu(chan_ctxt->chtype),
le32_to_cpu(chan_ctxt->erindex));
seq_printf(m, " base: 0x%llx len: 0x%llx rp: 0x%llx wp: 0x%llx",
le64_to_cpu(chan_ctxt->rbase), le64_to_cpu(chan_ctxt->rlen),
le64_to_cpu(chan_ctxt->rp), le64_to_cpu(chan_ctxt->wp));
seq_printf(m, " local rp: 0x%pK local wp: 0x%pK db: 0x%pad\n",
ring->rp, ring->wp,
&mhi_chan->db_cfg.db_val);
}
return 0;
}
static int mhi_device_info_show(struct device *dev, void *data)
{
struct mhi_device *mhi_dev;
if (dev->bus != &mhi_bus_type)
return 0;
mhi_dev = to_mhi_device(dev);
seq_printf((struct seq_file *)data, "%s: type: %s dev_wake: %u",
mhi_dev->name, mhi_dev->dev_type ? "Controller" : "Transfer",
mhi_dev->dev_wake);
/* for transfer device types only */
if (mhi_dev->dev_type == MHI_DEVICE_XFER)
seq_printf((struct seq_file *)data, " channels: %u(UL)/%u(DL)",
mhi_dev->ul_chan_id, mhi_dev->dl_chan_id);
seq_puts((struct seq_file *)data, "\n");
return 0;
}
static int mhi_debugfs_devices_show(struct seq_file *m, void *d)
{
struct mhi_controller *mhi_cntrl = m->private;
if (!mhi_is_active(mhi_cntrl)) {
seq_puts(m, "Device not ready\n");
return -ENODEV;
}
/* Show controller and client(s) info */
mhi_device_info_show(&mhi_cntrl->mhi_dev->dev, m);
device_for_each_child(&mhi_cntrl->mhi_dev->dev, m, mhi_device_info_show);
return 0;
}
static int mhi_debugfs_regdump_show(struct seq_file *m, void *d)
{
struct mhi_controller *mhi_cntrl = m->private;
enum mhi_state state;
enum mhi_ee_type ee;
int i, ret = -EIO;
u32 val;
void __iomem *mhi_base = mhi_cntrl->regs;
void __iomem *bhi_base = mhi_cntrl->bhi;
void __iomem *bhie_base = mhi_cntrl->bhie;
void __iomem *wake_db = mhi_cntrl->wake_db;
struct {
const char *name;
int offset;
void __iomem *base;
} regs[] = {
{ "MHI_REGLEN", MHIREGLEN, mhi_base},
{ "MHI_VER", MHIVER, mhi_base},
{ "MHI_CFG", MHICFG, mhi_base},
{ "MHI_CTRL", MHICTRL, mhi_base},
{ "MHI_STATUS", MHISTATUS, mhi_base},
{ "MHI_WAKE_DB", 0, wake_db},
{ "BHI_EXECENV", BHI_EXECENV, bhi_base},
{ "BHI_STATUS", BHI_STATUS, bhi_base},
{ "BHI_ERRCODE", BHI_ERRCODE, bhi_base},
{ "BHI_ERRDBG1", BHI_ERRDBG1, bhi_base},
{ "BHI_ERRDBG2", BHI_ERRDBG2, bhi_base},
{ "BHI_ERRDBG3", BHI_ERRDBG3, bhi_base},
{ "BHIE_TXVEC_DB", BHIE_TXVECDB_OFFS, bhie_base},
{ "BHIE_TXVEC_STATUS", BHIE_TXVECSTATUS_OFFS, bhie_base},
{ "BHIE_RXVEC_DB", BHIE_RXVECDB_OFFS, bhie_base},
{ "BHIE_RXVEC_STATUS", BHIE_RXVECSTATUS_OFFS, bhie_base},
{ NULL },
};
if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
return ret;
seq_printf(m, "Host PM state: %s Device state: %s EE: %s\n",
to_mhi_pm_state_str(mhi_cntrl->pm_state),
mhi_state_str(mhi_cntrl->dev_state),
TO_MHI_EXEC_STR(mhi_cntrl->ee));
state = mhi_get_mhi_state(mhi_cntrl);
ee = mhi_get_exec_env(mhi_cntrl);
seq_printf(m, "Device EE: %s state: %s\n", TO_MHI_EXEC_STR(ee),
mhi_state_str(state));
for (i = 0; regs[i].name; i++) {
if (!regs[i].base)
continue;
ret = mhi_read_reg(mhi_cntrl, regs[i].base, regs[i].offset,
&val);
if (ret)
continue;
seq_printf(m, "%s: 0x%x\n", regs[i].name, val);
}
return 0;
}
static int mhi_debugfs_device_wake_show(struct seq_file *m, void *d)
{
struct mhi_controller *mhi_cntrl = m->private;
struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
if (!mhi_is_active(mhi_cntrl)) {
seq_puts(m, "Device not ready\n");
return -ENODEV;
}
seq_printf(m,
"Wake count: %d\n%s\n", mhi_dev->dev_wake,
"Usage: echo get/put > device_wake to vote/unvote for M0");
return 0;
}
static ssize_t mhi_debugfs_device_wake_write(struct file *file,
const char __user *ubuf,
size_t count, loff_t *ppos)
{
struct seq_file *m = file->private_data;
struct mhi_controller *mhi_cntrl = m->private;
struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
	char buf[16] = { 0 };	/* zero-fill: short writes must not leave stale stack bytes */
int ret = -EINVAL;
if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
return -EFAULT;
if (!strncmp(buf, "get", 3)) {
ret = mhi_device_get_sync(mhi_dev);
} else if (!strncmp(buf, "put", 3)) {
mhi_device_put(mhi_dev);
ret = 0;
}
return ret ? ret : count;
}
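/*
 * Example usage from userspace, assuming debugfs is mounted at
 * /sys/kernel/debug:
 *
 *	echo get > /sys/kernel/debug/mhi/<controller>/device_wake
 *	echo put > /sys/kernel/debug/mhi/<controller>/device_wake
 */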
static int mhi_debugfs_timeout_ms_show(struct seq_file *m, void *d)
{
struct mhi_controller *mhi_cntrl = m->private;
seq_printf(m, "%u ms\n", mhi_cntrl->timeout_ms);
return 0;
}
static ssize_t mhi_debugfs_timeout_ms_write(struct file *file,
const char __user *ubuf,
size_t count, loff_t *ppos)
{
struct seq_file *m = file->private_data;
struct mhi_controller *mhi_cntrl = m->private;
u32 timeout_ms;
if (kstrtou32_from_user(ubuf, count, 0, &timeout_ms))
return -EINVAL;
mhi_cntrl->timeout_ms = timeout_ms;
return count;
}
static int mhi_debugfs_states_open(struct inode *inode, struct file *fp)
{
return single_open(fp, mhi_debugfs_states_show, inode->i_private);
}
static int mhi_debugfs_events_open(struct inode *inode, struct file *fp)
{
return single_open(fp, mhi_debugfs_events_show, inode->i_private);
}
static int mhi_debugfs_channels_open(struct inode *inode, struct file *fp)
{
return single_open(fp, mhi_debugfs_channels_show, inode->i_private);
}
static int mhi_debugfs_devices_open(struct inode *inode, struct file *fp)
{
return single_open(fp, mhi_debugfs_devices_show, inode->i_private);
}
static int mhi_debugfs_regdump_open(struct inode *inode, struct file *fp)
{
return single_open(fp, mhi_debugfs_regdump_show, inode->i_private);
}
static int mhi_debugfs_device_wake_open(struct inode *inode, struct file *fp)
{
return single_open(fp, mhi_debugfs_device_wake_show, inode->i_private);
}
static int mhi_debugfs_timeout_ms_open(struct inode *inode, struct file *fp)
{
return single_open(fp, mhi_debugfs_timeout_ms_show, inode->i_private);
}
static const struct file_operations debugfs_states_fops = {
.open = mhi_debugfs_states_open,
.release = single_release,
.read = seq_read,
};
static const struct file_operations debugfs_events_fops = {
.open = mhi_debugfs_events_open,
.release = single_release,
.read = seq_read,
};
static const struct file_operations debugfs_channels_fops = {
.open = mhi_debugfs_channels_open,
.release = single_release,
.read = seq_read,
};
static const struct file_operations debugfs_devices_fops = {
.open = mhi_debugfs_devices_open,
.release = single_release,
.read = seq_read,
};
static const struct file_operations debugfs_regdump_fops = {
.open = mhi_debugfs_regdump_open,
.release = single_release,
.read = seq_read,
};
static const struct file_operations debugfs_device_wake_fops = {
.open = mhi_debugfs_device_wake_open,
.write = mhi_debugfs_device_wake_write,
.release = single_release,
.read = seq_read,
};
static const struct file_operations debugfs_timeout_ms_fops = {
.open = mhi_debugfs_timeout_ms_open,
.write = mhi_debugfs_timeout_ms_write,
.release = single_release,
.read = seq_read,
};
static struct dentry *mhi_debugfs_root;
void mhi_create_debugfs(struct mhi_controller *mhi_cntrl)
{
mhi_cntrl->debugfs_dentry =
debugfs_create_dir(dev_name(&mhi_cntrl->mhi_dev->dev),
mhi_debugfs_root);
debugfs_create_file("states", 0444, mhi_cntrl->debugfs_dentry,
mhi_cntrl, &debugfs_states_fops);
debugfs_create_file("events", 0444, mhi_cntrl->debugfs_dentry,
mhi_cntrl, &debugfs_events_fops);
debugfs_create_file("channels", 0444, mhi_cntrl->debugfs_dentry,
mhi_cntrl, &debugfs_channels_fops);
debugfs_create_file("devices", 0444, mhi_cntrl->debugfs_dentry,
mhi_cntrl, &debugfs_devices_fops);
debugfs_create_file("regdump", 0444, mhi_cntrl->debugfs_dentry,
mhi_cntrl, &debugfs_regdump_fops);
debugfs_create_file("device_wake", 0644, mhi_cntrl->debugfs_dentry,
mhi_cntrl, &debugfs_device_wake_fops);
debugfs_create_file("timeout_ms", 0644, mhi_cntrl->debugfs_dentry,
mhi_cntrl, &debugfs_timeout_ms_fops);
}
void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl)
{
debugfs_remove_recursive(mhi_cntrl->debugfs_dentry);
mhi_cntrl->debugfs_dentry = NULL;
}
void mhi_debugfs_init(void)
{
mhi_debugfs_root = debugfs_create_dir(mhi_bus_type.name, NULL);
}
void mhi_debugfs_exit(void)
{
debugfs_remove_recursive(mhi_debugfs_root);
}
| linux-master | drivers/bus/mhi/host/debugfs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
*
*/
#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include "internal.h"
static DEFINE_IDA(mhi_controller_ida);
const char * const mhi_ee_str[MHI_EE_MAX] = {
[MHI_EE_PBL] = "PRIMARY BOOTLOADER",
[MHI_EE_SBL] = "SECONDARY BOOTLOADER",
[MHI_EE_AMSS] = "MISSION MODE",
[MHI_EE_RDDM] = "RAMDUMP DOWNLOAD MODE",
[MHI_EE_WFW] = "WLAN FIRMWARE",
[MHI_EE_PTHRU] = "PASS THROUGH",
[MHI_EE_EDL] = "EMERGENCY DOWNLOAD",
[MHI_EE_FP] = "FLASH PROGRAMMER",
[MHI_EE_DISABLE_TRANSITION] = "DISABLE",
[MHI_EE_NOT_SUPPORTED] = "NOT SUPPORTED",
};
const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = {
[DEV_ST_TRANSITION_PBL] = "PBL",
[DEV_ST_TRANSITION_READY] = "READY",
[DEV_ST_TRANSITION_SBL] = "SBL",
[DEV_ST_TRANSITION_MISSION_MODE] = "MISSION MODE",
[DEV_ST_TRANSITION_FP] = "FLASH PROGRAMMER",
[DEV_ST_TRANSITION_SYS_ERR] = "SYS ERROR",
[DEV_ST_TRANSITION_DISABLE] = "DISABLE",
};
const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX] = {
[MHI_CH_STATE_TYPE_RESET] = "RESET",
[MHI_CH_STATE_TYPE_STOP] = "STOP",
[MHI_CH_STATE_TYPE_START] = "START",
};
static const char * const mhi_pm_state_str[] = {
[MHI_PM_STATE_DISABLE] = "DISABLE",
[MHI_PM_STATE_POR] = "POWER ON RESET",
[MHI_PM_STATE_M0] = "M0",
[MHI_PM_STATE_M2] = "M2",
[MHI_PM_STATE_M3_ENTER] = "M?->M3",
[MHI_PM_STATE_M3] = "M3",
[MHI_PM_STATE_M3_EXIT] = "M3->M0",
[MHI_PM_STATE_FW_DL_ERR] = "Firmware Download Error",
[MHI_PM_STATE_SYS_ERR_DETECT] = "SYS ERROR Detect",
[MHI_PM_STATE_SYS_ERR_PROCESS] = "SYS ERROR Process",
[MHI_PM_STATE_SHUTDOWN_PROCESS] = "SHUTDOWN Process",
[MHI_PM_STATE_LD_ERR_FATAL_DETECT] = "Linkdown or Error Fatal Detect",
};
const char *to_mhi_pm_state_str(u32 state)
{
int index;
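	/*
	 * pm_state is a bitmask with a single bit set; __fls() maps that bit
	 * to the sequential index used by the string table above.
	 */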
if (state)
index = __fls(state);
if (!state || index >= ARRAY_SIZE(mhi_pm_state_str))
return "Invalid State";
return mhi_pm_state_str[index];
}
static ssize_t serial_number_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct mhi_device *mhi_dev = to_mhi_device(dev);
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
return sysfs_emit(buf, "Serial Number: %u\n",
mhi_cntrl->serial_number);
}
static DEVICE_ATTR_RO(serial_number);
static ssize_t oem_pk_hash_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct mhi_device *mhi_dev = to_mhi_device(dev);
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
int i, cnt = 0;
for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++)
cnt += sysfs_emit_at(buf, cnt, "OEMPKHASH[%d]: 0x%x\n",
i, mhi_cntrl->oem_pk_hash[i]);
return cnt;
}
static DEVICE_ATTR_RO(oem_pk_hash);
static ssize_t soc_reset_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
struct mhi_device *mhi_dev = to_mhi_device(dev);
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
mhi_soc_reset(mhi_cntrl);
return count;
}
static DEVICE_ATTR_WO(soc_reset);
static struct attribute *mhi_dev_attrs[] = {
&dev_attr_serial_number.attr,
&dev_attr_oem_pk_hash.attr,
&dev_attr_soc_reset.attr,
NULL,
};
ATTRIBUTE_GROUPS(mhi_dev);
/* MHI protocol requires the transfer ring to be aligned with ring length */
static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl,
struct mhi_ring *ring,
u64 len)
{
ring->alloc_size = len + (len - 1);
ring->pre_aligned = dma_alloc_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
&ring->dma_handle, GFP_KERNEL);
if (!ring->pre_aligned)
return -ENOMEM;
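	/*
	 * alloc_size is len + (len - 1), so a len-aligned block of len bytes
	 * is guaranteed to fit. Round the bus address up to the next aligned
	 * boundary (hypothetical example: len = 0x1000 and dma_handle =
	 * 0x80000ff0 give iommu_base = 0x80001000) and offset the CPU-side
	 * base to match. Note the mask arithmetic assumes len is a power of
	 * two.
	 */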
ring->iommu_base = (ring->dma_handle + (len - 1)) & ~(len - 1);
ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle);
return 0;
}
void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
{
int i;
struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
if (mhi_event->offload_ev)
continue;
free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
}
free_irq(mhi_cntrl->irq[0], mhi_cntrl);
}
int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
{
struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
unsigned long irq_flags = IRQF_SHARED | IRQF_NO_SUSPEND;
int i, ret;
/* if controller driver has set irq_flags, use it */
if (mhi_cntrl->irq_flags)
irq_flags = mhi_cntrl->irq_flags;
/* Setup BHI_INTVEC IRQ */
ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler,
mhi_intvec_threaded_handler,
irq_flags,
"bhi", mhi_cntrl);
if (ret)
return ret;
/*
	 * IRQs are enabled later, during mhi_async_power_up(), so disable them
	 * explicitly here. Because IRQF_SHARED is used as the default request
	 * flag, IRQ_NOAUTOEN is not applicable (it cannot be used with shared
	 * IRQs).
*/
disable_irq(mhi_cntrl->irq[0]);
for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
if (mhi_event->offload_ev)
continue;
if (mhi_event->irq >= mhi_cntrl->nr_irqs) {
dev_err(dev, "irq %d not available for event ring\n",
mhi_event->irq);
ret = -EINVAL;
goto error_request;
}
ret = request_irq(mhi_cntrl->irq[mhi_event->irq],
mhi_irq_handler,
irq_flags,
"mhi", mhi_event);
if (ret) {
dev_err(dev, "Error requesting irq:%d for ev:%d\n",
mhi_cntrl->irq[mhi_event->irq], i);
goto error_request;
}
disable_irq(mhi_cntrl->irq[mhi_event->irq]);
}
return 0;
error_request:
for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
if (mhi_event->offload_ev)
continue;
free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
}
free_irq(mhi_cntrl->irq[0], mhi_cntrl);
return ret;
}
void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
int i;
struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt;
struct mhi_cmd *mhi_cmd;
struct mhi_event *mhi_event;
struct mhi_ring *ring;
mhi_cmd = mhi_cntrl->mhi_cmd;
for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) {
ring = &mhi_cmd->ring;
dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
ring->pre_aligned, ring->dma_handle);
ring->base = NULL;
ring->iommu_base = 0;
}
dma_free_coherent(mhi_cntrl->cntrl_dev,
sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);
mhi_event = mhi_cntrl->mhi_event;
for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
if (mhi_event->offload_ev)
continue;
ring = &mhi_event->ring;
dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
ring->pre_aligned, ring->dma_handle);
ring->base = NULL;
ring->iommu_base = 0;
}
dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) *
mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
mhi_ctxt->er_ctxt_addr);
dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) *
mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
mhi_ctxt->chan_ctxt_addr);
kfree(mhi_ctxt);
mhi_cntrl->mhi_ctxt = NULL;
}
int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
struct mhi_ctxt *mhi_ctxt;
struct mhi_chan_ctxt *chan_ctxt;
struct mhi_event_ctxt *er_ctxt;
struct mhi_cmd_ctxt *cmd_ctxt;
struct mhi_chan *mhi_chan;
struct mhi_event *mhi_event;
struct mhi_cmd *mhi_cmd;
u32 tmp;
int ret = -ENOMEM, i;
atomic_set(&mhi_cntrl->dev_wake, 0);
atomic_set(&mhi_cntrl->pending_pkts, 0);
mhi_ctxt = kzalloc(sizeof(*mhi_ctxt), GFP_KERNEL);
if (!mhi_ctxt)
return -ENOMEM;
/* Setup channel ctxt */
mhi_ctxt->chan_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
sizeof(*mhi_ctxt->chan_ctxt) *
mhi_cntrl->max_chan,
&mhi_ctxt->chan_ctxt_addr,
GFP_KERNEL);
if (!mhi_ctxt->chan_ctxt)
goto error_alloc_chan_ctxt;
mhi_chan = mhi_cntrl->mhi_chan;
chan_ctxt = mhi_ctxt->chan_ctxt;
for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) {
/* Skip if it is an offload channel */
if (mhi_chan->offload_ch)
continue;
tmp = le32_to_cpu(chan_ctxt->chcfg);
tmp &= ~CHAN_CTX_CHSTATE_MASK;
tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED);
tmp &= ~CHAN_CTX_BRSTMODE_MASK;
tmp |= FIELD_PREP(CHAN_CTX_BRSTMODE_MASK, mhi_chan->db_cfg.brstmode);
tmp &= ~CHAN_CTX_POLLCFG_MASK;
tmp |= FIELD_PREP(CHAN_CTX_POLLCFG_MASK, mhi_chan->db_cfg.pollcfg);
chan_ctxt->chcfg = cpu_to_le32(tmp);
chan_ctxt->chtype = cpu_to_le32(mhi_chan->type);
chan_ctxt->erindex = cpu_to_le32(mhi_chan->er_index);
mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
mhi_chan->tre_ring.db_addr = (void __iomem *)&chan_ctxt->wp;
}
/* Setup event context */
mhi_ctxt->er_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
sizeof(*mhi_ctxt->er_ctxt) *
mhi_cntrl->total_ev_rings,
&mhi_ctxt->er_ctxt_addr,
GFP_KERNEL);
if (!mhi_ctxt->er_ctxt)
goto error_alloc_er_ctxt;
er_ctxt = mhi_ctxt->er_ctxt;
mhi_event = mhi_cntrl->mhi_event;
for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
mhi_event++) {
struct mhi_ring *ring = &mhi_event->ring;
/* Skip if it is an offload event */
if (mhi_event->offload_ev)
continue;
tmp = le32_to_cpu(er_ctxt->intmod);
tmp &= ~EV_CTX_INTMODC_MASK;
tmp &= ~EV_CTX_INTMODT_MASK;
tmp |= FIELD_PREP(EV_CTX_INTMODT_MASK, mhi_event->intmod);
er_ctxt->intmod = cpu_to_le32(tmp);
er_ctxt->ertype = cpu_to_le32(MHI_ER_TYPE_VALID);
er_ctxt->msivec = cpu_to_le32(mhi_event->irq);
mhi_event->db_cfg.db_mode = true;
ring->el_size = sizeof(struct mhi_ring_element);
ring->len = ring->el_size * ring->elements;
ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
if (ret)
goto error_alloc_er;
/*
		 * If the read pointer equals the write pointer, then the
* ring is empty
*/
ring->rp = ring->wp = ring->base;
er_ctxt->rbase = cpu_to_le64(ring->iommu_base);
er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase;
er_ctxt->rlen = cpu_to_le64(ring->len);
ring->ctxt_wp = &er_ctxt->wp;
}
/* Setup cmd context */
ret = -ENOMEM;
mhi_ctxt->cmd_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
sizeof(*mhi_ctxt->cmd_ctxt) *
NR_OF_CMD_RINGS,
&mhi_ctxt->cmd_ctxt_addr,
GFP_KERNEL);
if (!mhi_ctxt->cmd_ctxt)
goto error_alloc_er;
mhi_cmd = mhi_cntrl->mhi_cmd;
cmd_ctxt = mhi_ctxt->cmd_ctxt;
for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
struct mhi_ring *ring = &mhi_cmd->ring;
ring->el_size = sizeof(struct mhi_ring_element);
ring->elements = CMD_EL_PER_RING;
ring->len = ring->el_size * ring->elements;
ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
if (ret)
goto error_alloc_cmd;
ring->rp = ring->wp = ring->base;
cmd_ctxt->rbase = cpu_to_le64(ring->iommu_base);
cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase;
cmd_ctxt->rlen = cpu_to_le64(ring->len);
ring->ctxt_wp = &cmd_ctxt->wp;
}
mhi_cntrl->mhi_ctxt = mhi_ctxt;
return 0;
error_alloc_cmd:
for (--i, --mhi_cmd; i >= 0; i--, mhi_cmd--) {
struct mhi_ring *ring = &mhi_cmd->ring;
dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
ring->pre_aligned, ring->dma_handle);
}
dma_free_coherent(mhi_cntrl->cntrl_dev,
sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);
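	/*
	 * Reached by falling through from the command-ring failure path:
	 * point i and mhi_event one past the last event ring so the unwind
	 * loop below frees all of them.
	 */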
i = mhi_cntrl->total_ev_rings;
mhi_event = mhi_cntrl->mhi_event + i;
error_alloc_er:
for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
struct mhi_ring *ring = &mhi_event->ring;
if (mhi_event->offload_ev)
continue;
dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
ring->pre_aligned, ring->dma_handle);
}
dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) *
mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
mhi_ctxt->er_ctxt_addr);
error_alloc_er_ctxt:
dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) *
mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
mhi_ctxt->chan_ctxt_addr);
error_alloc_chan_ctxt:
kfree(mhi_ctxt);
return ret;
}
int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
{
u32 val;
int i, ret;
struct mhi_chan *mhi_chan;
struct mhi_event *mhi_event;
void __iomem *base = mhi_cntrl->regs;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
struct {
u32 offset;
u32 val;
} reg_info[] = {
{
CCABAP_HIGHER,
upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
},
{
CCABAP_LOWER,
lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
},
{
ECABAP_HIGHER,
upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
},
{
ECABAP_LOWER,
lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
},
{
CRCBAP_HIGHER,
upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
},
{
CRCBAP_LOWER,
lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
},
{
MHICTRLBASE_HIGHER,
upper_32_bits(mhi_cntrl->iova_start),
},
{
MHICTRLBASE_LOWER,
lower_32_bits(mhi_cntrl->iova_start),
},
{
MHIDATABASE_HIGHER,
upper_32_bits(mhi_cntrl->iova_start),
},
{
MHIDATABASE_LOWER,
lower_32_bits(mhi_cntrl->iova_start),
},
{
MHICTRLLIMIT_HIGHER,
upper_32_bits(mhi_cntrl->iova_stop),
},
{
MHICTRLLIMIT_LOWER,
lower_32_bits(mhi_cntrl->iova_stop),
},
{
MHIDATALIMIT_HIGHER,
upper_32_bits(mhi_cntrl->iova_stop),
},
{
MHIDATALIMIT_LOWER,
lower_32_bits(mhi_cntrl->iova_stop),
},
{0, 0}
};
dev_dbg(dev, "Initializing MHI registers\n");
/* Read channel db offset */
ret = mhi_read_reg(mhi_cntrl, base, CHDBOFF, &val);
if (ret) {
dev_err(dev, "Unable to read CHDBOFF register\n");
return -EIO;
}
if (val >= mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB)) {
dev_err(dev, "CHDB offset: 0x%x is out of range: 0x%zx\n",
val, mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB));
return -ERANGE;
}
/* Setup wake db */
mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
mhi_cntrl->wake_set = false;
/* Setup channel db address for each channel in tre_ring */
mhi_chan = mhi_cntrl->mhi_chan;
for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++)
mhi_chan->tre_ring.db_addr = base + val;
/* Read event ring db offset */
ret = mhi_read_reg(mhi_cntrl, base, ERDBOFF, &val);
if (ret) {
dev_err(dev, "Unable to read ERDBOFF register\n");
return -EIO;
}
if (val >= mhi_cntrl->reg_len - (8 * mhi_cntrl->total_ev_rings)) {
dev_err(dev, "ERDB offset: 0x%x is out of range: 0x%zx\n",
val, mhi_cntrl->reg_len - (8 * mhi_cntrl->total_ev_rings));
return -ERANGE;
}
/* Setup event db address for each ev_ring */
mhi_event = mhi_cntrl->mhi_event;
for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) {
if (mhi_event->offload_ev)
continue;
mhi_event->ring.db_addr = base + val;
}
/* Setup DB register for primary CMD rings */
mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER;
/* Write to MMIO registers */
for (i = 0; reg_info[i].offset; i++)
mhi_write_reg(mhi_cntrl, base, reg_info[i].offset,
reg_info[i].val);
ret = mhi_write_reg_field(mhi_cntrl, base, MHICFG, MHICFG_NER_MASK,
mhi_cntrl->total_ev_rings);
if (ret) {
dev_err(dev, "Unable to write MHICFG register\n");
return ret;
}
ret = mhi_write_reg_field(mhi_cntrl, base, MHICFG, MHICFG_NHWER_MASK,
mhi_cntrl->hw_ev_rings);
if (ret) {
dev_err(dev, "Unable to write MHICFG register\n");
return ret;
}
return 0;
}
void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan)
{
struct mhi_ring *buf_ring;
struct mhi_ring *tre_ring;
struct mhi_chan_ctxt *chan_ctxt;
u32 tmp;
buf_ring = &mhi_chan->buf_ring;
tre_ring = &mhi_chan->tre_ring;
chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
if (!chan_ctxt->rbase) /* Already uninitialized */
return;
dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size,
tre_ring->pre_aligned, tre_ring->dma_handle);
vfree(buf_ring->base);
buf_ring->base = tre_ring->base = NULL;
tre_ring->ctxt_wp = NULL;
chan_ctxt->rbase = 0;
chan_ctxt->rlen = 0;
chan_ctxt->rp = 0;
chan_ctxt->wp = 0;
tmp = le32_to_cpu(chan_ctxt->chcfg);
tmp &= ~CHAN_CTX_CHSTATE_MASK;
tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED);
chan_ctxt->chcfg = cpu_to_le32(tmp);
/* Update to all cores */
smp_wmb();
}
int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan)
{
struct mhi_ring *buf_ring;
struct mhi_ring *tre_ring;
struct mhi_chan_ctxt *chan_ctxt;
u32 tmp;
int ret;
buf_ring = &mhi_chan->buf_ring;
tre_ring = &mhi_chan->tre_ring;
tre_ring->el_size = sizeof(struct mhi_ring_element);
tre_ring->len = tre_ring->el_size * tre_ring->elements;
chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len);
if (ret)
return -ENOMEM;
buf_ring->el_size = sizeof(struct mhi_buf_info);
buf_ring->len = buf_ring->el_size * buf_ring->elements;
buf_ring->base = vzalloc(buf_ring->len);
if (!buf_ring->base) {
dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size,
tre_ring->pre_aligned, tre_ring->dma_handle);
return -ENOMEM;
}
tmp = le32_to_cpu(chan_ctxt->chcfg);
tmp &= ~CHAN_CTX_CHSTATE_MASK;
tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_ENABLED);
chan_ctxt->chcfg = cpu_to_le32(tmp);
chan_ctxt->rbase = cpu_to_le64(tre_ring->iommu_base);
chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase;
chan_ctxt->rlen = cpu_to_le64(tre_ring->len);
tre_ring->ctxt_wp = &chan_ctxt->wp;
tre_ring->rp = tre_ring->wp = tre_ring->base;
buf_ring->rp = buf_ring->wp = buf_ring->base;
mhi_chan->db_cfg.db_mode = 1;
/* Update to all cores */
smp_wmb();
return 0;
}
static int parse_ev_cfg(struct mhi_controller *mhi_cntrl,
const struct mhi_controller_config *config)
{
struct mhi_event *mhi_event;
const struct mhi_event_config *event_cfg;
struct device *dev = mhi_cntrl->cntrl_dev;
int i, num;
num = config->num_events;
mhi_cntrl->total_ev_rings = num;
mhi_cntrl->mhi_event = kcalloc(num, sizeof(*mhi_cntrl->mhi_event),
GFP_KERNEL);
if (!mhi_cntrl->mhi_event)
return -ENOMEM;
/* Populate event ring */
mhi_event = mhi_cntrl->mhi_event;
for (i = 0; i < num; i++) {
event_cfg = &config->event_cfg[i];
mhi_event->er_index = i;
mhi_event->ring.elements = event_cfg->num_elements;
mhi_event->intmod = event_cfg->irq_moderation_ms;
mhi_event->irq = event_cfg->irq;
if (event_cfg->channel != U32_MAX) {
/* This event ring has a dedicated channel */
mhi_event->chan = event_cfg->channel;
if (mhi_event->chan >= mhi_cntrl->max_chan) {
dev_err(dev,
"Event Ring channel not available\n");
goto error_ev_cfg;
}
mhi_event->mhi_chan =
&mhi_cntrl->mhi_chan[mhi_event->chan];
}
/* Priority is fixed to 1 for now */
mhi_event->priority = 1;
mhi_event->db_cfg.brstmode = event_cfg->mode;
if (MHI_INVALID_BRSTMODE(mhi_event->db_cfg.brstmode))
goto error_ev_cfg;
if (mhi_event->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
mhi_event->db_cfg.process_db = mhi_db_brstmode;
else
mhi_event->db_cfg.process_db = mhi_db_brstmode_disable;
mhi_event->data_type = event_cfg->data_type;
switch (mhi_event->data_type) {
case MHI_ER_DATA:
mhi_event->process_event = mhi_process_data_event_ring;
break;
case MHI_ER_CTRL:
mhi_event->process_event = mhi_process_ctrl_ev_ring;
break;
default:
dev_err(dev, "Event Ring type not supported\n");
goto error_ev_cfg;
}
mhi_event->hw_ring = event_cfg->hardware_event;
if (mhi_event->hw_ring)
mhi_cntrl->hw_ev_rings++;
else
mhi_cntrl->sw_ev_rings++;
mhi_event->cl_manage = event_cfg->client_managed;
mhi_event->offload_ev = event_cfg->offload_channel;
mhi_event++;
}
return 0;
error_ev_cfg:
kfree(mhi_cntrl->mhi_event);
return -EINVAL;
}
static int parse_ch_cfg(struct mhi_controller *mhi_cntrl,
const struct mhi_controller_config *config)
{
const struct mhi_channel_config *ch_cfg;
struct device *dev = mhi_cntrl->cntrl_dev;
int i;
u32 chan;
mhi_cntrl->max_chan = config->max_channels;
/*
 * The allocation of MHI channels can exceed 32KB in some scenarios,
 * so to avoid any possible memory allocation failures, the virtually
 * contiguous vcalloc() is used here
 */
mhi_cntrl->mhi_chan = vcalloc(mhi_cntrl->max_chan,
sizeof(*mhi_cntrl->mhi_chan));
if (!mhi_cntrl->mhi_chan)
return -ENOMEM;
INIT_LIST_HEAD(&mhi_cntrl->lpm_chans);
/* Populate channel configurations */
for (i = 0; i < config->num_channels; i++) {
struct mhi_chan *mhi_chan;
ch_cfg = &config->ch_cfg[i];
chan = ch_cfg->num;
if (chan >= mhi_cntrl->max_chan) {
dev_err(dev, "Channel %d not available\n", chan);
goto error_chan_cfg;
}
mhi_chan = &mhi_cntrl->mhi_chan[chan];
mhi_chan->name = ch_cfg->name;
mhi_chan->chan = chan;
mhi_chan->tre_ring.elements = ch_cfg->num_elements;
if (!mhi_chan->tre_ring.elements)
goto error_chan_cfg;
/*
 * For some channels, the local ring length should be bigger than
 * the transfer ring length because of internal logical channels
 * in the device, so the host can queue many more buffers than the
 * transfer ring can hold. For example, RSC channels should have a
 * larger local ring length than transfer ring length.
 */
mhi_chan->buf_ring.elements = ch_cfg->local_elements;
if (!mhi_chan->buf_ring.elements)
mhi_chan->buf_ring.elements = mhi_chan->tre_ring.elements;
mhi_chan->er_index = ch_cfg->event_ring;
mhi_chan->dir = ch_cfg->dir;
/*
 * For most channels, chtype is identical to the channel direction,
 * so if it is not defined, assign the channel direction to chtype
 */
mhi_chan->type = ch_cfg->type;
if (!mhi_chan->type)
mhi_chan->type = (enum mhi_ch_type)mhi_chan->dir;
mhi_chan->ee_mask = ch_cfg->ee_mask;
mhi_chan->db_cfg.pollcfg = ch_cfg->pollcfg;
mhi_chan->lpm_notify = ch_cfg->lpm_notify;
mhi_chan->offload_ch = ch_cfg->offload_channel;
mhi_chan->db_cfg.reset_req = ch_cfg->doorbell_mode_switch;
mhi_chan->pre_alloc = ch_cfg->auto_queue;
mhi_chan->wake_capable = ch_cfg->wake_capable;
/*
* If MHI host allocates buffers, then the channel direction
* should be DMA_FROM_DEVICE
*/
if (mhi_chan->pre_alloc && mhi_chan->dir != DMA_FROM_DEVICE) {
dev_err(dev, "Invalid channel configuration\n");
goto error_chan_cfg;
}
/*
 * Bi-directional and directionless channels must be
 * offload channels
 */
if ((mhi_chan->dir == DMA_BIDIRECTIONAL ||
mhi_chan->dir == DMA_NONE) && !mhi_chan->offload_ch) {
dev_err(dev, "Invalid channel configuration\n");
goto error_chan_cfg;
}
if (!mhi_chan->offload_ch) {
mhi_chan->db_cfg.brstmode = ch_cfg->doorbell;
if (MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode)) {
dev_err(dev, "Invalid Door bell mode\n");
goto error_chan_cfg;
}
}
if (mhi_chan->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
mhi_chan->db_cfg.process_db = mhi_db_brstmode;
else
mhi_chan->db_cfg.process_db = mhi_db_brstmode_disable;
mhi_chan->configured = true;
if (mhi_chan->lpm_notify)
list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans);
}
return 0;
error_chan_cfg:
vfree(mhi_cntrl->mhi_chan);
return -EINVAL;
}
static int parse_config(struct mhi_controller *mhi_cntrl,
const struct mhi_controller_config *config)
{
int ret;
/* Parse MHI channel configuration */
ret = parse_ch_cfg(mhi_cntrl, config);
if (ret)
return ret;
/* Parse MHI event configuration */
ret = parse_ev_cfg(mhi_cntrl, config);
if (ret)
goto error_ev_cfg;
mhi_cntrl->timeout_ms = config->timeout_ms;
if (!mhi_cntrl->timeout_ms)
mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS;
mhi_cntrl->bounce_buf = config->use_bounce_buf;
mhi_cntrl->buffer_len = config->buf_len;
if (!mhi_cntrl->buffer_len)
mhi_cntrl->buffer_len = MHI_MAX_MTU;
/* By default, host is allowed to ring DB in both M0 and M2 states */
mhi_cntrl->db_access = MHI_PM_M0 | MHI_PM_M2;
if (config->m2_no_db)
mhi_cntrl->db_access &= ~MHI_PM_M2;
return 0;
error_ev_cfg:
vfree(mhi_cntrl->mhi_chan);
return ret;
}
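/*
 * Illustrative sketch of the structures parse_config() consumes; all field
 * values below are hypothetical, not taken from this file:
 *
 *	static const struct mhi_channel_config chan_cfg[] = {
 *		{
 *			.num = 20,
 *			.name = "IPCR",
 *			.num_elements = 64,
 *			.event_ring = 1,
 *			.dir = DMA_TO_DEVICE,
 *			.ee_mask = BIT(MHI_EE_AMSS),
 *			.doorbell = MHI_DB_BRST_DISABLE,
 *		},
 *	};
 *
 *	static const struct mhi_event_config event_cfg[] = {
 *		{ .num_elements = 32, .irq = 1, .channel = U32_MAX,
 *		  .mode = MHI_DB_BRST_DISABLE, .data_type = MHI_ER_CTRL, },
 *		{ .num_elements = 256, .irq = 2, .channel = U32_MAX,
 *		  .mode = MHI_DB_BRST_DISABLE, .data_type = MHI_ER_DATA, },
 *	};
 *
 *	static const struct mhi_controller_config config = {
 *		.max_channels = 128,
 *		.timeout_ms = 8000,
 *		.num_channels = ARRAY_SIZE(chan_cfg),
 *		.ch_cfg = chan_cfg,
 *		.num_events = ARRAY_SIZE(event_cfg),
 *		.event_cfg = event_cfg,
 *	};
 */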
int mhi_register_controller(struct mhi_controller *mhi_cntrl,
const struct mhi_controller_config *config)
{
struct mhi_event *mhi_event;
struct mhi_chan *mhi_chan;
struct mhi_cmd *mhi_cmd;
struct mhi_device *mhi_dev;
u32 soc_info;
int ret, i;
if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->regs ||
!mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
!mhi_cntrl->status_cb || !mhi_cntrl->read_reg ||
!mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs ||
!mhi_cntrl->irq || !mhi_cntrl->reg_len)
return -EINVAL;
ret = parse_config(mhi_cntrl, config);
if (ret)
return -EINVAL;
mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS,
sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
if (!mhi_cntrl->mhi_cmd) {
ret = -ENOMEM;
goto err_free_event;
}
INIT_LIST_HEAD(&mhi_cntrl->transition_list);
mutex_init(&mhi_cntrl->pm_mutex);
rwlock_init(&mhi_cntrl->pm_lock);
spin_lock_init(&mhi_cntrl->transition_lock);
spin_lock_init(&mhi_cntrl->wlock);
INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
init_waitqueue_head(&mhi_cntrl->state_event);
mhi_cntrl->hiprio_wq = alloc_ordered_workqueue("mhi_hiprio_wq", WQ_HIGHPRI);
if (!mhi_cntrl->hiprio_wq) {
dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate workqueue\n");
ret = -ENOMEM;
goto err_free_cmd;
}
mhi_cmd = mhi_cntrl->mhi_cmd;
for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++)
spin_lock_init(&mhi_cmd->lock);
mhi_event = mhi_cntrl->mhi_event;
for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
/* Skip for offload events */
if (mhi_event->offload_ev)
continue;
mhi_event->mhi_cntrl = mhi_cntrl;
spin_lock_init(&mhi_event->lock);
if (mhi_event->data_type == MHI_ER_CTRL)
tasklet_init(&mhi_event->task, mhi_ctrl_ev_task,
(ulong)mhi_event);
else
tasklet_init(&mhi_event->task, mhi_ev_task,
(ulong)mhi_event);
}
mhi_chan = mhi_cntrl->mhi_chan;
for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
mutex_init(&mhi_chan->mutex);
init_completion(&mhi_chan->completion);
rwlock_init(&mhi_chan->lock);
/* Used when setting the BEI field of a TRE */
mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
mhi_chan->intmod = mhi_event->intmod;
}
if (mhi_cntrl->bounce_buf) {
mhi_cntrl->map_single = mhi_map_single_use_bb;
mhi_cntrl->unmap_single = mhi_unmap_single_use_bb;
} else {
mhi_cntrl->map_single = mhi_map_single_no_bb;
mhi_cntrl->unmap_single = mhi_unmap_single_no_bb;
}
/* Read the MHI device info */
ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs,
SOC_HW_VERSION_OFFS, &soc_info);
if (ret)
goto err_destroy_wq;
mhi_cntrl->family_number = FIELD_GET(SOC_HW_VERSION_FAM_NUM_BMSK, soc_info);
mhi_cntrl->device_number = FIELD_GET(SOC_HW_VERSION_DEV_NUM_BMSK, soc_info);
mhi_cntrl->major_version = FIELD_GET(SOC_HW_VERSION_MAJOR_VER_BMSK, soc_info);
mhi_cntrl->minor_version = FIELD_GET(SOC_HW_VERSION_MINOR_VER_BMSK, soc_info);
mhi_cntrl->index = ida_alloc(&mhi_controller_ida, GFP_KERNEL);
if (mhi_cntrl->index < 0) {
ret = mhi_cntrl->index;
goto err_destroy_wq;
}
ret = mhi_init_irq_setup(mhi_cntrl);
if (ret)
goto err_ida_free;
/* Register controller with MHI bus */
mhi_dev = mhi_alloc_device(mhi_cntrl);
if (IS_ERR(mhi_dev)) {
dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate MHI device\n");
ret = PTR_ERR(mhi_dev);
goto error_setup_irq;
}
mhi_dev->dev_type = MHI_DEVICE_CONTROLLER;
mhi_dev->mhi_cntrl = mhi_cntrl;
dev_set_name(&mhi_dev->dev, "mhi%d", mhi_cntrl->index);
mhi_dev->name = dev_name(&mhi_dev->dev);
/* Init wakeup source */
device_init_wakeup(&mhi_dev->dev, true);
ret = device_add(&mhi_dev->dev);
if (ret)
goto err_release_dev;
mhi_cntrl->mhi_dev = mhi_dev;
mhi_create_debugfs(mhi_cntrl);
return 0;
err_release_dev:
put_device(&mhi_dev->dev);
error_setup_irq:
mhi_deinit_free_irq(mhi_cntrl);
err_ida_free:
ida_free(&mhi_controller_ida, mhi_cntrl->index);
err_destroy_wq:
destroy_workqueue(mhi_cntrl->hiprio_wq);
err_free_cmd:
kfree(mhi_cntrl->mhi_cmd);
err_free_event:
kfree(mhi_cntrl->mhi_event);
vfree(mhi_cntrl->mhi_chan);
return ret;
}
EXPORT_SYMBOL_GPL(mhi_register_controller);
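/*
 * Illustrative call sequence for a controller driver registering with the
 * MHI stack (a sketch; the my_* callbacks are hypothetical). The mandatory
 * fields are exactly the ones validated at the top of
 * mhi_register_controller():
 *
 *	mhi_cntrl = mhi_alloc_controller();
 *	mhi_cntrl->cntrl_dev = &pdev->dev;
 *	mhi_cntrl->regs = base;
 *	mhi_cntrl->reg_len = len;
 *	mhi_cntrl->nr_irqs = nr_irqs;
 *	mhi_cntrl->irq = irq_array;
 *	mhi_cntrl->runtime_get = my_runtime_get;
 *	mhi_cntrl->runtime_put = my_runtime_put;
 *	mhi_cntrl->status_cb = my_status_cb;
 *	mhi_cntrl->read_reg = my_read_reg;
 *	mhi_cntrl->write_reg = my_write_reg;
 *	ret = mhi_register_controller(mhi_cntrl, &config);
 */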
void mhi_unregister_controller(struct mhi_controller *mhi_cntrl)
{
struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
struct mhi_chan *mhi_chan = mhi_cntrl->mhi_chan;
unsigned int i;
mhi_deinit_free_irq(mhi_cntrl);
mhi_destroy_debugfs(mhi_cntrl);
destroy_workqueue(mhi_cntrl->hiprio_wq);
kfree(mhi_cntrl->mhi_cmd);
kfree(mhi_cntrl->mhi_event);
/* Drop the references to MHI devices created for channels */
for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
if (!mhi_chan->mhi_dev)
continue;
put_device(&mhi_chan->mhi_dev->dev);
}
vfree(mhi_cntrl->mhi_chan);
device_del(&mhi_dev->dev);
put_device(&mhi_dev->dev);
ida_free(&mhi_controller_ida, mhi_cntrl->index);
}
EXPORT_SYMBOL_GPL(mhi_unregister_controller);
struct mhi_controller *mhi_alloc_controller(void)
{
struct mhi_controller *mhi_cntrl;
mhi_cntrl = kzalloc(sizeof(*mhi_cntrl), GFP_KERNEL);
return mhi_cntrl;
}
EXPORT_SYMBOL_GPL(mhi_alloc_controller);
void mhi_free_controller(struct mhi_controller *mhi_cntrl)
{
kfree(mhi_cntrl);
}
EXPORT_SYMBOL_GPL(mhi_free_controller);
int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
{
struct device *dev = &mhi_cntrl->mhi_dev->dev;
u32 bhi_off, bhie_off;
int ret;
mutex_lock(&mhi_cntrl->pm_mutex);
ret = mhi_init_dev_ctxt(mhi_cntrl);
if (ret)
goto error_dev_ctxt;
ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &bhi_off);
if (ret) {
dev_err(dev, "Error getting BHI offset\n");
goto error_reg_offset;
}
if (bhi_off >= mhi_cntrl->reg_len) {
dev_err(dev, "BHI offset: 0x%x is out of range: 0x%zx\n",
bhi_off, mhi_cntrl->reg_len);
ret = -ERANGE;
goto error_reg_offset;
}
mhi_cntrl->bhi = mhi_cntrl->regs + bhi_off;
if (mhi_cntrl->fbc_download || mhi_cntrl->rddm_size) {
ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF,
&bhie_off);
if (ret) {
dev_err(dev, "Error getting BHIE offset\n");
goto error_reg_offset;
}
if (bhie_off >= mhi_cntrl->reg_len) {
dev_err(dev,
"BHIe offset: 0x%x is out of range: 0x%zx\n",
bhie_off, mhi_cntrl->reg_len);
ret = -ERANGE;
goto error_reg_offset;
}
mhi_cntrl->bhie = mhi_cntrl->regs + bhie_off;
}
if (mhi_cntrl->rddm_size) {
/*
* This controller supports RDDM, so we need to manually clear
* BHIE RX registers since POR values are undefined.
*/
memset_io(mhi_cntrl->bhie + BHIE_RXVECADDR_LOW_OFFS,
0, BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS +
4);
/*
* Allocate RDDM table for debugging purpose if specified
*/
mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image,
mhi_cntrl->rddm_size);
if (mhi_cntrl->rddm_image) {
ret = mhi_rddm_prepare(mhi_cntrl,
mhi_cntrl->rddm_image);
if (ret) {
mhi_free_bhie_table(mhi_cntrl,
mhi_cntrl->rddm_image);
goto error_reg_offset;
}
}
}
mutex_unlock(&mhi_cntrl->pm_mutex);
return 0;
error_reg_offset:
mhi_deinit_dev_ctxt(mhi_cntrl);
error_dev_ctxt:
mutex_unlock(&mhi_cntrl->pm_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(mhi_prepare_for_power_up);
void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl)
{
if (mhi_cntrl->fbc_image) {
mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
mhi_cntrl->fbc_image = NULL;
}
if (mhi_cntrl->rddm_image) {
mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
mhi_cntrl->rddm_image = NULL;
}
mhi_cntrl->bhi = NULL;
mhi_cntrl->bhie = NULL;
mhi_deinit_dev_ctxt(mhi_cntrl);
}
EXPORT_SYMBOL_GPL(mhi_unprepare_after_power_down);
static void mhi_release_device(struct device *dev)
{
struct mhi_device *mhi_dev = to_mhi_device(dev);
/*
 * We need to set mhi_chan->mhi_dev to NULL here since the MHI
 * devices for the channels will only get created if the mhi_dev
 * associated with them is NULL. This scenario happens during
 * controller suspend and resume.
 */
if (mhi_dev->ul_chan)
mhi_dev->ul_chan->mhi_dev = NULL;
if (mhi_dev->dl_chan)
mhi_dev->dl_chan->mhi_dev = NULL;
kfree(mhi_dev);
}
struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl)
{
struct mhi_device *mhi_dev;
struct device *dev;
mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL);
if (!mhi_dev)
return ERR_PTR(-ENOMEM);
dev = &mhi_dev->dev;
device_initialize(dev);
dev->bus = &mhi_bus_type;
dev->release = mhi_release_device;
if (mhi_cntrl->mhi_dev) {
/* for MHI client devices, parent is the MHI controller device */
dev->parent = &mhi_cntrl->mhi_dev->dev;
} else {
/* for MHI controller device, parent is the bus device (e.g. pci device) */
dev->parent = mhi_cntrl->cntrl_dev;
}
mhi_dev->mhi_cntrl = mhi_cntrl;
mhi_dev->dev_wake = 0;
return mhi_dev;
}
static int mhi_driver_probe(struct device *dev)
{
struct mhi_device *mhi_dev = to_mhi_device(dev);
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
struct device_driver *drv = dev->driver;
struct mhi_driver *mhi_drv = to_mhi_driver(drv);
struct mhi_event *mhi_event;
struct mhi_chan *ul_chan = mhi_dev->ul_chan;
struct mhi_chan *dl_chan = mhi_dev->dl_chan;
int ret;
/* Bring device out of LPM */
ret = mhi_device_get_sync(mhi_dev);
if (ret)
return ret;
ret = -EINVAL;
if (ul_chan) {
/*
* If channel supports LPM notifications then status_cb should
* be provided
*/
if (ul_chan->lpm_notify && !mhi_drv->status_cb)
goto exit_probe;
/* For non-offload channels, xfer_cb should be provided */
if (!ul_chan->offload_ch && !mhi_drv->ul_xfer_cb)
goto exit_probe;
ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
}
ret = -EINVAL;
if (dl_chan) {
/*
* If channel supports LPM notifications then status_cb should
* be provided
*/
if (dl_chan->lpm_notify && !mhi_drv->status_cb)
goto exit_probe;
/* For non-offload channels, xfer_cb should be provided */
if (!dl_chan->offload_ch && !mhi_drv->dl_xfer_cb)
goto exit_probe;
mhi_event = &mhi_cntrl->mhi_event[dl_chan->er_index];
/*
* If the channel event ring is managed by client, then
* status_cb must be provided so that the framework can
* notify pending data
*/
if (mhi_event->cl_manage && !mhi_drv->status_cb)
goto exit_probe;
dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;
}
/* Call the user provided probe function */
ret = mhi_drv->probe(mhi_dev, mhi_dev->id);
if (ret)
goto exit_probe;
mhi_device_put(mhi_dev);
return ret;
exit_probe:
mhi_unprepare_from_transfer(mhi_dev);
mhi_device_put(mhi_dev);
return ret;
}
static int mhi_driver_remove(struct device *dev)
{
struct mhi_device *mhi_dev = to_mhi_device(dev);
struct mhi_driver *mhi_drv = to_mhi_driver(dev->driver);
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
struct mhi_chan *mhi_chan;
enum mhi_ch_state ch_state[] = {
MHI_CH_STATE_DISABLED,
MHI_CH_STATE_DISABLED
};
int dir;
/* Skip if it is a controller device */
if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
return 0;
/* Reset both channels */
for (dir = 0; dir < 2; dir++) {
mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
if (!mhi_chan)
continue;
/* Wake all threads waiting for completion */
write_lock_irq(&mhi_chan->lock);
mhi_chan->ccs = MHI_EV_CC_INVALID;
complete_all(&mhi_chan->completion);
write_unlock_irq(&mhi_chan->lock);
/* Set the channel state to disabled */
mutex_lock(&mhi_chan->mutex);
write_lock_irq(&mhi_chan->lock);
ch_state[dir] = mhi_chan->ch_state;
mhi_chan->ch_state = MHI_CH_STATE_SUSPENDED;
write_unlock_irq(&mhi_chan->lock);
/* Reset the non-offload channel */
if (!mhi_chan->offload_ch)
mhi_reset_chan(mhi_cntrl, mhi_chan);
mutex_unlock(&mhi_chan->mutex);
}
mhi_drv->remove(mhi_dev);
/* De-init channel if it was enabled */
for (dir = 0; dir < 2; dir++) {
mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
if (!mhi_chan)
continue;
mutex_lock(&mhi_chan->mutex);
if ((ch_state[dir] == MHI_CH_STATE_ENABLED ||
ch_state[dir] == MHI_CH_STATE_STOP) &&
!mhi_chan->offload_ch)
mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
mutex_unlock(&mhi_chan->mutex);
}
while (mhi_dev->dev_wake)
mhi_device_put(mhi_dev);
return 0;
}
int __mhi_driver_register(struct mhi_driver *mhi_drv, struct module *owner)
{
struct device_driver *driver = &mhi_drv->driver;
if (!mhi_drv->probe || !mhi_drv->remove)
return -EINVAL;
driver->bus = &mhi_bus_type;
driver->owner = owner;
driver->probe = mhi_driver_probe;
driver->remove = mhi_driver_remove;
return driver_register(driver);
}
EXPORT_SYMBOL_GPL(__mhi_driver_register);
void mhi_driver_unregister(struct mhi_driver *mhi_drv)
{
driver_unregister(&mhi_drv->driver);
}
EXPORT_SYMBOL_GPL(mhi_driver_unregister);
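/*
 * Illustrative client driver skeleton (a sketch; the channel name and
 * my_* callbacks are hypothetical). module_mhi_driver() from linux/mhi.h
 * wraps __mhi_driver_register()/mhi_driver_unregister() for module
 * init/exit:
 *
 *	static const struct mhi_device_id my_id_table[] = {
 *		{ .chan = "IPCR" },
 *		{}
 *	};
 *
 *	static struct mhi_driver my_driver = {
 *		.id_table = my_id_table,
 *		.probe = my_probe,
 *		.remove = my_remove,
 *		.ul_xfer_cb = my_ul_cb,
 *		.dl_xfer_cb = my_dl_cb,
 *		.driver.name = "my_mhi_client",
 *	};
 *	module_mhi_driver(my_driver);
 */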
static int mhi_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
const struct mhi_device *mhi_dev = to_mhi_device(dev);
return add_uevent_var(env, "MODALIAS=" MHI_DEVICE_MODALIAS_FMT,
mhi_dev->name);
}
static int mhi_match(struct device *dev, struct device_driver *drv)
{
struct mhi_device *mhi_dev = to_mhi_device(dev);
struct mhi_driver *mhi_drv = to_mhi_driver(drv);
const struct mhi_device_id *id;
/*
* If the device is a controller type then there is no client driver
* associated with it
*/
if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
return 0;
for (id = mhi_drv->id_table; id->chan[0]; id++)
if (!strcmp(mhi_dev->name, id->chan)) {
mhi_dev->id = id;
return 1;
}
return 0;
}
struct bus_type mhi_bus_type = {
.name = "mhi",
.dev_name = "mhi",
.match = mhi_match,
.uevent = mhi_uevent,
.dev_groups = mhi_dev_groups,
};
static int __init mhi_init(void)
{
mhi_debugfs_init();
return bus_register(&mhi_bus_type);
}
static void __exit mhi_exit(void)
{
mhi_debugfs_exit();
bus_unregister(&mhi_bus_type);
}
postcore_initcall(mhi_init);
module_exit(mhi_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Modem Host Interface");
| linux-master | drivers/bus/mhi/host/init.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
*
*/
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include "internal.h"
/*
* Not all MHI state transitions are synchronous. Transitions like Linkdown,
* SYS_ERR, and shutdown can happen anytime asynchronously. This function will
* transition to a new state only if we're allowed to.
*
 * Priority increases as we go down. For instance, from any state in L0, a
 * transition can be made to states in L1, L2 and L3. A notable exception to
 * this rule is state DISABLE: from the DISABLE state we can only transition
 * to the POR state. Also, while in an L2 state, the host cannot jump back to
 * the previous L1 or L0 states.
*
* Valid transitions:
* L0: DISABLE <--> POR
* POR <--> POR
* POR -> M0 -> M2 --> M0
* POR -> FW_DL_ERR
* FW_DL_ERR <--> FW_DL_ERR
* M0 <--> M0
* M0 -> FW_DL_ERR
* M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0
* L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR
* L2: SHUTDOWN_PROCESS -> LD_ERR_FATAL_DETECT
* SHUTDOWN_PROCESS -> DISABLE
* L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
* LD_ERR_FATAL_DETECT -> DISABLE
*/
static const struct mhi_pm_transitions dev_state_transitions[] = {
/* L0 States */
{
MHI_PM_DISABLE,
MHI_PM_POR
},
{
MHI_PM_POR,
MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 |
MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
},
{
MHI_PM_M0,
MHI_PM_M0 | MHI_PM_M2 | MHI_PM_M3_ENTER |
MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
},
{
MHI_PM_M2,
MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
MHI_PM_LD_ERR_FATAL_DETECT
},
{
MHI_PM_M3_ENTER,
MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
MHI_PM_LD_ERR_FATAL_DETECT
},
{
MHI_PM_M3,
MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT |
MHI_PM_LD_ERR_FATAL_DETECT
},
{
MHI_PM_M3_EXIT,
MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
MHI_PM_LD_ERR_FATAL_DETECT
},
{
MHI_PM_FW_DL_ERR,
MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT |
MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
},
/* L1 States */
{
MHI_PM_SYS_ERR_DETECT,
MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS |
MHI_PM_LD_ERR_FATAL_DETECT
},
{
MHI_PM_SYS_ERR_PROCESS,
MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS |
MHI_PM_LD_ERR_FATAL_DETECT
},
/* L2 States */
{
MHI_PM_SHUTDOWN_PROCESS,
MHI_PM_DISABLE | MHI_PM_LD_ERR_FATAL_DETECT
},
/* L3 States */
{
MHI_PM_LD_ERR_FATAL_DETECT,
MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_DISABLE
},
};
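/*
 * mhi_tryset_pm_state() below relies on each MHI_PM_* state being a
 * distinct bit: find_last_bit() maps the (single-bit) pm_state to its row
 * in dev_state_transitions[]. For example, assuming the BIT() encoding in
 * internal.h where MHI_PM_M0 is the third state bit, a current state of
 * MHI_PM_M0 indexes row 2, and the transition is allowed only if the
 * requested state is set in that row's to_states mask. The from_state
 * check guards against a corrupted multi-bit pm_state.
 */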
enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cntrl,
enum mhi_pm_state state)
{
unsigned long cur_state = mhi_cntrl->pm_state;
int index = find_last_bit(&cur_state, 32);
if (unlikely(index >= ARRAY_SIZE(dev_state_transitions)))
return cur_state;
if (unlikely(dev_state_transitions[index].from_state != cur_state))
return cur_state;
if (unlikely(!(dev_state_transitions[index].to_states & state)))
return cur_state;
mhi_cntrl->pm_state = state;
return mhi_cntrl->pm_state;
}
void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state)
{
struct device *dev = &mhi_cntrl->mhi_dev->dev;
int ret;
if (state == MHI_STATE_RESET) {
ret = mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
MHICTRL_RESET_MASK, 1);
} else {
ret = mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
MHICTRL_MHISTATE_MASK, state);
}
if (ret)
dev_err(dev, "Failed to set MHI state to: %s\n",
mhi_state_str(state));
}
/* NOP for backward compatibility, host allowed to ring DB in M2 state */
static void mhi_toggle_dev_wake_nop(struct mhi_controller *mhi_cntrl)
{
}
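/*
 * A minimal wake_get()/wake_put() cycle rings the wake doorbell, which is
 * enough to nudge the device out of M2. The override flag on the put side
 * skips the immediate doorbell de-assert (see mhi_deassert_dev_wake()), so
 * the M2 exit is not raced by an instant clear; this reading of the flag
 * is inferred from the wake helpers below.
 */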
static void mhi_toggle_dev_wake(struct mhi_controller *mhi_cntrl)
{
mhi_cntrl->wake_get(mhi_cntrl, false);
mhi_cntrl->wake_put(mhi_cntrl, true);
}
/* Handle device ready state transition */
int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
{
struct mhi_event *mhi_event;
enum mhi_pm_state cur_state;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
u32 interval_us = 25000; /* poll register field every 25 milliseconds */
int ret, i;
/* Check if device entered error state */
if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
dev_err(dev, "Device link is not accessible\n");
return -EIO;
}
/* Wait for RESET to be cleared and READY bit to be set by the device */
ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
MHICTRL_RESET_MASK, 0, interval_us);
if (ret) {
dev_err(dev, "Device failed to clear MHI Reset\n");
return ret;
}
ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
MHISTATUS_READY_MASK, 1, interval_us);
if (ret) {
dev_err(dev, "Device failed to enter MHI Ready\n");
return ret;
}
dev_dbg(dev, "Device in READY State\n");
write_lock_irq(&mhi_cntrl->pm_lock);
cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
mhi_cntrl->dev_state = MHI_STATE_READY;
write_unlock_irq(&mhi_cntrl->pm_lock);
if (cur_state != MHI_PM_POR) {
dev_err(dev, "Error moving to state %s from %s\n",
to_mhi_pm_state_str(MHI_PM_POR),
to_mhi_pm_state_str(cur_state));
return -EIO;
}
read_lock_bh(&mhi_cntrl->pm_lock);
if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
dev_err(dev, "Device registers not accessible\n");
goto error_mmio;
}
/* Configure MMIO registers */
ret = mhi_init_mmio(mhi_cntrl);
if (ret) {
dev_err(dev, "Error configuring MMIO registers\n");
goto error_mmio;
}
/* Add elements to all SW event rings */
mhi_event = mhi_cntrl->mhi_event;
for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
struct mhi_ring *ring = &mhi_event->ring;
/* Skip if this is an offload or HW event */
if (mhi_event->offload_ev || mhi_event->hw_ring)
continue;
ring->wp = ring->base + ring->len - ring->el_size;
*ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size);
/* Update to all cores */
smp_wmb();
/* Ring the event ring db */
spin_lock_irq(&mhi_event->lock);
mhi_ring_er_db(mhi_event);
spin_unlock_irq(&mhi_event->lock);
}
/* Set MHI to M0 state */
mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
read_unlock_bh(&mhi_cntrl->pm_lock);
return 0;
error_mmio:
read_unlock_bh(&mhi_cntrl->pm_lock);
return -EIO;
}
int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl)
{
enum mhi_pm_state cur_state;
struct mhi_chan *mhi_chan;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
int i;
write_lock_irq(&mhi_cntrl->pm_lock);
mhi_cntrl->dev_state = MHI_STATE_M0;
cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0);
write_unlock_irq(&mhi_cntrl->pm_lock);
if (unlikely(cur_state != MHI_PM_M0)) {
dev_err(dev, "Unable to transition to M0 state\n");
return -EIO;
}
mhi_cntrl->M0++;
/* Wake up the device */
read_lock_bh(&mhi_cntrl->pm_lock);
mhi_cntrl->wake_get(mhi_cntrl, true);
/* Ring all event rings and CMD ring only if we're in mission mode */
if (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
struct mhi_cmd *mhi_cmd =
&mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
if (mhi_event->offload_ev)
continue;
spin_lock_irq(&mhi_event->lock);
mhi_ring_er_db(mhi_event);
spin_unlock_irq(&mhi_event->lock);
}
/* Only ring primary cmd ring if ring is not empty */
spin_lock_irq(&mhi_cmd->lock);
if (mhi_cmd->ring.rp != mhi_cmd->ring.wp)
mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
spin_unlock_irq(&mhi_cmd->lock);
}
/* Ring channel DB registers */
mhi_chan = mhi_cntrl->mhi_chan;
for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
if (mhi_chan->db_cfg.reset_req) {
write_lock_irq(&mhi_chan->lock);
mhi_chan->db_cfg.db_mode = true;
write_unlock_irq(&mhi_chan->lock);
}
read_lock_irq(&mhi_chan->lock);
/* Only ring DB if ring is not empty */
if (tre_ring->base && tre_ring->wp != tre_ring->rp &&
mhi_chan->ch_state == MHI_CH_STATE_ENABLED)
mhi_ring_chan_db(mhi_cntrl, mhi_chan);
read_unlock_irq(&mhi_chan->lock);
}
mhi_cntrl->wake_put(mhi_cntrl, false);
read_unlock_bh(&mhi_cntrl->pm_lock);
wake_up_all(&mhi_cntrl->state_event);
return 0;
}
/*
* After receiving the MHI state change event from the device indicating the
* transition to M1 state, the host can transition the device to M2 state
* for keeping it in low power state.
*/
void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl)
{
enum mhi_pm_state state;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
write_lock_irq(&mhi_cntrl->pm_lock);
state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2);
if (state == MHI_PM_M2) {
mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2);
mhi_cntrl->dev_state = MHI_STATE_M2;
write_unlock_irq(&mhi_cntrl->pm_lock);
mhi_cntrl->M2++;
wake_up_all(&mhi_cntrl->state_event);
/* If there are any pending resources, exit M2 immediately */
if (unlikely(atomic_read(&mhi_cntrl->pending_pkts) ||
atomic_read(&mhi_cntrl->dev_wake))) {
dev_dbg(dev,
"Exiting M2, pending_pkts: %d dev_wake: %d\n",
atomic_read(&mhi_cntrl->pending_pkts),
atomic_read(&mhi_cntrl->dev_wake));
read_lock_bh(&mhi_cntrl->pm_lock);
mhi_cntrl->wake_get(mhi_cntrl, true);
mhi_cntrl->wake_put(mhi_cntrl, true);
read_unlock_bh(&mhi_cntrl->pm_lock);
} else {
mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_IDLE);
}
} else {
write_unlock_irq(&mhi_cntrl->pm_lock);
}
}
/* MHI M3 completion handler */
int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl)
{
enum mhi_pm_state state;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
write_lock_irq(&mhi_cntrl->pm_lock);
mhi_cntrl->dev_state = MHI_STATE_M3;
state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3);
write_unlock_irq(&mhi_cntrl->pm_lock);
if (state != MHI_PM_M3) {
dev_err(dev, "Unable to transition to M3 state\n");
return -EIO;
}
mhi_cntrl->M3++;
wake_up_all(&mhi_cntrl->state_event);
return 0;
}
/* Handle device Mission Mode transition */
static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
{
struct mhi_event *mhi_event;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
enum mhi_ee_type ee = MHI_EE_MAX, current_ee = mhi_cntrl->ee;
int i, ret;
dev_dbg(dev, "Processing Mission Mode transition\n");
write_lock_irq(&mhi_cntrl->pm_lock);
if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
ee = mhi_get_exec_env(mhi_cntrl);
if (!MHI_IN_MISSION_MODE(ee)) {
mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
write_unlock_irq(&mhi_cntrl->pm_lock);
wake_up_all(&mhi_cntrl->state_event);
return -EIO;
}
mhi_cntrl->ee = ee;
write_unlock_irq(&mhi_cntrl->pm_lock);
wake_up_all(&mhi_cntrl->state_event);
device_for_each_child(&mhi_cntrl->mhi_dev->dev, &current_ee,
mhi_destroy_device);
mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE);
/* Force MHI to be in M0 state before continuing */
ret = __mhi_device_get_sync(mhi_cntrl);
if (ret)
return ret;
read_lock_bh(&mhi_cntrl->pm_lock);
if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
ret = -EIO;
goto error_mission_mode;
}
/* Add elements to all HW event rings */
mhi_event = mhi_cntrl->mhi_event;
for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
struct mhi_ring *ring = &mhi_event->ring;
if (mhi_event->offload_ev || !mhi_event->hw_ring)
continue;
ring->wp = ring->base + ring->len - ring->el_size;
*ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size);
/* Update to all cores */
smp_wmb();
spin_lock_irq(&mhi_event->lock);
if (MHI_DB_ACCESS_VALID(mhi_cntrl))
mhi_ring_er_db(mhi_event);
spin_unlock_irq(&mhi_event->lock);
}
read_unlock_bh(&mhi_cntrl->pm_lock);
/*
* The MHI devices are only created when the client device switches its
* Execution Environment (EE) to either SBL or AMSS states
*/
mhi_create_devices(mhi_cntrl);
read_lock_bh(&mhi_cntrl->pm_lock);
error_mission_mode:
mhi_cntrl->wake_put(mhi_cntrl, false);
read_unlock_bh(&mhi_cntrl->pm_lock);
return ret;
}
/* Handle shutdown transitions */
static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
{
enum mhi_pm_state cur_state;
struct mhi_event *mhi_event;
struct mhi_cmd_ctxt *cmd_ctxt;
struct mhi_cmd *mhi_cmd;
struct mhi_event_ctxt *er_ctxt;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
int ret, i;
dev_dbg(dev, "Processing disable transition with PM state: %s\n",
to_mhi_pm_state_str(mhi_cntrl->pm_state));
mutex_lock(&mhi_cntrl->pm_mutex);
/* Trigger MHI RESET so that the device will not access host memory */
if (!MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
/* Skip MHI RESET if in RDDM state */
if (mhi_cntrl->rddm_image && mhi_get_exec_env(mhi_cntrl) == MHI_EE_RDDM)
goto skip_mhi_reset;
dev_dbg(dev, "Triggering MHI Reset in device\n");
mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
/* Wait for the reset bit to be cleared by the device */
ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
MHICTRL_RESET_MASK, 0, 25000);
if (ret)
dev_err(dev, "Device failed to clear MHI Reset\n");
/*
 * The device will clear BHI_INTVEC as part of RESET processing,
 * hence re-program it
 */
mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
if (!MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) {
/* wait for ready to be set */
ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs,
MHISTATUS,
MHISTATUS_READY_MASK, 1, 25000);
if (ret)
dev_err(dev, "Device failed to enter READY state\n");
}
}
skip_mhi_reset:
dev_dbg(dev,
"Waiting for all pending event ring processing to complete\n");
mhi_event = mhi_cntrl->mhi_event;
for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
if (mhi_event->offload_ev)
continue;
disable_irq(mhi_cntrl->irq[mhi_event->irq]);
tasklet_kill(&mhi_event->task);
}
/* Release lock and wait for all pending threads to complete */
mutex_unlock(&mhi_cntrl->pm_mutex);
dev_dbg(dev, "Waiting for all pending threads to complete\n");
wake_up_all(&mhi_cntrl->state_event);
dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device);
mutex_lock(&mhi_cntrl->pm_mutex);
WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));
/* Reset the ev rings and cmd rings */
dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
mhi_cmd = mhi_cntrl->mhi_cmd;
cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
struct mhi_ring *ring = &mhi_cmd->ring;
ring->rp = ring->base;
ring->wp = ring->base;
cmd_ctxt->rp = cmd_ctxt->rbase;
cmd_ctxt->wp = cmd_ctxt->rbase;
}
mhi_event = mhi_cntrl->mhi_event;
er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
mhi_event++) {
struct mhi_ring *ring = &mhi_event->ring;
/* Skip offload events */
if (mhi_event->offload_ev)
continue;
ring->rp = ring->base;
ring->wp = ring->base;
er_ctxt->rp = er_ctxt->rbase;
er_ctxt->wp = er_ctxt->rbase;
}
/* Move to disable state */
write_lock_irq(&mhi_cntrl->pm_lock);
cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE);
write_unlock_irq(&mhi_cntrl->pm_lock);
if (unlikely(cur_state != MHI_PM_DISABLE))
dev_err(dev, "Error moving from PM state: %s to: %s\n",
to_mhi_pm_state_str(cur_state),
to_mhi_pm_state_str(MHI_PM_DISABLE));
dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
to_mhi_pm_state_str(mhi_cntrl->pm_state),
mhi_state_str(mhi_cntrl->dev_state));
mutex_unlock(&mhi_cntrl->pm_mutex);
}
/* Handle system error transitions */
static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
{
enum mhi_pm_state cur_state, prev_state;
enum dev_st_transition next_state;
struct mhi_event *mhi_event;
struct mhi_cmd_ctxt *cmd_ctxt;
struct mhi_cmd *mhi_cmd;
struct mhi_event_ctxt *er_ctxt;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
int ret, i;
dev_dbg(dev, "Transitioning from PM state: %s to: %s\n",
to_mhi_pm_state_str(mhi_cntrl->pm_state),
to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS));
/* We must notify the MHI controller driver first so it can clean up */
mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR);
mutex_lock(&mhi_cntrl->pm_mutex);
write_lock_irq(&mhi_cntrl->pm_lock);
prev_state = mhi_cntrl->pm_state;
cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
write_unlock_irq(&mhi_cntrl->pm_lock);
if (cur_state != MHI_PM_SYS_ERR_PROCESS) {
dev_err(dev, "Failed to transition from PM state: %s to: %s\n",
to_mhi_pm_state_str(cur_state),
to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS));
goto exit_sys_error_transition;
}
mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
mhi_cntrl->dev_state = MHI_STATE_RESET;
/* Wake up threads waiting for state transition */
wake_up_all(&mhi_cntrl->state_event);
/* Trigger MHI RESET so that the device will not access host memory */
if (MHI_REG_ACCESS_VALID(prev_state)) {
u32 in_reset = -1;
unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);
dev_dbg(dev, "Triggering MHI Reset in device\n");
mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
/* Wait for the reset bit to be cleared by the device */
ret = wait_event_timeout(mhi_cntrl->state_event,
mhi_read_reg_field(mhi_cntrl,
mhi_cntrl->regs,
MHICTRL,
MHICTRL_RESET_MASK,
&in_reset) ||
!in_reset, timeout);
if (!ret || in_reset) {
dev_err(dev, "Device failed to exit MHI Reset state\n");
goto exit_sys_error_transition;
}
/*
 * The device will clear BHI_INTVEC as part of RESET processing,
 * hence re-program it
 */
mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
}
dev_dbg(dev,
"Waiting for all pending event ring processing to complete\n");
mhi_event = mhi_cntrl->mhi_event;
for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
if (mhi_event->offload_ev)
continue;
tasklet_kill(&mhi_event->task);
}
/* Release lock and wait for all pending threads to complete */
mutex_unlock(&mhi_cntrl->pm_mutex);
dev_dbg(dev, "Waiting for all pending threads to complete\n");
wake_up_all(&mhi_cntrl->state_event);
dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device);
mutex_lock(&mhi_cntrl->pm_mutex);
WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));
/* Reset the ev rings and cmd rings */
dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
mhi_cmd = mhi_cntrl->mhi_cmd;
cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
struct mhi_ring *ring = &mhi_cmd->ring;
ring->rp = ring->base;
ring->wp = ring->base;
cmd_ctxt->rp = cmd_ctxt->rbase;
cmd_ctxt->wp = cmd_ctxt->rbase;
}
mhi_event = mhi_cntrl->mhi_event;
er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
mhi_event++) {
struct mhi_ring *ring = &mhi_event->ring;
/* Skip offload events */
if (mhi_event->offload_ev)
continue;
ring->rp = ring->base;
ring->wp = ring->base;
er_ctxt->rp = er_ctxt->rbase;
er_ctxt->wp = er_ctxt->rbase;
}
/* Transition to next state */
if (MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) {
write_lock_irq(&mhi_cntrl->pm_lock);
cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
write_unlock_irq(&mhi_cntrl->pm_lock);
if (cur_state != MHI_PM_POR) {
dev_err(dev, "Error moving to state %s from %s\n",
to_mhi_pm_state_str(MHI_PM_POR),
to_mhi_pm_state_str(cur_state));
goto exit_sys_error_transition;
}
next_state = DEV_ST_TRANSITION_PBL;
} else {
next_state = DEV_ST_TRANSITION_READY;
}
mhi_queue_state_transition(mhi_cntrl, next_state);
exit_sys_error_transition:
dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
to_mhi_pm_state_str(mhi_cntrl->pm_state),
mhi_state_str(mhi_cntrl->dev_state));
mutex_unlock(&mhi_cntrl->pm_mutex);
}
/* Queue a new work item and schedule work */
int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
enum dev_st_transition state)
{
struct state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC);
unsigned long flags;
if (!item)
return -ENOMEM;
item->state = state;
spin_lock_irqsave(&mhi_cntrl->transition_lock, flags);
list_add_tail(&item->node, &mhi_cntrl->transition_list);
spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags);
queue_work(mhi_cntrl->hiprio_wq, &mhi_cntrl->st_worker);
return 0;
}
/* SYS_ERR worker */
void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl)
{
struct device *dev = &mhi_cntrl->mhi_dev->dev;
/* skip if controller supports RDDM */
if (mhi_cntrl->rddm_image) {
dev_dbg(dev, "Controller supports RDDM, skip SYS_ERROR\n");
return;
}
mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_SYS_ERR);
}
/* Device State Transition worker */
void mhi_pm_st_worker(struct work_struct *work)
{
struct state_transition *itr, *tmp;
LIST_HEAD(head);
struct mhi_controller *mhi_cntrl = container_of(work,
struct mhi_controller,
st_worker);
struct device *dev = &mhi_cntrl->mhi_dev->dev;
spin_lock_irq(&mhi_cntrl->transition_lock);
list_splice_tail_init(&mhi_cntrl->transition_list, &head);
spin_unlock_irq(&mhi_cntrl->transition_lock);
list_for_each_entry_safe(itr, tmp, &head, node) {
list_del(&itr->node);
dev_dbg(dev, "Handling state transition: %s\n",
TO_DEV_STATE_TRANS_STR(itr->state));
switch (itr->state) {
case DEV_ST_TRANSITION_PBL:
write_lock_irq(&mhi_cntrl->pm_lock);
if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
write_unlock_irq(&mhi_cntrl->pm_lock);
mhi_fw_load_handler(mhi_cntrl);
break;
case DEV_ST_TRANSITION_SBL:
write_lock_irq(&mhi_cntrl->pm_lock);
mhi_cntrl->ee = MHI_EE_SBL;
write_unlock_irq(&mhi_cntrl->pm_lock);
/*
* The MHI devices are only created when the client
* device switches its Execution Environment (EE) to
* either SBL or AMSS states
*/
mhi_create_devices(mhi_cntrl);
if (mhi_cntrl->fbc_download)
mhi_download_amss_image(mhi_cntrl);
break;
case DEV_ST_TRANSITION_MISSION_MODE:
mhi_pm_mission_mode_transition(mhi_cntrl);
break;
case DEV_ST_TRANSITION_FP:
write_lock_irq(&mhi_cntrl->pm_lock);
mhi_cntrl->ee = MHI_EE_FP;
write_unlock_irq(&mhi_cntrl->pm_lock);
mhi_create_devices(mhi_cntrl);
break;
case DEV_ST_TRANSITION_READY:
mhi_ready_state_transition(mhi_cntrl);
break;
case DEV_ST_TRANSITION_SYS_ERR:
mhi_pm_sys_error_transition(mhi_cntrl);
break;
case DEV_ST_TRANSITION_DISABLE:
mhi_pm_disable_transition(mhi_cntrl);
break;
default:
break;
}
kfree(itr);
}
}
int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
{
struct mhi_chan *itr, *tmp;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
enum mhi_pm_state new_state;
int ret;
if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
return -EINVAL;
if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
return -EIO;
/* Return busy if there are any pending resources */
if (atomic_read(&mhi_cntrl->dev_wake) ||
atomic_read(&mhi_cntrl->pending_pkts))
return -EBUSY;
/* Take MHI out of M2 state */
read_lock_bh(&mhi_cntrl->pm_lock);
mhi_cntrl->wake_get(mhi_cntrl, false);
read_unlock_bh(&mhi_cntrl->pm_lock);
ret = wait_event_timeout(mhi_cntrl->state_event,
mhi_cntrl->dev_state == MHI_STATE_M0 ||
mhi_cntrl->dev_state == MHI_STATE_M1 ||
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
msecs_to_jiffies(mhi_cntrl->timeout_ms));
read_lock_bh(&mhi_cntrl->pm_lock);
mhi_cntrl->wake_put(mhi_cntrl, false);
read_unlock_bh(&mhi_cntrl->pm_lock);
if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
dev_err(dev,
"Could not enter M0/M1 state");
return -EIO;
}
write_lock_irq(&mhi_cntrl->pm_lock);
if (atomic_read(&mhi_cntrl->dev_wake) ||
atomic_read(&mhi_cntrl->pending_pkts)) {
write_unlock_irq(&mhi_cntrl->pm_lock);
return -EBUSY;
}
dev_dbg(dev, "Allowing M3 transition\n");
new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER);
if (new_state != MHI_PM_M3_ENTER) {
write_unlock_irq(&mhi_cntrl->pm_lock);
dev_err(dev,
"Error setting to PM state: %s from: %s\n",
to_mhi_pm_state_str(MHI_PM_M3_ENTER),
to_mhi_pm_state_str(mhi_cntrl->pm_state));
return -EIO;
}
/* Set MHI to M3 and wait for completion */
mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
write_unlock_irq(&mhi_cntrl->pm_lock);
dev_dbg(dev, "Waiting for M3 completion\n");
ret = wait_event_timeout(mhi_cntrl->state_event,
mhi_cntrl->dev_state == MHI_STATE_M3 ||
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
dev_err(dev,
"Did not enter M3 state, MHI state: %s, PM state: %s\n",
mhi_state_str(mhi_cntrl->dev_state),
to_mhi_pm_state_str(mhi_cntrl->pm_state));
return -EIO;
}
/* Notify clients about entering LPM */
list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
mutex_lock(&itr->mutex);
if (itr->mhi_dev)
mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER);
mutex_unlock(&itr->mutex);
}
return 0;
}
EXPORT_SYMBOL_GPL(mhi_pm_suspend);
static int __mhi_pm_resume(struct mhi_controller *mhi_cntrl, bool force)
{
struct mhi_chan *itr, *tmp;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
enum mhi_pm_state cur_state;
int ret;
dev_dbg(dev, "Entered with PM state: %s, MHI state: %s\n",
to_mhi_pm_state_str(mhi_cntrl->pm_state),
mhi_state_str(mhi_cntrl->dev_state));
if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
return 0;
if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
return -EIO;
if (mhi_get_mhi_state(mhi_cntrl) != MHI_STATE_M3) {
dev_warn(dev, "Resuming from non M3 state (%s)\n",
mhi_state_str(mhi_get_mhi_state(mhi_cntrl)));
if (!force)
return -EINVAL;
}
/* Notify clients about exiting LPM */
list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
mutex_lock(&itr->mutex);
if (itr->mhi_dev)
mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT);
mutex_unlock(&itr->mutex);
}
write_lock_irq(&mhi_cntrl->pm_lock);
cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_EXIT);
if (cur_state != MHI_PM_M3_EXIT) {
write_unlock_irq(&mhi_cntrl->pm_lock);
dev_info(dev,
"Error setting to PM state: %s from: %s\n",
to_mhi_pm_state_str(MHI_PM_M3_EXIT),
to_mhi_pm_state_str(mhi_cntrl->pm_state));
return -EIO;
}
/* Set MHI to M0 and wait for completion */
mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
write_unlock_irq(&mhi_cntrl->pm_lock);
ret = wait_event_timeout(mhi_cntrl->state_event,
mhi_cntrl->dev_state == MHI_STATE_M0 ||
mhi_cntrl->dev_state == MHI_STATE_M2 ||
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
dev_err(dev,
"Did not enter M0 state, MHI state: %s, PM state: %s\n",
mhi_state_str(mhi_cntrl->dev_state),
to_mhi_pm_state_str(mhi_cntrl->pm_state));
return -EIO;
}
return 0;
}
int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
{
return __mhi_pm_resume(mhi_cntrl, false);
}
EXPORT_SYMBOL_GPL(mhi_pm_resume);
int mhi_pm_resume_force(struct mhi_controller *mhi_cntrl)
{
return __mhi_pm_resume(mhi_cntrl, true);
}
EXPORT_SYMBOL_GPL(mhi_pm_resume_force);
int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
{
int ret;
/* Wake up the device */
read_lock_bh(&mhi_cntrl->pm_lock);
if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
read_unlock_bh(&mhi_cntrl->pm_lock);
return -EIO;
}
mhi_cntrl->wake_get(mhi_cntrl, true);
if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
mhi_trigger_resume(mhi_cntrl);
read_unlock_bh(&mhi_cntrl->pm_lock);
ret = wait_event_timeout(mhi_cntrl->state_event,
mhi_cntrl->pm_state == MHI_PM_M0 ||
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
read_lock_bh(&mhi_cntrl->pm_lock);
mhi_cntrl->wake_put(mhi_cntrl, false);
read_unlock_bh(&mhi_cntrl->pm_lock);
return -EIO;
}
return 0;
}
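/*
 * Device wake is reference counted: only the 0 -> 1 transition rings the
 * wake doorbell and only the 1 -> 0 transition clears it. The lock-free
 * atomic_add_unless() fast paths below avoid taking wlock whenever the
 * count is already non-zero (get) or still above one (put).
 */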
/* Assert device wake db */
static void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force)
{
unsigned long flags;
/*
 * If the force flag is set, then increment the wake count and
 * ring the wake doorbell
 */
if (unlikely(force)) {
spin_lock_irqsave(&mhi_cntrl->wlock, flags);
atomic_inc(&mhi_cntrl->dev_wake);
if (MHI_WAKE_DB_FORCE_SET_VALID(mhi_cntrl->pm_state) &&
!mhi_cntrl->wake_set) {
mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
mhi_cntrl->wake_set = true;
}
spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
} else {
/*
* If resources are already requested, then just increment
* the wake count value and return
*/
if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0)))
return;
spin_lock_irqsave(&mhi_cntrl->wlock, flags);
if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) &&
MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) &&
!mhi_cntrl->wake_set) {
mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
mhi_cntrl->wake_set = true;
}
spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
}
}
/* De-assert device wake db */
static void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl,
bool override)
{
unsigned long flags;
/*
* Only continue if there is a single resource, else just decrement
* and return
*/
if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1)))
return;
spin_lock_irqsave(&mhi_cntrl->wlock, flags);
if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) &&
MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override &&
mhi_cntrl->wake_set) {
mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0);
mhi_cntrl->wake_set = false;
}
spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
}
int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
{
struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
enum mhi_state state;
enum mhi_ee_type current_ee;
enum dev_st_transition next_state;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
u32 interval_us = 25000; /* poll register field every 25 milliseconds */
int ret, i;
dev_info(dev, "Requested to power ON\n");
/* Supply default wake routines if not provided by controller driver */
if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put ||
!mhi_cntrl->wake_toggle) {
mhi_cntrl->wake_get = mhi_assert_dev_wake;
mhi_cntrl->wake_put = mhi_deassert_dev_wake;
mhi_cntrl->wake_toggle = (mhi_cntrl->db_access & MHI_PM_M2) ?
mhi_toggle_dev_wake_nop : mhi_toggle_dev_wake;
}
mutex_lock(&mhi_cntrl->pm_mutex);
mhi_cntrl->pm_state = MHI_PM_DISABLE;
/* Setup BHI INTVEC */
write_lock_irq(&mhi_cntrl->pm_lock);
mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
mhi_cntrl->pm_state = MHI_PM_POR;
mhi_cntrl->ee = MHI_EE_MAX;
current_ee = mhi_get_exec_env(mhi_cntrl);
write_unlock_irq(&mhi_cntrl->pm_lock);
/* Confirm that the device is in valid exec env */
if (!MHI_POWER_UP_CAPABLE(current_ee)) {
dev_err(dev, "%s is not a valid EE for power on\n",
TO_MHI_EXEC_STR(current_ee));
ret = -EIO;
goto error_exit;
}
state = mhi_get_mhi_state(mhi_cntrl);
dev_dbg(dev, "Attempting power on with EE: %s, state: %s\n",
TO_MHI_EXEC_STR(current_ee), mhi_state_str(state));
if (state == MHI_STATE_SYS_ERR) {
mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
MHICTRL_RESET_MASK, 0, interval_us);
if (ret) {
dev_info(dev, "Failed to reset MHI due to syserr state\n");
goto error_exit;
}
/*
 * The device clears BHI_INTVEC as part of RESET processing,
 * hence re-program it
 */
mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
}
/* IRQs have been requested during probe, so we just need to enable them. */
enable_irq(mhi_cntrl->irq[0]);
for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
if (mhi_event->offload_ev)
continue;
enable_irq(mhi_cntrl->irq[mhi_event->irq]);
}
/* Transition to next state */
next_state = MHI_IN_PBL(current_ee) ?
DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY;
mhi_queue_state_transition(mhi_cntrl, next_state);
mutex_unlock(&mhi_cntrl->pm_mutex);
dev_info(dev, "Power on setup success\n");
return 0;
error_exit:
mhi_cntrl->pm_state = MHI_PM_DISABLE;
mutex_unlock(&mhi_cntrl->pm_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(mhi_async_power_up);
void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
{
enum mhi_pm_state cur_state, transition_state;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
mutex_lock(&mhi_cntrl->pm_mutex);
write_lock_irq(&mhi_cntrl->pm_lock);
cur_state = mhi_cntrl->pm_state;
if (cur_state == MHI_PM_DISABLE) {
write_unlock_irq(&mhi_cntrl->pm_lock);
mutex_unlock(&mhi_cntrl->pm_mutex);
return; /* Already powered down */
}
/* If it's not a graceful shutdown, force MHI to linkdown state */
transition_state = (graceful) ? MHI_PM_SHUTDOWN_PROCESS :
MHI_PM_LD_ERR_FATAL_DETECT;
cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state);
if (cur_state != transition_state) {
dev_err(dev, "Failed to move to state: %s from: %s\n",
to_mhi_pm_state_str(transition_state),
to_mhi_pm_state_str(mhi_cntrl->pm_state));
/* Force link down or error fatal detected state */
mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
}
/* mark device inactive to avoid any further host processing */
mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
mhi_cntrl->dev_state = MHI_STATE_RESET;
wake_up_all(&mhi_cntrl->state_event);
write_unlock_irq(&mhi_cntrl->pm_lock);
mutex_unlock(&mhi_cntrl->pm_mutex);
mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_DISABLE);
/* Wait for shutdown to complete */
flush_work(&mhi_cntrl->st_worker);
disable_irq(mhi_cntrl->irq[0]);
}
EXPORT_SYMBOL_GPL(mhi_power_down);
int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
{
int ret = mhi_async_power_up(mhi_cntrl);
if (ret)
return ret;
wait_event_timeout(mhi_cntrl->state_event,
MHI_IN_MISSION_MODE(mhi_cntrl->ee) ||
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
msecs_to_jiffies(mhi_cntrl->timeout_ms));
ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT;
if (ret)
mhi_power_down(mhi_cntrl, false);
return ret;
}
EXPORT_SYMBOL(mhi_sync_power_up);
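/*
 * Illustrative power lifecycle for a controller driver (a sketch, not
 * taken from this file):
 *
 *	ret = mhi_prepare_for_power_up(mhi_cntrl);
 *	if (ret)
 *		return ret;
 *	ret = mhi_sync_power_up(mhi_cntrl);
 *	if (ret) {
 *		mhi_unprepare_after_power_down(mhi_cntrl);
 *		return ret;
 *	}
 *	...
 *	mhi_power_down(mhi_cntrl, true);
 *	mhi_unprepare_after_power_down(mhi_cntrl);
 */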
int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl)
{
struct device *dev = &mhi_cntrl->mhi_dev->dev;
int ret;
/* Check if device is already in RDDM */
if (mhi_cntrl->ee == MHI_EE_RDDM)
return 0;
dev_dbg(dev, "Triggering SYS_ERR to force RDDM state\n");
mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
/* Wait for RDDM event */
ret = wait_event_timeout(mhi_cntrl->state_event,
mhi_cntrl->ee == MHI_EE_RDDM,
msecs_to_jiffies(mhi_cntrl->timeout_ms));
ret = ret ? 0 : -EIO;
return ret;
}
EXPORT_SYMBOL_GPL(mhi_force_rddm_mode);
void mhi_device_get(struct mhi_device *mhi_dev)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
mhi_dev->dev_wake++;
read_lock_bh(&mhi_cntrl->pm_lock);
if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
mhi_trigger_resume(mhi_cntrl);
mhi_cntrl->wake_get(mhi_cntrl, true);
read_unlock_bh(&mhi_cntrl->pm_lock);
}
EXPORT_SYMBOL_GPL(mhi_device_get);
int mhi_device_get_sync(struct mhi_device *mhi_dev)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
int ret;
ret = __mhi_device_get_sync(mhi_cntrl);
if (!ret)
mhi_dev->dev_wake++;
return ret;
}
EXPORT_SYMBOL_GPL(mhi_device_get_sync);
void mhi_device_put(struct mhi_device *mhi_dev)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
mhi_dev->dev_wake--;
read_lock_bh(&mhi_cntrl->pm_lock);
if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
mhi_trigger_resume(mhi_cntrl);
mhi_cntrl->wake_put(mhi_cntrl, false);
read_unlock_bh(&mhi_cntrl->pm_lock);
}
EXPORT_SYMBOL_GPL(mhi_device_put);
| linux-master | drivers/bus/mhi/host/pm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
*
*/
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include "internal.h"
int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
void __iomem *base, u32 offset, u32 *out)
{
return mhi_cntrl->read_reg(mhi_cntrl, base + offset, out);
}
int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
void __iomem *base, u32 offset,
u32 mask, u32 *out)
{
u32 tmp;
int ret;
ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
if (ret)
return ret;
*out = (tmp & mask) >> __ffs(mask);
return 0;
}
int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl,
void __iomem *base, u32 offset,
u32 mask, u32 val, u32 delayus)
{
int ret;
u32 out, retry = (mhi_cntrl->timeout_ms * 1000) / delayus;
while (retry--) {
ret = mhi_read_reg_field(mhi_cntrl, base, offset, mask, &out);
if (ret)
return ret;
if (out == val)
return 0;
fsleep(delayus);
}
return -ETIMEDOUT;
}
void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
u32 offset, u32 val)
{
mhi_cntrl->write_reg(mhi_cntrl, base + offset, val);
}
int __must_check mhi_write_reg_field(struct mhi_controller *mhi_cntrl,
void __iomem *base, u32 offset, u32 mask,
u32 val)
{
int ret;
u32 tmp;
ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
if (ret)
return ret;
tmp &= ~mask;
tmp |= (val << __ffs(mask));
mhi_write_reg(mhi_cntrl, base, offset, tmp);
return 0;
}
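/*
 * Doorbell registers are 64 bits wide and are written as two 32-bit
 * halves, upper word first, so that the low-word write (presumably the
 * one that triggers the device) observes a consistent 64-bit value.
 */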
void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
dma_addr_t db_val)
{
mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(db_val));
mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(db_val));
}
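/*
 * In burst (BRST) mode the doorbell is rung only while db_mode is set; the
 * flag is re-armed elsewhere (e.g. on the M0 transition for channels with
 * db_cfg.reset_req). Until then, new write-pointer values are recorded but
 * not signalled, which presumably lets the device batch its ring fetches.
 */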
void mhi_db_brstmode(struct mhi_controller *mhi_cntrl,
struct db_cfg *db_cfg,
void __iomem *db_addr,
dma_addr_t db_val)
{
if (db_cfg->db_mode) {
db_cfg->db_val = db_val;
mhi_write_db(mhi_cntrl, db_addr, db_val);
db_cfg->db_mode = 0;
}
}
void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
struct db_cfg *db_cfg,
void __iomem *db_addr,
dma_addr_t db_val)
{
db_cfg->db_val = db_val;
mhi_write_db(mhi_cntrl, db_addr, db_val);
}
void mhi_ring_er_db(struct mhi_event *mhi_event)
{
struct mhi_ring *ring = &mhi_event->ring;
mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg,
ring->db_addr, le64_to_cpu(*ring->ctxt_wp));
}
void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd)
{
dma_addr_t db;
struct mhi_ring *ring = &mhi_cmd->ring;
db = ring->iommu_base + (ring->wp - ring->base);
*ring->ctxt_wp = cpu_to_le64(db);
mhi_write_db(mhi_cntrl, ring->db_addr, db);
}
void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan)
{
struct mhi_ring *ring = &mhi_chan->tre_ring;
dma_addr_t db;
db = ring->iommu_base + (ring->wp - ring->base);
/*
* Writes to the new ring element must be visible to the hardware
* before letting h/w know there is new element to fetch.
*/
dma_wmb();
*ring->ctxt_wp = cpu_to_le64(db);
mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg,
ring->db_addr, db);
}
enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl)
{
u32 exec;
int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec);
return (ret) ? MHI_EE_MAX : exec;
}
EXPORT_SYMBOL_GPL(mhi_get_exec_env);
enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl)
{
u32 state;
int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
MHISTATUS_MHISTATE_MASK, &state);
return ret ? MHI_STATE_MAX : state;
}
EXPORT_SYMBOL_GPL(mhi_get_mhi_state);
void mhi_soc_reset(struct mhi_controller *mhi_cntrl)
{
if (mhi_cntrl->reset) {
mhi_cntrl->reset(mhi_cntrl);
return;
}
/* Generic MHI SoC reset */
mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, MHI_SOC_RESET_REQ_OFFSET,
MHI_SOC_RESET_REQ);
}
EXPORT_SYMBOL_GPL(mhi_soc_reset);
int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
struct mhi_buf_info *buf_info)
{
buf_info->p_addr = dma_map_single(mhi_cntrl->cntrl_dev,
buf_info->v_addr, buf_info->len,
buf_info->dir);
if (dma_mapping_error(mhi_cntrl->cntrl_dev, buf_info->p_addr))
return -ENOMEM;
return 0;
}
int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
struct mhi_buf_info *buf_info)
{
void *buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, buf_info->len,
&buf_info->p_addr, GFP_ATOMIC);
if (!buf)
return -ENOMEM;
if (buf_info->dir == DMA_TO_DEVICE)
memcpy(buf, buf_info->v_addr, buf_info->len);
buf_info->bb_addr = buf;
return 0;
}
void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
struct mhi_buf_info *buf_info)
{
dma_unmap_single(mhi_cntrl->cntrl_dev, buf_info->p_addr, buf_info->len,
buf_info->dir);
}
void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
struct mhi_buf_info *buf_info)
{
if (buf_info->dir == DMA_FROM_DEVICE)
memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len);
dma_free_coherent(mhi_cntrl->cntrl_dev, buf_info->len,
buf_info->bb_addr, buf_info->p_addr);
}
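/*
 * Number of free elements in the ring. One slot is always kept unused
 * so that a full ring (wp one element behind rp) can be distinguished
 * from an empty one (wp == rp).
 */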
static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl,
struct mhi_ring *ring)
{
int nr_el;
if (ring->wp < ring->rp) {
nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1;
} else {
nr_el = (ring->rp - ring->base) / ring->el_size;
nr_el += ((ring->base + ring->len - ring->wp) /
ring->el_size) - 1;
}
return nr_el;
}
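/* Translate a device-visible (IOMMU/DMA) ring address to a host virtual address */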
static void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr)
{
return (addr - ring->iommu_base) + ring->base;
}
static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl,
struct mhi_ring *ring)
{
ring->wp += ring->el_size;
if (ring->wp >= (ring->base + ring->len))
ring->wp = ring->base;
/* Make the ring pointer update visible to other CPUs */
smp_wmb();
}
static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
struct mhi_ring *ring)
{
ring->rp += ring->el_size;
if (ring->rp >= (ring->base + ring->len))
ring->rp = ring->base;
/* Make the ring pointer update visible to other CPUs */
smp_wmb();
}
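/* Sanity check that a device-provided ring pointer falls inside the ring */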
static bool is_valid_ring_ptr(struct mhi_ring *ring, dma_addr_t addr)
{
return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len;
}
int mhi_destroy_device(struct device *dev, void *data)
{
struct mhi_chan *ul_chan, *dl_chan;
struct mhi_device *mhi_dev;
struct mhi_controller *mhi_cntrl;
enum mhi_ee_type ee = MHI_EE_MAX;
if (dev->bus != &mhi_bus_type)
return 0;
mhi_dev = to_mhi_device(dev);
mhi_cntrl = mhi_dev->mhi_cntrl;
/* Only destroy virtual devices that are attached to the bus */
if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
return 0;
ul_chan = mhi_dev->ul_chan;
dl_chan = mhi_dev->dl_chan;
/*
 * If an execution environment is specified, remove only those devices
 * that were started in it, based on the channels' ee_mask, as we move
 * on to a different execution environment.
 */
if (data)
ee = *(enum mhi_ee_type *)data;
/*
* For the suspend and resume case, this function will get called
* without mhi_unregister_controller(). Hence, we need to drop the
* references to mhi_dev created for ul and dl channels. We can
* be sure that there will be no instances of mhi_dev left after
* this.
*/
if (ul_chan) {
if (ee != MHI_EE_MAX && !(ul_chan->ee_mask & BIT(ee)))
return 0;
put_device(&ul_chan->mhi_dev->dev);
}
if (dl_chan) {
if (ee != MHI_EE_MAX && !(dl_chan->ee_mask & BIT(ee)))
return 0;
put_device(&dl_chan->mhi_dev->dev);
}
dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n",
mhi_dev->name);
/* Notify the client and remove the device from MHI bus */
device_del(dev);
put_device(dev);
return 0;
}
int mhi_get_free_desc_count(struct mhi_device *mhi_dev,
enum dma_data_direction dir)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
mhi_dev->ul_chan : mhi_dev->dl_chan;
struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
return get_nr_avail_ring_elements(mhi_cntrl, tre_ring);
}
EXPORT_SYMBOL_GPL(mhi_get_free_desc_count);
void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
{
struct mhi_driver *mhi_drv;
if (!mhi_dev->dev.driver)
return;
mhi_drv = to_mhi_driver(mhi_dev->dev.driver);
if (mhi_drv->status_cb)
mhi_drv->status_cb(mhi_dev, cb_reason);
}
EXPORT_SYMBOL_GPL(mhi_notify);
/* Bind MHI channels to MHI devices */
void mhi_create_devices(struct mhi_controller *mhi_cntrl)
{
struct mhi_chan *mhi_chan;
struct mhi_device *mhi_dev;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
int i, ret;
mhi_chan = mhi_cntrl->mhi_chan;
for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
if (!mhi_chan->configured || mhi_chan->mhi_dev ||
!(mhi_chan->ee_mask & BIT(mhi_cntrl->ee)))
continue;
mhi_dev = mhi_alloc_device(mhi_cntrl);
if (IS_ERR(mhi_dev))
return;
mhi_dev->dev_type = MHI_DEVICE_XFER;
switch (mhi_chan->dir) {
case DMA_TO_DEVICE:
mhi_dev->ul_chan = mhi_chan;
mhi_dev->ul_chan_id = mhi_chan->chan;
break;
case DMA_FROM_DEVICE:
/* We use dl_chan for offload channels */
mhi_dev->dl_chan = mhi_chan;
mhi_dev->dl_chan_id = mhi_chan->chan;
break;
default:
dev_err(dev, "Direction not supported\n");
put_device(&mhi_dev->dev);
return;
}
get_device(&mhi_dev->dev);
mhi_chan->mhi_dev = mhi_dev;
/* Check if the next channel is the matching half of an UL/DL pair */
if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) {
if (!strcmp(mhi_chan[1].name, mhi_chan->name)) {
i++;
mhi_chan++;
if (mhi_chan->dir == DMA_TO_DEVICE) {
mhi_dev->ul_chan = mhi_chan;
mhi_dev->ul_chan_id = mhi_chan->chan;
} else {
mhi_dev->dl_chan = mhi_chan;
mhi_dev->dl_chan_id = mhi_chan->chan;
}
get_device(&mhi_dev->dev);
mhi_chan->mhi_dev = mhi_dev;
}
}
/* The channel name is the same for both UL and DL */
mhi_dev->name = mhi_chan->name;
dev_set_name(&mhi_dev->dev, "%s_%s",
dev_name(&mhi_cntrl->mhi_dev->dev),
mhi_dev->name);
/* Init wakeup source if available */
if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable)
device_init_wakeup(&mhi_dev->dev, true);
ret = device_add(&mhi_dev->dev);
if (ret)
put_device(&mhi_dev->dev);
}
}
irqreturn_t mhi_irq_handler(int irq_number, void *dev)
{
struct mhi_event *mhi_event = dev;
struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
struct mhi_event_ctxt *er_ctxt;
struct mhi_ring *ev_ring = &mhi_event->ring;
dma_addr_t ptr;
void *dev_rp;
/*
 * If CONFIG_DEBUG_SHIRQ is set, the IRQ handler will get invoked during
 * __free_irq() and by that time mhi_ctxt would have been freed. So check
 * for the existence of mhi_ctxt before handling the IRQs.
 */
if (!mhi_cntrl->mhi_ctxt) {
dev_dbg(&mhi_cntrl->mhi_dev->dev,
"mhi_ctxt has been freed\n");
return IRQ_HANDLED;
}
er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
ptr = le64_to_cpu(er_ctxt->rp);
if (!is_valid_ring_ptr(ev_ring, ptr)) {
dev_err(&mhi_cntrl->mhi_dev->dev,
"Event ring rp points outside of the event ring\n");
return IRQ_HANDLED;
}
dev_rp = mhi_to_virtual(ev_ring, ptr);
/* Only proceed if event ring has pending events */
if (ev_ring->rp == dev_rp)
return IRQ_HANDLED;
/* For client managed event ring, notify pending data */
if (mhi_event->cl_manage) {
struct mhi_chan *mhi_chan = mhi_event->mhi_chan;
struct mhi_device *mhi_dev = mhi_chan->mhi_dev;
if (mhi_dev)
mhi_notify(mhi_dev, MHI_CB_PENDING_DATA);
} else {
tasklet_schedule(&mhi_event->task);
}
return IRQ_HANDLED;
}
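/*
 * Threaded half of the controller interrupt: sample the device state
 * and execution environment, flag SYS_ERR detection, and route RDDM
 * or fatal PBL/EDL/PTHRU transitions to the appropriate handlers.
 */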
irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
{
struct mhi_controller *mhi_cntrl = priv;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
enum mhi_state state;
enum mhi_pm_state pm_state = 0;
enum mhi_ee_type ee;
write_lock_irq(&mhi_cntrl->pm_lock);
if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
write_unlock_irq(&mhi_cntrl->pm_lock);
goto exit_intvec;
}
state = mhi_get_mhi_state(mhi_cntrl);
ee = mhi_get_exec_env(mhi_cntrl);
dev_dbg(dev, "local ee: %s state: %s device ee: %s state: %s\n",
TO_MHI_EXEC_STR(mhi_cntrl->ee),
mhi_state_str(mhi_cntrl->dev_state),
TO_MHI_EXEC_STR(ee), mhi_state_str(state));
if (state == MHI_STATE_SYS_ERR) {
dev_dbg(dev, "System error detected\n");
pm_state = mhi_tryset_pm_state(mhi_cntrl,
MHI_PM_SYS_ERR_DETECT);
}
write_unlock_irq(&mhi_cntrl->pm_lock);
if (pm_state != MHI_PM_SYS_ERR_DETECT)
goto exit_intvec;
switch (ee) {
case MHI_EE_RDDM:
/* proceed if power down is not already in progress */
if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) {
mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
mhi_cntrl->ee = ee;
wake_up_all(&mhi_cntrl->state_event);
}
break;
case MHI_EE_PBL:
case MHI_EE_EDL:
case MHI_EE_PTHRU:
mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
mhi_cntrl->ee = ee;
wake_up_all(&mhi_cntrl->state_event);
mhi_pm_sys_err_handler(mhi_cntrl);
break;
default:
wake_up_all(&mhi_cntrl->state_event);
mhi_pm_sys_err_handler(mhi_cntrl);
break;
}
exit_intvec:
return IRQ_HANDLED;
}
irqreturn_t mhi_intvec_handler(int irq_number, void *dev)
{
struct mhi_controller *mhi_cntrl = dev;
/* Wake up events waiting for state change */
wake_up_all(&mhi_cntrl->state_event);
return IRQ_WAKE_THREAD;
}
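/*
 * Recycle a consumed event ring element: advance both WP and RP by one
 * element (wrapping at the end of the ring) and publish the new WP to
 * the shared event ring context so the device can reuse the slot.
 */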
static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl,
struct mhi_ring *ring)
{
/* Update the WP */
ring->wp += ring->el_size;
if (ring->wp >= (ring->base + ring->len))
ring->wp = ring->base;
*ring->ctxt_wp = cpu_to_le64(ring->iommu_base + (ring->wp - ring->base));
/* Update the RP */
ring->rp += ring->el_size;
if (ring->rp >= (ring->base + ring->len))
ring->rp = ring->base;
/* Update to all cores */
smp_wmb();
}
static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
struct mhi_ring_element *event,
struct mhi_chan *mhi_chan)
{
struct mhi_ring *buf_ring, *tre_ring;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
struct mhi_result result;
unsigned long flags = 0;
u32 ev_code;
ev_code = MHI_TRE_GET_EV_CODE(event);
buf_ring = &mhi_chan->buf_ring;
tre_ring = &mhi_chan->tre_ring;
result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
-EOVERFLOW : 0;
/*
 * If it's a DB event, we need to grab the lock with preemption
 * disabled and as a writer, because we have to update the DB
 * register and another thread could be doing the same.
 */
if (ev_code >= MHI_EV_CC_OOB)
write_lock_irqsave(&mhi_chan->lock, flags);
else
read_lock_bh(&mhi_chan->lock);
if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
goto end_process_tx_event;
switch (ev_code) {
case MHI_EV_CC_OVERFLOW:
case MHI_EV_CC_EOB:
case MHI_EV_CC_EOT:
{
dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event);
struct mhi_ring_element *local_rp, *ev_tre;
void *dev_rp;
struct mhi_buf_info *buf_info;
u16 xfer_len;
if (!is_valid_ring_ptr(tre_ring, ptr)) {
dev_err(&mhi_cntrl->mhi_dev->dev,
"Event element points outside of the tre ring\n");
break;
}
/* Get the TRB this event points to */
ev_tre = mhi_to_virtual(tre_ring, ptr);
dev_rp = ev_tre + 1;
if (dev_rp >= (tre_ring->base + tre_ring->len))
dev_rp = tre_ring->base;
result.dir = mhi_chan->dir;
local_rp = tre_ring->rp;
while (local_rp != dev_rp) {
buf_info = buf_ring->rp;
/* If it's the last TRE, get length from the event */
if (local_rp == ev_tre)
xfer_len = MHI_TRE_GET_EV_LEN(event);
else
xfer_len = buf_info->len;
/* Unmap if it's not pre-mapped by client */
if (likely(!buf_info->pre_mapped))
mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
result.buf_addr = buf_info->cb_buf;
/* truncate to buf len if xfer_len is larger */
result.bytes_xferd =
min_t(u16, xfer_len, buf_info->len);
mhi_del_ring_element(mhi_cntrl, buf_ring);
mhi_del_ring_element(mhi_cntrl, tre_ring);
local_rp = tre_ring->rp;
/* notify client */
mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
if (mhi_chan->dir == DMA_TO_DEVICE) {
atomic_dec(&mhi_cntrl->pending_pkts);
/* Release the reference got from mhi_queue() */
mhi_cntrl->runtime_put(mhi_cntrl);
}
/*
 * Recycle the buffer if it is pre-allocated; if there
 * is an error, there is not much we can do apart from
 * dropping the packet.
 */
if (mhi_chan->pre_alloc) {
if (mhi_queue_buf(mhi_chan->mhi_dev,
mhi_chan->dir,
buf_info->cb_buf,
buf_info->len, MHI_EOT)) {
dev_err(dev,
"Error recycling buffer for chan:%d\n",
mhi_chan->chan);
kfree(buf_info->cb_buf);
}
}
}
break;
} /* CC_EOT */
case MHI_EV_CC_OOB:
case MHI_EV_CC_DB_MODE:
{
unsigned long pm_lock_flags;
mhi_chan->db_cfg.db_mode = 1;
read_lock_irqsave(&mhi_cntrl->pm_lock, pm_lock_flags);
if (tre_ring->wp != tre_ring->rp &&
MHI_DB_ACCESS_VALID(mhi_cntrl)) {
mhi_ring_chan_db(mhi_cntrl, mhi_chan);
}
read_unlock_irqrestore(&mhi_cntrl->pm_lock, pm_lock_flags);
break;
}
case MHI_EV_CC_BAD_TRE:
default:
dev_err(dev, "Unknown event 0x%x\n", ev_code);
break;
} /* switch(MHI_EV_READ_CODE(EV_TRB_CODE,event)) */
end_process_tx_event:
if (ev_code >= MHI_EV_CC_OOB)
write_unlock_irqrestore(&mhi_chan->lock, flags);
else
read_unlock_bh(&mhi_chan->lock);
return 0;
}
static int parse_rsc_event(struct mhi_controller *mhi_cntrl,
struct mhi_ring_element *event,
struct mhi_chan *mhi_chan)
{
struct mhi_ring *buf_ring, *tre_ring;
struct mhi_buf_info *buf_info;
struct mhi_result result;
int ev_code;
u32 cookie; /* offset to local descriptor */
u16 xfer_len;
buf_ring = &mhi_chan->buf_ring;
tre_ring = &mhi_chan->tre_ring;
ev_code = MHI_TRE_GET_EV_CODE(event);
cookie = MHI_TRE_GET_EV_COOKIE(event);
xfer_len = MHI_TRE_GET_EV_LEN(event);
/* Received an out-of-bounds cookie */
WARN_ON(cookie >= buf_ring->len);
buf_info = buf_ring->base + cookie;
result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
-EOVERFLOW : 0;
/* truncate to buf len if xfer_len is larger */
result.bytes_xferd = min_t(u16, xfer_len, buf_info->len);
result.buf_addr = buf_info->cb_buf;
result.dir = mhi_chan->dir;
read_lock_bh(&mhi_chan->lock);
if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
goto end_process_rsc_event;
WARN_ON(!buf_info->used);
/* notify the client */
mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
/*
 * Note: We're arbitrarily incrementing RP even though the completion
 * packet we just processed might not correspond to it. We can do this
 * because the device is guaranteed to cache descriptors in the order
 * it receives them, so even though this completion event is for a
 * different descriptor, we can safely reuse all descriptors in between.
 * Example:
 * The transfer ring has descriptors: A, B, C, D
 * The last descriptor the host queued is D (WP) and the first is A (RP).
 * The completion event we just serviced is for descriptor C.
 * Then we can safely queue descriptors to replace A, B, and C even
 * though the host has not received completions for them.
 */
mhi_del_ring_element(mhi_cntrl, tre_ring);
buf_info->used = false;
end_process_rsc_event:
read_unlock_bh(&mhi_chan->lock);
return 0;
}
static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
struct mhi_ring_element *tre)
{
dma_addr_t ptr = MHI_TRE_GET_EV_PTR(tre);
struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
struct mhi_ring *mhi_ring = &cmd_ring->ring;
struct mhi_ring_element *cmd_pkt;
struct mhi_chan *mhi_chan;
u32 chan;
if (!is_valid_ring_ptr(mhi_ring, ptr)) {
dev_err(&mhi_cntrl->mhi_dev->dev,
"Event element points outside of the cmd ring\n");
return;
}
cmd_pkt = mhi_to_virtual(mhi_ring, ptr);
chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);
if (chan < mhi_cntrl->max_chan &&
mhi_cntrl->mhi_chan[chan].configured) {
mhi_chan = &mhi_cntrl->mhi_chan[chan];
write_lock_bh(&mhi_chan->lock);
mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);
complete(&mhi_chan->completion);
write_unlock_bh(&mhi_chan->lock);
} else {
dev_err(&mhi_cntrl->mhi_dev->dev,
"Completion packet for invalid channel ID: %d\n", chan);
}
mhi_del_ring_element(mhi_cntrl, mhi_ring);
}
int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
struct mhi_event *mhi_event,
u32 event_quota)
{
struct mhi_ring_element *dev_rp, *local_rp;
struct mhi_ring *ev_ring = &mhi_event->ring;
struct mhi_event_ctxt *er_ctxt =
&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
struct mhi_chan *mhi_chan;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
u32 chan;
int count = 0;
dma_addr_t ptr = le64_to_cpu(er_ctxt->rp);
/*
* This is a quick check to avoid unnecessary event processing
* in case MHI is already in error state, but it's still possible
* to transition to error state while processing events
*/
if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
return -EIO;
if (!is_valid_ring_ptr(ev_ring, ptr)) {
dev_err(&mhi_cntrl->mhi_dev->dev,
"Event ring rp points outside of the event ring\n");
return -EIO;
}
dev_rp = mhi_to_virtual(ev_ring, ptr);
local_rp = ev_ring->rp;
while (dev_rp != local_rp) {
enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
switch (type) {
case MHI_PKT_TYPE_BW_REQ_EVENT:
{
struct mhi_link_info *link_info;
link_info = &mhi_cntrl->mhi_link_info;
write_lock_irq(&mhi_cntrl->pm_lock);
link_info->target_link_speed =
MHI_TRE_GET_EV_LINKSPEED(local_rp);
link_info->target_link_width =
MHI_TRE_GET_EV_LINKWIDTH(local_rp);
write_unlock_irq(&mhi_cntrl->pm_lock);
dev_dbg(dev, "Received BW_REQ event\n");
mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_BW_REQ);
break;
}
case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
{
enum mhi_state new_state;
new_state = MHI_TRE_GET_EV_STATE(local_rp);
dev_dbg(dev, "State change event to state: %s\n",
mhi_state_str(new_state));
switch (new_state) {
case MHI_STATE_M0:
mhi_pm_m0_transition(mhi_cntrl);
break;
case MHI_STATE_M1:
mhi_pm_m1_transition(mhi_cntrl);
break;
case MHI_STATE_M3:
mhi_pm_m3_transition(mhi_cntrl);
break;
case MHI_STATE_SYS_ERR:
{
enum mhi_pm_state pm_state;
dev_dbg(dev, "System error detected\n");
write_lock_irq(&mhi_cntrl->pm_lock);
pm_state = mhi_tryset_pm_state(mhi_cntrl,
MHI_PM_SYS_ERR_DETECT);
write_unlock_irq(&mhi_cntrl->pm_lock);
if (pm_state == MHI_PM_SYS_ERR_DETECT)
mhi_pm_sys_err_handler(mhi_cntrl);
break;
}
default:
dev_err(dev, "Invalid state: %s\n",
mhi_state_str(new_state));
}
break;
}
case MHI_PKT_TYPE_CMD_COMPLETION_EVENT:
mhi_process_cmd_completion(mhi_cntrl, local_rp);
break;
case MHI_PKT_TYPE_EE_EVENT:
{
enum dev_st_transition st = DEV_ST_TRANSITION_MAX;
enum mhi_ee_type event = MHI_TRE_GET_EV_EXECENV(local_rp);
dev_dbg(dev, "Received EE event: %s\n",
TO_MHI_EXEC_STR(event));
switch (event) {
case MHI_EE_SBL:
st = DEV_ST_TRANSITION_SBL;
break;
case MHI_EE_WFW:
case MHI_EE_AMSS:
st = DEV_ST_TRANSITION_MISSION_MODE;
break;
case MHI_EE_FP:
st = DEV_ST_TRANSITION_FP;
break;
case MHI_EE_RDDM:
mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
write_lock_irq(&mhi_cntrl->pm_lock);
mhi_cntrl->ee = event;
write_unlock_irq(&mhi_cntrl->pm_lock);
wake_up_all(&mhi_cntrl->state_event);
break;
default:
dev_err(dev,
"Unhandled EE event: 0x%x\n", event);
}
if (st != DEV_ST_TRANSITION_MAX)
mhi_queue_state_transition(mhi_cntrl, st);
break;
}
case MHI_PKT_TYPE_TX_EVENT:
chan = MHI_TRE_GET_EV_CHID(local_rp);
WARN_ON(chan >= mhi_cntrl->max_chan);
/*
* Only process the event ring elements whose channel
* ID is within the maximum supported range.
*/
if (chan < mhi_cntrl->max_chan) {
mhi_chan = &mhi_cntrl->mhi_chan[chan];
if (!mhi_chan->configured)
break;
parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
}
break;
default:
dev_err(dev, "Unhandled event type: %d\n", type);
break;
}
mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
local_rp = ev_ring->rp;
ptr = le64_to_cpu(er_ctxt->rp);
if (!is_valid_ring_ptr(ev_ring, ptr)) {
dev_err(&mhi_cntrl->mhi_dev->dev,
"Event ring rp points outside of the event ring\n");
return -EIO;
}
dev_rp = mhi_to_virtual(ev_ring, ptr);
count++;
}
read_lock_bh(&mhi_cntrl->pm_lock);
/* Ring EV DB only if there is any pending element to process */
if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)) && count)
mhi_ring_er_db(mhi_event);
read_unlock_bh(&mhi_cntrl->pm_lock);
return count;
}
int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
struct mhi_event *mhi_event,
u32 event_quota)
{
struct mhi_ring_element *dev_rp, *local_rp;
struct mhi_ring *ev_ring = &mhi_event->ring;
struct mhi_event_ctxt *er_ctxt =
&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
int count = 0;
u32 chan;
struct mhi_chan *mhi_chan;
dma_addr_t ptr = le64_to_cpu(er_ctxt->rp);
if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
return -EIO;
if (!is_valid_ring_ptr(ev_ring, ptr)) {
dev_err(&mhi_cntrl->mhi_dev->dev,
"Event ring rp points outside of the event ring\n");
return -EIO;
}
dev_rp = mhi_to_virtual(ev_ring, ptr);
local_rp = ev_ring->rp;
while (dev_rp != local_rp && event_quota > 0) {
enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
chan = MHI_TRE_GET_EV_CHID(local_rp);
WARN_ON(chan >= mhi_cntrl->max_chan);
/*
* Only process the event ring elements whose channel
* ID is within the maximum supported range.
*/
if (chan < mhi_cntrl->max_chan &&
mhi_cntrl->mhi_chan[chan].configured) {
mhi_chan = &mhi_cntrl->mhi_chan[chan];
if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
event_quota--;
} else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) {
parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
event_quota--;
}
}
mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
local_rp = ev_ring->rp;
ptr = le64_to_cpu(er_ctxt->rp);
if (!is_valid_ring_ptr(ev_ring, ptr)) {
dev_err(&mhi_cntrl->mhi_dev->dev,
"Event ring rp points outside of the event ring\n");
return -EIO;
}
dev_rp = mhi_to_virtual(ev_ring, ptr);
count++;
}
read_lock_bh(&mhi_cntrl->pm_lock);
/* Ring EV DB only if there is any pending element to process */
if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)) && count)
mhi_ring_er_db(mhi_event);
read_unlock_bh(&mhi_cntrl->pm_lock);
return count;
}
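/* Tasklet handler for event rings carrying data events */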
void mhi_ev_task(unsigned long data)
{
struct mhi_event *mhi_event = (struct mhi_event *)data;
struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
/* process all pending events */
spin_lock_bh(&mhi_event->lock);
mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
spin_unlock_bh(&mhi_event->lock);
}
void mhi_ctrl_ev_task(unsigned long data)
{
struct mhi_event *mhi_event = (struct mhi_event *)data;
struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
enum mhi_state state;
enum mhi_pm_state pm_state = 0;
int ret;
/*
 * We can check the PM state without a lock here because there is no
 * way the PM state can change from reg access valid to no access
 * while this thread is executing.
 */
if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
/*
* We may have a pending event but not allowed to
* process it since we are probably in a suspended state,
* so trigger a resume.
*/
mhi_trigger_resume(mhi_cntrl);
return;
}
/* Process ctrl events */
ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
/*
 * We received an IRQ but have no events to process; maybe the device
 * went to SYS_ERR state? Check the state to confirm.
 */
if (!ret) {
write_lock_irq(&mhi_cntrl->pm_lock);
state = mhi_get_mhi_state(mhi_cntrl);
if (state == MHI_STATE_SYS_ERR) {
dev_dbg(dev, "System error detected\n");
pm_state = mhi_tryset_pm_state(mhi_cntrl,
MHI_PM_SYS_ERR_DETECT);
}
write_unlock_irq(&mhi_cntrl->pm_lock);
if (pm_state == MHI_PM_SYS_ERR_DETECT)
mhi_pm_sys_err_handler(mhi_cntrl);
}
}
static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
struct mhi_ring *ring)
{
void *tmp = ring->wp + ring->el_size;
if (tmp >= (ring->base + ring->len))
tmp = ring->base;
return (tmp == ring->rp);
}
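/*
 * Common queue path: generate a TRE for the buffer, take a runtime PM
 * reference, assert device wake, and ring the channel doorbell if
 * register access is currently permitted.
 */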
static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
enum dma_data_direction dir, enum mhi_flags mflags)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
mhi_dev->dl_chan;
struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
unsigned long flags;
int ret;
if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
return -EIO;
read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
if (unlikely(ret)) {
ret = -EAGAIN;
goto exit_unlock;
}
ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
if (unlikely(ret))
goto exit_unlock;
/*
 * The packet is queued; take a usage ref to exit M3 if necessary.
 * For a host->device buffer, the balancing put is done on buffer
 * completion; for a device->host buffer, it is done after ringing
 * the DB.
 */
mhi_cntrl->runtime_get(mhi_cntrl);
/* Assert dev_wake (to exit/prevent M1/M2)*/
mhi_cntrl->wake_toggle(mhi_cntrl);
if (mhi_chan->dir == DMA_TO_DEVICE)
atomic_inc(&mhi_cntrl->pending_pkts);
if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
mhi_ring_chan_db(mhi_cntrl, mhi_chan);
if (dir == DMA_FROM_DEVICE)
mhi_cntrl->runtime_put(mhi_cntrl);
exit_unlock:
read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
return ret;
}
int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
struct sk_buff *skb, size_t len, enum mhi_flags mflags)
{
struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
mhi_dev->dl_chan;
struct mhi_buf_info buf_info = { };
buf_info.v_addr = skb->data;
buf_info.cb_buf = skb;
buf_info.len = len;
if (unlikely(mhi_chan->pre_alloc))
return -EINVAL;
return mhi_queue(mhi_dev, &buf_info, dir, mflags);
}
EXPORT_SYMBOL_GPL(mhi_queue_skb);
int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
{
struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
mhi_dev->dl_chan;
struct mhi_buf_info buf_info = { };
buf_info.p_addr = mhi_buf->dma_addr;
buf_info.cb_buf = mhi_buf;
buf_info.pre_mapped = true;
buf_info.len = len;
if (unlikely(mhi_chan->pre_alloc))
return -EINVAL;
return mhi_queue(mhi_dev, &buf_info, dir, mflags);
}
EXPORT_SYMBOL_GPL(mhi_queue_dma);
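/*
 * Build a transfer ring element (TRE) for the buffer described by
 * @info, map it for DMA unless the client pre-mapped it, and advance
 * the transfer and buffer ring write pointers.
 */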
int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
struct mhi_buf_info *info, enum mhi_flags flags)
{
struct mhi_ring *buf_ring, *tre_ring;
struct mhi_ring_element *mhi_tre;
struct mhi_buf_info *buf_info;
int eot, eob, chain, bei;
int ret;
buf_ring = &mhi_chan->buf_ring;
tre_ring = &mhi_chan->tre_ring;
buf_info = buf_ring->wp;
WARN_ON(buf_info->used);
buf_info->pre_mapped = info->pre_mapped;
if (info->pre_mapped)
buf_info->p_addr = info->p_addr;
else
buf_info->v_addr = info->v_addr;
buf_info->cb_buf = info->cb_buf;
buf_info->wp = tre_ring->wp;
buf_info->dir = mhi_chan->dir;
buf_info->len = info->len;
if (!info->pre_mapped) {
ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
if (ret)
return ret;
}
eob = !!(flags & MHI_EOB);
eot = !!(flags & MHI_EOT);
chain = !!(flags & MHI_CHAIN);
bei = !!(mhi_chan->intmod);
mhi_tre = tre_ring->wp;
mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(info->len);
mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain);
/* increment WP */
mhi_add_ring_element(mhi_cntrl, tre_ring);
mhi_add_ring_element(mhi_cntrl, buf_ring);
return 0;
}
int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
void *buf, size_t len, enum mhi_flags mflags)
{
struct mhi_buf_info buf_info = { };
buf_info.v_addr = buf;
buf_info.cb_buf = buf;
buf_info.len = len;
return mhi_queue(mhi_dev, &buf_info, dir, mflags);
}
EXPORT_SYMBOL_GPL(mhi_queue_buf);
bool mhi_queue_is_full(struct mhi_device *mhi_dev, enum dma_data_direction dir)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
mhi_dev->ul_chan : mhi_dev->dl_chan;
struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
return mhi_is_ring_full(mhi_cntrl, tre_ring);
}
EXPORT_SYMBOL_GPL(mhi_queue_is_full);
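/*
 * Queue a RESET/STOP/START channel command on the primary command ring
 * and ring the command doorbell; completion is reported later via the
 * command completion event.
 */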
int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan,
enum mhi_cmd_type cmd)
{
struct mhi_ring_element *cmd_tre = NULL;
struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
struct mhi_ring *ring = &mhi_cmd->ring;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
int chan = 0;
if (mhi_chan)
chan = mhi_chan->chan;
spin_lock_bh(&mhi_cmd->lock);
if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) {
spin_unlock_bh(&mhi_cmd->lock);
return -ENOMEM;
}
/* prepare the cmd tre */
cmd_tre = ring->wp;
switch (cmd) {
case MHI_CMD_RESET_CHAN:
cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR;
cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0;
cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan);
break;
case MHI_CMD_STOP_CHAN:
cmd_tre->ptr = MHI_TRE_CMD_STOP_PTR;
cmd_tre->dword[0] = MHI_TRE_CMD_STOP_DWORD0;
cmd_tre->dword[1] = MHI_TRE_CMD_STOP_DWORD1(chan);
break;
case MHI_CMD_START_CHAN:
cmd_tre->ptr = MHI_TRE_CMD_START_PTR;
cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0;
cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan);
break;
default:
dev_err(dev, "Command not supported\n");
break;
}
/* queue to hardware */
mhi_add_ring_element(mhi_cntrl, ring);
read_lock_bh(&mhi_cntrl->pm_lock);
if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
read_unlock_bh(&mhi_cntrl->pm_lock);
spin_unlock_bh(&mhi_cmd->lock);
return 0;
}
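/*
 * Drive a channel state transition: validate the current state, issue
 * the corresponding command, and wait for its completion event before
 * committing the new state.
 */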
static int mhi_update_channel_state(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan,
enum mhi_ch_state_type to_state)
{
struct device *dev = &mhi_chan->mhi_dev->dev;
enum mhi_cmd_type cmd = MHI_CMD_NOP;
int ret;
dev_dbg(dev, "%d: Updating channel state to: %s\n", mhi_chan->chan,
TO_CH_STATE_TYPE_STR(to_state));
switch (to_state) {
case MHI_CH_STATE_TYPE_RESET:
write_lock_irq(&mhi_chan->lock);
if (mhi_chan->ch_state != MHI_CH_STATE_STOP &&
mhi_chan->ch_state != MHI_CH_STATE_ENABLED &&
mhi_chan->ch_state != MHI_CH_STATE_SUSPENDED) {
write_unlock_irq(&mhi_chan->lock);
return -EINVAL;
}
mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
write_unlock_irq(&mhi_chan->lock);
cmd = MHI_CMD_RESET_CHAN;
break;
case MHI_CH_STATE_TYPE_STOP:
if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
return -EINVAL;
cmd = MHI_CMD_STOP_CHAN;
break;
case MHI_CH_STATE_TYPE_START:
if (mhi_chan->ch_state != MHI_CH_STATE_STOP &&
mhi_chan->ch_state != MHI_CH_STATE_DISABLED)
return -EINVAL;
cmd = MHI_CMD_START_CHAN;
break;
default:
dev_err(dev, "%d: Channel state update to %s not allowed\n",
mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
return -EINVAL;
}
/* bring host and device out of suspended states */
ret = mhi_device_get_sync(mhi_cntrl->mhi_dev);
if (ret)
return ret;
mhi_cntrl->runtime_get(mhi_cntrl);
reinit_completion(&mhi_chan->completion);
ret = mhi_send_cmd(mhi_cntrl, mhi_chan, cmd);
if (ret) {
dev_err(dev, "%d: Failed to send %s channel command\n",
mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
goto exit_channel_update;
}
ret = wait_for_completion_timeout(&mhi_chan->completion,
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) {
dev_err(dev,
"%d: Failed to receive %s channel command completion\n",
mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
ret = -EIO;
goto exit_channel_update;
}
ret = 0;
if (to_state != MHI_CH_STATE_TYPE_RESET) {
write_lock_irq(&mhi_chan->lock);
mhi_chan->ch_state = (to_state == MHI_CH_STATE_TYPE_START) ?
MHI_CH_STATE_ENABLED : MHI_CH_STATE_STOP;
write_unlock_irq(&mhi_chan->lock);
}
dev_dbg(dev, "%d: Channel state change to %s successful\n",
mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
exit_channel_update:
mhi_cntrl->runtime_put(mhi_cntrl);
mhi_device_put(mhi_cntrl->mhi_dev);
return ret;
}
static void mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan)
{
int ret;
struct device *dev = &mhi_chan->mhi_dev->dev;
mutex_lock(&mhi_chan->mutex);
if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
dev_dbg(dev, "Current EE: %s Required EE Mask: 0x%x\n",
TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask);
goto exit_unprepare_channel;
}
/* Stop processing events for this channel */
ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
MHI_CH_STATE_TYPE_RESET);
if (ret)
dev_err(dev, "%d: Failed to reset channel, still resetting\n",
mhi_chan->chan);
exit_unprepare_channel:
write_lock_irq(&mhi_chan->lock);
mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
write_unlock_irq(&mhi_chan->lock);
if (!mhi_chan->offload_ch) {
mhi_reset_chan(mhi_cntrl, mhi_chan);
mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
}
dev_dbg(dev, "%d: successfully reset\n", mhi_chan->chan);
mutex_unlock(&mhi_chan->mutex);
}
int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan, unsigned int flags)
{
int ret = 0;
struct device *dev = &mhi_chan->mhi_dev->dev;
if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
dev_err(dev, "Current EE: %s Required EE Mask: 0x%x\n",
TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask);
return -ENOTCONN;
}
mutex_lock(&mhi_chan->mutex);
/* Check if the client manages the channel context for offload channels */
if (!mhi_chan->offload_ch) {
ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan);
if (ret)
goto error_init_chan;
}
ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
MHI_CH_STATE_TYPE_START);
if (ret)
goto error_pm_state;
if (mhi_chan->dir == DMA_FROM_DEVICE)
mhi_chan->pre_alloc = !!(flags & MHI_CH_INBOUND_ALLOC_BUFS);
/* Pre-allocate buffers for the xfer ring */
if (mhi_chan->pre_alloc) {
int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
&mhi_chan->tre_ring);
size_t len = mhi_cntrl->buffer_len;
while (nr_el--) {
void *buf;
struct mhi_buf_info info = { };
buf = kmalloc(len, GFP_KERNEL);
if (!buf) {
ret = -ENOMEM;
goto error_pre_alloc;
}
/* Prepare transfer descriptors */
info.v_addr = buf;
info.cb_buf = buf;
info.len = len;
ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &info, MHI_EOT);
if (ret) {
kfree(buf);
goto error_pre_alloc;
}
}
read_lock_bh(&mhi_cntrl->pm_lock);
if (MHI_DB_ACCESS_VALID(mhi_cntrl)) {
read_lock_irq(&mhi_chan->lock);
mhi_ring_chan_db(mhi_cntrl, mhi_chan);
read_unlock_irq(&mhi_chan->lock);
}
read_unlock_bh(&mhi_cntrl->pm_lock);
}
mutex_unlock(&mhi_chan->mutex);
return 0;
error_pm_state:
if (!mhi_chan->offload_ch)
mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
error_init_chan:
mutex_unlock(&mhi_chan->mutex);
return ret;
error_pre_alloc:
mutex_unlock(&mhi_chan->mutex);
mhi_unprepare_channel(mhi_cntrl, mhi_chan);
return ret;
}
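/*
 * Walk the pending (not yet processed) portion of the event ring and
 * rewrite TX events for @chan as STALE so they are ignored when the
 * ring is processed.
 */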
static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
struct mhi_event *mhi_event,
struct mhi_event_ctxt *er_ctxt,
int chan)
{
struct mhi_ring_element *dev_rp, *local_rp;
struct mhi_ring *ev_ring;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
unsigned long flags;
dma_addr_t ptr;
dev_dbg(dev, "Marking all events for chan: %d as stale\n", chan);
ev_ring = &mhi_event->ring;
/* Mark all pending events related to this channel as STALE */
spin_lock_irqsave(&mhi_event->lock, flags);
ptr = le64_to_cpu(er_ctxt->rp);
if (!is_valid_ring_ptr(ev_ring, ptr)) {
dev_err(&mhi_cntrl->mhi_dev->dev,
"Event ring rp points outside of the event ring\n");
dev_rp = ev_ring->rp;
} else {
dev_rp = mhi_to_virtual(ev_ring, ptr);
}
local_rp = ev_ring->rp;
while (dev_rp != local_rp) {
if (MHI_TRE_GET_EV_TYPE(local_rp) == MHI_PKT_TYPE_TX_EVENT &&
chan == MHI_TRE_GET_EV_CHID(local_rp))
local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan,
MHI_PKT_TYPE_STALE_EVENT);
local_rp++;
if (local_rp == (ev_ring->base + ev_ring->len))
local_rp = ev_ring->base;
}
dev_dbg(dev, "Finished marking events as stale events\n");
spin_unlock_irqrestore(&mhi_event->lock, flags);
}
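/*
 * Flush all buffers still queued on the channel: unmap them, drop the
 * runtime PM references taken at queue time, and either free
 * pre-allocated buffers or complete them back to the client with
 * -ENOTCONN.
 */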
static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan)
{
struct mhi_ring *buf_ring, *tre_ring;
struct mhi_result result;
/* Reset any pending buffers */
buf_ring = &mhi_chan->buf_ring;
tre_ring = &mhi_chan->tre_ring;
result.transaction_status = -ENOTCONN;
result.bytes_xferd = 0;
while (tre_ring->rp != tre_ring->wp) {
struct mhi_buf_info *buf_info = buf_ring->rp;
if (mhi_chan->dir == DMA_TO_DEVICE) {
atomic_dec(&mhi_cntrl->pending_pkts);
/* Release the reference got from mhi_queue() */
mhi_cntrl->runtime_put(mhi_cntrl);
}
if (!buf_info->pre_mapped)
mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
mhi_del_ring_element(mhi_cntrl, buf_ring);
mhi_del_ring_element(mhi_cntrl, tre_ring);
if (mhi_chan->pre_alloc) {
kfree(buf_info->cb_buf);
} else {
result.buf_addr = buf_info->cb_buf;
mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
}
}
}
void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
{
struct mhi_event *mhi_event;
struct mhi_event_ctxt *er_ctxt;
int chan = mhi_chan->chan;
/* Nothing to reset, client doesn't queue buffers */
if (mhi_chan->offload_ch)
return;
read_lock_bh(&mhi_cntrl->pm_lock);
mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index];
mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan);
mhi_reset_data_chan(mhi_cntrl, mhi_chan);
read_unlock_bh(&mhi_cntrl->pm_lock);
}
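/*
 * Prepare both directions of the device's channel pair; on failure,
 * unwind any direction already prepared.
 */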
static int __mhi_prepare_for_transfer(struct mhi_device *mhi_dev, unsigned int flags)
{
int ret, dir;
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
struct mhi_chan *mhi_chan;
for (dir = 0; dir < 2; dir++) {
mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
if (!mhi_chan)
continue;
ret = mhi_prepare_channel(mhi_cntrl, mhi_chan, flags);
if (ret)
goto error_open_chan;
}
return 0;
error_open_chan:
for (--dir; dir >= 0; dir--) {
mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
if (!mhi_chan)
continue;
mhi_unprepare_channel(mhi_cntrl, mhi_chan);
}
return ret;
}
int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
{
return __mhi_prepare_for_transfer(mhi_dev, 0);
}
EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer);
int mhi_prepare_for_transfer_autoqueue(struct mhi_device *mhi_dev)
{
return __mhi_prepare_for_transfer(mhi_dev, MHI_CH_INBOUND_ALLOC_BUFS);
}
EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer_autoqueue);
void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
struct mhi_chan *mhi_chan;
int dir;
for (dir = 0; dir < 2; dir++) {
mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
if (!mhi_chan)
continue;
mhi_unprepare_channel(mhi_cntrl, mhi_chan);
}
}
EXPORT_SYMBOL_GPL(mhi_unprepare_from_transfer);
| linux-master | drivers/bus/mhi/host/main.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* MHI PCI driver - MHI over PCI controller driver
*
* This module is a generic driver for registering MHI-over-PCI devices,
* such as PCIe QCOM modems.
*
* Copyright (C) 2020 Linaro Ltd <[email protected]>
*/
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#define MHI_PCI_DEFAULT_BAR_NUM 0
#define MHI_POST_RESET_DELAY_MS 2000
#define HEALTH_CHECK_PERIOD (HZ * 2)
/* PCI VID definitions */
#define PCI_VENDOR_ID_THALES 0x1269
#define PCI_VENDOR_ID_QUECTEL 0x1eac
/**
* struct mhi_pci_dev_info - MHI PCI device specific information
* @config: MHI controller configuration
* @name: name of the PCI module
* @fw: firmware path (if any)
* @edl: emergency download mode firmware path (if any)
* @bar_num: PCI base address register to use for MHI MMIO register space
* @dma_data_width: DMA transfer word size (32 or 64 bits)
* @mru_default: default MRU size for MBIM network packets
* @sideband_wake: Devices using dedicated sideband GPIO for wakeup instead
* of inband wake support (such as sdx24)
*/
struct mhi_pci_dev_info {
const struct mhi_controller_config *config;
const char *name;
const char *fw;
const char *edl;
unsigned int bar_num;
unsigned int dma_data_width;
unsigned int mru_default;
bool sideband_wake;
};
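/*
 * The helper macros below build the common channel and event ring
 * configurations (UL/DL software channels, SBL/FP variants, hardware
 * accelerated channels and their event rings) used by the per-device
 * tables that follow.
 */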
#define MHI_CHANNEL_CONFIG_UL(ch_num, ch_name, el_count, ev_ring) \
{ \
.num = ch_num, \
.name = ch_name, \
.num_elements = el_count, \
.event_ring = ev_ring, \
.dir = DMA_TO_DEVICE, \
.ee_mask = BIT(MHI_EE_AMSS), \
.pollcfg = 0, \
.doorbell = MHI_DB_BRST_DISABLE, \
.lpm_notify = false, \
.offload_channel = false, \
.doorbell_mode_switch = false, \
}
#define MHI_CHANNEL_CONFIG_DL(ch_num, ch_name, el_count, ev_ring) \
{ \
.num = ch_num, \
.name = ch_name, \
.num_elements = el_count, \
.event_ring = ev_ring, \
.dir = DMA_FROM_DEVICE, \
.ee_mask = BIT(MHI_EE_AMSS), \
.pollcfg = 0, \
.doorbell = MHI_DB_BRST_DISABLE, \
.lpm_notify = false, \
.offload_channel = false, \
.doorbell_mode_switch = false, \
}
#define MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(ch_num, ch_name, el_count, ev_ring) \
{ \
.num = ch_num, \
.name = ch_name, \
.num_elements = el_count, \
.event_ring = ev_ring, \
.dir = DMA_FROM_DEVICE, \
.ee_mask = BIT(MHI_EE_AMSS), \
.pollcfg = 0, \
.doorbell = MHI_DB_BRST_DISABLE, \
.lpm_notify = false, \
.offload_channel = false, \
.doorbell_mode_switch = false, \
.auto_queue = true, \
}
#define MHI_EVENT_CONFIG_CTRL(ev_ring, el_count) \
{ \
.num_elements = el_count, \
.irq_moderation_ms = 0, \
.irq = (ev_ring) + 1, \
.priority = 1, \
.mode = MHI_DB_BRST_DISABLE, \
.data_type = MHI_ER_CTRL, \
.hardware_event = false, \
.client_managed = false, \
.offload_channel = false, \
}
#define MHI_CHANNEL_CONFIG_HW_UL(ch_num, ch_name, el_count, ev_ring) \
{ \
.num = ch_num, \
.name = ch_name, \
.num_elements = el_count, \
.event_ring = ev_ring, \
.dir = DMA_TO_DEVICE, \
.ee_mask = BIT(MHI_EE_AMSS), \
.pollcfg = 0, \
.doorbell = MHI_DB_BRST_ENABLE, \
.lpm_notify = false, \
.offload_channel = false, \
.doorbell_mode_switch = true, \
}
#define MHI_CHANNEL_CONFIG_HW_DL(ch_num, ch_name, el_count, ev_ring) \
{ \
.num = ch_num, \
.name = ch_name, \
.num_elements = el_count, \
.event_ring = ev_ring, \
.dir = DMA_FROM_DEVICE, \
.ee_mask = BIT(MHI_EE_AMSS), \
.pollcfg = 0, \
.doorbell = MHI_DB_BRST_ENABLE, \
.lpm_notify = false, \
.offload_channel = false, \
.doorbell_mode_switch = true, \
}
#define MHI_CHANNEL_CONFIG_UL_SBL(ch_num, ch_name, el_count, ev_ring) \
{ \
.num = ch_num, \
.name = ch_name, \
.num_elements = el_count, \
.event_ring = ev_ring, \
.dir = DMA_TO_DEVICE, \
.ee_mask = BIT(MHI_EE_SBL), \
.pollcfg = 0, \
.doorbell = MHI_DB_BRST_DISABLE, \
.lpm_notify = false, \
.offload_channel = false, \
.doorbell_mode_switch = false, \
}
#define MHI_CHANNEL_CONFIG_DL_SBL(ch_num, ch_name, el_count, ev_ring) \
{ \
.num = ch_num, \
.name = ch_name, \
.num_elements = el_count, \
.event_ring = ev_ring, \
.dir = DMA_FROM_DEVICE, \
.ee_mask = BIT(MHI_EE_SBL), \
.pollcfg = 0, \
.doorbell = MHI_DB_BRST_DISABLE, \
.lpm_notify = false, \
.offload_channel = false, \
.doorbell_mode_switch = false, \
}
#define MHI_CHANNEL_CONFIG_UL_FP(ch_num, ch_name, el_count, ev_ring) \
{ \
.num = ch_num, \
.name = ch_name, \
.num_elements = el_count, \
.event_ring = ev_ring, \
.dir = DMA_TO_DEVICE, \
.ee_mask = BIT(MHI_EE_FP), \
.pollcfg = 0, \
.doorbell = MHI_DB_BRST_DISABLE, \
.lpm_notify = false, \
.offload_channel = false, \
.doorbell_mode_switch = false, \
}
#define MHI_CHANNEL_CONFIG_DL_FP(ch_num, ch_name, el_count, ev_ring) \
{ \
.num = ch_num, \
.name = ch_name, \
.num_elements = el_count, \
.event_ring = ev_ring, \
.dir = DMA_FROM_DEVICE, \
.ee_mask = BIT(MHI_EE_FP), \
.pollcfg = 0, \
.doorbell = MHI_DB_BRST_DISABLE, \
.lpm_notify = false, \
.offload_channel = false, \
.doorbell_mode_switch = false, \
}
#define MHI_EVENT_CONFIG_DATA(ev_ring, el_count) \
{ \
.num_elements = el_count, \
.irq_moderation_ms = 5, \
.irq = (ev_ring) + 1, \
.priority = 1, \
.mode = MHI_DB_BRST_DISABLE, \
.data_type = MHI_ER_DATA, \
.hardware_event = false, \
.client_managed = false, \
.offload_channel = false, \
}
#define MHI_EVENT_CONFIG_SW_DATA(ev_ring, el_count) \
{ \
.num_elements = el_count, \
.irq_moderation_ms = 0, \
.irq = (ev_ring) + 1, \
.priority = 1, \
.mode = MHI_DB_BRST_DISABLE, \
.data_type = MHI_ER_DATA, \
.hardware_event = false, \
.client_managed = false, \
.offload_channel = false, \
}
#define MHI_EVENT_CONFIG_HW_DATA(ev_ring, el_count, ch_num) \
{ \
.num_elements = el_count, \
.irq_moderation_ms = 1, \
.irq = (ev_ring) + 1, \
.priority = 1, \
.mode = MHI_DB_BRST_DISABLE, \
.data_type = MHI_ER_DATA, \
.hardware_event = true, \
.client_managed = false, \
.offload_channel = false, \
.channel = ch_num, \
}
static const struct mhi_channel_config modem_qcom_v1_mhi_channels[] = {
MHI_CHANNEL_CONFIG_UL(4, "DIAG", 16, 1),
MHI_CHANNEL_CONFIG_DL(5, "DIAG", 16, 1),
MHI_CHANNEL_CONFIG_UL(12, "MBIM", 4, 0),
MHI_CHANNEL_CONFIG_DL(13, "MBIM", 4, 0),
MHI_CHANNEL_CONFIG_UL(14, "QMI", 4, 0),
MHI_CHANNEL_CONFIG_DL(15, "QMI", 4, 0),
MHI_CHANNEL_CONFIG_UL(20, "IPCR", 8, 0),
MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 8, 0),
MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
MHI_CHANNEL_CONFIG_UL(46, "IP_SW0", 64, 2),
MHI_CHANNEL_CONFIG_DL(47, "IP_SW0", 64, 3),
MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 4),
MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 5),
};
static struct mhi_event_config modem_qcom_v1_mhi_events[] = {
/* first ring is control+data ring */
MHI_EVENT_CONFIG_CTRL(0, 64),
/* DIAG dedicated event ring */
MHI_EVENT_CONFIG_DATA(1, 128),
/* Software channels dedicated event ring */
MHI_EVENT_CONFIG_SW_DATA(2, 64),
MHI_EVENT_CONFIG_SW_DATA(3, 64),
/* Hardware channels request dedicated hardware event rings */
MHI_EVENT_CONFIG_HW_DATA(4, 1024, 100),
MHI_EVENT_CONFIG_HW_DATA(5, 2048, 101)
};
static const struct mhi_controller_config modem_qcom_v1_mhiv_config = {
.max_channels = 128,
.timeout_ms = 8000,
.num_channels = ARRAY_SIZE(modem_qcom_v1_mhi_channels),
.ch_cfg = modem_qcom_v1_mhi_channels,
.num_events = ARRAY_SIZE(modem_qcom_v1_mhi_events),
.event_cfg = modem_qcom_v1_mhi_events,
};
static const struct mhi_pci_dev_info mhi_qcom_sdx65_info = {
.name = "qcom-sdx65m",
.fw = "qcom/sdx65m/xbl.elf",
.edl = "qcom/sdx65m/edl.mbn",
.config = &modem_qcom_v1_mhiv_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
.sideband_wake = false,
};
static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = {
.name = "qcom-sdx55m",
.fw = "qcom/sdx55m/sbl1.mbn",
.edl = "qcom/sdx55m/edl.mbn",
.config = &modem_qcom_v1_mhiv_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
.mru_default = 32768,
.sideband_wake = false,
};
static const struct mhi_pci_dev_info mhi_qcom_sdx24_info = {
.name = "qcom-sdx24",
.edl = "qcom/prog_firehose_sdx24.mbn",
.config = &modem_qcom_v1_mhiv_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
.sideband_wake = true,
};
static const struct mhi_channel_config mhi_quectel_em1xx_channels[] = {
MHI_CHANNEL_CONFIG_UL(0, "NMEA", 32, 0),
MHI_CHANNEL_CONFIG_DL(1, "NMEA", 32, 0),
MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0),
MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 1),
MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1),
MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
/* The EDL firmware is a flash-programmer exposing firehose protocol */
MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
};
static struct mhi_event_config mhi_quectel_em1xx_events[] = {
MHI_EVENT_CONFIG_CTRL(0, 128),
MHI_EVENT_CONFIG_DATA(1, 128),
MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101)
};
static const struct mhi_controller_config modem_quectel_em1xx_config = {
.max_channels = 128,
.timeout_ms = 20000,
.num_channels = ARRAY_SIZE(mhi_quectel_em1xx_channels),
.ch_cfg = mhi_quectel_em1xx_channels,
.num_events = ARRAY_SIZE(mhi_quectel_em1xx_events),
.event_cfg = mhi_quectel_em1xx_events,
};
static const struct mhi_pci_dev_info mhi_quectel_em1xx_info = {
.name = "quectel-em1xx",
.edl = "qcom/prog_firehose_sdx24.mbn",
.config = &modem_quectel_em1xx_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
.mru_default = 32768,
.sideband_wake = true,
};
static const struct mhi_pci_dev_info mhi_quectel_rm5xx_info = {
.name = "quectel-rm5xx",
.edl = "qcom/prog_firehose_sdx6x.elf",
.config = &modem_quectel_em1xx_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
.mru_default = 32768,
.sideband_wake = true,
};
static const struct mhi_channel_config mhi_foxconn_sdx55_channels[] = {
MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 32, 0),
MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 32, 0),
MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 1),
MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1),
MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
};
static struct mhi_event_config mhi_foxconn_sdx55_events[] = {
MHI_EVENT_CONFIG_CTRL(0, 128),
MHI_EVENT_CONFIG_DATA(1, 128),
MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101)
};
static const struct mhi_controller_config modem_foxconn_sdx55_config = {
.max_channels = 128,
.timeout_ms = 20000,
.num_channels = ARRAY_SIZE(mhi_foxconn_sdx55_channels),
.ch_cfg = mhi_foxconn_sdx55_channels,
.num_events = ARRAY_SIZE(mhi_foxconn_sdx55_events),
.event_cfg = mhi_foxconn_sdx55_events,
};
static const struct mhi_pci_dev_info mhi_foxconn_sdx24_info = {
.name = "foxconn-sdx24",
.config = &modem_foxconn_sdx55_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
.mru_default = 32768,
.sideband_wake = false,
};
static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = {
.name = "foxconn-sdx55",
.fw = "qcom/sdx55m/sbl1.mbn",
.edl = "qcom/sdx55m/edl.mbn",
.config = &modem_foxconn_sdx55_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
.mru_default = 32768,
.sideband_wake = false,
};
static const struct mhi_pci_dev_info mhi_foxconn_sdx65_info = {
.name = "foxconn-sdx65",
.config = &modem_foxconn_sdx55_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
.mru_default = 32768,
.sideband_wake = false,
};
static const struct mhi_channel_config mhi_mv3x_channels[] = {
MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 64, 0),
MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 64, 0),
/* MBIM Control Channel */
MHI_CHANNEL_CONFIG_UL(12, "MBIM", 64, 0),
MHI_CHANNEL_CONFIG_DL(13, "MBIM", 64, 0),
/* MBIM Data Channel */
MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 512, 2),
MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 512, 3),
};
static struct mhi_event_config mhi_mv3x_events[] = {
MHI_EVENT_CONFIG_CTRL(0, 256),
MHI_EVENT_CONFIG_DATA(1, 256),
MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101),
};
static const struct mhi_controller_config modem_mv3x_config = {
.max_channels = 128,
.timeout_ms = 20000,
.num_channels = ARRAY_SIZE(mhi_mv3x_channels),
.ch_cfg = mhi_mv3x_channels,
.num_events = ARRAY_SIZE(mhi_mv3x_events),
.event_cfg = mhi_mv3x_events,
};
static const struct mhi_pci_dev_info mhi_mv31_info = {
.name = "cinterion-mv31",
.config = &modem_mv3x_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
.mru_default = 32768,
};
static const struct mhi_pci_dev_info mhi_mv32_info = {
.name = "cinterion-mv32",
.config = &modem_mv3x_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
.mru_default = 32768,
};
static const struct mhi_channel_config mhi_sierra_em919x_channels[] = {
MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 256, 0),
MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 0),
MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 0),
MHI_CHANNEL_CONFIG_UL(12, "MBIM", 128, 0),
MHI_CHANNEL_CONFIG_DL(13, "MBIM", 128, 0),
MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0),
MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0),
MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 512, 1),
MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 512, 2),
};
static struct mhi_event_config modem_sierra_em919x_mhi_events[] = {
/* first ring is control+data and DIAG ring */
MHI_EVENT_CONFIG_CTRL(0, 2048),
/* Hardware channels request dedicated hardware event rings */
MHI_EVENT_CONFIG_HW_DATA(1, 2048, 100),
MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101)
};
static const struct mhi_controller_config modem_sierra_em919x_config = {
.max_channels = 128,
.timeout_ms = 24000,
.num_channels = ARRAY_SIZE(mhi_sierra_em919x_channels),
.ch_cfg = mhi_sierra_em919x_channels,
.num_events = ARRAY_SIZE(modem_sierra_em919x_mhi_events),
.event_cfg = modem_sierra_em919x_mhi_events,
};
static const struct mhi_pci_dev_info mhi_sierra_em919x_info = {
.name = "sierra-em919x",
.config = &modem_sierra_em919x_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
.sideband_wake = false,
};
static const struct mhi_channel_config mhi_telit_fn980_hw_v1_channels[] = {
MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0),
MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0),
MHI_CHANNEL_CONFIG_UL(20, "IPCR", 16, 0),
MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 16, 0),
MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 1),
MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 2),
};
static struct mhi_event_config mhi_telit_fn980_hw_v1_events[] = {
MHI_EVENT_CONFIG_CTRL(0, 128),
MHI_EVENT_CONFIG_HW_DATA(1, 1024, 100),
MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101)
};
static struct mhi_controller_config modem_telit_fn980_hw_v1_config = {
.max_channels = 128,
.timeout_ms = 20000,
.num_channels = ARRAY_SIZE(mhi_telit_fn980_hw_v1_channels),
.ch_cfg = mhi_telit_fn980_hw_v1_channels,
.num_events = ARRAY_SIZE(mhi_telit_fn980_hw_v1_events),
.event_cfg = mhi_telit_fn980_hw_v1_events,
};
static const struct mhi_pci_dev_info mhi_telit_fn980_hw_v1_info = {
.name = "telit-fn980-hwv1",
.fw = "qcom/sdx55m/sbl1.mbn",
.edl = "qcom/sdx55m/edl.mbn",
.config = &modem_telit_fn980_hw_v1_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
.mru_default = 32768,
.sideband_wake = false,
};
static const struct mhi_channel_config mhi_telit_fn990_channels[] = {
MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0),
MHI_CHANNEL_CONFIG_UL(4, "DIAG", 64, 1),
MHI_CHANNEL_CONFIG_DL(5, "DIAG", 64, 1),
MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
MHI_CHANNEL_CONFIG_UL(92, "DUN2", 32, 1),
MHI_CHANNEL_CONFIG_DL(93, "DUN2", 32, 1),
MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
};
static struct mhi_event_config mhi_telit_fn990_events[] = {
MHI_EVENT_CONFIG_CTRL(0, 128),
MHI_EVENT_CONFIG_DATA(1, 128),
MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
MHI_EVENT_CONFIG_HW_DATA(3, 2048, 101)
};
static const struct mhi_controller_config modem_telit_fn990_config = {
.max_channels = 128,
.timeout_ms = 20000,
.num_channels = ARRAY_SIZE(mhi_telit_fn990_channels),
.ch_cfg = mhi_telit_fn990_channels,
.num_events = ARRAY_SIZE(mhi_telit_fn990_events),
.event_cfg = mhi_telit_fn990_events,
};
static const struct mhi_pci_dev_info mhi_telit_fn990_info = {
.name = "telit-fn990",
.config = &modem_telit_fn990_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
.sideband_wake = false,
.mru_default = 32768,
};
/* Keep the list sorted based on the PID. New VID should be added as the last entry */
static const struct pci_device_id mhi_pci_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304),
.driver_data = (kernel_ulong_t) &mhi_qcom_sdx24_info },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, PCI_VENDOR_ID_QCOM, 0x010c),
.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
/* EM919x (sdx55), use the same vid:pid as qcom-sdx55m */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x18d7, 0x0200),
.driver_data = (kernel_ulong_t) &mhi_sierra_em919x_info },
/* Telit FN980 hardware revision v1 */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x1C5D, 0x2000),
.driver_data = (kernel_ulong_t) &mhi_telit_fn980_hw_v1_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0306),
.driver_data = (kernel_ulong_t) &mhi_qcom_sdx55_info },
/* Telit FN990 */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2010),
.driver_data = (kernel_ulong_t) &mhi_telit_fn990_info },
/* Telit FE990 */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2015),
.driver_data = (kernel_ulong_t) &mhi_telit_fn990_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308),
.driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1001), /* EM120R-GL (sdx24) */
.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1002), /* EM160R-GL (sdx24) */
.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
/* RM520N-GL (sdx6x), eSIM */
{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1004),
.driver_data = (kernel_ulong_t) &mhi_quectel_rm5xx_info },
/* RM520N-GL (sdx6x), Lenovo variant */
{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1007),
.driver_data = (kernel_ulong_t) &mhi_quectel_rm5xx_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x100d), /* EM160R-GL (sdx24) */
.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x2001), /* EM120R-GL for FCCL (sdx24) */
.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
/* T99W175 (sdx55), Both for eSIM and Non-eSIM */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0ab),
.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
/* DW5930e (sdx55), With eSIM, It's also T99W175 */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b0),
.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
/* DW5930e (sdx55), Non-eSIM, It's also T99W175 */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b1),
.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
/* T99W175 (sdx55), Based on Qualcomm new baseline */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0bf),
.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
/* T99W175 (sdx55) */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0c3),
.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
/* T99W368 (sdx65) */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d8),
.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
/* T99W373 (sdx62) */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d9),
.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
/* T99W510 (sdx24), variant 1 */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f0),
.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx24_info },
/* T99W510 (sdx24), variant 2 */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f1),
.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx24_info },
/* T99W510 (sdx24), variant 3 */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f2),
.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx24_info },
/* DW5932e-eSIM (sdx62), With eSIM */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f5),
.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
/* DW5932e (sdx62), Non-eSIM */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f9),
.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
/* MV31-W (Cinterion) */
{ PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00b3),
.driver_data = (kernel_ulong_t) &mhi_mv31_info },
/* MV31-W (Cinterion), based on new baseline */
{ PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00b4),
.driver_data = (kernel_ulong_t) &mhi_mv31_info },
/* MV32-WA (Cinterion) */
{ PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00ba),
.driver_data = (kernel_ulong_t) &mhi_mv32_info },
/* MV32-WB (Cinterion) */
{ PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00bb),
.driver_data = (kernel_ulong_t) &mhi_mv32_info },
/* T99W175 (sdx55), HP variant */
{ PCI_DEVICE(0x03f0, 0x0a6c),
.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
{ }
};
MODULE_DEVICE_TABLE(pci, mhi_pci_id_table);
enum mhi_pci_device_status {
MHI_PCI_DEV_STARTED,
MHI_PCI_DEV_SUSPENDED,
};
struct mhi_pci_device {
struct mhi_controller mhi_cntrl;
struct pci_saved_state *pci_state;
struct work_struct recovery_work;
struct timer_list health_check_timer;
unsigned long status;
};
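/* MMIO register accessors used by the MHI core to access device registers */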
static int mhi_pci_read_reg(struct mhi_controller *mhi_cntrl,
void __iomem *addr, u32 *out)
{
*out = readl(addr);
return 0;
}
static void mhi_pci_write_reg(struct mhi_controller *mhi_cntrl,
void __iomem *addr, u32 val)
{
writel(val, addr);
}
static void mhi_pci_status_cb(struct mhi_controller *mhi_cntrl,
enum mhi_callback cb)
{
struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
switch (cb) {
case MHI_CB_FATAL_ERROR:
case MHI_CB_SYS_ERROR:
dev_warn(&pdev->dev, "firmware crashed (%u)\n", cb);
pm_runtime_forbid(&pdev->dev);
break;
case MHI_CB_EE_MISSION_MODE:
pm_runtime_allow(&pdev->dev);
break;
default:
break;
}
}
static void mhi_pci_wake_get_nop(struct mhi_controller *mhi_cntrl, bool force)
{
/* no-op */
}
static void mhi_pci_wake_put_nop(struct mhi_controller *mhi_cntrl, bool override)
{
/* no-op */
}
static void mhi_pci_wake_toggle_nop(struct mhi_controller *mhi_cntrl)
{
/* no-op */
}
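/*
 * Check that the device is still reachable over the PCI link: a config
 * read of the vendor ID returning all-ones or zero means the device
 * dropped off the bus.
 */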
static bool mhi_pci_is_alive(struct mhi_controller *mhi_cntrl)
{
struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
u16 vendor = 0;
if (pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor))
return false;
if (vendor == (u16) ~0 || vendor == 0)
return false;
return true;
}
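/* Enable the PCI device, map the MHI register BAR and set up DMA mastering */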
static int mhi_pci_claim(struct mhi_controller *mhi_cntrl,
unsigned int bar_num, u64 dma_mask)
{
struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
int err;
err = pci_assign_resource(pdev, bar_num);
if (err)
return err;
err = pcim_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "failed to enable pci device: %d\n", err);
return err;
}
err = pcim_iomap_regions(pdev, 1 << bar_num, pci_name(pdev));
if (err) {
dev_err(&pdev->dev, "failed to map pci region: %d\n", err);
return err;
}
mhi_cntrl->regs = pcim_iomap_table(pdev)[bar_num];
mhi_cntrl->reg_len = pci_resource_len(pdev, bar_num);
err = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
if (err) {
dev_err(&pdev->dev, "Cannot set proper DMA mask\n");
return err;
}
pci_set_master(pdev);
return 0;
}
static int mhi_pci_get_irqs(struct mhi_controller *mhi_cntrl,
const struct mhi_controller_config *mhi_cntrl_config)
{
struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
int nr_vectors, i;
int *irq;
/*
 * Allocate one MSI vector for BHI + one vector per event ring, ideally...
 * No explicit pci_free_irq_vectors() is required; it is handled by pcim_release.
 */
mhi_cntrl->nr_irqs = 1 + mhi_cntrl_config->num_events;
nr_vectors = pci_alloc_irq_vectors(pdev, 1, mhi_cntrl->nr_irqs, PCI_IRQ_MSI);
if (nr_vectors < 0) {
dev_err(&pdev->dev, "Error allocating MSI vectors %d\n",
nr_vectors);
return nr_vectors;
}
if (nr_vectors < mhi_cntrl->nr_irqs) {
dev_warn(&pdev->dev, "using shared MSI\n");
/* Patch msi vectors, use only one (shared) */
for (i = 0; i < mhi_cntrl_config->num_events; i++)
mhi_cntrl_config->event_cfg[i].irq = 0;
mhi_cntrl->nr_irqs = 1;
}
irq = devm_kcalloc(&pdev->dev, mhi_cntrl->nr_irqs, sizeof(int), GFP_KERNEL);
if (!irq)
return -ENOMEM;
for (i = 0; i < mhi_cntrl->nr_irqs; i++) {
int vector = i >= nr_vectors ? (nr_vectors - 1) : i;
irq[i] = pci_irq_vector(pdev, vector);
}
mhi_cntrl->irq = irq;
return 0;
}
static int mhi_pci_runtime_get(struct mhi_controller *mhi_cntrl)
{
/* The runtime_get() MHI callback means:
* Do whatever is requested to leave M3.
*/
return pm_runtime_get(mhi_cntrl->cntrl_dev);
}
static void mhi_pci_runtime_put(struct mhi_controller *mhi_cntrl)
{
/* The runtime_put() MHI callback means:
 * The device can be moved into the M3 state.
 */
pm_runtime_mark_last_busy(mhi_cntrl->cntrl_dev);
pm_runtime_put(mhi_cntrl->cntrl_dev);
}
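/*
 * Recovery handler: tear down the MHI stack, restore the saved PCI state,
 * then power the device back up; fall back to a PCI function reset if the
 * device does not come back.
 */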
static void mhi_pci_recovery_work(struct work_struct *work)
{
struct mhi_pci_device *mhi_pdev = container_of(work, struct mhi_pci_device,
recovery_work);
struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
int err;
dev_warn(&pdev->dev, "device recovery started\n");
del_timer(&mhi_pdev->health_check_timer);
pm_runtime_forbid(&pdev->dev);
/* Clean up MHI state */
if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
mhi_power_down(mhi_cntrl, false);
mhi_unprepare_after_power_down(mhi_cntrl);
}
pci_set_power_state(pdev, PCI_D0);
pci_load_saved_state(pdev, mhi_pdev->pci_state);
pci_restore_state(pdev);
if (!mhi_pci_is_alive(mhi_cntrl))
goto err_try_reset;
err = mhi_prepare_for_power_up(mhi_cntrl);
if (err)
goto err_try_reset;
err = mhi_sync_power_up(mhi_cntrl);
if (err)
goto err_unprepare;
dev_dbg(&pdev->dev, "Recovery completed\n");
set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
return;
err_unprepare:
mhi_unprepare_after_power_down(mhi_cntrl);
err_try_reset:
if (pci_reset_function(pdev))
dev_err(&pdev->dev, "Recovery failed\n");
}
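/* Periodic liveness check; schedules asynchronous recovery if the device stops responding */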
static void health_check(struct timer_list *t)
{
struct mhi_pci_device *mhi_pdev = from_timer(mhi_pdev, t, health_check_timer);
struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
test_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
return;
if (!mhi_pci_is_alive(mhi_cntrl)) {
dev_err(mhi_cntrl->cntrl_dev, "Device died\n");
queue_work(system_long_wq, &mhi_pdev->recovery_work);
return;
}
/* reschedule in two seconds */
mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
}
static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
const struct mhi_pci_dev_info *info = (struct mhi_pci_dev_info *) id->driver_data;
const struct mhi_controller_config *mhi_cntrl_config;
struct mhi_pci_device *mhi_pdev;
struct mhi_controller *mhi_cntrl;
int err;
dev_info(&pdev->dev, "MHI PCI device found: %s\n", info->name);
/* mhi_pdev.mhi_cntrl must be zero-initialized */
mhi_pdev = devm_kzalloc(&pdev->dev, sizeof(*mhi_pdev), GFP_KERNEL);
if (!mhi_pdev)
return -ENOMEM;
INIT_WORK(&mhi_pdev->recovery_work, mhi_pci_recovery_work);
timer_setup(&mhi_pdev->health_check_timer, health_check, 0);
mhi_cntrl_config = info->config;
mhi_cntrl = &mhi_pdev->mhi_cntrl;
mhi_cntrl->cntrl_dev = &pdev->dev;
mhi_cntrl->iova_start = 0;
mhi_cntrl->iova_stop = (dma_addr_t)DMA_BIT_MASK(info->dma_data_width);
mhi_cntrl->fw_image = info->fw;
mhi_cntrl->edl_image = info->edl;
mhi_cntrl->read_reg = mhi_pci_read_reg;
mhi_cntrl->write_reg = mhi_pci_write_reg;
mhi_cntrl->status_cb = mhi_pci_status_cb;
mhi_cntrl->runtime_get = mhi_pci_runtime_get;
mhi_cntrl->runtime_put = mhi_pci_runtime_put;
mhi_cntrl->mru = info->mru_default;
if (info->sideband_wake) {
mhi_cntrl->wake_get = mhi_pci_wake_get_nop;
mhi_cntrl->wake_put = mhi_pci_wake_put_nop;
mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop;
}
err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(info->dma_data_width));
if (err)
return err;
err = mhi_pci_get_irqs(mhi_cntrl, mhi_cntrl_config);
if (err)
return err;
pci_set_drvdata(pdev, mhi_pdev);
/* Keep the saved PCI config space at hand for restore after a sudden
 * PCI error: cache the state locally and discard the PCI core's copy.
 */
pci_save_state(pdev);
mhi_pdev->pci_state = pci_store_saved_state(pdev);
pci_load_saved_state(pdev, NULL);
err = mhi_register_controller(mhi_cntrl, mhi_cntrl_config);
if (err)
return err;
/* MHI bus does not power up the controller by default */
err = mhi_prepare_for_power_up(mhi_cntrl);
if (err) {
dev_err(&pdev->dev, "failed to prepare MHI controller\n");
goto err_unregister;
}
err = mhi_sync_power_up(mhi_cntrl);
if (err) {
dev_err(&pdev->dev, "failed to power up MHI controller\n");
goto err_unprepare;
}
set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
/* start health check */
mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
/* Only allow runtime-suspend if PME capable (for wakeup) */
if (pci_pme_capable(pdev, PCI_D3hot)) {
pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
}
return 0;
err_unprepare:
mhi_unprepare_after_power_down(mhi_cntrl);
err_unregister:
mhi_unregister_controller(mhi_cntrl);
return err;
}
static void mhi_pci_remove(struct pci_dev *pdev)
{
struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
del_timer_sync(&mhi_pdev->health_check_timer);
cancel_work_sync(&mhi_pdev->recovery_work);
if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
mhi_power_down(mhi_cntrl, true);
mhi_unprepare_after_power_down(mhi_cntrl);
}
/* Balance the put_noidle() done in probe */
if (pci_pme_capable(pdev, PCI_D3hot))
pm_runtime_get_noresume(&pdev->dev);
mhi_unregister_controller(mhi_cntrl);
}
static void mhi_pci_shutdown(struct pci_dev *pdev)
{
mhi_pci_remove(pdev);
pci_set_power_state(pdev, PCI_D3hot);
}
static void mhi_pci_reset_prepare(struct pci_dev *pdev)
{
struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
dev_info(&pdev->dev, "reset\n");
del_timer(&mhi_pdev->health_check_timer);
/* Clean up MHI state */
if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
mhi_power_down(mhi_cntrl, false);
mhi_unprepare_after_power_down(mhi_cntrl);
}
/* cause internal device reset */
mhi_soc_reset(mhi_cntrl);
/* Make sure the device reset has completed */
msleep(MHI_POST_RESET_DELAY_MS);
}
static void mhi_pci_reset_done(struct pci_dev *pdev)
{
struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
int err;
/* Restore initial known working PCI state */
pci_load_saved_state(pdev, mhi_pdev->pci_state);
pci_restore_state(pdev);
/* Is the device status available? */
if (!mhi_pci_is_alive(mhi_cntrl)) {
dev_err(&pdev->dev, "reset failed\n");
return;
}
err = mhi_prepare_for_power_up(mhi_cntrl);
if (err) {
dev_err(&pdev->dev, "failed to prepare MHI controller\n");
return;
}
err = mhi_sync_power_up(mhi_cntrl);
if (err) {
dev_err(&pdev->dev, "failed to power up MHI controller\n");
mhi_unprepare_after_power_down(mhi_cntrl);
return;
}
set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
}
static pci_ers_result_t mhi_pci_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
dev_err(&pdev->dev, "PCI error detected, state = %u\n", state);
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
/* Clean up MHI state */
if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
mhi_power_down(mhi_cntrl, false);
mhi_unprepare_after_power_down(mhi_cntrl);
} else {
/* Nothing to do */
return PCI_ERS_RESULT_RECOVERED;
}
pci_disable_device(pdev);
return PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t mhi_pci_slot_reset(struct pci_dev *pdev)
{
if (pci_enable_device(pdev)) {
dev_err(&pdev->dev, "Cannot re-enable PCI device after reset.\n");
return PCI_ERS_RESULT_DISCONNECT;
}
return PCI_ERS_RESULT_RECOVERED;
}
static void mhi_pci_io_resume(struct pci_dev *pdev)
{
struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
dev_err(&pdev->dev, "PCI slot reset done\n");
queue_work(system_long_wq, &mhi_pdev->recovery_work);
}
static const struct pci_error_handlers mhi_pci_err_handler = {
.error_detected = mhi_pci_error_detected,
.slot_reset = mhi_pci_slot_reset,
.resume = mhi_pci_io_resume,
.reset_prepare = mhi_pci_reset_prepare,
.reset_done = mhi_pci_reset_done,
};
static int __maybe_unused mhi_pci_runtime_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
int err;
if (test_and_set_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
return 0;
del_timer(&mhi_pdev->health_check_timer);
cancel_work_sync(&mhi_pdev->recovery_work);
if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
mhi_cntrl->ee != MHI_EE_AMSS)
goto pci_suspend; /* Nothing to do at MHI level */
/* Transition to M3 state */
err = mhi_pm_suspend(mhi_cntrl);
if (err) {
dev_err(&pdev->dev, "failed to suspend device: %d\n", err);
clear_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status);
return -EBUSY;
}
pci_suspend:
pci_disable_device(pdev);
pci_wake_from_d3(pdev, true);
return 0;
}
static int __maybe_unused mhi_pci_runtime_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
int err;
if (!test_and_clear_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
return 0;
err = pci_enable_device(pdev);
if (err)
goto err_recovery;
pci_set_master(pdev);
pci_wake_from_d3(pdev, false);
if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
mhi_cntrl->ee != MHI_EE_AMSS)
return 0; /* Nothing to do at MHI level */
/* Exit M3, transition to M0 state */
err = mhi_pm_resume(mhi_cntrl);
if (err) {
dev_err(&pdev->dev, "failed to resume device: %d\n", err);
goto err_recovery;
}
/* Resume health check */
mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
/* It can be a remote wakeup (no mhi runtime_get), update access time */
pm_runtime_mark_last_busy(dev);
return 0;
err_recovery:
/* Do not fail; that would mess up our PCI device state. The device
 * likely lost power (d3cold) and simply needs to be reset from the
 * recovery procedure. Trigger the recovery asynchronously to avoid
 * delaying system suspend exit.
 */
queue_work(system_long_wq, &mhi_pdev->recovery_work);
pm_runtime_mark_last_busy(dev);
return 0;
}
static int __maybe_unused mhi_pci_suspend(struct device *dev)
{
pm_runtime_disable(dev);
return mhi_pci_runtime_suspend(dev);
}
static int __maybe_unused mhi_pci_resume(struct device *dev)
{
int ret;
/* Depending on the platform, the device may have lost power (d3cold),
 * so resume it now to check its state and recover when necessary.
 */
ret = mhi_pci_runtime_resume(dev);
pm_runtime_enable(dev);
return ret;
}
static int __maybe_unused mhi_pci_freeze(struct device *dev)
{
struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
/* We want to stop all operations because hibernation does not
 * guarantee that the device will be in the same state as before
 * freezing, especially if the intermediate restore kernel
 * reinitializes the MHI device with a new context.
 */
flush_work(&mhi_pdev->recovery_work);
if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
mhi_power_down(mhi_cntrl, true);
mhi_unprepare_after_power_down(mhi_cntrl);
}
return 0;
}
static int __maybe_unused mhi_pci_restore(struct device *dev)
{
struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
/* Reinitialize the device */
queue_work(system_long_wq, &mhi_pdev->recovery_work);
return 0;
}
static const struct dev_pm_ops mhi_pci_pm_ops = {
SET_RUNTIME_PM_OPS(mhi_pci_runtime_suspend, mhi_pci_runtime_resume, NULL)
#ifdef CONFIG_PM_SLEEP
.suspend = mhi_pci_suspend,
.resume = mhi_pci_resume,
.freeze = mhi_pci_freeze,
.thaw = mhi_pci_restore,
.poweroff = mhi_pci_freeze,
.restore = mhi_pci_restore,
#endif
};
static struct pci_driver mhi_pci_driver = {
.name = "mhi-pci-generic",
.id_table = mhi_pci_id_table,
.probe = mhi_pci_probe,
.remove = mhi_pci_remove,
.shutdown = mhi_pci_shutdown,
.err_handler = &mhi_pci_err_handler,
.driver.pm = &mhi_pci_pm_ops
};
module_pci_driver(mhi_pci_driver);
MODULE_AUTHOR("Loic Poulain <[email protected]>");
MODULE_DESCRIPTION("Modem Host Interface (MHI) PCI controller driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/bus/mhi/host/pci_generic.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
*
*/
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include "internal.h"
/* Set up the RDDM vector table for the RDDM transfer and program RXVEC */
int mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
struct image_info *img_info)
{
struct mhi_buf *mhi_buf = img_info->mhi_buf;
struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
void __iomem *base = mhi_cntrl->bhie;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
u32 sequence_id;
unsigned int i;
int ret;
for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) {
bhi_vec->dma_addr = mhi_buf->dma_addr;
bhi_vec->size = mhi_buf->len;
}
dev_dbg(dev, "BHIe programming for RDDM\n");
mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS,
upper_32_bits(mhi_buf->dma_addr));
mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS,
lower_32_bits(mhi_buf->dma_addr));
mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_RXVECSTATUS_SEQNUM_BMSK);
ret = mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS,
BHIE_RXVECDB_SEQNUM_BMSK, sequence_id);
if (ret) {
dev_err(dev, "Failed to write sequence ID for BHIE_RXVECDB\n");
return ret;
}
dev_dbg(dev, "Address: %p and len: 0x%zx sequence: %u\n",
&mhi_buf->dma_addr, mhi_buf->len, sequence_id);
return 0;
}
/* Collect RDDM buffer during kernel panic */
static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
{
int ret;
u32 rx_status;
enum mhi_ee_type ee;
const u32 delayus = 2000;
u32 retry = (mhi_cntrl->timeout_ms * 1000) / delayus;
const u32 rddm_timeout_us = 200000;
int rddm_retry = rddm_timeout_us / delayus;
void __iomem *base = mhi_cntrl->bhie;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
dev_dbg(dev, "Entered with pm_state:%s dev_state:%s ee:%s\n",
to_mhi_pm_state_str(mhi_cntrl->pm_state),
mhi_state_str(mhi_cntrl->dev_state),
TO_MHI_EXEC_STR(mhi_cntrl->ee));
/*
 * This should only be executing during a kernel panic, so we expect all
 * other cores to shut down while we're collecting the RDDM buffer. After
 * returning from this function, we expect the device to reset.
 *
 * Normally, we read/write pm_state only after grabbing the pm_lock;
 * since we're in a panic, we skip it. There is also no guarantee that
 * this state change will take effect, since we're setting it without
 * grabbing the pm_lock.
 */
mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
/* The update should take effect immediately */
smp_wmb();
/*
 * Make sure the device is not already in RDDM. If the device asserts
 * and a kernel panic follows, the device will already be in RDDM.
 * In that case, do not trigger SYS ERR again; proceed with waiting for
 * image download completion.
*/
ee = mhi_get_exec_env(mhi_cntrl);
if (ee == MHI_EE_MAX)
goto error_exit_rddm;
if (ee != MHI_EE_RDDM) {
dev_dbg(dev, "Trigger device into RDDM mode using SYS ERR\n");
mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
dev_dbg(dev, "Waiting for device to enter RDDM\n");
while (rddm_retry--) {
ee = mhi_get_exec_env(mhi_cntrl);
if (ee == MHI_EE_RDDM)
break;
udelay(delayus);
}
if (rddm_retry <= 0) {
/* Hardware reset so force device to enter RDDM */
dev_dbg(dev,
"Did not enter RDDM, do a host req reset\n");
mhi_soc_reset(mhi_cntrl);
udelay(delayus);
}
ee = mhi_get_exec_env(mhi_cntrl);
}
dev_dbg(dev,
"Waiting for RDDM image download via BHIe, current EE:%s\n",
TO_MHI_EXEC_STR(ee));
while (retry--) {
ret = mhi_read_reg_field(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS,
BHIE_RXVECSTATUS_STATUS_BMSK, &rx_status);
if (ret)
return -EIO;
if (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL)
return 0;
udelay(delayus);
}
ee = mhi_get_exec_env(mhi_cntrl);
ret = mhi_read_reg(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, &rx_status);
dev_err(dev, "RXVEC_STATUS: 0x%x\n", rx_status);
error_exit_rddm:
dev_err(dev, "RDDM transfer failed. Current EE: %s\n",
TO_MHI_EXEC_STR(ee));
return -EIO;
}
/* Download RDDM image from device */
int mhi_download_rddm_image(struct mhi_controller *mhi_cntrl, bool in_panic)
{
void __iomem *base = mhi_cntrl->bhie;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
u32 rx_status;
if (in_panic)
return __mhi_download_rddm_in_panic(mhi_cntrl);
dev_dbg(dev, "Waiting for RDDM image download via BHIe\n");
/* Wait for the image download to complete */
wait_event_timeout(mhi_cntrl->state_event,
mhi_read_reg_field(mhi_cntrl, base,
BHIE_RXVECSTATUS_OFFS,
BHIE_RXVECSTATUS_STATUS_BMSK,
&rx_status) || rx_status,
msecs_to_jiffies(mhi_cntrl->timeout_ms));
return (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO;
}
EXPORT_SYMBOL_GPL(mhi_download_rddm_image);
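/*
 * Download a firmware image over BHIe by programming the TX vector table
 * registers, ringing the TX doorbell and waiting for transfer completion.
 */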
static int mhi_fw_load_bhie(struct mhi_controller *mhi_cntrl,
const struct mhi_buf *mhi_buf)
{
void __iomem *base = mhi_cntrl->bhie;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
u32 tx_status, sequence_id;
int ret;
read_lock_bh(pm_lock);
if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
read_unlock_bh(pm_lock);
return -EIO;
}
sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_TXVECSTATUS_SEQNUM_BMSK);
dev_dbg(dev, "Starting image download via BHIe. Sequence ID: %u\n",
sequence_id);
mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_HIGH_OFFS,
upper_32_bits(mhi_buf->dma_addr));
mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_LOW_OFFS,
lower_32_bits(mhi_buf->dma_addr));
mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len);
ret = mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS,
BHIE_TXVECDB_SEQNUM_BMSK, sequence_id);
read_unlock_bh(pm_lock);
if (ret)
return ret;
/* Wait for the image download to complete */
ret = wait_event_timeout(mhi_cntrl->state_event,
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
mhi_read_reg_field(mhi_cntrl, base,
BHIE_TXVECSTATUS_OFFS,
BHIE_TXVECSTATUS_STATUS_BMSK,
&tx_status) || tx_status,
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
tx_status != BHIE_TXVECSTATUS_STATUS_XFER_COMPL)
return -EIO;
return (!ret) ? -ETIMEDOUT : 0;
}
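/*
 * Download a single contiguous image (e.g. SBL) over BHI by programming
 * the image address and size and ringing the TX doorbell, then wait for
 * the transfer status.
 */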
static int mhi_fw_load_bhi(struct mhi_controller *mhi_cntrl,
dma_addr_t dma_addr,
size_t size)
{
u32 tx_status, val, session_id;
int i, ret;
void __iomem *base = mhi_cntrl->bhi;
rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
struct {
char *name;
u32 offset;
} error_reg[] = {
{ "ERROR_CODE", BHI_ERRCODE },
{ "ERROR_DBG1", BHI_ERRDBG1 },
{ "ERROR_DBG2", BHI_ERRDBG2 },
{ "ERROR_DBG3", BHI_ERRDBG3 },
{ NULL },
};
read_lock_bh(pm_lock);
if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
read_unlock_bh(pm_lock);
goto invalid_pm_state;
}
session_id = MHI_RANDOM_U32_NONZERO(BHI_TXDB_SEQNUM_BMSK);
dev_dbg(dev, "Starting image download via BHI. Session ID: %u\n",
session_id);
mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0);
mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH,
upper_32_bits(dma_addr));
mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW,
lower_32_bits(dma_addr));
mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, size);
mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, session_id);
read_unlock_bh(pm_lock);
/* Wait for the image download to complete */
ret = wait_event_timeout(mhi_cntrl->state_event,
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
mhi_read_reg_field(mhi_cntrl, base, BHI_STATUS,
BHI_STATUS_MASK, &tx_status) || tx_status,
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
goto invalid_pm_state;
if (tx_status == BHI_STATUS_ERROR) {
dev_err(dev, "Image transfer failed\n");
read_lock_bh(pm_lock);
if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
for (i = 0; error_reg[i].name; i++) {
ret = mhi_read_reg(mhi_cntrl, base,
error_reg[i].offset, &val);
if (ret)
break;
dev_err(dev, "Reg: %s value: 0x%x\n",
error_reg[i].name, val);
}
}
read_unlock_bh(pm_lock);
goto invalid_pm_state;
}
return (!ret) ? -ETIMEDOUT : 0;
invalid_pm_state:
return -EIO;
}
void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
struct image_info *image_info)
{
int i;
struct mhi_buf *mhi_buf = image_info->mhi_buf;
for (i = 0; i < image_info->entries; i++, mhi_buf++)
dma_free_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len,
mhi_buf->buf, mhi_buf->dma_addr);
kfree(image_info->mhi_buf);
kfree(image_info);
}
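/*
 * Allocate DMA-coherent segments for a BHIE image. One extra entry is
 * allocated at the end to hold the vector table describing the segments.
 */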
int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
struct image_info **image_info,
size_t alloc_size)
{
size_t seg_size = mhi_cntrl->seg_len;
int segments = DIV_ROUND_UP(alloc_size, seg_size) + 1;
int i;
struct image_info *img_info;
struct mhi_buf *mhi_buf;
img_info = kzalloc(sizeof(*img_info), GFP_KERNEL);
if (!img_info)
return -ENOMEM;
/* Allocate memory for entries */
img_info->mhi_buf = kcalloc(segments, sizeof(*img_info->mhi_buf),
GFP_KERNEL);
if (!img_info->mhi_buf)
goto error_alloc_mhi_buf;
/* Allocate and populate vector table */
mhi_buf = img_info->mhi_buf;
for (i = 0; i < segments; i++, mhi_buf++) {
size_t vec_size = seg_size;
/* Vector table is the last entry */
if (i == segments - 1)
vec_size = sizeof(struct bhi_vec_entry) * i;
mhi_buf->len = vec_size;
mhi_buf->buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
vec_size, &mhi_buf->dma_addr,
GFP_KERNEL);
if (!mhi_buf->buf)
goto error_alloc_segment;
}
img_info->bhi_vec = img_info->mhi_buf[segments - 1].buf;
img_info->entries = segments;
*image_info = img_info;
return 0;
error_alloc_segment:
for (--i, --mhi_buf; i >= 0; i--, mhi_buf--)
dma_free_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len,
mhi_buf->buf, mhi_buf->dma_addr);
error_alloc_mhi_buf:
kfree(img_info);
return -ENOMEM;
}
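/*
 * Copy the firmware image into the allocated segments and fill in the
 * corresponding vector table entries.
 */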
static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
const u8 *buf, size_t remainder,
struct image_info *img_info)
{
size_t to_cpy;
struct mhi_buf *mhi_buf = img_info->mhi_buf;
struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
while (remainder) {
to_cpy = min(remainder, mhi_buf->len);
memcpy(mhi_buf->buf, buf, to_cpy);
bhi_vec->dma_addr = mhi_buf->dma_addr;
bhi_vec->size = to_cpy;
buf += to_cpy;
remainder -= to_cpy;
bhi_vec++;
mhi_buf++;
}
}
void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
{
const struct firmware *firmware = NULL;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
enum mhi_pm_state new_state;
const char *fw_name;
const u8 *fw_data;
void *buf;
dma_addr_t dma_addr;
size_t size, fw_sz;
int i, ret;
if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
dev_err(dev, "Device MHI is not in valid state\n");
return;
}
/* save hardware info from BHI */
ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_SERIALNU,
&mhi_cntrl->serial_number);
if (ret)
dev_err(dev, "Could not capture serial number via BHI\n");
for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++) {
ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_OEMPKHASH(i),
&mhi_cntrl->oem_pk_hash[i]);
if (ret) {
dev_err(dev, "Could not capture OEM PK HASH via BHI\n");
break;
}
}
/* Wait for ready on pass-through or any other execution environment that needs no firmware loading */
if (!MHI_FW_LOAD_CAPABLE(mhi_cntrl->ee))
goto fw_load_ready_state;
fw_name = (mhi_cntrl->ee == MHI_EE_EDL) ?
mhi_cntrl->edl_image : mhi_cntrl->fw_image;
/* check if the driver has already provided the firmware data */
if (!fw_name && mhi_cntrl->fbc_download &&
mhi_cntrl->fw_data && mhi_cntrl->fw_sz) {
if (!mhi_cntrl->sbl_size) {
dev_err(dev, "fw_data provided but no sbl_size\n");
goto error_fw_load;
}
size = mhi_cntrl->sbl_size;
fw_data = mhi_cntrl->fw_data;
fw_sz = mhi_cntrl->fw_sz;
goto skip_req_fw;
}
if (!fw_name || (mhi_cntrl->fbc_download && (!mhi_cntrl->sbl_size ||
!mhi_cntrl->seg_len))) {
dev_err(dev,
"No firmware image defined or !sbl_size || !seg_len\n");
goto error_fw_load;
}
ret = request_firmware(&firmware, fw_name, dev);
if (ret) {
dev_err(dev, "Error loading firmware: %d\n", ret);
goto error_fw_load;
}
size = (mhi_cntrl->fbc_download) ? mhi_cntrl->sbl_size : firmware->size;
/* SBL size provided is maximum size, not necessarily the image size */
if (size > firmware->size)
size = firmware->size;
fw_data = firmware->data;
fw_sz = firmware->size;
skip_req_fw:
buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, size, &dma_addr,
GFP_KERNEL);
if (!buf) {
release_firmware(firmware);
goto error_fw_load;
}
/* Download image using BHI */
memcpy(buf, fw_data, size);
ret = mhi_fw_load_bhi(mhi_cntrl, dma_addr, size);
dma_free_coherent(mhi_cntrl->cntrl_dev, size, buf, dma_addr);
/* Error or in EDL mode, we're done */
if (ret) {
dev_err(dev, "MHI did not load image over BHI, ret: %d\n", ret);
release_firmware(firmware);
goto error_fw_load;
}
/* Wait for ready since EDL image was loaded */
if (fw_name && fw_name == mhi_cntrl->edl_image) {
release_firmware(firmware);
goto fw_load_ready_state;
}
write_lock_irq(&mhi_cntrl->pm_lock);
mhi_cntrl->dev_state = MHI_STATE_RESET;
write_unlock_irq(&mhi_cntrl->pm_lock);
/*
 * If we're doing fbc, populate the vector tables while the
 * device is transitioning into the MHI READY state
 */
if (mhi_cntrl->fbc_download) {
ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image, fw_sz);
if (ret) {
release_firmware(firmware);
goto error_fw_load;
}
/* Load the firmware into BHIE vec table */
mhi_firmware_copy(mhi_cntrl, fw_data, fw_sz, mhi_cntrl->fbc_image);
}
release_firmware(firmware);
fw_load_ready_state:
/* Transitioning into MHI RESET->READY state */
ret = mhi_ready_state_transition(mhi_cntrl);
if (ret) {
dev_err(dev, "MHI did not enter READY state\n");
goto error_ready_state;
}
dev_info(dev, "Wait for device to enter SBL or Mission mode\n");
return;
error_ready_state:
if (mhi_cntrl->fbc_download) {
mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
mhi_cntrl->fbc_image = NULL;
}
error_fw_load:
write_lock_irq(&mhi_cntrl->pm_lock);
new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_FW_DL_ERR);
write_unlock_irq(&mhi_cntrl->pm_lock);
if (new_state == MHI_PM_FW_DL_ERR)
wake_up_all(&mhi_cntrl->state_event);
}
int mhi_download_amss_image(struct mhi_controller *mhi_cntrl)
{
struct image_info *image_info = mhi_cntrl->fbc_image;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
enum mhi_pm_state new_state;
int ret;
if (!image_info)
return -EIO;
ret = mhi_fw_load_bhie(mhi_cntrl,
/* Vector table is the last entry */
&image_info->mhi_buf[image_info->entries - 1]);
if (ret) {
dev_err(dev, "MHI did not load AMSS, ret:%d\n", ret);
write_lock_irq(&mhi_cntrl->pm_lock);
new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_FW_DL_ERR);
write_unlock_irq(&mhi_cntrl->pm_lock);
if (new_state == MHI_PM_FW_DL_ERR)
wake_up_all(&mhi_cntrl->state_event);
}
return ret;
}
| linux-master | drivers/bus/mhi/host/boot.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2022 Linaro Ltd.
* Author: Manivannan Sadhasivam <[email protected]>
*/
#include <linux/errno.h>
#include <linux/mhi_ep.h>
#include "internal.h"
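/*
 * Check whether the requested MHI state transition is allowed from the
 * current state, per the MHI state machine.
 */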
bool __must_check mhi_ep_check_mhi_state(struct mhi_ep_cntrl *mhi_cntrl,
enum mhi_state cur_mhi_state,
enum mhi_state mhi_state)
{
if (mhi_state == MHI_STATE_SYS_ERR)
return true; /* Allowed in any state */
if (mhi_state == MHI_STATE_READY)
return cur_mhi_state == MHI_STATE_RESET;
if (mhi_state == MHI_STATE_M0)
return cur_mhi_state == MHI_STATE_M3 || cur_mhi_state == MHI_STATE_READY;
if (mhi_state == MHI_STATE_M3)
return cur_mhi_state == MHI_STATE_M0;
return false;
}
int mhi_ep_set_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state mhi_state)
{
struct device *dev = &mhi_cntrl->mhi_dev->dev;
if (!mhi_ep_check_mhi_state(mhi_cntrl, mhi_cntrl->mhi_state, mhi_state)) {
dev_err(dev, "MHI state change to %s from %s is not allowed!\n",
mhi_state_str(mhi_state),
mhi_state_str(mhi_cntrl->mhi_state));
return -EACCES;
}
/* TODO: Add support for M1 and M2 states */
if (mhi_state == MHI_STATE_M1 || mhi_state == MHI_STATE_M2) {
dev_err(dev, "MHI state (%s) not supported\n", mhi_state_str(mhi_state));
return -EOPNOTSUPP;
}
mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHISTATUS, MHISTATUS_MHISTATE_MASK, mhi_state);
mhi_cntrl->mhi_state = mhi_state;
if (mhi_state == MHI_STATE_READY)
mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHISTATUS, MHISTATUS_READY_MASK, 1);
if (mhi_state == MHI_STATE_SYS_ERR)
mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHISTATUS, MHISTATUS_SYSERR_MASK, 1);
return 0;
}
int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl)
{
struct device *dev = &mhi_cntrl->mhi_dev->dev;
enum mhi_state old_state;
int ret;
/* If MHI is in M3, resume suspended channels */
mutex_lock(&mhi_cntrl->state_lock);
old_state = mhi_cntrl->mhi_state;
if (old_state == MHI_STATE_M3)
mhi_ep_resume_channels(mhi_cntrl);
ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
if (ret) {
mhi_ep_handle_syserr(mhi_cntrl);
goto err_unlock;
}
/* Signal host that the device moved to M0 */
ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M0);
if (ret) {
dev_err(dev, "Failed sending M0 state change event\n");
goto err_unlock;
}
if (old_state == MHI_STATE_READY) {
/* Send AMSS EE event to host */
ret = mhi_ep_send_ee_event(mhi_cntrl, MHI_EE_AMSS);
if (ret) {
dev_err(dev, "Failed sending AMSS EE event\n");
goto err_unlock;
}
}
err_unlock:
mutex_unlock(&mhi_cntrl->state_lock);
return ret;
}
int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl)
{
struct device *dev = &mhi_cntrl->mhi_dev->dev;
int ret;
mutex_lock(&mhi_cntrl->state_lock);
ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
if (ret) {
mhi_ep_handle_syserr(mhi_cntrl);
goto err_unlock;
}
mhi_ep_suspend_channels(mhi_cntrl);
/* Signal host that the device moved to M3 */
ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M3);
if (ret) {
dev_err(dev, "Failed sending M3 state change event\n");
goto err_unlock;
}
err_unlock:
mutex_unlock(&mhi_cntrl->state_lock);
return ret;
}
int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl)
{
struct device *dev = &mhi_cntrl->mhi_dev->dev;
enum mhi_state mhi_state;
int ret, is_ready;
mutex_lock(&mhi_cntrl->state_lock);
/* Ensure that the MHISTATUS is set to RESET by host */
mhi_state = mhi_ep_mmio_masked_read(mhi_cntrl, EP_MHISTATUS, MHISTATUS_MHISTATE_MASK);
is_ready = mhi_ep_mmio_masked_read(mhi_cntrl, EP_MHISTATUS, MHISTATUS_READY_MASK);
if (mhi_state != MHI_STATE_RESET || is_ready) {
dev_err(dev, "READY state transition failed. MHI host not in RESET state\n");
ret = -EIO;
goto err_unlock;
}
ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_READY);
if (ret)
mhi_ep_handle_syserr(mhi_cntrl);
err_unlock:
mutex_unlock(&mhi_cntrl->state_lock);
return ret;
}
| linux-master | drivers/bus/mhi/ep/sm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* MHI Endpoint bus stack
*
* Copyright (C) 2022 Linaro Ltd.
* Author: Manivannan Sadhasivam <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-direction.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mhi_ep.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include "internal.h"
#define M0_WAIT_DELAY_MS 100
#define M0_WAIT_COUNT 100
static DEFINE_IDA(mhi_ep_cntrl_ida);
static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id);
static int mhi_ep_destroy_device(struct device *dev, void *data);
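/*
 * Add an element to the given event ring and raise an IRQ to the host,
 * unless BEI (Block Event Interrupt) is set for the TRE.
 */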
static int mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 ring_idx,
struct mhi_ring_element *el, bool bei)
{
struct device *dev = &mhi_cntrl->mhi_dev->dev;
union mhi_ep_ring_ctx *ctx;
struct mhi_ep_ring *ring;
int ret;
mutex_lock(&mhi_cntrl->event_lock);
ring = &mhi_cntrl->mhi_event[ring_idx].ring;
ctx = (union mhi_ep_ring_ctx *)&mhi_cntrl->ev_ctx_cache[ring_idx];
if (!ring->started) {
ret = mhi_ep_ring_start(mhi_cntrl, ring, ctx);
if (ret) {
dev_err(dev, "Error starting event ring (%u)\n", ring_idx);
goto err_unlock;
}
}
/* Add element to the event ring */
ret = mhi_ep_ring_add_element(ring, el);
if (ret) {
dev_err(dev, "Error adding element to event ring (%u)\n", ring_idx);
goto err_unlock;
}
mutex_unlock(&mhi_cntrl->event_lock);
/*
* Raise IRQ to host only if the BEI flag is not set in TRE. Host might
* set this flag for interrupt moderation as per MHI protocol.
*/
if (!bei)
mhi_cntrl->raise_irq(mhi_cntrl, ring->irq_vector);
return 0;
err_unlock:
mutex_unlock(&mhi_cntrl->event_lock);
return ret;
}
static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
struct mhi_ring_element *tre, u32 len, enum mhi_ev_ccs code)
{
struct mhi_ring_element event = {};
event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(*tre));
event.dword[0] = MHI_TRE_EV_DWORD0(code, len);
event.dword[1] = MHI_TRE_EV_DWORD1(ring->ch_id, MHI_PKT_TYPE_TX_EVENT);
return mhi_ep_send_event(mhi_cntrl, ring->er_index, &event, MHI_TRE_DATA_GET_BEI(tre));
}
int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state)
{
struct mhi_ring_element event = {};
event.dword[0] = MHI_SC_EV_DWORD0(state);
event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_STATE_CHANGE_EVENT);
return mhi_ep_send_event(mhi_cntrl, 0, &event, 0);
}
int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env)
{
struct mhi_ring_element event = {};
event.dword[0] = MHI_EE_EV_DWORD0(exec_env);
event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_EE_EVENT);
return mhi_ep_send_event(mhi_cntrl, 0, &event, 0);
}
static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ev_ccs code)
{
struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring;
struct mhi_ring_element event = {};
event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(struct mhi_ring_element));
event.dword[0] = MHI_CC_EV_DWORD0(code);
event.dword[1] = MHI_CC_EV_DWORD1(MHI_PKT_TYPE_CMD_COMPLETION_EVENT);
return mhi_ep_send_event(mhi_cntrl, 0, &event, 0);
}
static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
{
struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
struct mhi_result result = {};
struct mhi_ep_chan *mhi_chan;
struct mhi_ep_ring *ch_ring;
u32 tmp, ch_id;
int ret;
ch_id = MHI_TRE_GET_CMD_CHID(el);
/* Check if the channel is supported by the controller */
if ((ch_id >= mhi_cntrl->max_chan) || !mhi_cntrl->mhi_chan[ch_id].name) {
dev_dbg(dev, "Channel (%u) not supported!\n", ch_id);
return -ENODEV;
}
mhi_chan = &mhi_cntrl->mhi_chan[ch_id];
ch_ring = &mhi_cntrl->mhi_chan[ch_id].ring;
switch (MHI_TRE_GET_CMD_TYPE(el)) {
case MHI_PKT_TYPE_START_CHAN_CMD:
dev_dbg(dev, "Received START command for channel (%u)\n", ch_id);
mutex_lock(&mhi_chan->lock);
/* Initialize and configure the corresponding channel ring */
if (!ch_ring->started) {
ret = mhi_ep_ring_start(mhi_cntrl, ch_ring,
(union mhi_ep_ring_ctx *)&mhi_cntrl->ch_ctx_cache[ch_id]);
if (ret) {
dev_err(dev, "Failed to start ring for channel (%u)\n", ch_id);
ret = mhi_ep_send_cmd_comp_event(mhi_cntrl,
MHI_EV_CC_UNDEFINED_ERR);
if (ret)
dev_err(dev, "Error sending completion event: %d\n", ret);
goto err_unlock;
}
}
/* Set channel state to RUNNING */
mhi_chan->state = MHI_CH_STATE_RUNNING;
tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
tmp &= ~CHAN_CTX_CHSTATE_MASK;
tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING);
mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);
ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
if (ret) {
dev_err(dev, "Error sending command completion event (%u)\n",
MHI_EV_CC_SUCCESS);
goto err_unlock;
}
mutex_unlock(&mhi_chan->lock);
/*
* Create MHI device only during UL channel start. Since the MHI
* channels operate in a pair, we'll associate both UL and DL
* channels to the same device.
*
 * We also need to check for mhi_dev != NULL because the host
 * will issue the START_CHAN command during resume, and we don't
 * destroy the device during suspend.
*/
if (!(ch_id % 2) && !mhi_chan->mhi_dev) {
ret = mhi_ep_create_device(mhi_cntrl, ch_id);
if (ret) {
dev_err(dev, "Error creating device for channel (%u)\n", ch_id);
mhi_ep_handle_syserr(mhi_cntrl);
return ret;
}
}
/* Finally, enable DB for the channel */
mhi_ep_mmio_enable_chdb(mhi_cntrl, ch_id);
break;
case MHI_PKT_TYPE_STOP_CHAN_CMD:
dev_dbg(dev, "Received STOP command for channel (%u)\n", ch_id);
if (!ch_ring->started) {
dev_err(dev, "Channel (%u) not opened\n", ch_id);
return -ENODEV;
}
mutex_lock(&mhi_chan->lock);
/* Disable DB for the channel */
mhi_ep_mmio_disable_chdb(mhi_cntrl, ch_id);
/* Send channel disconnect status to client drivers */
if (mhi_chan->xfer_cb) {
result.transaction_status = -ENOTCONN;
result.bytes_xferd = 0;
mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
}
/* Set channel state to STOP */
mhi_chan->state = MHI_CH_STATE_STOP;
tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
tmp &= ~CHAN_CTX_CHSTATE_MASK;
tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_STOP);
mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);
ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
if (ret) {
dev_err(dev, "Error sending command completion event (%u)\n",
MHI_EV_CC_SUCCESS);
goto err_unlock;
}
mutex_unlock(&mhi_chan->lock);
break;
case MHI_PKT_TYPE_RESET_CHAN_CMD:
dev_dbg(dev, "Received RESET command for channel (%u)\n", ch_id);
if (!ch_ring->started) {
dev_err(dev, "Channel (%u) not opened\n", ch_id);
return -ENODEV;
}
mutex_lock(&mhi_chan->lock);
/* Stop and reset the transfer ring */
mhi_ep_ring_reset(mhi_cntrl, ch_ring);
/* Send channel disconnect status to client driver */
if (mhi_chan->xfer_cb) {
result.transaction_status = -ENOTCONN;
result.bytes_xferd = 0;
mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
}
/* Set channel state to DISABLED */
mhi_chan->state = MHI_CH_STATE_DISABLED;
tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
tmp &= ~CHAN_CTX_CHSTATE_MASK;
tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED);
mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);
ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
if (ret) {
dev_err(dev, "Error sending command completion event (%u)\n",
MHI_EV_CC_SUCCESS);
goto err_unlock;
}
mutex_unlock(&mhi_chan->lock);
break;
default:
dev_err(dev, "Invalid command received: %lu for channel (%u)\n",
MHI_TRE_GET_CMD_TYPE(el), ch_id);
return -EINVAL;
}
return 0;
err_unlock:
mutex_unlock(&mhi_chan->lock);
return ret;
}
bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir)
{
struct mhi_ep_chan *mhi_chan = (dir == DMA_FROM_DEVICE) ? mhi_dev->dl_chan :
mhi_dev->ul_chan;
struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
return ring->rd_offset == ring->wr_offset;
}
EXPORT_SYMBOL_GPL(mhi_ep_queue_is_empty);
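/*
 * Read up to 'len' bytes of a TD from host memory into the result buffer,
 * sending EOB/EOT completion events to the host as TREs are consumed.
 */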
static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
struct mhi_ep_ring *ring,
struct mhi_result *result,
u32 len)
{
struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
struct device *dev = &mhi_cntrl->mhi_dev->dev;
size_t tr_len, read_offset, write_offset;
struct mhi_ring_element *el;
bool tr_done = false;
void *write_addr;
u64 read_addr;
u32 buf_left;
int ret;
buf_left = len;
do {
/* Don't process the transfer ring if the channel is not in RUNNING state */
if (mhi_chan->state != MHI_CH_STATE_RUNNING) {
dev_err(dev, "Channel not available\n");
return -ENODEV;
}
el = &ring->ring_cache[ring->rd_offset];
/* Check if there is data pending to be read from previous read operation */
if (mhi_chan->tre_bytes_left) {
dev_dbg(dev, "TRE bytes remaining: %u\n", mhi_chan->tre_bytes_left);
tr_len = min(buf_left, mhi_chan->tre_bytes_left);
} else {
mhi_chan->tre_loc = MHI_TRE_DATA_GET_PTR(el);
mhi_chan->tre_size = MHI_TRE_DATA_GET_LEN(el);
mhi_chan->tre_bytes_left = mhi_chan->tre_size;
tr_len = min(buf_left, mhi_chan->tre_size);
}
read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left;
write_offset = len - buf_left;
read_addr = mhi_chan->tre_loc + read_offset;
write_addr = result->buf_addr + write_offset;
dev_dbg(dev, "Reading %zd bytes from channel (%u)\n", tr_len, ring->ch_id);
ret = mhi_cntrl->read_from_host(mhi_cntrl, read_addr, write_addr, tr_len);
if (ret < 0) {
dev_err(&mhi_chan->mhi_dev->dev, "Error reading from channel\n");
return ret;
}
buf_left -= tr_len;
mhi_chan->tre_bytes_left -= tr_len;
/*
* Once the TRE (Transfer Ring Element) of a TD (Transfer Descriptor) has been
* read completely:
*
* 1. Send completion event to the host based on the flags set in TRE.
* 2. Increment the local read offset of the transfer ring.
*/
if (!mhi_chan->tre_bytes_left) {
/*
* The host will split the data packet into multiple TREs if it can't fit
* the packet in a single TRE. In that case, CHAIN flag will be set by the
* host for all TREs except the last one.
*/
if (MHI_TRE_DATA_GET_CHAIN(el)) {
/*
* IEOB (Interrupt on End of Block) flag will be set by the host if
* it expects the completion event for all TREs of a TD.
*/
if (MHI_TRE_DATA_GET_IEOB(el)) {
ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
MHI_TRE_DATA_GET_LEN(el),
MHI_EV_CC_EOB);
if (ret < 0) {
dev_err(&mhi_chan->mhi_dev->dev,
"Error sending transfer compl. event\n");
return ret;
}
}
} else {
/*
* IEOT (Interrupt on End of Transfer) flag will be set by the host
* for the last TRE of the TD and expects the completion event for
* the same.
*/
if (MHI_TRE_DATA_GET_IEOT(el)) {
ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
MHI_TRE_DATA_GET_LEN(el),
MHI_EV_CC_EOT);
if (ret < 0) {
dev_err(&mhi_chan->mhi_dev->dev,
"Error sending transfer compl. event\n");
return ret;
}
}
tr_done = true;
}
mhi_ep_ring_inc_index(ring);
}
result->bytes_xferd += tr_len;
} while (buf_left && !tr_done);
return 0;
}
static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
{
struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
struct mhi_result result = {};
u32 len = MHI_EP_DEFAULT_MTU;
struct mhi_ep_chan *mhi_chan;
int ret;
mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
/*
 * Bail out if no transfer callback is registered for the channel.
 * This most likely means the client driver isn't loaded at this point.
*/
if (!mhi_chan->xfer_cb) {
dev_err(&mhi_chan->mhi_dev->dev, "Client driver not available\n");
return -ENODEV;
}
if (ring->ch_id % 2) {
/* DL channel */
result.dir = mhi_chan->dir;
mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
} else {
/* UL channel */
result.buf_addr = kzalloc(len, GFP_KERNEL);
if (!result.buf_addr)
return -ENOMEM;
do {
ret = mhi_ep_read_channel(mhi_cntrl, ring, &result, len);
if (ret < 0) {
dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n");
kfree(result.buf_addr);
return ret;
}
result.dir = mhi_chan->dir;
mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
result.bytes_xferd = 0;
memset(result.buf_addr, 0, len);
/* Read until the ring becomes empty */
} while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE));
kfree(result.buf_addr);
}
return 0;
}
/* TODO: Handle partially formed TDs */
int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
{
struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan;
struct device *dev = &mhi_chan->mhi_dev->dev;
struct mhi_ring_element *el;
u32 buf_left, read_offset;
struct mhi_ep_ring *ring;
enum mhi_ev_ccs code;
void *read_addr;
u64 write_addr;
size_t tr_len;
u32 tre_len;
int ret;
buf_left = skb->len;
ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
mutex_lock(&mhi_chan->lock);
do {
/* Don't process the transfer ring if the channel is not in RUNNING state */
if (mhi_chan->state != MHI_CH_STATE_RUNNING) {
dev_err(dev, "Channel not available\n");
ret = -ENODEV;
goto err_exit;
}
if (mhi_ep_queue_is_empty(mhi_dev, DMA_FROM_DEVICE)) {
dev_err(dev, "TRE not available!\n");
ret = -ENOSPC;
goto err_exit;
}
el = &ring->ring_cache[ring->rd_offset];
tre_len = MHI_TRE_DATA_GET_LEN(el);
tr_len = min(buf_left, tre_len);
read_offset = skb->len - buf_left;
read_addr = skb->data + read_offset;
write_addr = MHI_TRE_DATA_GET_PTR(el);
dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id);
ret = mhi_cntrl->write_to_host(mhi_cntrl, read_addr, write_addr, tr_len);
if (ret < 0) {
dev_err(dev, "Error writing to the channel\n");
goto err_exit;
}
buf_left -= tr_len;
/*
 * For all TREs queued by the host for a DL channel, only the EOT flag will be
 * set. If the packet doesn't fit into a single TRE, send the OVERFLOW event to
 * the host so that it can adjust the packet boundary to the next TREs.
 * Otherwise, send the EOT event to the host, indicating the packet boundary.
*/
if (buf_left)
code = MHI_EV_CC_OVERFLOW;
else
code = MHI_EV_CC_EOT;
ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, tr_len, code);
if (ret) {
dev_err(dev, "Error sending transfer completion event\n");
goto err_exit;
}
mhi_ep_ring_inc_index(ring);
} while (buf_left);
mutex_unlock(&mhi_chan->lock);
return 0;
err_exit:
mutex_unlock(&mhi_chan->lock);
return ret;
}
EXPORT_SYMBOL_GPL(mhi_ep_queue_skb);
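/*
 * Map and cache the host's channel, event and command context arrays,
 * then start the command ring.
 */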
static int mhi_ep_cache_host_cfg(struct mhi_ep_cntrl *mhi_cntrl)
{
size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
int ret;
/* Update the number of event rings (NER) programmed by the host */
mhi_ep_mmio_update_ner(mhi_cntrl);
dev_dbg(dev, "Number of Event rings: %u, HW Event rings: %u\n",
mhi_cntrl->event_rings, mhi_cntrl->hw_event_rings);
ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan;
ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings;
cmd_ctx_host_size = sizeof(struct mhi_cmd_ctxt) * NR_OF_CMD_RINGS;
/* Get the channel context base pointer from host */
mhi_ep_mmio_get_chc_base(mhi_cntrl);
/* Allocate and map memory for caching host channel context */
ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa,
&mhi_cntrl->ch_ctx_cache_phys,
(void __iomem **) &mhi_cntrl->ch_ctx_cache,
ch_ctx_host_size);
if (ret) {
dev_err(dev, "Failed to allocate and map ch_ctx_cache\n");
return ret;
}
/* Get the event context base pointer from host */
mhi_ep_mmio_get_erc_base(mhi_cntrl);
/* Allocate and map memory for caching host event context */
ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa,
&mhi_cntrl->ev_ctx_cache_phys,
(void __iomem **) &mhi_cntrl->ev_ctx_cache,
ev_ctx_host_size);
if (ret) {
dev_err(dev, "Failed to allocate and map ev_ctx_cache\n");
goto err_ch_ctx;
}
/* Get the command context base pointer from host */
mhi_ep_mmio_get_crc_base(mhi_cntrl);
/* Allocate and map memory for caching host command context */
ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa,
&mhi_cntrl->cmd_ctx_cache_phys,
(void __iomem **) &mhi_cntrl->cmd_ctx_cache,
cmd_ctx_host_size);
if (ret) {
dev_err(dev, "Failed to allocate and map cmd_ctx_cache\n");
goto err_ev_ctx;
}
/* Initialize command ring */
ret = mhi_ep_ring_start(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring,
(union mhi_ep_ring_ctx *)mhi_cntrl->cmd_ctx_cache);
if (ret) {
dev_err(dev, "Failed to start the command ring\n");
goto err_cmd_ctx;
}
return ret;
err_cmd_ctx:
mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys,
(void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size);
err_ev_ctx:
mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys,
(void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size);
err_ch_ctx:
mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys,
(void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size);
return ret;
}
static void mhi_ep_free_host_cfg(struct mhi_ep_cntrl *mhi_cntrl)
{
size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size;
ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan;
ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings;
cmd_ctx_host_size = sizeof(struct mhi_cmd_ctxt) * NR_OF_CMD_RINGS;
mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys,
(void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size);
mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys,
(void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size);
mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys,
(void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size);
}
static void mhi_ep_enable_int(struct mhi_ep_cntrl *mhi_cntrl)
{
/*
* Doorbell interrupts are enabled when the corresponding channel gets started.
 * Enabling all interrupts here would trigger spurious IRQs, as some of the
 * interrupts associated with hw channels are always asserted.
*/
mhi_ep_mmio_enable_ctrl_interrupt(mhi_cntrl);
mhi_ep_mmio_enable_cmdb_interrupt(mhi_cntrl);
}
static int mhi_ep_enable(struct mhi_ep_cntrl *mhi_cntrl)
{
struct device *dev = &mhi_cntrl->mhi_dev->dev;
enum mhi_state state;
bool mhi_reset;
u32 count = 0;
int ret;
/* Wait for Host to set the M0 state */
do {
msleep(M0_WAIT_DELAY_MS);
mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset);
if (mhi_reset) {
/* Clear the MHI reset if host is in reset state */
mhi_ep_mmio_clear_reset(mhi_cntrl);
dev_info(dev, "Detected Host reset while waiting for M0\n");
}
count++;
} while (state != MHI_STATE_M0 && count < M0_WAIT_COUNT);
if (state != MHI_STATE_M0) {
dev_err(dev, "Host failed to enter M0\n");
return -ETIMEDOUT;
}
ret = mhi_ep_cache_host_cfg(mhi_cntrl);
if (ret) {
dev_err(dev, "Failed to cache host config\n");
return ret;
}
mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);
/* Enable all interrupts now */
mhi_ep_enable_int(mhi_cntrl);
return 0;
}
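/*
 * Workqueue handler draining the command ring: process every element up
 * to the host's write offset.
 */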
static void mhi_ep_cmd_ring_worker(struct work_struct *work)
{
struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, cmd_ring_work);
struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
struct mhi_ring_element *el;
int ret;
/* Update the write offset for the ring */
ret = mhi_ep_update_wr_offset(ring);
if (ret) {
dev_err(dev, "Error updating write offset for ring\n");
return;
}
/* Sanity check to make sure there are elements in the ring */
if (ring->rd_offset == ring->wr_offset)
return;
/*
 * Process command ring elements up to the write offset. In case of an
 * error, just try to process the next element.
*/
while (ring->rd_offset != ring->wr_offset) {
el = &ring->ring_cache[ring->rd_offset];
ret = mhi_ep_process_cmd_ring(ring, el);
if (ret && ret != -ENODEV)
dev_err(dev, "Error processing cmd ring element: %zu\n", ring->rd_offset);
mhi_ep_ring_inc_index(ring);
}
}
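/* Workqueue handler servicing channel rings whose doorbells were rung by the host */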
static void mhi_ep_ch_ring_worker(struct work_struct *work)
{
struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, ch_ring_work);
struct device *dev = &mhi_cntrl->mhi_dev->dev;
struct mhi_ep_ring_item *itr, *tmp;
struct mhi_ring_element *el;
struct mhi_ep_ring *ring;
struct mhi_ep_chan *chan;
unsigned long flags;
LIST_HEAD(head);
int ret;
spin_lock_irqsave(&mhi_cntrl->list_lock, flags);
list_splice_tail_init(&mhi_cntrl->ch_db_list, &head);
spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags);
/* Process each queued channel ring. In case of an error, just process next element. */
list_for_each_entry_safe(itr, tmp, &head, node) {
list_del(&itr->node);
ring = itr->ring;
chan = &mhi_cntrl->mhi_chan[ring->ch_id];
mutex_lock(&chan->lock);
/*
 * The ring could've stopped while we waited to grab chan->lock, so do
 * a sanity check before going further.
*/
if (!ring->started) {
mutex_unlock(&chan->lock);
kfree(itr);
continue;
}
/* Update the write offset for the ring */
ret = mhi_ep_update_wr_offset(ring);
if (ret) {
dev_err(dev, "Error updating write offset for ring\n");
mutex_unlock(&chan->lock);
kfree(itr);
continue;
}
/* Sanity check to make sure there are elements in the ring */
if (ring->rd_offset == ring->wr_offset) {
mutex_unlock(&chan->lock);
kfree(itr);
continue;
}
el = &ring->ring_cache[ring->rd_offset];
dev_dbg(dev, "Processing the ring for channel (%u)\n", ring->ch_id);
ret = mhi_ep_process_ch_ring(ring, el);
if (ret) {
dev_err(dev, "Error processing ring for channel (%u): %d\n",
ring->ch_id, ret);
mutex_unlock(&chan->lock);
kfree(itr);
continue;
}
mutex_unlock(&chan->lock);
kfree(itr);
}
}
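/* Workqueue handler executing the queued MHI state transitions requested by the host */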
static void mhi_ep_state_worker(struct work_struct *work)
{
struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, state_work);
struct device *dev = &mhi_cntrl->mhi_dev->dev;
struct mhi_ep_state_transition *itr, *tmp;
unsigned long flags;
LIST_HEAD(head);
int ret;
spin_lock_irqsave(&mhi_cntrl->list_lock, flags);
list_splice_tail_init(&mhi_cntrl->st_transition_list, &head);
spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags);
list_for_each_entry_safe(itr, tmp, &head, node) {
list_del(&itr->node);
dev_dbg(dev, "Handling MHI state transition to %s\n",
mhi_state_str(itr->state));
switch (itr->state) {
case MHI_STATE_M0:
ret = mhi_ep_set_m0_state(mhi_cntrl);
if (ret)
dev_err(dev, "Failed to transition to M0 state\n");
break;
case MHI_STATE_M3:
ret = mhi_ep_set_m3_state(mhi_cntrl);
if (ret)
dev_err(dev, "Failed to transition to M3 state\n");
break;
default:
dev_err(dev, "Invalid MHI state transition: %d\n", itr->state);
break;
}
kfree(itr);
}
}
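/*
 * Queue one ring item per asserted channel doorbell and then kick the
 * channel ring worker to process them.
 */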
static void mhi_ep_queue_channel_db(struct mhi_ep_cntrl *mhi_cntrl, unsigned long ch_int,
u32 ch_idx)
{
struct mhi_ep_ring_item *item;
struct mhi_ep_ring *ring;
bool work = !!ch_int;
LIST_HEAD(head);
u32 i;
/* First add the ring items to a local list */
for_each_set_bit(i, &ch_int, 32) {
/* Channel index varies for each register: 0, 32, 64, 96 */
u32 ch_id = ch_idx + i;
ring = &mhi_cntrl->mhi_chan[ch_id].ring;
item = kzalloc(sizeof(*item), GFP_ATOMIC);
if (!item)
return;
item->ring = ring;
list_add_tail(&item->node, &head);
}
/* Now, splice the local list into ch_db_list and queue the work item */
if (work) {
spin_lock(&mhi_cntrl->list_lock);
list_splice_tail_init(&head, &mhi_cntrl->ch_db_list);
spin_unlock(&mhi_cntrl->list_lock);
queue_work(mhi_cntrl->wq, &mhi_cntrl->ch_ring_work);
}
}
/*
 * Channel interrupt statuses are contained in 4 registers, each 32 bits wide.
 * To check all interrupts, we need to loop through each register and then
 * check for the bits that are set.
 */
static void mhi_ep_check_channel_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
{
u32 ch_int, ch_idx, i;
/* Bail out if there is no channel doorbell interrupt */
if (!mhi_ep_mmio_read_chdb_status_interrupts(mhi_cntrl))
return;
for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) {
ch_idx = i * MHI_MASK_CH_LEN;
/* Only process channel interrupt if the mask is enabled */
ch_int = mhi_cntrl->chdb[i].status & mhi_cntrl->chdb[i].mask;
if (ch_int) {
mhi_ep_queue_channel_db(mhi_cntrl, ch_int, ch_idx);
mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_CLEAR_n(i),
mhi_cntrl->chdb[i].status);
}
}
}
static void mhi_ep_process_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl,
enum mhi_state state)
{
struct mhi_ep_state_transition *item;
item = kzalloc(sizeof(*item), GFP_ATOMIC);
if (!item)
return;
item->state = state;
spin_lock(&mhi_cntrl->list_lock);
list_add_tail(&item->node, &mhi_cntrl->st_transition_list);
spin_unlock(&mhi_cntrl->list_lock);
queue_work(mhi_cntrl->wq, &mhi_cntrl->state_work);
}
/*
* Interrupt handler that services interrupts raised by the host writing to
* MHICTRL and Command ring doorbell (CRDB) registers for state change and
* channel interrupts.
*/
static irqreturn_t mhi_ep_irq(int irq, void *data)
{
struct mhi_ep_cntrl *mhi_cntrl = data;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
enum mhi_state state;
u32 int_value;
bool mhi_reset;
/* Acknowledge the ctrl interrupt */
int_value = mhi_ep_mmio_read(mhi_cntrl, MHI_CTRL_INT_STATUS);
mhi_ep_mmio_write(mhi_cntrl, MHI_CTRL_INT_CLEAR, int_value);
/* Check for ctrl interrupt */
if (FIELD_GET(MHI_CTRL_INT_STATUS_MSK, int_value)) {
dev_dbg(dev, "Processing ctrl interrupt\n");
mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset);
if (mhi_reset) {
dev_info(dev, "Host triggered MHI reset!\n");
disable_irq_nosync(mhi_cntrl->irq);
schedule_work(&mhi_cntrl->reset_work);
return IRQ_HANDLED;
}
mhi_ep_process_ctrl_interrupt(mhi_cntrl, state);
}
/* Check for command doorbell interrupt */
if (FIELD_GET(MHI_CTRL_INT_STATUS_CRDB_MSK, int_value)) {
dev_dbg(dev, "Processing command doorbell interrupt\n");
queue_work(mhi_cntrl->wq, &mhi_cntrl->cmd_ring_work);
}
/* Check for channel interrupts */
mhi_ep_check_channel_interrupt(mhi_cntrl);
return IRQ_HANDLED;
}
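/*
 * Tear down all ongoing transfers: notify the client drivers of the
 * disconnection, destroy the channel devices and reset the transfer,
 * event and command rings.
 */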
static void mhi_ep_abort_transfer(struct mhi_ep_cntrl *mhi_cntrl)
{
struct mhi_ep_ring *ch_ring, *ev_ring;
struct mhi_result result = {};
struct mhi_ep_chan *mhi_chan;
int i;
/* Stop all the channels */
for (i = 0; i < mhi_cntrl->max_chan; i++) {
mhi_chan = &mhi_cntrl->mhi_chan[i];
if (!mhi_chan->ring.started)
continue;
mutex_lock(&mhi_chan->lock);
/* Send channel disconnect status to client drivers */
if (mhi_chan->xfer_cb) {
result.transaction_status = -ENOTCONN;
result.bytes_xferd = 0;
mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
}
mhi_chan->state = MHI_CH_STATE_DISABLED;
mutex_unlock(&mhi_chan->lock);
}
flush_workqueue(mhi_cntrl->wq);
/* Destroy devices associated with all channels */
device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_ep_destroy_device);
/* Stop and reset the transfer rings */
for (i = 0; i < mhi_cntrl->max_chan; i++) {
mhi_chan = &mhi_cntrl->mhi_chan[i];
if (!mhi_chan->ring.started)
continue;
ch_ring = &mhi_cntrl->mhi_chan[i].ring;
mutex_lock(&mhi_chan->lock);
mhi_ep_ring_reset(mhi_cntrl, ch_ring);
mutex_unlock(&mhi_chan->lock);
}
/* Stop and reset the event rings */
for (i = 0; i < mhi_cntrl->event_rings; i++) {
ev_ring = &mhi_cntrl->mhi_event[i].ring;
if (!ev_ring->started)
continue;
mutex_lock(&mhi_cntrl->event_lock);
mhi_ep_ring_reset(mhi_cntrl, ev_ring);
mutex_unlock(&mhi_cntrl->event_lock);
}
/* Stop and reset the command ring */
mhi_ep_ring_reset(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring);
mhi_ep_free_host_cfg(mhi_cntrl);
mhi_ep_mmio_mask_interrupts(mhi_cntrl);
mhi_cntrl->enabled = false;
}
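/*
 * Worker for handling the MHI reset requested by the host. The stack is
 * powered down, the MMIO registers are reset and, if the reset was due to
 * SYS_ERR, the stack is powered back up.
 */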
static void mhi_ep_reset_worker(struct work_struct *work)
{
struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, reset_work);
enum mhi_state cur_state;
mhi_ep_power_down(mhi_cntrl);
mutex_lock(&mhi_cntrl->state_lock);
/* Reset MMIO to signal host that the MHI_RESET is completed in endpoint */
mhi_ep_mmio_reset(mhi_cntrl);
cur_state = mhi_cntrl->mhi_state;
	/*
	 * Only proceed further if the reset is due to SYS_ERR. The host also
	 * issues a reset during shutdown, and we don't need to re-init in
	 * that case.
	 */
if (cur_state == MHI_STATE_SYS_ERR)
mhi_ep_power_up(mhi_cntrl);
mutex_unlock(&mhi_cntrl->state_lock);
}
/*
* We don't need to do anything special other than setting the MHI SYS_ERR
* state. The host will reset all contexts and issue MHI RESET so that we
* could also recover from error state.
*/
void mhi_ep_handle_syserr(struct mhi_ep_cntrl *mhi_cntrl)
{
struct device *dev = &mhi_cntrl->mhi_dev->dev;
int ret;
ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
if (ret)
return;
/* Signal host that the device went to SYS_ERR state */
ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_SYS_ERR);
if (ret)
dev_err(dev, "Failed sending SYS_ERR state change event: %d\n", ret);
}
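/*
 * Power up the MHI endpoint stack: initialize the MMIO registers and rings,
 * signal the READY state to the host and enable the doorbell IRQ.
 */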
int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl)
{
struct device *dev = &mhi_cntrl->mhi_dev->dev;
int ret, i;
/*
* Mask all interrupts until the state machine is ready. Interrupts will
* be enabled later with mhi_ep_enable().
*/
mhi_ep_mmio_mask_interrupts(mhi_cntrl);
mhi_ep_mmio_init(mhi_cntrl);
	mhi_cntrl->mhi_event = kcalloc(mhi_cntrl->event_rings, sizeof(*mhi_cntrl->mhi_event),
				       GFP_KERNEL);
if (!mhi_cntrl->mhi_event)
return -ENOMEM;
/* Initialize command, channel and event rings */
mhi_ep_ring_init(&mhi_cntrl->mhi_cmd->ring, RING_TYPE_CMD, 0);
for (i = 0; i < mhi_cntrl->max_chan; i++)
mhi_ep_ring_init(&mhi_cntrl->mhi_chan[i].ring, RING_TYPE_CH, i);
for (i = 0; i < mhi_cntrl->event_rings; i++)
mhi_ep_ring_init(&mhi_cntrl->mhi_event[i].ring, RING_TYPE_ER, i);
mhi_cntrl->mhi_state = MHI_STATE_RESET;
/* Set AMSS EE before signaling ready state */
mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);
/* All set, notify the host that we are ready */
ret = mhi_ep_set_ready_state(mhi_cntrl);
if (ret)
goto err_free_event;
dev_dbg(dev, "READY state notification sent to the host\n");
ret = mhi_ep_enable(mhi_cntrl);
if (ret) {
dev_err(dev, "Failed to enable MHI endpoint\n");
goto err_free_event;
}
enable_irq(mhi_cntrl->irq);
mhi_cntrl->enabled = true;
return 0;
err_free_event:
kfree(mhi_cntrl->mhi_event);
return ret;
}
EXPORT_SYMBOL_GPL(mhi_ep_power_up);
void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl)
{
	if (mhi_cntrl->enabled) {
		/*
		 * Disable the doorbell IRQ first so that no interrupt handler
		 * runs while the rings are being torn down and freed.
		 */
		disable_irq(mhi_cntrl->irq);
		mhi_ep_abort_transfer(mhi_cntrl);
		kfree(mhi_cntrl->mhi_event);
	}
}
EXPORT_SYMBOL_GPL(mhi_ep_power_down);
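/*
 * Move all running channels to the SUSPENDED state, updating both the local
 * channel state and the cached channel context.
 */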
void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl)
{
struct mhi_ep_chan *mhi_chan;
u32 tmp;
int i;
for (i = 0; i < mhi_cntrl->max_chan; i++) {
mhi_chan = &mhi_cntrl->mhi_chan[i];
if (!mhi_chan->mhi_dev)
continue;
mutex_lock(&mhi_chan->lock);
/* Skip if the channel is not currently running */
tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg);
if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_RUNNING) {
mutex_unlock(&mhi_chan->lock);
continue;
}
dev_dbg(&mhi_chan->mhi_dev->dev, "Suspending channel\n");
/* Set channel state to SUSPENDED */
mhi_chan->state = MHI_CH_STATE_SUSPENDED;
tmp &= ~CHAN_CTX_CHSTATE_MASK;
tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_SUSPENDED);
mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
mutex_unlock(&mhi_chan->lock);
}
}
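/*
 * Move all suspended channels back to the RUNNING state, updating both the
 * local channel state and the cached channel context.
 */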
void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl)
{
struct mhi_ep_chan *mhi_chan;
u32 tmp;
int i;
for (i = 0; i < mhi_cntrl->max_chan; i++) {
mhi_chan = &mhi_cntrl->mhi_chan[i];
if (!mhi_chan->mhi_dev)
continue;
mutex_lock(&mhi_chan->lock);
/* Skip if the channel is not currently suspended */
tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg);
if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_SUSPENDED) {
mutex_unlock(&mhi_chan->lock);
continue;
}
dev_dbg(&mhi_chan->mhi_dev->dev, "Resuming channel\n");
/* Set channel state to RUNNING */
mhi_chan->state = MHI_CH_STATE_RUNNING;
tmp &= ~CHAN_CTX_CHSTATE_MASK;
tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING);
mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
mutex_unlock(&mhi_chan->lock);
}
}
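/* Release callback invoked when the last reference to an MHI EP device is dropped */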
static void mhi_ep_release_device(struct device *dev)
{
struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
mhi_dev->mhi_cntrl->mhi_dev = NULL;
	/*
	 * We need to set mhi_chan->mhi_dev to NULL here since the MHI
	 * devices for the channels will only get created in mhi_ep_create_device()
	 * if the mhi_dev associated with them is NULL.
	 */
if (mhi_dev->ul_chan)
mhi_dev->ul_chan->mhi_dev = NULL;
if (mhi_dev->dl_chan)
mhi_dev->dl_chan->mhi_dev = NULL;
kfree(mhi_dev);
}
static struct mhi_ep_device *mhi_ep_alloc_device(struct mhi_ep_cntrl *mhi_cntrl,
enum mhi_device_type dev_type)
{
struct mhi_ep_device *mhi_dev;
struct device *dev;
mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL);
if (!mhi_dev)
return ERR_PTR(-ENOMEM);
dev = &mhi_dev->dev;
device_initialize(dev);
dev->bus = &mhi_ep_bus_type;
dev->release = mhi_ep_release_device;
/* Controller device is always allocated first */
if (dev_type == MHI_DEVICE_CONTROLLER)
/* for MHI controller device, parent is the bus device (e.g. PCI EPF) */
dev->parent = mhi_cntrl->cntrl_dev;
else
/* for MHI client devices, parent is the MHI controller device */
dev->parent = &mhi_cntrl->mhi_dev->dev;
mhi_dev->mhi_cntrl = mhi_cntrl;
mhi_dev->dev_type = dev_type;
return mhi_dev;
}
/*
 * MHI channels are always defined in pairs, with UL as the even numbered
 * channel and DL as the odd numbered one. This function receives the UL
 * channel (primary) as ch_id and always looks at the next entry in the
 * channel list for the corresponding DL channel (secondary).
 */
static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id)
{
struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ch_id];
struct device *dev = mhi_cntrl->cntrl_dev;
struct mhi_ep_device *mhi_dev;
int ret;
	/* Check if the channel name is the same for both UL and DL */
	if (strcmp(mhi_chan->name, mhi_chan[1].name)) {
		dev_err(dev, "UL and DL channel names are not the same: (%s) != (%s)\n",
mhi_chan->name, mhi_chan[1].name);
return -EINVAL;
}
mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_XFER);
if (IS_ERR(mhi_dev))
return PTR_ERR(mhi_dev);
/* Configure primary channel */
mhi_dev->ul_chan = mhi_chan;
get_device(&mhi_dev->dev);
mhi_chan->mhi_dev = mhi_dev;
/* Configure secondary channel as well */
mhi_chan++;
mhi_dev->dl_chan = mhi_chan;
get_device(&mhi_dev->dev);
mhi_chan->mhi_dev = mhi_dev;
	/* Channel name is the same for both UL and DL */
mhi_dev->name = mhi_chan->name;
ret = dev_set_name(&mhi_dev->dev, "%s_%s",
dev_name(&mhi_cntrl->mhi_dev->dev),
mhi_dev->name);
if (ret) {
put_device(&mhi_dev->dev);
return ret;
}
ret = device_add(&mhi_dev->dev);
if (ret)
put_device(&mhi_dev->dev);
return ret;
}
static int mhi_ep_destroy_device(struct device *dev, void *data)
{
struct mhi_ep_device *mhi_dev;
struct mhi_ep_cntrl *mhi_cntrl;
struct mhi_ep_chan *ul_chan, *dl_chan;
if (dev->bus != &mhi_ep_bus_type)
return 0;
mhi_dev = to_mhi_ep_device(dev);
mhi_cntrl = mhi_dev->mhi_cntrl;
/* Only destroy devices created for channels */
if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
return 0;
ul_chan = mhi_dev->ul_chan;
dl_chan = mhi_dev->dl_chan;
if (ul_chan)
put_device(&ul_chan->mhi_dev->dev);
if (dl_chan)
put_device(&dl_chan->mhi_dev->dev);
dev_dbg(&mhi_cntrl->mhi_dev->dev, "Destroying device for chan:%s\n",
mhi_dev->name);
/* Notify the client and remove the device from MHI bus */
device_del(dev);
put_device(dev);
return 0;
}
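/*
 * Allocate the channel array for the controller and populate it with the
 * channels defined in the configuration supplied by the controller driver.
 */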
static int mhi_ep_chan_init(struct mhi_ep_cntrl *mhi_cntrl,
const struct mhi_ep_cntrl_config *config)
{
const struct mhi_ep_channel_config *ch_cfg;
struct device *dev = mhi_cntrl->cntrl_dev;
u32 chan, i;
int ret = -EINVAL;
mhi_cntrl->max_chan = config->max_channels;
/*
* Allocate max_channels supported by the MHI endpoint and populate
* only the defined channels
*/
mhi_cntrl->mhi_chan = kcalloc(mhi_cntrl->max_chan, sizeof(*mhi_cntrl->mhi_chan),
GFP_KERNEL);
if (!mhi_cntrl->mhi_chan)
return -ENOMEM;
for (i = 0; i < config->num_channels; i++) {
struct mhi_ep_chan *mhi_chan;
ch_cfg = &config->ch_cfg[i];
chan = ch_cfg->num;
if (chan >= mhi_cntrl->max_chan) {
dev_err(dev, "Channel (%u) exceeds maximum available channels (%u)\n",
chan, mhi_cntrl->max_chan);
goto error_chan_cfg;
}
		/* Bi-directional and directionless channels are not supported */
if (ch_cfg->dir == DMA_BIDIRECTIONAL || ch_cfg->dir == DMA_NONE) {
dev_err(dev, "Invalid direction (%u) for channel (%u)\n",
ch_cfg->dir, chan);
goto error_chan_cfg;
}
mhi_chan = &mhi_cntrl->mhi_chan[chan];
mhi_chan->name = ch_cfg->name;
mhi_chan->chan = chan;
mhi_chan->dir = ch_cfg->dir;
mutex_init(&mhi_chan->lock);
}
return 0;
error_chan_cfg:
kfree(mhi_cntrl->mhi_chan);
return ret;
}
/*
* Allocate channel and command rings here. Event rings will be allocated
* in mhi_ep_power_up() as the config comes from the host.
*/
int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
const struct mhi_ep_cntrl_config *config)
{
struct mhi_ep_device *mhi_dev;
int ret;
if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->mmio || !mhi_cntrl->irq)
return -EINVAL;
ret = mhi_ep_chan_init(mhi_cntrl, config);
if (ret)
return ret;
mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS, sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
if (!mhi_cntrl->mhi_cmd) {
ret = -ENOMEM;
goto err_free_ch;
}
INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker);
INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker);
INIT_WORK(&mhi_cntrl->cmd_ring_work, mhi_ep_cmd_ring_worker);
INIT_WORK(&mhi_cntrl->ch_ring_work, mhi_ep_ch_ring_worker);
mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0);
if (!mhi_cntrl->wq) {
ret = -ENOMEM;
goto err_free_cmd;
}
INIT_LIST_HEAD(&mhi_cntrl->st_transition_list);
INIT_LIST_HEAD(&mhi_cntrl->ch_db_list);
spin_lock_init(&mhi_cntrl->list_lock);
mutex_init(&mhi_cntrl->state_lock);
mutex_init(&mhi_cntrl->event_lock);
/* Set MHI version and AMSS EE before enumeration */
mhi_ep_mmio_write(mhi_cntrl, EP_MHIVER, config->mhi_version);
mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);
/* Set controller index */
ret = ida_alloc(&mhi_ep_cntrl_ida, GFP_KERNEL);
if (ret < 0)
goto err_destroy_wq;
mhi_cntrl->index = ret;
irq_set_status_flags(mhi_cntrl->irq, IRQ_NOAUTOEN);
ret = request_irq(mhi_cntrl->irq, mhi_ep_irq, IRQF_TRIGGER_HIGH,
"doorbell_irq", mhi_cntrl);
if (ret) {
dev_err(mhi_cntrl->cntrl_dev, "Failed to request Doorbell IRQ\n");
goto err_ida_free;
}
/* Allocate the controller device */
mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_CONTROLLER);
if (IS_ERR(mhi_dev)) {
dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate controller device\n");
ret = PTR_ERR(mhi_dev);
goto err_free_irq;
}
ret = dev_set_name(&mhi_dev->dev, "mhi_ep%u", mhi_cntrl->index);
if (ret)
goto err_put_dev;
mhi_dev->name = dev_name(&mhi_dev->dev);
mhi_cntrl->mhi_dev = mhi_dev;
ret = device_add(&mhi_dev->dev);
if (ret)
goto err_put_dev;
dev_dbg(&mhi_dev->dev, "MHI EP Controller registered\n");
return 0;
err_put_dev:
put_device(&mhi_dev->dev);
err_free_irq:
free_irq(mhi_cntrl->irq, mhi_cntrl);
err_ida_free:
ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index);
err_destroy_wq:
destroy_workqueue(mhi_cntrl->wq);
err_free_cmd:
kfree(mhi_cntrl->mhi_cmd);
err_free_ch:
kfree(mhi_cntrl->mhi_chan);
return ret;
}
EXPORT_SYMBOL_GPL(mhi_ep_register_controller);
/*
* It is expected that the controller drivers will power down the MHI EP stack
* using "mhi_ep_power_down()" before calling this function to unregister themselves.
*/
void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl)
{
struct mhi_ep_device *mhi_dev = mhi_cntrl->mhi_dev;
destroy_workqueue(mhi_cntrl->wq);
free_irq(mhi_cntrl->irq, mhi_cntrl);
kfree(mhi_cntrl->mhi_cmd);
kfree(mhi_cntrl->mhi_chan);
device_del(&mhi_dev->dev);
put_device(&mhi_dev->dev);
ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index);
}
EXPORT_SYMBOL_GPL(mhi_ep_unregister_controller);
static int mhi_ep_driver_probe(struct device *dev)
{
struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver);
struct mhi_ep_chan *ul_chan = mhi_dev->ul_chan;
struct mhi_ep_chan *dl_chan = mhi_dev->dl_chan;
ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;
return mhi_drv->probe(mhi_dev, mhi_dev->id);
}
static int mhi_ep_driver_remove(struct device *dev)
{
struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver);
struct mhi_result result = {};
struct mhi_ep_chan *mhi_chan;
int dir;
/* Skip if it is a controller device */
if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
return 0;
/* Disconnect the channels associated with the driver */
for (dir = 0; dir < 2; dir++) {
mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
if (!mhi_chan)
continue;
mutex_lock(&mhi_chan->lock);
/* Send channel disconnect status to the client driver */
if (mhi_chan->xfer_cb) {
result.transaction_status = -ENOTCONN;
result.bytes_xferd = 0;
mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
}
mhi_chan->state = MHI_CH_STATE_DISABLED;
mhi_chan->xfer_cb = NULL;
mutex_unlock(&mhi_chan->lock);
}
/* Remove the client driver now */
mhi_drv->remove(mhi_dev);
return 0;
}
int __mhi_ep_driver_register(struct mhi_ep_driver *mhi_drv, struct module *owner)
{
struct device_driver *driver = &mhi_drv->driver;
if (!mhi_drv->probe || !mhi_drv->remove)
return -EINVAL;
/* Client drivers should have callbacks defined for both channels */
if (!mhi_drv->ul_xfer_cb || !mhi_drv->dl_xfer_cb)
return -EINVAL;
driver->bus = &mhi_ep_bus_type;
driver->owner = owner;
driver->probe = mhi_ep_driver_probe;
driver->remove = mhi_ep_driver_remove;
return driver_register(driver);
}
EXPORT_SYMBOL_GPL(__mhi_ep_driver_register);
void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv)
{
driver_unregister(&mhi_drv->driver);
}
EXPORT_SYMBOL_GPL(mhi_ep_driver_unregister);
static int mhi_ep_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
const struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
return add_uevent_var(env, "MODALIAS=" MHI_EP_DEVICE_MODALIAS_FMT,
mhi_dev->name);
}
static int mhi_ep_match(struct device *dev, struct device_driver *drv)
{
struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(drv);
const struct mhi_device_id *id;
/*
* If the device is a controller type then there is no client driver
* associated with it
*/
if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
return 0;
for (id = mhi_drv->id_table; id->chan[0]; id++)
if (!strcmp(mhi_dev->name, id->chan)) {
mhi_dev->id = id;
return 1;
}
return 0;
}
struct bus_type mhi_ep_bus_type = {
.name = "mhi_ep",
.dev_name = "mhi_ep",
.match = mhi_ep_match,
.uevent = mhi_ep_uevent,
};
static int __init mhi_ep_init(void)
{
return bus_register(&mhi_ep_bus_type);
}
static void __exit mhi_ep_exit(void)
{
bus_unregister(&mhi_ep_bus_type);
}
postcore_initcall(mhi_ep_init);
module_exit(mhi_ep_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MHI Bus Endpoint stack");
MODULE_AUTHOR("Manivannan Sadhasivam <[email protected]>");
| linux-master | drivers/bus/mhi/ep/main.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2022 Linaro Ltd.
* Author: Manivannan Sadhasivam <[email protected]>
*/
#include <linux/mhi_ep.h>
#include "internal.h"
size_t mhi_ep_ring_addr2offset(struct mhi_ep_ring *ring, u64 ptr)
{
return (ptr - ring->rbase) / sizeof(struct mhi_ring_element);
}
static u32 mhi_ep_ring_num_elems(struct mhi_ep_ring *ring)
{
__le64 rlen;
memcpy_fromio(&rlen, (void __iomem *) &ring->ring_ctx->generic.rlen, sizeof(u64));
return le64_to_cpu(rlen) / sizeof(struct mhi_ring_element);
}
void mhi_ep_ring_inc_index(struct mhi_ep_ring *ring)
{
ring->rd_offset = (ring->rd_offset + 1) % ring->ring_size;
}
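/*
 * Cache the host ring elements from the current write offset up to 'end',
 * taking care of the wrap around of the circular ring buffer.
 */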
static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end)
{
struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
size_t start, copy_size;
int ret;
	/* Don't proceed in the case of an event ring. This can happen during mhi_ep_ring_start(). */
if (ring->type == RING_TYPE_ER)
return 0;
/* No need to cache the ring if write pointer is unmodified */
if (ring->wr_offset == end)
return 0;
start = ring->wr_offset;
if (start < end) {
copy_size = (end - start) * sizeof(struct mhi_ring_element);
ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase +
(start * sizeof(struct mhi_ring_element)),
&ring->ring_cache[start], copy_size);
if (ret < 0)
return ret;
} else {
copy_size = (ring->ring_size - start) * sizeof(struct mhi_ring_element);
ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase +
(start * sizeof(struct mhi_ring_element)),
&ring->ring_cache[start], copy_size);
if (ret < 0)
return ret;
if (end) {
ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase,
&ring->ring_cache[0],
end * sizeof(struct mhi_ring_element));
if (ret < 0)
return ret;
}
}
dev_dbg(dev, "Cached ring: start %zu end %zu size %zu\n", start, end, copy_size);
return 0;
}
static int mhi_ep_cache_ring(struct mhi_ep_ring *ring, u64 wr_ptr)
{
size_t wr_offset;
int ret;
wr_offset = mhi_ep_ring_addr2offset(ring, wr_ptr);
/* Cache the host ring till write offset */
ret = __mhi_ep_cache_ring(ring, wr_offset);
if (ret)
return ret;
ring->wr_offset = wr_offset;
return 0;
}
int mhi_ep_update_wr_offset(struct mhi_ep_ring *ring)
{
u64 wr_ptr;
wr_ptr = mhi_ep_mmio_get_db(ring);
return mhi_ep_cache_ring(ring, wr_ptr);
}
/* TODO: Support for adding multiple ring elements to the ring */
int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
{
struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
size_t old_offset = 0;
u32 num_free_elem;
__le64 rp;
int ret;
ret = mhi_ep_update_wr_offset(ring);
if (ret) {
dev_err(dev, "Error updating write pointer\n");
return ret;
}
if (ring->rd_offset < ring->wr_offset)
num_free_elem = (ring->wr_offset - ring->rd_offset) - 1;
else
num_free_elem = ((ring->ring_size - ring->rd_offset) + ring->wr_offset) - 1;
	/* Check if there is space in the ring for adding at least one element */
if (!num_free_elem) {
dev_err(dev, "No space left in the ring\n");
return -ENOSPC;
}
old_offset = ring->rd_offset;
mhi_ep_ring_inc_index(ring);
dev_dbg(dev, "Adding an element to ring at offset (%zu)\n", ring->rd_offset);
/* Update rp in ring context */
rp = cpu_to_le64(ring->rd_offset * sizeof(*el) + ring->rbase);
memcpy_toio((void __iomem *) &ring->ring_ctx->generic.rp, &rp, sizeof(u64));
ret = mhi_cntrl->write_to_host(mhi_cntrl, el, ring->rbase + (old_offset * sizeof(*el)),
sizeof(*el));
if (ret < 0)
return ret;
return 0;
}
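/* Set the ring type and the corresponding doorbell register offsets */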
void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id)
{
ring->type = type;
if (ring->type == RING_TYPE_CMD) {
ring->db_offset_h = EP_CRDB_HIGHER;
ring->db_offset_l = EP_CRDB_LOWER;
} else if (ring->type == RING_TYPE_CH) {
ring->db_offset_h = CHDB_HIGHER_n(id);
ring->db_offset_l = CHDB_LOWER_n(id);
ring->ch_id = id;
} else {
ring->db_offset_h = ERDB_HIGHER_n(id);
ring->db_offset_l = ERDB_LOWER_n(id);
}
}
int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
union mhi_ep_ring_ctx *ctx)
{
struct device *dev = &mhi_cntrl->mhi_dev->dev;
__le64 val;
int ret;
ring->mhi_cntrl = mhi_cntrl;
ring->ring_ctx = ctx;
ring->ring_size = mhi_ep_ring_num_elems(ring);
memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.rbase, sizeof(u64));
ring->rbase = le64_to_cpu(val);
if (ring->type == RING_TYPE_CH)
ring->er_index = le32_to_cpu(ring->ring_ctx->ch.erindex);
if (ring->type == RING_TYPE_ER)
ring->irq_vector = le32_to_cpu(ring->ring_ctx->ev.msivec);
/* During ring init, both rp and wp are equal */
memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.rp, sizeof(u64));
ring->rd_offset = mhi_ep_ring_addr2offset(ring, le64_to_cpu(val));
ring->wr_offset = mhi_ep_ring_addr2offset(ring, le64_to_cpu(val));
/* Allocate ring cache memory for holding the copy of host ring */
ring->ring_cache = kcalloc(ring->ring_size, sizeof(struct mhi_ring_element), GFP_KERNEL);
if (!ring->ring_cache)
return -ENOMEM;
memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.wp, sizeof(u64));
ret = mhi_ep_cache_ring(ring, le64_to_cpu(val));
if (ret) {
dev_err(dev, "Failed to cache ring\n");
kfree(ring->ring_cache);
return ret;
}
ring->started = true;
return 0;
}
void mhi_ep_ring_reset(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring)
{
ring->started = false;
kfree(ring->ring_cache);
ring->ring_cache = NULL;
}
| linux-master | drivers/bus/mhi/ep/ring.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2022 Linaro Ltd.
* Author: Manivannan Sadhasivam <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/io.h>
#include <linux/mhi_ep.h>
#include "internal.h"
u32 mhi_ep_mmio_read(struct mhi_ep_cntrl *mhi_cntrl, u32 offset)
{
return readl(mhi_cntrl->mmio + offset);
}
void mhi_ep_mmio_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 val)
{
writel(val, mhi_cntrl->mmio + offset);
}
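/*
 * Read-modify-write of a register field. For instance, passing mask = BIT(3)
 * and val = 1 sets only bit 3 of the register and leaves the rest untouched.
 */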
void mhi_ep_mmio_masked_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 mask, u32 val)
{
u32 regval;
regval = mhi_ep_mmio_read(mhi_cntrl, offset);
regval &= ~mask;
regval |= (val << __ffs(mask)) & mask;
mhi_ep_mmio_write(mhi_cntrl, offset, regval);
}
u32 mhi_ep_mmio_masked_read(struct mhi_ep_cntrl *dev, u32 offset, u32 mask)
{
u32 regval;
regval = mhi_ep_mmio_read(dev, offset);
regval &= mask;
regval >>= __ffs(mask);
return regval;
}
void mhi_ep_mmio_get_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state *state,
bool *mhi_reset)
{
u32 regval;
regval = mhi_ep_mmio_read(mhi_cntrl, EP_MHICTRL);
*state = FIELD_GET(MHICTRL_MHISTATE_MASK, regval);
*mhi_reset = !!FIELD_GET(MHICTRL_RESET_MASK, regval);
}
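/*
 * Enable or disable the doorbell interrupt of a single channel. The channel
 * doorbell masks are spread across registers of 32 bits each, so derive the
 * register index and bit position from the channel ID.
 */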
static void mhi_ep_mmio_set_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id, bool enable)
{
u32 chid_mask, chid_shift, chdb_idx, val;
chid_shift = ch_id % 32;
chid_mask = BIT(chid_shift);
chdb_idx = ch_id / 32;
val = enable ? 1 : 0;
mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CHDB_INT_MASK_n(chdb_idx), chid_mask, val);
/* Update the local copy of the channel mask */
mhi_cntrl->chdb[chdb_idx].mask &= ~chid_mask;
mhi_cntrl->chdb[chdb_idx].mask |= val << chid_shift;
}
void mhi_ep_mmio_enable_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id)
{
mhi_ep_mmio_set_chdb(mhi_cntrl, ch_id, true);
}
void mhi_ep_mmio_disable_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id)
{
mhi_ep_mmio_set_chdb(mhi_cntrl, ch_id, false);
}
static void mhi_ep_mmio_set_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl, bool enable)
{
u32 val, i;
val = enable ? MHI_CHDB_INT_MASK_n_EN_ALL : 0;
for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) {
mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_MASK_n(i), val);
mhi_cntrl->chdb[i].mask = val;
}
}
void mhi_ep_mmio_enable_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
{
mhi_ep_mmio_set_chdb_interrupts(mhi_cntrl, true);
}
static void mhi_ep_mmio_mask_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
{
mhi_ep_mmio_set_chdb_interrupts(mhi_cntrl, false);
}
bool mhi_ep_mmio_read_chdb_status_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
{
bool chdb = false;
u32 i;
for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) {
mhi_cntrl->chdb[i].status = mhi_ep_mmio_read(mhi_cntrl, MHI_CHDB_INT_STATUS_n(i));
if (mhi_cntrl->chdb[i].status)
chdb = true;
}
/* Return whether a channel doorbell interrupt occurred or not */
return chdb;
}
static void mhi_ep_mmio_set_erdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl, bool enable)
{
u32 val, i;
val = enable ? MHI_ERDB_INT_MASK_n_EN_ALL : 0;
for (i = 0; i < MHI_MASK_ROWS_EV_DB; i++)
mhi_ep_mmio_write(mhi_cntrl, MHI_ERDB_INT_MASK_n(i), val);
}
static void mhi_ep_mmio_mask_erdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
{
mhi_ep_mmio_set_erdb_interrupts(mhi_cntrl, false);
}
void mhi_ep_mmio_enable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
{
mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK,
MHI_CTRL_MHICTRL_MASK, 1);
}
void mhi_ep_mmio_disable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
{
mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK,
MHI_CTRL_MHICTRL_MASK, 0);
}
void mhi_ep_mmio_enable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
{
mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK,
MHI_CTRL_CRDB_MASK, 1);
}
void mhi_ep_mmio_disable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
{
mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK,
MHI_CTRL_CRDB_MASK, 0);
}
void mhi_ep_mmio_mask_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
{
mhi_ep_mmio_disable_ctrl_interrupt(mhi_cntrl);
mhi_ep_mmio_disable_cmdb_interrupt(mhi_cntrl);
mhi_ep_mmio_mask_chdb_interrupts(mhi_cntrl);
mhi_ep_mmio_mask_erdb_interrupts(mhi_cntrl);
}
static void mhi_ep_mmio_clear_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
{
u32 i;
for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++)
mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_CLEAR_n(i),
MHI_CHDB_INT_CLEAR_n_CLEAR_ALL);
for (i = 0; i < MHI_MASK_ROWS_EV_DB; i++)
mhi_ep_mmio_write(mhi_cntrl, MHI_ERDB_INT_CLEAR_n(i),
MHI_ERDB_INT_CLEAR_n_CLEAR_ALL);
mhi_ep_mmio_write(mhi_cntrl, MHI_CTRL_INT_CLEAR,
MHI_CTRL_INT_MMIO_WR_CLEAR |
MHI_CTRL_INT_CRDB_CLEAR |
MHI_CTRL_INT_CRDB_MHICTRL_CLEAR);
}
void mhi_ep_mmio_get_chc_base(struct mhi_ep_cntrl *mhi_cntrl)
{
u32 regval;
regval = mhi_ep_mmio_read(mhi_cntrl, EP_CCABAP_HIGHER);
mhi_cntrl->ch_ctx_host_pa = regval;
mhi_cntrl->ch_ctx_host_pa <<= 32;
regval = mhi_ep_mmio_read(mhi_cntrl, EP_CCABAP_LOWER);
mhi_cntrl->ch_ctx_host_pa |= regval;
}
void mhi_ep_mmio_get_erc_base(struct mhi_ep_cntrl *mhi_cntrl)
{
u32 regval;
regval = mhi_ep_mmio_read(mhi_cntrl, EP_ECABAP_HIGHER);
mhi_cntrl->ev_ctx_host_pa = regval;
mhi_cntrl->ev_ctx_host_pa <<= 32;
regval = mhi_ep_mmio_read(mhi_cntrl, EP_ECABAP_LOWER);
mhi_cntrl->ev_ctx_host_pa |= regval;
}
void mhi_ep_mmio_get_crc_base(struct mhi_ep_cntrl *mhi_cntrl)
{
u32 regval;
regval = mhi_ep_mmio_read(mhi_cntrl, EP_CRCBAP_HIGHER);
mhi_cntrl->cmd_ctx_host_pa = regval;
mhi_cntrl->cmd_ctx_host_pa <<= 32;
regval = mhi_ep_mmio_read(mhi_cntrl, EP_CRCBAP_LOWER);
mhi_cntrl->cmd_ctx_host_pa |= regval;
}
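/* Read the 64-bit doorbell value of a ring from its high/low register pair */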
u64 mhi_ep_mmio_get_db(struct mhi_ep_ring *ring)
{
struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
u64 db_offset;
u32 regval;
regval = mhi_ep_mmio_read(mhi_cntrl, ring->db_offset_h);
db_offset = regval;
db_offset <<= 32;
regval = mhi_ep_mmio_read(mhi_cntrl, ring->db_offset_l);
db_offset |= regval;
return db_offset;
}
void mhi_ep_mmio_set_env(struct mhi_ep_cntrl *mhi_cntrl, u32 value)
{
mhi_ep_mmio_write(mhi_cntrl, EP_BHI_EXECENV, value);
}
void mhi_ep_mmio_clear_reset(struct mhi_ep_cntrl *mhi_cntrl)
{
mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHICTRL, MHICTRL_RESET_MASK, 0);
}
void mhi_ep_mmio_reset(struct mhi_ep_cntrl *mhi_cntrl)
{
mhi_ep_mmio_write(mhi_cntrl, EP_MHICTRL, 0);
mhi_ep_mmio_write(mhi_cntrl, EP_MHISTATUS, 0);
mhi_ep_mmio_clear_interrupts(mhi_cntrl);
}
void mhi_ep_mmio_init(struct mhi_ep_cntrl *mhi_cntrl)
{
u32 regval;
mhi_cntrl->chdb_offset = mhi_ep_mmio_read(mhi_cntrl, EP_CHDBOFF);
mhi_cntrl->erdb_offset = mhi_ep_mmio_read(mhi_cntrl, EP_ERDBOFF);
regval = mhi_ep_mmio_read(mhi_cntrl, EP_MHICFG);
mhi_cntrl->event_rings = FIELD_GET(MHICFG_NER_MASK, regval);
mhi_cntrl->hw_event_rings = FIELD_GET(MHICFG_NHWER_MASK, regval);
mhi_ep_mmio_reset(mhi_cntrl);
}
void mhi_ep_mmio_update_ner(struct mhi_ep_cntrl *mhi_cntrl)
{
u32 regval;
regval = mhi_ep_mmio_read(mhi_cntrl, EP_MHICFG);
mhi_cntrl->event_rings = FIELD_GET(MHICFG_NER_MASK, regval);
mhi_cntrl->hw_event_rings = FIELD_GET(MHICFG_NHWER_MASK, regval);
}
| linux-master | drivers/bus/mhi/ep/mmio.c |
// SPDX-License-Identifier: GPL-2.0
/*
* fsl-mc object allocator driver
*
* Copyright (C) 2013-2016 Freescale Semiconductor, Inc.
*
*/
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/fsl/mc.h>
#include "fsl-mc-private.h"
static bool __must_check fsl_mc_is_allocatable(struct fsl_mc_device *mc_dev)
{
return is_fsl_mc_bus_dpbp(mc_dev) ||
is_fsl_mc_bus_dpmcp(mc_dev) ||
is_fsl_mc_bus_dpcon(mc_dev);
}
/**
* fsl_mc_resource_pool_add_device - add allocatable object to a resource
* pool of a given fsl-mc bus
*
* @mc_bus: pointer to the fsl-mc bus
* @pool_type: pool type
* @mc_dev: pointer to allocatable fsl-mc device
*/
static int __must_check fsl_mc_resource_pool_add_device(struct fsl_mc_bus
*mc_bus,
enum fsl_mc_pool_type
pool_type,
struct fsl_mc_device
*mc_dev)
{
struct fsl_mc_resource_pool *res_pool;
struct fsl_mc_resource *resource;
struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
int error = -EINVAL;
if (pool_type < 0 || pool_type >= FSL_MC_NUM_POOL_TYPES)
goto out;
if (!fsl_mc_is_allocatable(mc_dev))
goto out;
if (mc_dev->resource)
goto out;
res_pool = &mc_bus->resource_pools[pool_type];
if (res_pool->type != pool_type)
goto out;
if (res_pool->mc_bus != mc_bus)
goto out;
mutex_lock(&res_pool->mutex);
if (res_pool->max_count < 0)
goto out_unlock;
if (res_pool->free_count < 0 ||
res_pool->free_count > res_pool->max_count)
goto out_unlock;
resource = devm_kzalloc(&mc_bus_dev->dev, sizeof(*resource),
GFP_KERNEL);
if (!resource) {
error = -ENOMEM;
dev_err(&mc_bus_dev->dev,
"Failed to allocate memory for fsl_mc_resource\n");
goto out_unlock;
}
resource->type = pool_type;
resource->id = mc_dev->obj_desc.id;
resource->data = mc_dev;
resource->parent_pool = res_pool;
INIT_LIST_HEAD(&resource->node);
list_add_tail(&resource->node, &res_pool->free_list);
mc_dev->resource = resource;
res_pool->free_count++;
res_pool->max_count++;
error = 0;
out_unlock:
mutex_unlock(&res_pool->mutex);
out:
return error;
}
/**
* fsl_mc_resource_pool_remove_device - remove an allocatable device from a
* resource pool
*
* @mc_dev: pointer to allocatable fsl-mc device
*
* It permanently removes an allocatable fsl-mc device from the resource
* pool. It's an error if the device is in use.
*/
static int __must_check fsl_mc_resource_pool_remove_device(struct fsl_mc_device
*mc_dev)
{
struct fsl_mc_device *mc_bus_dev;
struct fsl_mc_bus *mc_bus;
struct fsl_mc_resource_pool *res_pool;
struct fsl_mc_resource *resource;
int error = -EINVAL;
mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
mc_bus = to_fsl_mc_bus(mc_bus_dev);
resource = mc_dev->resource;
if (!resource || resource->data != mc_dev) {
dev_err(&mc_bus_dev->dev, "resource mismatch\n");
goto out;
}
res_pool = resource->parent_pool;
if (res_pool != &mc_bus->resource_pools[resource->type]) {
dev_err(&mc_bus_dev->dev, "pool mismatch\n");
goto out;
}
mutex_lock(&res_pool->mutex);
if (res_pool->max_count <= 0) {
dev_err(&mc_bus_dev->dev, "max_count underflow\n");
goto out_unlock;
}
if (res_pool->free_count <= 0 ||
res_pool->free_count > res_pool->max_count) {
dev_err(&mc_bus_dev->dev, "free_count mismatch\n");
goto out_unlock;
}
/*
* If the device is currently allocated, its resource is not
* in the free list and thus, the device cannot be removed.
*/
if (list_empty(&resource->node)) {
error = -EBUSY;
dev_err(&mc_bus_dev->dev,
"Device %s cannot be removed from resource pool\n",
dev_name(&mc_dev->dev));
goto out_unlock;
}
list_del_init(&resource->node);
res_pool->free_count--;
res_pool->max_count--;
devm_kfree(&mc_bus_dev->dev, resource);
mc_dev->resource = NULL;
error = 0;
out_unlock:
mutex_unlock(&res_pool->mutex);
out:
return error;
}
static const char *const fsl_mc_pool_type_strings[] = {
[FSL_MC_POOL_DPMCP] = "dpmcp",
[FSL_MC_POOL_DPBP] = "dpbp",
[FSL_MC_POOL_DPCON] = "dpcon",
[FSL_MC_POOL_IRQ] = "irq",
};
static int __must_check object_type_to_pool_type(const char *object_type,
enum fsl_mc_pool_type
*pool_type)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(fsl_mc_pool_type_strings); i++) {
if (strcmp(object_type, fsl_mc_pool_type_strings[i]) == 0) {
*pool_type = i;
return 0;
}
}
return -EINVAL;
}
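/**
 * fsl_mc_resource_allocate - grab the first free resource of the given
 * pool type from an fsl-mc bus instance
 *
 * @mc_bus: pointer to the fsl-mc bus
 * @pool_type: pool type
 * @new_resource: pointer to area where the pointer to the allocated
 * resource is to be returned
 */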
int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus,
enum fsl_mc_pool_type pool_type,
struct fsl_mc_resource **new_resource)
{
struct fsl_mc_resource_pool *res_pool;
struct fsl_mc_resource *resource;
struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
int error = -EINVAL;
BUILD_BUG_ON(ARRAY_SIZE(fsl_mc_pool_type_strings) !=
FSL_MC_NUM_POOL_TYPES);
*new_resource = NULL;
if (pool_type < 0 || pool_type >= FSL_MC_NUM_POOL_TYPES)
goto out;
res_pool = &mc_bus->resource_pools[pool_type];
if (res_pool->mc_bus != mc_bus)
goto out;
mutex_lock(&res_pool->mutex);
resource = list_first_entry_or_null(&res_pool->free_list,
struct fsl_mc_resource, node);
if (!resource) {
error = -ENXIO;
dev_err(&mc_bus_dev->dev,
"No more resources of type %s left\n",
fsl_mc_pool_type_strings[pool_type]);
goto out_unlock;
}
if (resource->type != pool_type)
goto out_unlock;
if (resource->parent_pool != res_pool)
goto out_unlock;
if (res_pool->free_count <= 0 ||
res_pool->free_count > res_pool->max_count)
goto out_unlock;
list_del_init(&resource->node);
res_pool->free_count--;
error = 0;
out_unlock:
mutex_unlock(&res_pool->mutex);
*new_resource = resource;
out:
return error;
}
EXPORT_SYMBOL_GPL(fsl_mc_resource_allocate);
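/**
 * fsl_mc_resource_free - return a resource to the free list of its
 * parent pool
 *
 * @resource: pointer to the resource to be freed
 */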
void fsl_mc_resource_free(struct fsl_mc_resource *resource)
{
struct fsl_mc_resource_pool *res_pool;
res_pool = resource->parent_pool;
if (resource->type != res_pool->type)
return;
mutex_lock(&res_pool->mutex);
if (res_pool->free_count < 0 ||
res_pool->free_count >= res_pool->max_count)
goto out_unlock;
if (!list_empty(&resource->node))
goto out_unlock;
list_add_tail(&resource->node, &res_pool->free_list);
res_pool->free_count++;
out_unlock:
mutex_unlock(&res_pool->mutex);
}
EXPORT_SYMBOL_GPL(fsl_mc_resource_free);
/**
* fsl_mc_object_allocate - Allocates an fsl-mc object of the given
* pool type from a given fsl-mc bus instance
*
* @mc_dev: fsl-mc device which is used in conjunction with the
* allocated object
* @pool_type: pool type
* @new_mc_adev: pointer to area where the pointer to the allocated device
* is to be returned
*
* Allocatable objects are always used in conjunction with some functional
* device. This function allocates an object of the specified type from
* the DPRC containing the functional device.
*
* NOTE: pool_type must be different from FSL_MC_POOL_MCP, since MC
* portals are allocated using fsl_mc_portal_allocate(), instead of
* this function.
*/
int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev,
enum fsl_mc_pool_type pool_type,
struct fsl_mc_device **new_mc_adev)
{
struct fsl_mc_device *mc_bus_dev;
struct fsl_mc_bus *mc_bus;
struct fsl_mc_device *mc_adev;
int error = -EINVAL;
struct fsl_mc_resource *resource = NULL;
*new_mc_adev = NULL;
if (mc_dev->flags & FSL_MC_IS_DPRC)
goto error;
if (!dev_is_fsl_mc(mc_dev->dev.parent))
goto error;
if (pool_type == FSL_MC_POOL_DPMCP)
goto error;
mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
mc_bus = to_fsl_mc_bus(mc_bus_dev);
error = fsl_mc_resource_allocate(mc_bus, pool_type, &resource);
if (error < 0)
goto error;
mc_adev = resource->data;
if (!mc_adev) {
error = -EINVAL;
goto error;
}
mc_adev->consumer_link = device_link_add(&mc_dev->dev,
&mc_adev->dev,
DL_FLAG_AUTOREMOVE_CONSUMER);
if (!mc_adev->consumer_link) {
error = -EINVAL;
goto error;
}
*new_mc_adev = mc_adev;
return 0;
error:
if (resource)
fsl_mc_resource_free(resource);
return error;
}
EXPORT_SYMBOL_GPL(fsl_mc_object_allocate);
/**
* fsl_mc_object_free - Returns an fsl-mc object to the resource
* pool where it came from.
* @mc_adev: Pointer to the fsl-mc device
*/
void fsl_mc_object_free(struct fsl_mc_device *mc_adev)
{
struct fsl_mc_resource *resource;
resource = mc_adev->resource;
if (resource->type == FSL_MC_POOL_DPMCP)
return;
if (resource->data != mc_adev)
return;
fsl_mc_resource_free(resource);
mc_adev->consumer_link = NULL;
}
EXPORT_SYMBOL_GPL(fsl_mc_object_free);
/*
* A DPRC and the devices in the DPRC all share the same GIC-ITS device
* ID. A block of IRQs is pre-allocated and maintained in a pool
* from which devices can allocate them when needed.
*/
/*
* Initialize the interrupt pool associated with an fsl-mc bus.
* It allocates a block of IRQs from the GIC-ITS.
*/
int fsl_mc_populate_irq_pool(struct fsl_mc_device *mc_bus_dev,
unsigned int irq_count)
{
unsigned int i;
struct fsl_mc_device_irq *irq_resources;
struct fsl_mc_device_irq *mc_dev_irq;
int error;
struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
struct fsl_mc_resource_pool *res_pool =
&mc_bus->resource_pools[FSL_MC_POOL_IRQ];
/* do nothing if the IRQ pool is already populated */
if (mc_bus->irq_resources)
return 0;
if (irq_count == 0 ||
irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS)
return -EINVAL;
error = fsl_mc_msi_domain_alloc_irqs(&mc_bus_dev->dev, irq_count);
if (error < 0)
return error;
irq_resources = devm_kcalloc(&mc_bus_dev->dev,
irq_count, sizeof(*irq_resources),
GFP_KERNEL);
if (!irq_resources) {
error = -ENOMEM;
goto cleanup_msi_irqs;
}
for (i = 0; i < irq_count; i++) {
mc_dev_irq = &irq_resources[i];
/*
* NOTE: This mc_dev_irq's MSI addr/value pair will be set
* by the fsl_mc_msi_write_msg() callback
*/
mc_dev_irq->resource.type = res_pool->type;
mc_dev_irq->resource.data = mc_dev_irq;
mc_dev_irq->resource.parent_pool = res_pool;
mc_dev_irq->virq = msi_get_virq(&mc_bus_dev->dev, i);
mc_dev_irq->resource.id = mc_dev_irq->virq;
INIT_LIST_HEAD(&mc_dev_irq->resource.node);
list_add_tail(&mc_dev_irq->resource.node, &res_pool->free_list);
}
res_pool->max_count = irq_count;
res_pool->free_count = irq_count;
mc_bus->irq_resources = irq_resources;
return 0;
cleanup_msi_irqs:
fsl_mc_msi_domain_free_irqs(&mc_bus_dev->dev);
return error;
}
EXPORT_SYMBOL_GPL(fsl_mc_populate_irq_pool);
/*
* Teardown the interrupt pool associated with an fsl-mc bus.
* It frees the IRQs that were allocated to the pool, back to the GIC-ITS.
*/
void fsl_mc_cleanup_irq_pool(struct fsl_mc_device *mc_bus_dev)
{
struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
struct fsl_mc_resource_pool *res_pool =
&mc_bus->resource_pools[FSL_MC_POOL_IRQ];
if (!mc_bus->irq_resources)
return;
if (res_pool->max_count == 0)
return;
if (res_pool->free_count != res_pool->max_count)
return;
INIT_LIST_HEAD(&res_pool->free_list);
res_pool->max_count = 0;
res_pool->free_count = 0;
mc_bus->irq_resources = NULL;
fsl_mc_msi_domain_free_irqs(&mc_bus_dev->dev);
}
EXPORT_SYMBOL_GPL(fsl_mc_cleanup_irq_pool);
/*
* Allocate the IRQs required by a given fsl-mc device.
*/
int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev)
{
int i;
int irq_count;
int res_allocated_count = 0;
int error = -EINVAL;
struct fsl_mc_device_irq **irqs = NULL;
struct fsl_mc_bus *mc_bus;
struct fsl_mc_resource_pool *res_pool;
if (mc_dev->irqs)
return -EINVAL;
irq_count = mc_dev->obj_desc.irq_count;
if (irq_count == 0)
return -EINVAL;
if (is_fsl_mc_bus_dprc(mc_dev))
mc_bus = to_fsl_mc_bus(mc_dev);
else
mc_bus = to_fsl_mc_bus(to_fsl_mc_device(mc_dev->dev.parent));
if (!mc_bus->irq_resources)
return -EINVAL;
res_pool = &mc_bus->resource_pools[FSL_MC_POOL_IRQ];
if (res_pool->free_count < irq_count) {
dev_err(&mc_dev->dev,
"Not able to allocate %u irqs for device\n", irq_count);
return -ENOSPC;
}
irqs = devm_kcalloc(&mc_dev->dev, irq_count, sizeof(irqs[0]),
GFP_KERNEL);
if (!irqs)
return -ENOMEM;
for (i = 0; i < irq_count; i++) {
struct fsl_mc_resource *resource;
error = fsl_mc_resource_allocate(mc_bus, FSL_MC_POOL_IRQ,
&resource);
if (error < 0)
goto error_resource_alloc;
irqs[i] = to_fsl_mc_irq(resource);
res_allocated_count++;
irqs[i]->mc_dev = mc_dev;
irqs[i]->dev_irq_index = i;
}
mc_dev->irqs = irqs;
return 0;
error_resource_alloc:
for (i = 0; i < res_allocated_count; i++) {
irqs[i]->mc_dev = NULL;
fsl_mc_resource_free(&irqs[i]->resource);
}
return error;
}
EXPORT_SYMBOL_GPL(fsl_mc_allocate_irqs);
/*
* Frees the IRQs that were allocated for an fsl-mc device.
*/
void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev)
{
int i;
int irq_count;
struct fsl_mc_bus *mc_bus;
struct fsl_mc_device_irq **irqs = mc_dev->irqs;
if (!irqs)
return;
irq_count = mc_dev->obj_desc.irq_count;
if (is_fsl_mc_bus_dprc(mc_dev))
mc_bus = to_fsl_mc_bus(mc_dev);
else
mc_bus = to_fsl_mc_bus(to_fsl_mc_device(mc_dev->dev.parent));
if (!mc_bus->irq_resources)
return;
for (i = 0; i < irq_count; i++) {
irqs[i]->mc_dev = NULL;
fsl_mc_resource_free(&irqs[i]->resource);
}
mc_dev->irqs = NULL;
}
EXPORT_SYMBOL_GPL(fsl_mc_free_irqs);
void fsl_mc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
{
int pool_type;
struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
for (pool_type = 0; pool_type < FSL_MC_NUM_POOL_TYPES; pool_type++) {
struct fsl_mc_resource_pool *res_pool =
&mc_bus->resource_pools[pool_type];
res_pool->type = pool_type;
res_pool->max_count = 0;
res_pool->free_count = 0;
res_pool->mc_bus = mc_bus;
INIT_LIST_HEAD(&res_pool->free_list);
mutex_init(&res_pool->mutex);
}
}
static void fsl_mc_cleanup_resource_pool(struct fsl_mc_device *mc_bus_dev,
enum fsl_mc_pool_type pool_type)
{
struct fsl_mc_resource *resource;
struct fsl_mc_resource *next;
struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
struct fsl_mc_resource_pool *res_pool =
&mc_bus->resource_pools[pool_type];
list_for_each_entry_safe(resource, next, &res_pool->free_list, node)
devm_kfree(&mc_bus_dev->dev, resource);
}
void fsl_mc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
{
int pool_type;
for (pool_type = 0; pool_type < FSL_MC_NUM_POOL_TYPES; pool_type++)
fsl_mc_cleanup_resource_pool(mc_bus_dev, pool_type);
}
/*
* fsl_mc_allocator_probe - callback invoked when an allocatable device is
* being added to the system
*/
static int fsl_mc_allocator_probe(struct fsl_mc_device *mc_dev)
{
enum fsl_mc_pool_type pool_type;
struct fsl_mc_device *mc_bus_dev;
struct fsl_mc_bus *mc_bus;
int error;
if (!fsl_mc_is_allocatable(mc_dev))
return -EINVAL;
mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
if (!dev_is_fsl_mc(&mc_bus_dev->dev))
return -EINVAL;
mc_bus = to_fsl_mc_bus(mc_bus_dev);
error = object_type_to_pool_type(mc_dev->obj_desc.type, &pool_type);
if (error < 0)
return error;
error = fsl_mc_resource_pool_add_device(mc_bus, pool_type, mc_dev);
if (error < 0)
return error;
dev_dbg(&mc_dev->dev,
"Allocatable fsl-mc device bound to fsl_mc_allocator driver");
return 0;
}
/*
* fsl_mc_allocator_remove - callback invoked when an allocatable device is
* being removed from the system
*/
static void fsl_mc_allocator_remove(struct fsl_mc_device *mc_dev)
{
int error;
if (mc_dev->resource) {
error = fsl_mc_resource_pool_remove_device(mc_dev);
if (error < 0)
return;
}
dev_dbg(&mc_dev->dev,
"Allocatable fsl-mc device unbound from fsl_mc_allocator driver");
}
static const struct fsl_mc_device_id match_id_table[] = {
{
.vendor = FSL_MC_VENDOR_FREESCALE,
.obj_type = "dpbp",
},
{
.vendor = FSL_MC_VENDOR_FREESCALE,
.obj_type = "dpmcp",
},
{
.vendor = FSL_MC_VENDOR_FREESCALE,
.obj_type = "dpcon",
},
{.vendor = 0x0},
};
static struct fsl_mc_driver fsl_mc_allocator_driver = {
.driver = {
.name = "fsl_mc_allocator",
.pm = NULL,
},
.match_id_table = match_id_table,
.probe = fsl_mc_allocator_probe,
.remove = fsl_mc_allocator_remove,
};
int __init fsl_mc_allocator_driver_init(void)
{
return fsl_mc_driver_register(&fsl_mc_allocator_driver);
}
void fsl_mc_allocator_driver_exit(void)
{
fsl_mc_driver_unregister(&fsl_mc_allocator_driver);
}
| linux-master | drivers/bus/fsl-mc/fsl-mc-allocator.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Management Complex (MC) userspace support
*
* Copyright 2021 NXP
*
*/
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
#include "fsl-mc-private.h"
struct uapi_priv_data {
struct fsl_mc_uapi *uapi;
struct fsl_mc_io *mc_io;
};
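/*
 * struct fsl_mc_cmd_desc - describes an MC command accepted through the uapi
 * @cmdid_value: expected value of the command ID
 * @cmdid_mask: command ID bits compared against @cmdid_value
 * @size: last valid byte of the command; everything beyond it must be zero
 * @token: whether the command must carry an object token
 * @flags: extra checks to perform (module ID validation, CAP_NET_ADMIN)
 */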
struct fsl_mc_cmd_desc {
u16 cmdid_value;
u16 cmdid_mask;
int size;
bool token;
int flags;
};
#define FSL_MC_CHECK_MODULE_ID BIT(0)
#define FSL_MC_CAP_NET_ADMIN_NEEDED BIT(1)
enum fsl_mc_cmd_index {
DPDBG_DUMP = 0,
DPDBG_SET,
DPRC_GET_CONTAINER_ID,
DPRC_CREATE_CONT,
DPRC_DESTROY_CONT,
DPRC_ASSIGN,
DPRC_UNASSIGN,
DPRC_GET_OBJ_COUNT,
DPRC_GET_OBJ,
DPRC_GET_RES_COUNT,
DPRC_GET_RES_IDS,
DPRC_SET_OBJ_LABEL,
DPRC_SET_LOCKED,
DPRC_CONNECT,
DPRC_DISCONNECT,
DPRC_GET_POOL,
DPRC_GET_POOL_COUNT,
DPRC_GET_CONNECTION,
DPCI_GET_LINK_STATE,
DPCI_GET_PEER_ATTR,
DPAIOP_GET_SL_VERSION,
DPAIOP_GET_STATE,
DPMNG_GET_VERSION,
DPSECI_GET_TX_QUEUE,
DPMAC_GET_COUNTER,
DPMAC_GET_MAC_ADDR,
DPNI_SET_PRIM_MAC,
DPNI_GET_PRIM_MAC,
DPNI_GET_STATISTICS,
DPNI_GET_LINK_STATE,
DPNI_GET_MAX_FRAME_LENGTH,
DPSW_GET_TAILDROP,
DPSW_SET_TAILDROP,
DPSW_IF_GET_COUNTER,
DPSW_IF_GET_MAX_FRAME_LENGTH,
DPDMUX_GET_COUNTER,
DPDMUX_IF_GET_MAX_FRAME_LENGTH,
GET_ATTR,
GET_IRQ_MASK,
GET_IRQ_STATUS,
CLOSE,
OPEN,
GET_API_VERSION,
DESTROY,
CREATE,
};
static struct fsl_mc_cmd_desc fsl_mc_accepted_cmds[] = {
[DPDBG_DUMP] = {
.cmdid_value = 0x1300,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 28,
},
[DPDBG_SET] = {
.cmdid_value = 0x1400,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 28,
},
[DPRC_GET_CONTAINER_ID] = {
.cmdid_value = 0x8300,
.cmdid_mask = 0xFFF0,
.token = false,
.size = 8,
},
[DPRC_CREATE_CONT] = {
.cmdid_value = 0x1510,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 40,
.flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
},
[DPRC_DESTROY_CONT] = {
.cmdid_value = 0x1520,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 12,
.flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
},
[DPRC_ASSIGN] = {
.cmdid_value = 0x1570,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 40,
.flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
},
[DPRC_UNASSIGN] = {
.cmdid_value = 0x1580,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 40,
.flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
},
[DPRC_GET_OBJ_COUNT] = {
.cmdid_value = 0x1590,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 16,
},
[DPRC_GET_OBJ] = {
.cmdid_value = 0x15A0,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 12,
},
[DPRC_GET_RES_COUNT] = {
.cmdid_value = 0x15B0,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 32,
},
[DPRC_GET_RES_IDS] = {
.cmdid_value = 0x15C0,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 40,
},
[DPRC_SET_OBJ_LABEL] = {
.cmdid_value = 0x1610,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 48,
.flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
},
[DPRC_SET_LOCKED] = {
.cmdid_value = 0x16B0,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 16,
.flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
},
[DPRC_CONNECT] = {
.cmdid_value = 0x1670,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 56,
.flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
},
[DPRC_DISCONNECT] = {
.cmdid_value = 0x1680,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 32,
.flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
},
[DPRC_GET_POOL] = {
.cmdid_value = 0x1690,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 12,
},
[DPRC_GET_POOL_COUNT] = {
.cmdid_value = 0x16A0,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 8,
},
[DPRC_GET_CONNECTION] = {
.cmdid_value = 0x16C0,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 32,
},
[DPCI_GET_LINK_STATE] = {
.cmdid_value = 0x0E10,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 8,
},
[DPCI_GET_PEER_ATTR] = {
.cmdid_value = 0x0E20,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 8,
},
[DPAIOP_GET_SL_VERSION] = {
.cmdid_value = 0x2820,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 8,
},
[DPAIOP_GET_STATE] = {
.cmdid_value = 0x2830,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 8,
},
[DPMNG_GET_VERSION] = {
.cmdid_value = 0x8310,
.cmdid_mask = 0xFFF0,
.token = false,
.size = 8,
},
[DPSECI_GET_TX_QUEUE] = {
.cmdid_value = 0x1970,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 14,
},
[DPMAC_GET_COUNTER] = {
.cmdid_value = 0x0c40,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 9,
},
[DPMAC_GET_MAC_ADDR] = {
.cmdid_value = 0x0c50,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 8,
},
[DPNI_SET_PRIM_MAC] = {
.cmdid_value = 0x2240,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 16,
.flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
},
[DPNI_GET_PRIM_MAC] = {
.cmdid_value = 0x2250,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 8,
},
[DPNI_GET_STATISTICS] = {
.cmdid_value = 0x25D0,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 10,
},
[DPNI_GET_LINK_STATE] = {
.cmdid_value = 0x2150,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 8,
},
[DPNI_GET_MAX_FRAME_LENGTH] = {
.cmdid_value = 0x2170,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 8,
},
[DPSW_GET_TAILDROP] = {
.cmdid_value = 0x0A80,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 14,
},
[DPSW_SET_TAILDROP] = {
.cmdid_value = 0x0A90,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 24,
.flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
},
[DPSW_IF_GET_COUNTER] = {
.cmdid_value = 0x0340,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 11,
},
[DPSW_IF_GET_MAX_FRAME_LENGTH] = {
.cmdid_value = 0x0450,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 10,
},
[DPDMUX_GET_COUNTER] = {
.cmdid_value = 0x0b20,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 11,
},
[DPDMUX_IF_GET_MAX_FRAME_LENGTH] = {
.cmdid_value = 0x0a20,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 10,
},
[GET_ATTR] = {
.cmdid_value = 0x0040,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 8,
},
[GET_IRQ_MASK] = {
.cmdid_value = 0x0150,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 13,
},
[GET_IRQ_STATUS] = {
.cmdid_value = 0x0160,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 13,
},
[CLOSE] = {
.cmdid_value = 0x8000,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 8,
},
/* Common commands amongst all types of objects. Must be checked last. */
[OPEN] = {
.cmdid_value = 0x8000,
.cmdid_mask = 0xFC00,
.token = false,
.size = 12,
.flags = FSL_MC_CHECK_MODULE_ID,
},
[GET_API_VERSION] = {
.cmdid_value = 0xA000,
.cmdid_mask = 0xFC00,
.token = false,
.size = 8,
.flags = FSL_MC_CHECK_MODULE_ID,
},
[DESTROY] = {
.cmdid_value = 0x9800,
.cmdid_mask = 0xFC00,
.token = true,
.size = 12,
.flags = FSL_MC_CHECK_MODULE_ID | FSL_MC_CAP_NET_ADMIN_NEEDED,
},
[CREATE] = {
.cmdid_value = 0x9000,
.cmdid_mask = 0xFC00,
.token = true,
.size = 64,
.flags = FSL_MC_CHECK_MODULE_ID | FSL_MC_CAP_NET_ADMIN_NEEDED,
},
};
#define FSL_MC_NUM_ACCEPTED_CMDS ARRAY_SIZE(fsl_mc_accepted_cmds)
#define FSL_MC_MAX_MODULE_ID 0x10
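/*
 * Validate a user supplied MC command against the accepted command list:
 * check the command ID, the size, the token and, where required, the
 * module ID and the CAP_NET_ADMIN capability.
 */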
static int fsl_mc_command_check(struct fsl_mc_device *mc_dev,
struct fsl_mc_command *mc_cmd)
{
struct fsl_mc_cmd_desc *desc = NULL;
int mc_cmd_max_size, i;
bool token_provided;
u16 cmdid, module_id;
char *mc_cmd_end;
char sum = 0;
/* Check if this is an accepted MC command */
cmdid = mc_cmd_hdr_read_cmdid(mc_cmd);
for (i = 0; i < FSL_MC_NUM_ACCEPTED_CMDS; i++) {
desc = &fsl_mc_accepted_cmds[i];
if ((cmdid & desc->cmdid_mask) == desc->cmdid_value)
break;
}
if (i == FSL_MC_NUM_ACCEPTED_CMDS) {
dev_err(&mc_dev->dev, "MC command 0x%04x: cmdid not accepted\n", cmdid);
return -EACCES;
}
/* Check if the size of the command is honored. Anything beyond the
* last valid byte of the command should be zeroed.
*/
mc_cmd_max_size = sizeof(*mc_cmd);
mc_cmd_end = ((char *)mc_cmd) + desc->size;
for (i = desc->size; i < mc_cmd_max_size; i++)
sum |= *mc_cmd_end++;
if (sum) {
dev_err(&mc_dev->dev, "MC command 0x%04x: garbage beyond max size of %d bytes!\n",
cmdid, desc->size);
return -EACCES;
}
/* Some MC commands request a token to be passed so that object
* identification is possible. Check if the token passed in the command
* is as expected.
*/
token_provided = mc_cmd_hdr_read_token(mc_cmd) ? true : false;
if (token_provided != desc->token) {
dev_err(&mc_dev->dev, "MC command 0x%04x: token 0x%04x is invalid!\n",
cmdid, mc_cmd_hdr_read_token(mc_cmd));
return -EACCES;
}
/* If needed, check if the module ID passed is valid */
if (desc->flags & FSL_MC_CHECK_MODULE_ID) {
/* The module ID is represented by bits [4:9] from the cmdid */
module_id = (cmdid & GENMASK(9, 4)) >> 4;
if (module_id == 0 || module_id > FSL_MC_MAX_MODULE_ID) {
dev_err(&mc_dev->dev, "MC command 0x%04x: unknown module ID 0x%x\n",
cmdid, module_id);
return -EACCES;
}
}
/* Some commands alter how hardware resources are managed. For these
* commands, check for CAP_NET_ADMIN.
*/
if (desc->flags & FSL_MC_CAP_NET_ADMIN_NEEDED) {
if (!capable(CAP_NET_ADMIN)) {
dev_err(&mc_dev->dev, "MC command 0x%04x: needs CAP_NET_ADMIN!\n",
cmdid);
return -EPERM;
}
}
return 0;
}
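/*
 * Worked example for the module ID check above (illustrative values):
 * for a hypothetical cmdid of 0x8091, bits [9:4] yield
 * (0x8091 & GENMASK(9, 4)) >> 4 == 0x9, which satisfies
 * 0 < module_id <= FSL_MC_MAX_MODULE_ID and is accepted.
 */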
static int fsl_mc_uapi_send_command(struct fsl_mc_device *mc_dev, unsigned long arg,
struct fsl_mc_io *mc_io)
{
struct fsl_mc_command mc_cmd;
int error;
error = copy_from_user(&mc_cmd, (void __user *)arg, sizeof(mc_cmd));
if (error)
return -EFAULT;
error = fsl_mc_command_check(mc_dev, &mc_cmd);
if (error)
return error;
error = mc_send_command(mc_io, &mc_cmd);
if (error)
return error;
error = copy_to_user((void __user *)arg, &mc_cmd, sizeof(mc_cmd));
if (error)
return -EFAULT;
return 0;
}
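/*
 * Hedged user-space sketch of driving this path (the device node name is
 * hypothetical; error handling and headers such as <sys/ioctl.h> are
 * omitted for brevity):
 *
 *	struct fsl_mc_command cmd = { 0 };
 *	int fd = open("/dev/dprc.1", O_RDWR);
 *
 *	cmd.header = ...;		// filled per the MC command format
 *	ioctl(fd, FSL_MC_SEND_MC_COMMAND, &cmd);
 *	close(fd);
 */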
static int fsl_mc_uapi_dev_open(struct inode *inode, struct file *filep)
{
struct fsl_mc_device *root_mc_device;
struct uapi_priv_data *priv_data;
struct fsl_mc_io *dynamic_mc_io;
struct fsl_mc_uapi *mc_uapi;
struct fsl_mc_bus *mc_bus;
int error;
priv_data = kzalloc(sizeof(*priv_data), GFP_KERNEL);
if (!priv_data)
return -ENOMEM;
mc_uapi = container_of(filep->private_data, struct fsl_mc_uapi, misc);
mc_bus = container_of(mc_uapi, struct fsl_mc_bus, uapi_misc);
root_mc_device = &mc_bus->mc_dev;
mutex_lock(&mc_uapi->mutex);
if (!mc_uapi->local_instance_in_use) {
priv_data->mc_io = mc_uapi->static_mc_io;
mc_uapi->local_instance_in_use = 1;
} else {
error = fsl_mc_portal_allocate(root_mc_device, 0,
&dynamic_mc_io);
if (error) {
dev_dbg(&root_mc_device->dev,
"Could not allocate MC portal\n");
goto error_portal_allocate;
}
priv_data->mc_io = dynamic_mc_io;
}
priv_data->uapi = mc_uapi;
filep->private_data = priv_data;
mutex_unlock(&mc_uapi->mutex);
return 0;
error_portal_allocate:
mutex_unlock(&mc_uapi->mutex);
kfree(priv_data);
return error;
}
static int fsl_mc_uapi_dev_release(struct inode *inode, struct file *filep)
{
struct uapi_priv_data *priv_data;
struct fsl_mc_uapi *mc_uapi;
struct fsl_mc_io *mc_io;
priv_data = filep->private_data;
mc_uapi = priv_data->uapi;
mc_io = priv_data->mc_io;
mutex_lock(&mc_uapi->mutex);
if (mc_io == mc_uapi->static_mc_io)
mc_uapi->local_instance_in_use = 0;
else
fsl_mc_portal_free(mc_io);
kfree(filep->private_data);
filep->private_data = NULL;
mutex_unlock(&mc_uapi->mutex);
return 0;
}
static long fsl_mc_uapi_dev_ioctl(struct file *file,
unsigned int cmd,
unsigned long arg)
{
struct uapi_priv_data *priv_data = file->private_data;
struct fsl_mc_device *root_mc_device;
struct fsl_mc_bus *mc_bus;
int error;
mc_bus = container_of(priv_data->uapi, struct fsl_mc_bus, uapi_misc);
root_mc_device = &mc_bus->mc_dev;
switch (cmd) {
case FSL_MC_SEND_MC_COMMAND:
error = fsl_mc_uapi_send_command(root_mc_device, arg, priv_data->mc_io);
break;
default:
dev_dbg(&root_mc_device->dev, "unexpected ioctl call number\n");
error = -EINVAL;
}
return error;
}
static const struct file_operations fsl_mc_uapi_dev_fops = {
.owner = THIS_MODULE,
.open = fsl_mc_uapi_dev_open,
.release = fsl_mc_uapi_dev_release,
.unlocked_ioctl = fsl_mc_uapi_dev_ioctl,
};
int fsl_mc_uapi_create_device_file(struct fsl_mc_bus *mc_bus)
{
struct fsl_mc_device *mc_dev = &mc_bus->mc_dev;
struct fsl_mc_uapi *mc_uapi = &mc_bus->uapi_misc;
int error;
mc_uapi->misc.minor = MISC_DYNAMIC_MINOR;
mc_uapi->misc.name = dev_name(&mc_dev->dev);
mc_uapi->misc.fops = &fsl_mc_uapi_dev_fops;
error = misc_register(&mc_uapi->misc);
if (error)
return error;
mc_uapi->static_mc_io = mc_bus->mc_dev.mc_io;
mutex_init(&mc_uapi->mutex);
return 0;
}
void fsl_mc_uapi_remove_device_file(struct fsl_mc_bus *mc_bus)
{
misc_deregister(&mc_bus->uapi_misc.misc);
}
| linux-master | drivers/bus/fsl-mc/fsl-mc-uapi.c |
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* Copyright 2013-2016 Freescale Semiconductor Inc.
*
*/
#include <linux/kernel.h>
#include <linux/fsl/mc.h>
#include "fsl-mc-private.h"
/**
* dpcon_open() - Open a control session for the specified object
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @dpcon_id: DPCON unique ID
* @token: Returned token; use in subsequent API calls
*
* This function can be used to open a control session for an
* already created object; an object may have been declared in
* the DPL or by calling the dpcon_create() function.
* This function returns a unique authentication token,
* associated with the specific object ID and the specific MC
* portal; this token must be used in all subsequent commands for
* this specific object.
*
* Return: '0' on Success; Error code otherwise.
*/
int dpcon_open(struct fsl_mc_io *mc_io,
u32 cmd_flags,
int dpcon_id,
u16 *token)
{
struct fsl_mc_command cmd = { 0 };
struct dpcon_cmd_open *dpcon_cmd;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPCON_CMDID_OPEN,
cmd_flags,
0);
dpcon_cmd = (struct dpcon_cmd_open *)cmd.params;
dpcon_cmd->dpcon_id = cpu_to_le32(dpcon_id);
	/* send command to mc */
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
/* retrieve response parameters */
*token = mc_cmd_hdr_read_token(&cmd);
return 0;
}
EXPORT_SYMBOL_GPL(dpcon_open);
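/*
 * Illustrative caller pattern (a sketch; 'mc_io' and 'dpcon_id' are
 * assumed to come from the caller's context):
 *
 *	u16 token;
 *	int err = dpcon_open(mc_io, 0, dpcon_id, &token);
 *
 *	if (!err) {
 *		// ... operate on the DPCON through 'token' ...
 *		dpcon_close(mc_io, 0, token);
 *	}
 */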
/**
* dpcon_close() - Close the control session of the object
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPCON object
*
* After this function is called, no further operations are
* allowed on the object without opening a new control session.
*
* Return: '0' on Success; Error code otherwise.
*/
int dpcon_close(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token)
{
struct fsl_mc_command cmd = { 0 };
/* prepare command */
cmd.header = mc_encode_cmd_header(DPCON_CMDID_CLOSE,
cmd_flags,
token);
	/* send command to mc */
return mc_send_command(mc_io, &cmd);
}
EXPORT_SYMBOL_GPL(dpcon_close);
/**
* dpcon_enable() - Enable the DPCON
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPCON object
*
* Return: '0' on Success; Error code otherwise
*/
int dpcon_enable(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token)
{
struct fsl_mc_command cmd = { 0 };
/* prepare command */
cmd.header = mc_encode_cmd_header(DPCON_CMDID_ENABLE,
cmd_flags,
token);
	/* send command to mc */
return mc_send_command(mc_io, &cmd);
}
EXPORT_SYMBOL_GPL(dpcon_enable);
/**
* dpcon_disable() - Disable the DPCON
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPCON object
*
* Return: '0' on Success; Error code otherwise
*/
int dpcon_disable(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token)
{
struct fsl_mc_command cmd = { 0 };
/* prepare command */
cmd.header = mc_encode_cmd_header(DPCON_CMDID_DISABLE,
cmd_flags,
token);
	/* send command to mc */
return mc_send_command(mc_io, &cmd);
}
EXPORT_SYMBOL_GPL(dpcon_disable);
/**
* dpcon_reset() - Reset the DPCON, returns the object to initial state.
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPCON object
*
* Return: '0' on Success; Error code otherwise.
*/
int dpcon_reset(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token)
{
struct fsl_mc_command cmd = { 0 };
/* prepare command */
cmd.header = mc_encode_cmd_header(DPCON_CMDID_RESET,
cmd_flags, token);
	/* send command to mc */
return mc_send_command(mc_io, &cmd);
}
EXPORT_SYMBOL_GPL(dpcon_reset);
/**
* dpcon_get_attributes() - Retrieve DPCON attributes.
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPCON object
* @attr: Object's attributes
*
* Return: '0' on Success; Error code otherwise.
*/
int dpcon_get_attributes(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token,
struct dpcon_attr *attr)
{
struct fsl_mc_command cmd = { 0 };
struct dpcon_rsp_get_attr *dpcon_rsp;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_ATTR,
cmd_flags,
token);
	/* send command to mc */
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
/* retrieve response parameters */
dpcon_rsp = (struct dpcon_rsp_get_attr *)cmd.params;
attr->id = le32_to_cpu(dpcon_rsp->id);
attr->qbman_ch_id = le16_to_cpu(dpcon_rsp->qbman_ch_id);
attr->num_priorities = dpcon_rsp->num_priorities;
return 0;
}
EXPORT_SYMBOL_GPL(dpcon_get_attributes);
/**
* dpcon_set_notification() - Set DPCON notification destination
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPCON object
* @cfg: Notification parameters
*
* Return: '0' on Success; Error code otherwise
*/
int dpcon_set_notification(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token,
struct dpcon_notification_cfg *cfg)
{
struct fsl_mc_command cmd = { 0 };
struct dpcon_cmd_set_notification *dpcon_cmd;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_NOTIFICATION,
cmd_flags,
token);
dpcon_cmd = (struct dpcon_cmd_set_notification *)cmd.params;
dpcon_cmd->dpio_id = cpu_to_le32(cfg->dpio_id);
dpcon_cmd->priority = cfg->priority;
dpcon_cmd->user_ctx = cpu_to_le64(cfg->user_ctx);
	/* send command to mc */
return mc_send_command(mc_io, &cmd);
}
EXPORT_SYMBOL_GPL(dpcon_set_notification);
| linux-master | drivers/bus/fsl-mc/dpcon.c |
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* Copyright 2021 NXP
*
*/
#include <linux/kernel.h>
#include <linux/fsl/mc.h>
#include "fsl-mc-private.h"
static int fsl_mc_get_open_cmd_id(const char *type)
{
static const struct {
int cmd_id;
const char *type;
} dev_ids[] = {
{ DPRTC_CMDID_OPEN, "dprtc" },
{ DPRC_CMDID_OPEN, "dprc" },
{ DPNI_CMDID_OPEN, "dpni" },
{ DPIO_CMDID_OPEN, "dpio" },
{ DPSW_CMDID_OPEN, "dpsw" },
{ DPBP_CMDID_OPEN, "dpbp" },
{ DPCON_CMDID_OPEN, "dpcon" },
{ DPMCP_CMDID_OPEN, "dpmcp" },
{ DPMAC_CMDID_OPEN, "dpmac" },
{ DPSECI_CMDID_OPEN, "dpseci" },
{ DPDMUX_CMDID_OPEN, "dpdmux" },
{ DPDCEI_CMDID_OPEN, "dpdcei" },
{ DPAIOP_CMDID_OPEN, "dpaiop" },
{ DPCI_CMDID_OPEN, "dpci" },
{ DPDMAI_CMDID_OPEN, "dpdmai" },
{ DPDBG_CMDID_OPEN, "dpdbg" },
{ 0, NULL }
};
int i;
for (i = 0; dev_ids[i].type; i++)
if (!strcmp(dev_ids[i].type, type))
return dev_ids[i].cmd_id;
return -1;
}
int fsl_mc_obj_open(struct fsl_mc_io *mc_io,
u32 cmd_flags,
int obj_id,
char *obj_type,
u16 *token)
{
struct fsl_mc_command cmd = { 0 };
struct fsl_mc_obj_cmd_open *cmd_params;
int err = 0;
int cmd_id = fsl_mc_get_open_cmd_id(obj_type);
if (cmd_id == -1)
return -ENODEV;
/* prepare command */
cmd.header = mc_encode_cmd_header(cmd_id, cmd_flags, 0);
cmd_params = (struct fsl_mc_obj_cmd_open *)cmd.params;
cmd_params->obj_id = cpu_to_le32(obj_id);
	/* send command to mc */
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
/* retrieve response parameters */
*token = mc_cmd_hdr_read_token(&cmd);
return err;
}
EXPORT_SYMBOL_GPL(fsl_mc_obj_open);
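/*
 * Hedged usage sketch: opening a generic object by type string. The
 * "dpni" type and 'obj_id' value are illustrative only.
 *
 *	u16 token;
 *
 *	if (!fsl_mc_obj_open(mc_io, 0, obj_id, "dpni", &token)) {
 *		// ... issue object commands using 'token' ...
 *		fsl_mc_obj_close(mc_io, 0, token);
 *	}
 */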
int fsl_mc_obj_close(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token)
{
struct fsl_mc_command cmd = { 0 };
/* prepare command */
cmd.header = mc_encode_cmd_header(OBJ_CMDID_CLOSE, cmd_flags,
token);
	/* send command to mc */
return mc_send_command(mc_io, &cmd);
}
EXPORT_SYMBOL_GPL(fsl_mc_obj_close);
int fsl_mc_obj_reset(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token)
{
struct fsl_mc_command cmd = { 0 };
/* prepare command */
cmd.header = mc_encode_cmd_header(OBJ_CMDID_RESET, cmd_flags,
token);
	/* send command to mc */
return mc_send_command(mc_io, &cmd);
}
EXPORT_SYMBOL_GPL(fsl_mc_obj_reset);
| linux-master | drivers/bus/fsl-mc/obj-api.c |
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* Copyright 2013-2016 Freescale Semiconductor Inc.
*
*/
#include <linux/io.h>
#include <linux/fsl/mc.h>
#include "fsl-mc-private.h"
static int fsl_mc_io_set_dpmcp(struct fsl_mc_io *mc_io,
struct fsl_mc_device *dpmcp_dev)
{
int error;
if (mc_io->dpmcp_dev)
return -EINVAL;
if (dpmcp_dev->mc_io)
return -EINVAL;
error = dpmcp_open(mc_io,
0,
dpmcp_dev->obj_desc.id,
&dpmcp_dev->mc_handle);
if (error < 0)
return error;
mc_io->dpmcp_dev = dpmcp_dev;
dpmcp_dev->mc_io = mc_io;
return 0;
}
static void fsl_mc_io_unset_dpmcp(struct fsl_mc_io *mc_io)
{
int error;
struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev;
error = dpmcp_close(mc_io,
0,
dpmcp_dev->mc_handle);
if (error < 0) {
dev_err(&dpmcp_dev->dev, "dpmcp_close() failed: %d\n",
error);
}
mc_io->dpmcp_dev = NULL;
dpmcp_dev->mc_io = NULL;
}
/**
* fsl_create_mc_io() - Creates an MC I/O object
*
* @dev: device to be associated with the MC I/O object
* @mc_portal_phys_addr: physical address of the MC portal to use
* @mc_portal_size: size in bytes of the MC portal
* @dpmcp_dev: Pointer to the DPMCP object associated with this MC I/O
* object or NULL if none.
* @flags: flags for the new MC I/O object
* @new_mc_io: Area to return pointer to newly created MC I/O object
*
* Returns '0' on Success; Error code otherwise.
*/
int __must_check fsl_create_mc_io(struct device *dev,
phys_addr_t mc_portal_phys_addr,
u32 mc_portal_size,
struct fsl_mc_device *dpmcp_dev,
u32 flags, struct fsl_mc_io **new_mc_io)
{
int error;
struct fsl_mc_io *mc_io;
void __iomem *mc_portal_virt_addr;
struct resource *res;
mc_io = devm_kzalloc(dev, sizeof(*mc_io), GFP_KERNEL);
if (!mc_io)
return -ENOMEM;
mc_io->dev = dev;
mc_io->flags = flags;
mc_io->portal_phys_addr = mc_portal_phys_addr;
mc_io->portal_size = mc_portal_size;
if (flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
raw_spin_lock_init(&mc_io->spinlock);
else
mutex_init(&mc_io->mutex);
res = devm_request_mem_region(dev,
mc_portal_phys_addr,
mc_portal_size,
"mc_portal");
if (!res) {
dev_err(dev,
"devm_request_mem_region failed for MC portal %pa\n",
&mc_portal_phys_addr);
return -EBUSY;
}
mc_portal_virt_addr = devm_ioremap(dev,
mc_portal_phys_addr,
mc_portal_size);
if (!mc_portal_virt_addr) {
dev_err(dev,
"devm_ioremap failed for MC portal %pa\n",
&mc_portal_phys_addr);
return -ENXIO;
}
mc_io->portal_virt_addr = mc_portal_virt_addr;
if (dpmcp_dev) {
error = fsl_mc_io_set_dpmcp(mc_io, dpmcp_dev);
if (error < 0)
goto error_destroy_mc_io;
}
*new_mc_io = mc_io;
return 0;
error_destroy_mc_io:
fsl_destroy_mc_io(mc_io);
return error;
}
/**
* fsl_destroy_mc_io() - Destroys an MC I/O object
*
* @mc_io: MC I/O object to destroy
*/
void fsl_destroy_mc_io(struct fsl_mc_io *mc_io)
{
struct fsl_mc_device *dpmcp_dev;
if (!mc_io)
return;
dpmcp_dev = mc_io->dpmcp_dev;
if (dpmcp_dev)
fsl_mc_io_unset_dpmcp(mc_io);
devm_iounmap(mc_io->dev, mc_io->portal_virt_addr);
devm_release_mem_region(mc_io->dev,
mc_io->portal_phys_addr,
mc_io->portal_size);
mc_io->portal_virt_addr = NULL;
devm_kfree(mc_io->dev, mc_io);
}
/**
* fsl_mc_portal_allocate - Allocates an MC portal
*
* @mc_dev: MC device for which the MC portal is to be allocated
* @mc_io_flags: Flags for the fsl_mc_io object that wraps the allocated
* MC portal.
* @new_mc_io: Pointer to area where the pointer to the fsl_mc_io object
* that wraps the allocated MC portal is to be returned
*
* This function allocates an MC portal from the device's parent DPRC,
* from the corresponding MC bus' pool of MC portals and wraps
* it in a new fsl_mc_io object. If 'mc_dev' is a DPRC itself, the
* portal is allocated from its own MC bus.
*/
int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev,
u16 mc_io_flags,
struct fsl_mc_io **new_mc_io)
{
struct fsl_mc_device *mc_bus_dev;
struct fsl_mc_bus *mc_bus;
phys_addr_t mc_portal_phys_addr;
size_t mc_portal_size;
struct fsl_mc_device *dpmcp_dev;
int error = -EINVAL;
struct fsl_mc_resource *resource = NULL;
struct fsl_mc_io *mc_io = NULL;
if (mc_dev->flags & FSL_MC_IS_DPRC) {
mc_bus_dev = mc_dev;
} else {
if (!dev_is_fsl_mc(mc_dev->dev.parent))
return error;
mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
}
mc_bus = to_fsl_mc_bus(mc_bus_dev);
*new_mc_io = NULL;
error = fsl_mc_resource_allocate(mc_bus, FSL_MC_POOL_DPMCP, &resource);
if (error < 0)
return error;
error = -EINVAL;
dpmcp_dev = resource->data;
if (dpmcp_dev->obj_desc.ver_major < DPMCP_MIN_VER_MAJOR ||
(dpmcp_dev->obj_desc.ver_major == DPMCP_MIN_VER_MAJOR &&
dpmcp_dev->obj_desc.ver_minor < DPMCP_MIN_VER_MINOR)) {
dev_err(&dpmcp_dev->dev,
"ERROR: Version %d.%d of DPMCP not supported.\n",
dpmcp_dev->obj_desc.ver_major,
dpmcp_dev->obj_desc.ver_minor);
error = -ENOTSUPP;
goto error_cleanup_resource;
}
mc_portal_phys_addr = dpmcp_dev->regions[0].start;
mc_portal_size = resource_size(dpmcp_dev->regions);
error = fsl_create_mc_io(&mc_bus_dev->dev,
mc_portal_phys_addr,
mc_portal_size, dpmcp_dev,
mc_io_flags, &mc_io);
if (error < 0)
goto error_cleanup_resource;
dpmcp_dev->consumer_link = device_link_add(&mc_dev->dev,
&dpmcp_dev->dev,
DL_FLAG_AUTOREMOVE_CONSUMER);
if (!dpmcp_dev->consumer_link) {
error = -EINVAL;
goto error_cleanup_mc_io;
}
*new_mc_io = mc_io;
return 0;
error_cleanup_mc_io:
fsl_destroy_mc_io(mc_io);
error_cleanup_resource:
fsl_mc_resource_free(resource);
return error;
}
EXPORT_SYMBOL_GPL(fsl_mc_portal_allocate);
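/*
 * Illustrative allocate/free pairing (a sketch; 'mc_dev' is a child
 * fsl-mc device supplied by the caller):
 *
 *	struct fsl_mc_io *mc_io;
 *
 *	if (!fsl_mc_portal_allocate(mc_dev, 0, &mc_io)) {
 *		// ... send commands through 'mc_io' ...
 *		fsl_mc_portal_free(mc_io);
 *	}
 */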
/**
* fsl_mc_portal_free - Returns an MC portal to the pool of free MC portals
* of a given MC bus
*
* @mc_io: Pointer to the fsl_mc_io object that wraps the MC portal to free
*/
void fsl_mc_portal_free(struct fsl_mc_io *mc_io)
{
struct fsl_mc_device *dpmcp_dev;
struct fsl_mc_resource *resource;
/*
* Every mc_io obtained by calling fsl_mc_portal_allocate() is supposed
	 * to have a DPMCP object associated with it.
*/
dpmcp_dev = mc_io->dpmcp_dev;
resource = dpmcp_dev->resource;
if (!resource || resource->type != FSL_MC_POOL_DPMCP)
return;
if (resource->data != dpmcp_dev)
return;
fsl_destroy_mc_io(mc_io);
fsl_mc_resource_free(resource);
dpmcp_dev->consumer_link = NULL;
}
EXPORT_SYMBOL_GPL(fsl_mc_portal_free);
/**
* fsl_mc_portal_reset - Resets the dpmcp object for a given fsl_mc_io object
*
* @mc_io: Pointer to the fsl_mc_io object that wraps the MC portal to free
*/
int fsl_mc_portal_reset(struct fsl_mc_io *mc_io)
{
int error;
struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev;
error = dpmcp_reset(mc_io, 0, dpmcp_dev->mc_handle);
if (error < 0) {
dev_err(&dpmcp_dev->dev, "dpmcp_reset() failed: %d\n", error);
return error;
}
return 0;
}
EXPORT_SYMBOL_GPL(fsl_mc_portal_reset);
| linux-master | drivers/bus/fsl-mc/mc-io.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Freescale Management Complex (MC) bus driver
*
* Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
* Copyright 2019-2020 NXP
* Author: German Rivera <[email protected]>
*
*/
#define pr_fmt(fmt) "fsl-mc: " fmt
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/limits.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/acpi.h>
#include <linux/iommu.h>
#include <linux/dma-map-ops.h>
#include "fsl-mc-private.h"
/*
* Default DMA mask for devices on a fsl-mc bus
*/
#define FSL_MC_DEFAULT_DMA_MASK (~0ULL)
static struct fsl_mc_version mc_version;
/**
* struct fsl_mc - Private data of a "fsl,qoriq-mc" platform device
* @root_mc_bus_dev: fsl-mc device representing the root DPRC
* @num_translation_ranges: number of entries in addr_translation_ranges
* @translation_ranges: array of bus to system address translation ranges
* @fsl_mc_regs: base address of register bank
*/
struct fsl_mc {
struct fsl_mc_device *root_mc_bus_dev;
u8 num_translation_ranges;
struct fsl_mc_addr_translation_range *translation_ranges;
void __iomem *fsl_mc_regs;
};
/**
* struct fsl_mc_addr_translation_range - bus to system address translation
* range
* @mc_region_type: Type of MC region for the range being translated
* @start_mc_offset: Start MC offset of the range being translated
* @end_mc_offset: MC offset of the first byte after the range (last MC
* offset of the range is end_mc_offset - 1)
* @start_phys_addr: system physical address corresponding to start_mc_addr
*/
struct fsl_mc_addr_translation_range {
enum dprc_region_type mc_region_type;
u64 start_mc_offset;
u64 end_mc_offset;
phys_addr_t start_phys_addr;
};
#define FSL_MC_GCR1 0x0
#define GCR1_P1_STOP BIT(31)
#define GCR1_P2_STOP BIT(30)
#define FSL_MC_FAPR 0x28
#define MC_FAPR_PL BIT(18)
#define MC_FAPR_BMT BIT(17)
static phys_addr_t mc_portal_base_phys_addr;
/**
* fsl_mc_bus_match - device to driver matching callback
* @dev: the fsl-mc device to match against
* @drv: the device driver to search for matching fsl-mc object type
* structures
*
* Returns 1 on success, 0 otherwise.
*/
static int fsl_mc_bus_match(struct device *dev, struct device_driver *drv)
{
const struct fsl_mc_device_id *id;
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(drv);
bool found = false;
/* When driver_override is set, only bind to the matching driver */
if (mc_dev->driver_override) {
found = !strcmp(mc_dev->driver_override, mc_drv->driver.name);
goto out;
}
if (!mc_drv->match_id_table)
goto out;
/*
* If the object is not 'plugged' don't match.
* Only exception is the root DPRC, which is a special case.
*/
if ((mc_dev->obj_desc.state & FSL_MC_OBJ_STATE_PLUGGED) == 0 &&
!fsl_mc_is_root_dprc(&mc_dev->dev))
goto out;
/*
* Traverse the match_id table of the given driver, trying to find
* a matching for the given device.
*/
for (id = mc_drv->match_id_table; id->vendor != 0x0; id++) {
if (id->vendor == mc_dev->obj_desc.vendor &&
strcmp(id->obj_type, mc_dev->obj_desc.type) == 0) {
found = true;
break;
}
}
out:
dev_dbg(dev, "%smatched\n", found ? "" : "not ");
return found;
}
/*
* fsl_mc_bus_uevent - callback invoked when a device is added
*/
static int fsl_mc_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
const struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
if (add_uevent_var(env, "MODALIAS=fsl-mc:v%08Xd%s",
mc_dev->obj_desc.vendor,
mc_dev->obj_desc.type))
return -ENOMEM;
return 0;
}
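/*
 * For reference (hedged example): with a hypothetical vendor id of
 * 0x1957 and an object type of "dpni", the variable emitted above would
 * read "MODALIAS=fsl-mc:v00001957ddpni".
 */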
static int fsl_mc_dma_configure(struct device *dev)
{
struct device *dma_dev = dev;
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
u32 input_id = mc_dev->icid;
int ret;
while (dev_is_fsl_mc(dma_dev))
dma_dev = dma_dev->parent;
if (dev_of_node(dma_dev))
ret = of_dma_configure_id(dev, dma_dev->of_node, 0, &input_id);
else
ret = acpi_dma_configure_id(dev, DEV_DMA_COHERENT, &input_id);
if (!ret && !mc_drv->driver_managed_dma) {
ret = iommu_device_use_default_domain(dev);
if (ret)
arch_teardown_dma_ops(dev);
}
return ret;
}
static void fsl_mc_dma_cleanup(struct device *dev)
{
struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
if (!mc_drv->driver_managed_dma)
iommu_device_unuse_default_domain(dev);
}
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
return sprintf(buf, "fsl-mc:v%08Xd%s\n", mc_dev->obj_desc.vendor,
mc_dev->obj_desc.type);
}
static DEVICE_ATTR_RO(modalias);
static ssize_t driver_override_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
int ret;
if (WARN_ON(dev->bus != &fsl_mc_bus_type))
return -EINVAL;
ret = driver_set_override(dev, &mc_dev->driver_override, buf, count);
if (ret)
return ret;
return count;
}
static ssize_t driver_override_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
return snprintf(buf, PAGE_SIZE, "%s\n", mc_dev->driver_override);
}
static DEVICE_ATTR_RW(driver_override);
static struct attribute *fsl_mc_dev_attrs[] = {
&dev_attr_modalias.attr,
&dev_attr_driver_override.attr,
NULL,
};
ATTRIBUTE_GROUPS(fsl_mc_dev);
static int scan_fsl_mc_bus(struct device *dev, void *data)
{
struct fsl_mc_device *root_mc_dev;
struct fsl_mc_bus *root_mc_bus;
if (!fsl_mc_is_root_dprc(dev))
goto exit;
root_mc_dev = to_fsl_mc_device(dev);
root_mc_bus = to_fsl_mc_bus(root_mc_dev);
mutex_lock(&root_mc_bus->scan_mutex);
dprc_scan_objects(root_mc_dev, false);
mutex_unlock(&root_mc_bus->scan_mutex);
exit:
return 0;
}
static ssize_t rescan_store(const struct bus_type *bus,
const char *buf, size_t count)
{
unsigned long val;
if (kstrtoul(buf, 0, &val) < 0)
return -EINVAL;
if (val)
bus_for_each_dev(bus, NULL, NULL, scan_fsl_mc_bus);
return count;
}
static BUS_ATTR_WO(rescan);
static int fsl_mc_bus_set_autorescan(struct device *dev, void *data)
{
struct fsl_mc_device *root_mc_dev;
unsigned long val;
char *buf = data;
if (!fsl_mc_is_root_dprc(dev))
goto exit;
root_mc_dev = to_fsl_mc_device(dev);
if (kstrtoul(buf, 0, &val) < 0)
return -EINVAL;
if (val)
enable_dprc_irq(root_mc_dev);
else
disable_dprc_irq(root_mc_dev);
exit:
return 0;
}
static int fsl_mc_bus_get_autorescan(struct device *dev, void *data)
{
struct fsl_mc_device *root_mc_dev;
char *buf = data;
if (!fsl_mc_is_root_dprc(dev))
goto exit;
root_mc_dev = to_fsl_mc_device(dev);
sprintf(buf, "%d\n", get_dprc_irq_state(root_mc_dev));
exit:
return 0;
}
static ssize_t autorescan_store(const struct bus_type *bus,
const char *buf, size_t count)
{
bus_for_each_dev(bus, NULL, (void *)buf, fsl_mc_bus_set_autorescan);
return count;
}
static ssize_t autorescan_show(const struct bus_type *bus, char *buf)
{
bus_for_each_dev(bus, NULL, (void *)buf, fsl_mc_bus_get_autorescan);
return strlen(buf);
}
static BUS_ATTR_RW(autorescan);
static struct attribute *fsl_mc_bus_attrs[] = {
&bus_attr_rescan.attr,
&bus_attr_autorescan.attr,
NULL,
};
ATTRIBUTE_GROUPS(fsl_mc_bus);
struct bus_type fsl_mc_bus_type = {
.name = "fsl-mc",
.match = fsl_mc_bus_match,
.uevent = fsl_mc_bus_uevent,
.dma_configure = fsl_mc_dma_configure,
.dma_cleanup = fsl_mc_dma_cleanup,
.dev_groups = fsl_mc_dev_groups,
.bus_groups = fsl_mc_bus_groups,
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_type);
struct device_type fsl_mc_bus_dprc_type = {
.name = "fsl_mc_bus_dprc"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dprc_type);
struct device_type fsl_mc_bus_dpni_type = {
.name = "fsl_mc_bus_dpni"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpni_type);
struct device_type fsl_mc_bus_dpio_type = {
.name = "fsl_mc_bus_dpio"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpio_type);
struct device_type fsl_mc_bus_dpsw_type = {
.name = "fsl_mc_bus_dpsw"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpsw_type);
struct device_type fsl_mc_bus_dpbp_type = {
.name = "fsl_mc_bus_dpbp"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpbp_type);
struct device_type fsl_mc_bus_dpcon_type = {
.name = "fsl_mc_bus_dpcon"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpcon_type);
struct device_type fsl_mc_bus_dpmcp_type = {
.name = "fsl_mc_bus_dpmcp"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpmcp_type);
struct device_type fsl_mc_bus_dpmac_type = {
.name = "fsl_mc_bus_dpmac"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpmac_type);
struct device_type fsl_mc_bus_dprtc_type = {
.name = "fsl_mc_bus_dprtc"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dprtc_type);
struct device_type fsl_mc_bus_dpseci_type = {
.name = "fsl_mc_bus_dpseci"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpseci_type);
struct device_type fsl_mc_bus_dpdmux_type = {
.name = "fsl_mc_bus_dpdmux"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdmux_type);
struct device_type fsl_mc_bus_dpdcei_type = {
.name = "fsl_mc_bus_dpdcei"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdcei_type);
struct device_type fsl_mc_bus_dpaiop_type = {
.name = "fsl_mc_bus_dpaiop"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpaiop_type);
struct device_type fsl_mc_bus_dpci_type = {
.name = "fsl_mc_bus_dpci"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpci_type);
struct device_type fsl_mc_bus_dpdmai_type = {
.name = "fsl_mc_bus_dpdmai"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdmai_type);
struct device_type fsl_mc_bus_dpdbg_type = {
.name = "fsl_mc_bus_dpdbg"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdbg_type);
static struct device_type *fsl_mc_get_device_type(const char *type)
{
static const struct {
struct device_type *dev_type;
const char *type;
} dev_types[] = {
{ &fsl_mc_bus_dprc_type, "dprc" },
{ &fsl_mc_bus_dpni_type, "dpni" },
{ &fsl_mc_bus_dpio_type, "dpio" },
{ &fsl_mc_bus_dpsw_type, "dpsw" },
{ &fsl_mc_bus_dpbp_type, "dpbp" },
{ &fsl_mc_bus_dpcon_type, "dpcon" },
{ &fsl_mc_bus_dpmcp_type, "dpmcp" },
{ &fsl_mc_bus_dpmac_type, "dpmac" },
{ &fsl_mc_bus_dprtc_type, "dprtc" },
{ &fsl_mc_bus_dpseci_type, "dpseci" },
{ &fsl_mc_bus_dpdmux_type, "dpdmux" },
{ &fsl_mc_bus_dpdcei_type, "dpdcei" },
{ &fsl_mc_bus_dpaiop_type, "dpaiop" },
{ &fsl_mc_bus_dpci_type, "dpci" },
{ &fsl_mc_bus_dpdmai_type, "dpdmai" },
{ &fsl_mc_bus_dpdbg_type, "dpdbg" },
{ NULL, NULL }
};
int i;
for (i = 0; dev_types[i].dev_type; i++)
if (!strcmp(dev_types[i].type, type))
return dev_types[i].dev_type;
return NULL;
}
static int fsl_mc_driver_probe(struct device *dev)
{
struct fsl_mc_driver *mc_drv;
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
int error;
mc_drv = to_fsl_mc_driver(dev->driver);
error = mc_drv->probe(mc_dev);
if (error < 0) {
if (error != -EPROBE_DEFER)
dev_err(dev, "%s failed: %d\n", __func__, error);
return error;
}
return 0;
}
static int fsl_mc_driver_remove(struct device *dev)
{
struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
mc_drv->remove(mc_dev);
return 0;
}
static void fsl_mc_driver_shutdown(struct device *dev)
{
struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
mc_drv->shutdown(mc_dev);
}
/*
* __fsl_mc_driver_register - registers a child device driver with the
* MC bus
*
* This function is implicitly invoked from the registration function of
* fsl_mc device drivers, which is generated by the
* module_fsl_mc_driver() macro.
*/
int __fsl_mc_driver_register(struct fsl_mc_driver *mc_driver,
struct module *owner)
{
int error;
mc_driver->driver.owner = owner;
mc_driver->driver.bus = &fsl_mc_bus_type;
if (mc_driver->probe)
mc_driver->driver.probe = fsl_mc_driver_probe;
if (mc_driver->remove)
mc_driver->driver.remove = fsl_mc_driver_remove;
if (mc_driver->shutdown)
mc_driver->driver.shutdown = fsl_mc_driver_shutdown;
error = driver_register(&mc_driver->driver);
if (error < 0) {
pr_err("driver_register() failed for %s: %d\n",
mc_driver->driver.name, error);
return error;
}
return 0;
}
EXPORT_SYMBOL_GPL(__fsl_mc_driver_register);
/*
* fsl_mc_driver_unregister - unregisters a device driver from the
* MC bus
*/
void fsl_mc_driver_unregister(struct fsl_mc_driver *mc_driver)
{
driver_unregister(&mc_driver->driver);
}
EXPORT_SYMBOL_GPL(fsl_mc_driver_unregister);
/**
* mc_get_version() - Retrieves the Management Complex firmware
* version information
* @mc_io: Pointer to opaque I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @mc_ver_info: Returned version information structure
*
* Return: '0' on Success; Error code otherwise.
*/
static int mc_get_version(struct fsl_mc_io *mc_io,
u32 cmd_flags,
struct fsl_mc_version *mc_ver_info)
{
struct fsl_mc_command cmd = { 0 };
struct dpmng_rsp_get_version *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPMNG_CMDID_GET_VERSION,
cmd_flags,
0);
	/* send command to mc */
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
/* retrieve response parameters */
rsp_params = (struct dpmng_rsp_get_version *)cmd.params;
mc_ver_info->revision = le32_to_cpu(rsp_params->revision);
mc_ver_info->major = le32_to_cpu(rsp_params->version_major);
mc_ver_info->minor = le32_to_cpu(rsp_params->version_minor);
return 0;
}
/**
* fsl_mc_get_version - function to retrieve the MC f/w version information
*
* Return: mc version when called after fsl-mc-bus probe; NULL otherwise.
*/
struct fsl_mc_version *fsl_mc_get_version(void)
{
if (mc_version.major)
return &mc_version;
return NULL;
}
EXPORT_SYMBOL_GPL(fsl_mc_get_version);
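/*
 * Hedged consumer sketch: callers can gate features on the firmware
 * version (the threshold values below are illustrative):
 *
 *	struct fsl_mc_version *v = fsl_mc_get_version();
 *
 *	if (v && (v->major > 10 || (v->major == 10 && v->minor >= 18)))
 *		enable_new_feature();	// hypothetical helper
 */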
/*
* fsl_mc_get_root_dprc - function to traverse to the root dprc
*/
void fsl_mc_get_root_dprc(struct device *dev,
struct device **root_dprc_dev)
{
if (!dev) {
*root_dprc_dev = NULL;
} else if (!dev_is_fsl_mc(dev)) {
*root_dprc_dev = NULL;
} else {
*root_dprc_dev = dev;
while (dev_is_fsl_mc((*root_dprc_dev)->parent))
*root_dprc_dev = (*root_dprc_dev)->parent;
}
}
static int get_dprc_attr(struct fsl_mc_io *mc_io,
int container_id, struct dprc_attributes *attr)
{
u16 dprc_handle;
int error;
error = dprc_open(mc_io, 0, container_id, &dprc_handle);
if (error < 0) {
dev_err(mc_io->dev, "dprc_open() failed: %d\n", error);
return error;
}
memset(attr, 0, sizeof(struct dprc_attributes));
error = dprc_get_attributes(mc_io, 0, dprc_handle, attr);
if (error < 0) {
dev_err(mc_io->dev, "dprc_get_attributes() failed: %d\n",
error);
goto common_cleanup;
}
error = 0;
common_cleanup:
(void)dprc_close(mc_io, 0, dprc_handle);
return error;
}
static int get_dprc_icid(struct fsl_mc_io *mc_io,
int container_id, u32 *icid)
{
struct dprc_attributes attr;
int error;
error = get_dprc_attr(mc_io, container_id, &attr);
if (error == 0)
*icid = attr.icid;
return error;
}
static int translate_mc_addr(struct fsl_mc_device *mc_dev,
enum dprc_region_type mc_region_type,
u64 mc_offset, phys_addr_t *phys_addr)
{
int i;
struct device *root_dprc_dev;
struct fsl_mc *mc;
fsl_mc_get_root_dprc(&mc_dev->dev, &root_dprc_dev);
mc = dev_get_drvdata(root_dprc_dev->parent);
if (mc->num_translation_ranges == 0) {
/*
* Do identity mapping:
*/
*phys_addr = mc_offset;
return 0;
}
for (i = 0; i < mc->num_translation_ranges; i++) {
struct fsl_mc_addr_translation_range *range =
&mc->translation_ranges[i];
if (mc_region_type == range->mc_region_type &&
mc_offset >= range->start_mc_offset &&
mc_offset < range->end_mc_offset) {
*phys_addr = range->start_phys_addr +
(mc_offset - range->start_mc_offset);
return 0;
}
}
return -EFAULT;
}
static int fsl_mc_device_get_mmio_regions(struct fsl_mc_device *mc_dev,
struct fsl_mc_device *mc_bus_dev)
{
int i;
int error;
struct resource *regions;
struct fsl_mc_obj_desc *obj_desc = &mc_dev->obj_desc;
struct device *parent_dev = mc_dev->dev.parent;
enum dprc_region_type mc_region_type;
if (is_fsl_mc_bus_dprc(mc_dev) ||
is_fsl_mc_bus_dpmcp(mc_dev)) {
mc_region_type = DPRC_REGION_TYPE_MC_PORTAL;
} else if (is_fsl_mc_bus_dpio(mc_dev)) {
mc_region_type = DPRC_REGION_TYPE_QBMAN_PORTAL;
} else {
/*
* This function should not have been called for this MC object
* type, as this object type is not supposed to have MMIO
* regions
*/
return -EINVAL;
}
regions = kmalloc_array(obj_desc->region_count,
sizeof(regions[0]), GFP_KERNEL);
if (!regions)
return -ENOMEM;
for (i = 0; i < obj_desc->region_count; i++) {
struct dprc_region_desc region_desc;
error = dprc_get_obj_region(mc_bus_dev->mc_io,
0,
mc_bus_dev->mc_handle,
obj_desc->type,
obj_desc->id, i, ®ion_desc);
if (error < 0) {
dev_err(parent_dev,
"dprc_get_obj_region() failed: %d\n", error);
goto error_cleanup_regions;
}
/*
		 * Older MC only returned the region offset and no base
		 * address. If a base address is present in region_desc,
		 * use it; otherwise revert to the old mechanism.
*/
if (region_desc.base_address) {
regions[i].start = region_desc.base_address +
region_desc.base_offset;
} else {
error = translate_mc_addr(mc_dev, mc_region_type,
region_desc.base_offset,
®ions[i].start);
/*
* Some versions of the MC firmware wrongly report
* 0 for register base address of the DPMCP associated
* with child DPRC objects thus rendering them unusable.
* This is particularly troublesome in ACPI boot
* scenarios where the legacy way of extracting this
* base address from the device tree does not apply.
* Given that DPMCPs share the same base address,
* workaround this by using the base address extracted
* from the root DPRC container.
*/
if (is_fsl_mc_bus_dprc(mc_dev) &&
regions[i].start == region_desc.base_offset)
regions[i].start += mc_portal_base_phys_addr;
}
if (error < 0) {
dev_err(parent_dev,
"Invalid MC offset: %#x (for %s.%d\'s region %d)\n",
region_desc.base_offset,
obj_desc->type, obj_desc->id, i);
goto error_cleanup_regions;
}
regions[i].end = regions[i].start + region_desc.size - 1;
regions[i].name = "fsl-mc object MMIO region";
regions[i].flags = region_desc.flags & IORESOURCE_BITS;
regions[i].flags |= IORESOURCE_MEM;
}
mc_dev->regions = regions;
return 0;
error_cleanup_regions:
kfree(regions);
return error;
}
/*
* fsl_mc_is_root_dprc - function to check if a given device is a root dprc
*/
bool fsl_mc_is_root_dprc(struct device *dev)
{
struct device *root_dprc_dev;
fsl_mc_get_root_dprc(dev, &root_dprc_dev);
if (!root_dprc_dev)
return false;
return dev == root_dprc_dev;
}
static void fsl_mc_device_release(struct device *dev)
{
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
kfree(mc_dev->regions);
if (is_fsl_mc_bus_dprc(mc_dev))
kfree(to_fsl_mc_bus(mc_dev));
else
kfree(mc_dev);
}
/*
* Add a newly discovered fsl-mc device to be visible in Linux
*/
int fsl_mc_device_add(struct fsl_mc_obj_desc *obj_desc,
struct fsl_mc_io *mc_io,
struct device *parent_dev,
struct fsl_mc_device **new_mc_dev)
{
int error;
struct fsl_mc_device *mc_dev = NULL;
struct fsl_mc_bus *mc_bus = NULL;
struct fsl_mc_device *parent_mc_dev;
if (dev_is_fsl_mc(parent_dev))
parent_mc_dev = to_fsl_mc_device(parent_dev);
else
parent_mc_dev = NULL;
if (strcmp(obj_desc->type, "dprc") == 0) {
/*
* Allocate an MC bus device object:
*/
mc_bus = kzalloc(sizeof(*mc_bus), GFP_KERNEL);
if (!mc_bus)
return -ENOMEM;
mutex_init(&mc_bus->scan_mutex);
mc_dev = &mc_bus->mc_dev;
} else {
/*
* Allocate a regular fsl_mc_device object:
*/
mc_dev = kzalloc(sizeof(*mc_dev), GFP_KERNEL);
if (!mc_dev)
return -ENOMEM;
}
mc_dev->obj_desc = *obj_desc;
mc_dev->mc_io = mc_io;
device_initialize(&mc_dev->dev);
mc_dev->dev.parent = parent_dev;
mc_dev->dev.bus = &fsl_mc_bus_type;
mc_dev->dev.release = fsl_mc_device_release;
mc_dev->dev.type = fsl_mc_get_device_type(obj_desc->type);
if (!mc_dev->dev.type) {
error = -ENODEV;
dev_err(parent_dev, "unknown device type %s\n", obj_desc->type);
goto error_cleanup_dev;
}
dev_set_name(&mc_dev->dev, "%s.%d", obj_desc->type, obj_desc->id);
if (strcmp(obj_desc->type, "dprc") == 0) {
struct fsl_mc_io *mc_io2;
mc_dev->flags |= FSL_MC_IS_DPRC;
/*
* To get the DPRC's ICID, we need to open the DPRC
* in get_dprc_icid(). For child DPRCs, we do so using the
* parent DPRC's MC portal instead of the child DPRC's MC
* portal, in case the child DPRC is already opened with
* its own portal (e.g., the DPRC used by AIOP).
*
* NOTE: There cannot be more than one active open for a
* given MC object, using the same MC portal.
*/
if (parent_mc_dev) {
/*
* device being added is a child DPRC device
*/
mc_io2 = parent_mc_dev->mc_io;
} else {
/*
* device being added is the root DPRC device
*/
if (!mc_io) {
error = -EINVAL;
goto error_cleanup_dev;
}
mc_io2 = mc_io;
}
error = get_dprc_icid(mc_io2, obj_desc->id, &mc_dev->icid);
if (error < 0)
goto error_cleanup_dev;
} else {
/*
* A non-DPRC object has to be a child of a DPRC, use the
* parent's ICID and interrupt domain.
*/
mc_dev->icid = parent_mc_dev->icid;
mc_dev->dma_mask = FSL_MC_DEFAULT_DMA_MASK;
mc_dev->dev.dma_mask = &mc_dev->dma_mask;
mc_dev->dev.coherent_dma_mask = mc_dev->dma_mask;
dev_set_msi_domain(&mc_dev->dev,
dev_get_msi_domain(&parent_mc_dev->dev));
}
/*
* Get MMIO regions for the device from the MC:
*
* NOTE: the root DPRC is a special case as its MMIO region is
* obtained from the device tree
*/
if (parent_mc_dev && obj_desc->region_count != 0) {
error = fsl_mc_device_get_mmio_regions(mc_dev,
parent_mc_dev);
if (error < 0)
goto error_cleanup_dev;
}
/*
* The device-specific probe callback will get invoked by device_add()
*/
error = device_add(&mc_dev->dev);
if (error < 0) {
dev_err(parent_dev,
"device_add() failed for device %s: %d\n",
dev_name(&mc_dev->dev), error);
goto error_cleanup_dev;
}
dev_dbg(parent_dev, "added %s\n", dev_name(&mc_dev->dev));
*new_mc_dev = mc_dev;
return 0;
error_cleanup_dev:
kfree(mc_dev->regions);
kfree(mc_bus);
kfree(mc_dev);
return error;
}
EXPORT_SYMBOL_GPL(fsl_mc_device_add);
static struct notifier_block fsl_mc_nb;
/**
* fsl_mc_device_remove - Remove an fsl-mc device from being visible to
* Linux
*
* @mc_dev: Pointer to an fsl-mc device
*/
void fsl_mc_device_remove(struct fsl_mc_device *mc_dev)
{
kfree(mc_dev->driver_override);
mc_dev->driver_override = NULL;
/*
* The device-specific remove callback will get invoked by device_del()
*/
device_del(&mc_dev->dev);
put_device(&mc_dev->dev);
}
EXPORT_SYMBOL_GPL(fsl_mc_device_remove);
struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev,
u16 if_id)
{
struct fsl_mc_device *mc_bus_dev, *endpoint;
struct fsl_mc_obj_desc endpoint_desc = {{ 0 }};
struct dprc_endpoint endpoint1 = {{ 0 }};
struct dprc_endpoint endpoint2 = {{ 0 }};
int state, err;
mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
strcpy(endpoint1.type, mc_dev->obj_desc.type);
endpoint1.id = mc_dev->obj_desc.id;
endpoint1.if_id = if_id;
err = dprc_get_connection(mc_bus_dev->mc_io, 0,
mc_bus_dev->mc_handle,
&endpoint1, &endpoint2,
&state);
if (err == -ENOTCONN || state == -1)
return ERR_PTR(-ENOTCONN);
if (err < 0) {
dev_err(&mc_bus_dev->dev, "dprc_get_connection() = %d\n", err);
return ERR_PTR(err);
}
strcpy(endpoint_desc.type, endpoint2.type);
endpoint_desc.id = endpoint2.id;
endpoint = fsl_mc_device_lookup(&endpoint_desc, mc_bus_dev);
/*
	 * We know that the device has an endpoint because we verified by
	 * interrogating the firmware. A NULL lookup result therefore just
	 * means the device was not yet discovered by the fsl-mc bus.
* Force a rescan of the devices in this container and retry the lookup.
*/
if (!endpoint) {
struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
if (mutex_trylock(&mc_bus->scan_mutex)) {
err = dprc_scan_objects(mc_bus_dev, true);
mutex_unlock(&mc_bus->scan_mutex);
}
if (err < 0)
return ERR_PTR(err);
}
endpoint = fsl_mc_device_lookup(&endpoint_desc, mc_bus_dev);
/*
* This means that the endpoint might reside in a different isolation
	 * context (DPRC/container). Not much to do, so return a permission
* error.
*/
if (!endpoint)
return ERR_PTR(-EPERM);
return endpoint;
}
EXPORT_SYMBOL_GPL(fsl_mc_get_endpoint);
static int get_mc_addr_translation_ranges(struct device *dev,
struct fsl_mc_addr_translation_range
**ranges,
u8 *num_ranges)
{
struct fsl_mc_addr_translation_range *r;
struct of_range_parser parser;
struct of_range range;
of_range_parser_init(&parser, dev->of_node);
*num_ranges = of_range_count(&parser);
if (!*num_ranges) {
/*
* Missing or empty ranges property ("ranges;") for the
* 'fsl,qoriq-mc' node. In this case, identity mapping
* will be used.
*/
*ranges = NULL;
return 0;
}
*ranges = devm_kcalloc(dev, *num_ranges,
sizeof(struct fsl_mc_addr_translation_range),
GFP_KERNEL);
if (!(*ranges))
return -ENOMEM;
r = *ranges;
for_each_of_range(&parser, &range) {
r->mc_region_type = range.flags;
r->start_mc_offset = range.bus_addr;
r->end_mc_offset = range.bus_addr + range.size;
r->start_phys_addr = range.cpu_addr;
r++;
}
return 0;
}
/*
* fsl_mc_bus_probe - callback invoked when the root MC bus is being
* added
*/
static int fsl_mc_bus_probe(struct platform_device *pdev)
{
struct fsl_mc_obj_desc obj_desc;
int error;
struct fsl_mc *mc;
struct fsl_mc_device *mc_bus_dev = NULL;
struct fsl_mc_io *mc_io = NULL;
int container_id;
phys_addr_t mc_portal_phys_addr;
u32 mc_portal_size, mc_stream_id;
struct resource *plat_res;
mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
if (!mc)
return -ENOMEM;
platform_set_drvdata(pdev, mc);
plat_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (plat_res) {
mc->fsl_mc_regs = devm_ioremap_resource(&pdev->dev, plat_res);
if (IS_ERR(mc->fsl_mc_regs))
return PTR_ERR(mc->fsl_mc_regs);
}
if (mc->fsl_mc_regs) {
if (IS_ENABLED(CONFIG_ACPI) && !dev_of_node(&pdev->dev)) {
mc_stream_id = readl(mc->fsl_mc_regs + FSL_MC_FAPR);
/*
* HW ORs the PL and BMT bit, places the result in bit
* 14 of the StreamID and ORs in the ICID. Calculate it
* accordingly.
*/
mc_stream_id = (mc_stream_id & 0xffff) |
((mc_stream_id & (MC_FAPR_PL | MC_FAPR_BMT)) ?
BIT(14) : 0);
error = acpi_dma_configure_id(&pdev->dev,
DEV_DMA_COHERENT,
&mc_stream_id);
if (error == -EPROBE_DEFER)
return error;
if (error)
dev_warn(&pdev->dev,
"failed to configure dma: %d.\n",
error);
}
/*
* Some bootloaders pause the MC firmware before booting the
* kernel so that MC will not cause faults as soon as the
* SMMU probes due to the fact that there's no configuration
* in place for MC.
* At this point MC should have all its SMMU setup done so make
* sure it is resumed.
*/
writel(readl(mc->fsl_mc_regs + FSL_MC_GCR1) &
(~(GCR1_P1_STOP | GCR1_P2_STOP)),
mc->fsl_mc_regs + FSL_MC_GCR1);
}
/*
* Get physical address of MC portal for the root DPRC:
*/
plat_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mc_portal_phys_addr = plat_res->start;
mc_portal_size = resource_size(plat_res);
mc_portal_base_phys_addr = mc_portal_phys_addr & ~0x3ffffff;
error = fsl_create_mc_io(&pdev->dev, mc_portal_phys_addr,
mc_portal_size, NULL,
FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, &mc_io);
if (error < 0)
return error;
error = mc_get_version(mc_io, 0, &mc_version);
if (error != 0) {
dev_err(&pdev->dev,
"mc_get_version() failed with error %d\n", error);
goto error_cleanup_mc_io;
}
dev_info(&pdev->dev, "MC firmware version: %u.%u.%u\n",
mc_version.major, mc_version.minor, mc_version.revision);
if (dev_of_node(&pdev->dev)) {
error = get_mc_addr_translation_ranges(&pdev->dev,
&mc->translation_ranges,
&mc->num_translation_ranges);
if (error < 0)
goto error_cleanup_mc_io;
}
error = dprc_get_container_id(mc_io, 0, &container_id);
if (error < 0) {
dev_err(&pdev->dev,
"dprc_get_container_id() failed: %d\n", error);
goto error_cleanup_mc_io;
}
memset(&obj_desc, 0, sizeof(struct fsl_mc_obj_desc));
error = dprc_get_api_version(mc_io, 0,
&obj_desc.ver_major,
&obj_desc.ver_minor);
if (error < 0)
goto error_cleanup_mc_io;
obj_desc.vendor = FSL_MC_VENDOR_FREESCALE;
strcpy(obj_desc.type, "dprc");
obj_desc.id = container_id;
obj_desc.irq_count = 1;
obj_desc.region_count = 0;
error = fsl_mc_device_add(&obj_desc, mc_io, &pdev->dev, &mc_bus_dev);
if (error < 0)
goto error_cleanup_mc_io;
mc->root_mc_bus_dev = mc_bus_dev;
mc_bus_dev->dev.fwnode = pdev->dev.fwnode;
return 0;
error_cleanup_mc_io:
fsl_destroy_mc_io(mc_io);
return error;
}
/*
* fsl_mc_bus_remove - callback invoked when the root MC bus is being
* removed
*/
static int fsl_mc_bus_remove(struct platform_device *pdev)
{
struct fsl_mc *mc = platform_get_drvdata(pdev);
struct fsl_mc_io *mc_io;
if (!fsl_mc_is_root_dprc(&mc->root_mc_bus_dev->dev))
return -EINVAL;
mc_io = mc->root_mc_bus_dev->mc_io;
fsl_mc_device_remove(mc->root_mc_bus_dev);
fsl_destroy_mc_io(mc_io);
bus_unregister_notifier(&fsl_mc_bus_type, &fsl_mc_nb);
if (mc->fsl_mc_regs) {
/*
* Pause the MC firmware so that it doesn't crash in certain
* scenarios, such as kexec.
*/
writel(readl(mc->fsl_mc_regs + FSL_MC_GCR1) |
(GCR1_P1_STOP | GCR1_P2_STOP),
mc->fsl_mc_regs + FSL_MC_GCR1);
}
return 0;
}
static void fsl_mc_bus_shutdown(struct platform_device *pdev)
{
fsl_mc_bus_remove(pdev);
}
static const struct of_device_id fsl_mc_bus_match_table[] = {
{.compatible = "fsl,qoriq-mc",},
{},
};
MODULE_DEVICE_TABLE(of, fsl_mc_bus_match_table);
static const struct acpi_device_id fsl_mc_bus_acpi_match_table[] = {
{"NXP0008", 0 },
{ }
};
MODULE_DEVICE_TABLE(acpi, fsl_mc_bus_acpi_match_table);
static struct platform_driver fsl_mc_bus_driver = {
.driver = {
.name = "fsl_mc_bus",
.pm = NULL,
.of_match_table = fsl_mc_bus_match_table,
.acpi_match_table = fsl_mc_bus_acpi_match_table,
},
.probe = fsl_mc_bus_probe,
.remove = fsl_mc_bus_remove,
.shutdown = fsl_mc_bus_shutdown,
};
static int fsl_mc_bus_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
struct device *dev = data;
struct resource *res;
void __iomem *fsl_mc_regs;
if (action != BUS_NOTIFY_ADD_DEVICE)
return 0;
if (!of_match_device(fsl_mc_bus_match_table, dev) &&
!acpi_match_device(fsl_mc_bus_acpi_match_table, dev))
return 0;
res = platform_get_resource(to_platform_device(dev), IORESOURCE_MEM, 1);
if (!res)
return 0;
fsl_mc_regs = ioremap(res->start, resource_size(res));
if (!fsl_mc_regs)
return 0;
/*
* Make sure that the MC firmware is paused before the IOMMU setup for
* it is done or otherwise the firmware will crash right after the SMMU
* gets probed and enabled.
*/
writel(readl(fsl_mc_regs + FSL_MC_GCR1) | (GCR1_P1_STOP | GCR1_P2_STOP),
fsl_mc_regs + FSL_MC_GCR1);
iounmap(fsl_mc_regs);
return 0;
}
static struct notifier_block fsl_mc_nb = {
.notifier_call = fsl_mc_bus_notifier,
};
static int __init fsl_mc_bus_driver_init(void)
{
int error;
error = bus_register(&fsl_mc_bus_type);
if (error < 0) {
pr_err("bus type registration failed: %d\n", error);
goto error_cleanup_cache;
}
error = platform_driver_register(&fsl_mc_bus_driver);
if (error < 0) {
pr_err("platform_driver_register() failed: %d\n", error);
goto error_cleanup_bus;
}
error = dprc_driver_init();
if (error < 0)
goto error_cleanup_driver;
error = fsl_mc_allocator_driver_init();
if (error < 0)
goto error_cleanup_dprc_driver;
return bus_register_notifier(&platform_bus_type, &fsl_mc_nb);
error_cleanup_dprc_driver:
dprc_driver_exit();
error_cleanup_driver:
platform_driver_unregister(&fsl_mc_bus_driver);
error_cleanup_bus:
bus_unregister(&fsl_mc_bus_type);
error_cleanup_cache:
return error;
}
postcore_initcall(fsl_mc_bus_driver_init);
| linux-master | drivers/bus/fsl-mc/fsl-mc-bus.c |
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* Copyright 2013-2016 Freescale Semiconductor Inc.
*
* I/O services to send MC commands to the MC hardware
*
*/
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/fsl/mc.h>
#include "fsl-mc-private.h"
/*
* Timeout in milliseconds to wait for the completion of an MC command
*/
#define MC_CMD_COMPLETION_TIMEOUT_MS 500
/*
* usleep_range() min and max values used to throttle down polling
* iterations while waiting for MC command completion
*/
#define MC_CMD_COMPLETION_POLLING_MIN_SLEEP_USECS 10
#define MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS 500
static enum mc_cmd_status mc_cmd_hdr_read_status(struct fsl_mc_command *cmd)
{
struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
return (enum mc_cmd_status)hdr->status;
}
u16 mc_cmd_hdr_read_cmdid(struct fsl_mc_command *cmd)
{
struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
u16 cmd_id = le16_to_cpu(hdr->cmd_id);
return cmd_id;
}
static int mc_status_to_error(enum mc_cmd_status status)
{
static const int mc_status_to_error_map[] = {
[MC_CMD_STATUS_OK] = 0,
[MC_CMD_STATUS_AUTH_ERR] = -EACCES,
[MC_CMD_STATUS_NO_PRIVILEGE] = -EPERM,
[MC_CMD_STATUS_DMA_ERR] = -EIO,
[MC_CMD_STATUS_CONFIG_ERR] = -ENXIO,
[MC_CMD_STATUS_TIMEOUT] = -ETIMEDOUT,
[MC_CMD_STATUS_NO_RESOURCE] = -ENAVAIL,
[MC_CMD_STATUS_NO_MEMORY] = -ENOMEM,
[MC_CMD_STATUS_BUSY] = -EBUSY,
[MC_CMD_STATUS_UNSUPPORTED_OP] = -ENOTSUPP,
[MC_CMD_STATUS_INVALID_STATE] = -ENODEV,
};
if ((u32)status >= ARRAY_SIZE(mc_status_to_error_map))
return -EINVAL;
return mc_status_to_error_map[status];
}
static const char *mc_status_to_string(enum mc_cmd_status status)
{
static const char *const status_strings[] = {
[MC_CMD_STATUS_OK] = "Command completed successfully",
[MC_CMD_STATUS_READY] = "Command ready to be processed",
[MC_CMD_STATUS_AUTH_ERR] = "Authentication error",
[MC_CMD_STATUS_NO_PRIVILEGE] = "No privilege",
[MC_CMD_STATUS_DMA_ERR] = "DMA or I/O error",
[MC_CMD_STATUS_CONFIG_ERR] = "Configuration error",
[MC_CMD_STATUS_TIMEOUT] = "Operation timed out",
[MC_CMD_STATUS_NO_RESOURCE] = "No resources",
[MC_CMD_STATUS_NO_MEMORY] = "No memory available",
[MC_CMD_STATUS_BUSY] = "Device is busy",
[MC_CMD_STATUS_UNSUPPORTED_OP] = "Unsupported operation",
[MC_CMD_STATUS_INVALID_STATE] = "Invalid state"
};
if ((unsigned int)status >= ARRAY_SIZE(status_strings))
return "Unknown MC error";
return status_strings[status];
}
/**
* mc_write_command - writes a command to a Management Complex (MC) portal
*
* @portal: pointer to an MC portal
* @cmd: pointer to a filled command
*/
static inline void mc_write_command(struct fsl_mc_command __iomem *portal,
struct fsl_mc_command *cmd)
{
int i;
/* copy command parameters into the portal */
for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++)
/*
* Data is already in the expected LE byte-order. Do an
* extra LE -> CPU conversion so that the CPU -> LE done in
* the device io write api puts it back in the right order.
*/
writeq_relaxed(le64_to_cpu(cmd->params[i]), &portal->params[i]);
/* submit the command by writing the header */
writeq(le64_to_cpu(cmd->header), &portal->header);
}
/**
* mc_read_response - reads the response for the last MC command from a
* Management Complex (MC) portal
*
* @portal: pointer to an MC portal
* @resp: pointer to command response buffer
*
* Returns MC_CMD_STATUS_OK on Success; Error code otherwise.
*/
static inline enum mc_cmd_status mc_read_response(struct fsl_mc_command __iomem
*portal,
struct fsl_mc_command *resp)
{
int i;
enum mc_cmd_status status;
/* Copy command response header from MC portal: */
resp->header = cpu_to_le64(readq_relaxed(&portal->header));
status = mc_cmd_hdr_read_status(resp);
if (status != MC_CMD_STATUS_OK)
return status;
/* Copy command response data from MC portal: */
for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++)
/*
* Data is expected to be in LE byte-order. Do an
* extra CPU -> LE to revert the LE -> CPU done in
* the device io read api.
*/
resp->params[i] =
cpu_to_le64(readq_relaxed(&portal->params[i]));
return status;
}
/**
* mc_polling_wait_preemptible() - Waits for the completion of an MC
* command doing preemptible polling.
 *				   usleep_range() is called between
* polling iterations.
* @mc_io: MC I/O object to be used
* @cmd: command buffer to receive MC response
* @mc_status: MC command completion status
*/
static int mc_polling_wait_preemptible(struct fsl_mc_io *mc_io,
struct fsl_mc_command *cmd,
enum mc_cmd_status *mc_status)
{
enum mc_cmd_status status;
unsigned long jiffies_until_timeout =
jiffies + msecs_to_jiffies(MC_CMD_COMPLETION_TIMEOUT_MS);
/*
* Wait for response from the MC hardware:
*/
for (;;) {
status = mc_read_response(mc_io->portal_virt_addr, cmd);
if (status != MC_CMD_STATUS_READY)
break;
/*
* TODO: When MC command completion interrupts are supported
* call wait function here instead of usleep_range()
*/
usleep_range(MC_CMD_COMPLETION_POLLING_MIN_SLEEP_USECS,
MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS);
if (time_after_eq(jiffies, jiffies_until_timeout)) {
dev_dbg(mc_io->dev,
"MC command timed out (portal: %pa, dprc handle: %#x, command: %#x)\n",
&mc_io->portal_phys_addr,
(unsigned int)mc_cmd_hdr_read_token(cmd),
(unsigned int)mc_cmd_hdr_read_cmdid(cmd));
return -ETIMEDOUT;
}
}
*mc_status = status;
return 0;
}
/**
* mc_polling_wait_atomic() - Waits for the completion of an MC command
* doing atomic polling. udelay() is called
* between polling iterations.
* @mc_io: MC I/O object to be used
* @cmd: command buffer to receive MC response
* @mc_status: MC command completion status
*/
static int mc_polling_wait_atomic(struct fsl_mc_io *mc_io,
struct fsl_mc_command *cmd,
enum mc_cmd_status *mc_status)
{
enum mc_cmd_status status;
unsigned long timeout_usecs = MC_CMD_COMPLETION_TIMEOUT_MS * 1000;
BUILD_BUG_ON((MC_CMD_COMPLETION_TIMEOUT_MS * 1000) %
MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS != 0);
for (;;) {
status = mc_read_response(mc_io->portal_virt_addr, cmd);
if (status != MC_CMD_STATUS_READY)
break;
udelay(MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS);
timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS;
if (timeout_usecs == 0) {
dev_dbg(mc_io->dev,
"MC command timed out (portal: %pa, dprc handle: %#x, command: %#x)\n",
&mc_io->portal_phys_addr,
(unsigned int)mc_cmd_hdr_read_token(cmd),
(unsigned int)mc_cmd_hdr_read_cmdid(cmd));
return -ETIMEDOUT;
}
}
*mc_status = status;
return 0;
}
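/*
 * Budget arithmetic for the loop above (illustrative): 500 ms * 1000 /
 * 500 us per udelay() gives at most 1000 polling iterations before the
 * timeout fires; the BUILD_BUG_ON guarantees the division is exact.
 */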
/**
* mc_send_command() - Sends a command to the MC device using the given
* MC I/O object
* @mc_io: MC I/O object to be used
* @cmd: command to be sent
*
* Returns '0' on Success; Error code otherwise.
*/
int mc_send_command(struct fsl_mc_io *mc_io, struct fsl_mc_command *cmd)
{
int error;
enum mc_cmd_status status;
unsigned long irq_flags = 0;
if (in_irq() && !(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL))
return -EINVAL;
if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
raw_spin_lock_irqsave(&mc_io->spinlock, irq_flags);
else
mutex_lock(&mc_io->mutex);
/*
* Send command to the MC hardware:
*/
mc_write_command(mc_io->portal_virt_addr, cmd);
/*
* Wait for response from the MC hardware:
*/
if (!(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL))
error = mc_polling_wait_preemptible(mc_io, cmd, &status);
else
error = mc_polling_wait_atomic(mc_io, cmd, &status);
if (error < 0)
goto common_exit;
if (status != MC_CMD_STATUS_OK) {
dev_dbg(mc_io->dev,
"MC command failed: portal: %pa, dprc handle: %#x, command: %#x, status: %s (%#x)\n",
&mc_io->portal_phys_addr,
(unsigned int)mc_cmd_hdr_read_token(cmd),
(unsigned int)mc_cmd_hdr_read_cmdid(cmd),
mc_status_to_string(status),
(unsigned int)status);
error = mc_status_to_error(status);
goto common_exit;
}
error = 0;
common_exit:
if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
raw_spin_unlock_irqrestore(&mc_io->spinlock, irq_flags);
else
mutex_unlock(&mc_io->mutex);
return error;
}
EXPORT_SYMBOL_GPL(mc_send_command);
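/*
 * Minimal caller sketch (hedged; 'cmdid', 'cmd_flags' and 'token' are
 * placeholders for values defined by the specific command):
 *
 *	struct fsl_mc_command cmd = { 0 };
 *
 *	cmd.header = mc_encode_cmd_header(cmdid, cmd_flags, token);
 *	// ... fill cmd.params as the command layout requires ...
 *	err = mc_send_command(mc_io, &cmd);
 */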
| linux-master | drivers/bus/fsl-mc/mc-sys.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Freescale data path resource container (DPRC) driver
*
* Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
* Copyright 2019-2020 NXP
* Author: German Rivera <[email protected]>
*
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/fsl/mc.h>
#include "fsl-mc-private.h"
#define FSL_MC_DPRC_DRIVER_NAME "fsl_mc_dprc"
struct fsl_mc_child_objs {
int child_count;
struct fsl_mc_obj_desc *child_array;
};
static bool fsl_mc_device_match(struct fsl_mc_device *mc_dev,
struct fsl_mc_obj_desc *obj_desc)
{
return mc_dev->obj_desc.id == obj_desc->id &&
strcmp(mc_dev->obj_desc.type, obj_desc->type) == 0;
}
static bool fsl_mc_obj_desc_is_allocatable(struct fsl_mc_obj_desc *obj)
{
if (strcmp(obj->type, "dpmcp") == 0 ||
strcmp(obj->type, "dpcon") == 0 ||
strcmp(obj->type, "dpbp") == 0)
return true;
else
return false;
}
static int __fsl_mc_device_remove_if_not_in_mc(struct device *dev, void *data)
{
int i;
struct fsl_mc_child_objs *objs;
struct fsl_mc_device *mc_dev;
if (!dev_is_fsl_mc(dev))
return 0;
mc_dev = to_fsl_mc_device(dev);
objs = data;
for (i = 0; i < objs->child_count; i++) {
struct fsl_mc_obj_desc *obj_desc = &objs->child_array[i];
if (strlen(obj_desc->type) != 0 &&
fsl_mc_device_match(mc_dev, obj_desc))
break;
}
if (i == objs->child_count)
fsl_mc_device_remove(mc_dev);
return 0;
}
static int __fsl_mc_device_remove(struct device *dev, void *data)
{
if (!dev_is_fsl_mc(dev))
return 0;
fsl_mc_device_remove(to_fsl_mc_device(dev));
return 0;
}
/**
* dprc_remove_devices - Removes devices for objects removed from a DPRC
*
* @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
* @obj_desc_array: array of object descriptors for child objects currently
* present in the DPRC in the MC.
* @num_child_objects_in_mc: number of entries in obj_desc_array
*
* Synchronizes the state of the Linux bus driver with the actual state of
* the MC by removing devices that represent MC objects that have
* been dynamically removed in the physical DPRC.
*/
void dprc_remove_devices(struct fsl_mc_device *mc_bus_dev,
struct fsl_mc_obj_desc *obj_desc_array,
int num_child_objects_in_mc)
{
if (num_child_objects_in_mc != 0) {
/*
* Remove child objects that are in the DPRC in Linux,
* but not in the MC:
*/
struct fsl_mc_child_objs objs;
objs.child_count = num_child_objects_in_mc;
objs.child_array = obj_desc_array;
device_for_each_child(&mc_bus_dev->dev, &objs,
__fsl_mc_device_remove_if_not_in_mc);
} else {
/*
* There are no child objects for this DPRC in the MC.
* So, remove all the child devices from Linux:
*/
device_for_each_child(&mc_bus_dev->dev, NULL,
__fsl_mc_device_remove);
}
}
EXPORT_SYMBOL_GPL(dprc_remove_devices);
static int __fsl_mc_device_match(struct device *dev, void *data)
{
struct fsl_mc_obj_desc *obj_desc = data;
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
return fsl_mc_device_match(mc_dev, obj_desc);
}
struct fsl_mc_device *fsl_mc_device_lookup(struct fsl_mc_obj_desc *obj_desc,
struct fsl_mc_device *mc_bus_dev)
{
struct device *dev;
dev = device_find_child(&mc_bus_dev->dev, obj_desc,
__fsl_mc_device_match);
return dev ? to_fsl_mc_device(dev) : NULL;
}
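/*
 * Editor's note: device_find_child() takes a reference on the device it
 * returns, so a successful fsl_mc_device_lookup() must be balanced with
 * put_device() once the caller is done, as in this sketch (mirroring the
 * caller later in this file):
 *
 *	child_dev = fsl_mc_device_lookup(obj_desc, mc_bus_dev);
 *	if (child_dev) {
 *		check_plugged_state_change(child_dev, obj_desc);
 *		put_device(&child_dev->dev);
 *	}
 */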
/**
* check_plugged_state_change - Check change in an MC object's plugged state
*
* @mc_dev: pointer to the fsl-mc device for a given MC object
* @obj_desc: pointer to the MC object's descriptor in the MC
*
* If the plugged state has changed from unplugged to plugged, the fsl-mc
* device is bound to the corresponding device driver.
* If the plugged state has changed from plugged to unplugged, the fsl-mc
* device is unbound from the corresponding device driver.
*/
static void check_plugged_state_change(struct fsl_mc_device *mc_dev,
struct fsl_mc_obj_desc *obj_desc)
{
int error;
u32 plugged_flag_at_mc =
obj_desc->state & FSL_MC_OBJ_STATE_PLUGGED;
if (plugged_flag_at_mc !=
(mc_dev->obj_desc.state & FSL_MC_OBJ_STATE_PLUGGED)) {
if (plugged_flag_at_mc) {
mc_dev->obj_desc.state |= FSL_MC_OBJ_STATE_PLUGGED;
error = device_attach(&mc_dev->dev);
if (error < 0) {
dev_err(&mc_dev->dev,
"device_attach() failed: %d\n",
error);
}
} else {
mc_dev->obj_desc.state &= ~FSL_MC_OBJ_STATE_PLUGGED;
device_release_driver(&mc_dev->dev);
}
}
}
static void fsl_mc_obj_device_add(struct fsl_mc_device *mc_bus_dev,
struct fsl_mc_obj_desc *obj_desc)
{
int error;
struct fsl_mc_device *child_dev;
/*
* Check if device is already known to Linux:
*/
child_dev = fsl_mc_device_lookup(obj_desc, mc_bus_dev);
if (child_dev) {
check_plugged_state_change(child_dev, obj_desc);
put_device(&child_dev->dev);
} else {
error = fsl_mc_device_add(obj_desc, NULL, &mc_bus_dev->dev,
&child_dev);
if (error < 0)
return;
}
}
/**
* dprc_add_new_devices - Adds devices to the logical bus for a DPRC
*
* @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
* @obj_desc_array: array of device descriptors for child devices currently
* present in the physical DPRC.
* @num_child_objects_in_mc: number of entries in obj_desc_array
*
* Synchronizes the state of the Linux bus driver with the actual
* state of the MC by adding objects that have been newly discovered
* in the physical DPRC.
*/
static void dprc_add_new_devices(struct fsl_mc_device *mc_bus_dev,
struct fsl_mc_obj_desc *obj_desc_array,
int num_child_objects_in_mc)
{
int i;
/* probe the allocable objects first */
for (i = 0; i < num_child_objects_in_mc; i++) {
struct fsl_mc_obj_desc *obj_desc = &obj_desc_array[i];
if (strlen(obj_desc->type) > 0 &&
fsl_mc_obj_desc_is_allocatable(obj_desc))
fsl_mc_obj_device_add(mc_bus_dev, obj_desc);
}
for (i = 0; i < num_child_objects_in_mc; i++) {
struct fsl_mc_obj_desc *obj_desc = &obj_desc_array[i];
if (strlen(obj_desc->type) > 0 &&
!fsl_mc_obj_desc_is_allocatable(obj_desc))
fsl_mc_obj_device_add(mc_bus_dev, obj_desc);
}
}
/**
* dprc_scan_objects - Discover objects in a DPRC
*
* @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
* @alloc_interrupts: if true the function allocates the interrupt pool,
* otherwise the interrupt allocation is delayed
*
* Detects objects added and removed from a DPRC and synchronizes the
 * state of the Linux bus driver with the MC by adding and removing
* devices accordingly.
* Two types of devices can be found in a DPRC: allocatable objects (e.g.,
* dpbp, dpmcp) and non-allocatable devices (e.g., dprc, dpni).
 * All allocatable devices need to be probed before all non-allocatable
* devices, to ensure that device drivers for non-allocatable
* devices can allocate any type of allocatable devices.
* That is, we need to ensure that the corresponding resource pools are
* populated before they can get allocation requests from probe callbacks
* of the device drivers for the non-allocatable devices.
*/
int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
bool alloc_interrupts)
{
int num_child_objects;
int dprc_get_obj_failures;
int error;
unsigned int irq_count = mc_bus_dev->obj_desc.irq_count;
struct fsl_mc_obj_desc *child_obj_desc_array = NULL;
struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
error = dprc_get_obj_count(mc_bus_dev->mc_io,
0,
mc_bus_dev->mc_handle,
&num_child_objects);
if (error < 0) {
dev_err(&mc_bus_dev->dev, "dprc_get_obj_count() failed: %d\n",
error);
return error;
}
if (num_child_objects != 0) {
int i;
child_obj_desc_array =
devm_kmalloc_array(&mc_bus_dev->dev, num_child_objects,
sizeof(*child_obj_desc_array),
GFP_KERNEL);
if (!child_obj_desc_array)
return -ENOMEM;
/*
* Discover objects currently present in the physical DPRC:
*/
dprc_get_obj_failures = 0;
for (i = 0; i < num_child_objects; i++) {
struct fsl_mc_obj_desc *obj_desc =
&child_obj_desc_array[i];
error = dprc_get_obj(mc_bus_dev->mc_io,
0,
mc_bus_dev->mc_handle,
i, obj_desc);
if (error < 0) {
dev_err(&mc_bus_dev->dev,
"dprc_get_obj(i=%d) failed: %d\n",
i, error);
/*
* Mark the obj entry as "invalid", by using the
* empty string as obj type:
*/
obj_desc->type[0] = '\0';
obj_desc->id = error;
dprc_get_obj_failures++;
continue;
}
/*
 * Add a quirk for all versions of dpseci < 4.0: none
 * are coherent regardless of what the MC reports.
*/
if ((strcmp(obj_desc->type, "dpseci") == 0) &&
(obj_desc->ver_major < 4))
obj_desc->flags |=
FSL_MC_OBJ_FLAG_NO_MEM_SHAREABILITY;
irq_count += obj_desc->irq_count;
dev_dbg(&mc_bus_dev->dev,
"Discovered object: type %s, id %d\n",
obj_desc->type, obj_desc->id);
}
if (dprc_get_obj_failures != 0) {
dev_err(&mc_bus_dev->dev,
"%d out of %d devices could not be retrieved\n",
dprc_get_obj_failures, num_child_objects);
}
}
/*
* Allocate IRQ's before binding the scanned devices with their
* respective drivers.
*/
if (dev_get_msi_domain(&mc_bus_dev->dev)) {
if (irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS) {
dev_warn(&mc_bus_dev->dev,
"IRQs needed (%u) exceed IRQs preallocated (%u)\n",
irq_count, FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
}
if (alloc_interrupts && !mc_bus->irq_resources) {
error = fsl_mc_populate_irq_pool(mc_bus_dev,
FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
if (error < 0)
return error;
}
}
dprc_remove_devices(mc_bus_dev, child_obj_desc_array,
num_child_objects);
dprc_add_new_devices(mc_bus_dev, child_obj_desc_array,
num_child_objects);
if (child_obj_desc_array)
devm_kfree(&mc_bus_dev->dev, child_obj_desc_array);
return 0;
}
/**
* dprc_scan_container - Scans a physical DPRC and synchronizes Linux bus state
*
* @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
* @alloc_interrupts: if true the function allocates the interrupt pool,
* otherwise the interrupt allocation is delayed
* Scans the physical DPRC and synchronizes the state of the Linux
* bus driver with the actual state of the MC by adding and removing
* devices as appropriate.
*/
int dprc_scan_container(struct fsl_mc_device *mc_bus_dev,
bool alloc_interrupts)
{
int error = 0;
struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
fsl_mc_init_all_resource_pools(mc_bus_dev);
/*
* Discover objects in the DPRC:
*/
mutex_lock(&mc_bus->scan_mutex);
error = dprc_scan_objects(mc_bus_dev, alloc_interrupts);
mutex_unlock(&mc_bus->scan_mutex);
return error;
}
EXPORT_SYMBOL_GPL(dprc_scan_container);
/**
* dprc_irq0_handler - Regular ISR for DPRC interrupt 0
*
* @irq_num: IRQ number of the interrupt being handled
* @arg: Pointer to device structure
*/
static irqreturn_t dprc_irq0_handler(int irq_num, void *arg)
{
return IRQ_WAKE_THREAD;
}
/**
* dprc_irq0_handler_thread - Handler thread function for DPRC interrupt 0
*
* @irq_num: IRQ number of the interrupt being handled
* @arg: Pointer to device structure
*/
static irqreturn_t dprc_irq0_handler_thread(int irq_num, void *arg)
{
int error;
u32 status;
struct device *dev = arg;
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
struct fsl_mc_io *mc_io = mc_dev->mc_io;
int irq = mc_dev->irqs[0]->virq;
dev_dbg(dev, "DPRC IRQ %d triggered on CPU %u\n",
irq_num, smp_processor_id());
if (!(mc_dev->flags & FSL_MC_IS_DPRC))
return IRQ_HANDLED;
mutex_lock(&mc_bus->scan_mutex);
if (irq != (u32)irq_num)
goto out;
status = 0;
error = dprc_get_irq_status(mc_io, 0, mc_dev->mc_handle, 0,
&status);
if (error < 0) {
dev_err(dev,
"dprc_get_irq_status() failed: %d\n", error);
goto out;
}
error = dprc_clear_irq_status(mc_io, 0, mc_dev->mc_handle, 0,
status);
if (error < 0) {
dev_err(dev,
"dprc_clear_irq_status() failed: %d\n", error);
goto out;
}
if (status & (DPRC_IRQ_EVENT_OBJ_ADDED |
DPRC_IRQ_EVENT_OBJ_REMOVED |
DPRC_IRQ_EVENT_CONTAINER_DESTROYED |
DPRC_IRQ_EVENT_OBJ_DESTROYED |
DPRC_IRQ_EVENT_OBJ_CREATED)) {
error = dprc_scan_objects(mc_dev, true);
if (error < 0) {
/*
* If the error is -ENXIO, we ignore it, as it indicates
* that the object scan was aborted, as we detected that
* an object was removed from the DPRC in the MC, while
* we were scanning the DPRC.
*/
if (error != -ENXIO) {
dev_err(dev, "dprc_scan_objects() failed: %d\n",
error);
}
goto out;
}
}
out:
mutex_unlock(&mc_bus->scan_mutex);
return IRQ_HANDLED;
}
/*
* Disable and clear interrupt for a given DPRC object
*/
int disable_dprc_irq(struct fsl_mc_device *mc_dev)
{
struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
int error;
struct fsl_mc_io *mc_io = mc_dev->mc_io;
/*
* Disable generation of interrupt, while we configure it:
*/
error = dprc_set_irq_enable(mc_io, 0, mc_dev->mc_handle, 0, 0);
if (error < 0) {
dev_err(&mc_dev->dev,
"Disabling DPRC IRQ failed: dprc_set_irq_enable() failed: %d\n",
error);
return error;
}
/*
* Disable all interrupt causes for the interrupt:
*/
error = dprc_set_irq_mask(mc_io, 0, mc_dev->mc_handle, 0, 0x0);
if (error < 0) {
dev_err(&mc_dev->dev,
"Disabling DPRC IRQ failed: dprc_set_irq_mask() failed: %d\n",
error);
return error;
}
/*
* Clear any leftover interrupts:
*/
error = dprc_clear_irq_status(mc_io, 0, mc_dev->mc_handle, 0, ~0x0U);
if (error < 0) {
dev_err(&mc_dev->dev,
"Disabling DPRC IRQ failed: dprc_clear_irq_status() failed: %d\n",
error);
return error;
}
mc_bus->irq_enabled = 0;
return 0;
}
int get_dprc_irq_state(struct fsl_mc_device *mc_dev)
{
struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
return mc_bus->irq_enabled;
}
static int register_dprc_irq_handler(struct fsl_mc_device *mc_dev)
{
int error;
struct fsl_mc_device_irq *irq = mc_dev->irqs[0];
/*
* NOTE: devm_request_threaded_irq() invokes the device-specific
* function that programs the MSI physically in the device
*/
error = devm_request_threaded_irq(&mc_dev->dev,
irq->virq,
dprc_irq0_handler,
dprc_irq0_handler_thread,
IRQF_NO_SUSPEND | IRQF_ONESHOT,
dev_name(&mc_dev->dev),
&mc_dev->dev);
if (error < 0) {
dev_err(&mc_dev->dev,
"devm_request_threaded_irq() failed: %d\n",
error);
return error;
}
return 0;
}
int enable_dprc_irq(struct fsl_mc_device *mc_dev)
{
struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
int error;
/*
* Enable all interrupt causes for the interrupt:
*/
error = dprc_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle, 0,
~0x0u);
if (error < 0) {
dev_err(&mc_dev->dev,
"Enabling DPRC IRQ failed: dprc_set_irq_mask() failed: %d\n",
error);
return error;
}
/*
* Enable generation of the interrupt:
*/
error = dprc_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle, 0, 1);
if (error < 0) {
dev_err(&mc_dev->dev,
"Enabling DPRC IRQ failed: dprc_set_irq_enable() failed: %d\n",
error);
return error;
}
mc_bus->irq_enabled = 1;
return 0;
}
/*
* Setup interrupt for a given DPRC device
*/
static int dprc_setup_irq(struct fsl_mc_device *mc_dev)
{
int error;
error = fsl_mc_allocate_irqs(mc_dev);
if (error < 0)
return error;
error = disable_dprc_irq(mc_dev);
if (error < 0)
goto error_free_irqs;
error = register_dprc_irq_handler(mc_dev);
if (error < 0)
goto error_free_irqs;
error = enable_dprc_irq(mc_dev);
if (error < 0)
goto error_free_irqs;
return 0;
error_free_irqs:
fsl_mc_free_irqs(mc_dev);
return error;
}
/**
* dprc_setup - opens and creates a mc_io for DPRC
*
* @mc_dev: Pointer to fsl-mc device representing a DPRC
*
* It opens the physical DPRC in the MC.
* It configures the DPRC portal used to communicate with MC
*/
int dprc_setup(struct fsl_mc_device *mc_dev)
{
struct device *parent_dev = mc_dev->dev.parent;
struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
struct irq_domain *mc_msi_domain;
bool mc_io_created = false;
bool msi_domain_set = false;
bool uapi_created = false;
u16 major_ver, minor_ver;
size_t region_size;
int error;
if (!is_fsl_mc_bus_dprc(mc_dev))
return -EINVAL;
if (dev_get_msi_domain(&mc_dev->dev))
return -EINVAL;
if (!mc_dev->mc_io) {
/*
* This is a child DPRC:
*/
if (!dev_is_fsl_mc(parent_dev))
return -EINVAL;
if (mc_dev->obj_desc.region_count == 0)
return -EINVAL;
region_size = resource_size(mc_dev->regions);
error = fsl_create_mc_io(&mc_dev->dev,
mc_dev->regions[0].start,
region_size,
NULL,
FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
&mc_dev->mc_io);
if (error < 0)
return error;
mc_io_created = true;
} else {
error = fsl_mc_uapi_create_device_file(mc_bus);
if (error < 0)
return -EPROBE_DEFER;
uapi_created = true;
}
mc_msi_domain = fsl_mc_find_msi_domain(&mc_dev->dev);
if (!mc_msi_domain) {
dev_warn(&mc_dev->dev,
"WARNING: MC bus without interrupt support\n");
} else {
dev_set_msi_domain(&mc_dev->dev, mc_msi_domain);
msi_domain_set = true;
}
error = dprc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
&mc_dev->mc_handle);
if (error < 0) {
dev_err(&mc_dev->dev, "dprc_open() failed: %d\n", error);
goto error_cleanup_msi_domain;
}
error = dprc_get_attributes(mc_dev->mc_io, 0, mc_dev->mc_handle,
&mc_bus->dprc_attr);
if (error < 0) {
dev_err(&mc_dev->dev, "dprc_get_attributes() failed: %d\n",
error);
goto error_cleanup_open;
}
error = dprc_get_api_version(mc_dev->mc_io, 0,
&major_ver,
&minor_ver);
if (error < 0) {
dev_err(&mc_dev->dev, "dprc_get_api_version() failed: %d\n",
error);
goto error_cleanup_open;
}
if (major_ver < DPRC_MIN_VER_MAJOR) {
dev_err(&mc_dev->dev,
"ERROR: DPRC version %d.%d not supported\n",
major_ver, minor_ver);
error = -ENOTSUPP;
goto error_cleanup_open;
}
return 0;
error_cleanup_open:
(void)dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
error_cleanup_msi_domain:
if (msi_domain_set)
dev_set_msi_domain(&mc_dev->dev, NULL);
if (mc_io_created) {
fsl_destroy_mc_io(mc_dev->mc_io);
mc_dev->mc_io = NULL;
}
if (uapi_created)
fsl_mc_uapi_remove_device_file(mc_bus);
return error;
}
EXPORT_SYMBOL_GPL(dprc_setup);
/**
* dprc_probe - callback invoked when a DPRC is being bound to this driver
*
* @mc_dev: Pointer to fsl-mc device representing a DPRC
*
* It opens the physical DPRC in the MC.
* It scans the DPRC to discover the MC objects contained in it.
* It creates the interrupt pool for the MC bus associated with the DPRC.
* It configures the interrupts for the DPRC device itself.
*/
static int dprc_probe(struct fsl_mc_device *mc_dev)
{
int error;
error = dprc_setup(mc_dev);
if (error < 0)
return error;
/*
* Discover MC objects in DPRC object:
*/
error = dprc_scan_container(mc_dev, true);
if (error < 0)
goto dprc_cleanup;
/*
* Configure interrupt for the DPRC object associated with this MC bus:
*/
error = dprc_setup_irq(mc_dev);
if (error < 0)
goto scan_cleanup;
dev_info(&mc_dev->dev, "DPRC device bound to driver");
return 0;
scan_cleanup:
device_for_each_child(&mc_dev->dev, NULL, __fsl_mc_device_remove);
dprc_cleanup:
dprc_cleanup(mc_dev);
return error;
}
/*
* Tear down interrupt for a given DPRC object
*/
static void dprc_teardown_irq(struct fsl_mc_device *mc_dev)
{
struct fsl_mc_device_irq *irq = mc_dev->irqs[0];
(void)disable_dprc_irq(mc_dev);
devm_free_irq(&mc_dev->dev, irq->virq, &mc_dev->dev);
fsl_mc_free_irqs(mc_dev);
}
/**
* dprc_cleanup - function that cleanups a DPRC
*
* @mc_dev: Pointer to fsl-mc device representing the DPRC
*
* It closes the DPRC device in the MC.
* It destroys the interrupt pool associated with this MC bus.
*/
int dprc_cleanup(struct fsl_mc_device *mc_dev)
{
struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
int error;
/* This function should be called only for DPRCs; it
 * is an error to call it for regular objects.
*/
if (!is_fsl_mc_bus_dprc(mc_dev))
return -EINVAL;
if (dev_get_msi_domain(&mc_dev->dev)) {
fsl_mc_cleanup_irq_pool(mc_dev);
dev_set_msi_domain(&mc_dev->dev, NULL);
}
fsl_mc_cleanup_all_resource_pools(mc_dev);
/* If this step fails, we cannot go further with the cleanup as there is
 * no way of communicating with the firmware.
*/
if (!mc_dev->mc_io) {
dev_err(&mc_dev->dev, "mc_io is NULL, tear down cannot be performed in firmware\n");
return -EINVAL;
}
error = dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
if (error < 0)
dev_err(&mc_dev->dev, "dprc_close() failed: %d\n", error);
if (!fsl_mc_is_root_dprc(&mc_dev->dev)) {
fsl_destroy_mc_io(mc_dev->mc_io);
mc_dev->mc_io = NULL;
} else {
fsl_mc_uapi_remove_device_file(mc_bus);
}
return 0;
}
EXPORT_SYMBOL_GPL(dprc_cleanup);
/**
* dprc_remove - callback invoked when a DPRC is being unbound from this driver
*
* @mc_dev: Pointer to fsl-mc device representing the DPRC
*
* It removes the DPRC's child objects from Linux (not from the MC) and
* closes the DPRC device in the MC.
* It tears down the interrupts that were configured for the DPRC device.
* It destroys the interrupt pool associated with this MC bus.
*/
static void dprc_remove(struct fsl_mc_device *mc_dev)
{
struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
if (!mc_bus->irq_resources) {
dev_err(&mc_dev->dev, "No irq resources, so unbinding the device failed\n");
return;
}
if (dev_get_msi_domain(&mc_dev->dev))
dprc_teardown_irq(mc_dev);
device_for_each_child(&mc_dev->dev, NULL, __fsl_mc_device_remove);
dprc_cleanup(mc_dev);
dev_info(&mc_dev->dev, "DPRC device unbound from driver");
}
static const struct fsl_mc_device_id match_id_table[] = {
{
.vendor = FSL_MC_VENDOR_FREESCALE,
.obj_type = "dprc"},
{.vendor = 0x0},
};
static struct fsl_mc_driver dprc_driver = {
.driver = {
.name = FSL_MC_DPRC_DRIVER_NAME,
.owner = THIS_MODULE,
.pm = NULL,
},
.match_id_table = match_id_table,
.probe = dprc_probe,
.remove = dprc_remove,
};
int __init dprc_driver_init(void)
{
return fsl_mc_driver_register(&dprc_driver);
}
void dprc_driver_exit(void)
{
fsl_mc_driver_unregister(&dprc_driver);
}
| linux-master | drivers/bus/fsl-mc/dprc-driver.c |
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* Copyright 2013-2016 Freescale Semiconductor Inc.
* Copyright 2020 NXP
*
*/
#include <linux/kernel.h>
#include <linux/fsl/mc.h>
#include "fsl-mc-private.h"
/*
* cache the DPRC version to reduce the number of commands
* towards the mc firmware
*/
static u16 dprc_major_ver;
static u16 dprc_minor_ver;
/**
* dprc_open() - Open DPRC object for use
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @container_id: Container ID to open
* @token: Returned token of DPRC object
*
* Return: '0' on Success; Error code otherwise.
*
* @warning Required before any operation on the object.
*/
int dprc_open(struct fsl_mc_io *mc_io,
u32 cmd_flags,
int container_id,
u16 *token)
{
struct fsl_mc_command cmd = { 0 };
struct dprc_cmd_open *cmd_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_OPEN, cmd_flags,
0);
cmd_params = (struct dprc_cmd_open *)cmd.params;
cmd_params->container_id = cpu_to_le32(container_id);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
/* retrieve response parameters */
*token = mc_cmd_hdr_read_token(&cmd);
return 0;
}
EXPORT_SYMBOL_GPL(dprc_open);
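/*
 * Example (editor's sketch, hypothetical caller): dprc_open() and
 * dprc_close() are always used as a pair; the token returned by open
 * authenticates every subsequent command on the object.
 *
 *	struct dprc_attributes attr;
 *	u16 token;
 *	int err;
 *
 *	err = dprc_open(mc_io, 0, container_id, &token);
 *	if (err)
 *		return err;
 *	err = dprc_get_attributes(mc_io, 0, token, &attr);
 *	(void)dprc_close(mc_io, 0, token);
 *	return err;
 */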
/**
* dprc_close() - Close the control session of the object
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPRC object
*
* After this function is called, no further operations are
* allowed on the object without opening a new control session.
*
* Return: '0' on Success; Error code otherwise.
*/
int dprc_close(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token)
{
struct fsl_mc_command cmd = { 0 };
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_CLOSE, cmd_flags,
token);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
EXPORT_SYMBOL_GPL(dprc_close);
/**
* dprc_reset_container - Reset child container.
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPRC object
* @child_container_id: ID of the container to reset
* @options: 32 bit options:
* - 0 (no bits set) - all the objects inside the container are
* reset. The child containers are entered recursively and the
* objects reset. All the objects (including the child containers)
* are closed.
* - bit 0 set - all the objects inside the container are reset.
* However the child containers are not entered recursively.
* This option is supported for API versions >= 6.5
* In case a software context crashes or becomes non-responsive, the parent
 * may wish to reset its resource container before the software context is
* restarted.
*
* This routine informs all objects assigned to the child container that the
* container is being reset, so they may perform any cleanup operations that are
* needed. All objects handles that were owned by the child container shall be
* closed.
*
 * Note that such a request may be submitted even if the child software context
* has not crashed, but the resulting object cleanup operations will not be
* aware of that.
*
* Return: '0' on Success; Error code otherwise.
*/
int dprc_reset_container(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token,
int child_container_id,
u32 options)
{
struct fsl_mc_command cmd = { 0 };
struct dprc_cmd_reset_container *cmd_params;
u32 cmdid = DPRC_CMDID_RESET_CONT;
int err;
/*
* If the DPRC object version was not yet cached, cache it now.
* Otherwise use the already cached value.
*/
if (!dprc_major_ver && !dprc_minor_ver) {
err = dprc_get_api_version(mc_io, 0,
&dprc_major_ver,
&dprc_minor_ver);
if (err)
return err;
}
/*
* MC API 6.5 introduced a new field in the command used to pass
* some flags.
* Bit 0 indicates that the child containers are not recursively reset.
*/
if (dprc_major_ver > 6 || (dprc_major_ver == 6 && dprc_minor_ver >= 5))
cmdid = DPRC_CMDID_RESET_CONT_V2;
/* prepare command */
cmd.header = mc_encode_cmd_header(cmdid, cmd_flags, token);
cmd_params = (struct dprc_cmd_reset_container *)cmd.params;
cmd_params->child_container_id = cpu_to_le32(child_container_id);
cmd_params->options = cpu_to_le32(options);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
EXPORT_SYMBOL_GPL(dprc_reset_container);
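/*
 * Example (editor's sketch, hypothetical identifiers): resetting a crashed
 * child container without recursing into its own child containers. Bit 0
 * of @options selects the non-recursive behaviour described above
 * (MC API >= 6.5); passing 0 keeps the fully recursive reset.
 *
 *	int err;
 *
 *	err = dprc_reset_container(mc_io, 0, token, child_id, BIT(0));
 */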
/**
* dprc_set_irq() - Set IRQ information for the DPRC to trigger an interrupt.
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPRC object
* @irq_index: Identifies the interrupt index to configure
* @irq_cfg: IRQ configuration
*
* Return: '0' on Success; Error code otherwise.
*/
int dprc_set_irq(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token,
u8 irq_index,
struct dprc_irq_cfg *irq_cfg)
{
struct fsl_mc_command cmd = { 0 };
struct dprc_cmd_set_irq *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ,
cmd_flags,
token);
cmd_params = (struct dprc_cmd_set_irq *)cmd.params;
cmd_params->irq_val = cpu_to_le32(irq_cfg->val);
cmd_params->irq_index = irq_index;
cmd_params->irq_addr = cpu_to_le64(irq_cfg->paddr);
cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
/**
* dprc_set_irq_enable() - Set overall interrupt state.
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPRC object
* @irq_index: The interrupt index to configure
* @en: Interrupt state - enable = 1, disable = 0
*
* Allows GPP software to control when interrupts are generated.
 * Each interrupt can have up to 32 causes. The enable/disable controls the
 * overall interrupt state: if the interrupt is disabled, no cause will
 * trigger an interrupt.
*
* Return: '0' on Success; Error code otherwise.
*/
int dprc_set_irq_enable(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token,
u8 irq_index,
u8 en)
{
struct fsl_mc_command cmd = { 0 };
struct dprc_cmd_set_irq_enable *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ_ENABLE,
cmd_flags, token);
cmd_params = (struct dprc_cmd_set_irq_enable *)cmd.params;
cmd_params->enable = en & DPRC_ENABLE;
cmd_params->irq_index = irq_index;
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
/**
* dprc_set_irq_mask() - Set interrupt mask.
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPRC object
* @irq_index: The interrupt index to configure
* @mask: event mask to trigger interrupt;
* each bit:
* 0 = ignore event
* 1 = consider event for asserting irq
*
* Every interrupt can have up to 32 causes and the interrupt model supports
* masking/unmasking each cause independently
*
* Return: '0' on Success; Error code otherwise.
*/
int dprc_set_irq_mask(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token,
u8 irq_index,
u32 mask)
{
struct fsl_mc_command cmd = { 0 };
struct dprc_cmd_set_irq_mask *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ_MASK,
cmd_flags, token);
cmd_params = (struct dprc_cmd_set_irq_mask *)cmd.params;
cmd_params->mask = cpu_to_le32(mask);
cmd_params->irq_index = irq_index;
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
/**
* dprc_get_irq_status() - Get the current status of any pending interrupts.
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPRC object
* @irq_index: The interrupt index to configure
* @status: Returned interrupts status - one bit per cause:
* 0 = no interrupt pending
* 1 = interrupt pending
*
* Return: '0' on Success; Error code otherwise.
*/
int dprc_get_irq_status(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token,
u8 irq_index,
u32 *status)
{
struct fsl_mc_command cmd = { 0 };
struct dprc_cmd_get_irq_status *cmd_params;
struct dprc_rsp_get_irq_status *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_STATUS,
cmd_flags, token);
cmd_params = (struct dprc_cmd_get_irq_status *)cmd.params;
cmd_params->status = cpu_to_le32(*status);
cmd_params->irq_index = irq_index;
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
/* retrieve response parameters */
rsp_params = (struct dprc_rsp_get_irq_status *)cmd.params;
*status = le32_to_cpu(rsp_params->status);
return 0;
}
/**
* dprc_clear_irq_status() - Clear a pending interrupt's status
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPRC object
* @irq_index: The interrupt index to configure
* @status: bits to clear (W1C) - one bit per cause:
* 0 = don't change
* 1 = clear status bit
*
* Return: '0' on Success; Error code otherwise.
*/
int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token,
u8 irq_index,
u32 status)
{
struct fsl_mc_command cmd = { 0 };
struct dprc_cmd_clear_irq_status *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_CLEAR_IRQ_STATUS,
cmd_flags, token);
cmd_params = (struct dprc_cmd_clear_irq_status *)cmd.params;
cmd_params->status = cpu_to_le32(status);
cmd_params->irq_index = irq_index;
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
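/*
 * Example (editor's sketch): the usual pattern, also used by the DPRC ISR
 * thread in dprc-driver.c, is to read the pending causes and then write
 * the same mask back, since the status register is write-1-to-clear. Note
 * that @status must be initialized before dprc_get_irq_status(), because
 * its current value is passed down in the command.
 *
 *	u32 status = 0;
 *	int err;
 *
 *	err = dprc_get_irq_status(mc_io, 0, token, 0, &status);
 *	if (!err)
 *		err = dprc_clear_irq_status(mc_io, 0, token, 0, status);
 */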
/**
* dprc_get_attributes() - Obtains container attributes
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPRC object
* @attr: Returned container attributes
*
* Return: '0' on Success; Error code otherwise.
*/
int dprc_get_attributes(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token,
struct dprc_attributes *attr)
{
struct fsl_mc_command cmd = { 0 };
struct dprc_rsp_get_attributes *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_ATTR,
cmd_flags,
token);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
/* retrieve response parameters */
rsp_params = (struct dprc_rsp_get_attributes *)cmd.params;
attr->container_id = le32_to_cpu(rsp_params->container_id);
attr->icid = le32_to_cpu(rsp_params->icid);
attr->options = le32_to_cpu(rsp_params->options);
attr->portal_id = le32_to_cpu(rsp_params->portal_id);
return 0;
}
/**
* dprc_get_obj_count() - Obtains the number of objects in the DPRC
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPRC object
* @obj_count: Number of objects assigned to the DPRC
*
* Return: '0' on Success; Error code otherwise.
*/
int dprc_get_obj_count(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token,
int *obj_count)
{
struct fsl_mc_command cmd = { 0 };
struct dprc_rsp_get_obj_count *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_COUNT,
cmd_flags, token);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
/* retrieve response parameters */
rsp_params = (struct dprc_rsp_get_obj_count *)cmd.params;
*obj_count = le32_to_cpu(rsp_params->obj_count);
return 0;
}
EXPORT_SYMBOL_GPL(dprc_get_obj_count);
/**
* dprc_get_obj() - Get general information on an object
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPRC object
* @obj_index: Index of the object to be queried (< obj_count)
* @obj_desc: Returns the requested object descriptor
*
* The object descriptors are retrieved one by one by incrementing
* obj_index up to (not including) the value of obj_count returned
* from dprc_get_obj_count(). dprc_get_obj_count() must
* be called prior to dprc_get_obj().
*
* Return: '0' on Success; Error code otherwise.
*/
int dprc_get_obj(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token,
int obj_index,
struct fsl_mc_obj_desc *obj_desc)
{
struct fsl_mc_command cmd = { 0 };
struct dprc_cmd_get_obj *cmd_params;
struct dprc_rsp_get_obj *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ,
cmd_flags,
token);
cmd_params = (struct dprc_cmd_get_obj *)cmd.params;
cmd_params->obj_index = cpu_to_le32(obj_index);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
/* retrieve response parameters */
rsp_params = (struct dprc_rsp_get_obj *)cmd.params;
obj_desc->id = le32_to_cpu(rsp_params->id);
obj_desc->vendor = le16_to_cpu(rsp_params->vendor);
obj_desc->irq_count = rsp_params->irq_count;
obj_desc->region_count = rsp_params->region_count;
obj_desc->state = le32_to_cpu(rsp_params->state);
obj_desc->ver_major = le16_to_cpu(rsp_params->version_major);
obj_desc->ver_minor = le16_to_cpu(rsp_params->version_minor);
obj_desc->flags = le16_to_cpu(rsp_params->flags);
strncpy(obj_desc->type, rsp_params->type, 16);
obj_desc->type[15] = '\0';
strncpy(obj_desc->label, rsp_params->label, 16);
obj_desc->label[15] = '\0';
return 0;
}
EXPORT_SYMBOL_GPL(dprc_get_obj);
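/*
 * Example (editor's sketch): enumerating every object in an open DPRC by
 * combining dprc_get_obj_count() with dprc_get_obj(), the pattern
 * described in the kernel-doc above:
 *
 *	struct fsl_mc_obj_desc desc;
 *	int i, obj_count, err;
 *
 *	err = dprc_get_obj_count(mc_io, 0, token, &obj_count);
 *	if (err)
 *		return err;
 *	for (i = 0; i < obj_count; i++) {
 *		err = dprc_get_obj(mc_io, 0, token, i, &desc);
 *		if (err)
 *			continue;
 *		pr_debug("object %s.%d\n", desc.type, desc.id);
 *	}
 */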
/**
* dprc_set_obj_irq() - Set IRQ information for object to trigger an interrupt.
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPRC object
* @obj_type: Type of the object to set its IRQ
* @obj_id: ID of the object to set its IRQ
* @irq_index: The interrupt index to configure
* @irq_cfg: IRQ configuration
*
* Return: '0' on Success; Error code otherwise.
*/
int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token,
char *obj_type,
int obj_id,
u8 irq_index,
struct dprc_irq_cfg *irq_cfg)
{
struct fsl_mc_command cmd = { 0 };
struct dprc_cmd_set_obj_irq *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_OBJ_IRQ,
cmd_flags,
token);
cmd_params = (struct dprc_cmd_set_obj_irq *)cmd.params;
cmd_params->irq_val = cpu_to_le32(irq_cfg->val);
cmd_params->irq_index = irq_index;
cmd_params->irq_addr = cpu_to_le64(irq_cfg->paddr);
cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
cmd_params->obj_id = cpu_to_le32(obj_id);
strncpy(cmd_params->obj_type, obj_type, 16);
cmd_params->obj_type[15] = '\0';
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
EXPORT_SYMBOL_GPL(dprc_set_obj_irq);
/**
* dprc_get_obj_region() - Get region information for a specified object.
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPRC object
* @obj_type: Object type as returned in dprc_get_obj()
* @obj_id: Unique object instance as returned in dprc_get_obj()
* @region_index: The specific region to query
* @region_desc: Returns the requested region descriptor
*
* Return: '0' on Success; Error code otherwise.
*/
int dprc_get_obj_region(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token,
char *obj_type,
int obj_id,
u8 region_index,
struct dprc_region_desc *region_desc)
{
struct fsl_mc_command cmd = { 0 };
struct dprc_cmd_get_obj_region *cmd_params;
struct dprc_rsp_get_obj_region *rsp_params;
int err;
/*
* If the DPRC object version was not yet cached, cache it now.
* Otherwise use the already cached value.
*/
if (!dprc_major_ver && !dprc_minor_ver) {
err = dprc_get_api_version(mc_io, 0,
&dprc_major_ver,
&dprc_minor_ver);
if (err)
return err;
}
if (dprc_major_ver > 6 || (dprc_major_ver == 6 && dprc_minor_ver >= 6)) {
/*
* MC API version 6.6 changed the size of the MC portals and software
 * portals to 64K (as implemented by hardware). If an older API is in use,
 * the reported size is smaller (64 bytes for MC portals and 4K for
 * software portals).
*/
cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG_V3,
cmd_flags, token);
} else if (dprc_major_ver == 6 && dprc_minor_ver >= 3) {
/*
* MC API version 6.3 introduced a new field to the region
* descriptor: base_address. If the older API is in use then the base
* address is set to zero to indicate it needs to be obtained elsewhere
* (typically the device tree).
*/
cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG_V2,
cmd_flags, token);
} else {
cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG,
cmd_flags, token);
}
cmd_params = (struct dprc_cmd_get_obj_region *)cmd.params;
cmd_params->obj_id = cpu_to_le32(obj_id);
cmd_params->region_index = region_index;
strncpy(cmd_params->obj_type, obj_type, 16);
cmd_params->obj_type[15] = '\0';
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
/* retrieve response parameters */
rsp_params = (struct dprc_rsp_get_obj_region *)cmd.params;
region_desc->base_offset = le64_to_cpu(rsp_params->base_offset);
region_desc->size = le32_to_cpu(rsp_params->size);
region_desc->type = rsp_params->type;
region_desc->flags = le32_to_cpu(rsp_params->flags);
if (dprc_major_ver > 6 || (dprc_major_ver == 6 && dprc_minor_ver >= 3))
region_desc->base_address = le64_to_cpu(rsp_params->base_addr);
else
region_desc->base_address = 0;
return 0;
}
EXPORT_SYMBOL_GPL(dprc_get_obj_region);
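/*
 * Example (editor's sketch, hypothetical identifiers): computing the
 * physical address of an object region. When base_address is non-zero
 * (MC API >= 6.3), the region lives at base_address + base_offset;
 * otherwise the base must be obtained elsewhere, typically from the
 * device tree.
 *
 *	struct dprc_region_desc rd;
 *	u64 phys_addr;
 *	int err;
 *
 *	err = dprc_get_obj_region(mc_io, 0, token, "dpmcp", obj_id, 0, &rd);
 *	if (!err && rd.base_address)
 *		phys_addr = rd.base_address + rd.base_offset;
 */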
/**
* dprc_get_api_version - Get Data Path Resource Container API version
* @mc_io: Pointer to Mc portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @major_ver: Major version of Data Path Resource Container API
* @minor_ver: Minor version of Data Path Resource Container API
*
* Return: '0' on Success; Error code otherwise.
*/
int dprc_get_api_version(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 *major_ver,
u16 *minor_ver)
{
struct fsl_mc_command cmd = { 0 };
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_API_VERSION,
cmd_flags, 0);
/* send command to mc */
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
/* retrieve response parameters */
mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
return 0;
}
/**
* dprc_get_container_id - Get container ID associated with a given portal.
* @mc_io: Pointer to Mc portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @container_id: Requested container id
*
* Return: '0' on Success; Error code otherwise.
*/
int dprc_get_container_id(struct fsl_mc_io *mc_io,
u32 cmd_flags,
int *container_id)
{
struct fsl_mc_command cmd = { 0 };
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONT_ID,
cmd_flags,
0);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
/* retrieve response parameters */
*container_id = (int)mc_cmd_read_object_id(&cmd);
return 0;
}
/**
* dprc_get_connection() - Get connected endpoint and link status if connection
* exists.
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPRC object
* @endpoint1: Endpoint 1 configuration parameters
* @endpoint2: Returned endpoint 2 configuration parameters
* @state: Returned link state:
* 1 - link is up;
* 0 - link is down;
* -1 - no connection (endpoint2 information is irrelevant)
*
* Return: '0' on Success; -ENOTCONN if connection does not exist.
*/
int dprc_get_connection(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token,
const struct dprc_endpoint *endpoint1,
struct dprc_endpoint *endpoint2,
int *state)
{
struct dprc_cmd_get_connection *cmd_params;
struct dprc_rsp_get_connection *rsp_params;
struct fsl_mc_command cmd = { 0 };
int err, i;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONNECTION,
cmd_flags,
token);
cmd_params = (struct dprc_cmd_get_connection *)cmd.params;
cmd_params->ep1_id = cpu_to_le32(endpoint1->id);
cmd_params->ep1_interface_id = cpu_to_le16(endpoint1->if_id);
for (i = 0; i < 16; i++)
cmd_params->ep1_type[i] = endpoint1->type[i];
/* send command to mc */
err = mc_send_command(mc_io, &cmd);
if (err)
return -ENOTCONN;
/* retrieve response parameters */
rsp_params = (struct dprc_rsp_get_connection *)cmd.params;
endpoint2->id = le32_to_cpu(rsp_params->ep2_id);
endpoint2->if_id = le16_to_cpu(rsp_params->ep2_interface_id);
*state = le32_to_cpu(rsp_params->state);
for (i = 0; i < 16; i++)
endpoint2->type[i] = rsp_params->ep2_type[i];
return 0;
}
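/*
 * Example (editor's sketch, hypothetical identifiers): querying the peer
 * of a DPNI endpoint. @state only describes the link when a connection
 * actually exists.
 *
 *	struct dprc_endpoint ep1 = { .id = dpni_id }, ep2;
 *	int state, err;
 *
 *	strcpy(ep1.type, "dpni");
 *	err = dprc_get_connection(mc_io, 0, token, &ep1, &ep2, &state);
 *	if (!err && state == 1)
 *		pr_debug("link up to %s.%d\n", ep2.type, ep2.id);
 */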
| linux-master | drivers/bus/fsl-mc/dprc.c |
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* Copyright 2013-2016 Freescale Semiconductor Inc.
*
*/
#include <linux/kernel.h>
#include <linux/fsl/mc.h>
#include "fsl-mc-private.h"
/**
* dpmcp_open() - Open a control session for the specified object.
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @dpmcp_id: DPMCP unique ID
* @token: Returned token; use in subsequent API calls
*
* This function can be used to open a control session for an
* already created object; an object may have been declared in
* the DPL or by calling the dpmcp_create function.
* This function returns a unique authentication token,
* associated with the specific object ID and the specific MC
* portal; this token must be used in all subsequent commands for
* this specific object
*
* Return: '0' on Success; Error code otherwise.
*/
int dpmcp_open(struct fsl_mc_io *mc_io,
u32 cmd_flags,
int dpmcp_id,
u16 *token)
{
struct fsl_mc_command cmd = { 0 };
struct dpmcp_cmd_open *cmd_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPMCP_CMDID_OPEN,
cmd_flags, 0);
cmd_params = (struct dpmcp_cmd_open *)cmd.params;
cmd_params->dpmcp_id = cpu_to_le32(dpmcp_id);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
/* retrieve response parameters */
*token = mc_cmd_hdr_read_token(&cmd);
return err;
}
/**
* dpmcp_close() - Close the control session of the object
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPMCP object
*
* After this function is called, no further operations are
* allowed on the object without opening a new control session.
*
* Return: '0' on Success; Error code otherwise.
*/
int dpmcp_close(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token)
{
struct fsl_mc_command cmd = { 0 };
/* prepare command */
cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CLOSE,
cmd_flags, token);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
/**
* dpmcp_reset() - Reset the DPMCP, returns the object to initial state.
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPMCP object
*
* Return: '0' on Success; Error code otherwise.
*/
int dpmcp_reset(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token)
{
struct fsl_mc_command cmd = { 0 };
/* prepare command */
cmd.header = mc_encode_cmd_header(DPMCP_CMDID_RESET,
cmd_flags, token);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
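/*
 * Example (editor's sketch, hypothetical caller): the full DPMCP control
 * lifecycle exposed by this file - open a session, reset the object to
 * its initial state, then close the session.
 *
 *	u16 token;
 *	int err;
 *
 *	err = dpmcp_open(mc_io, 0, dpmcp_id, &token);
 *	if (err)
 *		return err;
 *	err = dpmcp_reset(mc_io, 0, token);
 *	(void)dpmcp_close(mc_io, 0, token);
 *	return err;
 */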
| linux-master | drivers/bus/fsl-mc/dpmcp.c |
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* Copyright 2013-2016 Freescale Semiconductor Inc.
*
*/
#include <linux/kernel.h>
#include <linux/fsl/mc.h>
#include "fsl-mc-private.h"
/**
* dpbp_open() - Open a control session for the specified object.
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @dpbp_id: DPBP unique ID
* @token: Returned token; use in subsequent API calls
*
* This function can be used to open a control session for an
* already created object; an object may have been declared in
* the DPL or by calling the dpbp_create function.
* This function returns a unique authentication token,
* associated with the specific object ID and the specific MC
* portal; this token must be used in all subsequent commands for
* this specific object
*
* Return: '0' on Success; Error code otherwise.
*/
int dpbp_open(struct fsl_mc_io *mc_io,
u32 cmd_flags,
int dpbp_id,
u16 *token)
{
struct fsl_mc_command cmd = { 0 };
struct dpbp_cmd_open *cmd_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPBP_CMDID_OPEN,
cmd_flags, 0);
cmd_params = (struct dpbp_cmd_open *)cmd.params;
cmd_params->dpbp_id = cpu_to_le32(dpbp_id);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
/* retrieve response parameters */
*token = mc_cmd_hdr_read_token(&cmd);
return err;
}
EXPORT_SYMBOL_GPL(dpbp_open);
/**
* dpbp_close() - Close the control session of the object
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPBP object
*
* After this function is called, no further operations are
* allowed on the object without opening a new control session.
*
* Return: '0' on Success; Error code otherwise.
*/
int dpbp_close(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token)
{
struct fsl_mc_command cmd = { 0 };
/* prepare command */
cmd.header = mc_encode_cmd_header(DPBP_CMDID_CLOSE, cmd_flags,
token);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
EXPORT_SYMBOL_GPL(dpbp_close);
/**
* dpbp_enable() - Enable the DPBP.
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPBP object
*
* Return: '0' on Success; Error code otherwise.
*/
int dpbp_enable(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token)
{
struct fsl_mc_command cmd = { 0 };
/* prepare command */
cmd.header = mc_encode_cmd_header(DPBP_CMDID_ENABLE, cmd_flags,
token);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
EXPORT_SYMBOL_GPL(dpbp_enable);
/**
* dpbp_disable() - Disable the DPBP.
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPBP object
*
* Return: '0' on Success; Error code otherwise.
*/
int dpbp_disable(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token)
{
struct fsl_mc_command cmd = { 0 };
/* prepare command */
cmd.header = mc_encode_cmd_header(DPBP_CMDID_DISABLE,
cmd_flags, token);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
EXPORT_SYMBOL_GPL(dpbp_disable);
/**
* dpbp_reset() - Reset the DPBP, returns the object to initial state.
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPBP object
*
* Return: '0' on Success; Error code otherwise.
*/
int dpbp_reset(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token)
{
struct fsl_mc_command cmd = { 0 };
/* prepare command */
cmd.header = mc_encode_cmd_header(DPBP_CMDID_RESET,
cmd_flags, token);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
EXPORT_SYMBOL_GPL(dpbp_reset);
/**
* dpbp_get_attributes - Retrieve DPBP attributes.
*
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPBP object
* @attr: Returned object's attributes
*
* Return: '0' on Success; Error code otherwise.
*/
int dpbp_get_attributes(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token,
struct dpbp_attr *attr)
{
struct fsl_mc_command cmd = { 0 };
struct dpbp_rsp_get_attributes *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_ATTR,
cmd_flags, token);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
/* retrieve response parameters */
rsp_params = (struct dpbp_rsp_get_attributes *)cmd.params;
attr->bpid = le16_to_cpu(rsp_params->bpid);
attr->id = le32_to_cpu(rsp_params->id);
return 0;
}
EXPORT_SYMBOL_GPL(dpbp_get_attributes);
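/*
 * Example (editor's sketch, hypothetical caller): typical DPBP usage -
 * open the object, enable it, fetch its buffer pool ID (bpid) for use in
 * frame descriptors, then tear everything down in reverse order.
 *
 *	struct dpbp_attr attr;
 *	u16 token;
 *	int err;
 *
 *	err = dpbp_open(mc_io, 0, dpbp_id, &token);
 *	if (err)
 *		return err;
 *	err = dpbp_enable(mc_io, 0, token);
 *	if (!err)
 *		err = dpbp_get_attributes(mc_io, 0, token, &attr);
 *	if (!err)
 *		pr_debug("dpbp %d: bpid %u\n", attr.id, attr.bpid);
 *	(void)dpbp_disable(mc_io, 0, token);
 *	(void)dpbp_close(mc_io, 0, token);
 */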
| linux-master | drivers/bus/fsl-mc/dpbp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Freescale Management Complex (MC) bus driver MSI support
*
* Copyright (C) 2015-2016 Freescale Semiconductor, Inc.
* Author: German Rivera <[email protected]>
*
*/
#include <linux/of_irq.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/acpi_iort.h>
#include "fsl-mc-private.h"
#ifdef GENERIC_MSI_DOMAIN_OPS
/*
 * Generate a unique ID identifying the interrupt (only used within the MSI
 * irqdomain). Combine the icid with the interrupt index.
*/
static irq_hw_number_t fsl_mc_domain_calc_hwirq(struct fsl_mc_device *dev,
struct msi_desc *desc)
{
/*
	 * Make the base hwirq value ICID * 10000 so it is readable
* as a decimal value in /proc/interrupts.
*/
return (irq_hw_number_t)(desc->msi_index + (dev->icid * 10000));
}
static void fsl_mc_msi_set_desc(msi_alloc_info_t *arg,
struct msi_desc *desc)
{
arg->desc = desc;
arg->hwirq = fsl_mc_domain_calc_hwirq(to_fsl_mc_device(desc->dev),
desc);
}
#else
#define fsl_mc_msi_set_desc NULL
#endif
static void fsl_mc_msi_update_dom_ops(struct msi_domain_info *info)
{
struct msi_domain_ops *ops = info->ops;
if (!ops)
return;
/*
* set_desc should not be set by the caller
*/
if (!ops->set_desc)
ops->set_desc = fsl_mc_msi_set_desc;
}
static void __fsl_mc_msi_write_msg(struct fsl_mc_device *mc_bus_dev,
struct fsl_mc_device_irq *mc_dev_irq,
struct msi_desc *msi_desc)
{
int error;
struct fsl_mc_device *owner_mc_dev = mc_dev_irq->mc_dev;
struct dprc_irq_cfg irq_cfg;
/*
* msi_desc->msg.address is 0x0 when this function is invoked in
* the free_irq() code path. In this case, for the MC, we don't
* really need to "unprogram" the MSI, so we just return.
*/
if (msi_desc->msg.address_lo == 0x0 && msi_desc->msg.address_hi == 0x0)
return;
if (!owner_mc_dev)
return;
irq_cfg.paddr = ((u64)msi_desc->msg.address_hi << 32) |
msi_desc->msg.address_lo;
irq_cfg.val = msi_desc->msg.data;
irq_cfg.irq_num = msi_desc->irq;
if (owner_mc_dev == mc_bus_dev) {
/*
* IRQ is for the mc_bus_dev's DPRC itself
*/
error = dprc_set_irq(mc_bus_dev->mc_io,
MC_CMD_FLAG_INTR_DIS | MC_CMD_FLAG_PRI,
mc_bus_dev->mc_handle,
mc_dev_irq->dev_irq_index,
&irq_cfg);
if (error < 0) {
dev_err(&owner_mc_dev->dev,
"dprc_set_irq() failed: %d\n", error);
}
} else {
/*
		 * IRQ is for a child device of mc_bus_dev
*/
error = dprc_set_obj_irq(mc_bus_dev->mc_io,
MC_CMD_FLAG_INTR_DIS | MC_CMD_FLAG_PRI,
mc_bus_dev->mc_handle,
owner_mc_dev->obj_desc.type,
owner_mc_dev->obj_desc.id,
mc_dev_irq->dev_irq_index,
&irq_cfg);
if (error < 0) {
dev_err(&owner_mc_dev->dev,
"dprc_obj_set_irq() failed: %d\n", error);
}
}
}
/*
* NOTE: This function is invoked with interrupts disabled
*/
static void fsl_mc_msi_write_msg(struct irq_data *irq_data,
struct msi_msg *msg)
{
struct msi_desc *msi_desc = irq_data_get_msi_desc(irq_data);
struct fsl_mc_device *mc_bus_dev = to_fsl_mc_device(msi_desc->dev);
struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
struct fsl_mc_device_irq *mc_dev_irq =
&mc_bus->irq_resources[msi_desc->msi_index];
msi_desc->msg = *msg;
/*
* Program the MSI (paddr, value) pair in the device:
*/
__fsl_mc_msi_write_msg(mc_bus_dev, mc_dev_irq, msi_desc);
}
static void fsl_mc_msi_update_chip_ops(struct msi_domain_info *info)
{
struct irq_chip *chip = info->chip;
if (!chip)
return;
/*
* irq_write_msi_msg should not be set by the caller
*/
if (!chip->irq_write_msi_msg)
chip->irq_write_msi_msg = fsl_mc_msi_write_msg;
}
/**
* fsl_mc_msi_create_irq_domain - Create a fsl-mc MSI interrupt domain
* @fwnode: Optional firmware node of the interrupt controller
* @info: MSI domain info
* @parent: Parent irq domain
*
* Updates the domain and chip ops and creates a fsl-mc MSI
* interrupt domain.
*
* Returns:
* A domain pointer or NULL in case of failure.
*/
struct irq_domain *fsl_mc_msi_create_irq_domain(struct fwnode_handle *fwnode,
struct msi_domain_info *info,
struct irq_domain *parent)
{
struct irq_domain *domain;
if (WARN_ON((info->flags & MSI_FLAG_LEVEL_CAPABLE)))
info->flags &= ~MSI_FLAG_LEVEL_CAPABLE;
if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
fsl_mc_msi_update_dom_ops(info);
if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
fsl_mc_msi_update_chip_ops(info);
info->flags |= MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS | MSI_FLAG_FREE_MSI_DESCS;
domain = msi_create_irq_domain(fwnode, info, parent);
if (domain)
irq_domain_update_bus_token(domain, DOMAIN_BUS_FSL_MC_MSI);
return domain;
}
struct irq_domain *fsl_mc_find_msi_domain(struct device *dev)
{
struct device *root_dprc_dev;
struct device *bus_dev;
struct irq_domain *msi_domain;
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
fsl_mc_get_root_dprc(dev, &root_dprc_dev);
bus_dev = root_dprc_dev->parent;
if (bus_dev->of_node) {
msi_domain = of_msi_map_get_device_domain(dev,
mc_dev->icid,
DOMAIN_BUS_FSL_MC_MSI);
/*
* if the msi-map property is missing assume that all the
* child containers inherit the domain from the parent
*/
if (!msi_domain)
msi_domain = of_msi_get_domain(bus_dev,
bus_dev->of_node,
DOMAIN_BUS_FSL_MC_MSI);
} else {
msi_domain = iort_get_device_domain(dev, mc_dev->icid,
DOMAIN_BUS_FSL_MC_MSI);
}
return msi_domain;
}
int fsl_mc_msi_domain_alloc_irqs(struct device *dev, unsigned int irq_count)
{
int error = msi_setup_device_data(dev);
if (error)
return error;
/*
* NOTE: Calling this function will trigger the invocation of the
* its_fsl_mc_msi_prepare() callback
*/
error = msi_domain_alloc_irqs_range(dev, MSI_DEFAULT_DOMAIN, 0, irq_count - 1);
if (error)
dev_err(dev, "Failed to allocate IRQs\n");
return error;
}
void fsl_mc_msi_domain_free_irqs(struct device *dev)
{
msi_domain_free_irqs_all(dev, MSI_DEFAULT_DOMAIN);
}
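/*
 * Example (editor's sketch): these two calls are used as a pair by the bus
 * code once a device has an MSI domain attached - allocate the whole range
 * of IRQs up front, free them all when the pool is torn down.
 *
 *	err = fsl_mc_msi_domain_alloc_irqs(&mc_bus_dev->dev, irq_count);
 *	if (err)
 *		return err;
 *	(use the allocated IRQs)
 *	fsl_mc_msi_domain_free_irqs(&mc_bus_dev->dev);
 */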
| linux-master | drivers/bus/fsl-mc/fsl-mc-msi.c |
/*
* linux/drivers/message/fusion/mptbase.c
* This is the Fusion MPT base driver which supports multiple
* (SCSI + LAN) specialized protocol drivers.
* For use with LSI PCI chip/adapter(s)
* running LSI Fusion MPT (Message Passing Technology) firmware.
*
* Copyright (c) 1999-2008 LSI Corporation
* (mailto:[email protected])
*
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
NO WARRANTY
THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
solely responsible for determining the appropriateness of using and
distributing the Program and assumes all risks associated with its
exercise of rights under this Agreement, including but not limited to
the risks and costs of program errors, damage to or loss of data,
programs or equipment, and unavailability or interruption of operations.
DISCLAIMER OF LIABILITY
NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/kthread.h>
#include <scsi/scsi_host.h>
#include "mptbase.h"
#include "lsi/mpi_log_fc.h"
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
#define my_NAME "Fusion MPT base driver"
#define my_VERSION MPT_LINUX_VERSION_COMMON
#define MYNAM "mptbase"
MODULE_AUTHOR(MODULEAUTHOR);
MODULE_DESCRIPTION(my_NAME);
MODULE_LICENSE("GPL");
MODULE_VERSION(my_VERSION);
/*
* cmd line parameters
*/
static int mpt_msi_enable_spi;
module_param(mpt_msi_enable_spi, int, 0);
MODULE_PARM_DESC(mpt_msi_enable_spi,
" Enable MSI Support for SPI controllers (default=0)");
static int mpt_msi_enable_fc;
module_param(mpt_msi_enable_fc, int, 0);
MODULE_PARM_DESC(mpt_msi_enable_fc,
" Enable MSI Support for FC controllers (default=0)");
static int mpt_msi_enable_sas;
module_param(mpt_msi_enable_sas, int, 0);
MODULE_PARM_DESC(mpt_msi_enable_sas,
" Enable MSI Support for SAS controllers (default=0)");
static int mpt_channel_mapping;
module_param(mpt_channel_mapping, int, 0);
MODULE_PARM_DESC(mpt_channel_mapping, " Mapping id's to channels (default=0)");
static int mpt_debug_level;
static int mpt_set_debug_level(const char *val, const struct kernel_param *kp);
module_param_call(mpt_debug_level, mpt_set_debug_level, param_get_int,
&mpt_debug_level, 0600);
MODULE_PARM_DESC(mpt_debug_level,
" debug level - refer to mptdebug.h - (default=0)");
int mpt_fwfault_debug;
EXPORT_SYMBOL(mpt_fwfault_debug);
module_param(mpt_fwfault_debug, int, 0600);
MODULE_PARM_DESC(mpt_fwfault_debug,
"Enable detection of Firmware fault and halt Firmware on fault - (default=0)");
static char MptCallbacksName[MPT_MAX_PROTOCOL_DRIVERS]
[MPT_MAX_CALLBACKNAME_LEN+1];
#ifdef MFCNT
static int mfcounter = 0;
#define PRINT_MF_COUNT 20000
#endif
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* Public data...
*/
#define WHOINIT_UNKNOWN 0xAA
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* Private data...
*/
/* Adapter link list */
LIST_HEAD(ioc_list);
/* Callback lookup table */
static MPT_CALLBACK MptCallbacks[MPT_MAX_PROTOCOL_DRIVERS];
/* Protocol driver class lookup table */
static int MptDriverClass[MPT_MAX_PROTOCOL_DRIVERS];
/* Event handler lookup table */
static MPT_EVHANDLER MptEvHandlers[MPT_MAX_PROTOCOL_DRIVERS];
/* Reset handler lookup table */
static MPT_RESETHANDLER MptResetHandlers[MPT_MAX_PROTOCOL_DRIVERS];
static struct mpt_pci_driver *MptDeviceDriverHandlers[MPT_MAX_PROTOCOL_DRIVERS];
#ifdef CONFIG_PROC_FS
static struct proc_dir_entry *mpt_proc_root_dir;
#endif
/*
 * Driver Callback Indexes
*/
static u8 mpt_base_index = MPT_MAX_PROTOCOL_DRIVERS;
static u8 last_drv_idx;
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* Forward protos...
*/
static irqreturn_t mpt_interrupt(int irq, void *bus_id);
static int mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
MPT_FRAME_HDR *reply);
static int mpt_handshake_req_reply_wait(MPT_ADAPTER *ioc, int reqBytes,
u32 *req, int replyBytes, u16 *u16reply, int maxwait,
int sleepFlag);
static int mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag);
static void mpt_detect_bound_ports(MPT_ADAPTER *ioc, struct pci_dev *pdev);
static void mpt_adapter_disable(MPT_ADAPTER *ioc);
static void mpt_adapter_dispose(MPT_ADAPTER *ioc);
static void MptDisplayIocCapabilities(MPT_ADAPTER *ioc);
static int MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag);
static int GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason);
static int GetPortFacts(MPT_ADAPTER *ioc, int portnum, int sleepFlag);
static int SendIocInit(MPT_ADAPTER *ioc, int sleepFlag);
static int SendPortEnable(MPT_ADAPTER *ioc, int portnum, int sleepFlag);
static int mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag);
static int mpt_downloadboot(MPT_ADAPTER *ioc, MpiFwHeader_t *pFwHeader, int sleepFlag);
static int mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag);
static int KickStart(MPT_ADAPTER *ioc, int ignore, int sleepFlag);
static int SendIocReset(MPT_ADAPTER *ioc, u8 reset_type, int sleepFlag);
static int PrimeIocFifos(MPT_ADAPTER *ioc);
static int WaitForDoorbellAck(MPT_ADAPTER *ioc, int howlong, int sleepFlag);
static int WaitForDoorbellInt(MPT_ADAPTER *ioc, int howlong, int sleepFlag);
static int WaitForDoorbellReply(MPT_ADAPTER *ioc, int howlong, int sleepFlag);
static int GetLanConfigPages(MPT_ADAPTER *ioc);
static int GetIoUnitPage2(MPT_ADAPTER *ioc);
int mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode);
static int mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum);
static int mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum);
static void mpt_read_ioc_pg_1(MPT_ADAPTER *ioc);
static void mpt_read_ioc_pg_4(MPT_ADAPTER *ioc);
static void mpt_get_manufacturing_pg_0(MPT_ADAPTER *ioc);
static int SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch,
int sleepFlag);
static int SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp);
static int mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int sleepFlag);
static int mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init);
#ifdef CONFIG_PROC_FS
static int mpt_summary_proc_show(struct seq_file *m, void *v);
static int mpt_version_proc_show(struct seq_file *m, void *v);
static int mpt_iocinfo_proc_show(struct seq_file *m, void *v);
#endif
static void mpt_get_fw_exp_ver(char *buf, MPT_ADAPTER *ioc);
static int ProcessEventNotification(MPT_ADAPTER *ioc,
EventNotificationReply_t *evReply, int *evHandlers);
static void mpt_iocstatus_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf);
static void mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info);
static void mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info);
static void mpt_sas_log_info(MPT_ADAPTER *ioc, u32 log_info , u8 cb_idx);
static int mpt_read_ioc_pg_3(MPT_ADAPTER *ioc);
static void mpt_inactive_raid_list_free(MPT_ADAPTER *ioc);
/* module entry point */
static int __init fusion_init (void);
static void __exit fusion_exit (void);
#define CHIPREG_READ32(addr) readl_relaxed(addr)
#define CHIPREG_READ32_dmasync(addr) readl(addr)
#define CHIPREG_WRITE32(addr,val) writel(val, addr)
#define CHIPREG_PIO_WRITE32(addr,val) outl(val, (unsigned long)addr)
#define CHIPREG_PIO_READ32(addr) inl((unsigned long)addr)
static void
pci_disable_io_access(struct pci_dev *pdev)
{
u16 command_reg;
pci_read_config_word(pdev, PCI_COMMAND, &command_reg);
command_reg &= ~1;
pci_write_config_word(pdev, PCI_COMMAND, command_reg);
}
static void
pci_enable_io_access(struct pci_dev *pdev)
{
u16 command_reg;
pci_read_config_word(pdev, PCI_COMMAND, &command_reg);
command_reg |= 1;
pci_write_config_word(pdev, PCI_COMMAND, command_reg);
}
static int mpt_set_debug_level(const char *val, const struct kernel_param *kp)
{
int ret = param_set_int(val, kp);
MPT_ADAPTER *ioc;
if (ret)
return ret;
list_for_each_entry(ioc, &ioc_list, list)
ioc->debug_level = mpt_debug_level;
return 0;
}
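/*
 * Illustrative note (not from the original source): because
 * mpt_debug_level is exposed via module_param_call() with mode 0600,
 * the level can be changed at runtime and is propagated to every IOC
 * on ioc_list by the handler above, e.g. (sketch; path assumes the
 * standard sysfs module-parameter layout):
 *
 *	echo 8 > /sys/module/mptbase/parameters/mpt_debug_level
 */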
/**
* mpt_get_cb_idx - obtain cb_idx for registered driver
* @dclass: class driver enum
*
 * Returns cb_idx, or zero if it wasn't found.
**/
static u8
mpt_get_cb_idx(MPT_DRIVER_CLASS dclass)
{
u8 cb_idx;
for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--)
if (MptDriverClass[cb_idx] == dclass)
return cb_idx;
return 0;
}
/**
* mpt_is_discovery_complete - determine if discovery has completed
 * @ioc: per adapter instance
*
* Returns 1 when discovery completed, else zero.
*/
static int
mpt_is_discovery_complete(MPT_ADAPTER *ioc)
{
ConfigExtendedPageHeader_t hdr;
CONFIGPARMS cfg;
SasIOUnitPage0_t *buffer;
dma_addr_t dma_handle;
int rc = 0;
memset(&hdr, 0, sizeof(ConfigExtendedPageHeader_t));
memset(&cfg, 0, sizeof(CONFIGPARMS));
hdr.PageVersion = MPI_SASIOUNITPAGE0_PAGEVERSION;
hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
cfg.cfghdr.ehdr = &hdr;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
if ((mpt_config(ioc, &cfg)))
goto out;
if (!hdr.ExtPageLength)
goto out;
buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4,
&dma_handle, GFP_KERNEL);
if (!buffer)
goto out;
cfg.physAddr = dma_handle;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
if ((mpt_config(ioc, &cfg)))
goto out_free_consistent;
if (!(buffer->PhyData[0].PortFlags &
MPI_SAS_IOUNIT0_PORT_FLAGS_DISCOVERY_IN_PROGRESS))
rc = 1;
out_free_consistent:
dma_free_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4, buffer,
dma_handle);
out:
return rc;
}
/**
* mpt_remove_dead_ioc_func - kthread context to remove dead ioc
* @arg: input argument, used to derive ioc
*
 * Returns 0 if the controller was removed from the PCI subsystem,
 * -1 otherwise.
*/
static int mpt_remove_dead_ioc_func(void *arg)
{
MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
struct pci_dev *pdev;
if (!ioc)
return -1;
pdev = ioc->pcidev;
if (!pdev)
return -1;
pci_stop_and_remove_bus_device_locked(pdev);
return 0;
}
/**
* mpt_fault_reset_work - work performed on workq after ioc fault
* @work: input argument, used to derive ioc
*
**/
static void
mpt_fault_reset_work(struct work_struct *work)
{
MPT_ADAPTER *ioc =
container_of(work, MPT_ADAPTER, fault_reset_work.work);
u32 ioc_raw_state;
int rc;
unsigned long flags;
MPT_SCSI_HOST *hd;
struct task_struct *p;
if (ioc->ioc_reset_in_progress || !ioc->active)
goto out;
ioc_raw_state = mpt_GetIocState(ioc, 0);
if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_MASK) {
printk(MYIOC_s_INFO_FMT "%s: IOC is non-operational !!!!\n",
ioc->name, __func__);
/*
 * Call the mptscsih_flush_pending_cmds callback so that we
 * flush all pending commands back to the OS.
 * This call is required to avoid a deadlock at the block layer.
 * A dead IOC will fail to do a diag reset, and this call is safe
 * since a dead IOC will never return any command back from HW.
 */
hd = shost_priv(ioc->sh);
ioc->schedule_dead_ioc_flush_running_cmds(hd);
/*Remove the Dead Host */
p = kthread_run(mpt_remove_dead_ioc_func, ioc,
"mpt_dead_ioc_%d", ioc->id);
if (IS_ERR(p)) {
printk(MYIOC_s_ERR_FMT
"%s: Running mpt_dead_ioc thread failed !\n",
ioc->name, __func__);
} else {
printk(MYIOC_s_WARN_FMT
"%s: Running mpt_dead_ioc thread success !\n",
ioc->name, __func__);
}
return; /* don't rearm timer */
}
if ((ioc_raw_state & MPI_IOC_STATE_MASK)
== MPI_IOC_STATE_FAULT) {
printk(MYIOC_s_WARN_FMT "IOC is in FAULT state (%04xh)!!!\n",
ioc->name, ioc_raw_state & MPI_DOORBELL_DATA_MASK);
printk(MYIOC_s_WARN_FMT "Issuing HardReset from %s!!\n",
ioc->name, __func__);
rc = mpt_HardResetHandler(ioc, CAN_SLEEP);
printk(MYIOC_s_WARN_FMT "%s: HardReset: %s\n", ioc->name,
__func__, (rc == 0) ? "success" : "failed");
ioc_raw_state = mpt_GetIocState(ioc, 0);
if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT)
printk(MYIOC_s_WARN_FMT "IOC is in FAULT state after "
"reset (%04xh)\n", ioc->name, ioc_raw_state &
MPI_DOORBELL_DATA_MASK);
} else if (ioc->bus_type == SAS && ioc->sas_discovery_quiesce_io) {
if ((mpt_is_discovery_complete(ioc))) {
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "clearing "
"discovery_quiesce_io flag\n", ioc->name));
ioc->sas_discovery_quiesce_io = 0;
}
}
out:
/*
* Take turns polling alternate controller
*/
if (ioc->alt_ioc)
ioc = ioc->alt_ioc;
/* rearm the timer */
spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
if (ioc->reset_work_q)
queue_delayed_work(ioc->reset_work_q, &ioc->fault_reset_work,
msecs_to_jiffies(MPT_POLLING_INTERVAL));
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
}
/*
* Process turbo (context) reply...
*/
static void
mpt_turbo_reply(MPT_ADAPTER *ioc, u32 pa)
{
MPT_FRAME_HDR *mf = NULL;
MPT_FRAME_HDR *mr = NULL;
u16 req_idx = 0;
u8 cb_idx;
dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Got TURBO reply req_idx=%08x\n",
ioc->name, pa));
switch (pa >> MPI_CONTEXT_REPLY_TYPE_SHIFT) {
case MPI_CONTEXT_REPLY_TYPE_SCSI_INIT:
req_idx = pa & 0x0000FFFF;
cb_idx = (pa & 0x00FF0000) >> 16;
mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
break;
case MPI_CONTEXT_REPLY_TYPE_LAN:
cb_idx = mpt_get_cb_idx(MPTLAN_DRIVER);
/*
 * Blindly setting mf to NULL here was fatal
 * after lan_reply said "freeme".
 * The fix is combined with an optimization: an
 * explicit check was added for the case where
 * lan_reply was just returning 1 and doing nothing
 * else. For that case skip the callback, but set up
 * the proper mf value first.
 */
if ((pa & 0x58000000) == 0x58000000) {
req_idx = pa & 0x0000FFFF;
mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
mpt_free_msg_frame(ioc, mf);
mb();
return;
}
mr = (MPT_FRAME_HDR *) CAST_U32_TO_PTR(pa);
break;
case MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET:
cb_idx = mpt_get_cb_idx(MPTSTM_DRIVER);
mr = (MPT_FRAME_HDR *) CAST_U32_TO_PTR(pa);
break;
default:
cb_idx = 0;
BUG();
}
/* Check for (valid) IO callback! */
if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS ||
MptCallbacks[cb_idx] == NULL) {
printk(MYIOC_s_WARN_FMT "%s: Invalid cb_idx (%d)!\n",
__func__, ioc->name, cb_idx);
goto out;
}
if (MptCallbacks[cb_idx](ioc, mf, mr))
mpt_free_msg_frame(ioc, mf);
out:
mb();
}
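/*
 * Descriptive note on the turbo (context) reply word decoded above:
 * the reply type lives in the bits selected by
 * MPI_CONTEXT_REPLY_TYPE_SHIFT; for SCSI initiator replies the low
 * 16 bits carry req_idx and bits 16-23 carry cb_idx, while for the
 * LAN and SCSI target types cb_idx is looked up by driver class and
 * the reply frame pointer is recovered with CAST_U32_TO_PTR(pa).
 */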
static void
mpt_reply(MPT_ADAPTER *ioc, u32 pa)
{
MPT_FRAME_HDR *mf;
MPT_FRAME_HDR *mr;
u16 req_idx;
u8 cb_idx;
int freeme;
u32 reply_dma_low;
u16 ioc_stat;
/* non-TURBO reply! Hmmm, something may be up...
* Newest turbo reply mechanism; get address
* via left shift 1 (get rid of MPI_ADDRESS_REPLY_A_BIT)!
*/
/* Map DMA address of reply header to cpu address.
 * pa is 32 bits - but the dma address may be 32 or 64 bits
 * get offset based on only the low addresses
 */
reply_dma_low = (pa <<= 1);
mr = (MPT_FRAME_HDR *)((u8 *)ioc->reply_frames +
(reply_dma_low - ioc->reply_frames_low_dma));
req_idx = le16_to_cpu(mr->u.frame.hwhdr.msgctxu.fld.req_idx);
cb_idx = mr->u.frame.hwhdr.msgctxu.fld.cb_idx;
mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Got non-TURBO reply=%p req_idx=%x cb_idx=%x Function=%x\n",
ioc->name, mr, req_idx, cb_idx, mr->u.hdr.Function));
DBG_DUMP_REPLY_FRAME(ioc, (u32 *)mr);
/* Check/log IOC log info
*/
ioc_stat = le16_to_cpu(mr->u.reply.IOCStatus);
if (ioc_stat & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
u32 log_info = le32_to_cpu(mr->u.reply.IOCLogInfo);
if (ioc->bus_type == FC)
mpt_fc_log_info(ioc, log_info);
else if (ioc->bus_type == SPI)
mpt_spi_log_info(ioc, log_info);
else if (ioc->bus_type == SAS)
mpt_sas_log_info(ioc, log_info, cb_idx);
}
if (ioc_stat & MPI_IOCSTATUS_MASK)
mpt_iocstatus_info(ioc, (u32)ioc_stat, mf);
/* Check for (valid) IO callback! */
if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS ||
MptCallbacks[cb_idx] == NULL) {
printk(MYIOC_s_WARN_FMT "%s: Invalid cb_idx (%d)!\n",
__func__, ioc->name, cb_idx);
freeme = 0;
goto out;
}
freeme = MptCallbacks[cb_idx](ioc, mf, mr);
out:
/* Flush (non-TURBO) reply with a WRITE! */
CHIPREG_WRITE32(&ioc->chip->ReplyFifo, pa);
if (freeme)
mpt_free_msg_frame(ioc, mf);
mb();
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_interrupt - MPT adapter (IOC) specific interrupt handler.
* @irq: irq number (not used)
* @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
*
* This routine is registered via the request_irq() kernel API call,
* and handles all interrupts generated from a specific MPT adapter
 * (also referred to as an IO Controller or IOC).
* This routine must clear the interrupt from the adapter and does
* so by reading the reply FIFO. Multiple replies may be processed
* per single call to this routine.
*
* This routine handles register-level access of the adapter but
* dispatches (calls) a protocol-specific callback routine to handle
* the protocol-specific details of the MPT request completion.
*/
static irqreturn_t
mpt_interrupt(int irq, void *bus_id)
{
MPT_ADAPTER *ioc = bus_id;
u32 pa = CHIPREG_READ32_dmasync(&ioc->chip->ReplyFifo);
if (pa == 0xFFFFFFFF)
return IRQ_NONE;
/*
* Drain the reply FIFO!
*/
do {
if (pa & MPI_ADDRESS_REPLY_A_BIT)
mpt_reply(ioc, pa);
else
mpt_turbo_reply(ioc, pa);
pa = CHIPREG_READ32_dmasync(&ioc->chip->ReplyFifo);
} while (pa != 0xFFFFFFFF);
return IRQ_HANDLED;
}
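/*
 * Descriptive note: reading 0xFFFFFFFF from the reply FIFO is the
 * "FIFO empty" indication, so the loop above drains every queued
 * reply, turbo or full-frame, in a single interrupt invocation.
 */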
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mptbase_reply - MPT base driver's callback routine
* @ioc: Pointer to MPT_ADAPTER structure
* @req: Pointer to original MPT request frame
* @reply: Pointer to MPT reply frame (NULL if TurboReply)
*
* MPT base driver's callback routine; all base driver
* "internal" request/reply processing is routed here.
* Currently used for EventNotification and EventAck handling.
*
* Returns 1 indicating original alloc'd request frame ptr
* should be freed, or 0 if it shouldn't.
*/
static int
mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
{
EventNotificationReply_t *pEventReply;
u8 event;
int evHandlers;
int freereq = 1;
switch (reply->u.hdr.Function) {
case MPI_FUNCTION_EVENT_NOTIFICATION:
pEventReply = (EventNotificationReply_t *)reply;
evHandlers = 0;
ProcessEventNotification(ioc, pEventReply, &evHandlers);
event = le32_to_cpu(pEventReply->Event) & 0xFF;
if (pEventReply->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY)
freereq = 0;
if (event != MPI_EVENT_EVENT_CHANGE)
break;
fallthrough;
case MPI_FUNCTION_CONFIG:
case MPI_FUNCTION_SAS_IO_UNIT_CONTROL:
ioc->mptbase_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
ioc->mptbase_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
memcpy(ioc->mptbase_cmds.reply, reply,
min(MPT_DEFAULT_FRAME_SIZE,
4 * reply->u.reply.MsgLength));
if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_PENDING) {
ioc->mptbase_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
complete(&ioc->mptbase_cmds.done);
} else
freereq = 0;
if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_FREE_MF)
freereq = 1;
break;
case MPI_FUNCTION_EVENT_ACK:
devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"EventAck reply received\n", ioc->name));
break;
default:
printk(MYIOC_s_ERR_FMT
"Unexpected msg function (=%02Xh) reply received!\n",
ioc->name, reply->u.hdr.Function);
break;
}
/*
* Conditionally tell caller to free the original
* EventNotification/EventAck/unexpected request frame!
*/
return freereq;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_register - Register protocol-specific main callback handler.
* @cbfunc: callback function pointer
* @dclass: Protocol driver's class (%MPT_DRIVER_CLASS enum value)
* @func_name: call function's name
*
* This routine is called by a protocol-specific driver (SCSI host,
* LAN, SCSI target) to register its reply callback routine. Each
* protocol-specific driver must do this before it will be able to
* use any IOC resources, such as obtaining request frames.
*
* NOTES: The SCSI protocol driver currently calls this routine thrice
* in order to register separate callbacks; one for "normal" SCSI IO;
* one for MptScsiTaskMgmt requests; one for Scan/DV requests.
*
* Returns u8 valued "handle" in the range (and S.O.D. order)
* {N,...,7,6,5,...,1} if successful.
* A return value of MPT_MAX_PROTOCOL_DRIVERS (including zero!) should be
* considered an error by the caller.
*/
u8
mpt_register(MPT_CALLBACK cbfunc, MPT_DRIVER_CLASS dclass, char *func_name)
{
u8 cb_idx;
last_drv_idx = MPT_MAX_PROTOCOL_DRIVERS;
/*
* Search for empty callback slot in this order: {N,...,7,6,5,...,1}
* (slot/handle 0 is reserved!)
*/
for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
if (MptCallbacks[cb_idx] == NULL) {
MptCallbacks[cb_idx] = cbfunc;
MptDriverClass[cb_idx] = dclass;
MptEvHandlers[cb_idx] = NULL;
last_drv_idx = cb_idx;
strscpy(MptCallbacksName[cb_idx], func_name,
MPT_MAX_CALLBACKNAME_LEN+1);
break;
}
}
return last_drv_idx;
}
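/*
 * Illustrative registration sketch (hypothetical caller, not part of
 * this file); driver_reply_cb is an assumed callback shown only to
 * demonstrate the API contract documented above:
 *
 *	u8 cb_idx = mpt_register(driver_reply_cb, MPTSPI_DRIVER,
 *				 "driver_reply_cb");
 *	if (!cb_idx || cb_idx == MPT_MAX_PROTOCOL_DRIVERS)
 *		return -EBUSY;	(registration failed)
 *	...
 *	mpt_deregister(cb_idx);	(on module unload)
 */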
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_deregister - Deregister a protocol drivers resources.
* @cb_idx: previously registered callback handle
*
* Each protocol-specific driver should call this routine when its
* module is unloaded.
*/
void
mpt_deregister(u8 cb_idx)
{
if (cb_idx && (cb_idx < MPT_MAX_PROTOCOL_DRIVERS)) {
MptCallbacks[cb_idx] = NULL;
MptDriverClass[cb_idx] = MPTUNKNOWN_DRIVER;
MptEvHandlers[cb_idx] = NULL;
last_drv_idx++;
}
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_event_register - Register protocol-specific event callback handler.
* @cb_idx: previously registered (via mpt_register) callback handle
* @ev_cbfunc: callback function
*
* This routine can be called by one or more protocol-specific drivers
* if/when they choose to be notified of MPT events.
*
* Returns 0 for success.
*/
int
mpt_event_register(u8 cb_idx, MPT_EVHANDLER ev_cbfunc)
{
if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
return -1;
MptEvHandlers[cb_idx] = ev_cbfunc;
return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_event_deregister - Deregister protocol-specific event callback handler
* @cb_idx: previously registered callback handle
*
* Each protocol-specific driver should call this routine
* when it does not (or can no longer) handle events,
* or when its module is unloaded.
*/
void
mpt_event_deregister(u8 cb_idx)
{
if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
return;
MptEvHandlers[cb_idx] = NULL;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_reset_register - Register protocol-specific IOC reset handler.
* @cb_idx: previously registered (via mpt_register) callback handle
* @reset_func: reset function
*
* This routine can be called by one or more protocol-specific drivers
* if/when they choose to be notified of IOC resets.
*
* Returns 0 for success.
*/
int
mpt_reset_register(u8 cb_idx, MPT_RESETHANDLER reset_func)
{
if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
return -1;
MptResetHandlers[cb_idx] = reset_func;
return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_reset_deregister - Deregister protocol-specific IOC reset handler.
* @cb_idx: previously registered callback handle
*
* Each protocol-specific driver should call this routine
* when it does not (or can no longer) handle IOC reset handling,
* or when its module is unloaded.
*/
void
mpt_reset_deregister(u8 cb_idx)
{
if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
return;
MptResetHandlers[cb_idx] = NULL;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_device_driver_register - Register device driver hooks
* @dd_cbfunc: driver callbacks struct
* @cb_idx: MPT protocol driver index
*/
int
mpt_device_driver_register(struct mpt_pci_driver * dd_cbfunc, u8 cb_idx)
{
MPT_ADAPTER *ioc;
if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
return -EINVAL;
MptDeviceDriverHandlers[cb_idx] = dd_cbfunc;
/* call per pci device probe entry point */
list_for_each_entry(ioc, &ioc_list, list) {
if (dd_cbfunc->probe)
dd_cbfunc->probe(ioc->pcidev);
}
return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_device_driver_deregister - DeRegister device driver hooks
* @cb_idx: MPT protocol driver index
*/
void
mpt_device_driver_deregister(u8 cb_idx)
{
struct mpt_pci_driver *dd_cbfunc;
MPT_ADAPTER *ioc;
if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
return;
dd_cbfunc = MptDeviceDriverHandlers[cb_idx];
list_for_each_entry(ioc, &ioc_list, list) {
if (dd_cbfunc->remove)
dd_cbfunc->remove(ioc->pcidev);
}
MptDeviceDriverHandlers[cb_idx] = NULL;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_get_msg_frame - Obtain an MPT request frame from the pool
* @cb_idx: Handle of registered MPT protocol driver
* @ioc: Pointer to MPT adapter structure
*
* Obtain an MPT request frame from the pool (of 1024) that are
* allocated per MPT adapter.
*
* Returns pointer to a MPT request frame or %NULL if none are available
* or IOC is not active.
*/
MPT_FRAME_HDR*
mpt_get_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc)
{
MPT_FRAME_HDR *mf;
unsigned long flags;
u16 req_idx; /* Request index */
/* validate handle and ioc identifier */
#ifdef MFCNT
if (!ioc->active)
printk(MYIOC_s_WARN_FMT "IOC Not Active! mpt_get_msg_frame "
"returning NULL!\n", ioc->name);
#endif
/* If interrupts are not attached, do not return a request frame */
if (!ioc->active)
return NULL;
spin_lock_irqsave(&ioc->FreeQlock, flags);
if (!list_empty(&ioc->FreeQ)) {
int req_offset;
mf = list_entry(ioc->FreeQ.next, MPT_FRAME_HDR,
u.frame.linkage.list);
list_del(&mf->u.frame.linkage.list);
mf->u.frame.linkage.arg1 = 0;
mf->u.frame.hwhdr.msgctxu.fld.cb_idx = cb_idx; /* byte */
req_offset = (u8 *)mf - (u8 *)ioc->req_frames;
/* u16! */
req_idx = req_offset / ioc->req_sz;
mf->u.frame.hwhdr.msgctxu.fld.req_idx = cpu_to_le16(req_idx);
mf->u.frame.hwhdr.msgctxu.fld.rsvd = 0;
/* Default, will be changed if necessary in SG generation */
ioc->RequestNB[req_idx] = ioc->NB_for_64_byte_frame;
#ifdef MFCNT
ioc->mfcnt++;
#endif
} else
mf = NULL;
spin_unlock_irqrestore(&ioc->FreeQlock, flags);
#ifdef MFCNT
if (mf == NULL)
printk(MYIOC_s_WARN_FMT "IOC Active. No free Msg Frames! "
"Count 0x%x Max 0x%x\n", ioc->name, ioc->mfcnt,
ioc->req_depth);
mfcounter++;
if (mfcounter == PRINT_MF_COUNT)
printk(MYIOC_s_INFO_FMT "MF Count 0x%x Max 0x%x \n", ioc->name,
ioc->mfcnt, ioc->req_depth);
#endif
dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_get_msg_frame(%d,%d), got mf=%p\n",
ioc->name, cb_idx, ioc->id, mf));
return mf;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_put_msg_frame - Send a protocol-specific MPT request frame to an IOC
* @cb_idx: Handle of registered MPT protocol driver
* @ioc: Pointer to MPT adapter structure
* @mf: Pointer to MPT request frame
*
* This routine posts an MPT request frame to the request post FIFO of a
* specific MPT adapter.
*/
void
mpt_put_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
{
u32 mf_dma_addr;
int req_offset;
u16 req_idx; /* Request index */
/* ensure values are reset properly! */
mf->u.frame.hwhdr.msgctxu.fld.cb_idx = cb_idx; /* byte */
req_offset = (u8 *)mf - (u8 *)ioc->req_frames;
/* u16! */
req_idx = req_offset / ioc->req_sz;
mf->u.frame.hwhdr.msgctxu.fld.req_idx = cpu_to_le16(req_idx);
mf->u.frame.hwhdr.msgctxu.fld.rsvd = 0;
DBG_DUMP_PUT_MSG_FRAME(ioc, (u32 *)mf);
mf_dma_addr = (ioc->req_frames_low_dma + req_offset) | ioc->RequestNB[req_idx];
dsgprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mf_dma_addr=%x req_idx=%d "
"RequestNB=%x\n", ioc->name, mf_dma_addr, req_idx,
ioc->RequestNB[req_idx]));
CHIPREG_WRITE32(&ioc->chip->RequestFifo, mf_dma_addr);
}
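/*
 * Illustrative request lifecycle sketch (hypothetical caller, not part
 * of this file), tying together the frame-pool helpers above:
 *
 *	MPT_FRAME_HDR *mf = mpt_get_msg_frame(cb_idx, ioc);
 *	if (!mf)
 *		return -EAGAIN;	(no free frames or IOC inactive)
 *	(fill in the request body, byte-swapped as required)
 *	mpt_put_msg_frame(cb_idx, ioc, mf);
 *
 * The frame returns to the FreeQ either when the registered callback
 * returns non-zero (see mpt_turbo_reply()/mpt_reply()) or via an
 * explicit mpt_free_msg_frame() call.
 */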
/**
* mpt_put_msg_frame_hi_pri - Send a hi-pri protocol-specific MPT request frame
* @cb_idx: Handle of registered MPT protocol driver
* @ioc: Pointer to MPT adapter structure
* @mf: Pointer to MPT request frame
*
* Send a protocol-specific MPT request frame to an IOC using
* hi-priority request queue.
*
* This routine posts an MPT request frame to the request post FIFO of a
* specific MPT adapter.
**/
void
mpt_put_msg_frame_hi_pri(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
{
u32 mf_dma_addr;
int req_offset;
u16 req_idx; /* Request index */
/* ensure values are reset properly! */
mf->u.frame.hwhdr.msgctxu.fld.cb_idx = cb_idx;
req_offset = (u8 *)mf - (u8 *)ioc->req_frames;
req_idx = req_offset / ioc->req_sz;
mf->u.frame.hwhdr.msgctxu.fld.req_idx = cpu_to_le16(req_idx);
mf->u.frame.hwhdr.msgctxu.fld.rsvd = 0;
DBG_DUMP_PUT_MSG_FRAME(ioc, (u32 *)mf);
mf_dma_addr = (ioc->req_frames_low_dma + req_offset);
dsgprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mf_dma_addr=%x req_idx=%d\n",
ioc->name, mf_dma_addr, req_idx));
CHIPREG_WRITE32(&ioc->chip->RequestHiPriFifo, mf_dma_addr);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_free_msg_frame - Place MPT request frame back on FreeQ.
* @ioc: Pointer to MPT adapter structure
* @mf: Pointer to MPT request frame
*
* This routine places a MPT request frame back on the MPT adapter's
* FreeQ.
*/
void
mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
{
unsigned long flags;
/* Put Request back on FreeQ! */
spin_lock_irqsave(&ioc->FreeQlock, flags);
if (cpu_to_le32(mf->u.frame.linkage.arg1) == 0xdeadbeaf)
goto out;
/* signature to know if this mf is freed */
mf->u.frame.linkage.arg1 = cpu_to_le32(0xdeadbeaf);
list_add(&mf->u.frame.linkage.list, &ioc->FreeQ);
#ifdef MFCNT
ioc->mfcnt--;
#endif
out:
spin_unlock_irqrestore(&ioc->FreeQlock, flags);
}
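/*
 * Descriptive note: the 0xdeadbeaf value written to linkage.arg1 above
 * acts as an "already freed" signature, so a second
 * mpt_free_msg_frame() call on the same frame is silently ignored
 * instead of corrupting the FreeQ with a double list_add().
 */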
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_add_sge - Place a simple 32 bit SGE at address pAddr.
* @pAddr: virtual address for SGE
* @flagslength: SGE flags and data transfer length
* @dma_addr: Physical address
*
 * This routine writes a simple 32 bit SGE, built from @flagslength
 * and @dma_addr, at the virtual address @pAddr.
*/
static void
mpt_add_sge(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
{
SGESimple32_t *pSge = (SGESimple32_t *) pAddr;
pSge->FlagsLength = cpu_to_le32(flagslength);
pSge->Address = cpu_to_le32(dma_addr);
}
/**
* mpt_add_sge_64bit - Place a simple 64 bit SGE at address pAddr.
* @pAddr: virtual address for SGE
* @flagslength: SGE flags and data transfer length
* @dma_addr: Physical address
*
 * This routine writes a simple 64 bit SGE, setting the 64 bit
 * addressing flag, at the virtual address @pAddr.
**/
static void
mpt_add_sge_64bit(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
{
SGESimple64_t *pSge = (SGESimple64_t *) pAddr;
pSge->Address.Low = cpu_to_le32
(lower_32_bits(dma_addr));
pSge->Address.High = cpu_to_le32
(upper_32_bits(dma_addr));
pSge->FlagsLength = cpu_to_le32
((flagslength | MPT_SGE_FLAGS_64_BIT_ADDRESSING));
}
/**
* mpt_add_sge_64bit_1078 - Place a simple 64 bit SGE at address pAddr (1078 workaround).
* @pAddr: virtual address for SGE
* @flagslength: SGE flags and data transfer length
* @dma_addr: Physical address
*
 * This routine writes a simple 64 bit SGE at the virtual address
 * @pAddr, applying the 1078 36GB-limitation errata workaround.
**/
static void
mpt_add_sge_64bit_1078(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
{
SGESimple64_t *pSge = (SGESimple64_t *) pAddr;
u32 tmp;
pSge->Address.Low = cpu_to_le32
(lower_32_bits(dma_addr));
tmp = (u32)(upper_32_bits(dma_addr));
/*
* 1078 errata workaround for the 36GB limitation
*/
if ((((u64)dma_addr + MPI_SGE_LENGTH(flagslength)) >> 32) == 9) {
flagslength |=
MPI_SGE_SET_FLAGS(MPI_SGE_FLAGS_LOCAL_ADDRESS);
tmp |= (1<<31);
if (mpt_debug_level & MPT_DEBUG_36GB_MEM)
printk(KERN_DEBUG "1078 P0M2 addressing for "
"addr = 0x%llx len = %d\n",
(unsigned long long)dma_addr,
MPI_SGE_LENGTH(flagslength));
}
pSge->Address.High = cpu_to_le32(tmp);
pSge->FlagsLength = cpu_to_le32(
(flagslength | MPT_SGE_FLAGS_64_BIT_ADDRESSING));
}
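/*
 * Descriptive note on the SGE builders above: callers go through the
 * ioc->add_sge function pointer, which mpt_attach()/mpt_resume() point
 * at the 32 bit, 64 bit, or 1078-errata variant depending on the DMA
 * mask and device. The flagslength word combines MPI_SGE_FLAGS_*
 * values (shifted by MPI_SGE_FLAGS_SHIFT) with the transfer length,
 * as mpt_host_page_alloc() demonstrates below.
 */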
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_add_chain - Place a 32 bit chain SGE at address pAddr.
* @pAddr: virtual address for SGE
* @next: nextChainOffset value (u32's)
* @length: length of next SGL segment
* @dma_addr: Physical address
*
*/
static void
mpt_add_chain(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
{
SGEChain32_t *pChain = (SGEChain32_t *) pAddr;
pChain->Length = cpu_to_le16(length);
pChain->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
pChain->NextChainOffset = next;
pChain->Address = cpu_to_le32(dma_addr);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_add_chain_64bit - Place a 64 bit chain SGE at address pAddr.
* @pAddr: virtual address for SGE
* @next: nextChainOffset value (u32's)
* @length: length of next SGL segment
* @dma_addr: Physical address
*
*/
static void
mpt_add_chain_64bit(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
{
SGEChain64_t *pChain = (SGEChain64_t *) pAddr;
u32 tmp = dma_addr & 0xFFFFFFFF;
pChain->Length = cpu_to_le16(length);
pChain->Flags = (MPI_SGE_FLAGS_CHAIN_ELEMENT |
MPI_SGE_FLAGS_64_BIT_ADDRESSING);
pChain->NextChainOffset = next;
pChain->Address.Low = cpu_to_le32(tmp);
tmp = (u32)(upper_32_bits(dma_addr));
pChain->Address.High = cpu_to_le32(tmp);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_send_handshake_request - Send MPT request via doorbell handshake method.
* @cb_idx: Handle of registered MPT protocol driver
* @ioc: Pointer to MPT adapter structure
* @reqBytes: Size of the request in bytes
* @req: Pointer to MPT request frame
* @sleepFlag: Use schedule if CAN_SLEEP else use udelay.
*
* This routine is used exclusively to send MptScsiTaskMgmt
* requests since they are required to be sent via doorbell handshake.
*
 * NOTE: It is the caller's responsibility to byte-swap fields in the
 * request which are greater than 1 byte in size.
*
* Returns 0 for success, non-zero for failure.
*/
int
mpt_send_handshake_request(u8 cb_idx, MPT_ADAPTER *ioc, int reqBytes, u32 *req, int sleepFlag)
{
int r = 0;
u8 *req_as_bytes;
int ii;
/* State is known to be good upon entering
* this function so issue the bus reset
* request.
*/
/*
 * Emulate what mpt_put_msg_frame() does with respect to
 * sanity-setting cb_idx/req_idx. But ONLY if this request
 * is in the proper (pre-alloc'd) request buffer range...
 */
ii = MFPTR_2_MPT_INDEX(ioc,(MPT_FRAME_HDR*)req);
if (reqBytes >= 12 && ii >= 0 && ii < ioc->req_depth) {
MPT_FRAME_HDR *mf = (MPT_FRAME_HDR*)req;
mf->u.frame.hwhdr.msgctxu.fld.req_idx = cpu_to_le16(ii);
mf->u.frame.hwhdr.msgctxu.fld.cb_idx = cb_idx;
}
/* Make sure there are no doorbells */
CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
CHIPREG_WRITE32(&ioc->chip->Doorbell,
((MPI_FUNCTION_HANDSHAKE<<MPI_DOORBELL_FUNCTION_SHIFT) |
((reqBytes/4)<<MPI_DOORBELL_ADD_DWORDS_SHIFT)));
/* Wait for IOC doorbell int */
if ((ii = WaitForDoorbellInt(ioc, 5, sleepFlag)) < 0) {
return ii;
}
/* Read doorbell and check for active bit */
if (!(CHIPREG_READ32(&ioc->chip->Doorbell) & MPI_DOORBELL_ACTIVE))
return -5;
dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_send_handshake_request start, WaitCnt=%d\n",
ioc->name, ii));
CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0) {
return -2;
}
/* Send request via doorbell handshake */
req_as_bytes = (u8 *) req;
for (ii = 0; ii < reqBytes/4; ii++) {
u32 word;
word = ((req_as_bytes[(ii*4) + 0] << 0) |
(req_as_bytes[(ii*4) + 1] << 8) |
(req_as_bytes[(ii*4) + 2] << 16) |
(req_as_bytes[(ii*4) + 3] << 24));
CHIPREG_WRITE32(&ioc->chip->Doorbell, word);
if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0) {
r = -3;
break;
}
}
if (r >= 0 && WaitForDoorbellInt(ioc, 10, sleepFlag) >= 0)
r = 0;
else
r = -4;
/* Make sure there are no doorbells */
CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
return r;
}
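/*
 * Descriptive note on the handshake above: the request is pushed
 * through the single Doorbell register one 32 bit word at a time, with
 * a WaitForDoorbellAck() after each word, rather than through the
 * request post FIFO; this is why, as documented above, only
 * MptScsiTaskMgmt requests use this path.
 */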
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_host_page_access_control - control the IOC's Host Page Buffer access
* @ioc: Pointer to MPT adapter structure
* @access_control_value: define bits below
* @sleepFlag: Specifies whether the process can sleep
*
* Provides mechanism for the host driver to control the IOC's
* Host Page Buffer access.
*
* Access Control Value - bits[15:12]
* 0h Reserved
* 1h Enable Access { MPI_DB_HPBAC_ENABLE_ACCESS }
* 2h Disable Access { MPI_DB_HPBAC_DISABLE_ACCESS }
* 3h Free Buffer { MPI_DB_HPBAC_FREE_BUFFER }
*
* Returns 0 for success, non-zero for failure.
*/
static int
mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int sleepFlag)
{
/* return if in use */
if (CHIPREG_READ32(&ioc->chip->Doorbell)
& MPI_DOORBELL_ACTIVE)
return -1;
CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
CHIPREG_WRITE32(&ioc->chip->Doorbell,
((MPI_FUNCTION_HOST_PAGEBUF_ACCESS_CONTROL
<<MPI_DOORBELL_FUNCTION_SHIFT) |
(access_control_value<<12)));
/* Wait for IOC to clear Doorbell Status bit */
if (WaitForDoorbellAck(ioc, 5, sleepFlag) < 0)
return -2;
else
return 0;
}
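/*
 * Illustrative call (sketch, not from the original source): releasing
 * the host page buffer back to the host would use the Free Buffer
 * code documented above:
 *
 *	mpt_host_page_access_control(ioc, MPI_DB_HPBAC_FREE_BUFFER,
 *	    CAN_SLEEP);
 */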
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_host_page_alloc - allocate system memory for the fw
 * @ioc: Pointer to MPT adapter structure
 * @ioc_init: Pointer to ioc init config page
 *
 * If memory was already allocated in the past, resend the same pointer.
* Returns 0 for success, non-zero for failure.
*/
static int
mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init)
{
char *psge;
int flags_length;
u32 host_page_buffer_sz=0;
if(!ioc->HostPageBuffer) {
host_page_buffer_sz =
le32_to_cpu(ioc->facts.HostPageBufferSGE.FlagsLength) & 0xFFFFFF;
if(!host_page_buffer_sz)
return 0; /* fw doesn't need any host buffers */
/* spin till we get enough memory */
while (host_page_buffer_sz > 0) {
ioc->HostPageBuffer =
dma_alloc_coherent(&ioc->pcidev->dev,
host_page_buffer_sz,
&ioc->HostPageBuffer_dma,
GFP_KERNEL);
if (ioc->HostPageBuffer) {
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"host_page_buffer @ %p, dma @ %x, sz=%d bytes\n",
ioc->name, ioc->HostPageBuffer,
(u32)ioc->HostPageBuffer_dma,
host_page_buffer_sz));
ioc->alloc_total += host_page_buffer_sz;
ioc->HostPageBuffer_sz = host_page_buffer_sz;
break;
}
host_page_buffer_sz -= (4*1024);
}
}
if(!ioc->HostPageBuffer) {
printk(MYIOC_s_ERR_FMT
"Failed to alloc memory for host_page_buffer!\n",
ioc->name);
return -999;
}
psge = (char *)&ioc_init->HostPageBufferSGE;
flags_length = MPI_SGE_FLAGS_SIMPLE_ELEMENT |
MPI_SGE_FLAGS_SYSTEM_ADDRESS |
MPI_SGE_FLAGS_HOST_TO_IOC |
MPI_SGE_FLAGS_END_OF_BUFFER;
flags_length = flags_length << MPI_SGE_FLAGS_SHIFT;
flags_length |= ioc->HostPageBuffer_sz;
ioc->add_sge(psge, flags_length, ioc->HostPageBuffer_dma);
ioc->facts.HostPageBufferSGE = ioc_init->HostPageBufferSGE;
return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_verify_adapter - Given IOC identifier, set pointer to its adapter structure.
* @iocid: IOC unique identifier (integer)
* @iocpp: Pointer to pointer to IOC adapter
*
* Given a unique IOC identifier, set pointer to the associated MPT
* adapter structure.
*
* Returns iocid and sets iocpp if iocid is found.
* Returns -1 if iocid is not found.
*/
int
mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp)
{
MPT_ADAPTER *ioc;
list_for_each_entry(ioc,&ioc_list,list) {
if (ioc->id == iocid) {
*iocpp =ioc;
return iocid;
}
}
*iocpp = NULL;
return -1;
}
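/*
 * Illustrative lookup sketch (hypothetical caller, not part of this
 * file), following the return contract documented above:
 *
 *	MPT_ADAPTER *ioc;
 *
 *	if (mpt_verify_adapter(iocnum, &ioc) < 0 || ioc == NULL)
 *		return -ENODEV;
 */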
/**
* mpt_get_product_name - returns product string
* @vendor: pci vendor id
* @device: pci device id
* @revision: pci revision id
*
* Returns product string displayed when driver loads,
 * in /proc/mpt/summary and /sys/class/scsi_host/host<X>/version_product
*
**/
static const char*
mpt_get_product_name(u16 vendor, u16 device, u8 revision)
{
char *product_str = NULL;
if (vendor == PCI_VENDOR_ID_BROCADE) {
switch (device)
{
case MPI_MANUFACTPAGE_DEVICEID_FC949E:
switch (revision)
{
case 0x00:
product_str = "BRE040 A0";
break;
case 0x01:
product_str = "BRE040 A1";
break;
default:
product_str = "BRE040";
break;
}
break;
}
goto out;
}
switch (device)
{
case MPI_MANUFACTPAGE_DEVICEID_FC909:
product_str = "LSIFC909 B1";
break;
case MPI_MANUFACTPAGE_DEVICEID_FC919:
product_str = "LSIFC919 B0";
break;
case MPI_MANUFACTPAGE_DEVICEID_FC929:
product_str = "LSIFC929 B0";
break;
case MPI_MANUFACTPAGE_DEVICEID_FC919X:
if (revision < 0x80)
product_str = "LSIFC919X A0";
else
product_str = "LSIFC919XL A1";
break;
case MPI_MANUFACTPAGE_DEVICEID_FC929X:
if (revision < 0x80)
product_str = "LSIFC929X A0";
else
product_str = "LSIFC929XL A1";
break;
case MPI_MANUFACTPAGE_DEVICEID_FC939X:
product_str = "LSIFC939X A1";
break;
case MPI_MANUFACTPAGE_DEVICEID_FC949X:
product_str = "LSIFC949X A1";
break;
case MPI_MANUFACTPAGE_DEVICEID_FC949E:
switch (revision)
{
case 0x00:
product_str = "LSIFC949E A0";
break;
case 0x01:
product_str = "LSIFC949E A1";
break;
default:
product_str = "LSIFC949E";
break;
}
break;
case MPI_MANUFACTPAGE_DEVID_53C1030:
switch (revision)
{
case 0x00:
product_str = "LSI53C1030 A0";
break;
case 0x01:
product_str = "LSI53C1030 B0";
break;
case 0x03:
product_str = "LSI53C1030 B1";
break;
case 0x07:
product_str = "LSI53C1030 B2";
break;
case 0x08:
product_str = "LSI53C1030 C0";
break;
case 0x80:
product_str = "LSI53C1030T A0";
break;
case 0x83:
product_str = "LSI53C1030T A2";
break;
case 0x87:
product_str = "LSI53C1030T A3";
break;
case 0xc1:
product_str = "LSI53C1020A A1";
break;
default:
product_str = "LSI53C1030";
break;
}
break;
case MPI_MANUFACTPAGE_DEVID_1030_53C1035:
switch (revision)
{
case 0x03:
product_str = "LSI53C1035 A2";
break;
case 0x04:
product_str = "LSI53C1035 B0";
break;
default:
product_str = "LSI53C1035";
break;
}
break;
case MPI_MANUFACTPAGE_DEVID_SAS1064:
switch (revision)
{
case 0x00:
product_str = "LSISAS1064 A1";
break;
case 0x01:
product_str = "LSISAS1064 A2";
break;
case 0x02:
product_str = "LSISAS1064 A3";
break;
case 0x03:
product_str = "LSISAS1064 A4";
break;
default:
product_str = "LSISAS1064";
break;
}
break;
case MPI_MANUFACTPAGE_DEVID_SAS1064E:
switch (revision)
{
case 0x00:
product_str = "LSISAS1064E A0";
break;
case 0x01:
product_str = "LSISAS1064E B0";
break;
case 0x02:
product_str = "LSISAS1064E B1";
break;
case 0x04:
product_str = "LSISAS1064E B2";
break;
case 0x08:
product_str = "LSISAS1064E B3";
break;
default:
product_str = "LSISAS1064E";
break;
}
break;
case MPI_MANUFACTPAGE_DEVID_SAS1068:
switch (revision)
{
case 0x00:
product_str = "LSISAS1068 A0";
break;
case 0x01:
product_str = "LSISAS1068 B0";
break;
case 0x02:
product_str = "LSISAS1068 B1";
break;
default:
product_str = "LSISAS1068";
break;
}
break;
case MPI_MANUFACTPAGE_DEVID_SAS1068E:
switch (revision)
{
case 0x00:
product_str = "LSISAS1068E A0";
break;
case 0x01:
product_str = "LSISAS1068E B0";
break;
case 0x02:
product_str = "LSISAS1068E B1";
break;
case 0x04:
product_str = "LSISAS1068E B2";
break;
case 0x08:
product_str = "LSISAS1068E B3";
break;
default:
product_str = "LSISAS1068E";
break;
}
break;
case MPI_MANUFACTPAGE_DEVID_SAS1078:
switch (revision)
{
case 0x00:
product_str = "LSISAS1078 A0";
break;
case 0x01:
product_str = "LSISAS1078 B0";
break;
case 0x02:
product_str = "LSISAS1078 C0";
break;
case 0x03:
product_str = "LSISAS1078 C1";
break;
case 0x04:
product_str = "LSISAS1078 C2";
break;
default:
product_str = "LSISAS1078";
break;
}
break;
}
out:
return product_str;
}
/**
* mpt_mapresources - map in memory mapped io
 * @ioc: Pointer to MPT adapter structure
*
**/
static int
mpt_mapresources(MPT_ADAPTER *ioc)
{
u8 __iomem *mem;
int ii;
resource_size_t mem_phys;
unsigned long port;
u32 msize;
u32 psize;
int r = -ENODEV;
struct pci_dev *pdev;
pdev = ioc->pcidev;
ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
if (pci_enable_device_mem(pdev)) {
printk(MYIOC_s_ERR_FMT "pci_enable_device_mem() "
"failed\n", ioc->name);
return r;
}
if (pci_request_selected_regions(pdev, ioc->bars, "mpt")) {
printk(MYIOC_s_ERR_FMT "pci_request_selected_regions() with "
"MEM failed\n", ioc->name);
goto out_pci_disable_device;
}
if (sizeof(dma_addr_t) > 4) {
const uint64_t required_mask = dma_get_required_mask
(&pdev->dev);
if (required_mask > DMA_BIT_MASK(32)
&& !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))
&& !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
ioc->dma_mask = DMA_BIT_MASK(64);
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
": 64 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
ioc->name));
} else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))
&& !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) {
ioc->dma_mask = DMA_BIT_MASK(32);
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
ioc->name));
} else {
printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n",
ioc->name, pci_name(pdev));
goto out_pci_release_region;
}
} else {
if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))
&& !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) {
ioc->dma_mask = DMA_BIT_MASK(32);
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
ioc->name));
} else {
printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n",
ioc->name, pci_name(pdev));
goto out_pci_release_region;
}
}
mem_phys = msize = 0;
port = psize = 0;
for (ii = 0; ii < DEVICE_COUNT_RESOURCE; ii++) {
if (pci_resource_flags(pdev, ii) & PCI_BASE_ADDRESS_SPACE_IO) {
if (psize)
continue;
/* Get I/O space! */
port = pci_resource_start(pdev, ii);
psize = pci_resource_len(pdev, ii);
} else {
if (msize)
continue;
/* Get memmap */
mem_phys = pci_resource_start(pdev, ii);
msize = pci_resource_len(pdev, ii);
}
}
ioc->mem_size = msize;
mem = NULL;
/* Get logical ptr for PciMem0 space */
mem = ioremap(mem_phys, msize);
if (mem == NULL) {
printk(MYIOC_s_ERR_FMT ": ERROR - Unable to map adapter"
" memory!\n", ioc->name);
r = -EINVAL;
goto out_pci_release_region;
}
ioc->memmap = mem;
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "mem = %p, mem_phys = %llx\n",
ioc->name, mem, (unsigned long long)mem_phys));
ioc->mem_phys = mem_phys;
ioc->chip = (SYSIF_REGS __iomem *)mem;
/* Save Port IO values in case we need to do downloadboot */
ioc->pio_mem_phys = port;
ioc->pio_chip = (SYSIF_REGS __iomem *)port;
return 0;
out_pci_release_region:
pci_release_selected_regions(pdev, ioc->bars);
out_pci_disable_device:
pci_disable_device(pdev);
return r;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_attach - Install a PCI intelligent MPT adapter.
* @pdev: Pointer to pci_dev structure
* @id: PCI device ID information
*
* This routine performs all the steps necessary to bring the IOC of
 * an MPT adapter to an OPERATIONAL state. This includes registering
* memory regions, registering the interrupt, and allocating request
* and reply memory pools.
*
* This routine also pre-fetches the LAN MAC address of a Fibre Channel
* MPT adapter.
*
* Returns 0 for success, non-zero for failure.
*
* TODO: Add support for polled controllers
*/
int
mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
{
MPT_ADAPTER *ioc;
u8 cb_idx;
int r = -ENODEV;
u8 pcixcmd;
static int mpt_ids = 0;
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *dent;
#endif
ioc = kzalloc(sizeof(MPT_ADAPTER), GFP_KERNEL);
if (ioc == NULL) {
printk(KERN_ERR MYNAM ": ERROR - Insufficient memory to add adapter!\n");
return -ENOMEM;
}
ioc->id = mpt_ids++;
sprintf(ioc->name, "ioc%d", ioc->id);
dinitprintk(ioc, printk(KERN_WARNING MYNAM ": mpt_adapter_install\n"));
/*
* set initial debug level
* (refer to mptdebug.h)
*
*/
ioc->debug_level = mpt_debug_level;
if (mpt_debug_level)
printk(KERN_INFO "mpt_debug_level=%xh\n", mpt_debug_level);
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT ": mpt_adapter_install\n", ioc->name));
ioc->pcidev = pdev;
if (mpt_mapresources(ioc)) {
goto out_free_ioc;
}
/*
* Setting up proper handlers for scatter gather handling
*/
if (ioc->dma_mask == DMA_BIT_MASK(64)) {
if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1078)
ioc->add_sge = &mpt_add_sge_64bit_1078;
else
ioc->add_sge = &mpt_add_sge_64bit;
ioc->add_chain = &mpt_add_chain_64bit;
ioc->sg_addr_size = 8;
} else {
ioc->add_sge = &mpt_add_sge;
ioc->add_chain = &mpt_add_chain;
ioc->sg_addr_size = 4;
}
ioc->SGE_size = sizeof(u32) + ioc->sg_addr_size;
ioc->alloc_total = sizeof(MPT_ADAPTER);
ioc->req_sz = MPT_DEFAULT_FRAME_SIZE; /* avoid div by zero! */
ioc->reply_sz = MPT_REPLY_FRAME_SIZE;
spin_lock_init(&ioc->taskmgmt_lock);
mutex_init(&ioc->internal_cmds.mutex);
init_completion(&ioc->internal_cmds.done);
mutex_init(&ioc->mptbase_cmds.mutex);
init_completion(&ioc->mptbase_cmds.done);
mutex_init(&ioc->taskmgmt_cmds.mutex);
init_completion(&ioc->taskmgmt_cmds.done);
/* Initialize the event logging.
*/
ioc->eventTypes = 0; /* None */
ioc->eventContext = 0;
ioc->eventLogSize = 0;
ioc->events = NULL;
#ifdef MFCNT
ioc->mfcnt = 0;
#endif
ioc->sh = NULL;
ioc->cached_fw = NULL;
/* Initialize SCSI Config Data structure
*/
memset(&ioc->spi_data, 0, sizeof(SpiCfgData));
/* Initialize the fc rport list head.
*/
INIT_LIST_HEAD(&ioc->fc_rports);
/* Find lookup slot. */
INIT_LIST_HEAD(&ioc->list);
/* Initialize workqueue */
INIT_DELAYED_WORK(&ioc->fault_reset_work, mpt_fault_reset_work);
snprintf(ioc->reset_work_q_name, MPT_KOBJ_NAME_LEN,
"mpt_poll_%d", ioc->id);
ioc->reset_work_q = alloc_workqueue(ioc->reset_work_q_name,
WQ_MEM_RECLAIM, 0);
if (!ioc->reset_work_q) {
printk(MYIOC_s_ERR_FMT "Insufficient memory to add adapter!\n",
ioc->name);
r = -ENOMEM;
goto out_unmap_resources;
}
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "facts @ %p, pfacts[0] @ %p\n",
ioc->name, &ioc->facts, &ioc->pfacts[0]));
ioc->prod_name = mpt_get_product_name(pdev->vendor, pdev->device,
pdev->revision);
switch (pdev->device)
{
case MPI_MANUFACTPAGE_DEVICEID_FC939X:
case MPI_MANUFACTPAGE_DEVICEID_FC949X:
ioc->errata_flag_1064 = 1;
fallthrough;
case MPI_MANUFACTPAGE_DEVICEID_FC909:
case MPI_MANUFACTPAGE_DEVICEID_FC929:
case MPI_MANUFACTPAGE_DEVICEID_FC919:
case MPI_MANUFACTPAGE_DEVICEID_FC949E:
ioc->bus_type = FC;
break;
case MPI_MANUFACTPAGE_DEVICEID_FC929X:
if (pdev->revision < XL_929) {
/* 929X Chip Fix. Set Split transactions level
* for PCIX. Set MOST bits to zero.
*/
pci_read_config_byte(pdev, 0x6a, &pcixcmd);
pcixcmd &= 0x8F;
pci_write_config_byte(pdev, 0x6a, pcixcmd);
} else {
/* 929XL Chip Fix. Set MMRBC to 0x08.
*/
pci_read_config_byte(pdev, 0x6a, &pcixcmd);
pcixcmd |= 0x08;
pci_write_config_byte(pdev, 0x6a, pcixcmd);
}
ioc->bus_type = FC;
break;
case MPI_MANUFACTPAGE_DEVICEID_FC919X:
/* 919X Chip Fix. Set Split transactions level
* for PCIX. Set MOST bits to zero.
*/
pci_read_config_byte(pdev, 0x6a, &pcixcmd);
pcixcmd &= 0x8F;
pci_write_config_byte(pdev, 0x6a, pcixcmd);
ioc->bus_type = FC;
break;
case MPI_MANUFACTPAGE_DEVID_53C1030:
/* 1030 Chip Fix. Disable Split transactions
* for PCIX. Set MOST bits to zero if Rev < C0( = 8).
*/
if (pdev->revision < C0_1030) {
pci_read_config_byte(pdev, 0x6a, &pcixcmd);
pcixcmd &= 0x8F;
pci_write_config_byte(pdev, 0x6a, pcixcmd);
}
fallthrough;
case MPI_MANUFACTPAGE_DEVID_1030_53C1035:
ioc->bus_type = SPI;
break;
case MPI_MANUFACTPAGE_DEVID_SAS1064:
case MPI_MANUFACTPAGE_DEVID_SAS1068:
ioc->errata_flag_1064 = 1;
ioc->bus_type = SAS;
break;
case MPI_MANUFACTPAGE_DEVID_SAS1064E:
case MPI_MANUFACTPAGE_DEVID_SAS1068E:
case MPI_MANUFACTPAGE_DEVID_SAS1078:
ioc->bus_type = SAS;
break;
}
switch (ioc->bus_type) {
case SAS:
ioc->msi_enable = mpt_msi_enable_sas;
break;
case SPI:
ioc->msi_enable = mpt_msi_enable_spi;
break;
case FC:
ioc->msi_enable = mpt_msi_enable_fc;
break;
default:
ioc->msi_enable = 0;
break;
}
ioc->fw_events_off = 1;
if (ioc->errata_flag_1064)
pci_disable_io_access(pdev);
spin_lock_init(&ioc->FreeQlock);
/* Disable all! */
CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
ioc->active = 0;
CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
/* Set IOC ptr in the pcidev's driver data. */
pci_set_drvdata(ioc->pcidev, ioc);
/* Set lookup ptr. */
list_add_tail(&ioc->list, &ioc_list);
/* Check for "bound ports" (929, 929X, 1030, 1035) to reduce redundant resets.
*/
mpt_detect_bound_ports(ioc, pdev);
INIT_LIST_HEAD(&ioc->fw_event_list);
spin_lock_init(&ioc->fw_event_lock);
snprintf(ioc->fw_event_q_name, MPT_KOBJ_NAME_LEN, "mpt/%d", ioc->id);
ioc->fw_event_q = alloc_workqueue(ioc->fw_event_q_name,
WQ_MEM_RECLAIM, 0);
if (!ioc->fw_event_q) {
printk(MYIOC_s_ERR_FMT "Insufficient memory to add adapter!\n",
ioc->name);
r = -ENOMEM;
goto out_remove_ioc;
}
if ((r = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_BRINGUP,
CAN_SLEEP)) != 0){
printk(MYIOC_s_ERR_FMT "didn't initialize properly! (%d)\n",
ioc->name, r);
destroy_workqueue(ioc->fw_event_q);
ioc->fw_event_q = NULL;
list_del(&ioc->list);
if (ioc->alt_ioc)
ioc->alt_ioc->alt_ioc = NULL;
iounmap(ioc->memmap);
if (pci_is_enabled(pdev))
pci_disable_device(pdev);
if (r != -5)
pci_release_selected_regions(pdev, ioc->bars);
destroy_workqueue(ioc->reset_work_q);
ioc->reset_work_q = NULL;
kfree(ioc);
return r;
}
/* call per device driver probe entry point */
for(cb_idx = 0; cb_idx < MPT_MAX_PROTOCOL_DRIVERS; cb_idx++) {
if(MptDeviceDriverHandlers[cb_idx] &&
MptDeviceDriverHandlers[cb_idx]->probe) {
MptDeviceDriverHandlers[cb_idx]->probe(pdev);
}
}
#ifdef CONFIG_PROC_FS
/*
* Create "/proc/mpt/iocN" subdirectory entry for each MPT adapter.
*/
dent = proc_mkdir(ioc->name, mpt_proc_root_dir);
if (dent) {
proc_create_single_data("info", S_IRUGO, dent,
mpt_iocinfo_proc_show, ioc);
proc_create_single_data("summary", S_IRUGO, dent,
mpt_summary_proc_show, ioc);
}
#endif
if (!ioc->alt_ioc)
queue_delayed_work(ioc->reset_work_q, &ioc->fault_reset_work,
msecs_to_jiffies(MPT_POLLING_INTERVAL));
return 0;
out_remove_ioc:
list_del(&ioc->list);
if (ioc->alt_ioc)
ioc->alt_ioc->alt_ioc = NULL;
destroy_workqueue(ioc->reset_work_q);
ioc->reset_work_q = NULL;
out_unmap_resources:
iounmap(ioc->memmap);
pci_disable_device(pdev);
pci_release_selected_regions(pdev, ioc->bars);
out_free_ioc:
kfree(ioc);
return r;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_detach - Remove a PCI intelligent MPT adapter.
* @pdev: Pointer to pci_dev structure
*/
void
mpt_detach(struct pci_dev *pdev)
{
MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
char pname[64];
u8 cb_idx;
unsigned long flags;
struct workqueue_struct *wq;
/*
* Stop polling ioc for fault condition
*/
spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
wq = ioc->reset_work_q;
ioc->reset_work_q = NULL;
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
cancel_delayed_work(&ioc->fault_reset_work);
destroy_workqueue(wq);
spin_lock_irqsave(&ioc->fw_event_lock, flags);
wq = ioc->fw_event_q;
ioc->fw_event_q = NULL;
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
destroy_workqueue(wq);
snprintf(pname, sizeof(pname), MPT_PROCFS_MPTBASEDIR "/%s/summary", ioc->name);
remove_proc_entry(pname, NULL);
snprintf(pname, sizeof(pname), MPT_PROCFS_MPTBASEDIR "/%s/info", ioc->name);
remove_proc_entry(pname, NULL);
snprintf(pname, sizeof(pname), MPT_PROCFS_MPTBASEDIR "/%s", ioc->name);
remove_proc_entry(pname, NULL);
/* call per device driver remove entry point */
for(cb_idx = 0; cb_idx < MPT_MAX_PROTOCOL_DRIVERS; cb_idx++) {
if(MptDeviceDriverHandlers[cb_idx] &&
MptDeviceDriverHandlers[cb_idx]->remove) {
MptDeviceDriverHandlers[cb_idx]->remove(pdev);
}
}
/* Disable interrupts! */
CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
ioc->active = 0;
synchronize_irq(pdev->irq);
/* Clear any lingering interrupt */
CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
CHIPREG_READ32(&ioc->chip->IntStatus);
mpt_adapter_dispose(ioc);
}
/**************************************************************************
* Power Management
*/
#ifdef CONFIG_PM
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_suspend - Fusion MPT base driver suspend routine.
* @pdev: Pointer to pci_dev structure
* @state: new state to enter
*/
int
mpt_suspend(struct pci_dev *pdev, pm_message_t state)
{
u32 device_state;
MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
device_state = pci_choose_state(pdev, state);
printk(MYIOC_s_INFO_FMT "pci-suspend: pdev=0x%p, slot=%s, Entering "
"operating state [D%d]\n", ioc->name, pdev, pci_name(pdev),
device_state);
/* put ioc into READY_STATE */
if (SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, CAN_SLEEP)) {
printk(MYIOC_s_ERR_FMT
"pci-suspend: IOC msg unit reset failed!\n", ioc->name);
}
/* disable interrupts */
CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
ioc->active = 0;
/* Clear any lingering interrupt */
CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
free_irq(ioc->pci_irq, ioc);
if (ioc->msi_enable)
pci_disable_msi(ioc->pcidev);
ioc->pci_irq = -1;
pci_save_state(pdev);
pci_disable_device(pdev);
pci_release_selected_regions(pdev, ioc->bars);
pci_set_power_state(pdev, device_state);
return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_resume - Fusion MPT base driver resume routine.
* @pdev: Pointer to pci_dev structure
*/
int
mpt_resume(struct pci_dev *pdev)
{
MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
u32 device_state = pdev->current_state;
int recovery_state;
int err;
printk(MYIOC_s_INFO_FMT "pci-resume: pdev=0x%p, slot=%s, Previous "
"operating state [D%d]\n", ioc->name, pdev, pci_name(pdev),
device_state);
pci_set_power_state(pdev, PCI_D0);
pci_enable_wake(pdev, PCI_D0, 0);
pci_restore_state(pdev);
ioc->pcidev = pdev;
err = mpt_mapresources(ioc);
if (err)
return err;
if (ioc->dma_mask == DMA_BIT_MASK(64)) {
if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1078)
ioc->add_sge = &mpt_add_sge_64bit_1078;
else
ioc->add_sge = &mpt_add_sge_64bit;
ioc->add_chain = &mpt_add_chain_64bit;
ioc->sg_addr_size = 8;
} else {
ioc->add_sge = &mpt_add_sge;
ioc->add_chain = &mpt_add_chain;
ioc->sg_addr_size = 4;
}
ioc->SGE_size = sizeof(u32) + ioc->sg_addr_size;
printk(MYIOC_s_INFO_FMT "pci-resume: ioc-state=0x%x,doorbell=0x%x\n",
ioc->name, (mpt_GetIocState(ioc, 1) >> MPI_IOC_STATE_SHIFT),
CHIPREG_READ32(&ioc->chip->Doorbell));
/*
* Errata workaround for SAS pci express:
* Upon returning to the D0 state, the contents of the doorbell will be
* stale data, and this will incorrectly signal to the host driver that
* the firmware is ready to process mpt commands. The workaround is
* to issue a diagnostic reset.
*/
if (ioc->bus_type == SAS && (pdev->device ==
MPI_MANUFACTPAGE_DEVID_SAS1068E || pdev->device ==
MPI_MANUFACTPAGE_DEVID_SAS1064E)) {
if (KickStart(ioc, 1, CAN_SLEEP) < 0) {
printk(MYIOC_s_WARN_FMT "pci-resume: Cannot recover\n",
ioc->name);
goto out;
}
}
/* bring ioc to operational state */
printk(MYIOC_s_INFO_FMT "Sending mpt_do_ioc_recovery\n", ioc->name);
recovery_state = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_BRINGUP,
CAN_SLEEP);
if (recovery_state != 0)
printk(MYIOC_s_WARN_FMT "pci-resume: Cannot recover, "
"error:[%x]\n", ioc->name, recovery_state);
else
printk(MYIOC_s_INFO_FMT
"pci-resume: success\n", ioc->name);
out:
return 0;
}
#endif
static int
mpt_signal_reset(u8 index, MPT_ADAPTER *ioc, int reset_phase)
{
if ((MptDriverClass[index] == MPTSPI_DRIVER &&
ioc->bus_type != SPI) ||
(MptDriverClass[index] == MPTFC_DRIVER &&
ioc->bus_type != FC) ||
(MptDriverClass[index] == MPTSAS_DRIVER &&
ioc->bus_type != SAS))
/* make sure we only call the relevant reset handler
* for the bus */
return 0;
return (MptResetHandlers[index])(ioc, reset_phase);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_do_ioc_recovery - Initialize or recover MPT adapter.
* @ioc: Pointer to MPT adapter structure
* @reason: Event word / reason
* @sleepFlag: Use msleep if CAN_SLEEP else use mdelay.
*
* This routine performs all the steps necessary to bring the IOC
* to an OPERATIONAL state.
*
* This routine also pre-fetches the LAN MAC address of a Fibre Channel
* MPT adapter.
*
* Returns:
* 0 for success
* -1 if failed to get board READY
* -2 if READY but IOCFacts Failed
* -3 if READY but PrimeIOCFifos Failed
* -4 if READY but IOCInit Failed
* -5 if failed to enable_device and/or request_selected_regions
* -6 if failed to upload firmware
*/
static int
mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
{
int hard_reset_done = 0;
int alt_ioc_ready = 0;
int hard;
int rc=0;
int ii;
int ret = 0;
int reset_alt_ioc_active = 0;
int irq_allocated = 0;
u8 *a;
printk(MYIOC_s_INFO_FMT "Initiating %s\n", ioc->name,
reason == MPT_HOSTEVENT_IOC_BRINGUP ? "bringup" : "recovery");
/* Disable reply interrupts (also blocks FreeQ) */
CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
ioc->active = 0;
if (ioc->alt_ioc) {
if (ioc->alt_ioc->active ||
reason == MPT_HOSTEVENT_IOC_RECOVER) {
reset_alt_ioc_active = 1;
/* Disable alt-IOC's reply interrupts
* (and FreeQ) for a bit
**/
CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask,
0xFFFFFFFF);
ioc->alt_ioc->active = 0;
}
}
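/* Recovery implies something went wrong, so default to a hard
* (diagnostic) reset; initial bringup first tries to reach READY
* without one.
*/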
hard = 1;
if (reason == MPT_HOSTEVENT_IOC_BRINGUP)
hard = 0;
if ((hard_reset_done = MakeIocReady(ioc, hard, sleepFlag)) < 0) {
if (hard_reset_done == -4) {
printk(MYIOC_s_WARN_FMT "Owned by PEER..skipping!\n",
ioc->name);
if (reset_alt_ioc_active && ioc->alt_ioc) {
/* (re)Enable alt-IOC! (reply interrupt, FreeQ) */
dprintk(ioc, printk(MYIOC_s_INFO_FMT
"alt_ioc reply irq re-enabled\n", ioc->alt_ioc->name));
CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask, MPI_HIM_DIM);
ioc->alt_ioc->active = 1;
}
} else {
printk(MYIOC_s_WARN_FMT
"NOT READY WARNING!\n", ioc->name);
}
ret = -1;
goto out;
}
/* hard_reset_done = 0 if a soft reset was performed
* and 1 if a hard reset was performed.
*/
if (hard_reset_done && reset_alt_ioc_active && ioc->alt_ioc) {
if ((rc = MakeIocReady(ioc->alt_ioc, 0, sleepFlag)) == 0)
alt_ioc_ready = 1;
else
printk(MYIOC_s_WARN_FMT
": alt-ioc Not ready WARNING!\n",
ioc->alt_ioc->name);
}
for (ii=0; ii<5; ii++) {
/* Get IOC facts! Allow 5 retries */
if ((rc = GetIocFacts(ioc, sleepFlag, reason)) == 0)
break;
}
if (ii == 5) {
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Retry IocFacts failed rc=%x\n", ioc->name, rc));
ret = -2;
} else if (reason == MPT_HOSTEVENT_IOC_BRINGUP) {
MptDisplayIocCapabilities(ioc);
}
if (alt_ioc_ready) {
if ((rc = GetIocFacts(ioc->alt_ioc, sleepFlag, reason)) != 0) {
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Initial Alt IocFacts failed rc=%x\n",
ioc->name, rc));
/* Retry - alt IOC was initialized once
*/
rc = GetIocFacts(ioc->alt_ioc, sleepFlag, reason);
}
if (rc) {
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Retry Alt IocFacts failed rc=%x\n", ioc->name, rc));
alt_ioc_ready = 0;
reset_alt_ioc_active = 0;
} else if (reason == MPT_HOSTEVENT_IOC_BRINGUP) {
MptDisplayIocCapabilities(ioc->alt_ioc);
}
}
if ((ret == 0) && (reason == MPT_HOSTEVENT_IOC_BRINGUP) &&
(ioc->facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT)) {
pci_release_selected_regions(ioc->pcidev, ioc->bars);
ioc->bars = pci_select_bars(ioc->pcidev, IORESOURCE_MEM |
IORESOURCE_IO);
if (pci_enable_device(ioc->pcidev))
return -5;
if (pci_request_selected_regions(ioc->pcidev, ioc->bars,
"mpt"))
return -5;
}
/*
* Device is reset now. It must have de-asserted the interrupt line
* (if it was asserted) and it should be safe to register for the
* interrupt now.
*/
if ((ret == 0) && (reason == MPT_HOSTEVENT_IOC_BRINGUP)) {
ioc->pci_irq = -1;
if (ioc->pcidev->irq) {
if (ioc->msi_enable && !pci_enable_msi(ioc->pcidev))
printk(MYIOC_s_INFO_FMT "PCI-MSI enabled\n",
ioc->name);
else
ioc->msi_enable = 0;
rc = request_irq(ioc->pcidev->irq, mpt_interrupt,
IRQF_SHARED, ioc->name, ioc);
if (rc < 0) {
printk(MYIOC_s_ERR_FMT "Unable to allocate "
"interrupt %d!\n",
ioc->name, ioc->pcidev->irq);
if (ioc->msi_enable)
pci_disable_msi(ioc->pcidev);
ret = -EBUSY;
goto out;
}
irq_allocated = 1;
ioc->pci_irq = ioc->pcidev->irq;
pci_set_master(ioc->pcidev); /* ?? */
pci_set_drvdata(ioc->pcidev, ioc);
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
"installed at interrupt %d\n", ioc->name,
ioc->pcidev->irq));
}
}
/* Prime reply & request queues!
* (mucho alloc's) Must be done prior to
* init as upper addresses are needed for init.
* If fails, continue with alt-ioc processing
*/
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "PrimeIocFifos\n",
ioc->name));
if ((ret == 0) && ((rc = PrimeIocFifos(ioc)) != 0))
ret = -3;
/* May need to check/upload firmware & data here!
* If fails, continue with alt-ioc processing
*/
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "SendIocInit\n",
ioc->name));
if ((ret == 0) && ((rc = SendIocInit(ioc, sleepFlag)) != 0))
ret = -4;
// NEW!
if (alt_ioc_ready && ((rc = PrimeIocFifos(ioc->alt_ioc)) != 0)) {
printk(MYIOC_s_WARN_FMT
": alt-ioc (%d) FIFO mgmt alloc WARNING!\n",
ioc->alt_ioc->name, rc);
alt_ioc_ready = 0;
reset_alt_ioc_active = 0;
}
if (alt_ioc_ready) {
if ((rc = SendIocInit(ioc->alt_ioc, sleepFlag)) != 0) {
alt_ioc_ready = 0;
reset_alt_ioc_active = 0;
printk(MYIOC_s_WARN_FMT
": alt-ioc: (%d) init failure WARNING!\n",
ioc->alt_ioc->name, rc);
}
}
if (reason == MPT_HOSTEVENT_IOC_BRINGUP){
if (ioc->upload_fw) {
ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"firmware upload required!\n", ioc->name));
/* Controller is not operational, cannot do upload
*/
if (ret == 0) {
rc = mpt_do_upload(ioc, sleepFlag);
if (rc == 0) {
if (ioc->alt_ioc && ioc->alt_ioc->cached_fw) {
/*
* Maintain only one pointer to FW memory
* so there will not be two attempts to
* downloadboot the onboard dual-function
* chips (mpt_adapter_disable,
* mpt_diag_reset)
*/
ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"mpt_upload: alt_%s has cached_fw=%p \n",
ioc->name, ioc->alt_ioc->name, ioc->alt_ioc->cached_fw));
ioc->cached_fw = NULL;
}
} else {
printk(MYIOC_s_WARN_FMT
"firmware upload failure!\n", ioc->name);
ret = -6;
}
}
}
}
/* Enable MPT base driver management of EventNotification
* and EventAck handling.
*/
if ((ret == 0) && (!ioc->facts.EventState)) {
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
"SendEventNotification\n",
ioc->name));
ret = SendEventNotification(ioc, 1, sleepFlag); /* 1=Enable */
}
if (ioc->alt_ioc && alt_ioc_ready && !ioc->alt_ioc->facts.EventState)
rc = SendEventNotification(ioc->alt_ioc, 1, sleepFlag);
if (ret == 0) {
/* Enable! (reply interrupt) */
CHIPREG_WRITE32(&ioc->chip->IntMask, MPI_HIM_DIM);
ioc->active = 1;
}
if (rc == 0) { /* alt ioc */
if (reset_alt_ioc_active && ioc->alt_ioc) {
/* (re)Enable alt-IOC! (reply interrupt) */
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "alt-ioc"
"reply irq re-enabled\n",
ioc->alt_ioc->name));
CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask,
MPI_HIM_DIM);
ioc->alt_ioc->active = 1;
}
}
/* Add additional "reason" check before call to GetLanConfigPages
* (combined with GetIoUnitPage2 call). This prevents a somewhat
* recursive scenario; GetLanConfigPages times out, timer expired
* routine calls HardResetHandler, which calls into here again,
* and we try GetLanConfigPages again...
*/
if ((ret == 0) && (reason == MPT_HOSTEVENT_IOC_BRINGUP)) {
/*
* Initialize link list for inactive raid volumes.
*/
mutex_init(&ioc->raid_data.inactive_list_mutex);
INIT_LIST_HEAD(&ioc->raid_data.inactive_list);
switch (ioc->bus_type) {
case SAS:
/* clear persistency table */
if(ioc->facts.IOCExceptions &
MPI_IOCFACTS_EXCEPT_PERSISTENT_TABLE_FULL) {
ret = mptbase_sas_persist_operation(ioc,
MPI_SAS_OP_CLEAR_NOT_PRESENT);
if(ret != 0)
goto out;
}
/* Find IM volumes
*/
mpt_findImVolumes(ioc);
/* Check, and possibly reset, the coalescing value
*/
mpt_read_ioc_pg_1(ioc);
break;
case FC:
if ((ioc->pfacts[0].ProtocolFlags &
MPI_PORTFACTS_PROTOCOL_LAN) &&
(ioc->lan_cnfg_page0.Header.PageLength == 0)) {
/*
* Pre-fetch the ports LAN MAC address!
* (LANPage1_t stuff)
*/
(void) GetLanConfigPages(ioc);
a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"LanAddr = %pMR\n", ioc->name, a));
}
break;
case SPI:
/* Get NVRAM and adapter maximums from SPP 0 and 2
*/
mpt_GetScsiPortSettings(ioc, 0);
/* Get version and length of SDP 1
*/
mpt_readScsiDevicePageHeaders(ioc, 0);
/* Find IM volumes
*/
if (ioc->facts.MsgVersion >= MPI_VERSION_01_02)
mpt_findImVolumes(ioc);
/* Check, and possibly reset, the coalescing value
*/
mpt_read_ioc_pg_1(ioc);
mpt_read_ioc_pg_4(ioc);
break;
}
GetIoUnitPage2(ioc);
mpt_get_manufacturing_pg_0(ioc);
}
out:
if ((ret != 0) && irq_allocated) {
free_irq(ioc->pci_irq, ioc);
if (ioc->msi_enable)
pci_disable_msi(ioc->pcidev);
}
return ret;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_detect_bound_ports - Search for matching PCI bus/dev_function
* @ioc: Pointer to MPT adapter structure
* @pdev: Pointer to (struct pci_dev) structure
*
* Search for PCI bus/dev_function which matches
* PCI bus/dev_function (+/-1) for newly discovered 929,
* 929X, 1030 or 1035.
*
* If match on PCI dev_function +/-1 is found, bind the two MPT adapters
* using alt_ioc pointer fields in their %MPT_ADAPTER structures.
*/
static void
mpt_detect_bound_ports(MPT_ADAPTER *ioc, struct pci_dev *pdev)
{
struct pci_dev *peer=NULL;
unsigned int slot = PCI_SLOT(pdev->devfn);
unsigned int func = PCI_FUNC(pdev->devfn);
MPT_ADAPTER *ioc_srch;
dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "PCI device %s devfn=%x/%x,"
" searching for devfn match on %x or %x\n",
ioc->name, pci_name(pdev), pdev->bus->number,
pdev->devfn, func-1, func+1));
peer = pci_get_slot(pdev->bus, PCI_DEVFN(slot,func-1));
if (!peer) {
peer = pci_get_slot(pdev->bus, PCI_DEVFN(slot,func+1));
if (!peer)
return;
}
list_for_each_entry(ioc_srch, &ioc_list, list) {
struct pci_dev *_pcidev = ioc_srch->pcidev;
if (_pcidev == peer) {
/* Paranoia checks */
if (ioc->alt_ioc != NULL) {
printk(MYIOC_s_WARN_FMT
"Oops, already bound (%s <==> %s)!\n",
ioc->name, ioc->name, ioc->alt_ioc->name);
break;
} else if (ioc_srch->alt_ioc != NULL) {
printk(MYIOC_s_WARN_FMT
"Oops, already bound (%s <==> %s)!\n",
ioc_srch->name, ioc_srch->name,
ioc_srch->alt_ioc->name);
break;
}
dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"FOUND! binding %s <==> %s\n",
ioc->name, ioc->name, ioc_srch->name));
ioc_srch->alt_ioc = ioc;
ioc->alt_ioc = ioc_srch;
}
}
pci_dev_put(peer);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_adapter_disable - Disable misbehaving MPT adapter.
* @ioc: Pointer to MPT adapter structure
*/
static void
mpt_adapter_disable(MPT_ADAPTER *ioc)
{
int sz;
int ret;
if (ioc->cached_fw != NULL) {
ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"%s: Pushing FW onto adapter\n", ioc->name, __func__));
if ((ret = mpt_downloadboot(ioc, (MpiFwHeader_t *)
ioc->cached_fw, CAN_SLEEP)) < 0) {
printk(MYIOC_s_WARN_FMT
": firmware downloadboot failure (%d)!\n",
ioc->name, ret);
}
}
/*
* Put the controller into ready state (if it's not already)
*/
if (mpt_GetIocState(ioc, 1) != MPI_IOC_STATE_READY) {
if (!SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET,
CAN_SLEEP)) {
if (mpt_GetIocState(ioc, 1) != MPI_IOC_STATE_READY)
printk(MYIOC_s_ERR_FMT "%s: IOC msg unit "
"reset failed to put ioc in ready state!\n",
ioc->name, __func__);
} else
printk(MYIOC_s_ERR_FMT "%s: IOC msg unit reset "
"failed!\n", ioc->name, __func__);
}
/* Disable adapter interrupts! */
synchronize_irq(ioc->pcidev->irq);
CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
ioc->active = 0;
/* Clear any lingering interrupt */
CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
CHIPREG_READ32(&ioc->chip->IntStatus);
if (ioc->alloc != NULL) {
sz = ioc->alloc_sz;
dexitprintk(ioc, printk(MYIOC_s_INFO_FMT "free @ %p, sz=%d bytes\n",
ioc->name, ioc->alloc, ioc->alloc_sz));
dma_free_coherent(&ioc->pcidev->dev, sz, ioc->alloc,
ioc->alloc_dma);
ioc->reply_frames = NULL;
ioc->req_frames = NULL;
ioc->alloc = NULL;
ioc->alloc_total -= sz;
}
if (ioc->sense_buf_pool != NULL) {
sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC);
dma_free_coherent(&ioc->pcidev->dev, sz, ioc->sense_buf_pool,
ioc->sense_buf_pool_dma);
ioc->sense_buf_pool = NULL;
ioc->alloc_total -= sz;
}
if (ioc->events != NULL){
sz = MPTCTL_EVENT_LOG_SIZE * sizeof(MPT_IOCTL_EVENTS);
kfree(ioc->events);
ioc->events = NULL;
ioc->alloc_total -= sz;
}
mpt_free_fw_memory(ioc);
kfree(ioc->spi_data.nvram);
mpt_inactive_raid_list_free(ioc);
kfree(ioc->raid_data.pIocPg2);
kfree(ioc->raid_data.pIocPg3);
ioc->spi_data.nvram = NULL;
ioc->raid_data.pIocPg3 = NULL;
if (ioc->spi_data.pIocPg4 != NULL) {
sz = ioc->spi_data.IocPg4Sz;
dma_free_coherent(&ioc->pcidev->dev, sz,
ioc->spi_data.pIocPg4,
ioc->spi_data.IocPg4_dma);
ioc->spi_data.pIocPg4 = NULL;
ioc->alloc_total -= sz;
}
if (ioc->ReqToChain != NULL) {
kfree(ioc->ReqToChain);
kfree(ioc->RequestNB);
ioc->ReqToChain = NULL;
}
kfree(ioc->ChainToChain);
ioc->ChainToChain = NULL;
if (ioc->HostPageBuffer != NULL) {
if((ret = mpt_host_page_access_control(ioc,
MPI_DB_HPBAC_FREE_BUFFER, NO_SLEEP)) != 0) {
printk(MYIOC_s_ERR_FMT
": %s: host page buffers free failed (%d)!\n",
ioc->name, __func__, ret);
}
dexitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"HostPageBuffer free @ %p, sz=%d bytes\n",
ioc->name, ioc->HostPageBuffer,
ioc->HostPageBuffer_sz));
dma_free_coherent(&ioc->pcidev->dev, ioc->HostPageBuffer_sz,
ioc->HostPageBuffer, ioc->HostPageBuffer_dma);
ioc->alloc_total -= ioc->HostPageBuffer_sz;
ioc->HostPageBuffer = NULL;
ioc->HostPageBuffer_sz = 0;
}
pci_set_drvdata(ioc->pcidev, NULL);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_adapter_dispose - Free all resources associated with an MPT adapter
* @ioc: Pointer to MPT adapter structure
*
* This routine unregisters h/w resources and frees all alloc'd memory
* associated with a MPT adapter structure.
*/
static void
mpt_adapter_dispose(MPT_ADAPTER *ioc)
{
int sz_first, sz_last;
if (ioc == NULL)
return;
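/* Snapshot alloc_total so the amount actually freed can be
* reported once teardown completes.
*/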
sz_first = ioc->alloc_total;
mpt_adapter_disable(ioc);
if (ioc->pci_irq != -1) {
free_irq(ioc->pci_irq, ioc);
if (ioc->msi_enable)
pci_disable_msi(ioc->pcidev);
ioc->pci_irq = -1;
}
if (ioc->memmap != NULL) {
iounmap(ioc->memmap);
ioc->memmap = NULL;
}
pci_disable_device(ioc->pcidev);
pci_release_selected_regions(ioc->pcidev, ioc->bars);
/* Zap the adapter lookup ptr! */
list_del(&ioc->list);
sz_last = ioc->alloc_total;
dprintk(ioc, printk(MYIOC_s_INFO_FMT "free'd %d of %d bytes\n",
ioc->name, sz_first-sz_last+(int)sizeof(*ioc), sz_first));
if (ioc->alt_ioc)
ioc->alt_ioc->alt_ioc = NULL;
kfree(ioc);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* MptDisplayIocCapabilities - Display IOC's capabilities.
* @ioc: Pointer to MPT adapter structure
*/
static void
MptDisplayIocCapabilities(MPT_ADAPTER *ioc)
{
int i = 0;
printk(KERN_INFO "%s: ", ioc->name);
if (ioc->prod_name)
pr_cont("%s: ", ioc->prod_name);
pr_cont("Capabilities={");
if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR) {
pr_cont("Initiator");
i++;
}
if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) {
pr_cont("%sTarget", i ? "," : "");
i++;
}
if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) {
pr_cont("%sLAN", i ? "," : "");
i++;
}
#if 0
/*
* This would probably evoke more questions than it's worth
*/
if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) {
pr_cont("%sLogBusAddr", i ? "," : "");
i++;
}
#endif
pr_cont("}\n");
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* MakeIocReady - Get IOC to a READY state, using KickStart if needed.
* @ioc: Pointer to MPT_ADAPTER structure
* @force: Force hard KickStart of IOC
* @sleepFlag: Specifies whether the process can sleep
*
* Returns:
* 1 - DIAG reset and READY
* 0 - READY initially OR soft reset and READY
* -1 - Any failure on KickStart
* -2 - Msg Unit Reset Failed
* -3 - IO Unit Reset Failed
* -4 - IOC owned by a PEER
*/
static int
MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag)
{
u32 ioc_state;
int statefault = 0;
int cntdn;
int hard_reset_done = 0;
int r;
int ii;
int whoinit;
/* Get current [raw] IOC state */
ioc_state = mpt_GetIocState(ioc, 0);
dhsprintk(ioc, printk(MYIOC_s_INFO_FMT "MakeIocReady [raw] state=%08x\n", ioc->name, ioc_state));
/*
* Check to see if IOC got left/stuck in doorbell handshake
* grip of death. If so, hard reset the IOC.
*/
if (ioc_state & MPI_DOORBELL_ACTIVE) {
statefault = 1;
printk(MYIOC_s_WARN_FMT "Unexpected doorbell active!\n",
ioc->name);
}
/* Is it already READY? */
if (!statefault &&
((ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_READY)) {
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
"IOC is in READY state\n", ioc->name));
return 0;
}
/*
* Check to see if IOC is in FAULT state.
*/
if ((ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT) {
statefault = 2;
printk(MYIOC_s_WARN_FMT "IOC is in FAULT state!!!\n",
ioc->name);
printk(MYIOC_s_WARN_FMT " FAULT code = %04xh\n",
ioc->name, ioc_state & MPI_DOORBELL_DATA_MASK);
}
/*
* Hmmm... Did it get left operational?
*/
if ((ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_OPERATIONAL) {
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "IOC operational unexpected\n",
ioc->name));
/* Check WhoInit.
* If PCI Peer, exit.
* Else, if no fault conditions are present, issue a MessageUnitReset
* Else, fall through to KickStart case
*/
whoinit = (ioc_state & MPI_DOORBELL_WHO_INIT_MASK) >> MPI_DOORBELL_WHO_INIT_SHIFT;
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
"whoinit 0x%x statefault %d force %d\n",
ioc->name, whoinit, statefault, force));
if (whoinit == MPI_WHOINIT_PCI_PEER)
return -4;
else {
if ((statefault == 0 ) && (force == 0)) {
if ((r = SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, sleepFlag)) == 0)
return 0;
}
statefault = 3;
}
}
hard_reset_done = KickStart(ioc, statefault||force, sleepFlag);
if (hard_reset_done < 0)
return -1;
/*
* Loop here waiting for IOC to come READY.
*/
ii = 0;
cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * 5; /* 5 seconds */
while ((ioc_state = mpt_GetIocState(ioc, 1)) != MPI_IOC_STATE_READY) {
if (ioc_state == MPI_IOC_STATE_OPERATIONAL) {
/*
* BIOS or previous driver load left IOC in OP state.
* Reset messaging FIFOs.
*/
if ((r = SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, sleepFlag)) != 0) {
printk(MYIOC_s_ERR_FMT "IOC msg unit reset failed!\n", ioc->name);
return -2;
}
} else if (ioc_state == MPI_IOC_STATE_RESET) {
/*
* Something is wrong. Try to get IOC back
* to a known state.
*/
if ((r = SendIocReset(ioc, MPI_FUNCTION_IO_UNIT_RESET, sleepFlag)) != 0) {
printk(MYIOC_s_ERR_FMT "IO unit reset failed!\n", ioc->name);
return -3;
}
}
ii++; cntdn--;
if (!cntdn) {
printk(MYIOC_s_ERR_FMT
"Wait IOC_READY state (0x%x) timeout(%d)!\n",
ioc->name, ioc_state, (int)((ii+5)/HZ));
return -ETIME;
}
if (sleepFlag == CAN_SLEEP) {
msleep(1);
} else {
mdelay (1); /* 1 msec delay */
}
}
if (statefault < 3) {
printk(MYIOC_s_INFO_FMT "Recovered from %s\n", ioc->name,
statefault == 1 ? "stuck handshake" : "IOC FAULT");
}
return hard_reset_done;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_GetIocState - Get the current state of a MPT adapter.
* @ioc: Pointer to MPT_ADAPTER structure
* @cooked: Request raw or cooked IOC state
*
* Returns all IOC Doorbell register bits if cooked==0, else just the
* Doorbell bits in MPI_IOC_STATE_MASK.
*/
u32
mpt_GetIocState(MPT_ADAPTER *ioc, int cooked)
{
u32 s, sc;
/* Get! */
s = CHIPREG_READ32(&ioc->chip->Doorbell);
sc = s & MPI_IOC_STATE_MASK;
/* Save! */
ioc->last_state = sc;
return cooked ? sc : s;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* GetIocFacts - Send IOCFacts request to MPT adapter.
* @ioc: Pointer to MPT_ADAPTER structure
* @sleepFlag: Specifies whether the process can sleep
* @reason: If recovery, only update facts.
*
* Returns 0 for success, non-zero for failure.
*/
static int
GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
{
IOCFacts_t get_facts;
IOCFactsReply_t *facts;
int r;
int req_sz;
int reply_sz;
int sz;
u32 vv;
u8 shiftFactor=1;
/* IOC *must* NOT be in RESET state! */
if (ioc->last_state == MPI_IOC_STATE_RESET) {
printk(KERN_ERR MYNAM
": ERROR - Can't get IOCFacts, %s NOT READY! (%08x)\n",
ioc->name, ioc->last_state);
return -44;
}
facts = &ioc->facts;
/* Destination (reply area)... */
reply_sz = sizeof(*facts);
memset(facts, 0, reply_sz);
/* Request area (get_facts on the stack right now!) */
req_sz = sizeof(get_facts);
memset(&get_facts, 0, req_sz);
get_facts.Function = MPI_FUNCTION_IOC_FACTS;
/* Assert: All other get_facts fields are zero! */
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Sending get IocFacts request req_sz=%d reply_sz=%d\n",
ioc->name, req_sz, reply_sz));
/* No non-zero fields in the get_facts request are greater than
* 1 byte in size, so we can just fire it off as is.
*/
r = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&get_facts,
reply_sz, (u16*)facts, 5 /*seconds*/, sleepFlag);
if (r != 0)
return r;
/*
* Now byte swap (GRRR) the necessary fields before any further
* inspection of reply contents.
*
* But need to do some sanity checks on MsgLength (byte) field
* to make sure we don't zero IOC's req_sz!
*/
/* Did we get a valid reply? */
if (facts->MsgLength > offsetof(IOCFactsReply_t, RequestFrameSize)/sizeof(u32)) {
if (reason == MPT_HOSTEVENT_IOC_BRINGUP) {
/*
* If we have not been here before, save off the first WhoInit value
*/
if (ioc->FirstWhoInit == WHOINIT_UNKNOWN)
ioc->FirstWhoInit = facts->WhoInit;
}
facts->MsgVersion = le16_to_cpu(facts->MsgVersion);
facts->MsgContext = le32_to_cpu(facts->MsgContext);
facts->IOCExceptions = le16_to_cpu(facts->IOCExceptions);
facts->IOCStatus = le16_to_cpu(facts->IOCStatus);
facts->IOCLogInfo = le32_to_cpu(facts->IOCLogInfo);
/* CHECKME! IOCStatus, IOCLogInfo */
facts->ReplyQueueDepth = le16_to_cpu(facts->ReplyQueueDepth);
facts->RequestFrameSize = le16_to_cpu(facts->RequestFrameSize);
/*
* FC f/w version changed between 1.1 and 1.2
* Old: u16{Major(4),Minor(4),SubMinor(8)}
* New: u32{Major(8),Minor(8),Unit(8),Dev(8)}
*/
if (facts->MsgVersion < MPI_VERSION_01_02) {
/*
* Handle old FC f/w style, convert to new...
*/
u16 oldv = le16_to_cpu(facts->Reserved_0101_FWVersion);
facts->FWVersion.Word =
((oldv<<12) & 0xFF000000) |
((oldv<<8) & 0x000FFF00);
} else
facts->FWVersion.Word = le32_to_cpu(facts->FWVersion.Word);
facts->ProductID = le16_to_cpu(facts->ProductID);
if ((ioc->facts.ProductID & MPI_FW_HEADER_PID_PROD_MASK)
> MPI_FW_HEADER_PID_PROD_TARGET_SCSI)
ioc->ir_firmware = 1;
facts->CurrentHostMfaHighAddr =
le32_to_cpu(facts->CurrentHostMfaHighAddr);
facts->GlobalCredits = le16_to_cpu(facts->GlobalCredits);
facts->CurrentSenseBufferHighAddr =
le32_to_cpu(facts->CurrentSenseBufferHighAddr);
facts->CurReplyFrameSize =
le16_to_cpu(facts->CurReplyFrameSize);
facts->IOCCapabilities = le32_to_cpu(facts->IOCCapabilities);
/*
* Handle NEW (!) IOCFactsReply fields in MPI-1.01.xx
* Older MPI-1.00.xx struct had 13 dwords, and enlarged
* to 14 in MPI-1.01.0x.
*/
if (facts->MsgLength >= (offsetof(IOCFactsReply_t,FWImageSize) + 7)/4 &&
facts->MsgVersion > MPI_VERSION_01_00) {
facts->FWImageSize = le32_to_cpu(facts->FWImageSize);
}
facts->FWImageSize = ALIGN(facts->FWImageSize, 4);
if (!facts->RequestFrameSize) {
/* Something is wrong! */
printk(MYIOC_s_ERR_FMT "IOC reported invalid 0 request size!\n",
ioc->name);
return -55;
}
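/* Derive from the IOC-reported BlockSize how many blocks cover a
* 64-byte frame, plus a power-of-two shift factor; both values are
* cached on the adapter for later request-frame sizing.
*/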
r = sz = facts->BlockSize;
vv = ((63 / (sz * 4)) + 1) & 0x03;
ioc->NB_for_64_byte_frame = vv;
while ( sz )
{
shiftFactor++;
sz = sz >> 1;
}
ioc->NBShiftFactor = shiftFactor;
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"NB_for_64_byte_frame=%x NBShiftFactor=%x BlockSize=%x\n",
ioc->name, vv, shiftFactor, r));
if (reason == MPT_HOSTEVENT_IOC_BRINGUP) {
/*
* Set values for this IOC's request & reply frame sizes,
* and request & reply queue depths...
*/
ioc->req_sz = min(MPT_DEFAULT_FRAME_SIZE, facts->RequestFrameSize * 4);
ioc->req_depth = min_t(int, MPT_MAX_REQ_DEPTH, facts->GlobalCredits);
ioc->reply_sz = MPT_REPLY_FRAME_SIZE;
ioc->reply_depth = min_t(int, MPT_DEFAULT_REPLY_DEPTH, facts->ReplyQueueDepth);
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "reply_sz=%3d, reply_depth=%4d\n",
ioc->name, ioc->reply_sz, ioc->reply_depth));
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "req_sz =%3d, req_depth =%4d\n",
ioc->name, ioc->req_sz, ioc->req_depth));
/* Get port facts! */
if ( (r = GetPortFacts(ioc, 0, sleepFlag)) != 0 )
return r;
}
} else {
printk(MYIOC_s_ERR_FMT
"Invalid IOC facts reply, msgLength=%d offsetof=%zd!\n",
ioc->name, facts->MsgLength, (offsetof(IOCFactsReply_t,
RequestFrameSize)/sizeof(u32)));
return -66;
}
return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* GetPortFacts - Send PortFacts request to MPT adapter.
* @ioc: Pointer to MPT_ADAPTER structure
* @portnum: Port number
* @sleepFlag: Specifies whether the process can sleep
*
* Returns 0 for success, non-zero for failure.
*/
static int
GetPortFacts(MPT_ADAPTER *ioc, int portnum, int sleepFlag)
{
PortFacts_t get_pfacts;
PortFactsReply_t *pfacts;
int ii;
int req_sz;
int reply_sz;
int max_id;
/* IOC *must* NOT be in RESET state! */
if (ioc->last_state == MPI_IOC_STATE_RESET) {
printk(MYIOC_s_ERR_FMT "Can't get PortFacts NOT READY! (%08x)\n",
ioc->name, ioc->last_state );
return -4;
}
pfacts = &ioc->pfacts[portnum];
/* Destination (reply area)... */
reply_sz = sizeof(*pfacts);
memset(pfacts, 0, reply_sz);
/* Request area (get_pfacts on the stack right now!) */
req_sz = sizeof(get_pfacts);
memset(&get_pfacts, 0, req_sz);
get_pfacts.Function = MPI_FUNCTION_PORT_FACTS;
get_pfacts.PortNumber = portnum;
/* Assert: All other get_pfacts fields are zero! */
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending get PortFacts(%d) request\n",
ioc->name, portnum));
/* No non-zero fields in the get_pfacts request are greater than
* 1 byte in size, so we can just fire it off as is.
*/
ii = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&get_pfacts,
reply_sz, (u16*)pfacts, 5 /*seconds*/, sleepFlag);
if (ii != 0)
return ii;
/* Did we get a valid reply? */
/* Now byte swap the necessary fields in the response. */
pfacts->MsgContext = le32_to_cpu(pfacts->MsgContext);
pfacts->IOCStatus = le16_to_cpu(pfacts->IOCStatus);
pfacts->IOCLogInfo = le32_to_cpu(pfacts->IOCLogInfo);
pfacts->MaxDevices = le16_to_cpu(pfacts->MaxDevices);
pfacts->PortSCSIID = le16_to_cpu(pfacts->PortSCSIID);
pfacts->ProtocolFlags = le16_to_cpu(pfacts->ProtocolFlags);
pfacts->MaxPostedCmdBuffers = le16_to_cpu(pfacts->MaxPostedCmdBuffers);
pfacts->MaxPersistentIDs = le16_to_cpu(pfacts->MaxPersistentIDs);
pfacts->MaxLanBuckets = le16_to_cpu(pfacts->MaxLanBuckets);
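/* SAS reports its device limit in PortSCSIID; other bus types use
* MaxDevices.  IDs beyond 255 spill onto additional buses.
*/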
max_id = (ioc->bus_type == SAS) ? pfacts->PortSCSIID :
pfacts->MaxDevices;
ioc->devices_per_bus = (max_id > 255) ? 256 : max_id;
ioc->number_of_buses = (ioc->devices_per_bus < 256) ? 1 : max_id/256;
/*
* Place all the devices on channels
*
* (for debugging)
*/
if (mpt_channel_mapping) {
ioc->devices_per_bus = 1;
ioc->number_of_buses = (max_id > 255) ? 255 : max_id;
}
return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* SendIocInit - Send IOCInit request to MPT adapter.
* @ioc: Pointer to MPT_ADAPTER structure
* @sleepFlag: Specifies whether the process can sleep
*
* Send IOCInit followed by PortEnable to bring IOC to OPERATIONAL state.
*
* Returns 0 for success, non-zero for failure.
*/
static int
SendIocInit(MPT_ADAPTER *ioc, int sleepFlag)
{
IOCInit_t ioc_init;
MPIDefaultReply_t init_reply;
u32 state;
int r;
int count;
int cntdn;
memset(&ioc_init, 0, sizeof(ioc_init));
memset(&init_reply, 0, sizeof(init_reply));
ioc_init.WhoInit = MPI_WHOINIT_HOST_DRIVER;
ioc_init.Function = MPI_FUNCTION_IOC_INIT;
/* If the IOCFacts FW_DOWNLOAD_BOOT flag is set, the host must
* upload (cache) the firmware image so it can be downloaded back
* to the IOC after a reset.  If we are in recovery and the image
* was already uploaded, cached_fw is non-NULL and the second
* upload is skipped.
*/
if (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT)
ioc->upload_fw = 1;
else
ioc->upload_fw = 0;
ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "upload_fw %d facts.Flags=%x\n",
ioc->name, ioc->upload_fw, ioc->facts.Flags));
ioc_init.MaxDevices = (U8)ioc->devices_per_bus;
ioc_init.MaxBuses = (U8)ioc->number_of_buses;
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "facts.MsgVersion=%x\n",
ioc->name, ioc->facts.MsgVersion));
if (ioc->facts.MsgVersion >= MPI_VERSION_01_05) {
// set MsgVersion and HeaderVersion host driver was built with
ioc_init.MsgVersion = cpu_to_le16(MPI_VERSION);
ioc_init.HeaderVersion = cpu_to_le16(MPI_HEADER_VERSION);
if (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_HOST_PAGE_BUFFER_PERSISTENT) {
ioc_init.HostPageBufferSGE = ioc->facts.HostPageBufferSGE;
} else if(mpt_host_page_alloc(ioc, &ioc_init))
return -99;
}
ioc_init.ReplyFrameSize = cpu_to_le16(ioc->reply_sz); /* in BYTES */
if (ioc->sg_addr_size == sizeof(u64)) {
/* Save the upper 32-bits of the request
* (reply) and sense buffers.
*/
ioc_init.HostMfaHighAddr = cpu_to_le32((u32)((u64)ioc->alloc_dma >> 32));
ioc_init.SenseBufferHighAddr = cpu_to_le32((u32)((u64)ioc->sense_buf_pool_dma >> 32));
} else {
/* Force 32-bit addressing */
ioc_init.HostMfaHighAddr = cpu_to_le32(0);
ioc_init.SenseBufferHighAddr = cpu_to_le32(0);
}
ioc->facts.CurrentHostMfaHighAddr = ioc_init.HostMfaHighAddr;
ioc->facts.CurrentSenseBufferHighAddr = ioc_init.SenseBufferHighAddr;
ioc->facts.MaxDevices = ioc_init.MaxDevices;
ioc->facts.MaxBuses = ioc_init.MaxBuses;
dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending IOCInit (req @ %p)\n",
ioc->name, &ioc_init));
r = mpt_handshake_req_reply_wait(ioc, sizeof(IOCInit_t), (u32*)&ioc_init,
sizeof(MPIDefaultReply_t), (u16*)&init_reply, 10 /*seconds*/, sleepFlag);
if (r != 0) {
printk(MYIOC_s_ERR_FMT "Sending IOCInit failed(%d)!\n",ioc->name, r);
return r;
}
/* No need to byte swap the multibyte fields in the reply
* since we don't even look at its contents.
*/
dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending PortEnable (req @ %p)\n",
ioc->name, &ioc_init));
if ((r = SendPortEnable(ioc, 0, sleepFlag)) != 0) {
printk(MYIOC_s_ERR_FMT "Sending PortEnable failed(%d)!\n",ioc->name, r);
return r;
}
/* YIKES! SUPER IMPORTANT!!!
* Poll IocState until _OPERATIONAL while IOC is doing
* LoopInit and TargetDiscovery!
*/
count = 0;
cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * 60; /* 60 seconds */
state = mpt_GetIocState(ioc, 1);
while (state != MPI_IOC_STATE_OPERATIONAL && --cntdn) {
if (sleepFlag == CAN_SLEEP) {
msleep(1);
} else {
mdelay(1);
}
state = mpt_GetIocState(ioc, 1);
count++;
}
/* Because --cntdn is evaluated in the loop condition, the loop
* exits with cntdn == 0 on timeout; the timeout must therefore be
* detected here, after the loop, not inside it.
*/
if (state != MPI_IOC_STATE_OPERATIONAL) {
printk(MYIOC_s_ERR_FMT "Wait IOC_OP state timeout(%d)!\n",
ioc->name, (int)((count+5)/HZ));
return -9;
}
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Wait IOC_OPERATIONAL state (cnt=%d)\n",
ioc->name, count));
ioc->aen_event_read_flag=0;
return r;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* SendPortEnable - Send PortEnable request to MPT adapter port.
* @ioc: Pointer to MPT_ADAPTER structure
* @portnum: Port number to enable
* @sleepFlag: Specifies whether the process can sleep
*
* Send PortEnable to bring IOC to OPERATIONAL state.
*
* Returns 0 for success, non-zero for failure.
*/
static int
SendPortEnable(MPT_ADAPTER *ioc, int portnum, int sleepFlag)
{
PortEnable_t port_enable;
MPIDefaultReply_t reply_buf;
int rc;
int req_sz;
int reply_sz;
/* Destination... */
reply_sz = sizeof(MPIDefaultReply_t);
memset(&reply_buf, 0, reply_sz);
req_sz = sizeof(PortEnable_t);
memset(&port_enable, 0, req_sz);
port_enable.Function = MPI_FUNCTION_PORT_ENABLE;
port_enable.PortNumber = portnum;
/* port_enable.ChainOffset = 0; */
/* port_enable.MsgFlags = 0; */
/* port_enable.MsgContext = 0; */
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending Port(%d)Enable (req @ %p)\n",
ioc->name, portnum, &port_enable));
/* RAID FW may take a long time to enable
*/
if (ioc->ir_firmware || ioc->bus_type == SAS) {
rc = mpt_handshake_req_reply_wait(ioc, req_sz,
(u32*)&port_enable, reply_sz, (u16*)&reply_buf,
300 /*seconds*/, sleepFlag);
} else {
rc = mpt_handshake_req_reply_wait(ioc, req_sz,
(u32*)&port_enable, reply_sz, (u16*)&reply_buf,
30 /*seconds*/, sleepFlag);
}
return rc;
}
/**
* mpt_alloc_fw_memory - allocate firmware memory
* @ioc: Pointer to MPT_ADAPTER structure
* @size: total FW bytes
*
* If memory has already been allocated, the same (cached) value
* is returned.
*
* Return 0 if successful, or non-zero for failure
**/
int
mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size)
{
int rc;
if (ioc->cached_fw) {
rc = 0; /* use already allocated memory */
goto out;
}
else if (ioc->alt_ioc && ioc->alt_ioc->cached_fw) {
ioc->cached_fw = ioc->alt_ioc->cached_fw; /* use alt_ioc's memory */
ioc->cached_fw_dma = ioc->alt_ioc->cached_fw_dma;
rc = 0;
goto out;
}
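/* GFP_ATOMIC keeps this allocation safe even in contexts where
* sleeping is not allowed.
*/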
ioc->cached_fw = dma_alloc_coherent(&ioc->pcidev->dev, size,
&ioc->cached_fw_dma, GFP_ATOMIC);
if (!ioc->cached_fw) {
printk(MYIOC_s_ERR_FMT "Unable to allocate memory for the cached firmware image!\n",
ioc->name);
rc = -1;
} else {
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "FW Image @ %p[%p], sz=%d[%x] bytes\n",
ioc->name, ioc->cached_fw, (void *)(ulong)ioc->cached_fw_dma, size, size));
ioc->alloc_total += size;
rc = 0;
}
out:
return rc;
}
/**
* mpt_free_fw_memory - free firmware memory
* @ioc: Pointer to MPT_ADAPTER structure
*
* Frees the cached firmware image referenced by @ioc (if any),
* and clears the cached_fw pointer.
**/
void
mpt_free_fw_memory(MPT_ADAPTER *ioc)
{
int sz;
if (!ioc->cached_fw)
return;
sz = ioc->facts.FWImageSize;
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "free_fw_memory: FW Image @ %p[%p], sz=%d[%x] bytes\n",
ioc->name, ioc->cached_fw, (void *)(ulong)ioc->cached_fw_dma, sz, sz));
dma_free_coherent(&ioc->pcidev->dev, sz, ioc->cached_fw,
ioc->cached_fw_dma);
ioc->alloc_total -= sz;
ioc->cached_fw = NULL;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_do_upload - Construct and Send FWUpload request to MPT adapter port.
* @ioc: Pointer to MPT_ADAPTER structure
* @sleepFlag: Specifies whether the process can sleep
*
* Returns 0 for success, >0 for handshake failure,
* <0 for fw upload failure.
*
* Remark: If this IOC is bound to another and a successful FWUpload
* was already performed on the bound IOC, the second image is
* discarded and its memory is freed. Both channels must upload to
* prevent the IOC from running in degraded mode.
*/
static int
mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag)
{
u8 reply[sizeof(FWUploadReply_t)];
FWUpload_t *prequest;
FWUploadReply_t *preply;
FWUploadTCSGE_t *ptcsge;
u32 flagsLength;
int ii, sz, reply_sz;
int cmdStatus;
int request_size;
/* If the image size is 0, we are done.
*/
if ((sz = ioc->facts.FWImageSize) == 0)
return 0;
if (mpt_alloc_fw_memory(ioc, ioc->facts.FWImageSize) != 0)
return -ENOMEM;
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT ": FW Image @ %p[%p], sz=%d[%x] bytes\n",
ioc->name, ioc->cached_fw, (void *)(ulong)ioc->cached_fw_dma, sz, sz));
prequest = (sleepFlag == NO_SLEEP) ? kzalloc(ioc->req_sz, GFP_ATOMIC) :
kzalloc(ioc->req_sz, GFP_KERNEL);
if (!prequest) {
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "fw upload failed "
"while allocating memory \n", ioc->name));
mpt_free_fw_memory(ioc);
return -ENOMEM;
}
preply = (FWUploadReply_t *)&reply;
reply_sz = sizeof(reply);
memset(preply, 0, reply_sz);
prequest->ImageType = MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM;
prequest->Function = MPI_FUNCTION_FW_UPLOAD;
ptcsge = (FWUploadTCSGE_t *) &prequest->SGL;
ptcsge->DetailsLength = 12;
ptcsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT;
ptcsge->ImageSize = cpu_to_le32(sz);
ptcsge++;
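/* The SGL is a transaction-context element describing the image,
* followed by one simple read SGE pointing at the DMA buffer.
*/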
flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ | sz;
ioc->add_sge((char *)ptcsge, flagsLength, ioc->cached_fw_dma);
request_size = offsetof(FWUpload_t, SGL) + sizeof(FWUploadTCSGE_t) +
ioc->SGE_size;
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending FW Upload "
" (req @ %p) fw_size=%d mf_request_size=%d\n", ioc->name, prequest,
ioc->facts.FWImageSize, request_size));
DBG_DUMP_FW_REQUEST_FRAME(ioc, (u32 *)prequest);
ii = mpt_handshake_req_reply_wait(ioc, request_size, (u32 *)prequest,
reply_sz, (u16 *)preply, 65 /*seconds*/, sleepFlag);
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "FW Upload completed "
"rc=%x \n", ioc->name, ii));
cmdStatus = -EFAULT;
if (ii == 0) {
/* Handshake transfer was complete and successful.
* Check the Reply Frame.
*/
int status;
status = le16_to_cpu(preply->IOCStatus) &
MPI_IOCSTATUS_MASK;
if (status == MPI_IOCSTATUS_SUCCESS &&
ioc->facts.FWImageSize ==
le32_to_cpu(preply->ActualImageSize))
cmdStatus = 0;
}
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": do_upload cmdStatus=%d \n",
ioc->name, cmdStatus));
if (cmdStatus) {
ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "fw upload failed, "
"freeing image \n", ioc->name));
mpt_free_fw_memory(ioc);
}
kfree(prequest);
return cmdStatus;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_downloadboot - DownloadBoot code
* @ioc: Pointer to MPT_ADAPTER structure
* @pFwHeader: Pointer to firmware header info
* @sleepFlag: Specifies whether the process can sleep
*
* FwDownloadBoot requires Programmed IO access.
*
* Returns 0 for success
*	-3 if the adapter could not be brought out of diagnostic reset
*	-EFAULT for other download/boot failures.
*/
static int
mpt_downloadboot(MPT_ADAPTER *ioc, MpiFwHeader_t *pFwHeader, int sleepFlag)
{
MpiExtImageHeader_t *pExtImage;
u32 fwSize;
u32 diag0val;
int count;
u32 *ptrFw;
u32 diagRwData;
u32 nextImage;
u32 load_addr;
u32 ioc_state=0;
ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "downloadboot: fw size 0x%x (%d), FW Ptr %p\n",
ioc->name, pFwHeader->ImageSize, pFwHeader->ImageSize, pFwHeader));
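/* Unlock the Diagnostic register by writing the magic key sequence
* to WriteSequence, then hold off IOC boot with the ARM disabled
* while the image is written.
*/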
CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF);
CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_1ST_KEY_VALUE);
CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_2ND_KEY_VALUE);
CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_3RD_KEY_VALUE);
CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_4TH_KEY_VALUE);
CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_5TH_KEY_VALUE);
CHIPREG_WRITE32(&ioc->chip->Diagnostic, (MPI_DIAG_PREVENT_IOC_BOOT | MPI_DIAG_DISABLE_ARM));
/* wait 1 msec */
if (sleepFlag == CAN_SLEEP) {
msleep(1);
} else {
mdelay (1);
}
diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val | MPI_DIAG_RESET_ADAPTER);
for (count = 0; count < 30; count ++) {
diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
if (!(diag0val & MPI_DIAG_RESET_ADAPTER)) {
ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RESET_ADAPTER cleared, count=%d\n",
ioc->name, count));
break;
}
/* wait .1 sec */
if (sleepFlag == CAN_SLEEP) {
msleep (100);
} else {
mdelay (100);
}
}
if ( count == 30 ) {
ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "downloadboot failed! "
"Unable to get MPI_DIAG_DRWE mode, diag0val=%x\n",
ioc->name, diag0val));
return -3;
}
CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF);
CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_1ST_KEY_VALUE);
CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_2ND_KEY_VALUE);
CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_3RD_KEY_VALUE);
CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_4TH_KEY_VALUE);
CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_5TH_KEY_VALUE);
/* Set the DiagRwEn and Disable ARM bits */
CHIPREG_WRITE32(&ioc->chip->Diagnostic, (MPI_DIAG_RW_ENABLE | MPI_DIAG_DISABLE_ARM));
fwSize = (pFwHeader->ImageSize + 3)/4;
ptrFw = (u32 *) pFwHeader;
/* Write the LoadStartAddress to the DiagRw Address Register
* using Programmed IO
*/
if (ioc->errata_flag_1064)
pci_enable_io_access(ioc->pcidev);
CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, pFwHeader->LoadStartAddress);
ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "LoadStart addr written 0x%x \n",
ioc->name, pFwHeader->LoadStartAddress));
ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Write FW Image: 0x%x bytes @ %p\n",
ioc->name, fwSize*4, ptrFw));
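/* DiagRwAddress auto-increments, so the image streams out one
* 32-bit word at a time through DiagRwData.
*/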
while (fwSize--) {
CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, *ptrFw++);
}
nextImage = pFwHeader->NextImageHeaderOffset;
while (nextImage) {
pExtImage = (MpiExtImageHeader_t *) ((char *)pFwHeader + nextImage);
load_addr = pExtImage->LoadStartAddress;
fwSize = (pExtImage->ImageSize + 3) >> 2;
ptrFw = (u32 *)pExtImage;
ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Write Ext Image: 0x%x (%d) bytes @ %p load_addr=%x\n",
ioc->name, fwSize*4, fwSize*4, ptrFw, load_addr));
CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, load_addr);
while (fwSize--) {
CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, *ptrFw++);
}
nextImage = pExtImage->NextImageHeaderOffset;
}
/* Write the IopResetVectorRegAddr */
ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Write IopResetVector Addr=%x! \n", ioc->name, pFwHeader->IopResetRegAddr));
CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, pFwHeader->IopResetRegAddr);
/* Write the IopResetVectorValue */
ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Write IopResetVector Value=%x! \n", ioc->name, pFwHeader->IopResetVectorValue));
CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, pFwHeader->IopResetVectorValue);
/* Clear the internal flash bad bit - autoincrementing register,
* so must do two writes.
*/
if (ioc->bus_type == SPI) {
/*
* 1030 and 1035 H/W errata, workaround to access
* the ClearFlashBadSignatureBit
*/
CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, 0x3F000000);
diagRwData = CHIPREG_PIO_READ32(&ioc->pio_chip->DiagRwData);
diagRwData |= 0x40000000;
CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, 0x3F000000);
CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, diagRwData);
} else /* if((ioc->bus_type == SAS) || (ioc->bus_type == FC)) */ {
diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val |
MPI_DIAG_CLEAR_FLASH_BAD_SIG);
/* wait 1 msec */
if (sleepFlag == CAN_SLEEP) {
msleep (1);
} else {
mdelay (1);
}
}
if (ioc->errata_flag_1064)
pci_disable_io_access(ioc->pcidev);
diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "downloadboot diag0val=%x, "
"turning off PREVENT_IOC_BOOT, DISABLE_ARM, RW_ENABLE\n",
ioc->name, diag0val));
diag0val &= ~(MPI_DIAG_PREVENT_IOC_BOOT | MPI_DIAG_DISABLE_ARM | MPI_DIAG_RW_ENABLE);
ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "downloadboot now diag0val=%x\n",
ioc->name, diag0val));
CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val);
/* Write 0xFF to reset the sequencer */
CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF);
if (ioc->bus_type == SAS) {
ioc_state = mpt_GetIocState(ioc, 0);
if ( (GetIocFacts(ioc, sleepFlag,
MPT_HOSTEVENT_IOC_BRINGUP)) != 0 ) {
ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "GetIocFacts failed: IocState=%x\n",
ioc->name, ioc_state));
return -EFAULT;
}
}
for (count=0; count<HZ*20; count++) {
if ((ioc_state = mpt_GetIocState(ioc, 0)) & MPI_IOC_STATE_READY) {
ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"downloadboot successful! (count=%d) IocState=%x\n",
ioc->name, count, ioc_state));
if (ioc->bus_type == SAS) {
return 0;
}
if ((SendIocInit(ioc, sleepFlag)) != 0) {
ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"downloadboot: SendIocInit failed\n",
ioc->name));
return -EFAULT;
}
ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"downloadboot: SendIocInit successful\n",
ioc->name));
return 0;
}
if (sleepFlag == CAN_SLEEP) {
msleep (10);
} else {
mdelay (10);
}
}
ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"downloadboot failed! IocState=%x\n",ioc->name, ioc_state));
return -EFAULT;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* KickStart - Perform hard reset of MPT adapter.
* @ioc: Pointer to MPT_ADAPTER structure
* @force: Force hard reset
* @sleepFlag: Specifies whether the process can sleep
*
* This routine places MPT adapter in diagnostic mode via the
* WriteSequence register, and then performs a hard reset of adapter
* via the Diagnostic register.
*
* Inputs: sleepflag - CAN_SLEEP (non-interrupt thread)
* or NO_SLEEP (interrupt thread, use mdelay)
* force - 1 if doorbell active, board fault state
* board operational, IOC_RECOVERY or
* IOC_BRINGUP and there is an alt_ioc.
* 0 else
*
* Returns:
* 1 - hard reset, READY
* 0 - no reset due to History bit, READY
* -1 - no reset due to History bit but not READY
* OR reset but failed to come READY
* -2 - no reset, could not enter DIAG mode
* -3 - reset but bad FW bit
*/
static int
KickStart(MPT_ADAPTER *ioc, int force, int sleepFlag)
{
int hard_reset_done = 0;
u32 ioc_state=0;
int cnt,cntdn;
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "KickStarting!\n", ioc->name));
if (ioc->bus_type == SPI) {
/* Always issue a Msg Unit Reset first. This will clear some
* SCSI bus hang conditions.
*/
SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, sleepFlag);
if (sleepFlag == CAN_SLEEP) {
msleep (1000);
} else {
mdelay (1000);
}
}
hard_reset_done = mpt_diag_reset(ioc, force, sleepFlag);
if (hard_reset_done < 0)
return hard_reset_done;
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Diagnostic reset successful!\n",
ioc->name));
cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * 2; /* 2 seconds */
for (cnt=0; cnt<cntdn; cnt++) {
ioc_state = mpt_GetIocState(ioc, 1);
if ((ioc_state == MPI_IOC_STATE_READY) || (ioc_state == MPI_IOC_STATE_OPERATIONAL)) {
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "KickStart successful! (cnt=%d)\n",
ioc->name, cnt));
return hard_reset_done;
}
if (sleepFlag == CAN_SLEEP) {
msleep (10);
} else {
mdelay (10);
}
}
dinitprintk(ioc, printk(MYIOC_s_ERR_FMT "Failed to come READY after reset! IocState=%x\n",
ioc->name, mpt_GetIocState(ioc, 0)));
return -1;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_diag_reset - Perform hard reset of the adapter.
* @ioc: Pointer to MPT_ADAPTER structure
* @ignore: Set to perform the reset even when the reset history
*	bit is set; clear to skip the reset in that case
* @sleepFlag: CAN_SLEEP if called in a non-interrupt thread,
* else set to NO_SLEEP (use mdelay instead)
*
* This routine places the adapter in diagnostic mode via the
* WriteSequence register and then performs a hard reset of adapter
* via the Diagnostic register. Adapter should be in ready state
* upon successful completion.
*
* Returns: 1 hard reset successful
*	  0 no reset performed because reset history bit set
*	 -1 1078 reset performed but IOC never reached READY
*	 -2 enabling diagnostic mode failed
*	 -3 diagnostic reset failed
*/
static int
mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
{
u32 diag0val;
u32 doorbell;
int hard_reset_done = 0;
int count = 0;
u32 diag1val = 0;
MpiFwHeader_t *cached_fw; /* Pointer to FW */
u8 cb_idx;
/* Clear any existing interrupts */
CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
if (ioc->pcidev->device == MPI_MANUFACTPAGE_DEVID_SAS1078) {
if (!ignore)
return 0;
drsprintk(ioc, printk(MYIOC_s_WARN_FMT "%s: Doorbell=%p; 1078 reset "
"address=%p\n", ioc->name, __func__,
&ioc->chip->Doorbell, &ioc->chip->Reset_1078));
CHIPREG_WRITE32(&ioc->chip->Reset_1078, 0x07);
if (sleepFlag == CAN_SLEEP)
msleep(1);
else
mdelay(1);
/*
* Call each currently registered protocol IOC reset handler
* with pre-reset indication.
* NOTE: If we're doing _IOC_BRINGUP, there can be no
* MptResetHandlers[] registered yet.
*/
for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
if (MptResetHandlers[cb_idx])
(*(MptResetHandlers[cb_idx]))(ioc,
MPT_IOC_PRE_RESET);
}
for (count = 0; count < 60; count ++) {
doorbell = CHIPREG_READ32(&ioc->chip->Doorbell);
doorbell &= MPI_IOC_STATE_MASK;
drsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"looking for READY STATE: doorbell=%x"
" count=%d\n",
ioc->name, doorbell, count));
if (doorbell == MPI_IOC_STATE_READY) {
return 1;
}
/* wait 1 sec */
if (sleepFlag == CAN_SLEEP)
msleep(1000);
else
mdelay(1000);
}
return -1;
}
/* Use "Diagnostic reset" method! (only thing available!) */
diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
if (ioc->debug_level & MPT_DEBUG) {
if (ioc->alt_ioc)
diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic);
dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "DbG1: diag0=%08x, diag1=%08x\n",
ioc->name, diag0val, diag1val));
}
/* Do the reset if we are told to ignore the reset history
* or if the reset history is 0
*/
if (ignore || !(diag0val & MPI_DIAG_RESET_HISTORY)) {
while ((diag0val & MPI_DIAG_DRWE) == 0) {
/* Write magic sequence to WriteSequence register
* Loop until in diagnostic mode
*/
CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF);
CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_1ST_KEY_VALUE);
CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_2ND_KEY_VALUE);
CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_3RD_KEY_VALUE);
CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_4TH_KEY_VALUE);
CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_5TH_KEY_VALUE);
/* wait 100 msec */
if (sleepFlag == CAN_SLEEP) {
msleep (100);
} else {
mdelay (100);
}
count++;
if (count > 20) {
printk(MYIOC_s_ERR_FMT "Enable Diagnostic mode FAILED! (%02xh)\n",
ioc->name, diag0val);
return -2;
}
diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Wrote magic DiagWriteEn sequence (%x)\n",
ioc->name, diag0val));
}
if (ioc->debug_level & MPT_DEBUG) {
if (ioc->alt_ioc)
diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic);
dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "DbG2: diag0=%08x, diag1=%08x\n",
ioc->name, diag0val, diag1val));
}
/*
* Disable the ARM (Bug fix)
*
*/
CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val | MPI_DIAG_DISABLE_ARM);
mdelay(1);
/*
* Now hit the reset bit in the Diagnostic register
* (THE BIG HAMMER!) (Clears DRWE bit).
*/
CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val | MPI_DIAG_RESET_ADAPTER);
hard_reset_done = 1;
dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Diagnostic reset performed\n",
ioc->name));
/*
* Call each currently registered protocol IOC reset handler
* with pre-reset indication.
* NOTE: If we're doing _IOC_BRINGUP, there can be no
* MptResetHandlers[] registered yet.
*/
for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
if (MptResetHandlers[cb_idx]) {
mpt_signal_reset(cb_idx,
ioc, MPT_IOC_PRE_RESET);
if (ioc->alt_ioc) {
mpt_signal_reset(cb_idx,
ioc->alt_ioc, MPT_IOC_PRE_RESET);
}
}
}
if (ioc->cached_fw)
cached_fw = (MpiFwHeader_t *)ioc->cached_fw;
else if (ioc->alt_ioc && ioc->alt_ioc->cached_fw)
cached_fw = (MpiFwHeader_t *)ioc->alt_ioc->cached_fw;
else
cached_fw = NULL;
if (cached_fw) {
/* If the DownloadBoot operation fails, the
* IOC will be left unusable. This is a fatal error
* case. _diag_reset will return < 0
*/
for (count = 0; count < 30; count ++) {
diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
if (!(diag0val & MPI_DIAG_RESET_ADAPTER)) {
break;
}
dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "cached_fw: diag0val=%x count=%d\n",
ioc->name, diag0val, count));
/* wait 1 sec */
if (sleepFlag == CAN_SLEEP) {
msleep (1000);
} else {
mdelay (1000);
}
}
if ((count = mpt_downloadboot(ioc, cached_fw, sleepFlag)) < 0) {
printk(MYIOC_s_WARN_FMT
"firmware downloadboot failure (%d)!\n", ioc->name, count);
}
} else {
/* Wait for FW to reload and for board
* to go to the READY state.
* Maximum wait is 60 seconds.
* If the board fails to come READY here, no error is
* returned; the calling program will check again.
*/
for (count = 0; count < 60; count ++) {
doorbell = CHIPREG_READ32(&ioc->chip->Doorbell);
doorbell &= MPI_IOC_STATE_MASK;
drsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"looking for READY STATE: doorbell=%x"
" count=%d\n", ioc->name, doorbell, count));
if (doorbell == MPI_IOC_STATE_READY) {
break;
}
/* wait 1 sec */
if (sleepFlag == CAN_SLEEP) {
msleep (1000);
} else {
mdelay (1000);
}
}
if (doorbell != MPI_IOC_STATE_READY)
printk(MYIOC_s_ERR_FMT "Failed to come READY "
"after reset! IocState=%x\n", ioc->name,
doorbell);
}
}
diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
if (ioc->debug_level & MPT_DEBUG) {
if (ioc->alt_ioc)
diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic);
dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "DbG3: diag0=%08x, diag1=%08x\n",
ioc->name, diag0val, diag1val));
}
/* Clear RESET_HISTORY bit! Place board in the
* diagnostic mode to update the diag register.
*/
diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
count = 0;
while ((diag0val & MPI_DIAG_DRWE) == 0) {
/* Write magic sequence to WriteSequence register
* Loop until in diagnostic mode
*/
CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF);
CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_1ST_KEY_VALUE);
CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_2ND_KEY_VALUE);
CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_3RD_KEY_VALUE);
CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_4TH_KEY_VALUE);
CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_5TH_KEY_VALUE);
/* wait 100 msec */
if (sleepFlag == CAN_SLEEP) {
msleep (100);
} else {
mdelay (100);
}
count++;
if (count > 20) {
printk(MYIOC_s_ERR_FMT "Enable Diagnostic mode FAILED! (%02xh)\n",
ioc->name, diag0val);
break;
}
diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
}
diag0val &= ~MPI_DIAG_RESET_HISTORY;
CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val);
diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
if (diag0val & MPI_DIAG_RESET_HISTORY) {
printk(MYIOC_s_WARN_FMT "ResetHistory bit failed to clear!\n",
ioc->name);
}
/* Disable Diagnostic Mode
*/
CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFFFFFFFF);
/* Check FW reload status flags.
*/
diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
if (diag0val & (MPI_DIAG_FLASH_BAD_SIG | MPI_DIAG_RESET_ADAPTER | MPI_DIAG_DISABLE_ARM)) {
printk(MYIOC_s_ERR_FMT "Diagnostic reset FAILED! (%02xh)\n",
ioc->name, diag0val);
return -3;
}
if (ioc->debug_level & MPT_DEBUG) {
if (ioc->alt_ioc)
diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic);
dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "DbG4: diag0=%08x, diag1=%08x\n",
ioc->name, diag0val, diag1val));
}
/*
* Reset flag that says we've enabled event notification
*/
ioc->facts.EventState = 0;
if (ioc->alt_ioc)
ioc->alt_ioc->facts.EventState = 0;
return hard_reset_done;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* SendIocReset - Send IOCReset request to MPT adapter.
* @ioc: Pointer to MPT_ADAPTER structure
* @reset_type: reset type, expected values are
* %MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET or %MPI_FUNCTION_IO_UNIT_RESET
* @sleepFlag: Specifies whether the process can sleep
*
* Send IOCReset request to the MPT adapter.
*
* Returns 0 for success, non-zero for failure.
*/
static int
SendIocReset(MPT_ADAPTER *ioc, u8 reset_type, int sleepFlag)
{
int r;
u32 state;
int cntdn, count;
drsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending IOC reset(0x%02x)!\n",
ioc->name, reset_type));
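/* The reset function code is written directly into the Doorbell
* register; WaitForDoorbellAck confirms the IOC latched it.
*/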
CHIPREG_WRITE32(&ioc->chip->Doorbell, reset_type<<MPI_DOORBELL_FUNCTION_SHIFT);
if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0)
return r;
/* FW ACK'd request, wait for READY state
*/
count = 0;
cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * 15; /* 15 seconds */
while ((state = mpt_GetIocState(ioc, 1)) != MPI_IOC_STATE_READY) {
cntdn--;
count++;
if (!cntdn) {
if (sleepFlag != CAN_SLEEP)
count *= 10;
printk(MYIOC_s_ERR_FMT
"Wait IOC_READY state (0x%x) timeout(%d)!\n",
ioc->name, state, (int)((count+5)/HZ));
return -ETIME;
}
if (sleepFlag == CAN_SLEEP) {
msleep(1);
} else {
mdelay (1); /* 1 msec delay */
}
}
/* TODO!
* Cleanup all event stuff for this IOC; re-issue EventNotification
* request if needed.
*/
if (ioc->facts.Function)
ioc->facts.EventState = 0;
return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* initChainBuffers - Allocate memory for and initialize chain buffers
* @ioc: Pointer to MPT_ADAPTER structure
*
* Allocates memory for and initializes chain buffers,
* chain buffer control arrays and spinlock.
*/
static int
initChainBuffers(MPT_ADAPTER *ioc)
{
u8 *mem;
int sz, ii, num_chain;
int scale, num_sge, numSGE;
/* ReqToChain size must equal the req_depth
* index = req_idx
*/
if (ioc->ReqToChain == NULL) {
sz = ioc->req_depth * sizeof(int);
mem = kmalloc(sz, GFP_ATOMIC);
if (mem == NULL)
return -1;
ioc->ReqToChain = (int *) mem;
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ReqToChain alloc @ %p, sz=%d bytes\n",
ioc->name, mem, sz));
mem = kmalloc(sz, GFP_ATOMIC);
if (mem == NULL)
return -1;
ioc->RequestNB = (int *) mem;
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RequestNB alloc @ %p, sz=%d bytes\n",
ioc->name, mem, sz));
}
for (ii = 0; ii < ioc->req_depth; ii++) {
ioc->ReqToChain[ii] = MPT_HOST_NO_CHAIN;
}
/* ChainToChain size must equal the total number
* of chain buffers to be allocated.
* index = chain_idx
*
* Calculate the number of chain buffers needed(plus 1) per I/O
* then multiply the maximum number of simultaneous cmds
*
* num_sge = num sge in request frame + last chain buffer
* scale = num sge per chain buffer if no chain element
*/
scale = ioc->req_sz / ioc->SGE_size;
if (ioc->sg_addr_size == sizeof(u64))
num_sge = scale + (ioc->req_sz - 60) / ioc->SGE_size;
else
num_sge = 1 + scale + (ioc->req_sz - 64) / ioc->SGE_size;
if (ioc->sg_addr_size == sizeof(u64)) {
numSGE = (scale - 1) * (ioc->facts.MaxChainDepth-1) + scale +
(ioc->req_sz - 60) / ioc->SGE_size;
} else {
numSGE = 1 + (scale - 1) * (ioc->facts.MaxChainDepth-1) +
scale + (ioc->req_sz - 64) / ioc->SGE_size;
}
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "num_sge=%d numSGE=%d\n",
ioc->name, num_sge, numSGE));
if (ioc->bus_type == FC) {
if (numSGE > MPT_SCSI_FC_SG_DEPTH)
numSGE = MPT_SCSI_FC_SG_DEPTH;
} else {
if (numSGE > MPT_SCSI_SG_DEPTH)
numSGE = MPT_SCSI_SG_DEPTH;
}
num_chain = 1;
while (numSGE - num_sge > 0) {
num_chain++;
num_sge += (scale - 1);
}
num_chain++;
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Now numSGE=%d num_sge=%d num_chain=%d\n",
ioc->name, numSGE, num_sge, num_chain));
if (ioc->bus_type == SPI)
num_chain *= MPT_SCSI_CAN_QUEUE;
else if (ioc->bus_type == SAS)
num_chain *= MPT_SAS_CAN_QUEUE;
else
num_chain *= MPT_FC_CAN_QUEUE;
ioc->num_chain = num_chain;
sz = num_chain * sizeof(int);
if (ioc->ChainToChain == NULL) {
mem = kmalloc(sz, GFP_ATOMIC);
if (mem == NULL)
return -1;
ioc->ChainToChain = (int *) mem;
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ChainToChain alloc @ %p, sz=%d bytes\n",
ioc->name, mem, sz));
} else {
mem = (u8 *) ioc->ChainToChain;
}
memset(mem, 0xFF, sz);
return num_chain;
}
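/*
 * Worked example of the sizing above, with purely illustrative numbers
 * (req_sz = 128, 64-bit SGEs so SGE_size = 12, MaxChainDepth = 8, and
 * an assumed SG-depth cap of 40):
 *
 *	scale   = 128 / 12                     = 10 SGEs per frame
 *	num_sge = 10 + (128 - 60) / 12         = 15 (frame + last chain)
 *	numSGE  = 9 * 7 + 10 + (128 - 60) / 12 = 78, capped to 40
 *
 * The loop then adds one chain buffer (scale - 1 = 9 usable SGEs each)
 * until the 40 SGEs are covered: num_chain ends up at 5, which is
 * finally multiplied by the per-bus-type queue depth.
 */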
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* PrimeIocFifos - Initialize IOC request and reply FIFOs.
* @ioc: Pointer to MPT_ADAPTER structure
*
* This routine allocates memory for the MPT reply and request frame
* pools (if necessary), and primes the IOC reply FIFO with
* reply frames.
*
* Returns 0 for success, non-zero for failure.
*/
static int
PrimeIocFifos(MPT_ADAPTER *ioc)
{
MPT_FRAME_HDR *mf;
unsigned long flags;
dma_addr_t alloc_dma;
u8 *mem;
int i, reply_sz, sz, total_size, num_chain;
u64 dma_mask;
dma_mask = 0;
/* Prime reply FIFO... */
if (ioc->reply_frames == NULL) {
if ( (num_chain = initChainBuffers(ioc)) < 0)
return -1;
/*
* 1078 errata workaround for the 36GB limitation
*/
if (ioc->pcidev->device == MPI_MANUFACTPAGE_DEVID_SAS1078 &&
ioc->dma_mask > DMA_BIT_MASK(35)) {
if (!dma_set_mask(&ioc->pcidev->dev, DMA_BIT_MASK(32))
&& !dma_set_coherent_mask(&ioc->pcidev->dev, DMA_BIT_MASK(32))) {
dma_mask = DMA_BIT_MASK(35);
d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"setting 35 bit addressing for "
"Request/Reply/Chain and Sense Buffers\n",
ioc->name));
} else {
/* Resetting DMA mask to 64 bit */
dma_set_mask(&ioc->pcidev->dev,
DMA_BIT_MASK(64));
dma_set_coherent_mask(&ioc->pcidev->dev,
DMA_BIT_MASK(64));
printk(MYIOC_s_ERR_FMT
"failed setting 35 bit addressing for "
"Request/Reply/Chain and Sense Buffers\n",
ioc->name);
return -1;
}
}
total_size = reply_sz = (ioc->reply_sz * ioc->reply_depth);
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ReplyBuffer sz=%d bytes, ReplyDepth=%d\n",
ioc->name, ioc->reply_sz, ioc->reply_depth));
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ReplyBuffer sz=%d[%x] bytes\n",
ioc->name, reply_sz, reply_sz));
sz = (ioc->req_sz * ioc->req_depth);
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RequestBuffer sz=%d bytes, RequestDepth=%d\n",
ioc->name, ioc->req_sz, ioc->req_depth));
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RequestBuffer sz=%d[%x] bytes\n",
ioc->name, sz, sz));
total_size += sz;
sz = num_chain * ioc->req_sz; /* chain buffer pool size */
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ChainBuffer sz=%d bytes, ChainDepth=%d\n",
ioc->name, ioc->req_sz, num_chain));
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ChainBuffer sz=%d[%x] bytes num_chain=%d\n",
ioc->name, sz, sz, num_chain));
total_size += sz;
mem = dma_alloc_coherent(&ioc->pcidev->dev, total_size,
&alloc_dma, GFP_KERNEL);
if (mem == NULL) {
printk(MYIOC_s_ERR_FMT "Unable to allocate Reply, Request, Chain Buffers!\n",
ioc->name);
goto out_fail;
}
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Total alloc @ %p[%p], sz=%d[%x] bytes\n",
ioc->name, mem, (void *)(ulong)alloc_dma, total_size, total_size));
memset(mem, 0, total_size);
ioc->alloc_total += total_size;
ioc->alloc = mem;
ioc->alloc_dma = alloc_dma;
ioc->alloc_sz = total_size;
ioc->reply_frames = (MPT_FRAME_HDR *) mem;
ioc->reply_frames_low_dma = (u32) (alloc_dma & 0xFFFFFFFF);
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ReplyBuffers @ %p[%p]\n",
ioc->name, ioc->reply_frames, (void *)(ulong)alloc_dma));
alloc_dma += reply_sz;
mem += reply_sz;
/* Request FIFO - WE manage this! */
ioc->req_frames = (MPT_FRAME_HDR *) mem;
ioc->req_frames_dma = alloc_dma;
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RequestBuffers @ %p[%p]\n",
ioc->name, mem, (void *)(ulong)alloc_dma));
ioc->req_frames_low_dma = (u32) (alloc_dma & 0xFFFFFFFF);
for (i = 0; i < ioc->req_depth; i++) {
alloc_dma += ioc->req_sz;
mem += ioc->req_sz;
}
ioc->ChainBuffer = mem;
ioc->ChainBufferDMA = alloc_dma;
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ChainBuffers @ %p(%p)\n",
ioc->name, ioc->ChainBuffer, (void *)(ulong)ioc->ChainBufferDMA));
/* Initialize the free chain Q.
*/
INIT_LIST_HEAD(&ioc->FreeChainQ);
/* Post the chain buffers to the FreeChainQ.
*/
mem = (u8 *)ioc->ChainBuffer;
for (i=0; i < num_chain; i++) {
mf = (MPT_FRAME_HDR *) mem;
list_add_tail(&mf->u.frame.linkage.list, &ioc->FreeChainQ);
mem += ioc->req_sz;
}
/* Initialize Request frames linked list
*/
alloc_dma = ioc->req_frames_dma;
mem = (u8 *) ioc->req_frames;
spin_lock_irqsave(&ioc->FreeQlock, flags);
INIT_LIST_HEAD(&ioc->FreeQ);
for (i = 0; i < ioc->req_depth; i++) {
mf = (MPT_FRAME_HDR *) mem;
/* Queue REQUESTs *internally*! */
list_add_tail(&mf->u.frame.linkage.list, &ioc->FreeQ);
mem += ioc->req_sz;
}
spin_unlock_irqrestore(&ioc->FreeQlock, flags);
sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC);
ioc->sense_buf_pool = dma_alloc_coherent(&ioc->pcidev->dev, sz,
&ioc->sense_buf_pool_dma, GFP_KERNEL);
if (ioc->sense_buf_pool == NULL) {
printk(MYIOC_s_ERR_FMT "Unable to allocate Sense Buffers!\n",
ioc->name);
goto out_fail;
}
ioc->sense_buf_low_dma = (u32) (ioc->sense_buf_pool_dma & 0xFFFFFFFF);
ioc->alloc_total += sz;
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "SenseBuffers @ %p[%p]\n",
ioc->name, ioc->sense_buf_pool, (void *)(ulong)ioc->sense_buf_pool_dma));
}
/* Post Reply frames to FIFO
*/
alloc_dma = ioc->alloc_dma;
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ReplyBuffers @ %p[%p]\n",
ioc->name, ioc->reply_frames, (void *)(ulong)alloc_dma));
for (i = 0; i < ioc->reply_depth; i++) {
/* Write each address to the IOC! */
CHIPREG_WRITE32(&ioc->chip->ReplyFifo, alloc_dma);
alloc_dma += ioc->reply_sz;
}
if (dma_mask == DMA_BIT_MASK(35) && !dma_set_mask(&ioc->pcidev->dev,
ioc->dma_mask) && !dma_set_coherent_mask(&ioc->pcidev->dev,
ioc->dma_mask))
d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"restoring 64 bit addressing\n", ioc->name));
return 0;
out_fail:
if (ioc->alloc != NULL) {
sz = ioc->alloc_sz;
dma_free_coherent(&ioc->pcidev->dev, sz, ioc->alloc,
ioc->alloc_dma);
ioc->reply_frames = NULL;
ioc->req_frames = NULL;
ioc->alloc_total -= sz;
}
if (ioc->sense_buf_pool != NULL) {
sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC);
dma_free_coherent(&ioc->pcidev->dev, sz, ioc->sense_buf_pool,
ioc->sense_buf_pool_dma);
ioc->sense_buf_pool = NULL;
}
if (dma_mask == DMA_BIT_MASK(35) && !dma_set_mask(&ioc->pcidev->dev,
DMA_BIT_MASK(64)) && !dma_set_coherent_mask(&ioc->pcidev->dev,
DMA_BIT_MASK(64)))
d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"restoring 64 bit addressing\n", ioc->name));
return -1;
}
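/*
 * Layout of the single coherent allocation built above (derived from
 * the pointer arithmetic in this function):
 *
 *	alloc / alloc_dma  -> +--------------------------+
 *	                      | reply frames             | reply_sz * reply_depth
 *	req_frames(_dma)   -> +--------------------------+
 *	                      | request frames           | req_sz * req_depth
 *	ChainBuffer(DMA)   -> +--------------------------+
 *	                      | chain buffers            | req_sz * num_chain
 *	                      +--------------------------+
 *
 * Sense buffers are a second, separate coherent allocation
 * (sense_buf_pool), sized req_depth * MPT_SENSE_BUFFER_ALLOC.
 */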
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_handshake_req_reply_wait - Send MPT request to and receive reply
* from IOC via doorbell handshake method.
* @ioc: Pointer to MPT_ADAPTER structure
* @reqBytes: Size of the request in bytes
* @req: Pointer to MPT request frame
* @replyBytes: Expected size of the reply in bytes
* @u16reply: Pointer to area where reply should be written
* @maxwait: Max wait time for a reply (in seconds)
* @sleepFlag: Specifies whether the process can sleep
*
* NOTES: It is the caller's responsibility to byte-swap fields in the
* request which are greater than 1 byte in size. It is also the
* caller's responsibility to byte-swap response fields which are
* greater than 1 byte in size.
*
* Returns 0 for success, non-zero for failure.
*/
static int
mpt_handshake_req_reply_wait(MPT_ADAPTER *ioc, int reqBytes, u32 *req,
int replyBytes, u16 *u16reply, int maxwait, int sleepFlag)
{
MPIDefaultReply_t *mptReply;
int failcnt = 0;
int t;
/*
* Get ready to cache a handshake reply
*/
ioc->hs_reply_idx = 0;
mptReply = (MPIDefaultReply_t *) ioc->hs_reply;
mptReply->MsgLength = 0;
/*
* Make sure there are no doorbells (WRITE 0 to IntStatus reg),
* then tell IOC that we want to handshake a request of N words.
* (WRITE u32val to Doorbell reg).
*/
CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
CHIPREG_WRITE32(&ioc->chip->Doorbell,
((MPI_FUNCTION_HANDSHAKE<<MPI_DOORBELL_FUNCTION_SHIFT) |
((reqBytes/4)<<MPI_DOORBELL_ADD_DWORDS_SHIFT)));
/*
* Wait for IOC's doorbell handshake int
*/
if ((t = WaitForDoorbellInt(ioc, 5, sleepFlag)) < 0)
failcnt++;
dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "HandShake request start reqBytes=%d, WaitCnt=%d%s\n",
ioc->name, reqBytes, t, failcnt ? " - MISSING DOORBELL HANDSHAKE!" : ""));
/* Read doorbell and check for active bit */
if (!(CHIPREG_READ32(&ioc->chip->Doorbell) & MPI_DOORBELL_ACTIVE))
return -1;
/*
* Clear doorbell int (WRITE 0 to IntStatus reg),
* then wait for IOC to ACKnowledge that it's ready for
* our handshake request.
*/
CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
if (!failcnt && (t = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0)
failcnt++;
if (!failcnt) {
int ii;
u8 *req_as_bytes = (u8 *) req;
/*
* Stuff request words via doorbell handshake,
* with ACK from IOC for each.
*/
for (ii = 0; !failcnt && ii < reqBytes/4; ii++) {
u32 word = ((req_as_bytes[(ii*4) + 0] << 0) |
(req_as_bytes[(ii*4) + 1] << 8) |
(req_as_bytes[(ii*4) + 2] << 16) |
(req_as_bytes[(ii*4) + 3] << 24));
CHIPREG_WRITE32(&ioc->chip->Doorbell, word);
if ((t = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0)
failcnt++;
}
dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Handshake request frame (@%p) header\n", ioc->name, req));
DBG_DUMP_REQUEST_FRAME_HDR(ioc, (u32 *)req);
dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "HandShake request post done, WaitCnt=%d%s\n",
ioc->name, t, failcnt ? " - MISSING DOORBELL ACK!" : ""));
/*
* Wait for completion of doorbell handshake reply from the IOC
*/
if (!failcnt && (t = WaitForDoorbellReply(ioc, maxwait, sleepFlag)) < 0)
failcnt++;
dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "HandShake reply count=%d%s\n",
ioc->name, t, failcnt ? " - MISSING DOORBELL REPLY!" : ""));
/*
* Copy out the cached reply...
*/
for (ii=0; ii < min(replyBytes/2,mptReply->MsgLength*2); ii++)
u16reply[ii] = ioc->hs_reply[ii];
} else {
return -99;
}
return -failcnt;
}
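/*
 * Minimal caller sketch (illustrative; SendEventNotification() later in
 * this file is a real in-driver user). Request and reply live on the
 * stack because the handshake moves data through the doorbell register
 * rather than DMA:
 *
 *	EventNotification_t evn;
 *	MPIDefaultReply_t reply;
 *	int rc;
 *
 *	memset(&evn, 0, sizeof(evn));
 *	memset(&reply, 0, sizeof(reply));
 *	evn.Function = MPI_FUNCTION_EVENT_NOTIFICATION;
 *	evn.Switch = 1;
 *	evn.MsgContext = cpu_to_le32(mpt_base_index << 16);
 *	rc = mpt_handshake_req_reply_wait(ioc, sizeof(evn), (u32 *)&evn,
 *			sizeof(reply), (u16 *)&reply, 30, CAN_SLEEP);
 */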
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* WaitForDoorbellAck - Wait for IOC doorbell handshake acknowledge
* @ioc: Pointer to MPT_ADAPTER structure
* @howlong: How long to wait (in seconds)
* @sleepFlag: Specifies whether the process can sleep
*
* This routine waits (up to ~2 seconds max) for IOC doorbell
* handshake ACKnowledge, indicated by the IOP_DOORBELL_STATUS
* bit in its IntStatus register being clear.
*
* Returns a negative value on failure, else wait loop count.
*/
static int
WaitForDoorbellAck(MPT_ADAPTER *ioc, int howlong, int sleepFlag)
{
int cntdn;
int count = 0;
u32 intstat=0;
cntdn = 1000 * howlong;
if (sleepFlag == CAN_SLEEP) {
while (--cntdn) {
msleep (1);
intstat = CHIPREG_READ32(&ioc->chip->IntStatus);
if (! (intstat & MPI_HIS_IOP_DOORBELL_STATUS))
break;
count++;
}
} else {
while (--cntdn) {
udelay (1000);
intstat = CHIPREG_READ32(&ioc->chip->IntStatus);
if (! (intstat & MPI_HIS_IOP_DOORBELL_STATUS))
break;
count++;
}
}
if (cntdn) {
dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "WaitForDoorbell ACK (count=%d)\n",
ioc->name, count));
return count;
}
printk(MYIOC_s_ERR_FMT "Doorbell ACK timeout (count=%d), IntStatus=%x!\n",
ioc->name, count, intstat);
return -1;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* WaitForDoorbellInt - Wait for IOC to set its doorbell interrupt bit
* @ioc: Pointer to MPT_ADAPTER structure
* @howlong: How long to wait (in seconds)
* @sleepFlag: Specifies whether the process can sleep
*
* This routine waits (up to ~2 seconds max) for IOC doorbell interrupt
* (MPI_HIS_DOORBELL_INTERRUPT) to be set in the IntStatus register.
*
* Returns a negative value on failure, else wait loop count.
*/
static int
WaitForDoorbellInt(MPT_ADAPTER *ioc, int howlong, int sleepFlag)
{
int cntdn;
int count = 0;
u32 intstat=0;
cntdn = 1000 * howlong;
if (sleepFlag == CAN_SLEEP) {
while (--cntdn) {
intstat = CHIPREG_READ32(&ioc->chip->IntStatus);
if (intstat & MPI_HIS_DOORBELL_INTERRUPT)
break;
msleep(1);
count++;
}
} else {
while (--cntdn) {
intstat = CHIPREG_READ32(&ioc->chip->IntStatus);
if (intstat & MPI_HIS_DOORBELL_INTERRUPT)
break;
udelay (1000);
count++;
}
}
if (cntdn) {
dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "WaitForDoorbell INT (cnt=%d) howlong=%d\n",
ioc->name, count, howlong));
return count;
}
printk(MYIOC_s_ERR_FMT "Doorbell INT timeout (count=%d), IntStatus=%x!\n",
ioc->name, count, intstat);
return -1;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* WaitForDoorbellReply - Wait for and capture an IOC handshake reply.
* @ioc: Pointer to MPT_ADAPTER structure
* @howlong: How long to wait (in seconds)
* @sleepFlag: Specifies whether the process can sleep
*
* This routine polls the IOC for a handshake reply, 16 bits at a time.
* Reply is cached to IOC private area large enough to hold a maximum
* of 128 bytes of reply data.
*
* Returns a negative value on failure, else size of reply in WORDS.
*/
static int
WaitForDoorbellReply(MPT_ADAPTER *ioc, int howlong, int sleepFlag)
{
int u16cnt = 0;
int failcnt = 0;
int t;
u16 *hs_reply = ioc->hs_reply;
volatile MPIDefaultReply_t *mptReply = (MPIDefaultReply_t *) ioc->hs_reply;
u16 hword;
hs_reply[0] = hs_reply[1] = hs_reply[7] = 0;
/*
* Get first two u16's so we can look at IOC's intended reply MsgLength
*/
u16cnt=0;
if ((t = WaitForDoorbellInt(ioc, howlong, sleepFlag)) < 0) {
failcnt++;
} else {
hs_reply[u16cnt++] = le16_to_cpu(CHIPREG_READ32(&ioc->chip->Doorbell) & 0x0000FFFF);
CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
if ((t = WaitForDoorbellInt(ioc, 5, sleepFlag)) < 0)
failcnt++;
else {
hs_reply[u16cnt++] = le16_to_cpu(CHIPREG_READ32(&ioc->chip->Doorbell) & 0x0000FFFF);
CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
}
}
dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "WaitCnt=%d First handshake reply word=%08x%s\n",
ioc->name, t, le32_to_cpu(*(u32 *)hs_reply),
failcnt ? " - MISSING DOORBELL HANDSHAKE!" : ""));
/*
* If no error (and IOC said MsgLength is > 0), piece together
* reply 16 bits at a time.
*/
for (u16cnt=2; !failcnt && u16cnt < (2 * mptReply->MsgLength); u16cnt++) {
if ((t = WaitForDoorbellInt(ioc, 5, sleepFlag)) < 0)
failcnt++;
hword = le16_to_cpu(CHIPREG_READ32(&ioc->chip->Doorbell) & 0x0000FFFF);
/* don't overflow our IOC hs_reply[] buffer! */
if (u16cnt < ARRAY_SIZE(ioc->hs_reply))
hs_reply[u16cnt] = hword;
CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
}
if (!failcnt && (t = WaitForDoorbellInt(ioc, 5, sleepFlag)) < 0)
failcnt++;
CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
if (failcnt) {
printk(MYIOC_s_ERR_FMT "Handshake reply failure!\n",
ioc->name);
return -failcnt;
}
#if 0
else if (u16cnt != (2 * mptReply->MsgLength)) {
return -101;
}
else if ((mptReply->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
return -102;
}
#endif
dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Got Handshake reply:\n", ioc->name));
DBG_DUMP_REPLY_FRAME(ioc, (u32 *)mptReply);
dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "WaitForDoorbell REPLY WaitCnt=%d (sz=%d)\n",
ioc->name, t, u16cnt/2));
return u16cnt/2;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* GetLanConfigPages - Fetch LANConfig pages.
* @ioc: Pointer to MPT_ADAPTER structure
*
* Return: 0 for success
* -ENOMEM if no memory available
* -EPERM if not allowed due to ISR context
* -EAGAIN if no msg frames currently available
* -EFAULT for non-successful reply or no reply (timeout)
*/
static int
GetLanConfigPages(MPT_ADAPTER *ioc)
{
ConfigPageHeader_t hdr;
CONFIGPARMS cfg;
LANPage0_t *ppage0_alloc;
dma_addr_t page0_dma;
LANPage1_t *ppage1_alloc;
dma_addr_t page1_dma;
int rc = 0;
int data_sz;
int copy_sz;
/* Get LAN Page 0 header */
hdr.PageVersion = 0;
hdr.PageLength = 0;
hdr.PageNumber = 0;
hdr.PageType = MPI_CONFIG_PAGETYPE_LAN;
cfg.cfghdr.hdr = &hdr;
cfg.physAddr = -1;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.dir = 0;
cfg.pageAddr = 0;
cfg.timeout = 0;
if ((rc = mpt_config(ioc, &cfg)) != 0)
return rc;
if (hdr.PageLength > 0) {
data_sz = hdr.PageLength * 4;
ppage0_alloc = dma_alloc_coherent(&ioc->pcidev->dev, data_sz,
&page0_dma, GFP_KERNEL);
rc = -ENOMEM;
if (ppage0_alloc) {
memset((u8 *)ppage0_alloc, 0, data_sz);
cfg.physAddr = page0_dma;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
if ((rc = mpt_config(ioc, &cfg)) == 0) {
/* save the data */
copy_sz = min_t(int, sizeof(LANPage0_t), data_sz);
memcpy(&ioc->lan_cnfg_page0, ppage0_alloc, copy_sz);
}
dma_free_coherent(&ioc->pcidev->dev, data_sz,
(u8 *)ppage0_alloc, page0_dma);
/* FIXME!
* Normalize endianness of structure data,
* by byte-swapping all > 1 byte fields!
*/
}
if (rc)
return rc;
}
/* Get LAN Page 1 header */
hdr.PageVersion = 0;
hdr.PageLength = 0;
hdr.PageNumber = 1;
hdr.PageType = MPI_CONFIG_PAGETYPE_LAN;
cfg.cfghdr.hdr = &hdr;
cfg.physAddr = -1;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.dir = 0;
cfg.pageAddr = 0;
if ((rc = mpt_config(ioc, &cfg)) != 0)
return rc;
if (hdr.PageLength == 0)
return 0;
data_sz = hdr.PageLength * 4;
rc = -ENOMEM;
ppage1_alloc = dma_alloc_coherent(&ioc->pcidev->dev, data_sz,
&page1_dma, GFP_KERNEL);
if (ppage1_alloc) {
memset((u8 *)ppage1_alloc, 0, data_sz);
cfg.physAddr = page1_dma;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
if ((rc = mpt_config(ioc, &cfg)) == 0) {
/* save the data */
copy_sz = min_t(int, sizeof(LANPage1_t), data_sz);
memcpy(&ioc->lan_cnfg_page1, ppage1_alloc, copy_sz);
}
dma_free_coherent(&ioc->pcidev->dev, data_sz,
(u8 *)ppage1_alloc, page1_dma);
/* FIXME!
* Normalize endianness of structure data,
* by byte-swapping all > 1 byte fields!
*/
}
return rc;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mptbase_sas_persist_operation - Perform operation on SAS Persistent Table
* @ioc: Pointer to MPT_ADAPTER structure
* @persist_opcode: see below
*
* =============================== ======================================
* MPI_SAS_OP_CLEAR_NOT_PRESENT Free all persist TargetID mappings for
* devices not currently present.
* MPI_SAS_OP_CLEAR_ALL_PERSISTENT Clear all persist TargetID mappings
* =============================== ======================================
*
* NOTE: Do not use this function during interrupt time.
*
* Returns 0 for success, non-zero error
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
int
mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode)
{
SasIoUnitControlRequest_t *sasIoUnitCntrReq;
SasIoUnitControlReply_t *sasIoUnitCntrReply;
MPT_FRAME_HDR *mf = NULL;
MPIHeader_t *mpi_hdr;
int ret = 0;
unsigned long timeleft;
mutex_lock(&ioc->mptbase_cmds.mutex);
/* init the internal cmd struct */
memset(ioc->mptbase_cmds.reply, 0 , MPT_DEFAULT_FRAME_SIZE);
INITIALIZE_MGMT_STATUS(ioc->mptbase_cmds.status)
/* ensure garbage is not sent to fw */
switch(persist_opcode) {
case MPI_SAS_OP_CLEAR_NOT_PRESENT:
case MPI_SAS_OP_CLEAR_ALL_PERSISTENT:
break;
default:
ret = -1;
goto out;
}
printk(KERN_DEBUG "%s: persist_opcode=%x\n",
__func__, persist_opcode);
/* Get a MF for this command.
*/
if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
printk(KERN_DEBUG "%s: no msg frames!\n", __func__);
ret = -1;
goto out;
}
mpi_hdr = (MPIHeader_t *) mf;
sasIoUnitCntrReq = (SasIoUnitControlRequest_t *)mf;
memset(sasIoUnitCntrReq,0,sizeof(SasIoUnitControlRequest_t));
sasIoUnitCntrReq->Function = MPI_FUNCTION_SAS_IO_UNIT_CONTROL;
sasIoUnitCntrReq->MsgContext = mpi_hdr->MsgContext;
sasIoUnitCntrReq->Operation = persist_opcode;
mpt_put_msg_frame(mpt_base_index, ioc, mf);
timeleft = wait_for_completion_timeout(&ioc->mptbase_cmds.done, 10*HZ);
if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
ret = -ETIME;
printk(KERN_DEBUG "%s: failed\n", __func__);
if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
goto out;
if (!timeleft) {
printk(MYIOC_s_WARN_FMT
"Issuing Reset from %s!!, doorbell=0x%08x\n",
ioc->name, __func__, mpt_GetIocState(ioc, 0));
mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
mpt_free_msg_frame(ioc, mf);
}
goto out;
}
if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
ret = -1;
goto out;
}
sasIoUnitCntrReply =
(SasIoUnitControlReply_t *)ioc->mptbase_cmds.reply;
if (le16_to_cpu(sasIoUnitCntrReply->IOCStatus) != MPI_IOCSTATUS_SUCCESS) {
printk(KERN_DEBUG "%s: IOCStatus=0x%X IOCLogInfo=0x%X\n",
__func__, sasIoUnitCntrReply->IOCStatus,
sasIoUnitCntrReply->IOCLogInfo);
printk(KERN_DEBUG "%s: failed\n", __func__);
ret = -1;
} else
printk(KERN_DEBUG "%s: success\n", __func__);
out:
CLEAR_MGMT_STATUS(ioc->mptbase_cmds.status)
mutex_unlock(&ioc->mptbase_cmds.mutex);
return ret;
}
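/*
 * Illustrative caller sketch: clearing mappings for devices that are
 * no longer present, from process context (per the NOTE above). The
 * surrounding error handling is an assumption for illustration:
 *
 *	if (mptbase_sas_persist_operation(ioc,
 *	    MPI_SAS_OP_CLEAR_NOT_PRESENT) != 0)
 *		printk(MYIOC_s_WARN_FMT "persist table clear failed\n",
 *		       ioc->name);
 */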
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static void
mptbase_raid_process_event_data(MPT_ADAPTER *ioc,
MpiEventDataRaid_t * pRaidEventData)
{
int volume;
int reason;
int disk;
int status;
int flags;
int state;
volume = pRaidEventData->VolumeID;
reason = pRaidEventData->ReasonCode;
disk = pRaidEventData->PhysDiskNum;
status = le32_to_cpu(pRaidEventData->SettingsStatus);
flags = (status >> 0) & 0xff;
state = (status >> 8) & 0xff;
if (reason == MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED) {
return;
}
if ((reason >= MPI_EVENT_RAID_RC_PHYSDISK_CREATED &&
reason <= MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED) ||
(reason == MPI_EVENT_RAID_RC_SMART_DATA)) {
printk(MYIOC_s_INFO_FMT "RAID STATUS CHANGE for PhysDisk %d id=%d\n",
ioc->name, disk, volume);
} else {
printk(MYIOC_s_INFO_FMT "RAID STATUS CHANGE for VolumeID %d\n",
ioc->name, volume);
}
switch(reason) {
case MPI_EVENT_RAID_RC_VOLUME_CREATED:
printk(MYIOC_s_INFO_FMT " volume has been created\n",
ioc->name);
break;
case MPI_EVENT_RAID_RC_VOLUME_DELETED:
printk(MYIOC_s_INFO_FMT " volume has been deleted\n",
ioc->name);
break;
case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED:
printk(MYIOC_s_INFO_FMT " volume settings have been changed\n",
ioc->name);
break;
case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
printk(MYIOC_s_INFO_FMT " volume is now %s%s%s%s\n",
ioc->name,
state == MPI_RAIDVOL0_STATUS_STATE_OPTIMAL
? "optimal"
: state == MPI_RAIDVOL0_STATUS_STATE_DEGRADED
? "degraded"
: state == MPI_RAIDVOL0_STATUS_STATE_FAILED
? "failed"
: "state unknown",
flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED
? ", enabled" : "",
flags & MPI_RAIDVOL0_STATUS_FLAG_QUIESCED
? ", quiesced" : "",
flags & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS
? ", resync in progress" : "" );
break;
case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED:
printk(MYIOC_s_INFO_FMT " volume membership of PhysDisk %d has changed\n",
ioc->name, disk);
break;
case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
printk(MYIOC_s_INFO_FMT " PhysDisk has been created\n",
ioc->name);
break;
case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
printk(MYIOC_s_INFO_FMT " PhysDisk has been deleted\n",
ioc->name);
break;
case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED:
printk(MYIOC_s_INFO_FMT " PhysDisk settings have been changed\n",
ioc->name);
break;
case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
printk(MYIOC_s_INFO_FMT " PhysDisk is now %s%s%s\n",
ioc->name,
state == MPI_PHYSDISK0_STATUS_ONLINE
? "online"
: state == MPI_PHYSDISK0_STATUS_MISSING
? "missing"
: state == MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE
? "not compatible"
: state == MPI_PHYSDISK0_STATUS_FAILED
? "failed"
: state == MPI_PHYSDISK0_STATUS_INITIALIZING
? "initializing"
: state == MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED
? "offline requested"
: state == MPI_PHYSDISK0_STATUS_FAILED_REQUESTED
? "failed requested"
: state == MPI_PHYSDISK0_STATUS_OTHER_OFFLINE
? "offline"
: "state unknown",
flags & MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC
? ", out of sync" : "",
flags & MPI_PHYSDISK0_STATUS_FLAG_QUIESCED
? ", quiesced" : "" );
break;
case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED:
printk(MYIOC_s_INFO_FMT " Domain Validation needed for PhysDisk %d\n",
ioc->name, disk);
break;
case MPI_EVENT_RAID_RC_SMART_DATA:
printk(MYIOC_s_INFO_FMT " SMART data received, ASC/ASCQ = %02xh/%02xh\n",
ioc->name, pRaidEventData->ASC, pRaidEventData->ASCQ);
break;
case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED:
printk(MYIOC_s_INFO_FMT " replacement of PhysDisk %d has started\n",
ioc->name, disk);
break;
}
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* GetIoUnitPage2 - Retrieve BIOS version and boot order information.
* @ioc: Pointer to MPT_ADAPTER structure
*
* Returns: 0 for success
* -ENOMEM if no memory available
* -EPERM if not allowed due to ISR context
* -EAGAIN if no msg frames currently available
* -EFAULT for non-successful reply or no reply (timeout)
*/
static int
GetIoUnitPage2(MPT_ADAPTER *ioc)
{
ConfigPageHeader_t hdr;
CONFIGPARMS cfg;
IOUnitPage2_t *ppage_alloc;
dma_addr_t page_dma;
int data_sz;
int rc;
/* Get the page header */
hdr.PageVersion = 0;
hdr.PageLength = 0;
hdr.PageNumber = 2;
hdr.PageType = MPI_CONFIG_PAGETYPE_IO_UNIT;
cfg.cfghdr.hdr = &hdr;
cfg.physAddr = -1;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.dir = 0;
cfg.pageAddr = 0;
cfg.timeout = 0;
if ((rc = mpt_config(ioc, &cfg)) != 0)
return rc;
if (hdr.PageLength == 0)
return 0;
/* Read the config page */
data_sz = hdr.PageLength * 4;
rc = -ENOMEM;
ppage_alloc = dma_alloc_coherent(&ioc->pcidev->dev, data_sz,
&page_dma, GFP_KERNEL);
if (ppage_alloc) {
memset((u8 *)ppage_alloc, 0, data_sz);
cfg.physAddr = page_dma;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
/* If Good, save data */
if ((rc = mpt_config(ioc, &cfg)) == 0)
ioc->biosVersion = le32_to_cpu(ppage_alloc->BiosVersion);
dma_free_coherent(&ioc->pcidev->dev, data_sz,
(u8 *)ppage_alloc, page_dma);
}
return rc;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_GetScsiPortSettings - read SCSI Port Page 0 and 2
* @ioc: Pointer to an Adapter Structure
* @portnum: IOC port number
*
* Return: -EFAULT if read of config page header fails
* or if no nvram
* If read of SCSI Port Page 0 fails,
* NVRAM = MPT_HOST_NVRAM_INVALID (0xFFFFFFFF)
* Adapter settings: async, narrow
* Return 1
* If read of SCSI Port Page 2 fails,
* Adapter settings valid
* NVRAM = MPT_HOST_NVRAM_INVALID (0xFFFFFFFF)
* Return 1
* Else
* Both valid
* Return 0
* CHECK - what type of locking mechanism should be used?
*/
static int
mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum)
{
u8 *pbuf;
dma_addr_t buf_dma;
CONFIGPARMS cfg;
ConfigPageHeader_t header;
int ii;
int data, rc = 0;
/* Allocate memory
*/
if (!ioc->spi_data.nvram) {
int sz;
u8 *mem;
sz = MPT_MAX_SCSI_DEVICES * sizeof(int);
mem = kmalloc(sz, GFP_ATOMIC);
if (mem == NULL)
return -EFAULT;
ioc->spi_data.nvram = (int *) mem;
dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "SCSI device NVRAM settings @ %p, sz=%d\n",
ioc->name, ioc->spi_data.nvram, sz));
}
/* Invalidate NVRAM information
*/
for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) {
ioc->spi_data.nvram[ii] = MPT_HOST_NVRAM_INVALID;
}
/* Read SPP0 header, allocate memory, then read page.
*/
header.PageVersion = 0;
header.PageLength = 0;
header.PageNumber = 0;
header.PageType = MPI_CONFIG_PAGETYPE_SCSI_PORT;
cfg.cfghdr.hdr = &header;
cfg.physAddr = -1;
cfg.pageAddr = portnum;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.dir = 0;
cfg.timeout = 0; /* use default */
if (mpt_config(ioc, &cfg) != 0)
return -EFAULT;
if (header.PageLength > 0) {
pbuf = dma_alloc_coherent(&ioc->pcidev->dev,
header.PageLength * 4, &buf_dma,
GFP_KERNEL);
if (pbuf) {
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
cfg.physAddr = buf_dma;
if (mpt_config(ioc, &cfg) != 0) {
ioc->spi_data.maxBusWidth = MPT_NARROW;
ioc->spi_data.maxSyncOffset = 0;
ioc->spi_data.minSyncFactor = MPT_ASYNC;
ioc->spi_data.busType = MPT_HOST_BUS_UNKNOWN;
rc = 1;
ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Unable to read PortPage0 minSyncFactor=%x\n",
ioc->name, ioc->spi_data.minSyncFactor));
} else {
/* Save the Port Page 0 data
*/
SCSIPortPage0_t *pPP0 = (SCSIPortPage0_t *) pbuf;
pPP0->Capabilities = le32_to_cpu(pPP0->Capabilities);
pPP0->PhysicalInterface = le32_to_cpu(pPP0->PhysicalInterface);
if ( (pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_QAS) == 0 ) {
ioc->spi_data.noQas |= MPT_TARGET_NO_NEGO_QAS;
ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"noQas due to Capabilities=%x\n",
ioc->name, pPP0->Capabilities));
}
ioc->spi_data.maxBusWidth = pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_WIDE ? 1 : 0;
data = pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_MAX_SYNC_OFFSET_MASK;
if (data) {
ioc->spi_data.maxSyncOffset = (u8) (data >> 16);
data = pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_MIN_SYNC_PERIOD_MASK;
ioc->spi_data.minSyncFactor = (u8) (data >> 8);
ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"PortPage0 minSyncFactor=%x\n",
ioc->name, ioc->spi_data.minSyncFactor));
} else {
ioc->spi_data.maxSyncOffset = 0;
ioc->spi_data.minSyncFactor = MPT_ASYNC;
}
ioc->spi_data.busType = pPP0->PhysicalInterface & MPI_SCSIPORTPAGE0_PHY_SIGNAL_TYPE_MASK;
/* Update the minSyncFactor based on bus type.
*/
if ((ioc->spi_data.busType == MPI_SCSIPORTPAGE0_PHY_SIGNAL_HVD) ||
(ioc->spi_data.busType == MPI_SCSIPORTPAGE0_PHY_SIGNAL_SE)) {
if (ioc->spi_data.minSyncFactor < MPT_ULTRA) {
ioc->spi_data.minSyncFactor = MPT_ULTRA;
ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"HVD or SE detected, minSyncFactor=%x\n",
ioc->name, ioc->spi_data.minSyncFactor));
}
}
}
if (pbuf) {
dma_free_coherent(&ioc->pcidev->dev,
header.PageLength * 4, pbuf,
buf_dma);
}
}
}
/* SCSI Port Page 2 - Read the header then the page.
*/
header.PageVersion = 0;
header.PageLength = 0;
header.PageNumber = 2;
header.PageType = MPI_CONFIG_PAGETYPE_SCSI_PORT;
cfg.cfghdr.hdr = &header;
cfg.physAddr = -1;
cfg.pageAddr = portnum;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.dir = 0;
if (mpt_config(ioc, &cfg) != 0)
return -EFAULT;
if (header.PageLength > 0) {
/* Allocate memory and read SCSI Port Page 2
*/
pbuf = dma_alloc_coherent(&ioc->pcidev->dev,
header.PageLength * 4, &buf_dma,
GFP_KERNEL);
if (pbuf) {
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_NVRAM;
cfg.physAddr = buf_dma;
if (mpt_config(ioc, &cfg) != 0) {
/* Nvram data is left with INVALID mark
*/
rc = 1;
} else if (ioc->pcidev->vendor == PCI_VENDOR_ID_ATTO) {
/* This is an ATTO adapter, read Page2 accordingly
*/
ATTO_SCSIPortPage2_t *pPP2 = (ATTO_SCSIPortPage2_t *) pbuf;
ATTODeviceInfo_t *pdevice = NULL;
u16 ATTOFlags;
/* Save the Port Page 2 data
* (reformat into a 32bit quantity)
*/
for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) {
pdevice = &pPP2->DeviceSettings[ii];
ATTOFlags = le16_to_cpu(pdevice->ATTOFlags);
data = 0;
/* Translate ATTO device flags to LSI format
*/
if (ATTOFlags & ATTOFLAG_DISC)
data |= (MPI_SCSIPORTPAGE2_DEVICE_DISCONNECT_ENABLE);
if (ATTOFlags & ATTOFLAG_ID_ENB)
data |= (MPI_SCSIPORTPAGE2_DEVICE_ID_SCAN_ENABLE);
if (ATTOFlags & ATTOFLAG_LUN_ENB)
data |= (MPI_SCSIPORTPAGE2_DEVICE_LUN_SCAN_ENABLE);
if (ATTOFlags & ATTOFLAG_TAGGED)
data |= (MPI_SCSIPORTPAGE2_DEVICE_TAG_QUEUE_ENABLE);
if (!(ATTOFlags & ATTOFLAG_WIDE_ENB))
data |= (MPI_SCSIPORTPAGE2_DEVICE_WIDE_DISABLE);
data = (data << 16) | (pdevice->Period << 8) | 10;
ioc->spi_data.nvram[ii] = data;
}
} else {
SCSIPortPage2_t *pPP2 = (SCSIPortPage2_t *) pbuf;
MpiDeviceInfo_t *pdevice = NULL;
/*
* Save "Set to Avoid SCSI Bus Resets" flag
*/
ioc->spi_data.bus_reset =
(le32_to_cpu(pPP2->PortFlags) &
MPI_SCSIPORTPAGE2_PORT_FLAGS_AVOID_SCSI_RESET) ?
0 : 1 ;
/* Save the Port Page 2 data
* (reformat into a 32bit quantity)
*/
data = le32_to_cpu(pPP2->PortFlags) & MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
ioc->spi_data.PortFlags = data;
for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) {
pdevice = &pPP2->DeviceSettings[ii];
data = (le16_to_cpu(pdevice->DeviceFlags) << 16) |
(pdevice->SyncFactor << 8) | pdevice->Timeout;
ioc->spi_data.nvram[ii] = data;
}
}
dma_free_coherent(&ioc->pcidev->dev,
header.PageLength * 4, pbuf,
buf_dma);
}
}
/* Update Adapter limits with those from NVRAM
* Comment: Don't need to do this. Target performance
* parameters will never exceed the adapter's limits.
*/
return rc;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_readScsiDevicePageHeaders - save version and length of SDP1
* @ioc: Pointer to an Adapter Structure
* @portnum: IOC port number
*
* Return: -EFAULT if read of config page header fails
* or 0 if success.
*/
static int
mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum)
{
CONFIGPARMS cfg;
ConfigPageHeader_t header;
/* Read the SCSI Device Page 1 header
*/
header.PageVersion = 0;
header.PageLength = 0;
header.PageNumber = 1;
header.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
cfg.cfghdr.hdr = &header;
cfg.physAddr = -1;
cfg.pageAddr = portnum;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.dir = 0;
cfg.timeout = 0;
if (mpt_config(ioc, &cfg) != 0)
return -EFAULT;
ioc->spi_data.sdp1version = cfg.cfghdr.hdr->PageVersion;
ioc->spi_data.sdp1length = cfg.cfghdr.hdr->PageLength;
header.PageVersion = 0;
header.PageLength = 0;
header.PageNumber = 0;
header.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
if (mpt_config(ioc, &cfg) != 0)
return -EFAULT;
ioc->spi_data.sdp0version = cfg.cfghdr.hdr->PageVersion;
ioc->spi_data.sdp0length = cfg.cfghdr.hdr->PageLength;
dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Headers: 0: version %d length %d\n",
ioc->name, ioc->spi_data.sdp0version, ioc->spi_data.sdp0length));
dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Headers: 1: version %d length %d\n",
ioc->name, ioc->spi_data.sdp1version, ioc->spi_data.sdp1length));
return 0;
}
/**
* mpt_inactive_raid_list_free - Free the inactive RAID component list.
* @ioc : pointer to per adapter structure
**/
static void
mpt_inactive_raid_list_free(MPT_ADAPTER *ioc)
{
struct inactive_raid_component_info *component_info, *pNext;
if (list_empty(&ioc->raid_data.inactive_list))
return;
mutex_lock(&ioc->raid_data.inactive_list_mutex);
list_for_each_entry_safe(component_info, pNext,
&ioc->raid_data.inactive_list, list) {
list_del(&component_info->list);
kfree(component_info);
}
mutex_unlock(&ioc->raid_data.inactive_list_mutex);
}
/**
* mpt_inactive_raid_volumes - sets up a linked list of phys_disk_nums for devices belonging to an inactive volume
*
* @ioc : pointer to per adapter structure
* @channel : volume channel
* @id : volume target id
**/
static void
mpt_inactive_raid_volumes(MPT_ADAPTER *ioc, u8 channel, u8 id)
{
CONFIGPARMS cfg;
ConfigPageHeader_t hdr;
dma_addr_t dma_handle;
pRaidVolumePage0_t buffer = NULL;
int i;
RaidPhysDiskPage0_t phys_disk;
struct inactive_raid_component_info *component_info;
int handle_inactive_volumes;
memset(&cfg, 0 , sizeof(CONFIGPARMS));
memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_VOLUME;
cfg.pageAddr = (channel << 8) + id;
cfg.cfghdr.hdr = &hdr;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
if (mpt_config(ioc, &cfg) != 0)
goto out;
if (!hdr.PageLength)
goto out;
buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
&dma_handle, GFP_KERNEL);
if (!buffer)
goto out;
cfg.physAddr = dma_handle;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
if (mpt_config(ioc, &cfg) != 0)
goto out;
if (!buffer->NumPhysDisks)
goto out;
handle_inactive_volumes =
(buffer->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE ||
(buffer->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED) == 0 ||
buffer->VolumeStatus.State == MPI_RAIDVOL0_STATUS_STATE_FAILED ||
buffer->VolumeStatus.State == MPI_RAIDVOL0_STATUS_STATE_MISSING) ? 1 : 0;
if (!handle_inactive_volumes)
goto out;
mutex_lock(&ioc->raid_data.inactive_list_mutex);
for (i = 0; i < buffer->NumPhysDisks; i++) {
if(mpt_raid_phys_disk_pg0(ioc,
buffer->PhysDisk[i].PhysDiskNum, &phys_disk) != 0)
continue;
if ((component_info = kmalloc(sizeof (*component_info),
GFP_KERNEL)) == NULL)
continue;
component_info->volumeID = id;
component_info->volumeBus = channel;
component_info->d.PhysDiskNum = phys_disk.PhysDiskNum;
component_info->d.PhysDiskBus = phys_disk.PhysDiskBus;
component_info->d.PhysDiskID = phys_disk.PhysDiskID;
component_info->d.PhysDiskIOC = phys_disk.PhysDiskIOC;
list_add_tail(&component_info->list,
&ioc->raid_data.inactive_list);
}
mutex_unlock(&ioc->raid_data.inactive_list_mutex);
out:
if (buffer)
dma_free_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
buffer, dma_handle);
}
/**
* mpt_raid_phys_disk_pg0 - returns phys disk page zero
* @ioc: Pointer to an Adapter Structure
* @phys_disk_num: io unit unique phys disk num generated by the ioc
* @phys_disk: requested payload data returned
*
* Return:
* 0 on success
* -EFAULT if read of config page header fails or data pointer not NULL
* -ENOMEM if pci_alloc failed
**/
int
mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num,
RaidPhysDiskPage0_t *phys_disk)
{
CONFIGPARMS cfg;
ConfigPageHeader_t hdr;
dma_addr_t dma_handle;
pRaidPhysDiskPage0_t buffer = NULL;
int rc;
memset(&cfg, 0 , sizeof(CONFIGPARMS));
memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
memset(phys_disk, 0, sizeof(RaidPhysDiskPage0_t));
hdr.PageVersion = MPI_RAIDPHYSDISKPAGE0_PAGEVERSION;
hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_PHYSDISK;
cfg.cfghdr.hdr = &hdr;
cfg.physAddr = -1;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
if (mpt_config(ioc, &cfg) != 0) {
rc = -EFAULT;
goto out;
}
if (!hdr.PageLength) {
rc = -EFAULT;
goto out;
}
buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
&dma_handle, GFP_KERNEL);
if (!buffer) {
rc = -ENOMEM;
goto out;
}
cfg.physAddr = dma_handle;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
cfg.pageAddr = phys_disk_num;
if (mpt_config(ioc, &cfg) != 0) {
rc = -EFAULT;
goto out;
}
rc = 0;
memcpy(phys_disk, buffer, sizeof(*buffer));
phys_disk->MaxLBA = le32_to_cpu(buffer->MaxLBA);
out:
if (buffer)
dma_free_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
buffer, dma_handle);
return rc;
}
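/*
 * Illustrative caller sketch: translating an IOC-assigned phys disk
 * number into its bus/target address. The printk is illustrative only:
 *
 *	RaidPhysDiskPage0_t pd;
 *
 *	if (mpt_raid_phys_disk_pg0(ioc, phys_disk_num, &pd) == 0)
 *		printk(MYIOC_s_INFO_FMT "PhysDisk %d is id %d on bus %d\n",
 *		       ioc->name, pd.PhysDiskNum, pd.PhysDiskID,
 *		       pd.PhysDiskBus);
 */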
/**
* mpt_raid_phys_disk_get_num_paths - return the number of paths associated with this phys_disk_num
* @ioc: Pointer to an Adapter Structure
* @phys_disk_num: io unit unique phys disk num generated by the ioc
*
* Return:
* number of paths (0 on failure)
**/
int
mpt_raid_phys_disk_get_num_paths(MPT_ADAPTER *ioc, u8 phys_disk_num)
{
CONFIGPARMS cfg;
ConfigPageHeader_t hdr;
dma_addr_t dma_handle;
pRaidPhysDiskPage1_t buffer = NULL;
int rc;
memset(&cfg, 0 , sizeof(CONFIGPARMS));
memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
hdr.PageVersion = MPI_RAIDPHYSDISKPAGE1_PAGEVERSION;
hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_PHYSDISK;
hdr.PageNumber = 1;
cfg.cfghdr.hdr = &hdr;
cfg.physAddr = -1;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
if (mpt_config(ioc, &cfg) != 0) {
rc = 0;
goto out;
}
if (!hdr.PageLength) {
rc = 0;
goto out;
}
buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
&dma_handle, GFP_KERNEL);
if (!buffer) {
rc = 0;
goto out;
}
cfg.physAddr = dma_handle;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
cfg.pageAddr = phys_disk_num;
if (mpt_config(ioc, &cfg) != 0) {
rc = 0;
goto out;
}
rc = buffer->NumPhysDiskPaths;
out:
if (buffer)
dma_free_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
buffer, dma_handle);
return rc;
}
EXPORT_SYMBOL(mpt_raid_phys_disk_get_num_paths);
/**
* mpt_raid_phys_disk_pg1 - returns phys disk page 1
* @ioc: Pointer to an Adapter Structure
* @phys_disk_num: io unit unique phys disk num generated by the ioc
* @phys_disk: requested payload data returned
*
* Return:
* 0 on success
* -EFAULT if read of config page header fails or data pointer not NULL
* -ENOMEM if pci_alloc failed
**/
int
mpt_raid_phys_disk_pg1(MPT_ADAPTER *ioc, u8 phys_disk_num,
RaidPhysDiskPage1_t *phys_disk)
{
CONFIGPARMS cfg;
ConfigPageHeader_t hdr;
dma_addr_t dma_handle;
pRaidPhysDiskPage1_t buffer = NULL;
int rc;
int i;
__le64 sas_address;
memset(&cfg, 0 , sizeof(CONFIGPARMS));
memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
rc = 0;
hdr.PageVersion = MPI_RAIDPHYSDISKPAGE1_PAGEVERSION;
hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_PHYSDISK;
hdr.PageNumber = 1;
cfg.cfghdr.hdr = &hdr;
cfg.physAddr = -1;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
if (mpt_config(ioc, &cfg) != 0) {
rc = -EFAULT;
goto out;
}
if (!hdr.PageLength) {
rc = -EFAULT;
goto out;
}
buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
&dma_handle, GFP_KERNEL);
if (!buffer) {
rc = -ENOMEM;
goto out;
}
cfg.physAddr = dma_handle;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
cfg.pageAddr = phys_disk_num;
if (mpt_config(ioc, &cfg) != 0) {
rc = -EFAULT;
goto out;
}
phys_disk->NumPhysDiskPaths = buffer->NumPhysDiskPaths;
phys_disk->PhysDiskNum = phys_disk_num;
for (i = 0; i < phys_disk->NumPhysDiskPaths; i++) {
phys_disk->Path[i].PhysDiskID = buffer->Path[i].PhysDiskID;
phys_disk->Path[i].PhysDiskBus = buffer->Path[i].PhysDiskBus;
phys_disk->Path[i].OwnerIdentifier =
buffer->Path[i].OwnerIdentifier;
phys_disk->Path[i].Flags = le16_to_cpu(buffer->Path[i].Flags);
memcpy(&sas_address, &buffer->Path[i].WWID, sizeof(__le64));
sas_address = le64_to_cpu(sas_address);
memcpy(&phys_disk->Path[i].WWID, &sas_address, sizeof(__le64));
memcpy(&sas_address,
&buffer->Path[i].OwnerWWID, sizeof(__le64));
sas_address = le64_to_cpu(sas_address);
memcpy(&phys_disk->Path[i].OwnerWWID,
&sas_address, sizeof(__le64));
}
out:
if (buffer)
dma_free_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
buffer, dma_handle);
return rc;
}
EXPORT_SYMBOL(mpt_raid_phys_disk_pg1);
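/*
 * Illustrative caller sketch: Page 1 is variable-length, so the buffer
 * is sized from mpt_raid_phys_disk_get_num_paths() first (this mirrors
 * the pattern used by the SAS driver; the surrounding flow is an
 * assumption):
 *
 *	int num_paths = mpt_raid_phys_disk_get_num_paths(ioc, phys_disk_num);
 *	RaidPhysDiskPage1_t *pg1;
 *
 *	if (!num_paths)
 *		return;
 *	pg1 = kzalloc(offsetof(RaidPhysDiskPage1_t, Path) +
 *		      num_paths * sizeof(RAID_PHYS_DISK1_PATH), GFP_KERNEL);
 *	if (!pg1)
 *		return;
 *	mpt_raid_phys_disk_pg1(ioc, phys_disk_num, pg1);
 *	...
 *	kfree(pg1);
 */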
/**
* mpt_findImVolumes - Identify IDs of hidden disks and RAID Volumes
* @ioc: Pointer to an Adapter Structure
*
* Return:
* 0 on success
* -EFAULT if read of config page header fails or data pointer not NULL
* -ENOMEM if pci_alloc failed
**/
int
mpt_findImVolumes(MPT_ADAPTER *ioc)
{
IOCPage2_t *pIoc2;
u8 *mem;
dma_addr_t ioc2_dma;
CONFIGPARMS cfg;
ConfigPageHeader_t header;
int rc = 0;
int iocpage2sz;
int i;
if (!ioc->ir_firmware)
return 0;
/* Free the old page
*/
kfree(ioc->raid_data.pIocPg2);
ioc->raid_data.pIocPg2 = NULL;
mpt_inactive_raid_list_free(ioc);
/* Read IOCP2 header then the page.
*/
header.PageVersion = 0;
header.PageLength = 0;
header.PageNumber = 2;
header.PageType = MPI_CONFIG_PAGETYPE_IOC;
cfg.cfghdr.hdr = &header;
cfg.physAddr = -1;
cfg.pageAddr = 0;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.dir = 0;
cfg.timeout = 0;
if (mpt_config(ioc, &cfg) != 0)
return -EFAULT;
if (header.PageLength == 0)
return -EFAULT;
iocpage2sz = header.PageLength * 4;
pIoc2 = dma_alloc_coherent(&ioc->pcidev->dev, iocpage2sz, &ioc2_dma,
GFP_KERNEL);
if (!pIoc2)
return -ENOMEM;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
cfg.physAddr = ioc2_dma;
if (mpt_config(ioc, &cfg) != 0)
goto out;
mem = kmemdup(pIoc2, iocpage2sz, GFP_KERNEL);
if (!mem) {
rc = -ENOMEM;
goto out;
}
ioc->raid_data.pIocPg2 = (IOCPage2_t *) mem;
mpt_read_ioc_pg_3(ioc);
for (i = 0; i < pIoc2->NumActiveVolumes ; i++)
mpt_inactive_raid_volumes(ioc,
pIoc2->RaidVolume[i].VolumeBus,
pIoc2->RaidVolume[i].VolumeID);
out:
dma_free_coherent(&ioc->pcidev->dev, iocpage2sz, pIoc2, ioc2_dma);
return rc;
}
static int
mpt_read_ioc_pg_3(MPT_ADAPTER *ioc)
{
IOCPage3_t *pIoc3;
u8 *mem;
CONFIGPARMS cfg;
ConfigPageHeader_t header;
dma_addr_t ioc3_dma;
int iocpage3sz = 0;
/* Free the old page
*/
kfree(ioc->raid_data.pIocPg3);
ioc->raid_data.pIocPg3 = NULL;
/* There is at least one physical disk.
* Read and save IOC Page 3
*/
header.PageVersion = 0;
header.PageLength = 0;
header.PageNumber = 3;
header.PageType = MPI_CONFIG_PAGETYPE_IOC;
cfg.cfghdr.hdr = &header;
cfg.physAddr = -1;
cfg.pageAddr = 0;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.dir = 0;
cfg.timeout = 0;
if (mpt_config(ioc, &cfg) != 0)
return 0;
if (header.PageLength == 0)
return 0;
/* Read Header good, alloc memory
*/
iocpage3sz = header.PageLength * 4;
pIoc3 = dma_alloc_coherent(&ioc->pcidev->dev, iocpage3sz, &ioc3_dma,
GFP_KERNEL);
if (!pIoc3)
return 0;
/* Read the Page and save the data
* into malloc'd memory.
*/
cfg.physAddr = ioc3_dma;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
if (mpt_config(ioc, &cfg) == 0) {
mem = kmalloc(iocpage3sz, GFP_KERNEL);
if (mem) {
memcpy(mem, (u8 *)pIoc3, iocpage3sz);
ioc->raid_data.pIocPg3 = (IOCPage3_t *) mem;
}
}
dma_free_coherent(&ioc->pcidev->dev, iocpage3sz, pIoc3, ioc3_dma);
return 0;
}
static void
mpt_read_ioc_pg_4(MPT_ADAPTER *ioc)
{
IOCPage4_t *pIoc4;
CONFIGPARMS cfg;
ConfigPageHeader_t header;
dma_addr_t ioc4_dma;
int iocpage4sz;
/* Read and save IOC Page 4
*/
header.PageVersion = 0;
header.PageLength = 0;
header.PageNumber = 4;
header.PageType = MPI_CONFIG_PAGETYPE_IOC;
cfg.cfghdr.hdr = &header;
cfg.physAddr = -1;
cfg.pageAddr = 0;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.dir = 0;
cfg.timeout = 0;
if (mpt_config(ioc, &cfg) != 0)
return;
if (header.PageLength == 0)
return;
if ( (pIoc4 = ioc->spi_data.pIocPg4) == NULL ) {
iocpage4sz = (header.PageLength + 4) * 4; /* Allow 4 additional SEPs */
pIoc4 = dma_alloc_coherent(&ioc->pcidev->dev, iocpage4sz,
&ioc4_dma, GFP_KERNEL);
if (!pIoc4)
return;
ioc->alloc_total += iocpage4sz;
} else {
ioc4_dma = ioc->spi_data.IocPg4_dma;
iocpage4sz = ioc->spi_data.IocPg4Sz;
}
/* Read the Page into dma memory.
*/
cfg.physAddr = ioc4_dma;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
if (mpt_config(ioc, &cfg) == 0) {
ioc->spi_data.pIocPg4 = (IOCPage4_t *) pIoc4;
ioc->spi_data.IocPg4_dma = ioc4_dma;
ioc->spi_data.IocPg4Sz = iocpage4sz;
} else {
dma_free_coherent(&ioc->pcidev->dev, iocpage4sz, pIoc4,
ioc4_dma);
ioc->spi_data.pIocPg4 = NULL;
ioc->alloc_total -= iocpage4sz;
}
}
static void
mpt_read_ioc_pg_1(MPT_ADAPTER *ioc)
{
IOCPage1_t *pIoc1;
CONFIGPARMS cfg;
ConfigPageHeader_t header;
dma_addr_t ioc1_dma;
int iocpage1sz = 0;
u32 tmp;
/* Check the Coalescing Timeout in IOC Page 1
*/
header.PageVersion = 0;
header.PageLength = 0;
header.PageNumber = 1;
header.PageType = MPI_CONFIG_PAGETYPE_IOC;
cfg.cfghdr.hdr = &header;
cfg.physAddr = -1;
cfg.pageAddr = 0;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.dir = 0;
cfg.timeout = 0;
if (mpt_config(ioc, &cfg) != 0)
return;
if (header.PageLength == 0)
return;
/* Read Header good, alloc memory
*/
iocpage1sz = header.PageLength * 4;
pIoc1 = dma_alloc_coherent(&ioc->pcidev->dev, iocpage1sz, &ioc1_dma,
GFP_KERNEL);
if (!pIoc1)
return;
/* Read the Page and check coalescing timeout
*/
cfg.physAddr = ioc1_dma;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
if (mpt_config(ioc, &cfg) == 0) {
tmp = le32_to_cpu(pIoc1->Flags) & MPI_IOCPAGE1_REPLY_COALESCING;
if (tmp == MPI_IOCPAGE1_REPLY_COALESCING) {
tmp = le32_to_cpu(pIoc1->CoalescingTimeout);
dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Coalescing Enabled Timeout = %d\n",
ioc->name, tmp));
if (tmp > MPT_COALESCING_TIMEOUT) {
pIoc1->CoalescingTimeout = cpu_to_le32(MPT_COALESCING_TIMEOUT);
/* Write NVRAM and current
*/
cfg.dir = 1;
cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
if (mpt_config(ioc, &cfg) == 0) {
dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Reset Current Coalescing Timeout to = %d\n",
ioc->name, MPT_COALESCING_TIMEOUT));
cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM;
if (mpt_config(ioc, &cfg) == 0) {
dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Reset NVRAM Coalescing Timeout to = %d\n",
ioc->name, MPT_COALESCING_TIMEOUT));
} else {
dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Reset NVRAM Coalescing Timeout Failed\n",
ioc->name));
}
} else {
dprintk(ioc, printk(MYIOC_s_WARN_FMT
"Reset of Current Coalescing Timeout Failed!\n",
ioc->name));
}
}
} else {
dprintk(ioc, printk(MYIOC_s_WARN_FMT "Coalescing Disabled\n", ioc->name));
}
}
dma_free_coherent(&ioc->pcidev->dev, iocpage1sz, pIoc1, ioc1_dma);
return;
}
static void
mpt_get_manufacturing_pg_0(MPT_ADAPTER *ioc)
{
CONFIGPARMS cfg;
ConfigPageHeader_t hdr;
dma_addr_t buf_dma;
ManufacturingPage0_t *pbuf = NULL;
memset(&cfg, 0 , sizeof(CONFIGPARMS));
memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
hdr.PageType = MPI_CONFIG_PAGETYPE_MANUFACTURING;
cfg.cfghdr.hdr = &hdr;
cfg.physAddr = -1;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.timeout = 10;
if (mpt_config(ioc, &cfg) != 0)
goto out;
if (!cfg.cfghdr.hdr->PageLength)
goto out;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
pbuf = dma_alloc_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
&buf_dma, GFP_KERNEL);
if (!pbuf)
goto out;
cfg.physAddr = buf_dma;
if (mpt_config(ioc, &cfg) != 0)
goto out;
memcpy(ioc->board_name, pbuf->BoardName, sizeof(ioc->board_name));
memcpy(ioc->board_assembly, pbuf->BoardAssembly, sizeof(ioc->board_assembly));
memcpy(ioc->board_tracer, pbuf->BoardTracerNumber, sizeof(ioc->board_tracer));
out:
if (pbuf)
dma_free_coherent(&ioc->pcidev->dev, hdr.PageLength * 4, pbuf,
buf_dma);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* SendEventNotification - Send EventNotification (on or off) request to adapter
* @ioc: Pointer to MPT_ADAPTER structure
* @EvSwitch: Event switch flags
* @sleepFlag: Specifies whether the process can sleep
*/
static int
SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch, int sleepFlag)
{
EventNotification_t evn;
MPIDefaultReply_t reply_buf;
memset(&evn, 0, sizeof(EventNotification_t));
memset(&reply_buf, 0, sizeof(MPIDefaultReply_t));
evn.Function = MPI_FUNCTION_EVENT_NOTIFICATION;
evn.Switch = EvSwitch;
evn.MsgContext = cpu_to_le32(mpt_base_index << 16);
devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Sending EventNotification (%d) request %p\n",
ioc->name, EvSwitch, &evn));
return mpt_handshake_req_reply_wait(ioc, sizeof(EventNotification_t),
(u32 *)&evn, sizeof(MPIDefaultReply_t), (u16 *)&reply_buf, 30,
sleepFlag);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* SendEventAck - Send EventAck request to MPT adapter.
* @ioc: Pointer to MPT_ADAPTER structure
* @evnp: Pointer to original EventNotification request
*/
static int
SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp)
{
EventAck_t *pAck;
if ((pAck = (EventAck_t *) mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames!!\n",
ioc->name, __func__));
return -1;
}
devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending EventAck\n", ioc->name));
pAck->Function = MPI_FUNCTION_EVENT_ACK;
pAck->ChainOffset = 0;
pAck->Reserved[0] = pAck->Reserved[1] = 0;
pAck->MsgFlags = 0;
pAck->Reserved1[0] = pAck->Reserved1[1] = pAck->Reserved1[2] = 0;
pAck->Event = evnp->Event;
pAck->EventContext = evnp->EventContext;
mpt_put_msg_frame(mpt_base_index, ioc, (MPT_FRAME_HDR *)pAck);
return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_config - Generic function to issue config message
* @ioc: Pointer to an adapter structure
* @pCfg: Pointer to a configuration structure. Struct contains
* action, page address, direction, physical address
* and pointer to a configuration page header
* Page header is updated.
*
* Returns 0 for success
* -EAGAIN if no msg frames currently available
* -EFAULT for non-successful reply or no reply (timeout)
*/
int
mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
{
Config_t *pReq;
ConfigReply_t *pReply;
ConfigExtendedPageHeader_t *pExtHdr = NULL;
MPT_FRAME_HDR *mf;
int ii;
int flagsLength;
long timeout;
int ret;
u8 page_type = 0, extend_page;
unsigned long timeleft;
unsigned long flags;
u8 issue_hard_reset = 0;
u8 retry_count = 0;
might_sleep();
/* don't send a config page during diag reset */
spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
if (ioc->ioc_reset_in_progress) {
dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"%s: busy with host reset\n", ioc->name, __func__));
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
return -EBUSY;
}
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
/* don't send if no chance of success */
if (!ioc->active ||
mpt_GetIocState(ioc, 1) != MPI_IOC_STATE_OPERATIONAL) {
dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"%s: ioc not operational, %d, %xh\n",
ioc->name, __func__, ioc->active,
mpt_GetIocState(ioc, 0)));
return -EFAULT;
}
retry_config:
mutex_lock(&ioc->mptbase_cmds.mutex);
/* init the internal cmd struct */
memset(ioc->mptbase_cmds.reply, 0 , MPT_DEFAULT_FRAME_SIZE);
INITIALIZE_MGMT_STATUS(ioc->mptbase_cmds.status)
/* Get and Populate a free Frame
*/
if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
dcprintk(ioc, printk(MYIOC_s_WARN_FMT
"mpt_config: no msg frames!\n", ioc->name));
ret = -EAGAIN;
goto out;
}
pReq = (Config_t *)mf;
pReq->Action = pCfg->action;
pReq->Reserved = 0;
pReq->ChainOffset = 0;
pReq->Function = MPI_FUNCTION_CONFIG;
/* Assume page type is not extended and clear "reserved" fields. */
pReq->ExtPageLength = 0;
pReq->ExtPageType = 0;
pReq->MsgFlags = 0;
for (ii=0; ii < 8; ii++)
pReq->Reserved2[ii] = 0;
pReq->Header.PageVersion = pCfg->cfghdr.hdr->PageVersion;
pReq->Header.PageLength = pCfg->cfghdr.hdr->PageLength;
pReq->Header.PageNumber = pCfg->cfghdr.hdr->PageNumber;
pReq->Header.PageType = (pCfg->cfghdr.hdr->PageType & MPI_CONFIG_PAGETYPE_MASK);
if ((pCfg->cfghdr.hdr->PageType & MPI_CONFIG_PAGETYPE_MASK) == MPI_CONFIG_PAGETYPE_EXTENDED) {
pExtHdr = (ConfigExtendedPageHeader_t *)pCfg->cfghdr.ehdr;
pReq->ExtPageLength = cpu_to_le16(pExtHdr->ExtPageLength);
pReq->ExtPageType = pExtHdr->ExtPageType;
pReq->Header.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
/* Page Length must be treated as a reserved field for the
* extended header.
*/
pReq->Header.PageLength = 0;
}
pReq->PageAddress = cpu_to_le32(pCfg->pageAddr);
/* Add a SGE to the config request.
*/
if (pCfg->dir)
flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE;
else
flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ;
if ((pCfg->cfghdr.hdr->PageType & MPI_CONFIG_PAGETYPE_MASK) ==
MPI_CONFIG_PAGETYPE_EXTENDED) {
flagsLength |= pExtHdr->ExtPageLength * 4;
page_type = pReq->ExtPageType;
extend_page = 1;
} else {
flagsLength |= pCfg->cfghdr.hdr->PageLength * 4;
page_type = pReq->Header.PageType;
extend_page = 0;
}
dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Sending Config request type 0x%x, page 0x%x and action %d\n",
ioc->name, page_type, pReq->Header.PageNumber, pReq->Action));
ioc->add_sge((char *)&pReq->PageBufferSGE, flagsLength, pCfg->physAddr);
timeout = (pCfg->timeout < 15) ? HZ*15 : HZ*pCfg->timeout;
mpt_put_msg_frame(mpt_base_index, ioc, mf);
timeleft = wait_for_completion_timeout(&ioc->mptbase_cmds.done,
timeout);
if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
ret = -ETIME;
dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Failed Sending Config request type 0x%x, page 0x%x,"
" action %d, status %xh, time left %ld\n\n",
ioc->name, page_type, pReq->Header.PageNumber,
pReq->Action, ioc->mptbase_cmds.status, timeleft));
if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
goto out;
if (!timeleft) {
spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
if (ioc->ioc_reset_in_progress) {
spin_unlock_irqrestore(&ioc->taskmgmt_lock,
flags);
printk(MYIOC_s_INFO_FMT "%s: host reset in"
" progress, mpt_config timed out!\n",
ioc->name, __func__);
mutex_unlock(&ioc->mptbase_cmds.mutex);
return -EFAULT;
}
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
issue_hard_reset = 1;
}
goto out;
}
if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
ret = -1;
goto out;
}
pReply = (ConfigReply_t *)ioc->mptbase_cmds.reply;
ret = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK;
if (ret == MPI_IOCSTATUS_SUCCESS) {
if (extend_page) {
pCfg->cfghdr.ehdr->ExtPageLength =
le16_to_cpu(pReply->ExtPageLength);
pCfg->cfghdr.ehdr->ExtPageType =
pReply->ExtPageType;
}
pCfg->cfghdr.hdr->PageVersion = pReply->Header.PageVersion;
pCfg->cfghdr.hdr->PageLength = pReply->Header.PageLength;
pCfg->cfghdr.hdr->PageNumber = pReply->Header.PageNumber;
pCfg->cfghdr.hdr->PageType = pReply->Header.PageType;
}
if (retry_count)
printk(MYIOC_s_INFO_FMT "Retry completed "
"ret=0x%x timeleft=%ld\n",
ioc->name, ret, timeleft);
dcprintk(ioc, printk(KERN_DEBUG "IOCStatus=%04xh, IOCLogInfo=%08xh\n",
ret, le32_to_cpu(pReply->IOCLogInfo)));
out:
CLEAR_MGMT_STATUS(ioc->mptbase_cmds.status)
mutex_unlock(&ioc->mptbase_cmds.mutex);
if (issue_hard_reset) {
issue_hard_reset = 0;
printk(MYIOC_s_WARN_FMT
"Issuing Reset from %s!!, doorbell=0x%08x\n",
ioc->name, __func__, mpt_GetIocState(ioc, 0));
if (retry_count == 0) {
if (mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP) != 0)
retry_count++;
} else
mpt_HardResetHandler(ioc, CAN_SLEEP);
mpt_free_msg_frame(ioc, mf);
/* attempt one retry for a timed out command */
if (retry_count < 2) {
printk(MYIOC_s_INFO_FMT
"Attempting Retry Config request"
" type 0x%x, page 0x%x,"
" action %d\n", ioc->name, page_type,
pCfg->cfghdr.hdr->PageNumber, pCfg->action);
retry_count++;
goto retry_config;
}
}
return ret;
}
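/*
 * Canonical mpt_config() usage, as in the page readers above, is a
 * two-step pattern: a PAGE_HEADER action to learn the page length,
 * then a READ_CURRENT action into a DMA-coherent buffer of that size.
 * Minimal sketch, with error handling trimmed; use_page() is a
 * hypothetical consumer:
 *
 *	ConfigPageHeader_t hdr = { .PageNumber = 2,
 *				   .PageType = MPI_CONFIG_PAGETYPE_IOC };
 *	CONFIGPARMS cfg = { .cfghdr.hdr = &hdr, .physAddr = -1,
 *			    .action = MPI_CONFIG_ACTION_PAGE_HEADER };
 *	dma_addr_t buf_dma;
 *	void *buf;
 *	int sz;
 *
 *	if (mpt_config(ioc, &cfg) != 0 || !hdr.PageLength)
 *		return;
 *	sz = hdr.PageLength * 4;
 *	buf = dma_alloc_coherent(&ioc->pcidev->dev, sz, &buf_dma, GFP_KERNEL);
 *	if (!buf)
 *		return;
 *	cfg.physAddr = buf_dma;
 *	cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
 *	if (mpt_config(ioc, &cfg) == 0)
 *		use_page(buf);
 *	dma_free_coherent(&ioc->pcidev->dev, sz, buf, buf_dma);
 */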
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_ioc_reset - Base cleanup for hard reset
* @ioc: Pointer to the adapter structure
* @reset_phase: Indicates pre- or post-reset functionality
*
* Remark: Frees resources with internally generated commands.
*/
static int
mpt_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
{
switch (reset_phase) {
case MPT_IOC_SETUP_RESET:
ioc->taskmgmt_quiesce_io = 1;
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__));
break;
case MPT_IOC_PRE_RESET:
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__));
break;
case MPT_IOC_POST_RESET:
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"%s: MPT_IOC_POST_RESET\n", ioc->name, __func__));
/* wake up mptbase_cmds */
if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_PENDING) {
ioc->mptbase_cmds.status |=
MPT_MGMT_STATUS_DID_IOCRESET;
complete(&ioc->mptbase_cmds.done);
}
/* wake up taskmgmt_cmds */
if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) {
ioc->taskmgmt_cmds.status |=
MPT_MGMT_STATUS_DID_IOCRESET;
complete(&ioc->taskmgmt_cmds.done);
}
break;
default:
break;
}
return 1; /* currently means nothing really */
}
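/*
 * Illustrative only: the three phases handled above are delivered, in
 * order, to every handler registered with mpt_reset_register() (as done
 * for mpt_ioc_reset in fusion_init below). A protocol driver's handler
 * typically follows the same shape; my_ioc_reset is a hypothetical name:
 *
 *	static int
 *	my_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
 *	{
 *		switch (reset_phase) {
 *		case MPT_IOC_SETUP_RESET:	// stop accepting new I/O
 *			break;
 *		case MPT_IOC_PRE_RESET:		// fail/flush outstanding I/O
 *			break;
 *		case MPT_IOC_POST_RESET:	// re-arm state, wake waiters
 *			break;
 *		default:
 *			break;
 *		}
 *		return 1;
 *	}
 */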
#ifdef CONFIG_PROC_FS /* { */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* procfs (%MPT_PROCFS_MPTBASEDIR/...) support stuff...
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* procmpt_create - Create %MPT_PROCFS_MPTBASEDIR entries.
*
* Returns 0 for success, non-zero for failure.
*/
static int
procmpt_create(void)
{
mpt_proc_root_dir = proc_mkdir(MPT_PROCFS_MPTBASEDIR, NULL);
if (mpt_proc_root_dir == NULL)
return -ENOTDIR;
proc_create_single("summary", S_IRUGO, mpt_proc_root_dir,
mpt_summary_proc_show);
proc_create_single("version", S_IRUGO, mpt_proc_root_dir,
mpt_version_proc_show);
return 0;
}
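/*
 * Usage note (illustrative): with CONFIG_PROC_FS enabled, the entries
 * created above are plain read-only files, e.g.:
 *
 *	$ cat /proc/mpt/summary
 *	$ cat /proc/mpt/version
 *
 * (assuming MPT_PROCFS_MPTBASEDIR carries its usual value of "mpt").
 */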
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* procmpt_destroy - Tear down %MPT_PROCFS_MPTBASEDIR entries.
*
* Returns 0 for success, non-zero for failure.
*/
static void
procmpt_destroy(void)
{
remove_proc_entry("version", mpt_proc_root_dir);
remove_proc_entry("summary", mpt_proc_root_dir);
remove_proc_entry(MPT_PROCFS_MPTBASEDIR, NULL);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* Handles read request from /proc/mpt/summary or /proc/mpt/iocN/summary.
*/
static void seq_mpt_print_ioc_summary(MPT_ADAPTER *ioc, struct seq_file *m, int showlan);
static int mpt_summary_proc_show(struct seq_file *m, void *v)
{
MPT_ADAPTER *ioc = m->private;
if (ioc) {
seq_mpt_print_ioc_summary(ioc, m, 1);
} else {
list_for_each_entry(ioc, &ioc_list, list) {
seq_mpt_print_ioc_summary(ioc, m, 1);
}
}
return 0;
}
static int mpt_version_proc_show(struct seq_file *m, void *v)
{
u8 cb_idx;
int scsi, fc, sas, lan, ctl, targ;
char *drvname;
seq_printf(m, "%s-%s\n", "mptlinux", MPT_LINUX_VERSION_COMMON);
seq_printf(m, " Fusion MPT base driver\n");
scsi = fc = sas = lan = ctl = targ = 0;
for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
drvname = NULL;
if (MptCallbacks[cb_idx]) {
switch (MptDriverClass[cb_idx]) {
case MPTSPI_DRIVER:
if (!scsi++) drvname = "SPI host";
break;
case MPTFC_DRIVER:
if (!fc++) drvname = "FC host";
break;
case MPTSAS_DRIVER:
if (!sas++) drvname = "SAS host";
break;
case MPTLAN_DRIVER:
if (!lan++) drvname = "LAN";
break;
case MPTSTM_DRIVER:
if (!targ++) drvname = "SCSI target";
break;
case MPTCTL_DRIVER:
if (!ctl++) drvname = "ioctl";
break;
}
if (drvname)
seq_printf(m, " Fusion MPT %s driver\n", drvname);
}
}
return 0;
}
static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
{
MPT_ADAPTER *ioc = m->private;
char expVer[32];
int sz;
int p;
mpt_get_fw_exp_ver(expVer, ioc);
seq_printf(m, "%s:", ioc->name);
if (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT)
seq_printf(m, " (f/w download boot flag set)");
// if (ioc->facts.IOCExceptions & MPI_IOCFACTS_EXCEPT_CONFIG_CHECKSUM_FAIL)
// seq_printf(m, " CONFIG_CHECKSUM_FAIL!");
seq_printf(m, "\n ProductID = 0x%04x (%s)\n",
ioc->facts.ProductID,
ioc->prod_name);
seq_printf(m, " FWVersion = 0x%08x%s", ioc->facts.FWVersion.Word, expVer);
if (ioc->facts.FWImageSize)
seq_printf(m, " (fw_size=%d)", ioc->facts.FWImageSize);
seq_printf(m, "\n MsgVersion = 0x%04x\n", ioc->facts.MsgVersion);
seq_printf(m, " FirstWhoInit = 0x%02x\n", ioc->FirstWhoInit);
seq_printf(m, " EventState = 0x%02x\n", ioc->facts.EventState);
seq_printf(m, " CurrentHostMfaHighAddr = 0x%08x\n",
ioc->facts.CurrentHostMfaHighAddr);
seq_printf(m, " CurrentSenseBufferHighAddr = 0x%08x\n",
ioc->facts.CurrentSenseBufferHighAddr);
seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
(void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
/*
* Rounding UP to nearest 4-kB boundary here...
*/
sz = (ioc->req_sz * ioc->req_depth) + 128;
sz = ((sz + 0x1000UL - 1UL) / 0x1000) * 0x1000;
seq_printf(m, " {CurReqSz=%d} x {CurReqDepth=%d} = %d bytes ^= 0x%x\n",
ioc->req_sz, ioc->req_depth, ioc->req_sz*ioc->req_depth, sz);
seq_printf(m, " {MaxReqSz=%d} {MaxReqDepth=%d}\n",
4*ioc->facts.RequestFrameSize,
ioc->facts.GlobalCredits);
seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
(void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
sz = (ioc->reply_sz * ioc->reply_depth) + 128;
seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
seq_printf(m, " {MaxRepSz=%d} {MaxRepDepth=%d}\n",
ioc->facts.CurReplyFrameSize,
ioc->facts.ReplyQueueDepth);
seq_printf(m, " MaxDevices = %d\n",
(ioc->facts.MaxDevices==0) ? 255 : ioc->facts.MaxDevices);
seq_printf(m, " MaxBuses = %d\n", ioc->facts.MaxBuses);
/* per-port info */
for (p=0; p < ioc->facts.NumberOfPorts; p++) {
seq_printf(m, " PortNumber = %d (of %d)\n",
p+1,
ioc->facts.NumberOfPorts);
if (ioc->bus_type == FC) {
if (ioc->pfacts[p].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) {
u8 *a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
seq_printf(m, " LanAddr = %pMR\n", a);
}
seq_printf(m, " WWN = %08X%08X:%08X%08X\n",
ioc->fc_port_page0[p].WWNN.High,
ioc->fc_port_page0[p].WWNN.Low,
ioc->fc_port_page0[p].WWPN.High,
ioc->fc_port_page0[p].WWPN.Low);
}
}
return 0;
}
#endif /* CONFIG_PROC_FS } */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static void
mpt_get_fw_exp_ver(char *buf, MPT_ADAPTER *ioc)
{
buf[0] = '\0';
if ((ioc->facts.FWVersion.Word >> 24) == 0x0E) {
sprintf(buf, " (Exp %02d%02d)",
(ioc->facts.FWVersion.Word >> 16) & 0x00FF, /* Month */
(ioc->facts.FWVersion.Word >> 8) & 0x1F); /* Day */
/* insider hack! */
if ((ioc->facts.FWVersion.Word >> 8) & 0x80)
strcat(buf, " [MDBG]");
}
}
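/*
 * Worked example (assumed value): with FWVersion.Word == 0x0E0C0F00 the
 * top byte 0x0E marks an experimental build, month = (Word >> 16) & 0xFF
 * = 12 and day = (Word >> 8) & 0x1F = 15, so buf becomes " (Exp 1215)".
 */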
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_print_ioc_summary - Write ASCII summary of IOC to a buffer.
* @ioc: Pointer to MPT_ADAPTER structure
* @buffer: Pointer to buffer where IOC summary info should be written
* @size: Pointer to number of bytes we wrote (set by this routine)
* @len: Offset at which to start writing in buffer
* @showlan: Display LAN stuff?
*
* This routine writes human-readable ASCII text, representing a
* summary of IOC information, to a buffer.
*/
void
mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buffer, int *size, int len, int showlan)
{
char expVer[32];
int y;
mpt_get_fw_exp_ver(expVer, ioc);
/*
* Shorter summary of attached IOCs...
*/
y = sprintf(buffer+len, "%s: %s, %s%08xh%s, Ports=%d, MaxQ=%d",
ioc->name,
ioc->prod_name,
MPT_FW_REV_MAGIC_ID_STRING, /* "FwRev=" or somesuch */
ioc->facts.FWVersion.Word,
expVer,
ioc->facts.NumberOfPorts,
ioc->req_depth);
if (showlan && (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN)) {
u8 *a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
y += sprintf(buffer+len+y, ", LanAddr=%pMR", a);
}
y += sprintf(buffer+len+y, ", IRQ=%d", ioc->pci_irq);
if (!ioc->active)
y += sprintf(buffer+len+y, " (disabled)");
y += sprintf(buffer+len+y, "\n");
*size = y;
}
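/*
 * Example of the resulting summary line (adapter, product and firmware
 * values are hypothetical; the layout follows the format strings above):
 *
 *	ioc0: LSI53C1030, FwRev=01032710h, Ports=1, MaxQ=222, IRQ=17
 */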
#ifdef CONFIG_PROC_FS
static void seq_mpt_print_ioc_summary(MPT_ADAPTER *ioc, struct seq_file *m, int showlan)
{
char expVer[32];
mpt_get_fw_exp_ver(expVer, ioc);
/*
* Shorter summary of attached IOCs...
*/
seq_printf(m, "%s: %s, %s%08xh%s, Ports=%d, MaxQ=%d",
ioc->name,
ioc->prod_name,
MPT_FW_REV_MAGIC_ID_STRING, /* "FwRev=" or somesuch */
ioc->facts.FWVersion.Word,
expVer,
ioc->facts.NumberOfPorts,
ioc->req_depth);
if (showlan && (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN)) {
u8 *a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
seq_printf(m, ", LanAddr=%pMR", a);
}
seq_printf(m, ", IRQ=%d", ioc->pci_irq);
if (!ioc->active)
seq_printf(m, " (disabled)");
seq_putc(m, '\n');
}
#endif
/**
* mpt_set_taskmgmt_in_progress_flag - set flags associated with task management
* @ioc: Pointer to MPT_ADAPTER structure
*
* Returns 0 for SUCCESS or -1 if FAILED.
*
* If -1 is returned, it was not possible to set the flags.
**/
int
mpt_set_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc)
{
unsigned long flags;
int retval;
spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
if (ioc->ioc_reset_in_progress || ioc->taskmgmt_in_progress ||
(ioc->alt_ioc && ioc->alt_ioc->taskmgmt_in_progress)) {
retval = -1;
goto out;
}
retval = 0;
ioc->taskmgmt_in_progress = 1;
ioc->taskmgmt_quiesce_io = 1;
if (ioc->alt_ioc) {
ioc->alt_ioc->taskmgmt_in_progress = 1;
ioc->alt_ioc->taskmgmt_quiesce_io = 1;
}
out:
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
return retval;
}
EXPORT_SYMBOL(mpt_set_taskmgmt_in_progress_flag);
/**
* mpt_clear_taskmgmt_in_progress_flag - clear flags associated with task management
* @ioc: Pointer to MPT_ADAPTER structure
*
**/
void
mpt_clear_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc)
{
unsigned long flags;
spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
ioc->taskmgmt_in_progress = 0;
ioc->taskmgmt_quiesce_io = 0;
if (ioc->alt_ioc) {
ioc->alt_ioc->taskmgmt_in_progress = 0;
ioc->alt_ioc->taskmgmt_quiesce_io = 0;
}
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
}
EXPORT_SYMBOL(mpt_clear_taskmgmt_in_progress_flag);
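/*
 * Illustrative only: the intended pairing of the two helpers above. A
 * task-management issuer first claims the flag and always releases it
 * once the TM request completes or fails:
 *
 *	if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0)
 *		return -EBUSY;	// reset or another TM already in flight
 *	// ...build, send and wait for the task-management request...
 *	mpt_clear_taskmgmt_in_progress_flag(ioc);
 */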
/**
* mpt_halt_firmware - Halts the firmware if it is operational and panics
* the kernel
* @ioc: Pointer to MPT_ADAPTER structure
*
**/
void __noreturn
mpt_halt_firmware(MPT_ADAPTER *ioc)
{
u32 ioc_raw_state;
ioc_raw_state = mpt_GetIocState(ioc, 0);
if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT) {
printk(MYIOC_s_ERR_FMT "IOC is in FAULT state (%04xh)!!!\n",
ioc->name, ioc_raw_state & MPI_DOORBELL_DATA_MASK);
panic("%s: IOC Fault (%04xh)!!!\n", ioc->name,
ioc_raw_state & MPI_DOORBELL_DATA_MASK);
} else {
CHIPREG_WRITE32(&ioc->chip->Doorbell, 0xC0FFEE00);
panic("%s: Firmware is halted due to command timeout\n",
ioc->name);
}
}
EXPORT_SYMBOL(mpt_halt_firmware);
/**
* mpt_SoftResetHandler - Issues a less expensive reset
* @ioc: Pointer to MPT_ADAPTER structure
* @sleepFlag: Indicates if sleep or schedule must be called.
*
* Returns 0 for SUCCESS or -1 if FAILED.
*
* Message Unit Reset - instructs the IOC to reset the Reply Post and
* Free FIFOs. All Message Frames on the Reply Free FIFO are discarded,
* all posted buffers are freed, and event notification is turned off.
* The IOC does not reply to any outstanding request; this transfers the
* IOC to the READY state.
**/
static int
mpt_SoftResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
{
int rc;
int ii;
u8 cb_idx;
unsigned long flags;
u32 ioc_state;
unsigned long time_count;
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "SoftResetHandler Entered!\n",
ioc->name));
ioc_state = mpt_GetIocState(ioc, 0) & MPI_IOC_STATE_MASK;
if (mpt_fwfault_debug)
mpt_halt_firmware(ioc);
if (ioc_state == MPI_IOC_STATE_FAULT ||
ioc_state == MPI_IOC_STATE_RESET) {
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"skipping, either in FAULT or RESET state!\n", ioc->name));
return -1;
}
if (ioc->bus_type == FC) {
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"skipping, because the bus type is FC!\n", ioc->name));
return -1;
}
spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
if (ioc->ioc_reset_in_progress) {
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
return -1;
}
ioc->ioc_reset_in_progress = 1;
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
if (MptResetHandlers[cb_idx])
mpt_signal_reset(cb_idx, ioc, MPT_IOC_SETUP_RESET);
}
spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
if (ioc->taskmgmt_in_progress) {
ioc->ioc_reset_in_progress = 0;
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
return -1;
}
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
/* Disable reply interrupts (also blocks FreeQ) */
CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
ioc->active = 0;
time_count = jiffies;
rc = SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, sleepFlag);
for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
if (MptResetHandlers[cb_idx])
mpt_signal_reset(cb_idx, ioc, MPT_IOC_PRE_RESET);
}
if (rc)
goto out;
ioc_state = mpt_GetIocState(ioc, 0) & MPI_IOC_STATE_MASK;
if (ioc_state != MPI_IOC_STATE_READY)
goto out;
for (ii = 0; ii < 5; ii++) {
/* Get IOC facts! Allow 5 retries */
rc = GetIocFacts(ioc, sleepFlag,
MPT_HOSTEVENT_IOC_RECOVER);
if (rc == 0)
break;
if (sleepFlag == CAN_SLEEP)
msleep(100);
else
mdelay(100);
}
if (ii == 5)
goto out;
rc = PrimeIocFifos(ioc);
if (rc != 0)
goto out;
rc = SendIocInit(ioc, sleepFlag);
if (rc != 0)
goto out;
rc = SendEventNotification(ioc, 1, sleepFlag);
if (rc != 0)
goto out;
if (ioc->hard_resets < -1)
ioc->hard_resets++;
/*
* At this point, we know soft reset succeeded.
*/
ioc->active = 1;
CHIPREG_WRITE32(&ioc->chip->IntMask, MPI_HIM_DIM);
out:
spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
ioc->ioc_reset_in_progress = 0;
ioc->taskmgmt_quiesce_io = 0;
ioc->taskmgmt_in_progress = 0;
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
if (ioc->active) { /* otherwise, hard reset coming */
for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
if (MptResetHandlers[cb_idx])
mpt_signal_reset(cb_idx, ioc,
MPT_IOC_POST_RESET);
}
}
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"SoftResetHandler: completed (%d seconds): %s\n",
ioc->name, jiffies_to_msecs(jiffies - time_count)/1000,
((rc == 0) ? "SUCCESS" : "FAILED")));
return rc;
}
/**
* mpt_Soft_Hard_ResetHandler - Try less expensive reset
* @ioc: Pointer to MPT_ADAPTER structure
* @sleepFlag: Indicates if sleep or schedule must be called.
*
* Returns 0 for SUCCESS or -1 if FAILED.
* Try a soft reset first; only if it fails, fall back to the more
* expensive hard reset.
**/
int
mpt_Soft_Hard_ResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
{
int ret;
ret = mpt_SoftResetHandler(ioc, sleepFlag);
if (ret == 0)
return ret;
ret = mpt_HardResetHandler(ioc, sleepFlag);
return ret;
}
EXPORT_SYMBOL(mpt_Soft_Hard_ResetHandler);
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* Reset Handling
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_HardResetHandler - Generic reset handler
* @ioc: Pointer to MPT_ADAPTER structure
* @sleepFlag: Indicates if sleep or schedule must be called.
*
* Performs a full hard reset and reinitialization of the adapter,
* notifying all registered protocol-driver reset handlers along the way.
*
* Remark: _HardResetHandler can be invoked from an interrupt thread (timer)
* or a non-interrupt thread. In the former, must not call schedule().
*
* Note: A return of -1 is a FATAL error case, as it means a
* FW reload/initialization failed.
*
* Returns 0 for SUCCESS or -1 if FAILED.
*/
int
mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
{
int rc;
u8 cb_idx;
unsigned long flags;
unsigned long time_count;
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "HardResetHandler Entered!\n", ioc->name));
#ifdef MFCNT
printk(MYIOC_s_INFO_FMT "HardResetHandler Entered!\n", ioc->name);
printk("MF count 0x%x !\n", ioc->mfcnt);
#endif
if (mpt_fwfault_debug)
mpt_halt_firmware(ioc);
/* Reset the adapter. Prevent more than 1 call to
* mpt_do_ioc_recovery at any instant in time.
*/
spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
if (ioc->ioc_reset_in_progress) {
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
ioc->wait_on_reset_completion = 1;
do {
ssleep(1);
} while (ioc->ioc_reset_in_progress == 1);
ioc->wait_on_reset_completion = 0;
return ioc->reset_status;
}
if (ioc->wait_on_reset_completion) {
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
rc = 0;
time_count = jiffies;
goto exit;
}
ioc->ioc_reset_in_progress = 1;
if (ioc->alt_ioc)
ioc->alt_ioc->ioc_reset_in_progress = 1;
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
/* The SCSI driver needs to adjust timeouts on all current
* commands prior to the diagnostic reset being issued.
* Prevents timeouts occurring during a diagnostic reset...very bad.
* For all other protocol drivers, this is a no-op.
*/
for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
if (MptResetHandlers[cb_idx]) {
mpt_signal_reset(cb_idx, ioc, MPT_IOC_SETUP_RESET);
if (ioc->alt_ioc)
mpt_signal_reset(cb_idx, ioc->alt_ioc,
MPT_IOC_SETUP_RESET);
}
}
time_count = jiffies;
rc = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_RECOVER, sleepFlag);
if (rc != 0) {
printk(KERN_WARNING MYNAM
": WARNING - (%d) Cannot recover %s, doorbell=0x%08x\n",
rc, ioc->name, mpt_GetIocState(ioc, 0));
} else {
if (ioc->hard_resets < -1)
ioc->hard_resets++;
}
spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
ioc->ioc_reset_in_progress = 0;
ioc->taskmgmt_quiesce_io = 0;
ioc->taskmgmt_in_progress = 0;
ioc->reset_status = rc;
if (ioc->alt_ioc) {
ioc->alt_ioc->ioc_reset_in_progress = 0;
ioc->alt_ioc->taskmgmt_quiesce_io = 0;
ioc->alt_ioc->taskmgmt_in_progress = 0;
}
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
if (MptResetHandlers[cb_idx]) {
mpt_signal_reset(cb_idx, ioc, MPT_IOC_POST_RESET);
if (ioc->alt_ioc)
mpt_signal_reset(cb_idx,
ioc->alt_ioc, MPT_IOC_POST_RESET);
}
}
exit:
dtmprintk(ioc,
printk(MYIOC_s_DEBUG_FMT
"HardResetHandler: completed (%d seconds): %s\n", ioc->name,
jiffies_to_msecs(jiffies - time_count)/1000, ((rc == 0) ?
"SUCCESS" : "FAILED")));
return rc;
}
#ifdef CONFIG_FUSION_LOGGING
static void
mpt_display_event_info(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply)
{
char *ds = NULL;
u32 evData0;
int ii;
u8 event;
char *evStr = ioc->evStr;
event = le32_to_cpu(pEventReply->Event) & 0xFF;
evData0 = le32_to_cpu(pEventReply->Data[0]);
switch(event) {
case MPI_EVENT_NONE:
ds = "None";
break;
case MPI_EVENT_LOG_DATA:
ds = "Log Data";
break;
case MPI_EVENT_STATE_CHANGE:
ds = "State Change";
break;
case MPI_EVENT_UNIT_ATTENTION:
ds = "Unit Attention";
break;
case MPI_EVENT_IOC_BUS_RESET:
ds = "IOC Bus Reset";
break;
case MPI_EVENT_EXT_BUS_RESET:
ds = "External Bus Reset";
break;
case MPI_EVENT_RESCAN:
ds = "Bus Rescan Event";
break;
case MPI_EVENT_LINK_STATUS_CHANGE:
if (evData0 == MPI_EVENT_LINK_STATUS_FAILURE)
ds = "Link Status(FAILURE) Change";
else
ds = "Link Status(ACTIVE) Change";
break;
case MPI_EVENT_LOOP_STATE_CHANGE:
if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LIP)
ds = "Loop State(LIP) Change";
else if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LPE)
ds = "Loop State(LPE) Change";
else
ds = "Loop State(LPB) Change";
break;
case MPI_EVENT_LOGOUT:
ds = "Logout";
break;
case MPI_EVENT_EVENT_CHANGE:
if (evData0)
ds = "Events ON";
else
ds = "Events OFF";
break;
case MPI_EVENT_INTEGRATED_RAID:
{
u8 ReasonCode = (u8)(evData0 >> 16);
switch (ReasonCode) {
case MPI_EVENT_RAID_RC_VOLUME_CREATED :
ds = "Integrated Raid: Volume Created";
break;
case MPI_EVENT_RAID_RC_VOLUME_DELETED :
ds = "Integrated Raid: Volume Deleted";
break;
case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED :
ds = "Integrated Raid: Volume Settings Changed";
break;
case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED :
ds = "Integrated Raid: Volume Status Changed";
break;
case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED :
ds = "Integrated Raid: Volume Physdisk Changed";
break;
case MPI_EVENT_RAID_RC_PHYSDISK_CREATED :
ds = "Integrated Raid: Physdisk Created";
break;
case MPI_EVENT_RAID_RC_PHYSDISK_DELETED :
ds = "Integrated Raid: Physdisk Deleted";
break;
case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED :
ds = "Integrated Raid: Physdisk Settings Changed";
break;
case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED :
ds = "Integrated Raid: Physdisk Status Changed";
break;
case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED :
ds = "Integrated Raid: Domain Validation Needed";
break;
case MPI_EVENT_RAID_RC_SMART_DATA :
ds = "Integrated Raid; Smart Data";
break;
case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED :
ds = "Integrated Raid: Replace Action Started";
break;
default:
ds = "Integrated Raid";
break;
}
break;
}
case MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE:
ds = "SCSI Device Status Change";
break;
case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
{
u8 id = (u8)(evData0);
u8 channel = (u8)(evData0 >> 8);
u8 ReasonCode = (u8)(evData0 >> 16);
switch (ReasonCode) {
case MPI_EVENT_SAS_DEV_STAT_RC_ADDED:
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS Device Status Change: Added: "
"id=%d channel=%d", id, channel);
break;
case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING:
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS Device Status Change: Deleted: "
"id=%d channel=%d", id, channel);
break;
case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS Device Status Change: SMART Data: "
"id=%d channel=%d", id, channel);
break;
case MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED:
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS Device Status Change: No Persistency: "
"id=%d channel=%d", id, channel);
break;
case MPI_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS Device Status Change: Unsupported Device "
"Discovered : id=%d channel=%d", id, channel);
break;
case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS Device Status Change: Internal Device "
"Reset : id=%d channel=%d", id, channel);
break;
case MPI_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS Device Status Change: Internal Task "
"Abort : id=%d channel=%d", id, channel);
break;
case MPI_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS Device Status Change: Internal Abort "
"Task Set : id=%d channel=%d", id, channel);
break;
case MPI_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS Device Status Change: Internal Clear "
"Task Set : id=%d channel=%d", id, channel);
break;
case MPI_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS Device Status Change: Internal Query "
"Task : id=%d channel=%d", id, channel);
break;
default:
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS Device Status Change: Unknown: "
"id=%d channel=%d", id, channel);
break;
}
break;
}
case MPI_EVENT_ON_BUS_TIMER_EXPIRED:
ds = "Bus Timer Expired";
break;
case MPI_EVENT_QUEUE_FULL:
{
u16 curr_depth = (u16)(evData0 >> 16);
u8 channel = (u8)(evData0 >> 8);
u8 id = (u8)(evData0);
snprintf(evStr, EVENT_DESCR_STR_SZ,
"Queue Full: channel=%d id=%d depth=%d",
channel, id, curr_depth);
break;
}
case MPI_EVENT_SAS_SES:
ds = "SAS SES Event";
break;
case MPI_EVENT_PERSISTENT_TABLE_FULL:
ds = "Persistent Table Full";
break;
case MPI_EVENT_SAS_PHY_LINK_STATUS:
{
u8 LinkRates = (u8)(evData0 >> 8);
u8 PhyNumber = (u8)(evData0);
LinkRates = (LinkRates & MPI_EVENT_SAS_PLS_LR_CURRENT_MASK) >>
MPI_EVENT_SAS_PLS_LR_CURRENT_SHIFT;
switch (LinkRates) {
case MPI_EVENT_SAS_PLS_LR_RATE_UNKNOWN:
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS PHY Link Status: Phy=%d:"
" Rate Unknown",PhyNumber);
break;
case MPI_EVENT_SAS_PLS_LR_RATE_PHY_DISABLED:
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS PHY Link Status: Phy=%d:"
" Phy Disabled",PhyNumber);
break;
case MPI_EVENT_SAS_PLS_LR_RATE_FAILED_SPEED_NEGOTIATION:
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS PHY Link Status: Phy=%d:"
" Failed Speed Nego",PhyNumber);
break;
case MPI_EVENT_SAS_PLS_LR_RATE_SATA_OOB_COMPLETE:
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS PHY Link Status: Phy=%d:"
" Sata OOB Completed",PhyNumber);
break;
case MPI_EVENT_SAS_PLS_LR_RATE_1_5:
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS PHY Link Status: Phy=%d:"
" Rate 1.5 Gbps",PhyNumber);
break;
case MPI_EVENT_SAS_PLS_LR_RATE_3_0:
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS PHY Link Status: Phy=%d:"
" Rate 3.0 Gbps", PhyNumber);
break;
case MPI_EVENT_SAS_PLS_LR_RATE_6_0:
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS PHY Link Status: Phy=%d:"
" Rate 6.0 Gbps", PhyNumber);
break;
default:
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS PHY Link Status: Phy=%d", PhyNumber);
break;
}
break;
}
case MPI_EVENT_SAS_DISCOVERY_ERROR:
ds = "SAS Discovery Error";
break;
case MPI_EVENT_IR_RESYNC_UPDATE:
{
u8 resync_complete = (u8)(evData0 >> 16);
snprintf(evStr, EVENT_DESCR_STR_SZ,
"IR Resync Update: Complete = %d:",resync_complete);
break;
}
case MPI_EVENT_IR2:
{
u8 id = (u8)(evData0);
u8 channel = (u8)(evData0 >> 8);
u8 phys_num = (u8)(evData0 >> 24);
u8 ReasonCode = (u8)(evData0 >> 16);
switch (ReasonCode) {
case MPI_EVENT_IR2_RC_LD_STATE_CHANGED:
snprintf(evStr, EVENT_DESCR_STR_SZ,
"IR2: LD State Changed: "
"id=%d channel=%d phys_num=%d",
id, channel, phys_num);
break;
case MPI_EVENT_IR2_RC_PD_STATE_CHANGED:
snprintf(evStr, EVENT_DESCR_STR_SZ,
"IR2: PD State Changed "
"id=%d channel=%d phys_num=%d",
id, channel, phys_num);
break;
case MPI_EVENT_IR2_RC_BAD_BLOCK_TABLE_FULL:
snprintf(evStr, EVENT_DESCR_STR_SZ,
"IR2: Bad Block Table Full: "
"id=%d channel=%d phys_num=%d",
id, channel, phys_num);
break;
case MPI_EVENT_IR2_RC_PD_INSERTED:
snprintf(evStr, EVENT_DESCR_STR_SZ,
"IR2: PD Inserted: "
"id=%d channel=%d phys_num=%d",
id, channel, phys_num);
break;
case MPI_EVENT_IR2_RC_PD_REMOVED:
snprintf(evStr, EVENT_DESCR_STR_SZ,
"IR2: PD Removed: "
"id=%d channel=%d phys_num=%d",
id, channel, phys_num);
break;
case MPI_EVENT_IR2_RC_FOREIGN_CFG_DETECTED:
snprintf(evStr, EVENT_DESCR_STR_SZ,
"IR2: Foreign CFG Detected: "
"id=%d channel=%d phys_num=%d",
id, channel, phys_num);
break;
case MPI_EVENT_IR2_RC_REBUILD_MEDIUM_ERROR:
snprintf(evStr, EVENT_DESCR_STR_SZ,
"IR2: Rebuild Medium Error: "
"id=%d channel=%d phys_num=%d",
id, channel, phys_num);
break;
case MPI_EVENT_IR2_RC_DUAL_PORT_ADDED:
snprintf(evStr, EVENT_DESCR_STR_SZ,
"IR2: Dual Port Added: "
"id=%d channel=%d phys_num=%d",
id, channel, phys_num);
break;
case MPI_EVENT_IR2_RC_DUAL_PORT_REMOVED:
snprintf(evStr, EVENT_DESCR_STR_SZ,
"IR2: Dual Port Removed: "
"id=%d channel=%d phys_num=%d",
id, channel, phys_num);
break;
default:
ds = "IR2";
break;
}
break;
}
case MPI_EVENT_SAS_DISCOVERY:
{
if (evData0)
ds = "SAS Discovery: Start";
else
ds = "SAS Discovery: Stop";
break;
}
case MPI_EVENT_LOG_ENTRY_ADDED:
ds = "SAS Log Entry Added";
break;
case MPI_EVENT_SAS_BROADCAST_PRIMITIVE:
{
u8 phy_num = (u8)(evData0);
u8 port_num = (u8)(evData0 >> 8);
u8 port_width = (u8)(evData0 >> 16);
u8 primitive = (u8)(evData0 >> 24);
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS Broadcast Primitive: phy=%d port=%d "
"width=%d primitive=0x%02x",
phy_num, port_num, port_width, primitive);
break;
}
case MPI_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
{
u8 reason = (u8)(evData0);
switch (reason) {
case MPI_EVENT_SAS_INIT_RC_ADDED:
ds = "SAS Initiator Status Change: Added";
break;
case MPI_EVENT_SAS_INIT_RC_REMOVED:
ds = "SAS Initiator Status Change: Deleted";
break;
default:
ds = "SAS Initiator Status Change";
break;
}
break;
}
case MPI_EVENT_SAS_INIT_TABLE_OVERFLOW:
{
u8 max_init = (u8)(evData0);
u8 current_init = (u8)(evData0 >> 8);
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS Initiator Device Table Overflow: max initiators=%02d "
"current initiators=%02d",
max_init, current_init);
break;
}
case MPI_EVENT_SAS_SMP_ERROR:
{
u8 status = (u8)(evData0);
u8 port_num = (u8)(evData0 >> 8);
u8 result = (u8)(evData0 >> 16);
if (status == MPI_EVENT_SAS_SMP_FUNCTION_RESULT_VALID)
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS SMP Error: port=%d result=0x%02x",
port_num, result);
else if (status == MPI_EVENT_SAS_SMP_CRC_ERROR)
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS SMP Error: port=%d : CRC Error",
port_num);
else if (status == MPI_EVENT_SAS_SMP_TIMEOUT)
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS SMP Error: port=%d : Timeout",
port_num);
else if (status == MPI_EVENT_SAS_SMP_NO_DESTINATION)
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS SMP Error: port=%d : No Destination",
port_num);
else if (status == MPI_EVENT_SAS_SMP_BAD_DESTINATION)
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS SMP Error: port=%d : Bad Destination",
port_num);
else
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS SMP Error: port=%d : status=0x%02x",
port_num, status);
break;
}
case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE:
{
u8 reason = (u8)(evData0);
switch (reason) {
case MPI_EVENT_SAS_EXP_RC_ADDED:
ds = "Expander Status Change: Added";
break;
case MPI_EVENT_SAS_EXP_RC_NOT_RESPONDING:
ds = "Expander Status Change: Deleted";
break;
default:
ds = "Expander Status Change";
break;
}
break;
}
/*
* MPT base "custom" events may be added here...
*/
default:
ds = "Unknown";
break;
}
if (ds)
strscpy(evStr, ds, EVENT_DESCR_STR_SZ);
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"MPT event:(%02Xh) : %s\n",
ioc->name, event, evStr));
devtverboseprintk(ioc, printk(KERN_DEBUG MYNAM
": Event data:\n"));
for (ii = 0; ii < le16_to_cpu(pEventReply->EventDataLength); ii++)
devtverboseprintk(ioc, printk(" %08x",
le32_to_cpu(pEventReply->Data[ii])));
devtverboseprintk(ioc, printk(KERN_DEBUG "\n"));
}
#endif
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* ProcessEventNotification - Route EventNotificationReply to all event handlers
* @ioc: Pointer to MPT_ADAPTER structure
* @pEventReply: Pointer to EventNotification reply frame
* @evHandlers: Pointer to integer, number of event handlers
*
* Routes a received EventNotificationReply to all currently registered
* event handlers.
* Returns sum of event handlers return values.
*/
static int
ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply, int *evHandlers)
{
u16 evDataLen;
u32 evData0 = 0;
int ii;
u8 cb_idx;
int r = 0;
int handlers = 0;
u8 event;
/*
* Do platform normalization of values
*/
event = le32_to_cpu(pEventReply->Event) & 0xFF;
evDataLen = le16_to_cpu(pEventReply->EventDataLength);
if (evDataLen) {
evData0 = le32_to_cpu(pEventReply->Data[0]);
}
#ifdef CONFIG_FUSION_LOGGING
if (evDataLen)
mpt_display_event_info(ioc, pEventReply);
#endif
/*
* Do general / base driver event processing
*/
switch(event) {
case MPI_EVENT_EVENT_CHANGE: /* 0A */
if (evDataLen) {
u8 evState = evData0 & 0xFF;
/* CHECKME! What if evState unexpectedly says OFF (0)? */
/* Update EventState field in cached IocFacts */
if (ioc->facts.Function) {
ioc->facts.EventState = evState;
}
}
break;
case MPI_EVENT_INTEGRATED_RAID:
mptbase_raid_process_event_data(ioc,
(MpiEventDataRaid_t *)pEventReply->Data);
break;
default:
break;
}
/*
* Should this event be logged? Events are written sequentially.
* When buffer is full, start again at the top.
*/
if (ioc->events && (ioc->eventTypes & ( 1 << event))) {
int idx;
idx = ioc->eventContext % MPTCTL_EVENT_LOG_SIZE;
ioc->events[idx].event = event;
ioc->events[idx].eventContext = ioc->eventContext;
for (ii = 0; ii < 2; ii++) {
if (ii < evDataLen)
ioc->events[idx].data[ii] = le32_to_cpu(pEventReply->Data[ii]);
else
ioc->events[idx].data[ii] = 0;
}
ioc->eventContext++;
}
/*
* Call each currently registered protocol event handler.
*/
for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
if (MptEvHandlers[cb_idx]) {
devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Routing Event to event handler #%d\n",
ioc->name, cb_idx));
r += (*(MptEvHandlers[cb_idx]))(ioc, pEventReply);
handlers++;
}
}
/* FIXME? Examine results here? */
/*
* If needed, send (a single) EventAck.
*/
if (pEventReply->AckRequired == MPI_EVENT_NOTIFICATION_ACK_REQUIRED) {
devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"EventAck required\n",ioc->name));
if ((ii = SendEventAck(ioc, pEventReply)) != 0) {
devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT "SendEventAck returned %d\n",
ioc->name, ii));
}
}
*evHandlers = handlers;
return r;
}
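/*
 * Illustrative only: how a protocol driver ends up in the handler loop
 * above. It registers a callback against its callback index with
 * mpt_event_register() (exported by this file); the callback signature
 * matches MptEvHandlers as invoked above, and its return value is summed
 * into ProcessEventNotification()'s result. The names my_cb_idx and
 * my_event_process are hypothetical:
 *
 *	static int
 *	my_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *reply)
 *	{
 *		u8 event = le32_to_cpu(reply->Event) & 0xFF;
 *		// ...react to the event...
 *		return 1;
 *	}
 *
 *	mpt_event_register(my_cb_idx, my_event_process);
 */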
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_fc_log_info - Log information returned from Fibre Channel IOC.
* @ioc: Pointer to MPT_ADAPTER structure
* @log_info: U32 LogInfo reply word from the IOC
*
* Refer to lsi/mpi_log_fc.h.
*/
static void
mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info)
{
char *desc = "unknown";
switch (log_info & 0xFF000000) {
case MPI_IOCLOGINFO_FC_INIT_BASE:
desc = "FCP Initiator";
break;
case MPI_IOCLOGINFO_FC_TARGET_BASE:
desc = "FCP Target";
break;
case MPI_IOCLOGINFO_FC_LAN_BASE:
desc = "LAN";
break;
case MPI_IOCLOGINFO_FC_MSG_BASE:
desc = "MPI Message Layer";
break;
case MPI_IOCLOGINFO_FC_LINK_BASE:
desc = "FC Link";
break;
case MPI_IOCLOGINFO_FC_CTX_BASE:
desc = "Context Manager";
break;
case MPI_IOCLOGINFO_FC_INVALID_FIELD_BYTE_OFFSET:
desc = "Invalid Field Offset";
break;
case MPI_IOCLOGINFO_FC_STATE_CHANGE:
desc = "State Change Info";
break;
}
printk(MYIOC_s_INFO_FMT "LogInfo(0x%08x): SubClass={%s}, Value=(0x%06x)\n",
ioc->name, log_info, desc, (log_info & 0xFFFFFF));
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_spi_log_info - Log information returned from SCSI Parallel IOC.
* @ioc: Pointer to MPT_ADAPTER structure
* @log_info: U32 LogInfo word from the IOC
*
* Refer to lsi/sp_log.h.
*/
static void
mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info)
{
u32 info = log_info & 0x00FF0000;
char *desc = "unknown";
switch (info) {
case 0x00010000:
desc = "bug! MID not found";
break;
case 0x00020000:
desc = "Parity Error";
break;
case 0x00030000:
desc = "ASYNC Outbound Overrun";
break;
case 0x00040000:
desc = "SYNC Offset Error";
break;
case 0x00050000:
desc = "BM Change";
break;
case 0x00060000:
desc = "Msg In Overflow";
break;
case 0x00070000:
desc = "DMA Error";
break;
case 0x00080000:
desc = "Outbound DMA Overrun";
break;
case 0x00090000:
desc = "Task Management";
break;
case 0x000A0000:
desc = "Device Problem";
break;
case 0x000B0000:
desc = "Invalid Phase Change";
break;
case 0x000C0000:
desc = "Untagged Table Size";
break;
}
printk(MYIOC_s_INFO_FMT "LogInfo(0x%08x): F/W: %s\n", ioc->name, log_info, desc);
}
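/*
 * Worked example (assumed value): log_info == 0x00030000 masks to
 * info == 0x00030000, so the line printed is
 * "LogInfo(0x00030000): F/W: ASYNC Outbound Overrun".
 */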
/* strings for sas loginfo */
static char *originator_str[] = {
"IOP", /* 00h */
"PL", /* 01h */
"IR" /* 02h */
};
static char *iop_code_str[] = {
NULL, /* 00h */
"Invalid SAS Address", /* 01h */
NULL, /* 02h */
"Invalid Page", /* 03h */
"Diag Message Error", /* 04h */
"Task Terminated", /* 05h */
"Enclosure Management", /* 06h */
"Target Mode" /* 07h */
};
static char *pl_code_str[] = {
NULL, /* 00h */
"Open Failure", /* 01h */
"Invalid Scatter Gather List", /* 02h */
"Wrong Relative Offset or Frame Length", /* 03h */
"Frame Transfer Error", /* 04h */
"Transmit Frame Connected Low", /* 05h */
"SATA Non-NCQ RW Error Bit Set", /* 06h */
"SATA Read Log Receive Data Error", /* 07h */
"SATA NCQ Fail All Commands After Error", /* 08h */
"SATA Error in Receive Set Device Bit FIS", /* 09h */
"Receive Frame Invalid Message", /* 0Ah */
"Receive Context Message Valid Error", /* 0Bh */
"Receive Frame Current Frame Error", /* 0Ch */
"SATA Link Down", /* 0Dh */
"Discovery SATA Init W IOS", /* 0Eh */
"Config Invalid Page", /* 0Fh */
"Discovery SATA Init Timeout", /* 10h */
"Reset", /* 11h */
"Abort", /* 12h */
"IO Not Yet Executed", /* 13h */
"IO Executed", /* 14h */
"Persistent Reservation Out Not Affiliation "
"Owner", /* 15h */
"Open Transmit DMA Abort", /* 16h */
"IO Device Missing Delay Retry", /* 17h */
"IO Cancelled Due to Receive Error", /* 18h */
NULL, /* 19h */
NULL, /* 1Ah */
NULL, /* 1Bh */
NULL, /* 1Ch */
NULL, /* 1Dh */
NULL, /* 1Eh */
NULL, /* 1Fh */
"Enclosure Management" /* 20h */
};
static char *ir_code_str[] = {
"Raid Action Error", /* 00h */
NULL, /* 01h */
NULL, /* 02h */
NULL, /* 03h */
NULL, /* 04h */
NULL, /* 05h */
NULL, /* 06h */
NULL, /* 07h */
NULL /* 08h */
};
static char *raid_sub_code_str[] = {
NULL, /* 00h */
"Volume Creation Failed: Data Passed too "
"Large", /* 01h */
"Volume Creation Failed: Duplicate Volumes "
"Attempted", /* 02h */
"Volume Creation Failed: Max Number "
"Supported Volumes Exceeded", /* 03h */
"Volume Creation Failed: DMA Error", /* 04h */
"Volume Creation Failed: Invalid Volume Type", /* 05h */
"Volume Creation Failed: Error Reading "
"MFG Page 4", /* 06h */
"Volume Creation Failed: Creating Internal "
"Structures", /* 07h */
NULL, /* 08h */
NULL, /* 09h */
NULL, /* 0Ah */
NULL, /* 0Bh */
NULL, /* 0Ch */
NULL, /* 0Dh */
NULL, /* 0Eh */
NULL, /* 0Fh */
"Activation failed: Already Active Volume", /* 10h */
"Activation failed: Unsupported Volume Type", /* 11h */
"Activation failed: Too Many Active Volumes", /* 12h */
"Activation failed: Volume ID in Use", /* 13h */
"Activation failed: Reported Failure", /* 14h */
"Activation failed: Importing a Volume", /* 15h */
NULL, /* 16h */
NULL, /* 17h */
NULL, /* 18h */
NULL, /* 19h */
NULL, /* 1Ah */
NULL, /* 1Bh */
NULL, /* 1Ch */
NULL, /* 1Dh */
NULL, /* 1Eh */
NULL, /* 1Fh */
"Phys Disk failed: Too Many Phys Disks", /* 20h */
"Phys Disk failed: Data Passed too Large", /* 21h */
"Phys Disk failed: DMA Error", /* 22h */
"Phys Disk failed: Invalid <channel:id>", /* 23h */
"Phys Disk failed: Creating Phys Disk Config "
"Page", /* 24h */
NULL, /* 25h */
NULL, /* 26h */
NULL, /* 27h */
NULL, /* 28h */
NULL, /* 29h */
NULL, /* 2Ah */
NULL, /* 2Bh */
NULL, /* 2Ch */
NULL, /* 2Dh */
NULL, /* 2Eh */
NULL, /* 2Fh */
"Compatibility Error: IR Disabled", /* 30h */
"Compatibility Error: Inquiry Command Failed", /* 31h */
"Compatibility Error: Device not Direct Access "
"Device ", /* 32h */
"Compatibility Error: Removable Device Found", /* 33h */
"Compatibility Error: Device SCSI Version not "
"2 or Higher", /* 34h */
"Compatibility Error: SATA Device, 48 BIT LBA "
"not Supported", /* 35h */
"Compatibility Error: Device doesn't have "
"512 Byte Block Sizes", /* 36h */
"Compatibility Error: Volume Type Check Failed", /* 37h */
"Compatibility Error: Volume Type is "
"Unsupported by FW", /* 38h */
"Compatibility Error: Disk Drive too Small for "
"use in Volume", /* 39h */
"Compatibility Error: Phys Disk for Create "
"Volume not Found", /* 3Ah */
"Compatibility Error: Too Many or too Few "
"Disks for Volume Type", /* 3Bh */
"Compatibility Error: Disk stripe Sizes "
"Must be 64KB", /* 3Ch */
"Compatibility Error: IME Size Limited to < 2TB", /* 3Dh */
};
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_sas_log_info - Log information returned from SAS IOC.
* @ioc: Pointer to MPT_ADAPTER structure
* @log_info: U32 LogInfo reply word from the IOC
* @cb_idx: callback function's handle
*
* Refer to lsi/mpi_log_sas.h.
**/
static void
mpt_sas_log_info(MPT_ADAPTER *ioc, u32 log_info, u8 cb_idx)
{
union loginfo_type {
u32 loginfo;
struct {
u32 subcode:16;
u32 code:8;
u32 originator:4;
u32 bus_type:4;
} dw;
};
union loginfo_type sas_loginfo;
char *originator_desc = NULL;
char *code_desc = NULL;
char *sub_code_desc = NULL;
sas_loginfo.loginfo = log_info;
/* Only SAS loginfos with a known originator are decoded below */
if ((sas_loginfo.dw.bus_type != 3 /*SAS*/) ||
(sas_loginfo.dw.originator >= ARRAY_SIZE(originator_str)))
return;
originator_desc = originator_str[sas_loginfo.dw.originator];
switch (sas_loginfo.dw.originator) {
case 0: /* IOP */
if (sas_loginfo.dw.code <
ARRAY_SIZE(iop_code_str))
code_desc = iop_code_str[sas_loginfo.dw.code];
break;
case 1: /* PL */
if (sas_loginfo.dw.code <
ARRAY_SIZE(pl_code_str))
code_desc = pl_code_str[sas_loginfo.dw.code];
break;
case 2: /* IR */
if (sas_loginfo.dw.code >=
ARRAY_SIZE(ir_code_str))
break;
code_desc = ir_code_str[sas_loginfo.dw.code];
if (sas_loginfo.dw.subcode >=
ARRAY_SIZE(raid_sub_code_str))
break;
if (sas_loginfo.dw.code == 0)
sub_code_desc =
raid_sub_code_str[sas_loginfo.dw.subcode];
break;
default:
return;
}
if (sub_code_desc != NULL)
printk(MYIOC_s_INFO_FMT
"LogInfo(0x%08x): Originator={%s}, Code={%s},"
" SubCode={%s} cb_idx %s\n",
ioc->name, log_info, originator_desc, code_desc,
sub_code_desc, MptCallbacksName[cb_idx]);
else if (code_desc != NULL)
printk(MYIOC_s_INFO_FMT
"LogInfo(0x%08x): Originator={%s}, Code={%s},"
" SubCode(0x%04x) cb_idx %s\n",
ioc->name, log_info, originator_desc, code_desc,
sas_loginfo.dw.subcode, MptCallbacksName[cb_idx]);
else
printk(MYIOC_s_INFO_FMT
"LogInfo(0x%08x): Originator={%s}, Code=(0x%02x),"
" SubCode(0x%04x) cb_idx %s\n",
ioc->name, log_info, originator_desc,
sas_loginfo.dw.code, sas_loginfo.dw.subcode,
MptCallbacksName[cb_idx]);
}
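/*
 * Worked example (assumed value): log_info == 0x31110000 splits via the
 * loginfo_type bitfields into bus_type = 3 (SAS), originator = 1 ("PL"),
 * code = 0x11 ("Reset" in pl_code_str) and subcode = 0x0000, so the
 * middle printk above fires:
 * "LogInfo(0x31110000): Originator={PL}, Code={Reset}, SubCode(0x0000) ...".
 */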
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_iocstatus_info_config - IOCSTATUS information for config pages
* @ioc: Pointer to MPT_ADAPTER structure
* @ioc_status: U32 IOCStatus word from IOC
* @mf: Pointer to MPT request frame
*
* Refer to lsi/mpi.h.
**/
static void
mpt_iocstatus_info_config(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf)
{
Config_t *pReq = (Config_t *)mf;
char extend_desc[EVENT_DESCR_STR_SZ];
char *desc = NULL;
u32 form;
u8 page_type;
if (pReq->Header.PageType == MPI_CONFIG_PAGETYPE_EXTENDED)
page_type = pReq->ExtPageType;
else
page_type = pReq->Header.PageType;
/*
* ignore invalid page messages for GET_NEXT_HANDLE
*/
form = le32_to_cpu(pReq->PageAddress);
if (ioc_status == MPI_IOCSTATUS_CONFIG_INVALID_PAGE) {
if (page_type == MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE ||
page_type == MPI_CONFIG_EXTPAGETYPE_SAS_EXPANDER ||
page_type == MPI_CONFIG_EXTPAGETYPE_ENCLOSURE) {
if ((form >> MPI_SAS_DEVICE_PGAD_FORM_SHIFT) ==
MPI_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE)
return;
}
if (page_type == MPI_CONFIG_PAGETYPE_FC_DEVICE)
if ((form & MPI_FC_DEVICE_PGAD_FORM_MASK) ==
MPI_FC_DEVICE_PGAD_FORM_NEXT_DID)
return;
}
snprintf(extend_desc, EVENT_DESCR_STR_SZ,
"type=%02Xh, page=%02Xh, action=%02Xh, form=%08Xh",
page_type, pReq->Header.PageNumber, pReq->Action, form);
switch (ioc_status) {
case MPI_IOCSTATUS_CONFIG_INVALID_ACTION: /* 0x0020 */
desc = "Config Page Invalid Action";
break;
case MPI_IOCSTATUS_CONFIG_INVALID_TYPE: /* 0x0021 */
desc = "Config Page Invalid Type";
break;
case MPI_IOCSTATUS_CONFIG_INVALID_PAGE: /* 0x0022 */
desc = "Config Page Invalid Page";
break;
case MPI_IOCSTATUS_CONFIG_INVALID_DATA: /* 0x0023 */
desc = "Config Page Invalid Data";
break;
case MPI_IOCSTATUS_CONFIG_NO_DEFAULTS: /* 0x0024 */
desc = "Config Page No Defaults";
break;
case MPI_IOCSTATUS_CONFIG_CANT_COMMIT: /* 0x0025 */
desc = "Config Page Can't Commit";
break;
}
if (!desc)
return;
dreplyprintk(ioc, printk(MYIOC_s_DEBUG_FMT "IOCStatus(0x%04X): %s: %s\n",
ioc->name, ioc_status, desc, extend_desc));
}
/**
* mpt_iocstatus_info - IOCSTATUS information returned from IOC.
* @ioc: Pointer to MPT_ADAPTER structure
* @ioc_status: U32 IOCStatus word from IOC
* @mf: Pointer to MPT request frame
*
* Refer to lsi/mpi.h.
**/
static void
mpt_iocstatus_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf)
{
u32 status = ioc_status & MPI_IOCSTATUS_MASK;
char *desc = NULL;
switch (status) {
/****************************************************************************/
/* Common IOCStatus values for all replies */
/****************************************************************************/
case MPI_IOCSTATUS_INVALID_FUNCTION: /* 0x0001 */
desc = "Invalid Function";
break;
case MPI_IOCSTATUS_BUSY: /* 0x0002 */
desc = "Busy";
break;
case MPI_IOCSTATUS_INVALID_SGL: /* 0x0003 */
desc = "Invalid SGL";
break;
case MPI_IOCSTATUS_INTERNAL_ERROR: /* 0x0004 */
desc = "Internal Error";
break;
case MPI_IOCSTATUS_RESERVED: /* 0x0005 */
desc = "Reserved";
break;
case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES: /* 0x0006 */
desc = "Insufficient Resources";
break;
case MPI_IOCSTATUS_INVALID_FIELD: /* 0x0007 */
desc = "Invalid Field";
break;
case MPI_IOCSTATUS_INVALID_STATE: /* 0x0008 */
desc = "Invalid State";
break;
/****************************************************************************/
/* Config IOCStatus values */
/****************************************************************************/
case MPI_IOCSTATUS_CONFIG_INVALID_ACTION: /* 0x0020 */
case MPI_IOCSTATUS_CONFIG_INVALID_TYPE: /* 0x0021 */
case MPI_IOCSTATUS_CONFIG_INVALID_PAGE: /* 0x0022 */
case MPI_IOCSTATUS_CONFIG_INVALID_DATA: /* 0x0023 */
case MPI_IOCSTATUS_CONFIG_NO_DEFAULTS: /* 0x0024 */
case MPI_IOCSTATUS_CONFIG_CANT_COMMIT: /* 0x0025 */
mpt_iocstatus_info_config(ioc, status, mf);
break;
/****************************************************************************/
/* SCSIIO Reply (SPI, FCP, SAS) initiator values */
/* */
/* Look at mptscsih_iocstatus_info_scsiio in mptscsih.c */
/* */
/****************************************************************************/
case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: /* 0x0040 */
case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: /* 0x0045 */
case MPI_IOCSTATUS_SCSI_INVALID_BUS: /* 0x0041 */
case MPI_IOCSTATUS_SCSI_INVALID_TARGETID: /* 0x0042 */
case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: /* 0x0043 */
case MPI_IOCSTATUS_SCSI_DATA_OVERRUN: /* 0x0044 */
case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR: /* 0x0046 */
case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR: /* 0x0047 */
case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */
case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: /* 0x0049 */
case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED: /* 0x004A */
case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: /* 0x004B */
case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */
break;
/****************************************************************************/
/* SCSI Target values */
/****************************************************************************/
case MPI_IOCSTATUS_TARGET_PRIORITY_IO: /* 0x0060 */
desc = "Target: Priority IO";
break;
case MPI_IOCSTATUS_TARGET_INVALID_PORT: /* 0x0061 */
desc = "Target: Invalid Port";
break;
case MPI_IOCSTATUS_TARGET_INVALID_IO_INDEX: /* 0x0062 */
desc = "Target Invalid IO Index:";
break;
case MPI_IOCSTATUS_TARGET_ABORTED: /* 0x0063 */
desc = "Target: Aborted";
break;
case MPI_IOCSTATUS_TARGET_NO_CONN_RETRYABLE: /* 0x0064 */
desc = "Target: No Conn Retryable";
break;
case MPI_IOCSTATUS_TARGET_NO_CONNECTION: /* 0x0065 */
desc = "Target: No Connection";
break;
case MPI_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH: /* 0x006A */
desc = "Target: Transfer Count Mismatch";
break;
case MPI_IOCSTATUS_TARGET_STS_DATA_NOT_SENT: /* 0x006B */
desc = "Target: STS Data not Sent";
break;
case MPI_IOCSTATUS_TARGET_DATA_OFFSET_ERROR: /* 0x006D */
desc = "Target: Data Offset Error";
break;
case MPI_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA: /* 0x006E */
desc = "Target: Too Much Write Data";
break;
case MPI_IOCSTATUS_TARGET_IU_TOO_SHORT: /* 0x006F */
desc = "Target: IU Too Short";
break;
case MPI_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT: /* 0x0070 */
desc = "Target: ACK NAK Timeout";
break;
case MPI_IOCSTATUS_TARGET_NAK_RECEIVED: /* 0x0071 */
desc = "Target: Nak Received";
break;
/****************************************************************************/
/* Fibre Channel Direct Access values */
/****************************************************************************/
case MPI_IOCSTATUS_FC_ABORTED: /* 0x0066 */
desc = "FC: Aborted";
break;
case MPI_IOCSTATUS_FC_RX_ID_INVALID: /* 0x0067 */
desc = "FC: RX ID Invalid";
break;
case MPI_IOCSTATUS_FC_DID_INVALID: /* 0x0068 */
desc = "FC: DID Invalid";
break;
case MPI_IOCSTATUS_FC_NODE_LOGGED_OUT: /* 0x0069 */
desc = "FC: Node Logged Out";
break;
case MPI_IOCSTATUS_FC_EXCHANGE_CANCELED: /* 0x006C */
desc = "FC: Exchange Canceled";
break;
/****************************************************************************/
/* LAN values */
/****************************************************************************/
case MPI_IOCSTATUS_LAN_DEVICE_NOT_FOUND: /* 0x0080 */
desc = "LAN: Device not Found";
break;
case MPI_IOCSTATUS_LAN_DEVICE_FAILURE: /* 0x0081 */
desc = "LAN: Device Failure";
break;
case MPI_IOCSTATUS_LAN_TRANSMIT_ERROR: /* 0x0082 */
desc = "LAN: Transmit Error";
break;
case MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED: /* 0x0083 */
desc = "LAN: Transmit Aborted";
break;
case MPI_IOCSTATUS_LAN_RECEIVE_ERROR: /* 0x0084 */
desc = "LAN: Receive Error";
break;
case MPI_IOCSTATUS_LAN_RECEIVE_ABORTED: /* 0x0085 */
desc = "LAN: Receive Aborted";
break;
case MPI_IOCSTATUS_LAN_PARTIAL_PACKET: /* 0x0086 */
desc = "LAN: Partial Packet";
break;
case MPI_IOCSTATUS_LAN_CANCELED: /* 0x0087 */
desc = "LAN: Canceled";
break;
/****************************************************************************/
/* Serial Attached SCSI values */
/****************************************************************************/
case MPI_IOCSTATUS_SAS_SMP_REQUEST_FAILED: /* 0x0090 */
desc = "SAS: SMP Request Failed";
break;
case MPI_IOCSTATUS_SAS_SMP_DATA_OVERRUN: /* 0x0091 */
desc = "SAS: SMP Data Overrun";
break;
default:
desc = "Others";
break;
}
if (!desc)
return;
dreplyprintk(ioc, printk(MYIOC_s_DEBUG_FMT "IOCStatus(0x%04X): %s\n",
ioc->name, status, desc));
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
EXPORT_SYMBOL(mpt_attach);
EXPORT_SYMBOL(mpt_detach);
#ifdef CONFIG_PM
EXPORT_SYMBOL(mpt_resume);
EXPORT_SYMBOL(mpt_suspend);
#endif
EXPORT_SYMBOL(ioc_list);
EXPORT_SYMBOL(mpt_register);
EXPORT_SYMBOL(mpt_deregister);
EXPORT_SYMBOL(mpt_event_register);
EXPORT_SYMBOL(mpt_event_deregister);
EXPORT_SYMBOL(mpt_reset_register);
EXPORT_SYMBOL(mpt_reset_deregister);
EXPORT_SYMBOL(mpt_device_driver_register);
EXPORT_SYMBOL(mpt_device_driver_deregister);
EXPORT_SYMBOL(mpt_get_msg_frame);
EXPORT_SYMBOL(mpt_put_msg_frame);
EXPORT_SYMBOL(mpt_put_msg_frame_hi_pri);
EXPORT_SYMBOL(mpt_free_msg_frame);
EXPORT_SYMBOL(mpt_send_handshake_request);
EXPORT_SYMBOL(mpt_verify_adapter);
EXPORT_SYMBOL(mpt_GetIocState);
EXPORT_SYMBOL(mpt_print_ioc_summary);
EXPORT_SYMBOL(mpt_HardResetHandler);
EXPORT_SYMBOL(mpt_config);
EXPORT_SYMBOL(mpt_findImVolumes);
EXPORT_SYMBOL(mpt_alloc_fw_memory);
EXPORT_SYMBOL(mpt_free_fw_memory);
EXPORT_SYMBOL(mptbase_sas_persist_operation);
EXPORT_SYMBOL(mpt_raid_phys_disk_pg0);
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* fusion_init - Fusion MPT base driver initialization routine.
*
* Returns 0 for success, non-zero for failure.
*/
static int __init
fusion_init(void)
{
u8 cb_idx;
show_mptmod_ver(my_NAME, my_VERSION);
printk(KERN_INFO COPYRIGHT "\n");
for (cb_idx = 0; cb_idx < MPT_MAX_PROTOCOL_DRIVERS; cb_idx++) {
MptCallbacks[cb_idx] = NULL;
MptDriverClass[cb_idx] = MPTUNKNOWN_DRIVER;
MptEvHandlers[cb_idx] = NULL;
MptResetHandlers[cb_idx] = NULL;
}
/* Register ourselves (mptbase) in order to facilitate
* EventNotification handling.
*/
mpt_base_index = mpt_register(mptbase_reply, MPTBASE_DRIVER,
"mptbase_reply");
/* Register for hard reset handling callbacks.
*/
mpt_reset_register(mpt_base_index, mpt_ioc_reset);
#ifdef CONFIG_PROC_FS
(void) procmpt_create();
#endif
return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* fusion_exit - Perform driver unload cleanup.
*
* This routine frees all resources associated with each MPT adapter
* and removes all %MPT_PROCFS_MPTBASEDIR entries.
*/
static void __exit
fusion_exit(void)
{
mpt_reset_deregister(mpt_base_index);
#ifdef CONFIG_PROC_FS
procmpt_destroy();
#endif
}
module_init(fusion_init);
module_exit(fusion_exit);
| linux-master | drivers/message/fusion/mptbase.c |
/*
* linux/drivers/message/fusion/mptlan.c
* IP Over Fibre Channel device driver.
* For use with LSI Fibre Channel PCI chip/adapters
* running LSI Fusion MPT (Message Passing Technology) firmware.
*
* Copyright (c) 2000-2008 LSI Corporation
* (mailto:[email protected])
*
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
NO WARRANTY
THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
solely responsible for determining the appropriateness of using and
distributing the Program and assumes all risks associated with its
exercise of rights under this Agreement, including but not limited to
the risks and costs of program errors, damage to or loss of data,
programs or equipment, and unavailability or interruption of operations.
DISCLAIMER OF LIABILITY
NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* Define statements used for debugging
*/
//#define MPT_LAN_IO_DEBUG
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
#include "mptlan.h"
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/slab.h>
#define my_VERSION MPT_LINUX_VERSION_COMMON
#define MYNAM "mptlan"
MODULE_LICENSE("GPL");
MODULE_VERSION(my_VERSION);
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* MPT LAN message sizes without variable part.
*/
#define MPT_LAN_RECEIVE_POST_REQUEST_SIZE \
(sizeof(LANReceivePostRequest_t) - sizeof(SGE_MPI_UNION))
/*
* Fusion MPT LAN private structures
*/
struct BufferControl {
struct sk_buff *skb;
dma_addr_t dma;
unsigned int len;
};
struct mpt_lan_priv {
MPT_ADAPTER *mpt_dev;
u8 pnum; /* Port number in the IOC. This is not a Unix network port! */
atomic_t buckets_out; /* number of unused buckets on IOC */
int bucketthresh; /* Send more when this many left */
int *mpt_txfidx; /* Free Tx Context list */
int mpt_txfidx_tail;
spinlock_t txfidx_lock;
int *mpt_rxfidx; /* Free Rx Context list */
int mpt_rxfidx_tail;
spinlock_t rxfidx_lock;
struct BufferControl *RcvCtl; /* Receive BufferControl structs */
struct BufferControl *SendCtl; /* Send BufferControl structs */
int max_buckets_out; /* Max buckets to send to IOC */
int tx_max_out; /* IOC's Tx queue len */
u32 total_posted;
u32 total_received;
struct delayed_work post_buckets_task;
struct net_device *dev;
unsigned long post_buckets_active;
};
struct mpt_lan_ohdr {
u16 dtype;
u8 daddr[FC_ALEN];
u16 stype;
u8 saddr[FC_ALEN];
};
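/*
 * Editorial note (not in the original source): mpt_lan_ohdr mirrors an
 * Ethernet-style header, but both addresses are FC_ALEN (6-byte) Fibre
 * Channel addresses, matching the IP-over-FC framing described in the
 * header comment at the top of this file.
 */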
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* Forward protos...
*/
static int lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
MPT_FRAME_HDR *reply);
static int mpt_lan_open(struct net_device *dev);
static int mpt_lan_reset(struct net_device *dev);
static int mpt_lan_close(struct net_device *dev);
static void mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv);
static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
int priority);
static int mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg);
static int mpt_lan_receive_post_reply(struct net_device *dev,
LANReceivePostReply_t *pRecvRep);
static int mpt_lan_send_turbo(struct net_device *dev, u32 tmsg);
static int mpt_lan_send_reply(struct net_device *dev,
LANSendReply_t *pSendRep);
static int mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase);
static int mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
static unsigned short mpt_lan_type_trans(struct sk_buff *skb,
struct net_device *dev);
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* Fusion MPT LAN private data
*/
static u8 LanCtx = MPT_MAX_PROTOCOL_DRIVERS;
static u32 max_buckets_out = 127;
static u32 tx_max_out_p = 127 - 16;
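/*
 * Illustrative note: these defaults seed the per-port credit scheme
 * tracked in struct mpt_lan_priv above (buckets_out/max_buckets_out for
 * receive buffers posted to the IOC, tx_max_out for the transmit queue
 * depth). Keeping tx_max_out_p at 127 - 16 leaves headroom below the
 * 127-bucket default; the precise rationale for the margin is an
 * assumption.
 */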
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* lan_reply - Handle all data sent from the hardware.
* @ioc: Pointer to MPT_ADAPTER structure
* @mf: Pointer to original MPT request frame (NULL if TurboReply)
* @reply: Pointer to MPT reply frame
*
* Returns 1 indicating original alloc'd request frame ptr
* should be freed, or 0 if it shouldn't.
*/
static int
lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
{
struct net_device *dev = ioc->netdev;
int FreeReqFrame = 0;
dioprintk((KERN_INFO MYNAM ": %s/%s: Got reply.\n",
IOC_AND_NETDEV_NAMES_s_s(dev)));
// dioprintk((KERN_INFO MYNAM "@lan_reply: mf = %p, reply = %p\n",
// mf, reply));
if (mf == NULL) {
u32 tmsg = CAST_PTR_TO_U32(reply);
dioprintk((KERN_INFO MYNAM ": %s/%s: @lan_reply, tmsg %08x\n",
IOC_AND_NETDEV_NAMES_s_s(dev),
tmsg));
switch (GET_LAN_FORM(tmsg)) {
// NOTE! (Optimization) First case here is now caught in
// mptbase.c::mpt_interrupt() routine and the callback here
// is now skipped for this case!
#if 0
case LAN_REPLY_FORM_MESSAGE_CONTEXT:
// dioprintk((KERN_INFO MYNAM "/lan_reply: "
// "MessageContext turbo reply received\n"));
FreeReqFrame = 1;
break;
#endif
case LAN_REPLY_FORM_SEND_SINGLE:
// dioprintk((MYNAM "/lan_reply: "
// "calling mpt_lan_send_reply (turbo)\n"));
// Potential BUG here?
// FreeReqFrame = mpt_lan_send_turbo(dev, tmsg);
// If/when mpt_lan_send_turbo would return 1 here,
// calling routine (mptbase.c|mpt_interrupt)
// would Oops because mf has already been set
// to NULL. So after return from this func,
// mpt_interrupt() will attempt to put (NULL) mf ptr
// item back onto its adapter FreeQ - Oops!:-(
// It's Ok, since mpt_lan_send_turbo() *currently*
// always returns 0, but..., just in case:
(void) mpt_lan_send_turbo(dev, tmsg);
FreeReqFrame = 0;
break;
case LAN_REPLY_FORM_RECEIVE_SINGLE:
// dioprintk((KERN_INFO MYNAM "@lan_reply: "
// "rcv-Turbo = %08x\n", tmsg));
mpt_lan_receive_post_turbo(dev, tmsg);
break;
default:
printk (KERN_ERR MYNAM "/lan_reply: Got a turbo reply "
"that I don't know what to do with\n");
/* CHECKME! Hmmm... FreeReqFrame is 0 here; is that right? */
break;
}
return FreeReqFrame;
}
// msg = (u32 *) reply;
// dioprintk((KERN_INFO MYNAM "@lan_reply: msg = %08x %08x %08x %08x\n",
// le32_to_cpu(msg[0]), le32_to_cpu(msg[1]),
// le32_to_cpu(msg[2]), le32_to_cpu(msg[3])));
// dioprintk((KERN_INFO MYNAM "@lan_reply: Function = %02xh\n",
// reply->u.hdr.Function));
switch (reply->u.hdr.Function) {
case MPI_FUNCTION_LAN_SEND:
{
LANSendReply_t *pSendRep;
pSendRep = (LANSendReply_t *) reply;
FreeReqFrame = mpt_lan_send_reply(dev, pSendRep);
break;
}
case MPI_FUNCTION_LAN_RECEIVE:
{
LANReceivePostReply_t *pRecvRep;
pRecvRep = (LANReceivePostReply_t *) reply;
if (pRecvRep->NumberOfContexts) {
mpt_lan_receive_post_reply(dev, pRecvRep);
if (!(pRecvRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
FreeReqFrame = 1;
} else
dioprintk((KERN_INFO MYNAM "@lan_reply: zero context "
"ReceivePostReply received.\n"));
break;
}
case MPI_FUNCTION_LAN_RESET:
/* Just a default reply. Might want to check it to
* make sure that everything went ok.
*/
FreeReqFrame = 1;
break;
case MPI_FUNCTION_EVENT_NOTIFICATION:
case MPI_FUNCTION_EVENT_ACK:
/* _EVENT_NOTIFICATION should NOT come down this path any more.
* Should be routed to mpt_lan_event_process(), but just in case...
*/
FreeReqFrame = 1;
break;
default:
printk (KERN_ERR MYNAM "/lan_reply: Got a non-turbo "
"reply that I don't know what to do with\n");
/* CHECKME! Hmmm... FreeReqFrame is 0 here; is that right? */
FreeReqFrame = 1;
break;
}
return FreeReqFrame;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
{
struct net_device *dev = ioc->netdev;
struct mpt_lan_priv *priv;
if (dev == NULL)
return(1);
else
priv = netdev_priv(dev);
dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n",
reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));
if (priv->mpt_rxfidx == NULL)
return (1);
if (reset_phase == MPT_IOC_SETUP_RESET) {
;
} else if (reset_phase == MPT_IOC_PRE_RESET) {
int i;
unsigned long flags;
netif_stop_queue(dev);
dlprintk ((KERN_INFO "mptlan/ioc_reset: called netif_stop_queue for %s.\n", dev->name));
atomic_set(&priv->buckets_out, 0);
/* Reset Rx Free Tail index and re-populate the queue. */
spin_lock_irqsave(&priv->rxfidx_lock, flags);
priv->mpt_rxfidx_tail = -1;
for (i = 0; i < priv->max_buckets_out; i++)
priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
} else {
mpt_lan_post_receive_buckets(priv);
netif_wake_queue(dev);
}
return 1;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
{
dlprintk((KERN_INFO MYNAM ": MPT event routed to LAN driver!\n"));
switch (le32_to_cpu(pEvReply->Event)) {
case MPI_EVENT_NONE: /* 00 */
case MPI_EVENT_LOG_DATA: /* 01 */
case MPI_EVENT_STATE_CHANGE: /* 02 */
case MPI_EVENT_UNIT_ATTENTION: /* 03 */
case MPI_EVENT_IOC_BUS_RESET: /* 04 */
case MPI_EVENT_EXT_BUS_RESET: /* 05 */
case MPI_EVENT_RESCAN: /* 06 */
/* Ok, do we need to do anything here? As far as
I can tell, this is when a new device gets added
to the loop. */
case MPI_EVENT_LINK_STATUS_CHANGE: /* 07 */
case MPI_EVENT_LOOP_STATE_CHANGE: /* 08 */
case MPI_EVENT_LOGOUT: /* 09 */
case MPI_EVENT_EVENT_CHANGE: /* 0A */
default:
break;
}
/*
* NOTE: pEvent->AckRequired handling now done in mptbase.c;
* Do NOT do it here now!
*/
return 1;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_open(struct net_device *dev)
{
struct mpt_lan_priv *priv = netdev_priv(dev);
int i;
if (mpt_lan_reset(dev) != 0) {
MPT_ADAPTER *mpt_dev = priv->mpt_dev;
printk (KERN_WARNING MYNAM "/lan_open: lan_reset failed.");
if (mpt_dev->active)
printk ("The ioc is active. Perhaps it needs to be"
" reset?\n");
else
printk ("The ioc in inactive, most likely in the "
"process of being reset. Please try again in "
"a moment.\n");
}
priv->mpt_txfidx = kmalloc_array(priv->tx_max_out, sizeof(int),
GFP_KERNEL);
if (priv->mpt_txfidx == NULL)
goto out;
priv->mpt_txfidx_tail = -1;
priv->SendCtl = kcalloc(priv->tx_max_out, sizeof(struct BufferControl),
GFP_KERNEL);
if (priv->SendCtl == NULL)
goto out_mpt_txfidx;
for (i = 0; i < priv->tx_max_out; i++)
priv->mpt_txfidx[++priv->mpt_txfidx_tail] = i;
dlprintk((KERN_INFO MYNAM "@lo: Finished initializing SendCtl\n"));
priv->mpt_rxfidx = kmalloc_array(priv->max_buckets_out, sizeof(int),
GFP_KERNEL);
if (priv->mpt_rxfidx == NULL)
goto out_SendCtl;
priv->mpt_rxfidx_tail = -1;
priv->RcvCtl = kcalloc(priv->max_buckets_out,
sizeof(struct BufferControl),
GFP_KERNEL);
if (priv->RcvCtl == NULL)
goto out_mpt_rxfidx;
for (i = 0; i < priv->max_buckets_out; i++)
priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
/**/ dlprintk((KERN_INFO MYNAM "/lo: txfidx contains - "));
/**/ for (i = 0; i < priv->tx_max_out; i++)
/**/ dlprintk((" %xh", priv->mpt_txfidx[i]));
/**/ dlprintk(("\n"));
dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));
mpt_lan_post_receive_buckets(priv);
printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
IOC_AND_NETDEV_NAMES_s_s(dev));
if (mpt_event_register(LanCtx, mpt_lan_event_process) != 0) {
printk (KERN_WARNING MYNAM "/lo: Unable to register for Event"
" Notifications. This is a bad thing! We're not going "
"to go ahead, but I'd be leery of system stability at "
"this point.\n");
}
netif_start_queue(dev);
dlprintk((KERN_INFO MYNAM "/lo: Done.\n"));
return 0;
out_mpt_rxfidx:
kfree(priv->mpt_rxfidx);
priv->mpt_rxfidx = NULL;
out_SendCtl:
kfree(priv->SendCtl);
priv->SendCtl = NULL;
out_mpt_txfidx:
kfree(priv->mpt_txfidx);
priv->mpt_txfidx = NULL;
out: return -ENOMEM;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Send a LanReset message to the FW. This should result in the FW returning
any buckets it still has. */
static int
mpt_lan_reset(struct net_device *dev)
{
MPT_FRAME_HDR *mf;
LANResetRequest_t *pResetReq;
struct mpt_lan_priv *priv = netdev_priv(dev);
mf = mpt_get_msg_frame(LanCtx, priv->mpt_dev);
if (mf == NULL) {
/* dlprintk((KERN_ERR MYNAM "/reset: Evil funkiness abounds! "
"Unable to allocate a request frame.\n"));
*/
return -1;
}
pResetReq = (LANResetRequest_t *) mf;
pResetReq->Function = MPI_FUNCTION_LAN_RESET;
pResetReq->ChainOffset = 0;
pResetReq->Reserved = 0;
pResetReq->PortNumber = priv->pnum;
pResetReq->MsgFlags = 0;
pResetReq->Reserved2 = 0;
mpt_put_msg_frame(LanCtx, priv->mpt_dev, mf);
return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_close(struct net_device *dev)
{
struct mpt_lan_priv *priv = netdev_priv(dev);
MPT_ADAPTER *mpt_dev = priv->mpt_dev;
unsigned long timeout;
int i;
dlprintk((KERN_INFO MYNAM ": mpt_lan_close called\n"));
mpt_event_deregister(LanCtx);
dlprintk((KERN_INFO MYNAM ":lan_close: Posted %d buckets "
"since driver was loaded, %d still out\n",
priv->total_posted,atomic_read(&priv->buckets_out)));
netif_stop_queue(dev);
mpt_lan_reset(dev);
timeout = jiffies + 2 * HZ;
while (atomic_read(&priv->buckets_out) && time_before(jiffies, timeout))
schedule_timeout_interruptible(1);
for (i = 0; i < priv->max_buckets_out; i++) {
if (priv->RcvCtl[i].skb != NULL) {
/**/ dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x "
/**/ "is still out\n", i));
dma_unmap_single(&mpt_dev->pcidev->dev,
priv->RcvCtl[i].dma,
priv->RcvCtl[i].len, DMA_FROM_DEVICE);
dev_kfree_skb(priv->RcvCtl[i].skb);
}
}
kfree(priv->RcvCtl);
kfree(priv->mpt_rxfidx);
for (i = 0; i < priv->tx_max_out; i++) {
if (priv->SendCtl[i].skb != NULL) {
dma_unmap_single(&mpt_dev->pcidev->dev,
priv->SendCtl[i].dma,
priv->SendCtl[i].len, DMA_TO_DEVICE);
dev_kfree_skb(priv->SendCtl[i].skb);
}
}
kfree(priv->SendCtl);
kfree(priv->mpt_txfidx);
atomic_set(&priv->buckets_out, 0);
printk(KERN_INFO MYNAM ": %s/%s: interface down & inactive\n",
IOC_AND_NETDEV_NAMES_s_s(dev));
return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Tx timeout handler. */
static void
mpt_lan_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct mpt_lan_priv *priv = netdev_priv(dev);
MPT_ADAPTER *mpt_dev = priv->mpt_dev;
if (mpt_dev->active) {
dlprintk (("mptlan/tx_timeout: calling netif_wake_queue for %s.\n", dev->name));
netif_wake_queue(dev);
}
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
//static inline int
static int
mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
{
struct mpt_lan_priv *priv = netdev_priv(dev);
MPT_ADAPTER *mpt_dev = priv->mpt_dev;
struct sk_buff *sent;
unsigned long flags;
u32 ctx;
ctx = GET_LAN_BUFFER_CONTEXT(tmsg);
sent = priv->SendCtl[ctx].skb;
dev->stats.tx_packets++;
dev->stats.tx_bytes += sent->len;
dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
IOC_AND_NETDEV_NAMES_s_s(dev),
__func__, sent));
priv->SendCtl[ctx].skb = NULL;
dma_unmap_single(&mpt_dev->pcidev->dev, priv->SendCtl[ctx].dma,
priv->SendCtl[ctx].len, DMA_TO_DEVICE);
dev_kfree_skb_irq(sent);
spin_lock_irqsave(&priv->txfidx_lock, flags);
priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
spin_unlock_irqrestore(&priv->txfidx_lock, flags);
netif_wake_queue(dev);
return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
{
struct mpt_lan_priv *priv = netdev_priv(dev);
MPT_ADAPTER *mpt_dev = priv->mpt_dev;
struct sk_buff *sent;
unsigned long flags;
int FreeReqFrame = 0;
u32 *pContext;
u32 ctx;
u8 count;
count = pSendRep->NumberOfContexts;
dioprintk((KERN_INFO MYNAM ": send_reply: IOCStatus: %04x\n",
le16_to_cpu(pSendRep->IOCStatus)));
/* Add check for Loginfo Flag in IOCStatus */
switch (le16_to_cpu(pSendRep->IOCStatus) & MPI_IOCSTATUS_MASK) {
case MPI_IOCSTATUS_SUCCESS:
dev->stats.tx_packets += count;
break;
case MPI_IOCSTATUS_LAN_CANCELED:
case MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED:
break;
case MPI_IOCSTATUS_INVALID_SGL:
dev->stats.tx_errors += count;
printk (KERN_ERR MYNAM ": %s/%s: ERROR - Invalid SGL sent to IOC!\n",
IOC_AND_NETDEV_NAMES_s_s(dev));
goto out;
default:
dev->stats.tx_errors += count;
break;
}
pContext = &pSendRep->BufferContext;
spin_lock_irqsave(&priv->txfidx_lock, flags);
while (count > 0) {
ctx = GET_LAN_BUFFER_CONTEXT(le32_to_cpu(*pContext));
sent = priv->SendCtl[ctx].skb;
dev->stats.tx_bytes += sent->len;
dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
IOC_AND_NETDEV_NAMES_s_s(dev),
__func__, sent));
priv->SendCtl[ctx].skb = NULL;
dma_unmap_single(&mpt_dev->pcidev->dev,
priv->SendCtl[ctx].dma,
priv->SendCtl[ctx].len, DMA_TO_DEVICE);
dev_kfree_skb_irq(sent);
priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
pContext++;
count--;
}
spin_unlock_irqrestore(&priv->txfidx_lock, flags);
out:
if (!(pSendRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
FreeReqFrame = 1;
netif_wake_queue(dev);
return FreeReqFrame;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static netdev_tx_t
mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
{
struct mpt_lan_priv *priv = netdev_priv(dev);
MPT_ADAPTER *mpt_dev = priv->mpt_dev;
MPT_FRAME_HDR *mf;
LANSendRequest_t *pSendReq;
SGETransaction32_t *pTrans;
SGESimple64_t *pSimple;
const unsigned char *mac;
dma_addr_t dma;
unsigned long flags;
int ctx;
u16 cur_naa = 0x1000;
dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n",
__func__, skb));
spin_lock_irqsave(&priv->txfidx_lock, flags);
if (priv->mpt_txfidx_tail < 0) {
netif_stop_queue(dev);
spin_unlock_irqrestore(&priv->txfidx_lock, flags);
printk (KERN_ERR "%s: no tx context available: %u\n",
__func__, priv->mpt_txfidx_tail);
return NETDEV_TX_BUSY;
}
mf = mpt_get_msg_frame(LanCtx, mpt_dev);
if (mf == NULL) {
netif_stop_queue(dev);
spin_unlock_irqrestore(&priv->txfidx_lock, flags);
printk (KERN_ERR "%s: Unable to alloc request frame\n",
__func__);
return NETDEV_TX_BUSY;
}
ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--];
spin_unlock_irqrestore(&priv->txfidx_lock, flags);
// dioprintk((KERN_INFO MYNAM ": %s/%s: Creating new msg frame (send).\n",
// IOC_AND_NETDEV_NAMES_s_s(dev)));
pSendReq = (LANSendRequest_t *) mf;
/* Set the mac.raw pointer, since this apparently isn't getting
* done before we get the skb. Pull the data pointer past the mac data.
*/
skb_reset_mac_header(skb);
skb_pull(skb, 12);
dma = dma_map_single(&mpt_dev->pcidev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
priv->SendCtl[ctx].skb = skb;
priv->SendCtl[ctx].dma = dma;
priv->SendCtl[ctx].len = skb->len;
/* Message Header */
pSendReq->Reserved = 0;
pSendReq->Function = MPI_FUNCTION_LAN_SEND;
pSendReq->ChainOffset = 0;
pSendReq->Reserved2 = 0;
pSendReq->MsgFlags = 0;
pSendReq->PortNumber = priv->pnum;
/* Transaction Context Element */
pTrans = (SGETransaction32_t *) pSendReq->SG_List;
/* No Flags, 8 bytes of Details, 32bit Context (bloody turbo replies) */
pTrans->ContextSize = sizeof(u32);
pTrans->DetailsLength = 2 * sizeof(u32);
pTrans->Flags = 0;
pTrans->TransactionContext = cpu_to_le32(ctx);
// dioprintk((KERN_INFO MYNAM ": %s/%s: BC = %08x, skb = %p, buff = %p\n",
// IOC_AND_NETDEV_NAMES_s_s(dev),
// ctx, skb, skb->data));
mac = skb_mac_header(skb);
pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa << 16) |
(mac[0] << 8) |
(mac[1] << 0));
pTrans->TransactionDetails[1] = cpu_to_le32((mac[2] << 24) |
(mac[3] << 16) |
(mac[4] << 8) |
(mac[5] << 0));
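/*
 * Illustrative layout (added; details are assumptions for the sketch):
 * the 12 bytes pulled above are presumably the two FC_ALEN (6-byte)
 * addresses laid down by the stack, and mac points back at the
 * destination address. With dest MAC bytes d0..d5 and cur_naa 0x1000
 * the packing above yields:
 *
 *	TransactionDetails[0] = 0x1000d0d1	(NAA | d0 | d1)
 *	TransactionDetails[1] = 0xd2d3d4d5	(d2 | d3 | d4 | d5)
 */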
pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2];
/* If we ever decide to send more than one Simple SGE per LANSend, then
we will need to make sure that LAST_ELEMENT only gets set on the
last one. Otherwise, bad voodoo and evil funkiness will commence. */
pSimple->FlagsLength = cpu_to_le32(
((MPI_SGE_FLAGS_LAST_ELEMENT |
MPI_SGE_FLAGS_END_OF_BUFFER |
MPI_SGE_FLAGS_SIMPLE_ELEMENT |
MPI_SGE_FLAGS_SYSTEM_ADDRESS |
MPI_SGE_FLAGS_HOST_TO_IOC |
MPI_SGE_FLAGS_64_BIT_ADDRESSING |
MPI_SGE_FLAGS_END_OF_LIST) << MPI_SGE_FLAGS_SHIFT) |
skb->len);
pSimple->Address.Low = cpu_to_le32((u32) dma);
if (sizeof(dma_addr_t) > sizeof(u32))
pSimple->Address.High = cpu_to_le32((u32) ((u64) dma >> 32));
else
pSimple->Address.High = 0;
mpt_put_msg_frame (LanCtx, mpt_dev, mf);
netif_trans_update(dev);
dioprintk((KERN_INFO MYNAM ": %s/%s: Sending packet. FlagsLength = %08x.\n",
IOC_AND_NETDEV_NAMES_s_s(dev),
le32_to_cpu(pSimple->FlagsLength)));
return NETDEV_TX_OK;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static void
mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority)
/*
* @priority: 0 = put it on the timer queue, 1 = put it on the immediate queue
*/
{
struct mpt_lan_priv *priv = netdev_priv(dev);
if (test_and_set_bit(0, &priv->post_buckets_active) == 0) {
if (priority) {
schedule_delayed_work(&priv->post_buckets_task, 0);
} else {
schedule_delayed_work(&priv->post_buckets_task, 1);
dioprintk((KERN_INFO MYNAM ": post_buckets queued on "
"timer.\n"));
}
dioprintk((KERN_INFO MYNAM ": %s/%s: Queued post_buckets task.\n",
IOC_AND_NETDEV_NAMES_s_s(dev) ));
}
}
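/*
 * Usage note (added): the receive fast path (mpt_lan_receive_skb) calls
 * this with priority 1 so buckets are reposted immediately, while the
 * buckets_out/BucketsRemaining mismatch path calls it with priority 0 to
 * defer the refill by a jiffy. test_and_set_bit() above keeps the delayed
 * work from being queued twice; mpt_lan_post_receive_buckets() clears the
 * bit when it finishes.
 */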
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_receive_skb(struct net_device *dev, struct sk_buff *skb)
{
struct mpt_lan_priv *priv = netdev_priv(dev);
skb->protocol = mpt_lan_type_trans(skb, dev);
dioprintk((KERN_INFO MYNAM ": %s/%s: Incoming packet (%d bytes) "
"delivered to upper level.\n",
IOC_AND_NETDEV_NAMES_s_s(dev), skb->len));
dev->stats.rx_bytes += skb->len;
dev->stats.rx_packets++;
skb->dev = dev;
netif_rx(skb);
dioprintk((MYNAM "/receive_skb: %d buckets remaining\n",
atomic_read(&priv->buckets_out)));
if (atomic_read(&priv->buckets_out) < priv->bucketthresh)
mpt_lan_wake_post_buckets_task(dev, 1);
dioprintk((KERN_INFO MYNAM "/receive_post_reply: %d buckets "
"remaining, %d received back since sod\n",
atomic_read(&priv->buckets_out), priv->total_received));
return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
//static inline int
static int
mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
{
struct mpt_lan_priv *priv = netdev_priv(dev);
MPT_ADAPTER *mpt_dev = priv->mpt_dev;
struct sk_buff *skb, *old_skb;
unsigned long flags;
u32 ctx, len;
ctx = GET_LAN_BUCKET_CONTEXT(tmsg);
skb = priv->RcvCtl[ctx].skb;
len = GET_LAN_PACKET_LENGTH(tmsg);
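/* Copybreak (added note): for small packets it is cheaper to copy the
 * data into a fresh skb and leave the DMA-mapped bucket in place for
 * reuse; larger packets hand the bucket's skb up the stack and unmap it.
 */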
if (len < MPT_LAN_RX_COPYBREAK) {
old_skb = skb;
skb = (struct sk_buff *)dev_alloc_skb(len);
if (!skb) {
printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
IOC_AND_NETDEV_NAMES_s_s(dev),
__FILE__, __LINE__);
return -ENOMEM;
}
dma_sync_single_for_cpu(&mpt_dev->pcidev->dev,
priv->RcvCtl[ctx].dma,
priv->RcvCtl[ctx].len,
DMA_FROM_DEVICE);
skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);
dma_sync_single_for_device(&mpt_dev->pcidev->dev,
priv->RcvCtl[ctx].dma,
priv->RcvCtl[ctx].len,
DMA_FROM_DEVICE);
goto out;
}
skb_put(skb, len);
priv->RcvCtl[ctx].skb = NULL;
dma_unmap_single(&mpt_dev->pcidev->dev, priv->RcvCtl[ctx].dma,
priv->RcvCtl[ctx].len, DMA_FROM_DEVICE);
out:
spin_lock_irqsave(&priv->rxfidx_lock, flags);
priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
atomic_dec(&priv->buckets_out);
priv->total_received++;
return mpt_lan_receive_skb(dev, skb);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_receive_post_free(struct net_device *dev,
LANReceivePostReply_t *pRecvRep)
{
struct mpt_lan_priv *priv = netdev_priv(dev);
MPT_ADAPTER *mpt_dev = priv->mpt_dev;
unsigned long flags;
struct sk_buff *skb;
u32 ctx;
int count;
int i;
count = pRecvRep->NumberOfContexts;
/**/ dlprintk((KERN_INFO MYNAM "/receive_post_reply: "
"IOC returned %d buckets, freeing them...\n", count));
spin_lock_irqsave(&priv->rxfidx_lock, flags);
for (i = 0; i < count; i++) {
ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
skb = priv->RcvCtl[ctx].skb;
// dlprintk((KERN_INFO MYNAM ": %s: dev_name = %s\n",
// IOC_AND_NETDEV_NAMES_s_s(dev)));
// dlprintk((KERN_INFO MYNAM "@rpr[2], priv = %p, buckets_out addr = %p",
// priv, &(priv->buckets_out)));
// dlprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n"));
priv->RcvCtl[ctx].skb = NULL;
dma_unmap_single(&mpt_dev->pcidev->dev, priv->RcvCtl[ctx].dma,
priv->RcvCtl[ctx].len, DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
}
spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
atomic_sub(count, &priv->buckets_out);
// for (i = 0; i < priv->max_buckets_out; i++)
// if (priv->RcvCtl[i].skb != NULL)
// dlprintk((KERN_INFO MYNAM "@rpr: bucket %03x "
// "is still out\n", i));
/* dlprintk((KERN_INFO MYNAM "/receive_post_reply: freed %d buckets\n",
count));
*/
/**/ dlprintk((KERN_INFO MYNAM "@receive_post_reply: %d buckets "
/**/ "remaining, %d received back since sod.\n",
/**/ atomic_read(&priv->buckets_out), priv->total_received));
return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_receive_post_reply(struct net_device *dev,
LANReceivePostReply_t *pRecvRep)
{
struct mpt_lan_priv *priv = netdev_priv(dev);
MPT_ADAPTER *mpt_dev = priv->mpt_dev;
struct sk_buff *skb, *old_skb;
unsigned long flags;
u32 len, ctx, offset;
u32 remaining = le32_to_cpu(pRecvRep->BucketsRemaining);
int count;
int i, l;
dioprintk((KERN_INFO MYNAM ": mpt_lan_receive_post_reply called\n"));
dioprintk((KERN_INFO MYNAM ": receive_post_reply: IOCStatus: %04x\n",
le16_to_cpu(pRecvRep->IOCStatus)));
if ((le16_to_cpu(pRecvRep->IOCStatus) & MPI_IOCSTATUS_MASK) ==
MPI_IOCSTATUS_LAN_CANCELED)
return mpt_lan_receive_post_free(dev, pRecvRep);
len = le32_to_cpu(pRecvRep->PacketLength);
if (len == 0) {
printk (KERN_ERR MYNAM ": %s/%s: ERROR - Got a non-TURBO "
"ReceivePostReply w/ PacketLength zero!\n",
IOC_AND_NETDEV_NAMES_s_s(dev));
printk (KERN_ERR MYNAM ": MsgFlags = %02x, IOCStatus = %04x\n",
pRecvRep->MsgFlags, le16_to_cpu(pRecvRep->IOCStatus));
return -1;
}
ctx = le32_to_cpu(pRecvRep->BucketContext[0]);
count = pRecvRep->NumberOfContexts;
skb = priv->RcvCtl[ctx].skb;
offset = le32_to_cpu(pRecvRep->PacketOffset);
// if (offset != 0) {
// printk (KERN_INFO MYNAM ": %s/%s: Got a ReceivePostReply "
// "w/ PacketOffset %u\n",
// IOC_AND_NETDEV_NAMES_s_s(dev),
// offset);
// }
dioprintk((KERN_INFO MYNAM ": %s/%s: @rpr, offset = %d, len = %d\n",
IOC_AND_NETDEV_NAMES_s_s(dev),
offset, len));
if (count > 1) {
int szrem = len;
// dioprintk((KERN_INFO MYNAM ": %s/%s: Multiple buckets returned "
// "for single packet, concatenating...\n",
// IOC_AND_NETDEV_NAMES_s_s(dev)));
skb = (struct sk_buff *)dev_alloc_skb(len);
if (!skb) {
printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
IOC_AND_NETDEV_NAMES_s_s(dev),
__FILE__, __LINE__);
return -ENOMEM;
}
spin_lock_irqsave(&priv->rxfidx_lock, flags);
for (i = 0; i < count; i++) {
ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
old_skb = priv->RcvCtl[ctx].skb;
l = priv->RcvCtl[ctx].len;
if (szrem < l)
l = szrem;
// dioprintk((KERN_INFO MYNAM ": %s/%s: Buckets = %d, len = %u\n",
// IOC_AND_NETDEV_NAMES_s_s(dev),
// i, l));
dma_sync_single_for_cpu(&mpt_dev->pcidev->dev,
priv->RcvCtl[ctx].dma,
priv->RcvCtl[ctx].len,
DMA_FROM_DEVICE);
skb_copy_from_linear_data(old_skb, skb_put(skb, l), l);
dma_sync_single_for_device(&mpt_dev->pcidev->dev,
priv->RcvCtl[ctx].dma,
priv->RcvCtl[ctx].len,
DMA_FROM_DEVICE);
priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
szrem -= l;
}
spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
} else if (len < MPT_LAN_RX_COPYBREAK) {
old_skb = skb;
skb = (struct sk_buff *)dev_alloc_skb(len);
if (!skb) {
printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
IOC_AND_NETDEV_NAMES_s_s(dev),
__FILE__, __LINE__);
return -ENOMEM;
}
dma_sync_single_for_cpu(&mpt_dev->pcidev->dev,
priv->RcvCtl[ctx].dma,
priv->RcvCtl[ctx].len,
DMA_FROM_DEVICE);
skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);
dma_sync_single_for_device(&mpt_dev->pcidev->dev,
priv->RcvCtl[ctx].dma,
priv->RcvCtl[ctx].len,
DMA_FROM_DEVICE);
spin_lock_irqsave(&priv->rxfidx_lock, flags);
priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
} else {
spin_lock_irqsave(&priv->rxfidx_lock, flags);
priv->RcvCtl[ctx].skb = NULL;
dma_unmap_single(&mpt_dev->pcidev->dev, priv->RcvCtl[ctx].dma,
priv->RcvCtl[ctx].len, DMA_FROM_DEVICE);
priv->RcvCtl[ctx].dma = 0;
priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
skb_put(skb,len);
}
atomic_sub(count, &priv->buckets_out);
priv->total_received += count;
if (priv->mpt_rxfidx_tail >= MPT_LAN_MAX_BUCKETS_OUT) {
printk (KERN_ERR MYNAM ": %s/%s: Yoohoo! mpt_rxfidx_tail = %d, "
"MPT_LAN_MAX_BUCKETS_OUT = %d\n",
IOC_AND_NETDEV_NAMES_s_s(dev),
priv->mpt_rxfidx_tail,
MPT_LAN_MAX_BUCKETS_OUT);
return -1;
}
if (remaining == 0)
printk (KERN_WARNING MYNAM ": %s/%s: WARNING - IOC out of buckets! "
"(priv->buckets_out = %d)\n",
IOC_AND_NETDEV_NAMES_s_s(dev),
atomic_read(&priv->buckets_out));
else if (remaining < 10)
printk (KERN_INFO MYNAM ": %s/%s: IOC says %d buckets left. "
"(priv->buckets_out = %d)\n",
IOC_AND_NETDEV_NAMES_s_s(dev),
remaining, atomic_read(&priv->buckets_out));
if ((remaining < priv->bucketthresh) &&
((atomic_read(&priv->buckets_out) - remaining) >
MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH)) {
printk (KERN_WARNING MYNAM " Mismatch between driver's "
"buckets_out count and fw's BucketsRemaining "
"count has crossed the threshold, issuing a "
"LanReset to clear the fw's hashtable. You may "
"want to check your /var/log/messages for \"CRC "
"error\" event notifications.\n");
mpt_lan_reset(dev);
mpt_lan_wake_post_buckets_task(dev, 0);
}
return mpt_lan_receive_skb(dev, skb);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Simple SGEs only at the moment */
static void
mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
{
struct net_device *dev = priv->dev;
MPT_ADAPTER *mpt_dev = priv->mpt_dev;
MPT_FRAME_HDR *mf;
LANReceivePostRequest_t *pRecvReq;
SGETransaction32_t *pTrans;
SGESimple64_t *pSimple;
struct sk_buff *skb;
dma_addr_t dma;
u32 curr, buckets, count, max;
u32 len = (dev->mtu + dev->hard_header_len + 4);
unsigned long flags;
int i;
curr = atomic_read(&priv->buckets_out);
buckets = (priv->max_buckets_out - curr);
dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, Start_buckets = %u, buckets_out = %u\n",
IOC_AND_NETDEV_NAMES_s_s(dev),
__func__, buckets, curr));
max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
(sizeof(SGETransaction32_t) + sizeof(SGESimple64_t));
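/*
 * Worked example (added; sizes are purely illustrative assumptions): if
 * req_sz were 128 bytes, the fixed request part 16 bytes, and each
 * transaction-context header plus 64-bit simple SGE cost 8 + 12 = 20
 * bytes, then max = (128 - 16) / 20 = 5 buckets per request frame.
 */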
while (buckets) {
mf = mpt_get_msg_frame(LanCtx, mpt_dev);
if (mf == NULL) {
printk (KERN_ERR "%s: Unable to alloc request frame\n",
__func__);
dioprintk((KERN_ERR "%s: %u buckets remaining\n",
__func__, buckets));
goto out;
}
pRecvReq = (LANReceivePostRequest_t *) mf;
i = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
mpt_dev->RequestNB[i] = 0;
count = buckets;
if (count > max)
count = max;
pRecvReq->Function = MPI_FUNCTION_LAN_RECEIVE;
pRecvReq->ChainOffset = 0;
pRecvReq->MsgFlags = 0;
pRecvReq->PortNumber = priv->pnum;
pTrans = (SGETransaction32_t *) pRecvReq->SG_List;
pSimple = NULL;
for (i = 0; i < count; i++) {
int ctx;
spin_lock_irqsave(&priv->rxfidx_lock, flags);
if (priv->mpt_rxfidx_tail < 0) {
printk (KERN_ERR "%s: Can't alloc context\n",
__func__);
spin_unlock_irqrestore(&priv->rxfidx_lock,
flags);
break;
}
ctx = priv->mpt_rxfidx[priv->mpt_rxfidx_tail--];
skb = priv->RcvCtl[ctx].skb;
if (skb && (priv->RcvCtl[ctx].len != len)) {
dma_unmap_single(&mpt_dev->pcidev->dev,
priv->RcvCtl[ctx].dma,
priv->RcvCtl[ctx].len,
DMA_FROM_DEVICE);
dev_kfree_skb(priv->RcvCtl[ctx].skb);
skb = priv->RcvCtl[ctx].skb = NULL;
}
if (skb == NULL) {
skb = dev_alloc_skb(len);
if (skb == NULL) {
printk (KERN_WARNING
MYNAM "/%s: Can't alloc skb\n",
__func__);
priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
break;
}
dma = dma_map_single(&mpt_dev->pcidev->dev,
skb->data, len,
DMA_FROM_DEVICE);
priv->RcvCtl[ctx].skb = skb;
priv->RcvCtl[ctx].dma = dma;
priv->RcvCtl[ctx].len = len;
}
spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
pTrans->ContextSize = sizeof(u32);
pTrans->DetailsLength = 0;
pTrans->Flags = 0;
pTrans->TransactionContext = cpu_to_le32(ctx);
pSimple = (SGESimple64_t *) pTrans->TransactionDetails;
pSimple->FlagsLength = cpu_to_le32(
((MPI_SGE_FLAGS_END_OF_BUFFER |
MPI_SGE_FLAGS_SIMPLE_ELEMENT |
MPI_SGE_FLAGS_64_BIT_ADDRESSING) << MPI_SGE_FLAGS_SHIFT) | len);
pSimple->Address.Low = cpu_to_le32((u32) priv->RcvCtl[ctx].dma);
if (sizeof(dma_addr_t) > sizeof(u32))
pSimple->Address.High = cpu_to_le32((u32) ((u64) priv->RcvCtl[ctx].dma >> 32));
else
pSimple->Address.High = 0;
pTrans = (SGETransaction32_t *) (pSimple + 1);
}
if (pSimple == NULL) {
/**/ printk (KERN_WARNING MYNAM "/%s: No buckets posted\n",
/**/ __func__);
mpt_free_msg_frame(mpt_dev, mf);
goto out;
}
pSimple->FlagsLength |= cpu_to_le32(MPI_SGE_FLAGS_END_OF_LIST << MPI_SGE_FLAGS_SHIFT);
pRecvReq->BucketCount = cpu_to_le32(i);
/* printk(KERN_INFO MYNAM ": posting buckets\n ");
* for (i = 0; i < j + 2; i ++)
* printk (" %08x", le32_to_cpu(msg[i]));
* printk ("\n");
*/
mpt_put_msg_frame(LanCtx, mpt_dev, mf);
priv->total_posted += i;
buckets -= i;
atomic_add(i, &priv->buckets_out);
}
out:
dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n",
__func__, buckets, atomic_read(&priv->buckets_out)));
dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n",
__func__, priv->total_posted, priv->total_received));
clear_bit(0, &priv->post_buckets_active);
}
static void
mpt_lan_post_receive_buckets_work(struct work_struct *work)
{
mpt_lan_post_receive_buckets(container_of(work, struct mpt_lan_priv,
post_buckets_task.work));
}
static const struct net_device_ops mpt_netdev_ops = {
.ndo_open = mpt_lan_open,
.ndo_stop = mpt_lan_close,
.ndo_start_xmit = mpt_lan_sdu_send,
.ndo_tx_timeout = mpt_lan_tx_timeout,
};
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static struct net_device *
mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
{
struct net_device *dev;
struct mpt_lan_priv *priv;
u8 HWaddr[FC_ALEN], *a;
dev = alloc_fcdev(sizeof(struct mpt_lan_priv));
if (!dev)
return NULL;
dev->mtu = MPT_LAN_MTU;
priv = netdev_priv(dev);
priv->dev = dev;
priv->mpt_dev = mpt_dev;
priv->pnum = pnum;
INIT_DELAYED_WORK(&priv->post_buckets_task,
mpt_lan_post_receive_buckets_work);
priv->post_buckets_active = 0;
dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
__LINE__, dev->mtu + dev->hard_header_len + 4));
atomic_set(&priv->buckets_out, 0);
priv->total_posted = 0;
priv->total_received = 0;
priv->max_buckets_out = max_buckets_out;
if (mpt_dev->pfacts[0].MaxLanBuckets < max_buckets_out)
priv->max_buckets_out = mpt_dev->pfacts[0].MaxLanBuckets;
dlprintk((KERN_INFO MYNAM "@%d: MaxLanBuckets=%d, max_buckets_out/priv=%d/%d\n",
__LINE__,
mpt_dev->pfacts[0].MaxLanBuckets,
max_buckets_out,
priv->max_buckets_out));
priv->bucketthresh = priv->max_buckets_out * 2 / 3;
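/* Refill threshold (added note): mpt_lan_receive_skb() schedules a
 * repost once buckets_out drops below ~2/3 of the maximum. */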
spin_lock_init(&priv->txfidx_lock);
spin_lock_init(&priv->rxfidx_lock);
/* Grab pre-fetched LANPage1 stuff. :-) */
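/* The byte reversal below suggests (assumption inferred from the code)
 * that the config page stores the hardware address low-order byte first. */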
a = (u8 *) &mpt_dev->lan_cnfg_page1.HardwareAddressLow;
HWaddr[0] = a[5];
HWaddr[1] = a[4];
HWaddr[2] = a[3];
HWaddr[3] = a[2];
HWaddr[4] = a[1];
HWaddr[5] = a[0];
dev->addr_len = FC_ALEN;
dev_addr_set(dev, HWaddr);
memset(dev->broadcast, 0xff, FC_ALEN);
/* The Tx queue is 127 deep on the 909.
* Give ourselves some breathing room.
*/
priv->tx_max_out = (tx_max_out_p <= MPT_TX_MAX_OUT_LIM) ?
tx_max_out_p : MPT_TX_MAX_OUT_LIM;
dev->netdev_ops = &mpt_netdev_ops;
dev->watchdog_timeo = MPT_LAN_TX_TIMEOUT;
/* MTU range: 96 - 65280 */
dev->min_mtu = MPT_LAN_MIN_MTU;
dev->max_mtu = MPT_LAN_MAX_MTU;
dlprintk((KERN_INFO MYNAM ": Finished registering dev "
"and setting initial values\n"));
if (register_netdev(dev) != 0) {
free_netdev(dev);
dev = NULL;
}
return dev;
}
static int
mptlan_probe(struct pci_dev *pdev)
{
MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
struct net_device *dev;
int i;
for (i = 0; i < ioc->facts.NumberOfPorts; i++) {
printk(KERN_INFO MYNAM ": %s: PortNum=%x, "
"ProtocolFlags=%02Xh (%c%c%c%c)\n",
ioc->name, ioc->pfacts[i].PortNumber,
ioc->pfacts[i].ProtocolFlags,
MPT_PROTOCOL_FLAGS_c_c_c_c(
ioc->pfacts[i].ProtocolFlags));
if (!(ioc->pfacts[i].ProtocolFlags &
MPI_PORTFACTS_PROTOCOL_LAN)) {
printk(KERN_INFO MYNAM ": %s: Hmmm... LAN protocol "
"seems to be disabled on this adapter port!\n",
ioc->name);
continue;
}
dev = mpt_register_lan_device(ioc, i);
if (!dev) {
printk(KERN_ERR MYNAM ": %s: Unable to register "
"port%d as a LAN device\n", ioc->name,
ioc->pfacts[i].PortNumber);
continue;
}
printk(KERN_INFO MYNAM ": %s: Fusion MPT LAN device "
"registered as '%s'\n", ioc->name, dev->name);
printk(KERN_INFO MYNAM ": %s/%s: "
"LanAddr = %pM\n",
IOC_AND_NETDEV_NAMES_s_s(dev),
dev->dev_addr);
ioc->netdev = dev;
return 0;
}
return -ENODEV;
}
static void
mptlan_remove(struct pci_dev *pdev)
{
MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
struct net_device *dev = ioc->netdev;
struct mpt_lan_priv *priv;
if (dev != NULL) {
priv = netdev_priv(dev);
cancel_delayed_work_sync(&priv->post_buckets_task);
unregister_netdev(dev);
free_netdev(dev);
}
}
static struct mpt_pci_driver mptlan_driver = {
.probe = mptlan_probe,
.remove = mptlan_remove,
};
static int __init mpt_lan_init (void)
{
show_mptmod_ver(LANAME, LANVER);
LanCtx = mpt_register(lan_reply, MPTLAN_DRIVER,
"lan_reply");
if (LanCtx <= 0) {
printk (KERN_ERR MYNAM ": Failed to register with MPT base driver\n");
return -EBUSY;
}
dlprintk((KERN_INFO MYNAM ": assigned context of %d\n", LanCtx));
if (mpt_reset_register(LanCtx, mpt_lan_ioc_reset)) {
printk(KERN_ERR MYNAM ": Eieee! unable to register a reset "
"handler with mptbase! The world is at an end! "
"Everything is fading to black! Goodbye.\n");
return -EBUSY;
}
dlprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n"));
mpt_device_driver_register(&mptlan_driver, MPTLAN_DRIVER);
return 0;
}
static void __exit mpt_lan_exit(void)
{
mpt_device_driver_deregister(MPTLAN_DRIVER);
mpt_reset_deregister(LanCtx);
if (LanCtx) {
mpt_deregister(LanCtx);
LanCtx = MPT_MAX_PROTOCOL_DRIVERS;
}
}
module_init(mpt_lan_init);
module_exit(mpt_lan_exit);
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static unsigned short
mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev)
{
struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data;
struct fcllc *fcllc;
skb_reset_mac_header(skb);
skb_pull(skb, sizeof(struct mpt_lan_ohdr));
if (fch->dtype == htons(0xffff)) {
u32 *p = (u32 *) fch;
swab32s(p + 0);
swab32s(p + 1);
swab32s(p + 2);
swab32s(p + 3);
printk (KERN_WARNING MYNAM ": %s: WARNING - Broadcast swap F/W bug detected!\n",
NETDEV_PTR_TO_IOC_NAME_s(dev));
printk (KERN_WARNING MYNAM ": Please update sender @ MAC_addr = %pM\n",
fch->saddr);
}
if (*fch->daddr & 1) {
if (!memcmp(fch->daddr, dev->broadcast, FC_ALEN)) {
skb->pkt_type = PACKET_BROADCAST;
} else {
skb->pkt_type = PACKET_MULTICAST;
}
} else {
if (memcmp(fch->daddr, dev->dev_addr, FC_ALEN)) {
skb->pkt_type = PACKET_OTHERHOST;
} else {
skb->pkt_type = PACKET_HOST;
}
}
fcllc = (struct fcllc *)skb->data;
/* Strip the SNAP header from ARP packets since we don't
* pass them through to the 802.2/SNAP layers.
*/
if (fcllc->dsap == EXTENDED_SAP &&
(fcllc->ethertype == htons(ETH_P_IP) ||
fcllc->ethertype == htons(ETH_P_ARP))) {
skb_pull(skb, sizeof(struct fcllc));
return fcllc->ethertype;
}
return htons(ETH_P_802_2);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
| linux-master | drivers/message/fusion/mptlan.c |
/*
* linux/drivers/message/fusion/mptscsih.c
* For use with LSI PCI chip/adapter(s)
* running LSI Fusion MPT (Message Passing Technology) firmware.
*
* Copyright (c) 1999-2008 LSI Corporation
* (mailto:[email protected])
*
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
NO WARRANTY
THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
solely responsible for determining the appropriateness of using and
distributing the Program and assumes all risks associated with its
exercise of rights under this Agreement, including but not limited to
the risks and costs of program errors, damage to or loss of data,
programs or equipment, and unavailability or interruption of operations.
DISCLAIMER OF LIABILITY
NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/delay.h> /* for mdelay */
#include <linux/interrupt.h>
#include <linux/reboot.h> /* notifier code */
#include <linux/workqueue.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>
#include "mptbase.h"
#include "mptscsih.h"
#include "lsi/mpi_log_sas.h"
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
#define my_NAME "Fusion MPT SCSI Host driver"
#define my_VERSION MPT_LINUX_VERSION_COMMON
#define MYNAM "mptscsih"
MODULE_AUTHOR(MODULEAUTHOR);
MODULE_DESCRIPTION(my_NAME);
MODULE_LICENSE("GPL");
MODULE_VERSION(my_VERSION);
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* Other private/forward protos...
*/
struct scsi_cmnd *mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i);
static struct scsi_cmnd * mptscsih_getclear_scsi_lookup(MPT_ADAPTER *ioc, int i);
static void mptscsih_set_scsi_lookup(MPT_ADAPTER *ioc, int i, struct scsi_cmnd *scmd);
static int SCPNT_TO_LOOKUP_IDX(MPT_ADAPTER *ioc, struct scsi_cmnd *scmd);
int mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r);
static void mptscsih_report_queue_full(struct scsi_cmnd *sc, SCSIIOReply_t *pScsiReply, SCSIIORequest_t *pScsiReq);
int mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r);
static int mptscsih_AddSGE(MPT_ADAPTER *ioc, struct scsi_cmnd *SCpnt,
SCSIIORequest_t *pReq, int req_idx);
static void mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx);
static void mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR *mf, SCSIIOReply_t *pScsiReply);
int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id,
u64 lun, int ctx2abort, ulong timeout);
int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset);
int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
void
mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code);
static int mptscsih_get_completion_code(MPT_ADAPTER *ioc,
MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply);
int mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r);
static int mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *iocmd);
static void mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, VirtDevice *vdevice);
static int
mptscsih_taskmgmt_reply(MPT_ADAPTER *ioc, u8 type,
SCSITaskMgmtReply_t *pScsiTmReply);
void mptscsih_remove(struct pci_dev *);
void mptscsih_shutdown(struct pci_dev *);
#ifdef CONFIG_PM
int mptscsih_suspend(struct pci_dev *pdev, pm_message_t state);
int mptscsih_resume(struct pci_dev *pdev);
#endif
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* mptscsih_getFreeChainBuffer - Function to get a free chain
* from the MPT_ADAPTER FreeChainQ.
* @ioc: Pointer to MPT_ADAPTER structure
* @retIndex: Index of the returned free chain buffer. (output)
*
* return SUCCESS or FAILED
*/
static inline int
mptscsih_getFreeChainBuffer(MPT_ADAPTER *ioc, int *retIndex)
{
MPT_FRAME_HDR *chainBuf;
unsigned long flags;
int rc;
int chain_idx;
dsgprintk(ioc, printk(MYIOC_s_DEBUG_FMT "getFreeChainBuffer called\n",
ioc->name));
spin_lock_irqsave(&ioc->FreeQlock, flags);
if (!list_empty(&ioc->FreeChainQ)) {
int offset;
chainBuf = list_entry(ioc->FreeChainQ.next, MPT_FRAME_HDR,
u.frame.linkage.list);
list_del(&chainBuf->u.frame.linkage.list);
offset = (u8 *)chainBuf - (u8 *)ioc->ChainBuffer;
chain_idx = offset / ioc->req_sz;
rc = SUCCESS;
dsgprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"getFreeChainBuffer chainBuf=%p ChainBuffer=%p offset=%d chain_idx=%d\n",
ioc->name, chainBuf, ioc->ChainBuffer, offset, chain_idx));
} else {
rc = FAILED;
chain_idx = MPT_HOST_NO_CHAIN;
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "getFreeChainBuffer failed\n",
ioc->name));
}
spin_unlock_irqrestore(&ioc->FreeQlock, flags);
*retIndex = chain_idx;
return rc;
} /* mptscsih_getFreeChainBuffer() */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* mptscsih_AddSGE - Add a SGE (plus chain buffers) to the
* SCSIIORequest_t Message Frame.
* @ioc: Pointer to MPT_ADAPTER structure
* @SCpnt: Pointer to scsi_cmnd structure
* @pReq: Pointer to SCSIIORequest_t structure
* @req_idx: Index of the SCSI IO request frame
*
* Returns SUCCESS, or FAILED if the DMA mapping or a chain
* buffer allocation fails.
*/
static int
mptscsih_AddSGE(MPT_ADAPTER *ioc, struct scsi_cmnd *SCpnt,
SCSIIORequest_t *pReq, int req_idx)
{
char *psge;
char *chainSge;
struct scatterlist *sg;
int frm_sz;
int sges_left, sg_done;
int chain_idx = MPT_HOST_NO_CHAIN;
int sgeOffset;
int numSgeSlots, numSgeThisFrame;
u32 sgflags, sgdir, thisxfer = 0;
int chain_dma_off = 0;
int newIndex;
int ii;
dma_addr_t v2;
u32 RequestNB;
sgdir = le32_to_cpu(pReq->Control) & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK;
if (sgdir == MPI_SCSIIO_CONTROL_WRITE) {
sgdir = MPT_TRANSFER_HOST_TO_IOC;
} else {
sgdir = MPT_TRANSFER_IOC_TO_HOST;
}
psge = (char *) &pReq->SGL;
frm_sz = ioc->req_sz;
/* Map the data portion, if any.
* sges_left = 0 if no data transfer.
*/
sges_left = scsi_dma_map(SCpnt);
if (sges_left < 0)
return FAILED;
/* Handle the SG case.
*/
sg = scsi_sglist(SCpnt);
sg_done = 0;
sgeOffset = sizeof(SCSIIORequest_t) - sizeof(SGE_IO_UNION);
chainSge = NULL;
/* Prior to entering this loop - the following must be set
* current MF: sgeOffset (bytes)
* chainSge (Null if original MF is not a chain buffer)
* sg_done (num SGE done for this MF)
*/
nextSGEset:
numSgeSlots = ((frm_sz - sgeOffset) / ioc->SGE_size);
numSgeThisFrame = (sges_left < numSgeSlots) ? sges_left : numSgeSlots;
sgflags = MPT_SGE_FLAGS_SIMPLE_ELEMENT | sgdir;
/* Get first (num - 1) SG elements
* Skip any SG entries with a length of 0
* NOTE: at finish, sg and psge pointed to NEXT data/location positions
*/
for (ii=0; ii < (numSgeThisFrame-1); ii++) {
thisxfer = sg_dma_len(sg);
if (thisxfer == 0) {
/* Get next SG element from the OS */
sg = sg_next(sg);
sg_done++;
continue;
}
v2 = sg_dma_address(sg);
ioc->add_sge(psge, sgflags | thisxfer, v2);
/* Get next SG element from the OS */
sg = sg_next(sg);
psge += ioc->SGE_size;
sgeOffset += ioc->SGE_size;
sg_done++;
}
if (numSgeThisFrame == sges_left) {
/* Add last element, end of buffer and end of list flags.
*/
sgflags |= MPT_SGE_FLAGS_LAST_ELEMENT |
MPT_SGE_FLAGS_END_OF_BUFFER |
MPT_SGE_FLAGS_END_OF_LIST;
/* Add last SGE and set termination flags.
* Note: Last SGE may have a length of 0 - which should be ok.
*/
thisxfer = sg_dma_len(sg);
v2 = sg_dma_address(sg);
ioc->add_sge(psge, sgflags | thisxfer, v2);
sgeOffset += ioc->SGE_size;
sg_done++;
if (chainSge) {
/* The current buffer is a chain buffer,
* but there is not another one.
* Update the chain element
* Offset and Length fields.
*/
ioc->add_chain((char *)chainSge, 0, sgeOffset,
ioc->ChainBufferDMA + chain_dma_off);
} else {
/* The current buffer is the original MF
* and there is no Chain buffer.
*/
pReq->ChainOffset = 0;
RequestNB = (((sgeOffset - 1) >> ioc->NBShiftFactor) + 1) & 0x03;
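/* Illustrative (added; the NBShiftFactor value is an assumption): with
 * NBShiftFactor == 6, sgeOffset == 60 gives ((59 >> 6) + 1) & 3 == 1. */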
dsgprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Single Buffer RequestNB=%x, sgeOffset=%d\n", ioc->name, RequestNB, sgeOffset));
ioc->RequestNB[req_idx] = RequestNB;
}
} else {
/* At least one chain buffer is needed.
* Complete the first MF
* - last SGE element, set the LastElement bit
* - set ChainOffset (words) for orig MF
* (OR finish previous MF chain buffer)
* - update MFStructPtr ChainIndex
* - Populate chain element
* Also
* Loop until done.
*/
dsgprintk(ioc, printk(MYIOC_s_DEBUG_FMT "SG: Chain Required! sg done %d\n",
ioc->name, sg_done));
/* Set LAST_ELEMENT flag for last non-chain element
* in the buffer. Since psge points at the NEXT
* SGE element, go back one SGE element, update the flags
* and reset the pointer. (Note: sgflags & thisxfer are already
* set properly).
*/
if (sg_done) {
u32 *ptmp = (u32 *) (psge - ioc->SGE_size);
sgflags = le32_to_cpu(*ptmp);
sgflags |= MPT_SGE_FLAGS_LAST_ELEMENT;
*ptmp = cpu_to_le32(sgflags);
}
if (chainSge) {
/* The current buffer is a chain buffer.
* chainSge points to the previous Chain Element.
* Update its chain element Offset and Length (must
* include chain element size) fields.
* Old chain element is now complete.
*/
u8 nextChain = (u8) (sgeOffset >> 2);
sgeOffset += ioc->SGE_size;
ioc->add_chain((char *)chainSge, nextChain, sgeOffset,
ioc->ChainBufferDMA + chain_dma_off);
} else {
/* The original MF buffer requires a chain buffer -
* set the offset.
* Last element in this MF is a chain element.
*/
pReq->ChainOffset = (u8) (sgeOffset >> 2);
RequestNB = (((sgeOffset - 1) >> ioc->NBShiftFactor) + 1) & 0x03;
dsgprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Chain Buffer Needed, RequestNB=%x sgeOffset=%d\n", ioc->name, RequestNB, sgeOffset));
ioc->RequestNB[req_idx] = RequestNB;
}
sges_left -= sg_done;
/* NOTE: psge points to the beginning of the chain element
* in current buffer. Get a chain buffer.
*/
if ((mptscsih_getFreeChainBuffer(ioc, &newIndex)) == FAILED) {
dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"getFreeChainBuffer FAILED SCSI cmd=%02x (%p)\n",
ioc->name, pReq->CDB[0], SCpnt));
return FAILED;
}
/* Update the tracking arrays.
* If chainSge == NULL, update ReqToChain, else ChainToChain
*/
if (chainSge) {
ioc->ChainToChain[chain_idx] = newIndex;
} else {
ioc->ReqToChain[req_idx] = newIndex;
}
chain_idx = newIndex;
chain_dma_off = ioc->req_sz * chain_idx;
/* Populate the chainSGE for the current buffer.
* - Set chain buffer pointer to psge and fill
* out the Address and Flags fields.
*/
chainSge = (char *) psge;
dsgprintk(ioc, printk(MYIOC_s_DEBUG_FMT " Current buff @ %p (index 0x%x)",
ioc->name, psge, req_idx));
/* Start the SGE for the next buffer
*/
psge = (char *) (ioc->ChainBuffer + chain_dma_off);
sgeOffset = 0;
sg_done = 0;
dsgprintk(ioc, printk(MYIOC_s_DEBUG_FMT " Chain buff @ %p (index 0x%x)\n",
ioc->name, psge, chain_idx));
/* Start the SGE for the next buffer
*/
goto nextSGEset;
}
return SUCCESS;
} /* mptscsih_AddSGE() */
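/*
 * Illustrative summary (added, not from the original source) of the SGL
 * laid out above when the scatterlist spills past one request frame:
 *
 *	request frame:	[SGE]...[SGE][chain element] ---+
 *	chain buffer:	[SGE]...[SGE][chain element] ---+--> next chain
 *	last buffer:	[SGE]...[last SGE: LAST_ELEMENT|END_OF_BUFFER|END_OF_LIST]
 *
 * ReqToChain[] records the first chain buffer for a request index and
 * ChainToChain[] links subsequent buffers, so mptscsih_freeChainBuffers()
 * can walk and release the whole chain on completion.
 */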
static void
mptscsih_issue_sep_command(MPT_ADAPTER *ioc, VirtTarget *vtarget,
U32 SlotStatus)
{
MPT_FRAME_HDR *mf;
SEPRequest_t *SEPMsg;
if (ioc->bus_type != SAS)
return;
/* Not supported for hidden raid components
*/
if (vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT)
return;
if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) {
dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s: no msg frames!!\n",
ioc->name,__func__));
return;
}
SEPMsg = (SEPRequest_t *)mf;
SEPMsg->Function = MPI_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
SEPMsg->Bus = vtarget->channel;
SEPMsg->TargetID = vtarget->id;
SEPMsg->Action = MPI_SEP_REQ_ACTION_WRITE_STATUS;
SEPMsg->SlotStatus = SlotStatus;
devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Sending SEP cmd=%x channel=%d id=%d\n",
ioc->name, SlotStatus, SEPMsg->Bus, SEPMsg->TargetID));
mpt_put_msg_frame(ioc->DoneCtx, ioc, mf);
}
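/*
 * Usage note (added): mptscsih_io_done() uses this on a DEVICE_NOT_THERE
 * completion to write MPI_SEP_REQ_SLOTSTATUS_UNCONFIGURED and turn the
 * enclosure slot LED off for targets flagged MPT_TARGET_FLAGS_LED_ON.
 */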
#ifdef CONFIG_FUSION_LOGGING
/**
* mptscsih_info_scsiio - debug print info on reply frame
* @ioc: Pointer to MPT_ADAPTER structure
* @sc: original scsi cmnd pointer
* @pScsiReply: Pointer to MPT reply frame
*
* MPT_DEBUG_REPLY needs to be enabled to obtain this info
*
* Refer to lsi/mpi.h.
**/
static void
mptscsih_info_scsiio(MPT_ADAPTER *ioc, struct scsi_cmnd *sc, SCSIIOReply_t * pScsiReply)
{
char *desc = NULL;
char *desc1 = NULL;
u16 ioc_status;
u8 skey, asc, ascq;
ioc_status = le16_to_cpu(pScsiReply->IOCStatus) & MPI_IOCSTATUS_MASK;
switch (ioc_status) {
case MPI_IOCSTATUS_SUCCESS:
desc = "success";
break;
case MPI_IOCSTATUS_SCSI_INVALID_BUS:
desc = "invalid bus";
break;
case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
desc = "invalid target_id";
break;
case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
desc = "device not there";
break;
case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
desc = "data overrun";
break;
case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
desc = "data underrun";
break;
case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:
desc = "I/O data error";
break;
case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
desc = "protocol error";
break;
case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
desc = "task terminated";
break;
case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
desc = "residual mismatch";
break;
case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
desc = "task management failed";
break;
case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
desc = "IOC terminated";
break;
case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
desc = "ext terminated";
break;
default:
desc = "";
break;
}
switch (pScsiReply->SCSIStatus)
{
case MPI_SCSI_STATUS_SUCCESS:
desc1 = "success";
break;
case MPI_SCSI_STATUS_CHECK_CONDITION:
desc1 = "check condition";
break;
case MPI_SCSI_STATUS_CONDITION_MET:
desc1 = "condition met";
break;
case MPI_SCSI_STATUS_BUSY:
desc1 = "busy";
break;
case MPI_SCSI_STATUS_INTERMEDIATE:
desc1 = "intermediate";
break;
case MPI_SCSI_STATUS_INTERMEDIATE_CONDMET:
desc1 = "intermediate condmet";
break;
case MPI_SCSI_STATUS_RESERVATION_CONFLICT:
desc1 = "reservation conflict";
break;
case MPI_SCSI_STATUS_COMMAND_TERMINATED:
desc1 = "command terminated";
break;
case MPI_SCSI_STATUS_TASK_SET_FULL:
desc1 = "task set full";
break;
case MPI_SCSI_STATUS_ACA_ACTIVE:
desc1 = "aca active";
break;
case MPI_SCSI_STATUS_FCPEXT_DEVICE_LOGGED_OUT:
desc1 = "fcpext device logged out";
break;
case MPI_SCSI_STATUS_FCPEXT_NO_LINK:
desc1 = "fcpext no link";
break;
case MPI_SCSI_STATUS_FCPEXT_UNASSIGNED:
desc1 = "fcpext unassigned";
break;
default:
desc1 = "";
break;
}
scsi_print_command(sc);
printk(MYIOC_s_DEBUG_FMT "\tfw_channel = %d, fw_id = %d, lun = %llu\n",
ioc->name, pScsiReply->Bus, pScsiReply->TargetID, sc->device->lun);
printk(MYIOC_s_DEBUG_FMT "\trequest_len = %d, underflow = %d, "
"resid = %d\n", ioc->name, scsi_bufflen(sc), sc->underflow,
scsi_get_resid(sc));
printk(MYIOC_s_DEBUG_FMT "\ttag = %d, transfer_count = %d, "
"sc->result = %08X\n", ioc->name, le16_to_cpu(pScsiReply->TaskTag),
le32_to_cpu(pScsiReply->TransferCount), sc->result);
printk(MYIOC_s_DEBUG_FMT "\tiocstatus = %s (0x%04x), "
"scsi_status = %s (0x%02x), scsi_state = (0x%02x)\n",
ioc->name, desc, ioc_status, desc1, pScsiReply->SCSIStatus,
pScsiReply->SCSIState);
if (pScsiReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) {
skey = sc->sense_buffer[2] & 0x0F;
asc = sc->sense_buffer[12];
ascq = sc->sense_buffer[13];
printk(MYIOC_s_DEBUG_FMT "\t[sense_key,asc,ascq]: "
"[0x%02x,0x%02x,0x%02x]\n", ioc->name, skey, asc, ascq);
}
/*
* Look for + dump FCP ResponseInfo[]!
*/
if (pScsiReply->SCSIState & MPI_SCSI_STATE_RESPONSE_INFO_VALID &&
pScsiReply->ResponseInfo)
printk(MYIOC_s_DEBUG_FMT "response_info = %08xh\n",
ioc->name, le32_to_cpu(pScsiReply->ResponseInfo));
}
#endif
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* mptscsih_io_done - Main SCSI IO callback routine registered to
* Fusion MPT (base) driver
* @ioc: Pointer to MPT_ADAPTER structure
* @mf: Pointer to original MPT request frame
* @r: Pointer to MPT reply frame (NULL if TurboReply)
*
* This routine is called from mpt.c::mpt_interrupt() at the completion
* of any SCSI IO request.
* This routine is registered with the Fusion MPT (base) driver at driver
* load/init time via the mpt_register() API call.
*
* Returns 1 indicating alloc'd request frame ptr should be freed.
*/
int
mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
{
struct scsi_cmnd *sc;
MPT_SCSI_HOST *hd;
SCSIIORequest_t *pScsiReq;
SCSIIOReply_t *pScsiReply;
u16 req_idx, req_idx_MR;
VirtDevice *vdevice;
VirtTarget *vtarget;
hd = shost_priv(ioc->sh);
req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
req_idx_MR = (mr != NULL) ?
le16_to_cpu(mr->u.frame.hwhdr.msgctxu.fld.req_idx) : req_idx;
/* Special case: an already-freed message frame is received from
* the firmware. This happens while the IOC is resetting.
* Return immediately; there is nothing to do.
*/
if ((req_idx != req_idx_MR) ||
(le32_to_cpu(mf->u.frame.linkage.arg1) == 0xdeadbeaf))
return 0;
sc = mptscsih_getclear_scsi_lookup(ioc, req_idx);
if (sc == NULL) {
MPIHeader_t *hdr = (MPIHeader_t *)mf;
/* Remark: writeSDP1 will use the ScsiDoneCtx
* If a SCSI I/O cmd, device disabled by OS and
* completion done. Cannot touch sc struct. Just free mem.
*/
if (hdr->Function == MPI_FUNCTION_SCSI_IO_REQUEST)
printk(MYIOC_s_ERR_FMT "NULL ScsiCmd ptr!\n",
ioc->name);
mptscsih_freeChainBuffers(ioc, req_idx);
return 1;
}
if ((unsigned char *)mf != sc->host_scribble) {
mptscsih_freeChainBuffers(ioc, req_idx);
return 1;
}
if (ioc->bus_type == SAS) {
VirtDevice *vdevice = sc->device->hostdata;
if (!vdevice || !vdevice->vtarget ||
vdevice->vtarget->deleted) {
sc->result = DID_NO_CONNECT << 16;
goto out;
}
}
sc->host_scribble = NULL;
sc->result = DID_OK << 16; /* Set default reply as OK */
pScsiReq = (SCSIIORequest_t *) mf;
pScsiReply = (SCSIIOReply_t *) mr;
if((ioc->facts.MsgVersion >= MPI_VERSION_01_05) && pScsiReply){
dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"ScsiDone (mf=%p,mr=%p,sc=%p,idx=%d,task-tag=%d)\n",
ioc->name, mf, mr, sc, req_idx, pScsiReply->TaskTag));
}else{
dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"ScsiDone (mf=%p,mr=%p,sc=%p,idx=%d)\n",
ioc->name, mf, mr, sc, req_idx));
}
if (pScsiReply == NULL) {
/* special context reply handling */
;
} else {
u32 xfer_cnt;
u16 status;
u8 scsi_state, scsi_status;
u32 log_info;
status = le16_to_cpu(pScsiReply->IOCStatus) & MPI_IOCSTATUS_MASK;
scsi_state = pScsiReply->SCSIState;
scsi_status = pScsiReply->SCSIStatus;
xfer_cnt = le32_to_cpu(pScsiReply->TransferCount);
scsi_set_resid(sc, scsi_bufflen(sc) - xfer_cnt);
log_info = le32_to_cpu(pScsiReply->IOCLogInfo);
/*
* if we get a data underrun indication, yet no data was
* transferred and the SCSI status indicates that the
* command was never started, change the data underrun
* to success
*/
if (status == MPI_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
(scsi_status == MPI_SCSI_STATUS_BUSY ||
scsi_status == MPI_SCSI_STATUS_RESERVATION_CONFLICT ||
scsi_status == MPI_SCSI_STATUS_TASK_SET_FULL)) {
status = MPI_IOCSTATUS_SUCCESS;
}
if (scsi_state & MPI_SCSI_STATE_AUTOSENSE_VALID)
mptscsih_copy_sense_data(sc, hd, mf, pScsiReply);
/*
* Look for + dump FCP ResponseInfo[]!
*/
if (scsi_state & MPI_SCSI_STATE_RESPONSE_INFO_VALID &&
pScsiReply->ResponseInfo) {
printk(MYIOC_s_NOTE_FMT "[%d:%d:%d:%llu] "
"FCP_ResponseInfo=%08xh\n", ioc->name,
sc->device->host->host_no, sc->device->channel,
sc->device->id, sc->device->lun,
le32_to_cpu(pScsiReply->ResponseInfo));
}
switch(status) {
case MPI_IOCSTATUS_BUSY: /* 0x0002 */
case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES: /* 0x0006 */
/* CHECKME!
* Maybe: DRIVER_BUSY | SUGGEST_RETRY | DID_SOFT_ERROR (retry)
* But not: DID_BUS_BUSY lest one risk
* killing interrupt handler:-(
*/
sc->result = SAM_STAT_BUSY;
break;
case MPI_IOCSTATUS_SCSI_INVALID_BUS: /* 0x0041 */
case MPI_IOCSTATUS_SCSI_INVALID_TARGETID: /* 0x0042 */
sc->result = DID_BAD_TARGET << 16;
break;
case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: /* 0x0043 */
/* Spoof to SCSI Selection Timeout! */
if (ioc->bus_type != FC)
sc->result = DID_NO_CONNECT << 16;
/* else fibre, just stall until rescan event */
else
sc->result = DID_REQUEUE << 16;
if (hd->sel_timeout[pScsiReq->TargetID] < 0xFFFF)
hd->sel_timeout[pScsiReq->TargetID]++;
vdevice = sc->device->hostdata;
if (!vdevice)
break;
vtarget = vdevice->vtarget;
if (vtarget->tflags & MPT_TARGET_FLAGS_LED_ON) {
mptscsih_issue_sep_command(ioc, vtarget,
MPI_SEP_REQ_SLOTSTATUS_UNCONFIGURED);
vtarget->tflags &= ~MPT_TARGET_FLAGS_LED_ON;
}
break;
case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: /* 0x004B */
			if (ioc->bus_type == SAS) {
				u16 ioc_status =
					le16_to_cpu(pScsiReply->IOCStatus);
				if ((ioc_status &
				     MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) &&
				    ((log_info & SAS_LOGINFO_MASK) ==
				     SAS_LOGINFO_NEXUS_LOSS)) {
					VirtDevice *vdevice =
						sc->device->hostdata;
					/* Flag the device as being in
					 * device removal delay so we can
					 * notify the midlayer to hold off
					 * on timeout eh.
					 */
					if (vdevice && vdevice->vtarget &&
					    vdevice->vtarget->raidVolume)
						printk(KERN_INFO
							"Skipping Raid Volume "
							"for inDMD\n");
					else if (vdevice && vdevice->vtarget)
						vdevice->vtarget->inDMD = 1;
					sc->result =
						(DID_TRANSPORT_DISRUPTED
							<< 16);
					break;
				}
} else if (ioc->bus_type == FC) {
/*
* The FC IOC may kill a request for variety of
* reasons, some of which may be recovered by a
* retry, some which are unlikely to be
* recovered. Return DID_ERROR instead of
* DID_RESET to permit retry of the command,
* just not an infinite number of them
*/
sc->result = DID_ERROR << 16;
break;
}
/*
* Allow non-SAS & non-NEXUS_LOSS to drop into below code
*/
fallthrough;
case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */
/* Linux handles an unsolicited DID_RESET better
* than an unsolicited DID_ABORT.
*/
sc->result = DID_RESET << 16;
break;
case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */
if (ioc->bus_type == FC)
sc->result = DID_ERROR << 16;
else
sc->result = DID_RESET << 16;
break;
case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: /* 0x0049 */
scsi_set_resid(sc, scsi_bufflen(sc) - xfer_cnt);
			if ((xfer_cnt == 0) || (sc->underflow > xfer_cnt))
				sc->result = DID_SOFT_ERROR << 16;
else /* Sufficient data transfer occurred */
sc->result = (DID_OK << 16) | scsi_status;
dreplyprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"RESIDUAL_MISMATCH: result=%x on channel=%d id=%d\n",
ioc->name, sc->result, sc->device->channel, sc->device->id));
break;
case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: /* 0x0045 */
/*
* Do upfront check for valid SenseData and give it
* precedence!
*/
sc->result = (DID_OK << 16) | scsi_status;
if (!(scsi_state & MPI_SCSI_STATE_AUTOSENSE_VALID)) {
				/*
				 * Workaround for an LSI53C1030 errata: when
				 * the requested length and the transferred
				 * length differ for a READ or VERIFY
				 * command, DID_SOFT_ERROR is returned so the
				 * midlayer will retry.
				 */
if (ioc->bus_type == SPI) {
				if ((pScsiReq->CDB[0] == READ_6 &&
				     ((pScsiReq->CDB[1] & 0x02) == 0)) ||
				    pScsiReq->CDB[0] == READ_10 ||
				    pScsiReq->CDB[0] == READ_12 ||
				    (pScsiReq->CDB[0] == READ_16 &&
				     ((pScsiReq->CDB[1] & 0x02) == 0)) ||
				    pScsiReq->CDB[0] == VERIFY ||
				    pScsiReq->CDB[0] == VERIFY_16) {
					if (scsi_bufflen(sc) != xfer_cnt) {
						sc->result =
							DID_SOFT_ERROR << 16;
						printk(KERN_WARNING "Errata "
						    "on LSI53C1030 occurred. "
						    "sc->req_bufflen=0x%02x, "
						    "xfer_cnt=0x%02x\n",
						    scsi_bufflen(sc),
						    xfer_cnt);
					}
				}
}
if (xfer_cnt < sc->underflow) {
if (scsi_status == SAM_STAT_BUSY)
sc->result = SAM_STAT_BUSY;
else
sc->result = DID_SOFT_ERROR << 16;
}
if (scsi_state & (MPI_SCSI_STATE_AUTOSENSE_FAILED | MPI_SCSI_STATE_NO_SCSI_STATUS)) {
/* What to do?
*/
sc->result = DID_SOFT_ERROR << 16;
}
else if (scsi_state & MPI_SCSI_STATE_TERMINATED) {
/* Not real sure here either... */
sc->result = DID_RESET << 16;
}
}
dreplyprintk(ioc, printk(MYIOC_s_DEBUG_FMT
" sc->underflow={report ERR if < %02xh bytes xfer'd}\n",
ioc->name, sc->underflow));
dreplyprintk(ioc, printk(MYIOC_s_DEBUG_FMT
" ActBytesXferd=%02xh\n", ioc->name, xfer_cnt));
/* Report Queue Full
*/
if (scsi_status == MPI_SCSI_STATUS_TASK_SET_FULL)
mptscsih_report_queue_full(sc, pScsiReply, pScsiReq);
break;
case MPI_IOCSTATUS_SCSI_DATA_OVERRUN: /* 0x0044 */
scsi_set_resid(sc, 0);
fallthrough;
case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: /* 0x0040 */
case MPI_IOCSTATUS_SUCCESS: /* 0x0000 */
sc->result = (DID_OK << 16) | scsi_status;
if (scsi_state == 0) {
;
} else if (scsi_state &
MPI_SCSI_STATE_AUTOSENSE_VALID) {
				/*
				 * Workaround for potential trouble on the
				 * LSI53C1030 (circa 2007): check whether the
				 * requested length equals the transferred
				 * length plus the reported residual; on a
				 * mismatch the returned data is suspect, so
				 * MEDIUM_ERROR is forced into the sense data.
				 */
				if ((ioc->bus_type == SPI) &&
				    (sc->sense_buffer[2] & 0x20)) {
					u32 difftransfer;
					difftransfer =
					    sc->sense_buffer[3] << 24 |
					    sc->sense_buffer[4] << 16 |
					    sc->sense_buffer[5] << 8 |
					    sc->sense_buffer[6];
					if (((sc->sense_buffer[3] & 0x80) ==
					    0x80) &&
					    (scsi_bufflen(sc) != xfer_cnt)) {
						sc->sense_buffer[2] =
						    MEDIUM_ERROR;
						sc->sense_buffer[12] = 0xff;
						sc->sense_buffer[13] = 0xff;
						printk(KERN_WARNING "Errata "
						    "on LSI53C1030 occurred. "
						    "sc->req_bufflen=0x%02x, "
						    "xfer_cnt=0x%02x\n",
						    scsi_bufflen(sc),
						    xfer_cnt);
					}
					if (((sc->sense_buffer[3] & 0x80) !=
					    0x80) &&
					    (scsi_bufflen(sc) !=
					    xfer_cnt + difftransfer)) {
						sc->sense_buffer[2] =
						    MEDIUM_ERROR;
						sc->sense_buffer[12] = 0xff;
						sc->sense_buffer[13] = 0xff;
						printk(KERN_WARNING "Errata "
						    "on LSI53C1030 occurred. "
						    "sc->req_bufflen=0x%02x, "
						    "xfer_cnt=0x%02x, "
						    "difftransfer=0x%02x\n",
						    scsi_bufflen(sc),
						    xfer_cnt,
						    difftransfer);
					}
				}
/*
* If running against circa 200003dd 909 MPT f/w,
* may get this (AUTOSENSE_VALID) for actual TASK_SET_FULL
* (QUEUE_FULL) returned from device! --> get 0x0000?128
* and with SenseBytes set to 0.
*/
if (pScsiReply->SCSIStatus == MPI_SCSI_STATUS_TASK_SET_FULL)
mptscsih_report_queue_full(sc, pScsiReply, pScsiReq);
}
else if (scsi_state &
(MPI_SCSI_STATE_AUTOSENSE_FAILED | MPI_SCSI_STATE_NO_SCSI_STATUS)
) {
/*
* What to do?
*/
sc->result = DID_SOFT_ERROR << 16;
}
else if (scsi_state & MPI_SCSI_STATE_TERMINATED) {
/* Not real sure here either... */
sc->result = DID_RESET << 16;
}
else if (scsi_state & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) {
/* Device Inq. data indicates that it supports
* QTags, but rejects QTag messages.
* This command completed OK.
*
* Not real sure here either so do nothing... */
}
if (sc->result == MPI_SCSI_STATUS_TASK_SET_FULL)
mptscsih_report_queue_full(sc, pScsiReply, pScsiReq);
/* Add handling of:
* Reservation Conflict, Busy,
* Command Terminated, CHECK
*/
break;
case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR: /* 0x0047 */
sc->result = DID_SOFT_ERROR << 16;
break;
case MPI_IOCSTATUS_INVALID_FUNCTION: /* 0x0001 */
case MPI_IOCSTATUS_INVALID_SGL: /* 0x0003 */
case MPI_IOCSTATUS_INTERNAL_ERROR: /* 0x0004 */
case MPI_IOCSTATUS_RESERVED: /* 0x0005 */
case MPI_IOCSTATUS_INVALID_FIELD: /* 0x0007 */
case MPI_IOCSTATUS_INVALID_STATE: /* 0x0008 */
case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR: /* 0x0046 */
case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED: /* 0x004A */
default:
/*
* What to do?
*/
sc->result = DID_SOFT_ERROR << 16;
break;
} /* switch(status) */
#ifdef CONFIG_FUSION_LOGGING
if (sc->result && (ioc->debug_level & MPT_DEBUG_REPLY))
mptscsih_info_scsiio(ioc, sc, pScsiReply);
#endif
} /* end of address reply case */
out:
/* Unmap the DMA buffers, if any. */
scsi_dma_unmap(sc);
scsi_done(sc); /* Issue the command callback */
/* Free Chain buffers */
mptscsih_freeChainBuffers(ioc, req_idx);
return 1;
}
/*
* mptscsih_flush_running_cmds - For each command found, search
* Scsi_Host instance taskQ and reply to OS.
* Called only if recovering from a FW reload.
* @hd: Pointer to a SCSI HOST structure
*
* Returns: None.
*
* Must be called while new I/Os are being queued.
*/
void
mptscsih_flush_running_cmds(MPT_SCSI_HOST *hd)
{
MPT_ADAPTER *ioc = hd->ioc;
struct scsi_cmnd *sc;
SCSIIORequest_t *mf = NULL;
int ii;
int channel, id;
for (ii= 0; ii < ioc->req_depth; ii++) {
sc = mptscsih_getclear_scsi_lookup(ioc, ii);
if (!sc)
continue;
mf = (SCSIIORequest_t *)MPT_INDEX_2_MFPTR(ioc, ii);
if (!mf)
continue;
channel = mf->Bus;
id = mf->TargetID;
mptscsih_freeChainBuffers(ioc, ii);
mpt_free_msg_frame(ioc, (MPT_FRAME_HDR *)mf);
if ((unsigned char *)mf != sc->host_scribble)
continue;
scsi_dma_unmap(sc);
sc->result = DID_RESET << 16;
sc->host_scribble = NULL;
dtmprintk(ioc, sdev_printk(KERN_INFO, sc->device, MYIOC_s_FMT
"completing cmds: fw_channel %d, fw_id %d, sc=%p, mf = %p, "
"idx=%x\n", ioc->name, channel, id, sc, mf, ii));
scsi_done(sc);
}
}
EXPORT_SYMBOL(mptscsih_flush_running_cmds);
/*
 *	mptscsih_search_running_cmds - Delete any commands associated
 *	with the specified target and lun. Called only when a LUN is
 *	disabled by the mid-layer.
 *	Do NOT access the referenced scsi_cmnd structure or
 *	members. Will cause either a paging or NULL ptr error.
 *	(BUT, BUT, BUT, the code does reference it! - mdr)
* @hd: Pointer to a SCSI HOST structure
* @vdevice: per device private data
*
* Returns: None.
*
* Called from slave_destroy.
*/
static void
mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
{
SCSIIORequest_t *mf = NULL;
int ii;
struct scsi_cmnd *sc;
struct scsi_lun lun;
MPT_ADAPTER *ioc = hd->ioc;
unsigned long flags;
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
for (ii = 0; ii < ioc->req_depth; ii++) {
if ((sc = ioc->ScsiLookup[ii]) != NULL) {
mf = (SCSIIORequest_t *)MPT_INDEX_2_MFPTR(ioc, ii);
if (mf == NULL)
continue;
/* If the device is a hidden raid component, then its
* expected that the mf->function will be RAID_SCSI_IO
*/
if (vdevice->vtarget->tflags &
MPT_TARGET_FLAGS_RAID_COMPONENT && mf->Function !=
MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)
continue;
int_to_scsilun(vdevice->lun, &lun);
if ((mf->Bus != vdevice->vtarget->channel) ||
(mf->TargetID != vdevice->vtarget->id) ||
memcmp(lun.scsi_lun, mf->LUN, 8))
continue;
if ((unsigned char *)mf != sc->host_scribble)
continue;
ioc->ScsiLookup[ii] = NULL;
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
mptscsih_freeChainBuffers(ioc, ii);
mpt_free_msg_frame(ioc, (MPT_FRAME_HDR *)mf);
scsi_dma_unmap(sc);
sc->host_scribble = NULL;
sc->result = DID_NO_CONNECT << 16;
dtmprintk(ioc, sdev_printk(KERN_INFO, sc->device,
MYIOC_s_FMT "completing cmds: fw_channel %d, "
"fw_id %d, sc=%p, mf = %p, idx=%x\n", ioc->name,
vdevice->vtarget->channel, vdevice->vtarget->id,
sc, mf, ii));
scsi_done(sc);
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
}
}
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
return;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* mptscsih_report_queue_full - Report QUEUE_FULL status returned
* from a SCSI target device.
* @sc: Pointer to scsi_cmnd structure
* @pScsiReply: Pointer to SCSIIOReply_t
* @pScsiReq: Pointer to original SCSI request
*
* This routine periodically reports QUEUE_FULL status returned from a
* SCSI target device. It reports this to the console via kernel
* printk() API call, not more than once every 10 seconds.
*/
static void
mptscsih_report_queue_full(struct scsi_cmnd *sc, SCSIIOReply_t *pScsiReply, SCSIIORequest_t *pScsiReq)
{
long time = jiffies;
MPT_SCSI_HOST *hd;
MPT_ADAPTER *ioc;
if (sc->device == NULL)
return;
if (sc->device->host == NULL)
return;
if ((hd = shost_priv(sc->device->host)) == NULL)
return;
ioc = hd->ioc;
if (time - hd->last_queue_full > 10 * HZ) {
dprintk(ioc, printk(MYIOC_s_WARN_FMT "Device (%d:%d:%llu) reported QUEUE_FULL!\n",
ioc->name, 0, sc->device->id, sc->device->lun));
hd->last_queue_full = time;
}
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 *	mptscsih_remove - Remove SCSI devices and free driver resources.
 *	@pdev: Pointer to pci_dev structure
 */
void
mptscsih_remove(struct pci_dev *pdev)
{
MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
struct Scsi_Host *host = ioc->sh;
MPT_SCSI_HOST *hd;
int sz1;
if (host == NULL)
hd = NULL;
else
hd = shost_priv(host);
mptscsih_shutdown(pdev);
sz1=0;
if (ioc->ScsiLookup != NULL) {
sz1 = ioc->req_depth * sizeof(void *);
kfree(ioc->ScsiLookup);
ioc->ScsiLookup = NULL;
}
dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Free'd ScsiLookup (%d) memory\n",
ioc->name, sz1));
if (hd)
kfree(hd->info_kbuf);
/* NULL the Scsi_Host pointer
*/
ioc->sh = NULL;
if (host)
scsi_host_put(host);
mpt_detach(pdev);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 *	mptscsih_shutdown - reboot notifier
 *	@pdev: Pointer to pci_dev structure
 */
void
mptscsih_shutdown(struct pci_dev *pdev)
{
}
#ifdef CONFIG_PM
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 *	mptscsih_suspend - Fusion MPT scsi driver suspend routine.
 *	@pdev: Pointer to pci_dev structure
 *	@state: PM state to transition to
 */
int
mptscsih_suspend(struct pci_dev *pdev, pm_message_t state)
{
MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
scsi_block_requests(ioc->sh);
mptscsih_shutdown(pdev);
return mpt_suspend(pdev,state);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 *	mptscsih_resume - Fusion MPT scsi driver resume routine.
 *	@pdev: Pointer to pci_dev structure
 */
int
mptscsih_resume(struct pci_dev *pdev)
{
MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
int rc;
rc = mpt_resume(pdev);
scsi_unblock_requests(ioc->sh);
return rc;
}
#endif
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mptscsih_info - Return information about MPT adapter
* @SChost: Pointer to Scsi_Host structure
*
* (linux scsi_host_template.info routine)
*
* Returns pointer to buffer where information was written.
*/
const char *
mptscsih_info(struct Scsi_Host *SChost)
{
MPT_SCSI_HOST *h;
int size = 0;
h = shost_priv(SChost);
if (h->info_kbuf == NULL)
if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
return h->info_kbuf;
h->info_kbuf[0] = '\0';
mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
h->info_kbuf[size-1] = '\0';
return h->info_kbuf;
}
int mptscsih_show_info(struct seq_file *m, struct Scsi_Host *host)
{
MPT_SCSI_HOST *hd = shost_priv(host);
MPT_ADAPTER *ioc = hd->ioc;
seq_printf(m, "%s: %s, ", ioc->name, ioc->prod_name);
seq_printf(m, "%s%08xh, ", MPT_FW_REV_MAGIC_ID_STRING, ioc->facts.FWVersion.Word);
seq_printf(m, "Ports=%d, ", ioc->facts.NumberOfPorts);
seq_printf(m, "MaxQ=%d\n", ioc->req_depth);
return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
#define ADD_INDEX_LOG(req_ent) do { } while(0)
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mptscsih_qcmd - Primary Fusion MPT SCSI initiator IO start routine.
* @SCpnt: Pointer to scsi_cmnd structure
*
* (linux scsi_host_template.queuecommand routine)
* This is the primary SCSI IO start routine. Create a MPI SCSIIORequest
* from a linux scsi_cmnd request and send it to the IOC.
*
 *	Returns 0 on successful submission, or SCSI_MLQUEUE_HOST_BUSY when
 *	the IOC is quiescing I/O or no message frames are available.
*/
int
mptscsih_qcmd(struct scsi_cmnd *SCpnt)
{
MPT_SCSI_HOST *hd;
MPT_FRAME_HDR *mf;
SCSIIORequest_t *pScsiReq;
VirtDevice *vdevice = SCpnt->device->hostdata;
u32 datalen;
u32 scsictl;
u32 scsidir;
u32 cmd_len;
int my_idx;
int ii;
MPT_ADAPTER *ioc;
hd = shost_priv(SCpnt->device->host);
ioc = hd->ioc;
dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "qcmd: SCpnt=%p\n",
ioc->name, SCpnt));
if (ioc->taskmgmt_quiesce_io)
return SCSI_MLQUEUE_HOST_BUSY;
/*
* Put together a MPT SCSI request...
*/
if ((mf = mpt_get_msg_frame(ioc->DoneCtx, ioc)) == NULL) {
dprintk(ioc, printk(MYIOC_s_WARN_FMT "QueueCmd, no msg frames!!\n",
ioc->name));
return SCSI_MLQUEUE_HOST_BUSY;
}
pScsiReq = (SCSIIORequest_t *) mf;
my_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
ADD_INDEX_LOG(my_idx);
/* TUR's being issued with scsictl=0x02000000 (DATA_IN)!
* Seems we may receive a buffer (datalen>0) even when there
* will be no data transfer! GRRRRR...
*/
if (SCpnt->sc_data_direction == DMA_FROM_DEVICE) {
datalen = scsi_bufflen(SCpnt);
scsidir = MPI_SCSIIO_CONTROL_READ; /* DATA IN (host<--ioc<--dev) */
} else if (SCpnt->sc_data_direction == DMA_TO_DEVICE) {
datalen = scsi_bufflen(SCpnt);
scsidir = MPI_SCSIIO_CONTROL_WRITE; /* DATA OUT (host-->ioc-->dev) */
} else {
datalen = 0;
scsidir = MPI_SCSIIO_CONTROL_NODATATRANSFER;
}
/* Default to untagged. Once a target structure has been allocated,
* use the Inquiry data to determine if device supports tagged.
*/
if ((vdevice->vtarget->tflags & MPT_TARGET_FLAGS_Q_YES) &&
SCpnt->device->tagged_supported)
scsictl = scsidir | MPI_SCSIIO_CONTROL_SIMPLEQ;
else
scsictl = scsidir | MPI_SCSIIO_CONTROL_UNTAGGED;
/* Use the above information to set up the message frame
*/
pScsiReq->TargetID = (u8) vdevice->vtarget->id;
pScsiReq->Bus = vdevice->vtarget->channel;
pScsiReq->ChainOffset = 0;
if (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT)
pScsiReq->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
else
pScsiReq->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
pScsiReq->CDBLength = SCpnt->cmd_len;
pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE;
pScsiReq->Reserved = 0;
pScsiReq->MsgFlags = mpt_msg_flags(ioc);
int_to_scsilun(SCpnt->device->lun, (struct scsi_lun *)pScsiReq->LUN);
pScsiReq->Control = cpu_to_le32(scsictl);
/*
* Write SCSI CDB into the message
*/
cmd_len = SCpnt->cmd_len;
for (ii=0; ii < cmd_len; ii++)
pScsiReq->CDB[ii] = SCpnt->cmnd[ii];
for (ii=cmd_len; ii < 16; ii++)
pScsiReq->CDB[ii] = 0;
/* DataLength */
pScsiReq->DataLength = cpu_to_le32(datalen);
/* SenseBuffer low address */
pScsiReq->SenseBufferLowAddr = cpu_to_le32(ioc->sense_buf_low_dma
+ (my_idx * MPT_SENSE_BUFFER_ALLOC));
/* Now add the SG list
* Always have a SGE even if null length.
*/
if (datalen == 0) {
/* Add a NULL SGE */
ioc->add_sge((char *)&pScsiReq->SGL,
MPT_SGE_FLAGS_SSIMPLE_READ | 0,
(dma_addr_t) -1);
} else {
/* Add a 32 or 64 bit SGE */
if (mptscsih_AddSGE(ioc, SCpnt, pScsiReq, my_idx) != SUCCESS)
goto fail;
}
SCpnt->host_scribble = (unsigned char *)mf;
mptscsih_set_scsi_lookup(ioc, my_idx, SCpnt);
mpt_put_msg_frame(ioc->DoneCtx, ioc, mf);
dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Issued SCSI cmd (%p) mf=%p idx=%d\n",
ioc->name, SCpnt, mf, my_idx));
DBG_DUMP_REQUEST_FRAME(ioc, (u32 *)mf);
return 0;
fail:
mptscsih_freeChainBuffers(ioc, my_idx);
mpt_free_msg_frame(ioc, mf);
return SCSI_MLQUEUE_HOST_BUSY;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* mptscsih_freeChainBuffers - Function to free chain buffers associated
* with a SCSI IO request
 *	@ioc: Pointer to MPT_ADAPTER structure
 *	@req_idx: Index of the SCSI IO request frame.
 *
 *	Called when an SG chain buffer allocation fails and from the
 *	mptscsih reply callbacks.
* No return.
*/
static void
mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx)
{
MPT_FRAME_HDR *chain;
unsigned long flags;
int chain_idx;
int next;
/* Get the first chain index and reset
* tracker state.
*/
chain_idx = ioc->ReqToChain[req_idx];
ioc->ReqToChain[req_idx] = MPT_HOST_NO_CHAIN;
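	/* Chain buffers for one request form a singly-linked list through
	 * ChainToChain[]; walk it, returning each frame to the free queue.
	 */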
while (chain_idx != MPT_HOST_NO_CHAIN) {
/* Save the next chain buffer index */
next = ioc->ChainToChain[chain_idx];
/* Free this chain buffer and reset
* tracker
*/
ioc->ChainToChain[chain_idx] = MPT_HOST_NO_CHAIN;
chain = (MPT_FRAME_HDR *) (ioc->ChainBuffer
+ (chain_idx * ioc->req_sz));
spin_lock_irqsave(&ioc->FreeQlock, flags);
list_add_tail(&chain->u.frame.linkage.list, &ioc->FreeChainQ);
spin_unlock_irqrestore(&ioc->FreeQlock, flags);
dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "FreeChainBuffers (index %d)\n",
ioc->name, chain_idx));
/* handle next */
chain_idx = next;
}
return;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* Reset Handling
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mptscsih_IssueTaskMgmt - Generic send Task Management function.
* @hd: Pointer to MPT_SCSI_HOST structure
* @type: Task Management type
* @channel: channel number for task management
* @id: Logical Target ID for reset (if appropriate)
* @lun: Logical Unit for reset (if appropriate)
* @ctx2abort: Context for the task to be aborted (if appropriate)
* @timeout: timeout for task management control
*
* Remark: _HardResetHandler can be invoked from an interrupt thread (timer)
* or a non-interrupt thread. In the former, must not call schedule().
*
 *	Not all fields are meaningful for all task types.
*
* Returns 0 for SUCCESS, or FAILED.
*
**/
int
mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, u64 lun,
int ctx2abort, ulong timeout)
{
MPT_FRAME_HDR *mf;
SCSITaskMgmt_t *pScsiTm;
int ii;
int retval;
MPT_ADAPTER *ioc = hd->ioc;
u8 issue_hard_reset;
u32 ioc_raw_state;
unsigned long time_count;
issue_hard_reset = 0;
ioc_raw_state = mpt_GetIocState(ioc, 0);
if ((ioc_raw_state & MPI_IOC_STATE_MASK) != MPI_IOC_STATE_OPERATIONAL) {
printk(MYIOC_s_WARN_FMT
"TaskMgmt type=%x: IOC Not operational (0x%x)!\n",
ioc->name, type, ioc_raw_state);
printk(MYIOC_s_WARN_FMT "Issuing HardReset from %s!!\n",
ioc->name, __func__);
if (mpt_HardResetHandler(ioc, CAN_SLEEP) < 0)
printk(MYIOC_s_WARN_FMT "TaskMgmt HardReset "
"FAILED!!\n", ioc->name);
return 0;
}
/* DOORBELL ACTIVE check is not required if
* MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q is supported.
*/
if (!((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q)
&& (ioc->facts.MsgVersion >= MPI_VERSION_01_05)) &&
(ioc_raw_state & MPI_DOORBELL_ACTIVE)) {
printk(MYIOC_s_WARN_FMT
"TaskMgmt type=%x: ioc_state: "
"DOORBELL_ACTIVE (0x%x)!\n",
ioc->name, type, ioc_raw_state);
return FAILED;
}
mutex_lock(&ioc->taskmgmt_cmds.mutex);
if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) {
mf = NULL;
retval = FAILED;
goto out;
}
/* Return Fail to calling function if no message frames available.
*/
if ((mf = mpt_get_msg_frame(ioc->TaskCtx, ioc)) == NULL) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"TaskMgmt no msg frames!!\n", ioc->name));
retval = FAILED;
mpt_clear_taskmgmt_in_progress_flag(ioc);
goto out;
}
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request (mf=%p)\n",
ioc->name, mf));
/* Format the Request
*/
pScsiTm = (SCSITaskMgmt_t *) mf;
pScsiTm->TargetID = id;
pScsiTm->Bus = channel;
pScsiTm->ChainOffset = 0;
pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
pScsiTm->Reserved = 0;
pScsiTm->TaskType = type;
pScsiTm->Reserved1 = 0;
pScsiTm->MsgFlags = (type == MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS)
? MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION : 0;
int_to_scsilun(lun, (struct scsi_lun *)pScsiTm->LUN);
for (ii=0; ii < 7; ii++)
pScsiTm->Reserved2[ii] = 0;
pScsiTm->TaskMsgContext = ctx2abort;
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt: ctx2abort (0x%08x) "
"task_type = 0x%02X, timeout = %ld\n", ioc->name, ctx2abort,
type, timeout));
DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)pScsiTm);
INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status)
time_count = jiffies;
if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) &&
(ioc->facts.MsgVersion >= MPI_VERSION_01_05))
mpt_put_msg_frame_hi_pri(ioc->TaskCtx, ioc, mf);
else {
retval = mpt_send_handshake_request(ioc->TaskCtx, ioc,
sizeof(SCSITaskMgmt_t), (u32*)pScsiTm, CAN_SLEEP);
if (retval) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"TaskMgmt handshake FAILED!(mf=%p, rc=%d) \n",
ioc->name, mf, retval));
mpt_free_msg_frame(ioc, mf);
mpt_clear_taskmgmt_in_progress_flag(ioc);
goto out;
}
}
wait_for_completion_timeout(&ioc->taskmgmt_cmds.done,
timeout*HZ);
if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
retval = FAILED;
dtmprintk(ioc, printk(MYIOC_s_ERR_FMT
"TaskMgmt TIMED OUT!(mf=%p)\n", ioc->name, mf));
mpt_clear_taskmgmt_in_progress_flag(ioc);
if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
goto out;
issue_hard_reset = 1;
goto out;
}
retval = mptscsih_taskmgmt_reply(ioc, type,
(SCSITaskMgmtReply_t *) ioc->taskmgmt_cmds.reply);
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"TaskMgmt completed (%d seconds)\n",
ioc->name, jiffies_to_msecs(jiffies - time_count)/1000));
out:
CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
if (issue_hard_reset) {
printk(MYIOC_s_WARN_FMT
"Issuing Reset from %s!! doorbell=0x%08x\n",
ioc->name, __func__, mpt_GetIocState(ioc, 0));
retval = (ioc->bus_type == SAS) ?
mpt_HardResetHandler(ioc, CAN_SLEEP) :
mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
mpt_free_msg_frame(ioc, mf);
}
retval = (retval == 0) ? 0 : FAILED;
mutex_unlock(&ioc->taskmgmt_cmds.mutex);
return retval;
}
EXPORT_SYMBOL(mptscsih_IssueTaskMgmt);
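/*
 * Illustrative use (this mirrors mptscsih_abort() below): abort a single
 * task by the MsgContext of its outstanding request frame, using the
 * per-bus-type timeout:
 *
 *	mf = MPT_INDEX_2_MFPTR(ioc, scpnt_idx);
 *	ctx2abort = mf->u.frame.hwhdr.msgctxu.MsgContext;
 *	retval = mptscsih_IssueTaskMgmt(hd,
 *			MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
 *			vdevice->vtarget->channel, vdevice->vtarget->id,
 *			vdevice->lun, ctx2abort, mptscsih_get_tm_timeout(ioc));
 */
/*
 *	mptscsih_get_tm_timeout - per-bus-type task management timeout,
 *	in seconds; FC targets get the longest window.
 */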
static int
mptscsih_get_tm_timeout(MPT_ADAPTER *ioc)
{
switch (ioc->bus_type) {
case FC:
return 40;
case SAS:
return 30;
case SPI:
default:
return 10;
}
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mptscsih_abort - Abort linux scsi_cmnd routine, new_eh variant
* @SCpnt: Pointer to scsi_cmnd structure, IO to be aborted
*
* (linux scsi_host_template.eh_abort_handler routine)
*
* Returns SUCCESS or FAILED.
**/
int
mptscsih_abort(struct scsi_cmnd * SCpnt)
{
MPT_SCSI_HOST *hd;
MPT_FRAME_HDR *mf;
u32 ctx2abort;
int scpnt_idx;
int retval;
VirtDevice *vdevice;
MPT_ADAPTER *ioc;
/* If we can't locate our host adapter structure, return FAILED status.
*/
if ((hd = shost_priv(SCpnt->device->host)) == NULL) {
SCpnt->result = DID_RESET << 16;
scsi_done(SCpnt);
printk(KERN_ERR MYNAM ": task abort: "
"can't locate host! (sc=%p)\n", SCpnt);
return FAILED;
}
ioc = hd->ioc;
printk(MYIOC_s_INFO_FMT "attempting task abort! (sc=%p)\n",
ioc->name, SCpnt);
scsi_print_command(SCpnt);
vdevice = SCpnt->device->hostdata;
if (!vdevice || !vdevice->vtarget) {
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"task abort: device has been deleted (sc=%p)\n",
ioc->name, SCpnt));
SCpnt->result = DID_NO_CONNECT << 16;
scsi_done(SCpnt);
retval = SUCCESS;
goto out;
}
/* Task aborts are not supported for hidden raid components.
*/
if (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"task abort: hidden raid component (sc=%p)\n",
ioc->name, SCpnt));
SCpnt->result = DID_RESET << 16;
retval = FAILED;
goto out;
}
/* Task aborts are not supported for volumes.
*/
if (vdevice->vtarget->raidVolume) {
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"task abort: raid volume (sc=%p)\n",
ioc->name, SCpnt));
SCpnt->result = DID_RESET << 16;
retval = FAILED;
goto out;
}
/* Find this command
*/
if ((scpnt_idx = SCPNT_TO_LOOKUP_IDX(ioc, SCpnt)) < 0) {
/* Cmd not found in ScsiLookup.
* Do OS callback.
*/
SCpnt->result = DID_RESET << 16;
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "task abort: "
"Command not in the active list! (sc=%p)\n", ioc->name,
SCpnt));
retval = SUCCESS;
goto out;
}
if (ioc->timeouts < -1)
ioc->timeouts++;
if (mpt_fwfault_debug)
mpt_halt_firmware(ioc);
/* Most important! Set TaskMsgContext to SCpnt's MsgContext!
* (the IO to be ABORT'd)
*
* NOTE: Since we do not byteswap MsgContext, we do not
* swap it here either. It is an opaque cookie to
* the controller, so it does not matter. -DaveM
*/
mf = MPT_INDEX_2_MFPTR(ioc, scpnt_idx);
ctx2abort = mf->u.frame.hwhdr.msgctxu.MsgContext;
retval = mptscsih_IssueTaskMgmt(hd,
MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
vdevice->vtarget->channel,
vdevice->vtarget->id, vdevice->lun,
ctx2abort, mptscsih_get_tm_timeout(ioc));
if (SCPNT_TO_LOOKUP_IDX(ioc, SCpnt) == scpnt_idx) {
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"task abort: command still in active list! (sc=%p)\n",
ioc->name, SCpnt));
retval = FAILED;
} else {
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"task abort: command cleared from active list! (sc=%p)\n",
ioc->name, SCpnt));
retval = SUCCESS;
}
out:
printk(MYIOC_s_INFO_FMT "task abort: %s (rv=%04x) (sc=%p)\n",
ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), retval,
SCpnt);
return retval;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mptscsih_dev_reset - Perform a SCSI TARGET_RESET! new_eh variant
* @SCpnt: Pointer to scsi_cmnd structure, IO which reset is due to
*
* (linux scsi_host_template.eh_dev_reset_handler routine)
*
* Returns SUCCESS or FAILED.
**/
int
mptscsih_dev_reset(struct scsi_cmnd * SCpnt)
{
MPT_SCSI_HOST *hd;
int retval;
VirtDevice *vdevice;
MPT_ADAPTER *ioc;
/* If we can't locate our host adapter structure, return FAILED status.
*/
if ((hd = shost_priv(SCpnt->device->host)) == NULL){
printk(KERN_ERR MYNAM ": target reset: "
"Can't locate host! (sc=%p)\n", SCpnt);
return FAILED;
}
ioc = hd->ioc;
printk(MYIOC_s_INFO_FMT "attempting target reset! (sc=%p)\n",
ioc->name, SCpnt);
scsi_print_command(SCpnt);
vdevice = SCpnt->device->hostdata;
if (!vdevice || !vdevice->vtarget) {
retval = 0;
goto out;
}
/* Target reset to hidden raid component is not supported
*/
if (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
retval = FAILED;
goto out;
}
retval = mptscsih_IssueTaskMgmt(hd,
MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
vdevice->vtarget->channel,
vdevice->vtarget->id, 0, 0,
mptscsih_get_tm_timeout(ioc));
out:
printk (MYIOC_s_INFO_FMT "target reset: %s (sc=%p)\n",
ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
if (retval == 0)
return SUCCESS;
else
return FAILED;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mptscsih_bus_reset - Perform a SCSI BUS_RESET! new_eh variant
* @SCpnt: Pointer to scsi_cmnd structure, IO which reset is due to
*
* (linux scsi_host_template.eh_bus_reset_handler routine)
*
* Returns SUCCESS or FAILED.
**/
int
mptscsih_bus_reset(struct scsi_cmnd * SCpnt)
{
MPT_SCSI_HOST *hd;
int retval;
VirtDevice *vdevice;
MPT_ADAPTER *ioc;
/* If we can't locate our host adapter structure, return FAILED status.
*/
if ((hd = shost_priv(SCpnt->device->host)) == NULL){
printk(KERN_ERR MYNAM ": bus reset: "
"Can't locate host! (sc=%p)\n", SCpnt);
return FAILED;
}
ioc = hd->ioc;
printk(MYIOC_s_INFO_FMT "attempting bus reset! (sc=%p)\n",
ioc->name, SCpnt);
scsi_print_command(SCpnt);
if (ioc->timeouts < -1)
ioc->timeouts++;
vdevice = SCpnt->device->hostdata;
if (!vdevice || !vdevice->vtarget)
return SUCCESS;
retval = mptscsih_IssueTaskMgmt(hd,
MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
vdevice->vtarget->channel, 0, 0, 0,
mptscsih_get_tm_timeout(ioc));
printk(MYIOC_s_INFO_FMT "bus reset: %s (sc=%p)\n",
ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
if (retval == 0)
return SUCCESS;
else
return FAILED;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mptscsih_host_reset - Perform a SCSI host adapter RESET (new_eh variant)
* @SCpnt: Pointer to scsi_cmnd structure, IO which reset is due to
*
* (linux scsi_host_template.eh_host_reset_handler routine)
*
* Returns SUCCESS or FAILED.
*/
int
mptscsih_host_reset(struct scsi_cmnd *SCpnt)
{
MPT_SCSI_HOST * hd;
int status = SUCCESS;
MPT_ADAPTER *ioc;
int retval;
/* If we can't locate the host to reset, then we failed. */
if ((hd = shost_priv(SCpnt->device->host)) == NULL){
printk(KERN_ERR MYNAM ": host reset: "
"Can't locate host! (sc=%p)\n", SCpnt);
return FAILED;
}
/* make sure we have no outstanding commands at this stage */
mptscsih_flush_running_cmds(hd);
ioc = hd->ioc;
printk(MYIOC_s_INFO_FMT "attempting host reset! (sc=%p)\n",
ioc->name, SCpnt);
/* If our attempts to reset the host failed, then return a failed
* status. The host will be taken off line by the SCSI mid-layer.
*/
retval = mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
if (retval < 0)
status = FAILED;
else
status = SUCCESS;
printk(MYIOC_s_INFO_FMT "host reset: %s (sc=%p)\n",
ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
return status;
}
static int
mptscsih_taskmgmt_reply(MPT_ADAPTER *ioc, u8 type,
SCSITaskMgmtReply_t *pScsiTmReply)
{
u16 iocstatus;
u32 termination_count;
int retval;
if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
retval = FAILED;
goto out;
}
DBG_DUMP_TM_REPLY_FRAME(ioc, (u32 *)pScsiTmReply);
iocstatus = le16_to_cpu(pScsiTmReply->IOCStatus) & MPI_IOCSTATUS_MASK;
termination_count = le32_to_cpu(pScsiTmReply->TerminationCount);
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"TaskMgmt fw_channel = %d, fw_id = %d, task_type = 0x%02X,\n"
"\tiocstatus = 0x%04X, loginfo = 0x%08X, response_code = 0x%02X,\n"
"\tterm_cmnds = %d\n", ioc->name, pScsiTmReply->Bus,
pScsiTmReply->TargetID, type, le16_to_cpu(pScsiTmReply->IOCStatus),
le32_to_cpu(pScsiTmReply->IOCLogInfo), pScsiTmReply->ResponseCode,
termination_count));
if (ioc->facts.MsgVersion >= MPI_VERSION_01_05 &&
pScsiTmReply->ResponseCode)
mptscsih_taskmgmt_response_code(ioc,
pScsiTmReply->ResponseCode);
if (iocstatus == MPI_IOCSTATUS_SUCCESS) {
retval = 0;
goto out;
}
retval = FAILED;
if (type == MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
if (termination_count == 1)
retval = 0;
goto out;
}
if (iocstatus == MPI_IOCSTATUS_SCSI_TASK_TERMINATED ||
iocstatus == MPI_IOCSTATUS_SCSI_IOC_TERMINATED)
retval = 0;
out:
return retval;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
void
mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code)
{
char *desc;
switch (response_code) {
case MPI_SCSITASKMGMT_RSP_TM_COMPLETE:
desc = "The task completed.";
break;
case MPI_SCSITASKMGMT_RSP_INVALID_FRAME:
desc = "The IOC received an invalid frame status.";
break;
case MPI_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
desc = "The task type is not supported.";
break;
case MPI_SCSITASKMGMT_RSP_TM_FAILED:
desc = "The requested task failed.";
break;
case MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED:
desc = "The task completed successfully.";
break;
case MPI_SCSITASKMGMT_RSP_TM_INVALID_LUN:
desc = "The LUN request is invalid.";
break;
case MPI_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
desc = "The task is in the IOC queue and has not been sent to target.";
break;
default:
desc = "unknown";
break;
}
printk(MYIOC_s_INFO_FMT "Response Code(0x%08x): F/W: %s\n",
ioc->name, response_code, desc);
}
EXPORT_SYMBOL(mptscsih_taskmgmt_response_code);
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mptscsih_taskmgmt_complete - Registered with Fusion MPT base driver
* @ioc: Pointer to MPT_ADAPTER structure
* @mf: Pointer to SCSI task mgmt request frame
* @mr: Pointer to SCSI task mgmt reply frame
*
* This routine is called from mptbase.c::mpt_interrupt() at the completion
* of any SCSI task management request.
* This routine is registered with the MPT (base) driver at driver
* load/init time via the mpt_register() API call.
*
* Returns 1 indicating alloc'd request frame ptr should be freed.
**/
int
mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
MPT_FRAME_HDR *mr)
{
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"TaskMgmt completed (mf=%p, mr=%p)\n", ioc->name, mf, mr));
ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
if (!mr)
goto out;
ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
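	/* Reply MsgLength is in 32-bit words; clamp the copy to one frame */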
memcpy(ioc->taskmgmt_cmds.reply, mr,
min(MPT_DEFAULT_FRAME_SIZE, 4 * mr->u.reply.MsgLength));
out:
if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) {
mpt_clear_taskmgmt_in_progress_flag(ioc);
ioc->taskmgmt_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
complete(&ioc->taskmgmt_cmds.done);
if (ioc->bus_type == SAS)
ioc->schedule_target_reset(ioc);
return 1;
}
return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 *	mptscsih_bios_param - Map capacity to a BIOS-style
 *	heads/sectors/cylinders geometry.
 *	This is anyone's guess, quite frankly.
 */
int
mptscsih_bios_param(struct scsi_device * sdev, struct block_device *bdev,
sector_t capacity, int geom[])
{
int heads;
int sectors;
sector_t cylinders;
ulong dummy;
heads = 64;
sectors = 32;
dummy = heads * sectors;
cylinders = capacity;
sector_div(cylinders,dummy);
/*
* Handle extended translation size for logical drives
* > 1Gb
*/
if ((ulong)capacity >= 0x200000) {
heads = 255;
sectors = 63;
dummy = heads * sectors;
cylinders = capacity;
sector_div(cylinders,dummy);
}
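	/*
	 * Worked example: a 4 GiB disk has capacity = 8388608 512-byte
	 * sectors; 8388608 >= 0x200000, so heads = 255, sectors = 63 and
	 * cylinders = 8388608 / (255 * 63) ~= 522.
	 */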
/* return result */
geom[0] = heads;
geom[1] = sectors;
geom[2] = cylinders;
return 0;
}
/* Search IOC page 3 to determine if this is a hidden physical disk.
 */
int
mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id)
{
struct inactive_raid_component_info *component_info;
int i, j;
RaidPhysDiskPage1_t *phys_disk;
int rc = 0;
int num_paths;
if (!ioc->raid_data.pIocPg3)
goto out;
for (i = 0; i < ioc->raid_data.pIocPg3->NumPhysDisks; i++) {
if ((id == ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskID) &&
(channel == ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskBus)) {
rc = 1;
goto out;
}
}
if (ioc->bus_type != SAS)
goto out;
/*
* Check if dual path
*/
for (i = 0; i < ioc->raid_data.pIocPg3->NumPhysDisks; i++) {
num_paths = mpt_raid_phys_disk_get_num_paths(ioc,
ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum);
if (num_paths < 2)
continue;
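		/* RAID physical disk page 1 is variable-length: the header
		 * is followed by one RAID_PHYS_DISK1_PATH entry per path,
		 * hence the offsetof() sizing below.
		 */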
phys_disk = kzalloc(offsetof(RaidPhysDiskPage1_t, Path) +
(num_paths * sizeof(RAID_PHYS_DISK1_PATH)), GFP_KERNEL);
if (!phys_disk)
continue;
if ((mpt_raid_phys_disk_pg1(ioc,
ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum,
phys_disk))) {
kfree(phys_disk);
continue;
}
for (j = 0; j < num_paths; j++) {
if ((phys_disk->Path[j].Flags &
MPI_RAID_PHYSDISK1_FLAG_INVALID))
continue;
if ((phys_disk->Path[j].Flags &
MPI_RAID_PHYSDISK1_FLAG_BROKEN))
continue;
if ((id == phys_disk->Path[j].PhysDiskID) &&
(channel == phys_disk->Path[j].PhysDiskBus)) {
rc = 1;
kfree(phys_disk);
goto out;
}
}
kfree(phys_disk);
}
/*
* Check inactive list for matching phys disks
*/
if (list_empty(&ioc->raid_data.inactive_list))
goto out;
mutex_lock(&ioc->raid_data.inactive_list_mutex);
list_for_each_entry(component_info, &ioc->raid_data.inactive_list,
list) {
if ((component_info->d.PhysDiskID == id) &&
(component_info->d.PhysDiskBus == channel))
rc = 1;
}
mutex_unlock(&ioc->raid_data.inactive_list_mutex);
out:
return rc;
}
EXPORT_SYMBOL(mptscsih_is_phys_disk);
u8
mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id)
{
struct inactive_raid_component_info *component_info;
int i, j;
RaidPhysDiskPage1_t *phys_disk;
int rc = -ENXIO;
int num_paths;
if (!ioc->raid_data.pIocPg3)
goto out;
for (i = 0; i < ioc->raid_data.pIocPg3->NumPhysDisks; i++) {
if ((id == ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskID) &&
(channel == ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskBus)) {
rc = ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum;
goto out;
}
}
if (ioc->bus_type != SAS)
goto out;
/*
* Check if dual path
*/
for (i = 0; i < ioc->raid_data.pIocPg3->NumPhysDisks; i++) {
num_paths = mpt_raid_phys_disk_get_num_paths(ioc,
ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum);
if (num_paths < 2)
continue;
phys_disk = kzalloc(offsetof(RaidPhysDiskPage1_t, Path) +
(num_paths * sizeof(RAID_PHYS_DISK1_PATH)), GFP_KERNEL);
if (!phys_disk)
continue;
if ((mpt_raid_phys_disk_pg1(ioc,
ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum,
phys_disk))) {
kfree(phys_disk);
continue;
}
for (j = 0; j < num_paths; j++) {
if ((phys_disk->Path[j].Flags &
MPI_RAID_PHYSDISK1_FLAG_INVALID))
continue;
if ((phys_disk->Path[j].Flags &
MPI_RAID_PHYSDISK1_FLAG_BROKEN))
continue;
if ((id == phys_disk->Path[j].PhysDiskID) &&
(channel == phys_disk->Path[j].PhysDiskBus)) {
rc = phys_disk->PhysDiskNum;
kfree(phys_disk);
goto out;
}
}
kfree(phys_disk);
}
/*
* Check inactive list for matching phys disks
*/
if (list_empty(&ioc->raid_data.inactive_list))
goto out;
mutex_lock(&ioc->raid_data.inactive_list_mutex);
list_for_each_entry(component_info, &ioc->raid_data.inactive_list,
list) {
if ((component_info->d.PhysDiskID == id) &&
(component_info->d.PhysDiskBus == channel))
rc = component_info->d.PhysDiskNum;
}
mutex_unlock(&ioc->raid_data.inactive_list_mutex);
out:
return rc;
}
EXPORT_SYMBOL(mptscsih_raid_id_to_num);
/*
* OS entry point to allow for host driver to free allocated memory
* Called if no device present or device being unloaded
*/
void
mptscsih_slave_destroy(struct scsi_device *sdev)
{
struct Scsi_Host *host = sdev->host;
MPT_SCSI_HOST *hd = shost_priv(host);
VirtTarget *vtarget;
VirtDevice *vdevice;
struct scsi_target *starget;
starget = scsi_target(sdev);
vtarget = starget->hostdata;
vdevice = sdev->hostdata;
if (!vdevice)
return;
mptscsih_search_running_cmds(hd, vdevice);
vtarget->num_luns--;
mptscsih_synchronize_cache(hd, vdevice);
kfree(vdevice);
sdev->hostdata = NULL;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 *	mptscsih_change_queue_depth - set a device's queue depth
 *	@sdev: per scsi_device pointer
 *	@qdepth: requested queue depth
 *
 *	Implements the scsi_host_template 'change_queue_depth' API;
 *	the requested depth is clamped to a per-bus-type maximum.
*/
int
mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
MPT_SCSI_HOST *hd = shost_priv(sdev->host);
VirtTarget *vtarget;
struct scsi_target *starget;
int max_depth;
MPT_ADAPTER *ioc = hd->ioc;
starget = scsi_target(sdev);
vtarget = starget->hostdata;
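	/* Depth policy: on SPI, untagged targets are held to depth 1 and
	 * Ultra160-or-faster disks get MPT_SCSI_CMD_PER_DEV_HIGH (others
	 * MPT_SCSI_CMD_PER_DEV_LOW); FC/SAS are bounded only by can_queue.
	 */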
if (ioc->bus_type == SPI) {
if (!(vtarget->tflags & MPT_TARGET_FLAGS_Q_YES))
max_depth = 1;
else if (sdev->type == TYPE_DISK &&
vtarget->minSyncFactor <= MPT_ULTRA160)
max_depth = MPT_SCSI_CMD_PER_DEV_HIGH;
else
max_depth = MPT_SCSI_CMD_PER_DEV_LOW;
} else
max_depth = ioc->sh->can_queue;
if (!sdev->tagged_supported)
max_depth = 1;
if (qdepth > max_depth)
qdepth = max_depth;
return scsi_change_queue_depth(sdev, qdepth);
}
/*
 * OS entry point to adjust the queue_depths on a per-device basis.
 * Called once per device during the bus scan. Use it to force the
 * queue_depth member to 1 if a device does not support Q tags.
 * Returns non-zero on failure.
*/
int
mptscsih_slave_configure(struct scsi_device *sdev)
{
struct Scsi_Host *sh = sdev->host;
VirtTarget *vtarget;
VirtDevice *vdevice;
struct scsi_target *starget;
MPT_SCSI_HOST *hd = shost_priv(sh);
MPT_ADAPTER *ioc = hd->ioc;
starget = scsi_target(sdev);
vtarget = starget->hostdata;
vdevice = sdev->hostdata;
dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"device @ %p, channel=%d, id=%d, lun=%llu\n",
ioc->name, sdev, sdev->channel, sdev->id, sdev->lun));
if (ioc->bus_type == SPI)
dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"sdtr %d wdtr %d ppr %d inq length=%d\n",
ioc->name, sdev->sdtr, sdev->wdtr,
sdev->ppr, sdev->inquiry_len));
vdevice->configured_lun = 1;
dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Queue depth=%d, tflags=%x\n",
ioc->name, sdev->queue_depth, vtarget->tflags));
if (ioc->bus_type == SPI)
dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"negoFlags=%x, maxOffset=%x, SyncFactor=%x\n",
ioc->name, vtarget->negoFlags, vtarget->maxOffset,
vtarget->minSyncFactor));
mptscsih_change_queue_depth(sdev, MPT_SCSI_CMD_PER_DEV_HIGH);
dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"tagged %d, simple %d\n",
ioc->name,sdev->tagged_supported, sdev->simple_tags));
	blk_queue_dma_alignment(sdev->request_queue, 512 - 1);
return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* Private routines...
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Utility function to copy sense data from the scsi_cmnd buffer
* to the FC and SCSI target structures.
*
*/
static void
mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR *mf, SCSIIOReply_t *pScsiReply)
{
VirtDevice *vdevice;
SCSIIORequest_t *pReq;
u32 sense_count = le32_to_cpu(pScsiReply->SenseCount);
MPT_ADAPTER *ioc = hd->ioc;
/* Get target structure
*/
pReq = (SCSIIORequest_t *) mf;
vdevice = sc->device->hostdata;
if (sense_count) {
u8 *sense_data;
int req_index;
/* Copy the sense received into the scsi command block. */
req_index = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
sense_data = ((u8 *)ioc->sense_buf_pool + (req_index * MPT_SENSE_BUFFER_ALLOC));
memcpy(sc->sense_buffer, sense_data, MPT_SENSE_BUFFER_ALLOC);
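		/* Note: the full MPT_SENSE_BUFFER_ALLOC slice is copied,
		 * not just sense_count bytes.
		 */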
		/* Log SMART data (ASC 0x5D: failure prediction threshold
		 * exceeded; sense_data[12]/[13] carry the ASC/ASCQ pair),
		 * non-IM case only, if event logging is enabled.
		 */
if ((ioc->events) && (ioc->eventTypes & (1 << MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE))) {
if ((sense_data[12] == 0x5D) && (vdevice->vtarget->raidVolume == 0)) {
int idx;
idx = ioc->eventContext % MPTCTL_EVENT_LOG_SIZE;
ioc->events[idx].event = MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE;
ioc->events[idx].eventContext = ioc->eventContext;
ioc->events[idx].data[0] = (pReq->LUN[1] << 24) |
(MPI_EVENT_SCSI_DEV_STAT_RC_SMART_DATA << 16) |
(sc->device->channel << 8) | sc->device->id;
ioc->events[idx].data[1] = (sense_data[13] << 8) | sense_data[12];
ioc->eventContext++;
if (ioc->pcidev->vendor ==
PCI_VENDOR_ID_IBM) {
mptscsih_issue_sep_command(ioc,
vdevice->vtarget, MPI_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
vdevice->vtarget->tflags |=
MPT_TARGET_FLAGS_LED_ON;
}
}
}
} else {
dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Hmmm... SenseData len=0! (?)\n",
ioc->name));
}
}
/**
* mptscsih_get_scsi_lookup - retrieves scmd entry
* @ioc: Pointer to MPT_ADAPTER structure
* @i: index into the array
*
* Returns the scsi_cmd pointer
*/
struct scsi_cmnd *
mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i)
{
unsigned long flags;
struct scsi_cmnd *scmd;
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
scmd = ioc->ScsiLookup[i];
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
return scmd;
}
EXPORT_SYMBOL(mptscsih_get_scsi_lookup);
/**
* mptscsih_getclear_scsi_lookup - retrieves and clears scmd entry from ScsiLookup[] array list
* @ioc: Pointer to MPT_ADAPTER structure
* @i: index into the array
*
* Returns the scsi_cmd pointer
*
**/
static struct scsi_cmnd *
mptscsih_getclear_scsi_lookup(MPT_ADAPTER *ioc, int i)
{
unsigned long flags;
struct scsi_cmnd *scmd;
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
scmd = ioc->ScsiLookup[i];
ioc->ScsiLookup[i] = NULL;
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
return scmd;
}
/**
* mptscsih_set_scsi_lookup - write a scmd entry into the ScsiLookup[] array list
*
* @ioc: Pointer to MPT_ADAPTER structure
* @i: index into the array
* @scmd: scsi_cmnd pointer
*
**/
static void
mptscsih_set_scsi_lookup(MPT_ADAPTER *ioc, int i, struct scsi_cmnd *scmd)
{
unsigned long flags;
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
ioc->ScsiLookup[i] = scmd;
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
}
/**
 *	SCPNT_TO_LOOKUP_IDX - searches for a given scmd in the ScsiLookup[] array list
 *	@ioc: Pointer to MPT_ADAPTER structure
 *	@sc: scsi_cmnd pointer
 *
 *	Returns the request index on success, or -1 if @sc is not found;
 *	this is a linear scan taken under the lookup lock.
 */
static int
SCPNT_TO_LOOKUP_IDX(MPT_ADAPTER *ioc, struct scsi_cmnd *sc)
{
unsigned long flags;
int i, index=-1;
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
for (i = 0; i < ioc->req_depth; i++) {
if (ioc->ScsiLookup[i] == sc) {
index = i;
goto out;
}
}
out:
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
return index;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
int
mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
{
MPT_SCSI_HOST *hd;
if (ioc->sh == NULL || shost_priv(ioc->sh) == NULL)
return 0;
hd = shost_priv(ioc->sh);
switch (reset_phase) {
case MPT_IOC_SETUP_RESET:
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__));
break;
case MPT_IOC_PRE_RESET:
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__));
mptscsih_flush_running_cmds(hd);
break;
case MPT_IOC_POST_RESET:
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"%s: MPT_IOC_POST_RESET\n", ioc->name, __func__));
if (ioc->internal_cmds.status & MPT_MGMT_STATUS_PENDING) {
ioc->internal_cmds.status |=
MPT_MGMT_STATUS_DID_IOCRESET;
complete(&ioc->internal_cmds.done);
}
break;
default:
break;
}
return 1; /* currently means nothing really */
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
int
mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
{
u8 event = le32_to_cpu(pEvReply->Event) & 0xFF;
devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"MPT event (=%02Xh) routed to SCSI host driver!\n",
ioc->name, event));
if ((event == MPI_EVENT_IOC_BUS_RESET ||
event == MPI_EVENT_EXT_BUS_RESET) &&
(ioc->bus_type == SPI) && (ioc->soft_resets < -1))
ioc->soft_resets++;
return 1; /* currently means nothing really */
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* Bus Scan and Domain Validation functionality ...
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 *	mptscsih_scandv_complete - Scan and DV callback routine registered
 *	to the Fusion MPT (base) driver.
 *
 *	@ioc: Pointer to MPT_ADAPTER structure
 *	@req: Pointer to original MPT request frame
 *	@reply: Pointer to MPT reply frame (NULL if TurboReply)
*
 *	This routine is called from mptbase.c::mpt_interrupt() at the completion
* of any SCSI IO request.
* This routine is registered with the Fusion MPT (base) driver at driver
* load/init time via the mpt_register() API call.
*
* Returns 1 indicating alloc'd request frame ptr should be freed.
*
* Remark: Sets a completion code and (possibly) saves sense data
* in the IOC member localReply structure.
* Used ONLY for DV and other internal commands.
*/
int
mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
MPT_FRAME_HDR *reply)
{
SCSIIORequest_t *pReq;
SCSIIOReply_t *pReply;
u8 cmd;
u16 req_idx;
u8 *sense_data;
int sz;
ioc->internal_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
ioc->internal_cmds.completion_code = MPT_SCANDV_GOOD;
if (!reply)
goto out;
pReply = (SCSIIOReply_t *) reply;
pReq = (SCSIIORequest_t *) req;
ioc->internal_cmds.completion_code =
mptscsih_get_completion_code(ioc, req, reply);
ioc->internal_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
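	/* Reply MsgLength is in 32-bit words; clamp to one default frame */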
memcpy(ioc->internal_cmds.reply, reply,
min(MPT_DEFAULT_FRAME_SIZE, 4 * reply->u.reply.MsgLength));
cmd = reply->u.hdr.Function;
if (((cmd == MPI_FUNCTION_SCSI_IO_REQUEST) ||
(cmd == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) &&
(pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID)) {
req_idx = le16_to_cpu(req->u.frame.hwhdr.msgctxu.fld.req_idx);
sense_data = ((u8 *)ioc->sense_buf_pool +
(req_idx * MPT_SENSE_BUFFER_ALLOC));
sz = min_t(int, pReq->SenseBufferLength,
MPT_SENSE_BUFFER_ALLOC);
memcpy(ioc->internal_cmds.sense, sense_data, sz);
}
out:
if (!(ioc->internal_cmds.status & MPT_MGMT_STATUS_PENDING))
return 0;
ioc->internal_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
complete(&ioc->internal_cmds.done);
return 1;
}
/**
* mptscsih_get_completion_code - get completion code from MPT request
* @ioc: Pointer to MPT_ADAPTER structure
* @req: Pointer to original MPT request frame
* @reply: Pointer to MPT reply frame (NULL if TurboReply)
 *
 *	Returns an MPT_SCANDV_* completion code derived from the reply's
 *	IOCStatus, SCSIState, and SCSIStatus fields.
**/
static int
mptscsih_get_completion_code(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
MPT_FRAME_HDR *reply)
{
SCSIIOReply_t *pReply;
MpiRaidActionReply_t *pr;
u8 scsi_status;
u16 status;
int completion_code;
pReply = (SCSIIOReply_t *)reply;
status = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK;
scsi_status = pReply->SCSIStatus;
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"IOCStatus=%04xh, SCSIState=%02xh, SCSIStatus=%02xh,"
"IOCLogInfo=%08xh\n", ioc->name, status, pReply->SCSIState,
scsi_status, le32_to_cpu(pReply->IOCLogInfo)));
switch (status) {
case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: /* 0x0043 */
completion_code = MPT_SCANDV_SELECTION_TIMEOUT;
break;
case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR: /* 0x0046 */
case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */
case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: /* 0x004B */
case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */
completion_code = MPT_SCANDV_DID_RESET;
break;
case MPI_IOCSTATUS_BUSY:
case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
completion_code = MPT_SCANDV_BUSY;
break;
case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: /* 0x0045 */
case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: /* 0x0040 */
case MPI_IOCSTATUS_SUCCESS: /* 0x0000 */
if (pReply->Function == MPI_FUNCTION_CONFIG) {
completion_code = MPT_SCANDV_GOOD;
} else if (pReply->Function == MPI_FUNCTION_RAID_ACTION) {
pr = (MpiRaidActionReply_t *)reply;
if (le16_to_cpu(pr->ActionStatus) ==
MPI_RAID_ACTION_ASTATUS_SUCCESS)
completion_code = MPT_SCANDV_GOOD;
else
completion_code = MPT_SCANDV_SOME_ERROR;
} else if (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID)
completion_code = MPT_SCANDV_SENSE;
else if (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_FAILED) {
if (req->u.scsireq.CDB[0] == INQUIRY)
completion_code = MPT_SCANDV_ISSUE_SENSE;
else
completion_code = MPT_SCANDV_DID_RESET;
} else if (pReply->SCSIState & MPI_SCSI_STATE_NO_SCSI_STATUS)
completion_code = MPT_SCANDV_DID_RESET;
else if (pReply->SCSIState & MPI_SCSI_STATE_TERMINATED)
completion_code = MPT_SCANDV_DID_RESET;
else if (scsi_status == MPI_SCSI_STATUS_BUSY)
completion_code = MPT_SCANDV_BUSY;
else
completion_code = MPT_SCANDV_GOOD;
break;
case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR: /* 0x0047 */
if (pReply->SCSIState & MPI_SCSI_STATE_TERMINATED)
completion_code = MPT_SCANDV_DID_RESET;
else
completion_code = MPT_SCANDV_SOME_ERROR;
break;
default:
completion_code = MPT_SCANDV_SOME_ERROR;
break;
} /* switch(status) */
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
" completionCode set to %08xh\n", ioc->name, completion_code));
return completion_code;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mptscsih_do_cmd - Do internal command.
* @hd: MPT_SCSI_HOST pointer
* @io: INTERNAL_CMD pointer.
*
* Issue the specified internally generated command and do command
* specific cleanup. For bus scan / DV only.
* NOTES: If command is Inquiry and status is good,
* initialize a target structure, save the data
*
* Remark: Single threaded access only.
*
* Return:
* < 0 if an illegal command or no resources
*
* 0 if good
*
* > 0 if command complete but some type of completion error.
*/
static int
mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
{
MPT_FRAME_HDR *mf;
SCSIIORequest_t *pScsiReq;
int my_idx, ii, dir;
int timeout;
char cmdLen;
	char CDB[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
u8 cmd = io->cmd;
MPT_ADAPTER *ioc = hd->ioc;
int ret = 0;
unsigned long timeleft;
unsigned long flags;
/* don't send internal command during diag reset */
spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
if (ioc->ioc_reset_in_progress) {
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"%s: busy with host reset\n", ioc->name, __func__));
return MPT_SCANDV_BUSY;
}
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
mutex_lock(&ioc->internal_cmds.mutex);
/* Set command specific information
*/
switch (cmd) {
case INQUIRY:
cmdLen = 6;
dir = MPI_SCSIIO_CONTROL_READ;
CDB[0] = cmd;
CDB[4] = io->size;
timeout = 10;
break;
case TEST_UNIT_READY:
cmdLen = 6;
dir = MPI_SCSIIO_CONTROL_READ;
timeout = 10;
break;
case START_STOP:
cmdLen = 6;
dir = MPI_SCSIIO_CONTROL_READ;
CDB[0] = cmd;
CDB[4] = 1; /*Spin up the disk */
timeout = 15;
break;
case REQUEST_SENSE:
cmdLen = 6;
CDB[0] = cmd;
CDB[4] = io->size;
dir = MPI_SCSIIO_CONTROL_READ;
timeout = 10;
break;
case READ_BUFFER:
cmdLen = 10;
dir = MPI_SCSIIO_CONTROL_READ;
CDB[0] = cmd;
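		/* CDB[1] is the READ BUFFER mode field: 0x0A selects the
		 * echo buffer, 0x02 the data buffer; OR-ing in 0x01
		 * (MPT_ICFLAG_BUF_CAP) requests the descriptor/capacity
		 * form of the chosen mode.
		 */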
if (io->flags & MPT_ICFLAG_ECHO) {
CDB[1] = 0x0A;
} else {
CDB[1] = 0x02;
}
if (io->flags & MPT_ICFLAG_BUF_CAP) {
CDB[1] |= 0x01;
}
CDB[6] = (io->size >> 16) & 0xFF;
CDB[7] = (io->size >> 8) & 0xFF;
CDB[8] = io->size & 0xFF;
timeout = 10;
break;
case WRITE_BUFFER:
cmdLen = 10;
dir = MPI_SCSIIO_CONTROL_WRITE;
CDB[0] = cmd;
if (io->flags & MPT_ICFLAG_ECHO) {
CDB[1] = 0x0A;
} else {
CDB[1] = 0x02;
}
CDB[6] = (io->size >> 16) & 0xFF;
CDB[7] = (io->size >> 8) & 0xFF;
CDB[8] = io->size & 0xFF;
timeout = 10;
break;
case RESERVE:
cmdLen = 6;
dir = MPI_SCSIIO_CONTROL_READ;
CDB[0] = cmd;
timeout = 10;
break;
case RELEASE:
cmdLen = 6;
dir = MPI_SCSIIO_CONTROL_READ;
CDB[0] = cmd;
timeout = 10;
break;
case SYNCHRONIZE_CACHE:
cmdLen = 10;
dir = MPI_SCSIIO_CONTROL_READ;
CDB[0] = cmd;
// CDB[1] = 0x02; /* set immediate bit */
timeout = 10;
break;
default:
/* Error Case */
ret = -EFAULT;
goto out;
}
/* Get and Populate a free Frame
* MsgContext set in mpt_get_msg_frame call
*/
if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) {
dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s: No msg frames!\n",
ioc->name, __func__));
ret = MPT_SCANDV_BUSY;
goto out;
}
pScsiReq = (SCSIIORequest_t *) mf;
/* Get the request index */
my_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
ADD_INDEX_LOG(my_idx); /* for debug */
if (io->flags & MPT_ICFLAG_PHYS_DISK) {
pScsiReq->TargetID = io->physDiskNum;
pScsiReq->Bus = 0;
pScsiReq->ChainOffset = 0;
pScsiReq->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
} else {
pScsiReq->TargetID = io->id;
pScsiReq->Bus = io->channel;
pScsiReq->ChainOffset = 0;
pScsiReq->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
}
pScsiReq->CDBLength = cmdLen;
pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE;
pScsiReq->Reserved = 0;
pScsiReq->MsgFlags = mpt_msg_flags(ioc);
	/* MsgContext set in mpt_get_msg_frame call */
int_to_scsilun(io->lun, (struct scsi_lun *)pScsiReq->LUN);
if (io->flags & MPT_ICFLAG_TAGGED_CMD)
pScsiReq->Control = cpu_to_le32(dir | MPI_SCSIIO_CONTROL_SIMPLEQ);
else
pScsiReq->Control = cpu_to_le32(dir | MPI_SCSIIO_CONTROL_UNTAGGED);
if (cmd == REQUEST_SENSE) {
pScsiReq->Control = cpu_to_le32(dir | MPI_SCSIIO_CONTROL_UNTAGGED);
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"%s: Untagged! 0x%02x\n", ioc->name, __func__, cmd));
}
for (ii = 0; ii < 16; ii++)
pScsiReq->CDB[ii] = CDB[ii];
pScsiReq->DataLength = cpu_to_le32(io->size);
pScsiReq->SenseBufferLowAddr = cpu_to_le32(ioc->sense_buf_low_dma
+ (my_idx * MPT_SENSE_BUFFER_ALLOC));
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"%s: Sending Command 0x%02x for fw_channel=%d fw_id=%d lun=%llu\n",
ioc->name, __func__, cmd, io->channel, io->id, io->lun));
if (dir == MPI_SCSIIO_CONTROL_READ)
ioc->add_sge((char *) &pScsiReq->SGL,
MPT_SGE_FLAGS_SSIMPLE_READ | io->size, io->data_dma);
else
ioc->add_sge((char *) &pScsiReq->SGL,
MPT_SGE_FLAGS_SSIMPLE_WRITE | io->size, io->data_dma);
INITIALIZE_MGMT_STATUS(ioc->internal_cmds.status)
mpt_put_msg_frame(ioc->InternalCtx, ioc, mf);
timeleft = wait_for_completion_timeout(&ioc->internal_cmds.done,
timeout*HZ);
if (!(ioc->internal_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
ret = MPT_SCANDV_DID_RESET;
dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"%s: TIMED OUT for cmd=0x%02x\n", ioc->name, __func__,
cmd));
if (ioc->internal_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) {
mpt_free_msg_frame(ioc, mf);
goto out;
}
if (!timeleft) {
printk(MYIOC_s_WARN_FMT
"Issuing Reset from %s!! doorbell=0x%08xh"
" cmd=0x%02x\n",
ioc->name, __func__, mpt_GetIocState(ioc, 0),
cmd);
mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
mpt_free_msg_frame(ioc, mf);
}
goto out;
}
ret = ioc->internal_cmds.completion_code;
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: success, rc=0x%02x\n",
ioc->name, __func__, ret));
out:
CLEAR_MGMT_STATUS(ioc->internal_cmds.status)
mutex_unlock(&ioc->internal_cmds.mutex);
return ret;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 * mptscsih_synchronize_cache - Send SYNCHRONIZE_CACHE to a target device.
* @hd: Pointer to a SCSI HOST structure
* @vdevice: virtual target device
*
* Uses the ISR, but with special processing.
* MUST be single-threaded.
*
*/
static void
mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
{
INTERNAL_CMD iocmd;
/* Ignore hidden RAID components; this is handled when the command
 * is sent to the volume.
 */
if (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT)
return;
if (vdevice->vtarget->type != TYPE_DISK || vdevice->vtarget->deleted ||
!vdevice->configured_lun)
return;
/* Following parameters will not change
* in this routine.
*/
iocmd.cmd = SYNCHRONIZE_CACHE;
iocmd.flags = 0;
iocmd.physDiskNum = -1;
iocmd.data = NULL;
iocmd.data_dma = -1;
iocmd.size = 0;
iocmd.rsvd = iocmd.rsvd2 = 0;
iocmd.channel = vdevice->vtarget->channel;
iocmd.id = vdevice->vtarget->id;
iocmd.lun = vdevice->lun;
mptscsih_do_cmd(hd, &iocmd);
}
static ssize_t
mptscsih_version_fw_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *host = class_to_shost(dev);
MPT_SCSI_HOST *hd = shost_priv(host);
MPT_ADAPTER *ioc = hd->ioc;
return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
(ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
(ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
(ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
ioc->facts.FWVersion.Word & 0x000000FF);
}
static DEVICE_ATTR(version_fw, S_IRUGO, mptscsih_version_fw_show, NULL);
static ssize_t
mptscsih_version_bios_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *host = class_to_shost(dev);
MPT_SCSI_HOST *hd = shost_priv(host);
MPT_ADAPTER *ioc = hd->ioc;
return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
(ioc->biosVersion & 0xFF000000) >> 24,
(ioc->biosVersion & 0x00FF0000) >> 16,
(ioc->biosVersion & 0x0000FF00) >> 8,
ioc->biosVersion & 0x000000FF);
}
static DEVICE_ATTR(version_bios, S_IRUGO, mptscsih_version_bios_show, NULL);
static ssize_t
mptscsih_version_mpi_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *host = class_to_shost(dev);
MPT_SCSI_HOST *hd = shost_priv(host);
MPT_ADAPTER *ioc = hd->ioc;
return snprintf(buf, PAGE_SIZE, "%03x\n", ioc->facts.MsgVersion);
}
static DEVICE_ATTR(version_mpi, S_IRUGO, mptscsih_version_mpi_show, NULL);
static ssize_t
mptscsih_version_product_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *host = class_to_shost(dev);
MPT_SCSI_HOST *hd = shost_priv(host);
MPT_ADAPTER *ioc = hd->ioc;
return snprintf(buf, PAGE_SIZE, "%s\n", ioc->prod_name);
}
static DEVICE_ATTR(version_product, S_IRUGO,
mptscsih_version_product_show, NULL);
static ssize_t
mptscsih_version_nvdata_persistent_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *host = class_to_shost(dev);
MPT_SCSI_HOST *hd = shost_priv(host);
MPT_ADAPTER *ioc = hd->ioc;
return snprintf(buf, PAGE_SIZE, "%02xh\n",
ioc->nvdata_version_persistent);
}
static DEVICE_ATTR(version_nvdata_persistent, S_IRUGO,
mptscsih_version_nvdata_persistent_show, NULL);
static ssize_t
mptscsih_version_nvdata_default_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *host = class_to_shost(dev);
MPT_SCSI_HOST *hd = shost_priv(host);
MPT_ADAPTER *ioc = hd->ioc;
return snprintf(buf, PAGE_SIZE, "%02xh\n",ioc->nvdata_version_default);
}
static DEVICE_ATTR(version_nvdata_default, S_IRUGO,
mptscsih_version_nvdata_default_show, NULL);
static ssize_t
mptscsih_board_name_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *host = class_to_shost(dev);
MPT_SCSI_HOST *hd = shost_priv(host);
MPT_ADAPTER *ioc = hd->ioc;
return snprintf(buf, PAGE_SIZE, "%s\n", ioc->board_name);
}
static DEVICE_ATTR(board_name, S_IRUGO, mptscsih_board_name_show, NULL);
static ssize_t
mptscsih_board_assembly_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *host = class_to_shost(dev);
MPT_SCSI_HOST *hd = shost_priv(host);
MPT_ADAPTER *ioc = hd->ioc;
return snprintf(buf, PAGE_SIZE, "%s\n", ioc->board_assembly);
}
static DEVICE_ATTR(board_assembly, S_IRUGO,
mptscsih_board_assembly_show, NULL);
static ssize_t
mptscsih_board_tracer_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *host = class_to_shost(dev);
MPT_SCSI_HOST *hd = shost_priv(host);
MPT_ADAPTER *ioc = hd->ioc;
return snprintf(buf, PAGE_SIZE, "%s\n", ioc->board_tracer);
}
static DEVICE_ATTR(board_tracer, S_IRUGO,
mptscsih_board_tracer_show, NULL);
static ssize_t
mptscsih_io_delay_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *host = class_to_shost(dev);
MPT_SCSI_HOST *hd = shost_priv(host);
MPT_ADAPTER *ioc = hd->ioc;
return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->io_missing_delay);
}
static DEVICE_ATTR(io_delay, S_IRUGO,
mptscsih_io_delay_show, NULL);
static ssize_t
mptscsih_device_delay_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *host = class_to_shost(dev);
MPT_SCSI_HOST *hd = shost_priv(host);
MPT_ADAPTER *ioc = hd->ioc;
return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->device_missing_delay);
}
static DEVICE_ATTR(device_delay, S_IRUGO,
mptscsih_device_delay_show, NULL);
static ssize_t
mptscsih_debug_level_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *host = class_to_shost(dev);
MPT_SCSI_HOST *hd = shost_priv(host);
MPT_ADAPTER *ioc = hd->ioc;
return snprintf(buf, PAGE_SIZE, "%08xh\n", ioc->debug_level);
}
static ssize_t
mptscsih_debug_level_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct Scsi_Host *host = class_to_shost(dev);
MPT_SCSI_HOST *hd = shost_priv(host);
MPT_ADAPTER *ioc = hd->ioc;
int val = 0;
if (sscanf(buf, "%x", &val) != 1)
return -EINVAL;
ioc->debug_level = val;
printk(MYIOC_s_INFO_FMT "debug_level=%08xh\n",
ioc->name, ioc->debug_level);
return strlen(buf);
}
static DEVICE_ATTR(debug_level, S_IRUGO | S_IWUSR,
mptscsih_debug_level_show, mptscsih_debug_level_store);
static struct attribute *mptscsih_host_attrs[] = {
&dev_attr_version_fw.attr,
&dev_attr_version_bios.attr,
&dev_attr_version_mpi.attr,
&dev_attr_version_product.attr,
&dev_attr_version_nvdata_persistent.attr,
&dev_attr_version_nvdata_default.attr,
&dev_attr_board_name.attr,
&dev_attr_board_assembly.attr,
&dev_attr_board_tracer.attr,
&dev_attr_io_delay.attr,
&dev_attr_device_delay.attr,
&dev_attr_debug_level.attr,
NULL,
};
static const struct attribute_group mptscsih_host_attr_group = {
.attrs = mptscsih_host_attrs
};
const struct attribute_group *mptscsih_host_attr_groups[] = {
&mptscsih_host_attr_group,
NULL
};
EXPORT_SYMBOL(mptscsih_host_attr_groups);
EXPORT_SYMBOL(mptscsih_remove);
EXPORT_SYMBOL(mptscsih_shutdown);
#ifdef CONFIG_PM
EXPORT_SYMBOL(mptscsih_suspend);
EXPORT_SYMBOL(mptscsih_resume);
#endif
EXPORT_SYMBOL(mptscsih_show_info);
EXPORT_SYMBOL(mptscsih_info);
EXPORT_SYMBOL(mptscsih_qcmd);
EXPORT_SYMBOL(mptscsih_slave_destroy);
EXPORT_SYMBOL(mptscsih_slave_configure);
EXPORT_SYMBOL(mptscsih_abort);
EXPORT_SYMBOL(mptscsih_dev_reset);
EXPORT_SYMBOL(mptscsih_bus_reset);
EXPORT_SYMBOL(mptscsih_host_reset);
EXPORT_SYMBOL(mptscsih_bios_param);
EXPORT_SYMBOL(mptscsih_io_done);
EXPORT_SYMBOL(mptscsih_taskmgmt_complete);
EXPORT_SYMBOL(mptscsih_scandv_complete);
EXPORT_SYMBOL(mptscsih_event_process);
EXPORT_SYMBOL(mptscsih_ioc_reset);
EXPORT_SYMBOL(mptscsih_change_queue_depth);
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
| linux-master | drivers/message/fusion/mptscsih.c |
/*
* linux/drivers/message/fusion/mptfc.c
* For use with LSI PCI chip/adapter(s)
* running LSI Fusion MPT (Message Passing Technology) firmware.
*
* Copyright (c) 1999-2008 LSI Corporation
* (mailto:[email protected])
*
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
NO WARRANTY
THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
solely responsible for determining the appropriateness of using and
distributing the Program and assumes all risks associated with its
exercise of rights under this Agreement, including but not limited to
the risks and costs of program errors, damage to or loss of data,
programs or equipment, and unavailability or interruption of operations.
DISCLAIMER OF LIABILITY
NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/delay.h> /* for mdelay */
#include <linux/interrupt.h>
#include <linux/reboot.h> /* notifier code */
#include <linux/workqueue.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include "mptbase.h"
#include "mptscsih.h"
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
#define my_NAME "Fusion MPT FC Host driver"
#define my_VERSION MPT_LINUX_VERSION_COMMON
#define MYNAM "mptfc"
MODULE_AUTHOR(MODULEAUTHOR);
MODULE_DESCRIPTION(my_NAME);
MODULE_LICENSE("GPL");
MODULE_VERSION(my_VERSION);
/* Command line args */
#define MPTFC_DEV_LOSS_TMO (60)
static int mptfc_dev_loss_tmo = MPTFC_DEV_LOSS_TMO; /* reasonable default */
module_param(mptfc_dev_loss_tmo, int, 0);
MODULE_PARM_DESC(mptfc_dev_loss_tmo, " Initial time the driver programs the "
" transport to wait for an rport to "
" return following a device loss event."
" Default=60.");
/* The SCSI mid-layer global parameter is max_report_luns, which is 511 */
#define MPTFC_MAX_LUN (16895)
static int max_lun = MPTFC_MAX_LUN;
module_param(max_lun, int, 0);
MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
static u8 mptfcDoneCtx = MPT_MAX_PROTOCOL_DRIVERS;
static u8 mptfcTaskCtx = MPT_MAX_PROTOCOL_DRIVERS;
static u8 mptfcInternalCtx = MPT_MAX_PROTOCOL_DRIVERS;
static int mptfc_target_alloc(struct scsi_target *starget);
static int mptfc_slave_alloc(struct scsi_device *sdev);
static int mptfc_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt);
static void mptfc_target_destroy(struct scsi_target *starget);
static void mptfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout);
static void mptfc_remove(struct pci_dev *pdev);
static int mptfc_abort(struct scsi_cmnd *SCpnt);
static int mptfc_dev_reset(struct scsi_cmnd *SCpnt);
static int mptfc_bus_reset(struct scsi_cmnd *SCpnt);
static const struct scsi_host_template mptfc_driver_template = {
.module = THIS_MODULE,
.proc_name = "mptfc",
.show_info = mptscsih_show_info,
.name = "MPT FC Host",
.info = mptscsih_info,
.queuecommand = mptfc_qcmd,
.target_alloc = mptfc_target_alloc,
.slave_alloc = mptfc_slave_alloc,
.slave_configure = mptscsih_slave_configure,
.target_destroy = mptfc_target_destroy,
.slave_destroy = mptscsih_slave_destroy,
.change_queue_depth = mptscsih_change_queue_depth,
.eh_timed_out = fc_eh_timed_out,
.eh_abort_handler = mptfc_abort,
.eh_device_reset_handler = mptfc_dev_reset,
.eh_bus_reset_handler = mptfc_bus_reset,
.eh_host_reset_handler = mptscsih_host_reset,
.bios_param = mptscsih_bios_param,
.can_queue = MPT_FC_CAN_QUEUE,
.this_id = -1,
.sg_tablesize = MPT_SCSI_SG_DEPTH,
.max_sectors = 8192,
.cmd_per_lun = 7,
.shost_groups = mptscsih_host_attr_groups,
};
/****************************************************************************
* Supported hardware
*/
static struct pci_device_id mptfc_pci_table[] = {
{ PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC909,
PCI_ANY_ID, PCI_ANY_ID },
{ PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC919,
PCI_ANY_ID, PCI_ANY_ID },
{ PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC929,
PCI_ANY_ID, PCI_ANY_ID },
{ PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC919X,
PCI_ANY_ID, PCI_ANY_ID },
{ PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC929X,
PCI_ANY_ID, PCI_ANY_ID },
{ PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC939X,
PCI_ANY_ID, PCI_ANY_ID },
{ PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC949X,
PCI_ANY_ID, PCI_ANY_ID },
{ PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC949E,
PCI_ANY_ID, PCI_ANY_ID },
{ PCI_VENDOR_ID_BROCADE, MPI_MANUFACTPAGE_DEVICEID_FC949E,
PCI_ANY_ID, PCI_ANY_ID },
{0} /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, mptfc_pci_table);
static struct scsi_transport_template *mptfc_transport_template = NULL;
static struct fc_function_template mptfc_transport_functions = {
.dd_fcrport_size = 8,
.show_host_node_name = 1,
.show_host_port_name = 1,
.show_host_supported_classes = 1,
.show_host_port_id = 1,
.show_rport_supported_classes = 1,
.show_starget_node_name = 1,
.show_starget_port_name = 1,
.show_starget_port_id = 1,
.set_rport_dev_loss_tmo = mptfc_set_rport_loss_tmo,
.show_rport_dev_loss_tmo = 1,
.show_host_supported_speeds = 1,
.show_host_maxframe_size = 1,
.show_host_speed = 1,
.show_host_fabric_name = 1,
.show_host_port_type = 1,
.show_host_port_state = 1,
.show_host_symbolic_name = 1,
};
static int
mptfc_block_error_handler(struct scsi_cmnd *SCpnt,
int (*func)(struct scsi_cmnd *SCpnt),
const char *caller)
{
MPT_SCSI_HOST *hd;
struct scsi_device *sdev = SCpnt->device;
struct Scsi_Host *shost = sdev->host;
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
unsigned long flags;
int ready;
MPT_ADAPTER *ioc;
int loops = 40; /* seconds */
hd = shost_priv(SCpnt->device->host);
ioc = hd->ioc;
spin_lock_irqsave(shost->host_lock, flags);
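/* fc_remote_port_chkready() returns a full SCSI result value; the
 * shift by 16 extracts the host byte (e.g. DID_IMM_RETRY while the
 * rport is blocked).
 */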
while ((ready = fc_remote_port_chkready(rport) >> 16) == DID_IMM_RETRY
|| (loops > 0 && ioc->active == 0)) {
spin_unlock_irqrestore(shost->host_lock, flags);
dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT
"mptfc_block_error_handler.%d: %d:%llu, port status is "
"%x, active flag %d, deferring %s recovery.\n",
ioc->name, ioc->sh->host_no,
SCpnt->device->id, SCpnt->device->lun,
ready, ioc->active, caller));
msleep(1000);
spin_lock_irqsave(shost->host_lock, flags);
loops--;
}
spin_unlock_irqrestore(shost->host_lock, flags);
if (ready == DID_NO_CONNECT || !SCpnt->device->hostdata
|| ioc->active == 0) {
dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT
"%s.%d: %d:%llu, failing recovery, "
"port state %x, active %d, vdevice %p.\n", caller,
ioc->name, ioc->sh->host_no,
SCpnt->device->id, SCpnt->device->lun, ready,
ioc->active, SCpnt->device->hostdata));
return FAILED;
}
dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT
"%s.%d: %d:%llu, executing recovery.\n", caller,
ioc->name, ioc->sh->host_no,
SCpnt->device->id, SCpnt->device->lun));
return (*func)(SCpnt);
}
static int
mptfc_abort(struct scsi_cmnd *SCpnt)
{
return
mptfc_block_error_handler(SCpnt, mptscsih_abort, __func__);
}
static int
mptfc_dev_reset(struct scsi_cmnd *SCpnt)
{
return
mptfc_block_error_handler(SCpnt, mptscsih_dev_reset, __func__);
}
static int
mptfc_bus_reset(struct scsi_cmnd *SCpnt)
{
return
mptfc_block_error_handler(SCpnt, mptscsih_bus_reset, __func__);
}
static void
mptfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
{
if (timeout > 0)
rport->dev_loss_tmo = timeout;
else
rport->dev_loss_tmo = mptfc_dev_loss_tmo;
}
static int
mptfc_FcDevPage0_cmp_func(const void *a, const void *b)
{
FCDevicePage0_t **aa = (FCDevicePage0_t **)a;
FCDevicePage0_t **bb = (FCDevicePage0_t **)b;
if ((*aa)->CurrentBus == (*bb)->CurrentBus) {
if ((*aa)->CurrentTargetID == (*bb)->CurrentTargetID)
return 0;
if ((*aa)->CurrentTargetID < (*bb)->CurrentTargetID)
return -1;
return 1;
}
if ((*aa)->CurrentBus < (*bb)->CurrentBus)
return -1;
return 1;
}
static int
mptfc_GetFcDevPage0(MPT_ADAPTER *ioc, int ioc_port,
void(*func)(MPT_ADAPTER *ioc,int channel, FCDevicePage0_t *arg))
{
ConfigPageHeader_t hdr;
CONFIGPARMS cfg;
FCDevicePage0_t *ppage0_alloc, *fc;
dma_addr_t page0_dma;
int data_sz;
int ii;
FCDevicePage0_t *p0_array=NULL, *p_p0;
FCDevicePage0_t **pp0_array=NULL, **p_pp0;
int rc = -ENOMEM;
U32 port_id = 0xffffff;
int num_targ = 0;
int max_bus = ioc->facts.MaxBuses;
int max_targ;
max_targ = (ioc->facts.MaxDevices == 0) ? 256 : ioc->facts.MaxDevices;
data_sz = sizeof(FCDevicePage0_t) * max_bus * max_targ;
p_p0 = p0_array = kzalloc(data_sz, GFP_KERNEL);
if (!p0_array)
goto out;
data_sz = sizeof(FCDevicePage0_t *) * max_bus * max_targ;
p_pp0 = pp0_array = kzalloc(data_sz, GFP_KERNEL);
if (!pp0_array)
goto out;
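/* Walk the adapter's FC device database: a page address of 0xffffff
 * requests the first entry, and each successful read yields the port
 * ID used as the page address for the next iteration.
 */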
do {
/* Get FC Device Page 0 header */
hdr.PageVersion = 0;
hdr.PageLength = 0;
hdr.PageNumber = 0;
hdr.PageType = MPI_CONFIG_PAGETYPE_FC_DEVICE;
cfg.cfghdr.hdr = &hdr;
cfg.physAddr = -1;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.dir = 0;
cfg.pageAddr = port_id;
cfg.timeout = 0;
if ((rc = mpt_config(ioc, &cfg)) != 0)
break;
if (hdr.PageLength <= 0)
break;
data_sz = hdr.PageLength * 4;
ppage0_alloc = dma_alloc_coherent(&ioc->pcidev->dev, data_sz,
&page0_dma, GFP_KERNEL);
rc = -ENOMEM;
if (!ppage0_alloc)
break;
cfg.physAddr = page0_dma;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
if ((rc = mpt_config(ioc, &cfg)) == 0) {
ppage0_alloc->PortIdentifier =
le32_to_cpu(ppage0_alloc->PortIdentifier);
ppage0_alloc->WWNN.Low =
le32_to_cpu(ppage0_alloc->WWNN.Low);
ppage0_alloc->WWNN.High =
le32_to_cpu(ppage0_alloc->WWNN.High);
ppage0_alloc->WWPN.Low =
le32_to_cpu(ppage0_alloc->WWPN.Low);
ppage0_alloc->WWPN.High =
le32_to_cpu(ppage0_alloc->WWPN.High);
ppage0_alloc->BBCredit =
le16_to_cpu(ppage0_alloc->BBCredit);
ppage0_alloc->MaxRxFrameSize =
le16_to_cpu(ppage0_alloc->MaxRxFrameSize);
port_id = ppage0_alloc->PortIdentifier;
num_targ++;
*p_p0 = *ppage0_alloc; /* save data */
*p_pp0++ = p_p0++; /* save addr */
}
dma_free_coherent(&ioc->pcidev->dev, data_sz,
ppage0_alloc, page0_dma);
if (rc != 0)
break;
} while (port_id <= 0xff0000);
if (num_targ) {
/* sort array */
if (num_targ > 1)
sort (pp0_array, num_targ, sizeof(FCDevicePage0_t *),
mptfc_FcDevPage0_cmp_func, NULL);
/* call caller's func for each targ */
for (ii = 0; ii < num_targ; ii++) {
fc = *(pp0_array+ii);
func(ioc, ioc_port, fc);
}
}
out:
kfree(pp0_array);
kfree(p0_array);
return rc;
}
static int
mptfc_generate_rport_ids(FCDevicePage0_t *pg0, struct fc_rport_identifiers *rid)
{
/* not currently usable */
if (pg0->Flags & (MPI_FC_DEVICE_PAGE0_FLAGS_PLOGI_INVALID |
MPI_FC_DEVICE_PAGE0_FLAGS_PRLI_INVALID))
return -1;
if (!(pg0->Flags & MPI_FC_DEVICE_PAGE0_FLAGS_TARGETID_BUS_VALID))
return -1;
if (!(pg0->Protocol & MPI_FC_DEVICE_PAGE0_PROT_FCP_TARGET))
return -1;
/*
* board data structure already normalized to platform endianness
* shifted to avoid unaligned access on 64 bit architecture
*/
rid->node_name = ((u64)pg0->WWNN.High) << 32 | (u64)pg0->WWNN.Low;
rid->port_name = ((u64)pg0->WWPN.High) << 32 | (u64)pg0->WWPN.Low;
rid->port_id = pg0->PortIdentifier;
rid->roles = FC_RPORT_ROLE_UNKNOWN;
return 0;
}
static void
mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0)
{
struct fc_rport_identifiers rport_ids;
struct fc_rport *rport;
struct mptfc_rport_info *ri;
int new_ri = 1;
u64 pn, nn;
VirtTarget *vtarget;
u32 roles = FC_RPORT_ROLE_UNKNOWN;
if (mptfc_generate_rport_ids(pg0, &rport_ids) < 0)
return;
roles |= FC_RPORT_ROLE_FCP_TARGET;
if (pg0->Protocol & MPI_FC_DEVICE_PAGE0_PROT_FCP_INITIATOR)
roles |= FC_RPORT_ROLE_FCP_INITIATOR;
/* scan list looking for a match */
list_for_each_entry(ri, &ioc->fc_rports, list) {
pn = (u64)ri->pg0.WWPN.High << 32 | (u64)ri->pg0.WWPN.Low;
if (pn == rport_ids.port_name) { /* match */
list_move_tail(&ri->list, &ioc->fc_rports);
new_ri = 0;
break;
}
}
if (new_ri) { /* allocate one */
ri = kzalloc(sizeof(struct mptfc_rport_info), GFP_KERNEL);
if (!ri)
return;
list_add_tail(&ri->list, &ioc->fc_rports);
}
ri->pg0 = *pg0; /* add/update pg0 data */
ri->flags &= ~MPT_RPORT_INFO_FLAGS_MISSING;
/* MPT_RPORT_INFO_FLAGS_REGISTERED - rport not previously deleted */
if (!(ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED)) {
ri->flags |= MPT_RPORT_INFO_FLAGS_REGISTERED;
rport = fc_remote_port_add(ioc->sh, channel, &rport_ids);
if (rport) {
ri->rport = rport;
if (new_ri) /* may have been reset by user */
rport->dev_loss_tmo = mptfc_dev_loss_tmo;
/*
* if already mapped, remap here. If not mapped,
* target_alloc will allocate vtarget and map,
* slave_alloc will fill in vdevice from vtarget.
*/
if (ri->starget) {
vtarget = ri->starget->hostdata;
if (vtarget) {
vtarget->id = pg0->CurrentTargetID;
vtarget->channel = pg0->CurrentBus;
vtarget->deleted = 0;
}
}
*((struct mptfc_rport_info **)rport->dd_data) = ri;
/* scan will be scheduled once rport becomes a target */
fc_remote_port_rolechg(rport,roles);
pn = (u64)ri->pg0.WWPN.High << 32 | (u64)ri->pg0.WWPN.Low;
nn = (u64)ri->pg0.WWNN.High << 32 | (u64)ri->pg0.WWNN.Low;
dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT
"mptfc_reg_dev.%d: %x, %llx / %llx, tid %d, "
"rport tid %d, tmo %d\n",
ioc->name,
ioc->sh->host_no,
pg0->PortIdentifier,
(unsigned long long)nn,
(unsigned long long)pn,
pg0->CurrentTargetID,
ri->rport->scsi_target_id,
ri->rport->dev_loss_tmo));
} else {
list_del(&ri->list);
kfree(ri);
ri = NULL;
}
}
}
/*
* OS entry point to allow for host driver to free allocated memory
* Called if no device present or device being unloaded
*/
static void
mptfc_target_destroy(struct scsi_target *starget)
{
struct fc_rport *rport;
struct mptfc_rport_info *ri;
rport = starget_to_rport(starget);
if (rport) {
ri = *((struct mptfc_rport_info **)rport->dd_data);
if (ri) /* better be! */
ri->starget = NULL;
}
kfree(starget->hostdata);
starget->hostdata = NULL;
}
/*
* OS entry point to allow host driver to alloc memory
 * for each scsi target. Called once per target during the bus scan.
* Return non-zero if allocation fails.
*/
static int
mptfc_target_alloc(struct scsi_target *starget)
{
VirtTarget *vtarget;
struct fc_rport *rport;
struct mptfc_rport_info *ri;
int rc;
vtarget = kzalloc(sizeof(VirtTarget), GFP_KERNEL);
if (!vtarget)
return -ENOMEM;
starget->hostdata = vtarget;
rc = -ENODEV;
rport = starget_to_rport(starget);
if (rport) {
ri = *((struct mptfc_rport_info **)rport->dd_data);
if (ri) { /* better be! */
vtarget->id = ri->pg0.CurrentTargetID;
vtarget->channel = ri->pg0.CurrentBus;
ri->starget = starget;
rc = 0;
}
}
if (rc != 0) {
kfree(vtarget);
starget->hostdata = NULL;
}
return rc;
}
/*
 * mptfc_dump_lun_info - log rport/LUN debug info for a SCSI device
 * @ioc: Pointer to MPT_ADAPTER structure
 * @rport: FC transport remote port
 * @sdev: SCSI device
 * @vtarget: per target private data
 *
*/
static void
mptfc_dump_lun_info(MPT_ADAPTER *ioc, struct fc_rport *rport, struct scsi_device *sdev,
VirtTarget *vtarget)
{
u64 nn, pn;
struct mptfc_rport_info *ri;
ri = *((struct mptfc_rport_info **)rport->dd_data);
pn = (u64)ri->pg0.WWPN.High << 32 | (u64)ri->pg0.WWPN.Low;
nn = (u64)ri->pg0.WWNN.High << 32 | (u64)ri->pg0.WWNN.Low;
dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT
"mptfc_slv_alloc.%d: num_luns %d, sdev.id %d, "
"CurrentTargetID %d, %x %llx %llx\n",
ioc->name,
sdev->host->host_no,
vtarget->num_luns,
sdev->id, ri->pg0.CurrentTargetID,
ri->pg0.PortIdentifier,
(unsigned long long)pn,
(unsigned long long)nn));
}
/*
* OS entry point to allow host driver to alloc memory
 * for each scsi device. Called once per device during the bus scan.
* Return non-zero if allocation fails.
* Init memory once per LUN.
*/
static int
mptfc_slave_alloc(struct scsi_device *sdev)
{
MPT_SCSI_HOST *hd;
VirtTarget *vtarget;
VirtDevice *vdevice;
struct scsi_target *starget;
struct fc_rport *rport;
MPT_ADAPTER *ioc;
starget = scsi_target(sdev);
rport = starget_to_rport(starget);
if (!rport || fc_remote_port_chkready(rport))
return -ENXIO;
hd = shost_priv(sdev->host);
ioc = hd->ioc;
vdevice = kzalloc(sizeof(VirtDevice), GFP_KERNEL);
if (!vdevice) {
printk(MYIOC_s_ERR_FMT "slave_alloc kzalloc(%zd) FAILED!\n",
ioc->name, sizeof(VirtDevice));
return -ENOMEM;
}
sdev->hostdata = vdevice;
vtarget = starget->hostdata;
if (vtarget->num_luns == 0) {
vtarget->ioc_id = ioc->id;
vtarget->tflags = MPT_TARGET_FLAGS_Q_YES;
}
vdevice->vtarget = vtarget;
vdevice->lun = sdev->lun;
vtarget->num_luns++;
mptfc_dump_lun_info(ioc, rport, sdev, vtarget);
return 0;
}
static int
mptfc_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt)
{
struct mptfc_rport_info *ri;
struct fc_rport *rport = starget_to_rport(scsi_target(SCpnt->device));
int err;
VirtDevice *vdevice = SCpnt->device->hostdata;
if (!vdevice || !vdevice->vtarget) {
SCpnt->result = DID_NO_CONNECT << 16;
scsi_done(SCpnt);
return 0;
}
err = fc_remote_port_chkready(rport);
if (unlikely(err)) {
SCpnt->result = err;
scsi_done(SCpnt);
return 0;
}
/* dd_data is null until finished adding target */
ri = *((struct mptfc_rport_info **)rport->dd_data);
if (unlikely(!ri)) {
SCpnt->result = DID_IMM_RETRY << 16;
scsi_done(SCpnt);
return 0;
}
return mptscsih_qcmd(SCpnt);
}
/*
 * mptfc_display_port_link_speed - display the FC port link speed and log changes
* @ioc: Pointer to MPT_ADAPTER structure
* @portnum: IOC Port number
* @pp0dest: port page0 data payload
*
*/
static void
mptfc_display_port_link_speed(MPT_ADAPTER *ioc, int portnum, FCPortPage0_t *pp0dest)
{
u8 old_speed, new_speed, state;
char *old, *new;
if (portnum >= 2)
return;
old_speed = ioc->fc_link_speed[portnum];
new_speed = pp0dest->CurrentSpeed;
state = pp0dest->PortState;
if (state != MPI_FCPORTPAGE0_PORTSTATE_OFFLINE &&
new_speed != MPI_FCPORTPAGE0_CURRENT_SPEED_UNKNOWN) {
old = old_speed == MPI_FCPORTPAGE0_CURRENT_SPEED_1GBIT ? "1 Gbps" :
old_speed == MPI_FCPORTPAGE0_CURRENT_SPEED_2GBIT ? "2 Gbps" :
old_speed == MPI_FCPORTPAGE0_CURRENT_SPEED_4GBIT ? "4 Gbps" :
"Unknown";
new = new_speed == MPI_FCPORTPAGE0_CURRENT_SPEED_1GBIT ? "1 Gbps" :
new_speed == MPI_FCPORTPAGE0_CURRENT_SPEED_2GBIT ? "2 Gbps" :
new_speed == MPI_FCPORTPAGE0_CURRENT_SPEED_4GBIT ? "4 Gbps" :
"Unknown";
if (old_speed == 0)
printk(MYIOC_s_NOTE_FMT
"FC Link Established, Speed = %s\n",
ioc->name, new);
else if (old_speed != new_speed)
printk(MYIOC_s_WARN_FMT
"FC Link Speed Change, Old Speed = %s, New Speed = %s\n",
ioc->name, old, new);
ioc->fc_link_speed[portnum] = new_speed;
}
}
/*
* mptfc_GetFcPortPage0 - Fetch FCPort config Page0.
* @ioc: Pointer to MPT_ADAPTER structure
* @portnum: IOC Port number
*
* Return: 0 for success
* -ENOMEM if no memory available
* -EPERM if not allowed due to ISR context
* -EAGAIN if no msg frames currently available
* -EFAULT for non-successful reply or no reply (timeout)
* -EINVAL portnum arg out of range (hardwired to two elements)
*/
static int
mptfc_GetFcPortPage0(MPT_ADAPTER *ioc, int portnum)
{
ConfigPageHeader_t hdr;
CONFIGPARMS cfg;
FCPortPage0_t *ppage0_alloc;
FCPortPage0_t *pp0dest;
dma_addr_t page0_dma;
int data_sz;
int copy_sz;
int rc;
int count = 400;
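/* retry budget for the discovery wait below: 400 * msleep(100) ~= 40s */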
if (portnum > 1)
return -EINVAL;
/* Get FCPort Page 0 header */
hdr.PageVersion = 0;
hdr.PageLength = 0;
hdr.PageNumber = 0;
hdr.PageType = MPI_CONFIG_PAGETYPE_FC_PORT;
cfg.cfghdr.hdr = &hdr;
cfg.physAddr = -1;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.dir = 0;
cfg.pageAddr = portnum;
cfg.timeout = 0;
if ((rc = mpt_config(ioc, &cfg)) != 0)
return rc;
if (hdr.PageLength == 0)
return 0;
data_sz = hdr.PageLength * 4;
rc = -ENOMEM;
ppage0_alloc = dma_alloc_coherent(&ioc->pcidev->dev, data_sz,
&page0_dma, GFP_KERNEL);
if (ppage0_alloc) {
try_again:
memset((u8 *)ppage0_alloc, 0, data_sz);
cfg.physAddr = page0_dma;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
if ((rc = mpt_config(ioc, &cfg)) == 0) {
/* save the data */
pp0dest = &ioc->fc_port_page0[portnum];
copy_sz = min_t(int, sizeof(FCPortPage0_t), data_sz);
memcpy(pp0dest, ppage0_alloc, copy_sz);
/*
* Normalize endianness of structure data,
* by byte-swapping all > 1 byte fields!
*/
pp0dest->Flags = le32_to_cpu(pp0dest->Flags);
pp0dest->PortIdentifier = le32_to_cpu(pp0dest->PortIdentifier);
pp0dest->WWNN.Low = le32_to_cpu(pp0dest->WWNN.Low);
pp0dest->WWNN.High = le32_to_cpu(pp0dest->WWNN.High);
pp0dest->WWPN.Low = le32_to_cpu(pp0dest->WWPN.Low);
pp0dest->WWPN.High = le32_to_cpu(pp0dest->WWPN.High);
pp0dest->SupportedServiceClass = le32_to_cpu(pp0dest->SupportedServiceClass);
pp0dest->SupportedSpeeds = le32_to_cpu(pp0dest->SupportedSpeeds);
pp0dest->CurrentSpeed = le32_to_cpu(pp0dest->CurrentSpeed);
pp0dest->MaxFrameSize = le32_to_cpu(pp0dest->MaxFrameSize);
pp0dest->FabricWWNN.Low = le32_to_cpu(pp0dest->FabricWWNN.Low);
pp0dest->FabricWWNN.High = le32_to_cpu(pp0dest->FabricWWNN.High);
pp0dest->FabricWWPN.Low = le32_to_cpu(pp0dest->FabricWWPN.Low);
pp0dest->FabricWWPN.High = le32_to_cpu(pp0dest->FabricWWPN.High);
pp0dest->DiscoveredPortsCount = le32_to_cpu(pp0dest->DiscoveredPortsCount);
pp0dest->MaxInitiators = le32_to_cpu(pp0dest->MaxInitiators);
/*
* if still doing discovery,
* hang loose a while until finished
*/
if ((pp0dest->PortState == MPI_FCPORTPAGE0_PORTSTATE_UNKNOWN) ||
(pp0dest->PortState == MPI_FCPORTPAGE0_PORTSTATE_ONLINE &&
(pp0dest->Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK)
== MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT)) {
if (count-- > 0) {
msleep(100);
goto try_again;
}
printk(MYIOC_s_INFO_FMT "Firmware discovery not"
" complete.\n",
ioc->name);
}
mptfc_display_port_link_speed(ioc, portnum, pp0dest);
}
dma_free_coherent(&ioc->pcidev->dev, data_sz, ppage0_alloc,
page0_dma);
}
return rc;
}
static int
mptfc_WriteFcPortPage1(MPT_ADAPTER *ioc, int portnum)
{
ConfigPageHeader_t hdr;
CONFIGPARMS cfg;
int rc;
if (portnum > 1)
return -EINVAL;
if (!(ioc->fc_data.fc_port_page1[portnum].data))
return -EINVAL;
/* get fcport page 1 header */
hdr.PageVersion = 0;
hdr.PageLength = 0;
hdr.PageNumber = 1;
hdr.PageType = MPI_CONFIG_PAGETYPE_FC_PORT;
cfg.cfghdr.hdr = &hdr;
cfg.physAddr = -1;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.dir = 0;
cfg.pageAddr = portnum;
cfg.timeout = 0;
if ((rc = mpt_config(ioc, &cfg)) != 0)
return rc;
if (hdr.PageLength == 0)
return -ENODEV;
if (hdr.PageLength*4 != ioc->fc_data.fc_port_page1[portnum].pg_sz)
return -EINVAL;
cfg.physAddr = ioc->fc_data.fc_port_page1[portnum].dma;
cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
cfg.dir = 1;
rc = mpt_config(ioc, &cfg);
return rc;
}
static int
mptfc_GetFcPortPage1(MPT_ADAPTER *ioc, int portnum)
{
ConfigPageHeader_t hdr;
CONFIGPARMS cfg;
FCPortPage1_t *page1_alloc;
dma_addr_t page1_dma;
int data_sz;
int rc;
if (portnum > 1)
return -EINVAL;
/* get fcport page 1 header */
hdr.PageVersion = 0;
hdr.PageLength = 0;
hdr.PageNumber = 1;
hdr.PageType = MPI_CONFIG_PAGETYPE_FC_PORT;
cfg.cfghdr.hdr = &hdr;
cfg.physAddr = -1;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.dir = 0;
cfg.pageAddr = portnum;
cfg.timeout = 0;
if ((rc = mpt_config(ioc, &cfg)) != 0)
return rc;
if (hdr.PageLength == 0)
return -ENODEV;
start_over:
if (ioc->fc_data.fc_port_page1[portnum].data == NULL) {
data_sz = hdr.PageLength * 4;
if (data_sz < sizeof(FCPortPage1_t))
data_sz = sizeof(FCPortPage1_t);
page1_alloc = dma_alloc_coherent(&ioc->pcidev->dev, data_sz,
&page1_dma, GFP_KERNEL);
if (!page1_alloc)
return -ENOMEM;
} else {
page1_alloc = ioc->fc_data.fc_port_page1[portnum].data;
page1_dma = ioc->fc_data.fc_port_page1[portnum].dma;
data_sz = ioc->fc_data.fc_port_page1[portnum].pg_sz;
if (hdr.PageLength * 4 > data_sz) {
ioc->fc_data.fc_port_page1[portnum].data = NULL;
dma_free_coherent(&ioc->pcidev->dev, data_sz,
page1_alloc, page1_dma);
goto start_over;
}
}
cfg.physAddr = page1_dma;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
if ((rc = mpt_config(ioc, &cfg)) == 0) {
ioc->fc_data.fc_port_page1[portnum].data = page1_alloc;
ioc->fc_data.fc_port_page1[portnum].pg_sz = data_sz;
ioc->fc_data.fc_port_page1[portnum].dma = page1_dma;
} else {
ioc->fc_data.fc_port_page1[portnum].data = NULL;
dma_free_coherent(&ioc->pcidev->dev, data_sz, page1_alloc,
page1_dma);
}
return rc;
}
static void
mptfc_SetFcPortPage1_defaults(MPT_ADAPTER *ioc)
{
int ii;
FCPortPage1_t *pp1;
#define MPTFC_FW_DEVICE_TIMEOUT (1)
#define MPTFC_FW_IO_PEND_TIMEOUT (1)
#define ON_FLAGS (MPI_FCPORTPAGE1_FLAGS_IMMEDIATE_ERROR_REPLY)
#define OFF_FLAGS (MPI_FCPORTPAGE1_FLAGS_VERBOSE_RESCAN_EVENTS)
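/* Desired defaults for every port: 1-second firmware device and
 * IO-pend timeouts, immediate error replies on, verbose rescan
 * events off.
 */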
for (ii=0; ii<ioc->facts.NumberOfPorts; ii++) {
if (mptfc_GetFcPortPage1(ioc, ii) != 0)
continue;
pp1 = ioc->fc_data.fc_port_page1[ii].data;
if ((pp1->InitiatorDeviceTimeout == MPTFC_FW_DEVICE_TIMEOUT)
&& (pp1->InitiatorIoPendTimeout == MPTFC_FW_IO_PEND_TIMEOUT)
&& ((pp1->Flags & ON_FLAGS) == ON_FLAGS)
&& ((pp1->Flags & OFF_FLAGS) == 0))
continue;
pp1->InitiatorDeviceTimeout = MPTFC_FW_DEVICE_TIMEOUT;
pp1->InitiatorIoPendTimeout = MPTFC_FW_IO_PEND_TIMEOUT;
pp1->Flags &= ~OFF_FLAGS;
pp1->Flags |= ON_FLAGS;
mptfc_WriteFcPortPage1(ioc, ii);
}
}
static void
mptfc_init_host_attr(MPT_ADAPTER *ioc,int portnum)
{
unsigned class = 0;
unsigned cos = 0;
unsigned speed;
unsigned port_type;
unsigned port_state;
FCPortPage0_t *pp0;
struct Scsi_Host *sh;
char *sn;
/* only one SCSI (FC) host was allocated, so attributes are set for port 0 only */
if (portnum != 0)
return;
pp0 = &ioc->fc_port_page0[portnum];
sh = ioc->sh;
sn = fc_host_symbolic_name(sh);
snprintf(sn, FC_SYMBOLIC_NAME_SIZE, "%s %s%08xh",
ioc->prod_name,
MPT_FW_REV_MAGIC_ID_STRING,
ioc->facts.FWVersion.Word);
fc_host_tgtid_bind_type(sh) = FC_TGTID_BIND_BY_WWPN;
fc_host_maxframe_size(sh) = pp0->MaxFrameSize;
fc_host_node_name(sh) =
(u64)pp0->WWNN.High << 32 | (u64)pp0->WWNN.Low;
fc_host_port_name(sh) =
(u64)pp0->WWPN.High << 32 | (u64)pp0->WWPN.Low;
fc_host_port_id(sh) = pp0->PortIdentifier;
class = pp0->SupportedServiceClass;
if (class & MPI_FCPORTPAGE0_SUPPORT_CLASS_1)
cos |= FC_COS_CLASS1;
if (class & MPI_FCPORTPAGE0_SUPPORT_CLASS_2)
cos |= FC_COS_CLASS2;
if (class & MPI_FCPORTPAGE0_SUPPORT_CLASS_3)
cos |= FC_COS_CLASS3;
fc_host_supported_classes(sh) = cos;
if (pp0->CurrentSpeed == MPI_FCPORTPAGE0_CURRENT_SPEED_1GBIT)
speed = FC_PORTSPEED_1GBIT;
else if (pp0->CurrentSpeed == MPI_FCPORTPAGE0_CURRENT_SPEED_2GBIT)
speed = FC_PORTSPEED_2GBIT;
else if (pp0->CurrentSpeed == MPI_FCPORTPAGE0_CURRENT_SPEED_4GBIT)
speed = FC_PORTSPEED_4GBIT;
else if (pp0->CurrentSpeed == MPI_FCPORTPAGE0_CURRENT_SPEED_10GBIT)
speed = FC_PORTSPEED_10GBIT;
else
speed = FC_PORTSPEED_UNKNOWN;
fc_host_speed(sh) = speed;
speed = 0;
if (pp0->SupportedSpeeds & MPI_FCPORTPAGE0_SUPPORT_1GBIT_SPEED)
speed |= FC_PORTSPEED_1GBIT;
if (pp0->SupportedSpeeds & MPI_FCPORTPAGE0_SUPPORT_2GBIT_SPEED)
speed |= FC_PORTSPEED_2GBIT;
if (pp0->SupportedSpeeds & MPI_FCPORTPAGE0_SUPPORT_4GBIT_SPEED)
speed |= FC_PORTSPEED_4GBIT;
if (pp0->SupportedSpeeds & MPI_FCPORTPAGE0_SUPPORT_10GBIT_SPEED)
speed |= FC_PORTSPEED_10GBIT;
fc_host_supported_speeds(sh) = speed;
port_state = FC_PORTSTATE_UNKNOWN;
if (pp0->PortState == MPI_FCPORTPAGE0_PORTSTATE_ONLINE)
port_state = FC_PORTSTATE_ONLINE;
else if (pp0->PortState == MPI_FCPORTPAGE0_PORTSTATE_OFFLINE)
port_state = FC_PORTSTATE_LINKDOWN;
fc_host_port_state(sh) = port_state;
port_type = FC_PORTTYPE_UNKNOWN;
if (pp0->Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT)
port_type = FC_PORTTYPE_PTP;
else if (pp0->Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP)
port_type = FC_PORTTYPE_LPORT;
else if (pp0->Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP)
port_type = FC_PORTTYPE_NLPORT;
else if (pp0->Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT)
port_type = FC_PORTTYPE_NPORT;
fc_host_port_type(sh) = port_type;
fc_host_fabric_name(sh) =
(pp0->Flags & MPI_FCPORTPAGE0_FLAGS_FABRIC_WWN_VALID) ?
(u64)pp0->FabricWWNN.High << 32 | (u64)pp0->FabricWWNN.Low :
(u64)pp0->WWNN.High << 32 | (u64)pp0->WWNN.Low;
}
static void
mptfc_link_status_change(struct work_struct *work)
{
MPT_ADAPTER *ioc =
container_of(work, MPT_ADAPTER, fc_rescan_work);
int ii;
for (ii=0; ii < ioc->facts.NumberOfPorts; ii++)
(void) mptfc_GetFcPortPage0(ioc, ii);
}
static void
mptfc_setup_reset(struct work_struct *work)
{
MPT_ADAPTER *ioc =
container_of(work, MPT_ADAPTER, fc_setup_reset_work);
u64 pn;
struct mptfc_rport_info *ri;
struct scsi_target *starget;
VirtTarget *vtarget;
/* reset about to happen, delete (block) all rports */
list_for_each_entry(ri, &ioc->fc_rports, list) {
if (ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED) {
ri->flags &= ~MPT_RPORT_INFO_FLAGS_REGISTERED;
fc_remote_port_delete(ri->rport); /* won't sleep */
ri->rport = NULL;
starget = ri->starget;
if (starget) {
vtarget = starget->hostdata;
if (vtarget)
vtarget->deleted = 1;
}
pn = (u64)ri->pg0.WWPN.High << 32 |
(u64)ri->pg0.WWPN.Low;
dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT
"mptfc_setup_reset.%d: %llx deleted\n",
ioc->name,
ioc->sh->host_no,
(unsigned long long)pn));
}
}
}
static void
mptfc_rescan_devices(struct work_struct *work)
{
MPT_ADAPTER *ioc =
container_of(work, MPT_ADAPTER, fc_rescan_work);
int ii;
u64 pn;
struct mptfc_rport_info *ri;
struct scsi_target *starget;
VirtTarget *vtarget;
/* start by tagging all ports as missing */
list_for_each_entry(ri, &ioc->fc_rports, list) {
if (ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED) {
ri->flags |= MPT_RPORT_INFO_FLAGS_MISSING;
}
}
/*
* now rescan devices known to adapter,
* will reregister existing rports
*/
for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) {
(void) mptfc_GetFcPortPage0(ioc, ii);
mptfc_init_host_attr(ioc, ii); /* refresh */
mptfc_GetFcDevPage0(ioc, ii, mptfc_register_dev);
}
/* delete devices still missing */
list_for_each_entry(ri, &ioc->fc_rports, list) {
/* if newly missing, delete it */
if (ri->flags & MPT_RPORT_INFO_FLAGS_MISSING) {
ri->flags &= ~(MPT_RPORT_INFO_FLAGS_REGISTERED|
MPT_RPORT_INFO_FLAGS_MISSING);
fc_remote_port_delete(ri->rport); /* won't sleep */
ri->rport = NULL;
starget = ri->starget;
if (starget) {
vtarget = starget->hostdata;
if (vtarget)
vtarget->deleted = 1;
}
pn = (u64)ri->pg0.WWPN.High << 32 |
(u64)ri->pg0.WWPN.Low;
dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT
"mptfc_rescan.%d: %llx deleted\n",
ioc->name,
ioc->sh->host_no,
(unsigned long long)pn));
}
}
}
static int
mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct Scsi_Host *sh;
MPT_SCSI_HOST *hd;
MPT_ADAPTER *ioc;
unsigned long flags;
int ii;
int numSGE = 0;
int scale;
int ioc_cap;
int error=0;
int r;
if ((r = mpt_attach(pdev,id)) != 0)
return r;
ioc = pci_get_drvdata(pdev);
ioc->DoneCtx = mptfcDoneCtx;
ioc->TaskCtx = mptfcTaskCtx;
ioc->InternalCtx = mptfcInternalCtx;
/* Added sanity check on readiness of the MPT adapter.
*/
if (ioc->last_state != MPI_IOC_STATE_OPERATIONAL) {
printk(MYIOC_s_WARN_FMT
"Skipping because it's not operational!\n",
ioc->name);
error = -ENODEV;
goto out_mptfc_probe;
}
if (!ioc->active) {
printk(MYIOC_s_WARN_FMT "Skipping because it's disabled!\n",
ioc->name);
error = -ENODEV;
goto out_mptfc_probe;
}
/* Sanity check - ensure at least 1 port is INITIATOR capable
*/
ioc_cap = 0;
for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) {
if (ioc->pfacts[ii].ProtocolFlags &
MPI_PORTFACTS_PROTOCOL_INITIATOR)
ioc_cap++;
}
if (!ioc_cap) {
printk(MYIOC_s_WARN_FMT
"Skipping ioc=%p because SCSI Initiator mode is NOT enabled!\n",
ioc->name, ioc);
return 0;
}
sh = scsi_host_alloc(&mptfc_driver_template, sizeof(MPT_SCSI_HOST));
if (!sh) {
printk(MYIOC_s_WARN_FMT
"Unable to register controller with SCSI subsystem\n",
ioc->name);
error = -1;
goto out_mptfc_probe;
}
spin_lock_init(&ioc->fc_rescan_work_lock);
INIT_WORK(&ioc->fc_rescan_work, mptfc_rescan_devices);
INIT_WORK(&ioc->fc_setup_reset_work, mptfc_setup_reset);
INIT_WORK(&ioc->fc_lsc_work, mptfc_link_status_change);
spin_lock_irqsave(&ioc->FreeQlock, flags);
/* Attach the SCSI Host to the IOC structure
*/
ioc->sh = sh;
sh->io_port = 0;
sh->n_io_port = 0;
sh->irq = 0;
/* set 16 byte cdb's */
sh->max_cmd_len = 16;
sh->max_id = ioc->pfacts->MaxDevices;
sh->max_lun = max_lun;
/* Required entry.
*/
sh->unique_id = ioc->id;
/* Verify that we won't exceed the maximum
* number of chain buffers
* We can optimize: ZZ = req_sz/sizeof(SGE)
* For 32bit SGE's:
* numSGE = 1 + (ZZ-1)*(maxChain -1) + ZZ
* + (req_sz - 64)/sizeof(SGE)
* A slightly different algorithm is required for
* 64bit SGEs.
*/
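/* Worked example with hypothetical values: req_sz = 128,
 * SGE_size = 12 (32-bit SGEs), MaxChainDepth = 4:
 *	scale  = 128 / 12 = 10
 *	numSGE = 1 + (10 - 1) * (4 - 1) + 10 + (128 - 64) / 12 = 43
 */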
scale = ioc->req_sz/ioc->SGE_size;
if (ioc->sg_addr_size == sizeof(u64)) {
numSGE = (scale - 1) *
(ioc->facts.MaxChainDepth-1) + scale +
(ioc->req_sz - 60) / ioc->SGE_size;
} else {
numSGE = 1 + (scale - 1) *
(ioc->facts.MaxChainDepth-1) + scale +
(ioc->req_sz - 64) / ioc->SGE_size;
}
if (numSGE < sh->sg_tablesize) {
/* Reset this value */
dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Resetting sg_tablesize to %d from %d\n",
ioc->name, numSGE, sh->sg_tablesize));
sh->sg_tablesize = numSGE;
}
spin_unlock_irqrestore(&ioc->FreeQlock, flags);
hd = shost_priv(sh);
hd->ioc = ioc;
/* SCSI needs scsi_cmnd lookup table!
* (with size equal to req_depth*PtrSz!)
*/
ioc->ScsiLookup = kcalloc(ioc->req_depth, sizeof(void *), GFP_KERNEL);
if (!ioc->ScsiLookup) {
error = -ENOMEM;
goto out_mptfc_probe;
}
spin_lock_init(&ioc->scsi_lookup_lock);
dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ScsiLookup @ %p\n",
ioc->name, ioc->ScsiLookup));
hd->last_queue_full = 0;
sh->transportt = mptfc_transport_template;
error = scsi_add_host (sh, &ioc->pcidev->dev);
if(error) {
dprintk(ioc, printk(MYIOC_s_ERR_FMT
"scsi_add_host failed\n", ioc->name));
goto out_mptfc_probe;
}
/* initialize workqueue */
snprintf(ioc->fc_rescan_work_q_name, sizeof(ioc->fc_rescan_work_q_name),
"mptfc_wq_%d", sh->host_no);
ioc->fc_rescan_work_q =
alloc_ordered_workqueue(ioc->fc_rescan_work_q_name,
WQ_MEM_RECLAIM);
if (!ioc->fc_rescan_work_q) {
error = -ENOMEM;
goto out_mptfc_host;
}
/*
 * Pre-fetch FC port WWN and related data
 * (FCPortPage0_t) for each port.
*/
for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) {
(void) mptfc_GetFcPortPage0(ioc, ii);
}
mptfc_SetFcPortPage1_defaults(ioc);
/*
* scan for rports -
* by doing it via the workqueue, some locking is eliminated
*/
queue_work(ioc->fc_rescan_work_q, &ioc->fc_rescan_work);
flush_workqueue(ioc->fc_rescan_work_q);
return 0;
out_mptfc_host:
scsi_remove_host(sh);
out_mptfc_probe:
mptscsih_remove(pdev);
return error;
}
static struct pci_driver mptfc_driver = {
.name = "mptfc",
.id_table = mptfc_pci_table,
.probe = mptfc_probe,
.remove = mptfc_remove,
.shutdown = mptscsih_shutdown,
#ifdef CONFIG_PM
.suspend = mptscsih_suspend,
.resume = mptscsih_resume,
#endif
};
static int
mptfc_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
{
MPT_SCSI_HOST *hd;
u8 event = le32_to_cpu(pEvReply->Event) & 0xFF;
unsigned long flags;
int rc=1;
if (ioc->bus_type != FC)
return 0;
devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT "MPT event (=%02Xh) routed to SCSI host driver!\n",
ioc->name, event));
if (ioc->sh == NULL ||
((hd = shost_priv(ioc->sh)) == NULL))
return 1;
switch (event) {
case MPI_EVENT_RESCAN:
spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
if (ioc->fc_rescan_work_q) {
queue_work(ioc->fc_rescan_work_q,
&ioc->fc_rescan_work);
}
spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);
break;
case MPI_EVENT_LINK_STATUS_CHANGE:
spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
if (ioc->fc_rescan_work_q) {
queue_work(ioc->fc_rescan_work_q,
&ioc->fc_lsc_work);
}
spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);
break;
default:
rc = mptscsih_event_process(ioc,pEvReply);
break;
}
return rc;
}
static int
mptfc_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
{
int rc;
unsigned long flags;
rc = mptscsih_ioc_reset(ioc,reset_phase);
if ((ioc->bus_type != FC) || (!rc))
return rc;
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
": IOC %s_reset routed to FC host driver!\n",ioc->name,
reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));
if (reset_phase == MPT_IOC_SETUP_RESET) {
spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
if (ioc->fc_rescan_work_q) {
queue_work(ioc->fc_rescan_work_q,
&ioc->fc_setup_reset_work);
}
spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);
}
else if (reset_phase == MPT_IOC_PRE_RESET) {
}
else { /* MPT_IOC_POST_RESET */
mptfc_SetFcPortPage1_defaults(ioc);
spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
if (ioc->fc_rescan_work_q) {
queue_work(ioc->fc_rescan_work_q,
&ioc->fc_rescan_work);
}
spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);
}
return 1;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mptfc_init - Register MPT adapter(s) as SCSI host(s) with SCSI mid-layer.
*
* Returns 0 for success, non-zero for failure.
*/
static int __init
mptfc_init(void)
{
int error;
show_mptmod_ver(my_NAME, my_VERSION);
/* sanity check module parameters */
if (mptfc_dev_loss_tmo <= 0)
mptfc_dev_loss_tmo = MPTFC_DEV_LOSS_TMO;
mptfc_transport_template =
fc_attach_transport(&mptfc_transport_functions);
if (!mptfc_transport_template)
return -ENODEV;
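/* Register three protocol-driver contexts with the MPT base:
 * normal I/O completion, task management, and internal (scan/DV)
 * commands.
 */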
mptfcDoneCtx = mpt_register(mptscsih_io_done, MPTFC_DRIVER,
"mptscsih_scandv_complete");
mptfcTaskCtx = mpt_register(mptscsih_taskmgmt_complete, MPTFC_DRIVER,
"mptscsih_scandv_complete");
mptfcInternalCtx = mpt_register(mptscsih_scandv_complete, MPTFC_DRIVER,
"mptscsih_scandv_complete");
mpt_event_register(mptfcDoneCtx, mptfc_event_process);
mpt_reset_register(mptfcDoneCtx, mptfc_ioc_reset);
error = pci_register_driver(&mptfc_driver);
if (error)
fc_release_transport(mptfc_transport_template);
return error;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mptfc_remove - Remove fc infrastructure for devices
* @pdev: Pointer to pci_dev structure
*
*/
static void mptfc_remove(struct pci_dev *pdev)
{
MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
struct mptfc_rport_info *p, *n;
struct workqueue_struct *work_q;
unsigned long flags;
int ii;
/* destroy workqueue */
if ((work_q=ioc->fc_rescan_work_q)) {
spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
ioc->fc_rescan_work_q = NULL;
spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);
destroy_workqueue(work_q);
}
fc_remove_host(ioc->sh);
list_for_each_entry_safe(p, n, &ioc->fc_rports, list) {
list_del(&p->list);
kfree(p);
}
for (ii=0; ii<ioc->facts.NumberOfPorts; ii++) {
if (ioc->fc_data.fc_port_page1[ii].data) {
dma_free_coherent(&ioc->pcidev->dev,
ioc->fc_data.fc_port_page1[ii].pg_sz,
ioc->fc_data.fc_port_page1[ii].data,
ioc->fc_data.fc_port_page1[ii].dma);
ioc->fc_data.fc_port_page1[ii].data = NULL;
}
}
scsi_remove_host(ioc->sh);
mptscsih_remove(pdev);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mptfc_exit - Unregisters MPT adapter(s)
*
*/
static void __exit
mptfc_exit(void)
{
pci_unregister_driver(&mptfc_driver);
fc_release_transport(mptfc_transport_template);
mpt_reset_deregister(mptfcDoneCtx);
mpt_event_deregister(mptfcDoneCtx);
mpt_deregister(mptfcInternalCtx);
mpt_deregister(mptfcTaskCtx);
mpt_deregister(mptfcDoneCtx);
}
module_init(mptfc_init);
module_exit(mptfc_exit);
| linux-master | drivers/message/fusion/mptfc.c |
/*
* linux/drivers/message/fusion/mptspi.c
* For use with LSI PCI chip/adapter(s)
* running LSI Fusion MPT (Message Passing Technology) firmware.
*
* Copyright (c) 1999-2008 LSI Corporation
* (mailto:[email protected])
*
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
NO WARRANTY
THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
solely responsible for determining the appropriateness of using and
distributing the Program and assumes all risks associated with its
exercise of rights under this Agreement, including but not limited to
the risks and costs of program errors, damage to or loss of data,
programs or equipment, and unavailability or interruption of operations.
DISCLAIMER OF LIABILITY
NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/delay.h> /* for mdelay */
#include <linux/interrupt.h>
#include <linux/reboot.h> /* notifier code */
#include <linux/workqueue.h>
#include <linux/raid_class.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_spi.h>
#include <scsi/scsi_dbg.h>
#include "mptbase.h"
#include "mptscsih.h"
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
#define my_NAME "Fusion MPT SPI Host driver"
#define my_VERSION MPT_LINUX_VERSION_COMMON
#define MYNAM "mptspi"
MODULE_AUTHOR(MODULEAUTHOR);
MODULE_DESCRIPTION(my_NAME);
MODULE_LICENSE("GPL");
MODULE_VERSION(my_VERSION);
/* Command line args */
static int mpt_saf_te = MPTSCSIH_SAF_TE;
module_param(mpt_saf_te, int, 0);
MODULE_PARM_DESC(mpt_saf_te, " Force enabling SEP Processor: enable=1 (default=MPTSCSIH_SAF_TE=0)");
static void mptspi_write_offset(struct scsi_target *, int);
static void mptspi_write_width(struct scsi_target *, int);
static int mptspi_write_spi_device_pg1(struct scsi_target *,
struct _CONFIG_PAGE_SCSI_DEVICE_1 *);
static struct scsi_transport_template *mptspi_transport_template = NULL;
static u8 mptspiDoneCtx = MPT_MAX_PROTOCOL_DRIVERS;
static u8 mptspiTaskCtx = MPT_MAX_PROTOCOL_DRIVERS;
static u8 mptspiInternalCtx = MPT_MAX_PROTOCOL_DRIVERS; /* Used only for internal commands */
/**
* mptspi_setTargetNegoParms - Update the target negotiation parameters
* @hd: Pointer to a SCSI Host Structure
* @target: per target private data
* @sdev: SCSI device
*
* Update the target negotiation parameters based on the Inquiry
* data, adapter capabilities, and NVRAM settings.
**/
static void
mptspi_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtTarget *target,
struct scsi_device *sdev)
{
MPT_ADAPTER *ioc = hd->ioc;
SpiCfgData *pspi_data = &ioc->spi_data;
int id = (int) target->id;
int nvram;
u8 width = MPT_NARROW;
u8 factor = MPT_ASYNC;
u8 offset = 0;
u8 nfactor;
u8 noQas = 1;
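/* The sync 'factor' is the SPI transfer period factor: a smaller
 * value means a faster negotiated rate (MPT_ULTRA320 < MPT_ULTRA160
 * < MPT_ULTRA2 < MPT_ASYNC).
 */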
target->negoFlags = pspi_data->noQas;
if (sdev->scsi_level < SCSI_2) {
width = 0;
factor = MPT_ULTRA2;
offset = pspi_data->maxSyncOffset;
target->tflags &= ~MPT_TARGET_FLAGS_Q_YES;
} else {
if (scsi_device_wide(sdev))
width = 1;
if (scsi_device_sync(sdev)) {
factor = pspi_data->minSyncFactor;
if (!scsi_device_dt(sdev))
factor = MPT_ULTRA2;
else {
if (!scsi_device_ius(sdev) &&
!scsi_device_qas(sdev))
factor = MPT_ULTRA160;
else {
factor = MPT_ULTRA320;
if (scsi_device_qas(sdev)) {
ddvprintk(ioc,
printk(MYIOC_s_DEBUG_FMT "Enabling QAS due to "
"byte56=%02x on id=%d!\n", ioc->name,
scsi_device_qas(sdev), id));
noQas = 0;
}
if (sdev->type == TYPE_TAPE &&
scsi_device_ius(sdev))
target->negoFlags |= MPT_TAPE_NEGO_IDP;
}
}
offset = pspi_data->maxSyncOffset;
/* If the target is a RAID volume, never disable QAS.
 * For a non-RAID target, do not disable QAS if bit 1
 * of inquiry byte 56 is set:
 *	bit 1: QAS support, non-raid only
 *	bit 0: IU support
 */
if (target->raidVolume == 1)
noQas = 0;
} else {
factor = MPT_ASYNC;
offset = 0;
}
}
if (!sdev->tagged_supported)
target->tflags &= ~MPT_TARGET_FLAGS_Q_YES;
/* Update tflags based on NVRAM settings. (SCSI only)
*/
if (pspi_data->nvram && (pspi_data->nvram[id] != MPT_HOST_NVRAM_INVALID)) {
nvram = pspi_data->nvram[id];
nfactor = (nvram & MPT_NVRAM_SYNC_MASK) >> 8;
if (width)
width = nvram & MPT_NVRAM_WIDE_DISABLE ? 0 : 1;
if (offset > 0) {
/* Ensure factor is set to the
* maximum of: adapter, nvram, inquiry
*/
if (nfactor) {
if (nfactor < pspi_data->minSyncFactor )
nfactor = pspi_data->minSyncFactor;
factor = max(factor, nfactor);
if (factor == MPT_ASYNC)
offset = 0;
} else {
offset = 0;
factor = MPT_ASYNC;
}
} else {
factor = MPT_ASYNC;
}
}
/* Make sure data is consistent
*/
if ((!width) && (factor < MPT_ULTRA2))
factor = MPT_ULTRA2;
/* Save the data to the target structure.
*/
target->minSyncFactor = factor;
target->maxOffset = offset;
target->maxWidth = width;
spi_min_period(scsi_target(sdev)) = factor;
spi_max_offset(scsi_target(sdev)) = offset;
spi_max_width(scsi_target(sdev)) = width;
target->tflags |= MPT_TARGET_FLAGS_VALID_NEGO;
/* Disable unused features.
*/
if (!width)
target->negoFlags |= MPT_TARGET_NO_NEGO_WIDE;
if (!offset)
target->negoFlags |= MPT_TARGET_NO_NEGO_SYNC;
if ( factor > MPT_ULTRA320 )
noQas = 0;
if (noQas && (pspi_data->noQas == 0)) {
pspi_data->noQas |= MPT_TARGET_NO_NEGO_QAS;
target->negoFlags |= MPT_TARGET_NO_NEGO_QAS;
/* Disable QAS in a mixed configuration case
*/
ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Disabling QAS due to noQas=%02x on id=%d!\n", ioc->name, noQas, id));
}
}
/**
* mptspi_writeIOCPage4 - write IOC Page 4
* @hd: Pointer to a SCSI Host Structure
* @channel: channel number
* @id: write IOC Page4 for this ID & Bus
*
* Return: -EAGAIN if unable to obtain a Message Frame
* or 0 if success.
*
 * Remark: We do not wait for a reply; pages are written sequentially.
**/
static int
mptspi_writeIOCPage4(MPT_SCSI_HOST *hd, u8 channel , u8 id)
{
MPT_ADAPTER *ioc = hd->ioc;
Config_t *pReq;
IOCPage4_t *IOCPage4Ptr;
MPT_FRAME_HDR *mf;
dma_addr_t dataDma;
u32 flagsLength;
int ii;
/* Get a MF for this command.
*/
if ((mf = mpt_get_msg_frame(ioc->DoneCtx, ioc)) == NULL) {
dfailprintk(ioc, printk(MYIOC_s_WARN_FMT
"writeIOCPage4 : no msg frames!\n",ioc->name));
return -EAGAIN;
}
/* Set the request and the data pointers.
* Place data at end of MF.
*/
pReq = (Config_t *)mf;
/* Complete the request frame (same for all requests).
*/
pReq->Action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
pReq->Reserved = 0;
pReq->ChainOffset = 0;
pReq->Function = MPI_FUNCTION_CONFIG;
pReq->ExtPageLength = 0;
pReq->ExtPageType = 0;
pReq->MsgFlags = 0;
for (ii=0; ii < 8; ii++) {
pReq->Reserved2[ii] = 0;
}
IOCPage4Ptr = ioc->spi_data.pIocPg4;
dataDma = ioc->spi_data.IocPg4_dma;
ii = IOCPage4Ptr->ActiveSEP++;
IOCPage4Ptr->SEP[ii].SEPTargetID = id;
IOCPage4Ptr->SEP[ii].SEPBus = channel;
pReq->Header = IOCPage4Ptr->Header;
pReq->PageAddress = cpu_to_le32(id | (channel << 8 ));
/* Add a SGE to the config request.
*/
flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE |
(IOCPage4Ptr->Header.PageLength + ii) * 4;
ioc->add_sge((char *)&pReq->PageBufferSGE, flagsLength, dataDma);
ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"writeIOCPage4: MaxSEP=%d ActiveSEP=%d id=%d bus=%d\n",
ioc->name, IOCPage4Ptr->MaxSEP, IOCPage4Ptr->ActiveSEP, id, channel));
mpt_put_msg_frame(ioc->DoneCtx, ioc, mf);
return 0;
}
/**
* mptspi_initTarget - Target, LUN alloc/free functionality.
* @hd: Pointer to MPT_SCSI_HOST structure
* @vtarget: per target private data
* @sdev: SCSI device
*
* NOTE: It's only SAFE to call this routine if data points to
* sane & valid STANDARD INQUIRY data!
*
* Allocate and initialize memory for this target.
* Save inquiry data.
*
**/
static void
mptspi_initTarget(MPT_SCSI_HOST *hd, VirtTarget *vtarget,
struct scsi_device *sdev)
{
/* Is LUN supported? If so, upper 2 bits will be 0
* in first byte of inquiry data.
*/
if (sdev->inq_periph_qual != 0)
return;
if (vtarget == NULL)
return;
vtarget->type = sdev->type;
if ((sdev->type == TYPE_PROCESSOR) && (hd->ioc->spi_data.Saf_Te)) {
/* Treat all Processors as SAF-TE if
* command line option is set */
vtarget->tflags |= MPT_TARGET_FLAGS_SAF_TE_ISSUED;
mptspi_writeIOCPage4(hd, vtarget->channel, vtarget->id);
} else if ((sdev->type == TYPE_PROCESSOR) &&
!(vtarget->tflags & MPT_TARGET_FLAGS_SAF_TE_ISSUED)) {
if (sdev->inquiry_len > 49) {
if (sdev->inquiry[44] == 'S' &&
sdev->inquiry[45] == 'A' &&
sdev->inquiry[46] == 'F' &&
sdev->inquiry[47] == '-' &&
sdev->inquiry[48] == 'T' &&
sdev->inquiry[49] == 'E') {
vtarget->tflags |= MPT_TARGET_FLAGS_SAF_TE_ISSUED;
mptspi_writeIOCPage4(hd, vtarget->channel, vtarget->id);
}
}
}
mptspi_setTargetNegoParms(hd, vtarget, sdev);
}
/**
* mptspi_is_raid - determine whether the target belongs to a RAID volume
* @hd: Pointer to a SCSI HOST structure
* @id: target device id
*
* Return:
* non-zero = true
* zero = false
*
*/
static int
mptspi_is_raid(struct _MPT_SCSI_HOST *hd, u32 id)
{
int i, rc = 0;
MPT_ADAPTER *ioc = hd->ioc;
if (!ioc->raid_data.pIocPg2)
goto out;
if (!ioc->raid_data.pIocPg2->NumActiveVolumes)
goto out;
for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) {
if (ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID == id) {
rc = 1;
goto out;
}
}
out:
return rc;
}
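/*
* mptspi_target_alloc - allocate and seed the per-target VirtTarget.
* Channel 1 IDs are hidden RAID components and are remapped to their
* physdisk number for passthrough.  Transfer limits come from NVRAM
* when valid, otherwise from the adapter defaults, and the target
* starts out async/narrow until negotiation runs.
*/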
static int mptspi_target_alloc(struct scsi_target *starget)
{
struct Scsi_Host *shost = dev_to_shost(&starget->dev);
struct _MPT_SCSI_HOST *hd = shost_priv(shost);
VirtTarget *vtarget;
MPT_ADAPTER *ioc;
if (hd == NULL)
return -ENODEV;
ioc = hd->ioc;
vtarget = kzalloc(sizeof(VirtTarget), GFP_KERNEL);
if (!vtarget)
return -ENOMEM;
vtarget->ioc_id = ioc->id;
vtarget->tflags = MPT_TARGET_FLAGS_Q_YES;
vtarget->id = (u8)starget->id;
vtarget->channel = (u8)starget->channel;
vtarget->starget = starget;
starget->hostdata = vtarget;
if (starget->channel == 1) {
if (mptscsih_is_phys_disk(ioc, 0, starget->id) == 0)
return 0;
vtarget->tflags |= MPT_TARGET_FLAGS_RAID_COMPONENT;
/* The real channel for this device is zero */
vtarget->channel = 0;
/* The actual physdisknum (for RAID passthrough) */
vtarget->id = mptscsih_raid_id_to_num(ioc, 0,
starget->id);
}
if (starget->channel == 0 &&
mptspi_is_raid(hd, starget->id)) {
vtarget->raidVolume = 1;
ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"RAID Volume @ channel=%d id=%d\n", ioc->name, starget->channel,
starget->id));
}
if (ioc->spi_data.nvram &&
ioc->spi_data.nvram[starget->id] != MPT_HOST_NVRAM_INVALID) {
u32 nvram = ioc->spi_data.nvram[starget->id];
spi_min_period(starget) = (nvram & MPT_NVRAM_SYNC_MASK) >> MPT_NVRAM_SYNC_SHIFT;
spi_max_width(starget) = nvram & MPT_NVRAM_WIDE_DISABLE ? 0 : 1;
} else {
spi_min_period(starget) = ioc->spi_data.minSyncFactor;
spi_max_width(starget) = ioc->spi_data.maxBusWidth;
}
spi_max_offset(starget) = ioc->spi_data.maxSyncOffset;
spi_offset(starget) = 0;
spi_period(starget) = 0xFF;
mptspi_write_width(starget, 0);
return 0;
}
static void
mptspi_target_destroy(struct scsi_target *starget)
{
kfree(starget->hostdata);
starget->hostdata = NULL;
}
/**
* mptspi_print_write_nego - print the negotiation parameters being written
* @hd: Pointer to a SCSI HOST structure
* @starget: SCSI target
* @ii: negotiation parameters
*
*/
static void
mptspi_print_write_nego(struct _MPT_SCSI_HOST *hd, struct scsi_target *starget, u32 ii)
{
ddvprintk(hd->ioc, printk(MYIOC_s_DEBUG_FMT "id=%d Requested = 0x%08x"
" ( %s factor = 0x%02x @ offset = 0x%02x %s%s%s%s%s%s%s%s)\n",
hd->ioc->name, starget->id, ii,
ii & MPI_SCSIDEVPAGE0_NP_WIDE ? "Wide ": "",
((ii >> 8) & 0xFF), ((ii >> 16) & 0xFF),
ii & MPI_SCSIDEVPAGE0_NP_IU ? "IU ": "",
ii & MPI_SCSIDEVPAGE0_NP_DT ? "DT ": "",
ii & MPI_SCSIDEVPAGE0_NP_QAS ? "QAS ": "",
ii & MPI_SCSIDEVPAGE0_NP_HOLD_MCS ? "HOLDMCS ": "",
ii & MPI_SCSIDEVPAGE0_NP_WR_FLOW ? "WRFLOW ": "",
ii & MPI_SCSIDEVPAGE0_NP_RD_STRM ? "RDSTRM ": "",
ii & MPI_SCSIDEVPAGE0_NP_RTI ? "RTI ": "",
ii & MPI_SCSIDEVPAGE0_NP_PCOMP_EN ? "PCOMP ": ""));
}
/**
* mptspi_print_read_nego - print the negotiation parameters just read
* @hd: Pointer to a SCSI HOST structure
* @starget: SCSI target
* @ii: negotiation parameters
*
*/
static void
mptspi_print_read_nego(struct _MPT_SCSI_HOST *hd, struct scsi_target *starget, u32 ii)
{
ddvprintk(hd->ioc, printk(MYIOC_s_DEBUG_FMT "id=%d Read = 0x%08x"
" ( %s factor = 0x%02x @ offset = 0x%02x %s%s%s%s%s%s%s%s)\n",
hd->ioc->name, starget->id, ii,
ii & MPI_SCSIDEVPAGE0_NP_WIDE ? "Wide ": "",
((ii >> 8) & 0xFF), ((ii >> 16) & 0xFF),
ii & MPI_SCSIDEVPAGE0_NP_IU ? "IU ": "",
ii & MPI_SCSIDEVPAGE0_NP_DT ? "DT ": "",
ii & MPI_SCSIDEVPAGE0_NP_QAS ? "QAS ": "",
ii & MPI_SCSIDEVPAGE0_NP_HOLD_MCS ? "HOLDMCS ": "",
ii & MPI_SCSIDEVPAGE0_NP_WR_FLOW ? "WRFLOW ": "",
ii & MPI_SCSIDEVPAGE0_NP_RD_STRM ? "RDSTRM ": "",
ii & MPI_SCSIDEVPAGE0_NP_RTI ? "RTI ": "",
ii & MPI_SCSIDEVPAGE0_NP_PCOMP_EN ? "PCOMP ": ""));
}
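/*
* mptspi_read_spi_device_pg0 - read SCSI Device Page 0 (the negotiated
* transfer parameters) for @starget into @pass_pg0 through a
* DMA-coherent bounce buffer.  RAID volumes have no SPI parameters,
* so they are rejected up front.
*/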
static int mptspi_read_spi_device_pg0(struct scsi_target *starget,
struct _CONFIG_PAGE_SCSI_DEVICE_0 *pass_pg0)
{
struct Scsi_Host *shost = dev_to_shost(&starget->dev);
struct _MPT_SCSI_HOST *hd = shost_priv(shost);
struct _MPT_ADAPTER *ioc = hd->ioc;
struct _CONFIG_PAGE_SCSI_DEVICE_0 *spi_dev_pg0;
dma_addr_t spi_dev_pg0_dma;
int size;
struct _x_config_parms cfg;
struct _CONFIG_PAGE_HEADER hdr;
int err = -EBUSY;
/* No SPI parameters for RAID devices */
if (starget->channel == 0 &&
mptspi_is_raid(hd, starget->id))
return -1;
size = ioc->spi_data.sdp0length * 4;
/*
if (ioc->spi_data.sdp0length & 1)
size += size + 4;
size += 2048;
*/
spi_dev_pg0 = dma_alloc_coherent(&ioc->pcidev->dev, size, &spi_dev_pg0_dma, GFP_KERNEL);
if (spi_dev_pg0 == NULL) {
starget_printk(KERN_ERR, starget, MYIOC_s_FMT
"dma_alloc_coherent for parameters failed\n", ioc->name);
return -EINVAL;
}
memset(&hdr, 0, sizeof(hdr));
hdr.PageVersion = ioc->spi_data.sdp0version;
hdr.PageLength = ioc->spi_data.sdp0length;
hdr.PageNumber = 0;
hdr.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
memset(&cfg, 0, sizeof(cfg));
cfg.cfghdr.hdr = &hdr;
cfg.physAddr = spi_dev_pg0_dma;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
cfg.dir = 0;
cfg.pageAddr = starget->id;
cfg.timeout = 60;
if (mpt_config(ioc, &cfg)) {
starget_printk(KERN_ERR, starget, MYIOC_s_FMT "mpt_config failed\n", ioc->name);
goto out_free;
}
err = 0;
memcpy(pass_pg0, spi_dev_pg0, size);
mptspi_print_read_nego(hd, starget, le32_to_cpu(spi_dev_pg0->NegotiatedParameters));
out_free:
dma_free_coherent(&ioc->pcidev->dev, size, spi_dev_pg0, spi_dev_pg0_dma);
return err;
}
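/*
* mptspi_getRP - pack the current SPI transport settings of @starget
* into the RequestedParameters word used by SCSI Device Page 1.
*/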
static u32 mptspi_getRP(struct scsi_target *starget)
{
u32 nego = 0;
nego |= spi_iu(starget) ? MPI_SCSIDEVPAGE1_RP_IU : 0;
nego |= spi_dt(starget) ? MPI_SCSIDEVPAGE1_RP_DT : 0;
nego |= spi_qas(starget) ? MPI_SCSIDEVPAGE1_RP_QAS : 0;
nego |= spi_hold_mcs(starget) ? MPI_SCSIDEVPAGE1_RP_HOLD_MCS : 0;
nego |= spi_wr_flow(starget) ? MPI_SCSIDEVPAGE1_RP_WR_FLOW : 0;
nego |= spi_rd_strm(starget) ? MPI_SCSIDEVPAGE1_RP_RD_STRM : 0;
nego |= spi_rti(starget) ? MPI_SCSIDEVPAGE1_RP_RTI : 0;
nego |= spi_pcomp_en(starget) ? MPI_SCSIDEVPAGE1_RP_PCOMP_EN : 0;
nego |= (spi_period(starget) << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD) & MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
nego |= (spi_offset(starget) << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET) & MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
nego |= spi_width(starget) ? MPI_SCSIDEVPAGE1_RP_WIDE : 0;
return nego;
}
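/*
* mptspi_read_parameters - refresh the SPI transport attributes of
* @starget from the NegotiatedParameters word of SCSI Device Page 0.
* Serves as the get_* hook for every attribute in the transport
* template below.
*/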
static void mptspi_read_parameters(struct scsi_target *starget)
{
int nego;
struct _CONFIG_PAGE_SCSI_DEVICE_0 spi_dev_pg0;
mptspi_read_spi_device_pg0(starget, &spi_dev_pg0);
nego = le32_to_cpu(spi_dev_pg0.NegotiatedParameters);
spi_iu(starget) = (nego & MPI_SCSIDEVPAGE0_NP_IU) ? 1 : 0;
spi_dt(starget) = (nego & MPI_SCSIDEVPAGE0_NP_DT) ? 1 : 0;
spi_qas(starget) = (nego & MPI_SCSIDEVPAGE0_NP_QAS) ? 1 : 0;
spi_wr_flow(starget) = (nego & MPI_SCSIDEVPAGE0_NP_WR_FLOW) ? 1 : 0;
spi_rd_strm(starget) = (nego & MPI_SCSIDEVPAGE0_NP_RD_STRM) ? 1 : 0;
spi_rti(starget) = (nego & MPI_SCSIDEVPAGE0_NP_RTI) ? 1 : 0;
spi_pcomp_en(starget) = (nego & MPI_SCSIDEVPAGE0_NP_PCOMP_EN) ? 1 : 0;
spi_hold_mcs(starget) = (nego & MPI_SCSIDEVPAGE0_NP_HOLD_MCS) ? 1 : 0;
spi_period(starget) = (nego & MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK) >> MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD;
spi_offset(starget) = (nego & MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK) >> MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET;
spi_width(starget) = (nego & MPI_SCSIDEVPAGE0_NP_WIDE) ? 1 : 0;
}
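/*
* mptscsih_quiesce_raid - quiesce (or re-enable, when @quiesce is zero)
* physical I/O to a RAID volume via a firmware RAID action request,
* waiting up to 10 seconds; on timeout the adapter is hard-reset.
*/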
static int
mptscsih_quiesce_raid(MPT_SCSI_HOST *hd, int quiesce, u8 channel, u8 id)
{
MPT_ADAPTER *ioc = hd->ioc;
MpiRaidActionRequest_t *pReq;
MPT_FRAME_HDR *mf;
int ret;
unsigned long timeleft;
mutex_lock(&ioc->internal_cmds.mutex);
/* Get and Populate a free Frame
*/
if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) {
dfailprintk(hd->ioc, printk(MYIOC_s_WARN_FMT
"%s: no msg frames!\n", ioc->name, __func__));
ret = -EAGAIN;
goto out;
}
pReq = (MpiRaidActionRequest_t *)mf;
if (quiesce)
pReq->Action = MPI_RAID_ACTION_QUIESCE_PHYS_IO;
else
pReq->Action = MPI_RAID_ACTION_ENABLE_PHYS_IO;
pReq->Reserved1 = 0;
pReq->ChainOffset = 0;
pReq->Function = MPI_FUNCTION_RAID_ACTION;
pReq->VolumeID = id;
pReq->VolumeBus = channel;
pReq->PhysDiskNum = 0;
pReq->MsgFlags = 0;
pReq->Reserved2 = 0;
pReq->ActionDataWord = 0; /* Reserved for this action */
ioc->add_sge((char *)&pReq->ActionDataSGE,
MPT_SGE_FLAGS_SSIMPLE_READ | 0, (dma_addr_t) -1);
ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RAID Volume action=%x channel=%d id=%d\n",
ioc->name, pReq->Action, channel, id));
INITIALIZE_MGMT_STATUS(ioc->internal_cmds.status)
mpt_put_msg_frame(ioc->InternalCtx, ioc, mf);
timeleft = wait_for_completion_timeout(&ioc->internal_cmds.done, 10*HZ);
if (!(ioc->internal_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
ret = -ETIME;
dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: TIMED OUT!\n",
ioc->name, __func__));
if (ioc->internal_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
goto out;
if (!timeleft) {
printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
ioc->name, __func__);
mpt_HardResetHandler(ioc, CAN_SLEEP);
mpt_free_msg_frame(ioc, mf);
}
goto out;
}
ret = ioc->internal_cmds.completion_code;
out:
CLEAR_MGMT_STATUS(ioc->internal_cmds.status)
mutex_unlock(&ioc->internal_cmds.mutex);
return ret;
}
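/*
* mptspi_dv_device - run domain validation on @sdev.  RAID volumes are
* skipped entirely; hidden RAID components (channel 1) are quiesced
* around the validation so DV traffic doesn't race volume I/O.
*/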
static void mptspi_dv_device(struct _MPT_SCSI_HOST *hd,
struct scsi_device *sdev)
{
VirtTarget *vtarget = scsi_target(sdev)->hostdata;
MPT_ADAPTER *ioc = hd->ioc;
/* no DV on RAID devices */
if (sdev->channel == 0 &&
mptspi_is_raid(hd, sdev->id))
return;
/* If this is a piece of a RAID, then quiesce first */
if (sdev->channel == 1 &&
mptscsih_quiesce_raid(hd, 1, vtarget->channel, vtarget->id) < 0) {
starget_printk(KERN_ERR, scsi_target(sdev), MYIOC_s_FMT
"Integrated RAID quiesce failed\n", ioc->name);
return;
}
hd->spi_pending |= (1 << sdev->id);
spi_dv_device(sdev);
hd->spi_pending &= ~(1 << sdev->id);
if (sdev->channel == 1 &&
mptscsih_quiesce_raid(hd, 0, vtarget->channel, vtarget->id) < 0)
starget_printk(KERN_ERR, scsi_target(sdev), MYIOC_s_FMT
"Integrated RAID resume failed\n", ioc->name);
mptspi_read_parameters(sdev->sdev_target);
spi_display_xfer_agreement(sdev->sdev_target);
mptspi_read_parameters(sdev->sdev_target);
}
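/*
* mptspi_slave_alloc - allocate the per-LUN VirtDevice and attach it to
* its VirtTarget.  Channel 1 is the hidden RAID passthrough bus, so
* non-physdisk IDs are rejected with -ENXIO and physdisks get
* no_uld_attach set to keep upper-level drivers off them.
*/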
static int mptspi_slave_alloc(struct scsi_device *sdev)
{
MPT_SCSI_HOST *hd = shost_priv(sdev->host);
VirtTarget *vtarget;
VirtDevice *vdevice;
struct scsi_target *starget;
MPT_ADAPTER *ioc = hd->ioc;
if (sdev->channel == 1 &&
mptscsih_is_phys_disk(ioc, 0, sdev->id) == 0)
return -ENXIO;
vdevice = kzalloc(sizeof(VirtDevice), GFP_KERNEL);
if (!vdevice) {
printk(MYIOC_s_ERR_FMT "slave_alloc kmalloc(%zd) FAILED!\n",
ioc->name, sizeof(VirtDevice));
return -ENOMEM;
}
vdevice->lun = sdev->lun;
sdev->hostdata = vdevice;
starget = scsi_target(sdev);
vtarget = starget->hostdata;
vdevice->vtarget = vtarget;
vtarget->num_luns++;
if (sdev->channel == 1)
sdev->no_uld_attach = 1;
return 0;
}
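/*
* mptspi_slave_configure - finish LUN setup via the common mptscsih
* path, then run domain validation once per target unless the target
* is a RAID volume.
*/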
static int mptspi_slave_configure(struct scsi_device *sdev)
{
struct _MPT_SCSI_HOST *hd = shost_priv(sdev->host);
VirtTarget *vtarget = scsi_target(sdev)->hostdata;
int ret;
mptspi_initTarget(hd, vtarget, sdev);
ret = mptscsih_slave_configure(sdev);
if (ret)
return ret;
ddvprintk(hd->ioc, printk(MYIOC_s_DEBUG_FMT "id=%d min_period=0x%02x"
" max_offset=0x%02x max_width=%d\n", hd->ioc->name,
sdev->id, spi_min_period(scsi_target(sdev)),
spi_max_offset(scsi_target(sdev)),
spi_max_width(scsi_target(sdev))));
if ((sdev->channel == 1 ||
!(mptspi_is_raid(hd, sdev->id))) &&
!spi_initial_dv(sdev->sdev_target))
mptspi_dv_device(hd, sdev);
return 0;
}
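/*
* mptspi_qcmd - queuecommand entry point: fail fast with DID_NO_CONNECT
* for dead targets and for channel 1 IDs that are not hidden RAID
* physdisks, then hand off to the common mptscsih path.
*/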
static int
mptspi_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt)
{
struct _MPT_SCSI_HOST *hd = shost_priv(shost);
VirtDevice *vdevice = SCpnt->device->hostdata;
MPT_ADAPTER *ioc = hd->ioc;
if (!vdevice || !vdevice->vtarget) {
SCpnt->result = DID_NO_CONNECT << 16;
scsi_done(SCpnt);
return 0;
}
if (SCpnt->device->channel == 1 &&
mptscsih_is_phys_disk(ioc, 0, SCpnt->device->id) == 0) {
SCpnt->result = DID_NO_CONNECT << 16;
scsi_done(SCpnt);
return 0;
}
if (spi_dv_pending(scsi_target(SCpnt->device)))
ddvprintk(ioc, scsi_print_command(SCpnt));
return mptscsih_qcmd(SCpnt);
}
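/*
* mptspi_slave_destroy - on the last LUN of a target, request
* async/narrow transfers (RequestedParameters = 0) before freeing the
* per-LUN data; RAID volumes are filtered out by the Page 1 writer.
*/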
static void mptspi_slave_destroy(struct scsi_device *sdev)
{
struct scsi_target *starget = scsi_target(sdev);
VirtTarget *vtarget = starget->hostdata;
VirtDevice *vdevice = sdev->hostdata;
/* Will this be the last lun on a non-raid device? */
if (vtarget->num_luns == 1 && vdevice->configured_lun) {
struct _CONFIG_PAGE_SCSI_DEVICE_1 pg1;
/* Async Narrow */
pg1.RequestedParameters = 0;
pg1.Reserved = 0;
pg1.Configuration = 0;
mptspi_write_spi_device_pg1(starget, &pg1);
}
mptscsih_slave_destroy(sdev);
}
static const struct scsi_host_template mptspi_driver_template = {
.module = THIS_MODULE,
.proc_name = "mptspi",
.show_info = mptscsih_show_info,
.name = "MPT SPI Host",
.info = mptscsih_info,
.queuecommand = mptspi_qcmd,
.target_alloc = mptspi_target_alloc,
.slave_alloc = mptspi_slave_alloc,
.slave_configure = mptspi_slave_configure,
.target_destroy = mptspi_target_destroy,
.slave_destroy = mptspi_slave_destroy,
.change_queue_depth = mptscsih_change_queue_depth,
.eh_abort_handler = mptscsih_abort,
.eh_device_reset_handler = mptscsih_dev_reset,
.eh_bus_reset_handler = mptscsih_bus_reset,
.eh_host_reset_handler = mptscsih_host_reset,
.bios_param = mptscsih_bios_param,
.can_queue = MPT_SCSI_CAN_QUEUE,
.this_id = -1,
.sg_tablesize = MPT_SCSI_SG_DEPTH,
.max_sectors = 8192,
.cmd_per_lun = 7,
.shost_groups = mptscsih_host_attr_groups,
};
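/*
* mptspi_write_spi_device_pg1 - write SCSI Device Page 1 (the requested
* transfer parameters) for @starget.  As a quirk, inline data padding
* (IDP) is forced on for tape devices negotiating U320 (factor 8).
*/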
static int mptspi_write_spi_device_pg1(struct scsi_target *starget,
struct _CONFIG_PAGE_SCSI_DEVICE_1 *pass_pg1)
{
struct Scsi_Host *shost = dev_to_shost(&starget->dev);
struct _MPT_SCSI_HOST *hd = shost_priv(shost);
struct _MPT_ADAPTER *ioc = hd->ioc;
struct _CONFIG_PAGE_SCSI_DEVICE_1 *pg1;
dma_addr_t pg1_dma;
int size;
struct _x_config_parms cfg;
struct _CONFIG_PAGE_HEADER hdr;
int err = -EBUSY;
u32 nego_parms;
u32 period;
struct scsi_device *sdev;
int i;
/* don't allow updating nego parameters on RAID devices */
if (starget->channel == 0 &&
mptspi_is_raid(hd, starget->id))
return -1;
size = ioc->spi_data.sdp1length * 4;
pg1 = dma_alloc_coherent(&ioc->pcidev->dev, size, &pg1_dma, GFP_KERNEL);
if (pg1 == NULL) {
starget_printk(KERN_ERR, starget, MYIOC_s_FMT
"dma_alloc_coherent for parameters failed\n", ioc->name);
return -EINVAL;
}
memset(&hdr, 0, sizeof(hdr));
hdr.PageVersion = ioc->spi_data.sdp1version;
hdr.PageLength = ioc->spi_data.sdp1length;
hdr.PageNumber = 1;
hdr.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
memset(&cfg, 0, sizeof(cfg));
cfg.cfghdr.hdr = &hdr;
cfg.physAddr = pg1_dma;
cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
cfg.dir = 1;
cfg.pageAddr = starget->id;
memcpy(pg1, pass_pg1, size);
pg1->Header.PageVersion = hdr.PageVersion;
pg1->Header.PageLength = hdr.PageLength;
pg1->Header.PageNumber = hdr.PageNumber;
pg1->Header.PageType = hdr.PageType;
nego_parms = le32_to_cpu(pg1->RequestedParameters);
period = (nego_parms & MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK) >>
MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
if (period == 8) {
/* Turn on inline data padding for TAPE when running U320 */
for (i = 0 ; i < 16; i++) {
sdev = scsi_device_lookup_by_target(starget, i);
if (sdev && sdev->type == TYPE_TAPE) {
sdev_printk(KERN_DEBUG, sdev, MYIOC_s_FMT
"IDP:ON\n", ioc->name);
nego_parms |= MPI_SCSIDEVPAGE1_RP_IDP;
pg1->RequestedParameters =
cpu_to_le32(nego_parms);
break;
}
}
}
mptspi_print_write_nego(hd, starget, le32_to_cpu(pg1->RequestedParameters));
if (mpt_config(ioc, &cfg)) {
starget_printk(KERN_ERR, starget, MYIOC_s_FMT
"mpt_config failed\n", ioc->name);
goto out_free;
}
err = 0;
out_free:
dma_free_coherent(&ioc->pcidev->dev, size, pg1, pg1_dma);
return err;
}
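/*
* The mptspi_write_* setters below implement the SPI transport
* attributes: each clamps or derives the new value (factor 8 implies
* IU+DT, factor 9 implies DT), rebuilds RequestedParameters with
* mptspi_getRP() and pushes it to the device via a Page 1 write.
*/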
static void mptspi_write_offset(struct scsi_target *starget, int offset)
{
struct _CONFIG_PAGE_SCSI_DEVICE_1 pg1;
u32 nego;
if (offset < 0)
offset = 0;
if (offset > 255)
offset = 255;
if (spi_offset(starget) == -1)
mptspi_read_parameters(starget);
spi_offset(starget) = offset;
nego = mptspi_getRP(starget);
pg1.RequestedParameters = cpu_to_le32(nego);
pg1.Reserved = 0;
pg1.Configuration = 0;
mptspi_write_spi_device_pg1(starget, &pg1);
}
static void mptspi_write_period(struct scsi_target *starget, int period)
{
struct _CONFIG_PAGE_SCSI_DEVICE_1 pg1;
u32 nego;
if (period < 8)
period = 8;
if (period > 255)
period = 255;
if (spi_period(starget) == -1)
mptspi_read_parameters(starget);
if (period == 8) {
spi_iu(starget) = 1;
spi_dt(starget) = 1;
} else if (period == 9) {
spi_dt(starget) = 1;
}
spi_period(starget) = period;
nego = mptspi_getRP(starget);
pg1.RequestedParameters = cpu_to_le32(nego);
pg1.Reserved = 0;
pg1.Configuration = 0;
mptspi_write_spi_device_pg1(starget, &pg1);
}
static void mptspi_write_dt(struct scsi_target *starget, int dt)
{
struct _CONFIG_PAGE_SCSI_DEVICE_1 pg1;
u32 nego;
if (spi_period(starget) == -1)
mptspi_read_parameters(starget);
if (!dt && spi_period(starget) < 10)
spi_period(starget) = 10;
spi_dt(starget) = dt;
nego = mptspi_getRP(starget);
pg1.RequestedParameters = cpu_to_le32(nego);
pg1.Reserved = 0;
pg1.Configuration = 0;
mptspi_write_spi_device_pg1(starget, &pg1);
}
static void mptspi_write_iu(struct scsi_target *starget, int iu)
{
struct _CONFIG_PAGE_SCSI_DEVICE_1 pg1;
u32 nego;
if (spi_period(starget) == -1)
mptspi_read_parameters(starget);
if (!iu && spi_period(starget) < 9)
spi_period(starget) = 9;
spi_iu(starget) = iu;
nego = mptspi_getRP(starget);
pg1.RequestedParameters = cpu_to_le32(nego);
pg1.Reserved = 0;
pg1.Configuration = 0;
mptspi_write_spi_device_pg1(starget, &pg1);
}
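/*
* MPTSPI_SIMPLE_TRANSPORT_PARM(parm) stamps out the trivial transport
* setters that need no side effects beyond a Page 1 write.  For
* example, MPTSPI_SIMPLE_TRANSPORT_PARM(rd_strm) expands to the
* equivalent of:
*
*	static void mptspi_write_rd_strm(struct scsi_target *starget,
*					 int rd_strm)
*	{
*		struct _CONFIG_PAGE_SCSI_DEVICE_1 pg1;
*		u32 nego;
*
*		spi_rd_strm(starget) = rd_strm;
*		nego = mptspi_getRP(starget);
*		pg1.RequestedParameters = cpu_to_le32(nego);
*		pg1.Reserved = 0;
*		pg1.Configuration = 0;
*		mptspi_write_spi_device_pg1(starget, &pg1);
*	}
*/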
#define MPTSPI_SIMPLE_TRANSPORT_PARM(parm) \
static void mptspi_write_##parm(struct scsi_target *starget, int parm)\
{ \
struct _CONFIG_PAGE_SCSI_DEVICE_1 pg1; \
u32 nego; \
\
spi_##parm(starget) = parm; \
\
nego = mptspi_getRP(starget); \
\
pg1.RequestedParameters = cpu_to_le32(nego); \
pg1.Reserved = 0; \
pg1.Configuration = 0; \
\
mptspi_write_spi_device_pg1(starget, &pg1); \
}
MPTSPI_SIMPLE_TRANSPORT_PARM(rd_strm)
MPTSPI_SIMPLE_TRANSPORT_PARM(wr_flow)
MPTSPI_SIMPLE_TRANSPORT_PARM(rti)
MPTSPI_SIMPLE_TRANSPORT_PARM(hold_mcs)
MPTSPI_SIMPLE_TRANSPORT_PARM(pcomp_en)
static void mptspi_write_qas(struct scsi_target *starget, int qas)
{
struct _CONFIG_PAGE_SCSI_DEVICE_1 pg1;
struct Scsi_Host *shost = dev_to_shost(&starget->dev);
struct _MPT_SCSI_HOST *hd = shost_priv(shost);
VirtTarget *vtarget = starget->hostdata;
u32 nego;
if ((vtarget->negoFlags & MPT_TARGET_NO_NEGO_QAS) ||
hd->ioc->spi_data.noQas)
spi_qas(starget) = 0;
else
spi_qas(starget) = qas;
nego = mptspi_getRP(starget);
pg1.RequestedParameters = cpu_to_le32(nego);
pg1.Reserved = 0;
pg1.Configuration = 0;
mptspi_write_spi_device_pg1(starget, &pg1);
}
static void mptspi_write_width(struct scsi_target *starget, int width)
{
struct _CONFIG_PAGE_SCSI_DEVICE_1 pg1;
u32 nego;
if (!width) {
spi_dt(starget) = 0;
if (spi_period(starget) < 10)
spi_period(starget) = 10;
}
spi_width(starget) = width;
nego = mptspi_getRP(starget);
pg1.RequestedParameters = cpu_to_le32(nego);
pg1.Reserved = 0;
pg1.Configuration = 0;
mptspi_write_spi_device_pg1(starget, &pg1);
}
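/*
* Deferred-work plumbing: RAID events can arrive in interrupt context
* (note the GFP_ATOMIC allocation below), so domain validation of a
* new physical disk is bounced to process context through this wrapper.
*/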
struct work_queue_wrapper {
struct work_struct work;
struct _MPT_SCSI_HOST *hd;
int disk;
};
static void mpt_work_wrapper(struct work_struct *work)
{
struct work_queue_wrapper *wqw =
container_of(work, struct work_queue_wrapper, work);
struct _MPT_SCSI_HOST *hd = wqw->hd;
MPT_ADAPTER *ioc = hd->ioc;
struct Scsi_Host *shost = ioc->sh;
struct scsi_device *sdev;
int disk = wqw->disk;
struct _CONFIG_PAGE_IOC_3 *pg3;
kfree(wqw);
mpt_findImVolumes(ioc);
pg3 = ioc->raid_data.pIocPg3;
if (!pg3)
return;
shost_for_each_device(sdev, shost) {
struct scsi_target *starget = scsi_target(sdev);
VirtTarget *vtarget = starget->hostdata;
/* only want to search RAID components */
if (sdev->channel != 1)
continue;
/* The id is the raid PhysDiskNum, even if
* starget->id is the actual target address */
if (vtarget->id != disk)
continue;
starget_printk(KERN_INFO, vtarget->starget, MYIOC_s_FMT
"Integrated RAID requests DV of new device\n", ioc->name);
mptspi_dv_device(hd, sdev);
}
shost_printk(KERN_INFO, shost, MYIOC_s_FMT
"Integrated RAID detects new device %d\n", ioc->name, disk);
scsi_scan_target(&ioc->sh->shost_gendev, 1, disk, 0, SCSI_SCAN_RESCAN);
}
static void mpt_dv_raid(struct _MPT_SCSI_HOST *hd, int disk)
{
struct work_queue_wrapper *wqw = kmalloc(sizeof(*wqw), GFP_ATOMIC);
MPT_ADAPTER *ioc = hd->ioc;
if (!wqw) {
shost_printk(KERN_ERR, ioc->sh, MYIOC_s_FMT
"Failed to act on RAID event for physical disk %d\n",
ioc->name, disk);
return;
}
INIT_WORK(&wqw->work, mpt_work_wrapper);
wqw->hd = hd;
wqw->disk = disk;
schedule_work(&wqw->work);
}
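/*
* mptspi_event_process - SPI-specific event hook: watch for Integrated
* RAID "domain validation needed" events and schedule DV of the new
* physical disk before handing the event to the common handler.
*/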
static int
mptspi_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
{
u8 event = le32_to_cpu(pEvReply->Event) & 0xFF;
struct _MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
if (ioc->bus_type != SPI)
return 0;
if (hd && event == MPI_EVENT_INTEGRATED_RAID) {
int reason = (le32_to_cpu(pEvReply->Data[0]) & 0x00FF0000) >> 16;
if (reason == MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED) {
int disk = (le32_to_cpu(pEvReply->Data[0]) & 0xFF000000) >> 24;
mpt_dv_raid(hd, disk);
}
}
return mptscsih_event_process(ioc, pEvReply);
}
static int
mptspi_deny_binding(struct scsi_target *starget)
{
struct _MPT_SCSI_HOST *hd =
(struct _MPT_SCSI_HOST *)dev_to_shost(starget->dev.parent)->hostdata;
return ((mptspi_is_raid(hd, starget->id)) &&
starget->channel == 0) ? 1 : 0;
}
static struct spi_function_template mptspi_transport_functions = {
.get_offset = mptspi_read_parameters,
.set_offset = mptspi_write_offset,
.show_offset = 1,
.get_period = mptspi_read_parameters,
.set_period = mptspi_write_period,
.show_period = 1,
.get_width = mptspi_read_parameters,
.set_width = mptspi_write_width,
.show_width = 1,
.get_iu = mptspi_read_parameters,
.set_iu = mptspi_write_iu,
.show_iu = 1,
.get_dt = mptspi_read_parameters,
.set_dt = mptspi_write_dt,
.show_dt = 1,
.get_qas = mptspi_read_parameters,
.set_qas = mptspi_write_qas,
.show_qas = 1,
.get_wr_flow = mptspi_read_parameters,
.set_wr_flow = mptspi_write_wr_flow,
.show_wr_flow = 1,
.get_rd_strm = mptspi_read_parameters,
.set_rd_strm = mptspi_write_rd_strm,
.show_rd_strm = 1,
.get_rti = mptspi_read_parameters,
.set_rti = mptspi_write_rti,
.show_rti = 1,
.get_pcomp_en = mptspi_read_parameters,
.set_pcomp_en = mptspi_write_pcomp_en,
.show_pcomp_en = 1,
.get_hold_mcs = mptspi_read_parameters,
.set_hold_mcs = mptspi_write_hold_mcs,
.show_hold_mcs = 1,
.deny_binding = mptspi_deny_binding,
};
/****************************************************************************
* Supported hardware
*/
static struct pci_device_id mptspi_pci_table[] = {
{ PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVID_53C1030,
PCI_ANY_ID, PCI_ANY_ID },
{ PCI_VENDOR_ID_ATTO, MPI_MANUFACTPAGE_DEVID_53C1030,
PCI_ANY_ID, PCI_ANY_ID },
{ PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVID_53C1035,
PCI_ANY_ID, PCI_ANY_ID },
{0} /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, mptspi_pci_table);
/*
* renegotiate for a given target
*/
static void
mptspi_dv_renegotiate_work(struct work_struct *work)
{
struct work_queue_wrapper *wqw =
container_of(work, struct work_queue_wrapper, work);
struct _MPT_SCSI_HOST *hd = wqw->hd;
struct scsi_device *sdev;
struct scsi_target *starget;
struct _CONFIG_PAGE_SCSI_DEVICE_1 pg1;
u32 nego;
MPT_ADAPTER *ioc = hd->ioc;
kfree(wqw);
if (hd->spi_pending) {
shost_for_each_device(sdev, ioc->sh) {
if (hd->spi_pending & (1 << sdev->id))
continue;
starget = scsi_target(sdev);
nego = mptspi_getRP(starget);
pg1.RequestedParameters = cpu_to_le32(nego);
pg1.Reserved = 0;
pg1.Configuration = 0;
mptspi_write_spi_device_pg1(starget, &pg1);
}
} else {
shost_for_each_device(sdev, ioc->sh)
mptspi_dv_device(hd, sdev);
}
}
static void
mptspi_dv_renegotiate(struct _MPT_SCSI_HOST *hd)
{
struct work_queue_wrapper *wqw = kmalloc(sizeof(*wqw), GFP_ATOMIC);
if (!wqw)
return;
INIT_WORK(&wqw->work, mptspi_dv_renegotiate_work);
wqw->hd = hd;
schedule_work(&wqw->work);
}
/*
* spi module reset handler
*/
static int
mptspi_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
{
int rc;
rc = mptscsih_ioc_reset(ioc, reset_phase);
if ((ioc->bus_type != SPI) || (!rc))
return rc;
/* only try to do a renegotiation if we're properly set up
* if we get an ioc fault on bringup, ioc->sh will be NULL */
if (reset_phase == MPT_IOC_POST_RESET &&
ioc->sh) {
struct _MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
mptspi_dv_renegotiate(hd);
}
return rc;
}
#ifdef CONFIG_PM
/*
* spi module resume handler
*/
static int
mptspi_resume(struct pci_dev *pdev)
{
MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
struct _MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
int rc;
rc = mptscsih_resume(pdev);
mptspi_dv_renegotiate(hd);
return rc;
}
#endif
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* mptspi_probe - Installs scsi devices per bus.
* @pdev: Pointer to pci_dev structure
*
* Returns 0 for success, non-zero for failure.
*
*/
static int
mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct Scsi_Host *sh;
MPT_SCSI_HOST *hd;
MPT_ADAPTER *ioc;
unsigned long flags;
int ii;
int numSGE = 0;
int scale;
int ioc_cap;
int error=0;
int r;
if ((r = mpt_attach(pdev, id)) != 0)
return r;
ioc = pci_get_drvdata(pdev);
ioc->DoneCtx = mptspiDoneCtx;
ioc->TaskCtx = mptspiTaskCtx;
ioc->InternalCtx = mptspiInternalCtx;
/* Sanity check on the readiness of the MPT adapter.
*/
if (ioc->last_state != MPI_IOC_STATE_OPERATIONAL) {
printk(MYIOC_s_WARN_FMT
"Skipping because it's not operational!\n",
ioc->name);
error = -ENODEV;
goto out_mptspi_probe;
}
if (!ioc->active) {
printk(MYIOC_s_WARN_FMT "Skipping because it's disabled!\n",
ioc->name);
error = -ENODEV;
goto out_mptspi_probe;
}
/* Sanity check - ensure at least 1 port is INITIATOR capable
*/
ioc_cap = 0;
for (ii = 0; ii < ioc->facts.NumberOfPorts; ii++) {
if (ioc->pfacts[ii].ProtocolFlags &
MPI_PORTFACTS_PROTOCOL_INITIATOR)
ioc_cap++;
}
if (!ioc_cap) {
printk(MYIOC_s_WARN_FMT
"Skipping ioc=%p because SCSI Initiator mode is NOT enabled!\n",
ioc->name, ioc);
return 0;
}
sh = scsi_host_alloc(&mptspi_driver_template, sizeof(MPT_SCSI_HOST));
if (!sh) {
printk(MYIOC_s_WARN_FMT
"Unable to register controller with SCSI subsystem\n",
ioc->name);
error = -1;
goto out_mptspi_probe;
}
/* VMWare emulation doesn't properly implement WRITE_SAME
*/
if (pdev->subsystem_vendor == 0x15AD)
sh->no_write_same = 1;
spin_lock_irqsave(&ioc->FreeQlock, flags);
/* Attach the SCSI Host to the IOC structure
*/
ioc->sh = sh;
sh->io_port = 0;
sh->n_io_port = 0;
sh->irq = 0;
/* set 16 byte cdb's */
sh->max_cmd_len = 16;
/* Yikes! This is important!
* Otherwise, by default, linux
* only scans target IDs 0-7!
* pfactsN->MaxDevices unreliable
* (not supported in early
* versions of the FW).
* max_id = 1 + actual max id,
* max_lun = 1 + actual last lun,
* see hosts.h :o(
*/
sh->max_id = ioc->devices_per_bus;
sh->max_lun = MPT_LAST_LUN + 1;
/*
* If RAID Firmware Detected, setup virtual channel
*/
if (ioc->ir_firmware)
sh->max_channel = 1;
else
sh->max_channel = 0;
sh->this_id = ioc->pfacts[0].PortSCSIID;
/* Required entry.
*/
sh->unique_id = ioc->id;
/* Verify that we won't exceed the maximum
* number of chain buffers
* We can optimize: ZZ = req_sz/sizeof(SGE)
* For 32bit SGE's:
* numSGE = 1 + (ZZ-1)*(maxChain -1) + ZZ
* + (req_sz - 64)/sizeof(SGE)
* A slightly different algorithm is required for
* 64bit SGEs.
*/
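/*
* Worked example with hypothetical numbers (illustration only):
* assuming req_sz = 128 bytes, 12-byte 64-bit SGEs and
* MaxChainDepth = 16, then scale = 128 / 12 = 10 and
* numSGE = (10 - 1) * (16 - 1) + 10 + (128 - 60) / 12
*        = 135 + 10 + 5 = 150 scatter-gather entries.
*/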
scale = ioc->req_sz / ioc->SGE_size;
if (ioc->sg_addr_size == sizeof(u64)) {
numSGE = (scale - 1) *
(ioc->facts.MaxChainDepth-1) + scale +
(ioc->req_sz - 60) / ioc->SGE_size;
} else {
numSGE = 1 + (scale - 1) *
(ioc->facts.MaxChainDepth-1) + scale +
(ioc->req_sz - 64) / ioc->SGE_size;
}
if (numSGE < sh->sg_tablesize) {
/* Reset this value */
dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Resetting sg_tablesize to %d from %d\n",
ioc->name, numSGE, sh->sg_tablesize));
sh->sg_tablesize = numSGE;
}
spin_unlock_irqrestore(&ioc->FreeQlock, flags);
hd = shost_priv(sh);
hd->ioc = ioc;
/* SCSI needs scsi_cmnd lookup table!
* (with size equal to req_depth*PtrSz!)
*/
ioc->ScsiLookup = kcalloc(ioc->req_depth, sizeof(void *), GFP_KERNEL);
if (!ioc->ScsiLookup) {
error = -ENOMEM;
goto out_mptspi_probe;
}
spin_lock_init(&ioc->scsi_lookup_lock);
dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ScsiLookup @ %p\n",
ioc->name, ioc->ScsiLookup));
ioc->spi_data.Saf_Te = mpt_saf_te;
ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"saf_te %x\n",
ioc->name,
mpt_saf_te));
ioc->spi_data.noQas = 0;
hd->last_queue_full = 0;
hd->spi_pending = 0;
/* Some versions of the firmware don't support page 0; without
* that we can't get the parameters */
if (ioc->spi_data.sdp0length != 0)
sh->transportt = mptspi_transport_template;
error = scsi_add_host(sh, &ioc->pcidev->dev);
if (error) {
dprintk(ioc, printk(MYIOC_s_ERR_FMT
"scsi_add_host failed\n", ioc->name));
goto out_mptspi_probe;
}
/*
* issue internal bus reset
*/
if (ioc->spi_data.bus_reset)
mptscsih_IssueTaskMgmt(hd,
MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
0, 0, 0, 0, 5);
scsi_scan_host(sh);
return 0;
out_mptspi_probe:
mptscsih_remove(pdev);
return error;
}
static void mptspi_remove(struct pci_dev *pdev)
{
MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
scsi_remove_host(ioc->sh);
mptscsih_remove(pdev);
}
static struct pci_driver mptspi_driver = {
.name = "mptspi",
.id_table = mptspi_pci_table,
.probe = mptspi_probe,
.remove = mptspi_remove,
.shutdown = mptscsih_shutdown,
#ifdef CONFIG_PM
.suspend = mptscsih_suspend,
.resume = mptspi_resume,
#endif
};
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mptspi_init - Register MPT adapter(s) as SCSI host(s) with SCSI mid-layer.
*
* Returns 0 for success, non-zero for failure.
*/
static int __init
mptspi_init(void)
{
int error;
show_mptmod_ver(my_NAME, my_VERSION);
mptspi_transport_template = spi_attach_transport(&mptspi_transport_functions);
if (!mptspi_transport_template)
return -ENODEV;
mptspiDoneCtx = mpt_register(mptscsih_io_done, MPTSPI_DRIVER,
"mptscsih_io_done");
mptspiTaskCtx = mpt_register(mptscsih_taskmgmt_complete, MPTSPI_DRIVER,
"mptscsih_taskmgmt_complete");
mptspiInternalCtx = mpt_register(mptscsih_scandv_complete,
MPTSPI_DRIVER, "mptscsih_scandv_complete");
mpt_event_register(mptspiDoneCtx, mptspi_event_process);
mpt_reset_register(mptspiDoneCtx, mptspi_ioc_reset);
error = pci_register_driver(&mptspi_driver);
if (error)
spi_release_transport(mptspi_transport_template);
return error;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mptspi_exit - Unregisters MPT adapter(s)
*/
static void __exit
mptspi_exit(void)
{
pci_unregister_driver(&mptspi_driver);
mpt_reset_deregister(mptspiDoneCtx);
mpt_event_deregister(mptspiDoneCtx);
mpt_deregister(mptspiInternalCtx);
mpt_deregister(mptspiTaskCtx);
mpt_deregister(mptspiDoneCtx);
spi_release_transport(mptspi_transport_template);
}
module_init(mptspi_init);
module_exit(mptspi_exit);
| linux-master | drivers/message/fusion/mptspi.c |
/*
* linux/drivers/message/fusion/mptsas.c
* For use with LSI PCI chip/adapter(s)
* running LSI Fusion MPT (Message Passing Technology) firmware.
*
* Copyright (c) 1999-2008 LSI Corporation
* (mailto:[email protected])
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
NO WARRANTY
THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
solely responsible for determining the appropriateness of using and
distributing the Program and assumes all risks associated with its
exercise of rights under this Agreement, including but not limited to
the risks and costs of program errors, damage to or loss of data,
programs or equipment, and unavailability or interruption of operations.
DISCLAIMER OF LIABILITY
NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>
#include <linux/delay.h> /* for mdelay */
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_sas.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_dbg.h>
#include "mptbase.h"
#include "mptscsih.h"
#include "mptsas.h"
#define my_NAME "Fusion MPT SAS Host driver"
#define my_VERSION MPT_LINUX_VERSION_COMMON
#define MYNAM "mptsas"
/*
* Reserved channel for integrated raid
*/
#define MPTSAS_RAID_CHANNEL 1
#define SAS_CONFIG_PAGE_TIMEOUT 30
MODULE_AUTHOR(MODULEAUTHOR);
MODULE_DESCRIPTION(my_NAME);
MODULE_LICENSE("GPL");
MODULE_VERSION(my_VERSION);
static int mpt_pt_clear;
module_param(mpt_pt_clear, int, 0);
MODULE_PARM_DESC(mpt_pt_clear,
" Clear persistency table: enable=1 "
"(default=MPTSCSIH_PT_CLEAR=0)");
/* scsi-mid layer global parameter is max_report_luns, which is 511 */
#define MPTSAS_MAX_LUN (16895)
static int max_lun = MPTSAS_MAX_LUN;
module_param(max_lun, int, 0);
MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
static int mpt_loadtime_max_sectors = 8192;
module_param(mpt_loadtime_max_sectors, int, 0);
MODULE_PARM_DESC(mpt_loadtime_max_sectors,
" Maximum sector define for Host Bus Adaptor.Range 64 to 8192 default=8192");
static u8 mptsasDoneCtx = MPT_MAX_PROTOCOL_DRIVERS;
static u8 mptsasTaskCtx = MPT_MAX_PROTOCOL_DRIVERS;
static u8 mptsasInternalCtx = MPT_MAX_PROTOCOL_DRIVERS; /* Used only for internal commands */
static u8 mptsasMgmtCtx = MPT_MAX_PROTOCOL_DRIVERS;
static u8 mptsasDeviceResetCtx = MPT_MAX_PROTOCOL_DRIVERS;
static void mptsas_firmware_event_work(struct work_struct *work);
static void mptsas_send_sas_event(struct fw_event_work *fw_event);
static void mptsas_send_raid_event(struct fw_event_work *fw_event);
static void mptsas_send_ir2_event(struct fw_event_work *fw_event);
static void mptsas_parse_device_info(struct sas_identify *identify,
struct mptsas_devinfo *device_info);
static inline void mptsas_set_rphy(MPT_ADAPTER *ioc,
struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy);
static struct mptsas_phyinfo *mptsas_find_phyinfo_by_sas_address
(MPT_ADAPTER *ioc, u64 sas_address);
static int mptsas_sas_device_pg0(MPT_ADAPTER *ioc,
struct mptsas_devinfo *device_info, u32 form, u32 form_specific);
static int mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc,
struct mptsas_enclosure *enclosure, u32 form, u32 form_specific);
static int mptsas_add_end_device(MPT_ADAPTER *ioc,
struct mptsas_phyinfo *phy_info);
static void mptsas_del_end_device(MPT_ADAPTER *ioc,
struct mptsas_phyinfo *phy_info);
static void mptsas_send_link_status_event(struct fw_event_work *fw_event);
static struct mptsas_portinfo *mptsas_find_portinfo_by_sas_address
(MPT_ADAPTER *ioc, u64 sas_address);
static void mptsas_expander_delete(MPT_ADAPTER *ioc,
struct mptsas_portinfo *port_info, u8 force);
static void mptsas_send_expander_event(struct fw_event_work *fw_event);
static void mptsas_not_responding_devices(MPT_ADAPTER *ioc);
static void mptsas_scan_sas_topology(MPT_ADAPTER *ioc);
static void mptsas_broadcast_primitive_work(struct fw_event_work *fw_event);
static void mptsas_handle_queue_full_event(struct fw_event_work *fw_event);
static void mptsas_volume_delete(MPT_ADAPTER *ioc, u8 id);
void mptsas_schedule_target_reset(void *ioc);
static void mptsas_print_phy_data(MPT_ADAPTER *ioc,
MPI_SAS_IO_UNIT0_PHY_DATA *phy_data)
{
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"---- IO UNIT PAGE 0 ------------\n", ioc->name));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Handle=0x%X\n",
ioc->name, le16_to_cpu(phy_data->AttachedDeviceHandle)));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Controller Handle=0x%X\n",
ioc->name, le16_to_cpu(phy_data->ControllerDevHandle)));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Port=0x%X\n",
ioc->name, phy_data->Port));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Port Flags=0x%X\n",
ioc->name, phy_data->PortFlags));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "PHY Flags=0x%X\n",
ioc->name, phy_data->PhyFlags));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Negotiated Link Rate=0x%X\n",
ioc->name, phy_data->NegotiatedLinkRate));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Controller PHY Device Info=0x%X\n", ioc->name,
le32_to_cpu(phy_data->ControllerPhyDeviceInfo)));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "DiscoveryStatus=0x%X\n\n",
ioc->name, le32_to_cpu(phy_data->DiscoveryStatus)));
}
static void mptsas_print_phy_pg0(MPT_ADAPTER *ioc, SasPhyPage0_t *pg0)
{
__le64 sas_address;
memcpy(&sas_address, &pg0->SASAddress, sizeof(__le64));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"---- SAS PHY PAGE 0 ------------\n", ioc->name));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Attached Device Handle=0x%X\n", ioc->name,
le16_to_cpu(pg0->AttachedDevHandle)));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "SAS Address=0x%llX\n",
ioc->name, (unsigned long long)le64_to_cpu(sas_address)));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Attached PHY Identifier=0x%X\n", ioc->name,
pg0->AttachedPhyIdentifier));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Attached Device Info=0x%X\n",
ioc->name, le32_to_cpu(pg0->AttachedDeviceInfo)));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Programmed Link Rate=0x%X\n",
ioc->name, pg0->ProgrammedLinkRate));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Change Count=0x%X\n",
ioc->name, pg0->ChangeCount));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "PHY Info=0x%X\n\n",
ioc->name, le32_to_cpu(pg0->PhyInfo)));
}
static void mptsas_print_phy_pg1(MPT_ADAPTER *ioc, SasPhyPage1_t *pg1)
{
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"---- SAS PHY PAGE 1 ------------\n", ioc->name));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Invalid Dword Count=0x%x\n",
ioc->name, pg1->InvalidDwordCount));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Running Disparity Error Count=0x%x\n", ioc->name,
pg1->RunningDisparityErrorCount));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Loss Dword Synch Count=0x%x\n", ioc->name,
pg1->LossDwordSynchCount));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"PHY Reset Problem Count=0x%x\n\n", ioc->name,
pg1->PhyResetProblemCount));
}
static void mptsas_print_device_pg0(MPT_ADAPTER *ioc, SasDevicePage0_t *pg0)
{
__le64 sas_address;
memcpy(&sas_address, &pg0->SASAddress, sizeof(__le64));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"---- SAS DEVICE PAGE 0 ---------\n", ioc->name));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Handle=0x%X\n",
ioc->name, le16_to_cpu(pg0->DevHandle)));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Parent Handle=0x%X\n",
ioc->name, le16_to_cpu(pg0->ParentDevHandle)));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Enclosure Handle=0x%X\n",
ioc->name, le16_to_cpu(pg0->EnclosureHandle)));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Slot=0x%X\n",
ioc->name, le16_to_cpu(pg0->Slot)));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "SAS Address=0x%llX\n",
ioc->name, (unsigned long long)le64_to_cpu(sas_address)));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Target ID=0x%X\n",
ioc->name, pg0->TargetID));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Bus=0x%X\n",
ioc->name, pg0->Bus));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Parent Phy Num=0x%X\n",
ioc->name, pg0->PhyNum));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Access Status=0x%X\n",
ioc->name, le16_to_cpu(pg0->AccessStatus)));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Device Info=0x%X\n",
ioc->name, le32_to_cpu(pg0->DeviceInfo)));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Flags=0x%X\n",
ioc->name, le16_to_cpu(pg0->Flags)));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Physical Port=0x%X\n\n",
ioc->name, pg0->PhysicalPort));
}
static void mptsas_print_expander_pg1(MPT_ADAPTER *ioc, SasExpanderPage1_t *pg1)
{
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"---- SAS EXPANDER PAGE 1 ------------\n", ioc->name));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Physical Port=0x%X\n",
ioc->name, pg1->PhysicalPort));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "PHY Identifier=0x%X\n",
ioc->name, pg1->PhyIdentifier));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Negotiated Link Rate=0x%X\n",
ioc->name, pg1->NegotiatedLinkRate));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Programmed Link Rate=0x%X\n",
ioc->name, pg1->ProgrammedLinkRate));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Hardware Link Rate=0x%X\n",
ioc->name, pg1->HwLinkRate));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Owner Device Handle=0x%X\n",
ioc->name, le16_to_cpu(pg1->OwnerDevHandle)));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Attached Device Handle=0x%X\n\n", ioc->name,
le16_to_cpu(pg1->AttachedDevHandle)));
}
/* inhibit sas firmware event handling */
static void
mptsas_fw_event_off(MPT_ADAPTER *ioc)
{
unsigned long flags;
spin_lock_irqsave(&ioc->fw_event_lock, flags);
ioc->fw_events_off = 1;
ioc->sas_discovery_quiesce_io = 0;
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
/* enable sas firmware event handling */
static void
mptsas_fw_event_on(MPT_ADAPTER *ioc)
{
unsigned long flags;
spin_lock_irqsave(&ioc->fw_event_lock, flags);
ioc->fw_events_off = 0;
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
/* queue a sas firmware event */
static void
mptsas_add_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
unsigned long delay)
{
unsigned long flags;
spin_lock_irqsave(&ioc->fw_event_lock, flags);
list_add_tail(&fw_event->list, &ioc->fw_event_list);
fw_event->users = 1;
INIT_DELAYED_WORK(&fw_event->work, mptsas_firmware_event_work);
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: add (fw_event=0x%p)"
"on cpuid %d\n", ioc->name, __func__,
fw_event, smp_processor_id()));
queue_delayed_work_on(smp_processor_id(), ioc->fw_event_q,
&fw_event->work, delay);
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
/* requeue a sas firmware event */
static void
mptsas_requeue_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
unsigned long delay)
{
unsigned long flags;
spin_lock_irqsave(&ioc->fw_event_lock, flags);
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: reschedule task "
"(fw_event=0x%p)on cpuid %d\n", ioc->name, __func__,
fw_event, smp_processor_id()));
fw_event->retries++;
queue_delayed_work_on(smp_processor_id(), ioc->fw_event_q,
&fw_event->work, msecs_to_jiffies(delay));
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
static void __mptsas_free_fw_event(MPT_ADAPTER *ioc,
struct fw_event_work *fw_event)
{
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: kfree (fw_event=0x%p)\n",
ioc->name, __func__, fw_event));
list_del(&fw_event->list);
kfree(fw_event);
}
/* free memory associated to a sas firmware event */
static void
mptsas_free_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event)
{
unsigned long flags;
spin_lock_irqsave(&ioc->fw_event_lock, flags);
fw_event->users--;
if (!fw_event->users)
__mptsas_free_fw_event(ioc, fw_event);
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
/* walk the firmware event queue, and either stop or wait for
* outstanding events to complete */
static void
mptsas_cleanup_fw_event_q(MPT_ADAPTER *ioc)
{
struct fw_event_work *fw_event;
struct mptsas_target_reset_event *target_reset_list, *n;
MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
unsigned long flags;
/* flush the target_reset_list */
if (!list_empty(&hd->target_reset_list)) {
list_for_each_entry_safe(target_reset_list, n,
&hd->target_reset_list, list) {
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"%s: removing target reset for id=%d\n",
ioc->name, __func__,
target_reset_list->sas_event_data.TargetID));
list_del(&target_reset_list->list);
kfree(target_reset_list);
}
}
if (list_empty(&ioc->fw_event_list) || !ioc->fw_event_q)
return;
spin_lock_irqsave(&ioc->fw_event_lock, flags);
while (!list_empty(&ioc->fw_event_list)) {
bool canceled = false;
fw_event = list_first_entry(&ioc->fw_event_list,
struct fw_event_work, list);
fw_event->users++;
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
if (cancel_delayed_work_sync(&fw_event->work))
canceled = true;
spin_lock_irqsave(&ioc->fw_event_lock, flags);
if (canceled)
fw_event->users--;
fw_event->users--;
WARN_ON_ONCE(fw_event->users);
__mptsas_free_fw_event(ioc, fw_event);
}
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
static inline MPT_ADAPTER *phy_to_ioc(struct sas_phy *phy)
{
struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
return ((MPT_SCSI_HOST *)shost->hostdata)->ioc;
}
static inline MPT_ADAPTER *rphy_to_ioc(struct sas_rphy *rphy)
{
struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent);
return ((MPT_SCSI_HOST *)shost->hostdata)->ioc;
}
/*
* mptsas_find_portinfo_by_handle
*
* This function should be called with the sas_topology_mutex already held
*/
static struct mptsas_portinfo *
mptsas_find_portinfo_by_handle(MPT_ADAPTER *ioc, u16 handle)
{
struct mptsas_portinfo *port_info, *rc = NULL;
int i;
list_for_each_entry(port_info, &ioc->sas_topology, list)
for (i = 0; i < port_info->num_phys; i++)
if (port_info->phy_info[i].identify.handle == handle) {
rc = port_info;
goto out;
}
out:
return rc;
}
/**
* mptsas_find_portinfo_by_sas_address - find and return portinfo for
* this sas_address
* @ioc: Pointer to MPT_ADAPTER structure
* @sas_address: expander sas address
*
* This function should be called with the sas_topology_mutex already held.
*
* Return: %NULL if not found.
**/
static struct mptsas_portinfo *
mptsas_find_portinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address)
{
struct mptsas_portinfo *port_info, *rc = NULL;
int i;
if (sas_address >= ioc->hba_port_sas_addr &&
sas_address < (ioc->hba_port_sas_addr +
ioc->hba_port_num_phy))
return ioc->hba_port_info;
mutex_lock(&ioc->sas_topology_mutex);
list_for_each_entry(port_info, &ioc->sas_topology, list)
for (i = 0; i < port_info->num_phys; i++)
if (port_info->phy_info[i].identify.sas_address ==
sas_address) {
rc = port_info;
goto out;
}
out:
mutex_unlock(&ioc->sas_topology_mutex);
return rc;
}
/*
* Returns true if there is a scsi end device
*/
static inline int
mptsas_is_end_device(struct mptsas_devinfo * attached)
{
if ((attached->sas_address) &&
(attached->device_info &
MPI_SAS_DEVICE_INFO_END_DEVICE) &&
((attached->device_info &
MPI_SAS_DEVICE_INFO_SSP_TARGET) ||
(attached->device_info &
MPI_SAS_DEVICE_INFO_STP_TARGET) ||
(attached->device_info &
MPI_SAS_DEVICE_INFO_SATA_DEVICE)))
return 1;
else
return 0;
}
/* no mutex */
static void
mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
{
struct mptsas_portinfo *port_info;
struct mptsas_phyinfo *phy_info;
u8 i;
if (!port_details)
return;
port_info = port_details->port_info;
phy_info = port_info->phy_info;
dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: [%p]: num_phys=%02d "
"bitmask=0x%016llX\n", ioc->name, __func__, port_details,
port_details->num_phys, (unsigned long long)
port_details->phy_bitmask));
for (i = 0; i < port_info->num_phys; i++, phy_info++) {
if (phy_info->port_details != port_details)
continue;
memset(&phy_info->attached, 0, sizeof(struct mptsas_devinfo));
mptsas_set_rphy(ioc, phy_info, NULL);
phy_info->port_details = NULL;
}
kfree(port_details);
}
static inline struct sas_rphy *
mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
{
if (phy_info->port_details)
return phy_info->port_details->rphy;
else
return NULL;
}
static inline void
mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
{
if (phy_info->port_details) {
phy_info->port_details->rphy = rphy;
dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
ioc->name, rphy));
}
if (rphy) {
dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
&rphy->dev, MYIOC_s_FMT "add:", ioc->name));
dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
ioc->name, rphy, rphy->dev.release));
}
}
static inline struct sas_port *
mptsas_get_port(struct mptsas_phyinfo *phy_info)
{
if (phy_info->port_details)
return phy_info->port_details->port;
else
return NULL;
}
static inline void
mptsas_set_port(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_port *port)
{
if (phy_info->port_details)
phy_info->port_details->port = port;
if (port) {
dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
&port->dev, MYIOC_s_FMT "add:", ioc->name));
dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "port=%p release=%p\n",
ioc->name, port, port->dev.release));
}
}
static inline struct scsi_target *
mptsas_get_starget(struct mptsas_phyinfo *phy_info)
{
if (phy_info->port_details)
return phy_info->port_details->starget;
else
return NULL;
}
static inline void
mptsas_set_starget(struct mptsas_phyinfo *phy_info, struct scsi_target *
starget)
{
if (phy_info->port_details)
phy_info->port_details->starget = starget;
}
/**
* mptsas_add_device_component - adds a new device component to our lists
* @ioc: Pointer to MPT_ADAPTER structure
* @channel: channel number
* @id: Logical Target ID for reset (if appropriate)
* @sas_address: expander sas address
* @device_info: specific bits (flags) for devices
* @slot: enclosure slot ID
* @enclosure_logical_id: enclosure WWN
*
**/
static void
mptsas_add_device_component(MPT_ADAPTER *ioc, u8 channel, u8 id,
u64 sas_address, u32 device_info, u16 slot, u64 enclosure_logical_id)
{
struct mptsas_device_info *sas_info, *next;
struct scsi_device *sdev;
struct scsi_target *starget;
struct sas_rphy *rphy;
/*
* Delete all matching devices out of the list
*/
mutex_lock(&ioc->sas_device_info_mutex);
list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list,
list) {
if (!sas_info->is_logical_volume &&
(sas_info->sas_address == sas_address ||
(sas_info->fw.channel == channel &&
sas_info->fw.id == id))) {
list_del(&sas_info->list);
kfree(sas_info);
}
}
sas_info = kzalloc(sizeof(struct mptsas_device_info), GFP_KERNEL);
if (!sas_info)
goto out;
/*
* Set Firmware mapping
*/
sas_info->fw.id = id;
sas_info->fw.channel = channel;
sas_info->sas_address = sas_address;
sas_info->device_info = device_info;
sas_info->slot = slot;
sas_info->enclosure_logical_id = enclosure_logical_id;
INIT_LIST_HEAD(&sas_info->list);
list_add_tail(&sas_info->list, &ioc->sas_device_info_list);
/*
* Set OS mapping
*/
shost_for_each_device(sdev, ioc->sh) {
starget = scsi_target(sdev);
rphy = dev_to_rphy(starget->dev.parent);
if (rphy->identify.sas_address == sas_address) {
sas_info->os.id = starget->id;
sas_info->os.channel = starget->channel;
}
}
out:
mutex_unlock(&ioc->sas_device_info_mutex);
return;
}
/**
* mptsas_add_device_component_by_fw - adds a new device component by FW ID
* @ioc: Pointer to MPT_ADAPTER structure
* @channel: channel number
* @id: Logical Target ID
*
**/
static void
mptsas_add_device_component_by_fw(MPT_ADAPTER *ioc, u8 channel, u8 id)
{
struct mptsas_devinfo sas_device;
struct mptsas_enclosure enclosure_info;
int rc;
rc = mptsas_sas_device_pg0(ioc, &sas_device,
(MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
(channel << 8) + id);
if (rc)
return;
memset(&enclosure_info, 0, sizeof(struct mptsas_enclosure));
mptsas_sas_enclosure_pg0(ioc, &enclosure_info,
(MPI_SAS_ENCLOS_PGAD_FORM_HANDLE <<
MPI_SAS_ENCLOS_PGAD_FORM_SHIFT),
sas_device.handle_enclosure);
mptsas_add_device_component(ioc, sas_device.channel,
sas_device.id, sas_device.sas_address, sas_device.device_info,
sas_device.slot, enclosure_info.enclosure_logical_id);
}
/**
* mptsas_add_device_component_starget_ir - Handle Integrated RAID, adding each individual device to list
* @ioc: Pointer to MPT_ADAPTER structure
* @starget: SCSI target for this SCSI device
*
**/
static void
mptsas_add_device_component_starget_ir(MPT_ADAPTER *ioc,
struct scsi_target *starget)
{
CONFIGPARMS cfg;
ConfigPageHeader_t hdr;
dma_addr_t dma_handle;
pRaidVolumePage0_t buffer = NULL;
int i;
RaidPhysDiskPage0_t phys_disk;
struct mptsas_device_info *sas_info, *next;
memset(&cfg, 0 , sizeof(CONFIGPARMS));
memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_VOLUME;
/* assumption that all volumes on channel = 0 */
cfg.pageAddr = starget->id;
cfg.cfghdr.hdr = &hdr;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
if (mpt_config(ioc, &cfg) != 0)
goto out;
if (!hdr.PageLength)
goto out;
buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
&dma_handle, GFP_KERNEL);
if (!buffer)
goto out;
cfg.physAddr = dma_handle;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
if (mpt_config(ioc, &cfg) != 0)
goto out;
if (!buffer->NumPhysDisks)
goto out;
/*
* Adding entry for hidden components
*/
for (i = 0; i < buffer->NumPhysDisks; i++) {
if (mpt_raid_phys_disk_pg0(ioc,
buffer->PhysDisk[i].PhysDiskNum, &phys_disk) != 0)
continue;
mptsas_add_device_component_by_fw(ioc, phys_disk.PhysDiskBus,
phys_disk.PhysDiskID);
mutex_lock(&ioc->sas_device_info_mutex);
list_for_each_entry(sas_info, &ioc->sas_device_info_list,
list) {
if (!sas_info->is_logical_volume &&
(sas_info->fw.channel == phys_disk.PhysDiskBus &&
sas_info->fw.id == phys_disk.PhysDiskID)) {
sas_info->is_hidden_raid_component = 1;
sas_info->volume_id = starget->id;
}
}
mutex_unlock(&ioc->sas_device_info_mutex);
}
/*
* Delete all matching devices out of the list
*/
mutex_lock(&ioc->sas_device_info_mutex);
list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list,
list) {
if (sas_info->is_logical_volume && sas_info->fw.id ==
starget->id) {
list_del(&sas_info->list);
kfree(sas_info);
}
}
sas_info = kzalloc(sizeof(struct mptsas_device_info), GFP_KERNEL);
if (sas_info) {
sas_info->fw.id = starget->id;
sas_info->os.id = starget->id;
sas_info->os.channel = starget->channel;
sas_info->is_logical_volume = 1;
INIT_LIST_HEAD(&sas_info->list);
list_add_tail(&sas_info->list, &ioc->sas_device_info_list);
}
mutex_unlock(&ioc->sas_device_info_mutex);
out:
if (buffer)
dma_free_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
buffer, dma_handle);
}
/**
* mptsas_add_device_component_starget - adds a SCSI target device component
* @ioc: Pointer to MPT_ADAPTER structure
* @starget: SCSI target for this SCSI device
*
**/
static void
mptsas_add_device_component_starget(MPT_ADAPTER *ioc,
struct scsi_target *starget)
{
struct sas_rphy *rphy;
struct mptsas_phyinfo *phy_info = NULL;
struct mptsas_enclosure enclosure_info;
rphy = dev_to_rphy(starget->dev.parent);
phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
rphy->identify.sas_address);
if (!phy_info)
return;
memset(&enclosure_info, 0, sizeof(struct mptsas_enclosure));
mptsas_sas_enclosure_pg0(ioc, &enclosure_info,
(MPI_SAS_ENCLOS_PGAD_FORM_HANDLE <<
MPI_SAS_ENCLOS_PGAD_FORM_SHIFT),
phy_info->attached.handle_enclosure);
mptsas_add_device_component(ioc, phy_info->attached.channel,
phy_info->attached.id, phy_info->attached.sas_address,
phy_info->attached.device_info,
phy_info->attached.slot, enclosure_info.enclosure_logical_id);
}
/**
* mptsas_del_device_component_by_os - mark a removed device's entry as cached
* @ioc: Pointer to MPT_ADAPTER structure
* @channel: OS-mapped channel number
* @id: Logical Target ID
*
**/
static void
mptsas_del_device_component_by_os(MPT_ADAPTER *ioc, u8 channel, u8 id)
{
struct mptsas_device_info *sas_info, *next;
/*
* Set is_cached flag
*/
list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list,
list) {
if (sas_info->os.channel == channel && sas_info->os.id == id)
sas_info->is_cached = 1;
}
}
/**
* mptsas_del_device_components - free every entry in the device info list
* @ioc: Pointer to MPT_ADAPTER structure
*
**/
static void
mptsas_del_device_components(MPT_ADAPTER *ioc)
{
struct mptsas_device_info *sas_info, *next;
mutex_lock(&ioc->sas_device_info_mutex);
list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list,
list) {
list_del(&sas_info->list);
kfree(sas_info);
}
mutex_unlock(&ioc->sas_device_info_mutex);
}
/*
* mptsas_setup_wide_ports
*
* Updates for new and existing narrow/wide port configuration
* in the sas_topology
*/
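/*
* A wide port is a group of phys attached to the same SAS address;
* membership is tracked in port_details->phy_bitmask, one bit per phy
* id. As an illustrative example: four phys 0-3 cabled to the same
* expander yield num_phys = 4 and phy_bitmask = 0xF.
*/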
static void
mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
{
struct mptsas_portinfo_details *port_details;
struct mptsas_phyinfo *phy_info, *phy_info_cmp;
u64 sas_address;
int i, j;
mutex_lock(&ioc->sas_topology_mutex);
phy_info = port_info->phy_info;
for (i = 0 ; i < port_info->num_phys ; i++, phy_info++) {
if (phy_info->attached.handle)
continue;
port_details = phy_info->port_details;
if (!port_details)
continue;
if (port_details->num_phys < 2)
continue;
/*
* Removing a phy from a port, letting the last
* phy be removed by firmware events.
*/
dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"%s: [%p]: deleting phy = %d\n",
ioc->name, __func__, port_details, i));
port_details->num_phys--;
port_details->phy_bitmask &= ~(1 << phy_info->phy_id);
memset(&phy_info->attached, 0, sizeof(struct mptsas_devinfo));
if (phy_info->phy) {
devtprintk(ioc, dev_printk(KERN_DEBUG,
&phy_info->phy->dev, MYIOC_s_FMT
"delete phy %d, phy-obj (0x%p)\n", ioc->name,
phy_info->phy_id, phy_info->phy));
sas_port_delete_phy(port_details->port, phy_info->phy);
}
phy_info->port_details = NULL;
}
/*
* Populate and refresh the tree
*/
phy_info = port_info->phy_info;
for (i = 0 ; i < port_info->num_phys ; i++, phy_info++) {
sas_address = phy_info->attached.sas_address;
dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "phy_id=%d sas_address=0x%018llX\n",
ioc->name, i, (unsigned long long)sas_address));
if (!sas_address)
continue;
port_details = phy_info->port_details;
/*
* Forming a port
*/
if (!port_details) {
port_details = kzalloc(sizeof(struct
mptsas_portinfo_details), GFP_KERNEL);
if (!port_details)
goto out;
port_details->num_phys = 1;
port_details->port_info = port_info;
if (phy_info->phy_id < 64)
port_details->phy_bitmask |=
(1 << phy_info->phy_id);
phy_info->sas_port_add_phy = 1;
dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "\t\tForming port\n\t\t"
"phy_id=%d sas_address=0x%018llX\n",
ioc->name, i, (unsigned long long)sas_address));
phy_info->port_details = port_details;
}
if (i == port_info->num_phys - 1)
continue;
phy_info_cmp = &port_info->phy_info[i + 1];
for (j = i + 1 ; j < port_info->num_phys ; j++,
phy_info_cmp++) {
if (!phy_info_cmp->attached.sas_address)
continue;
if (sas_address != phy_info_cmp->attached.sas_address)
continue;
if (phy_info_cmp->port_details == port_details)
continue;
dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"\t\tphy_id=%d sas_address=0x%018llX\n",
ioc->name, j, (unsigned long long)
phy_info_cmp->attached.sas_address));
if (phy_info_cmp->port_details) {
port_details->rphy =
mptsas_get_rphy(phy_info_cmp);
port_details->port =
mptsas_get_port(phy_info_cmp);
port_details->starget =
mptsas_get_starget(phy_info_cmp);
port_details->num_phys =
phy_info_cmp->port_details->num_phys;
if (!phy_info_cmp->port_details->num_phys)
kfree(phy_info_cmp->port_details);
} else
phy_info_cmp->sas_port_add_phy = 1;
/*
* Adding a phy to a port
*/
phy_info_cmp->port_details = port_details;
if (phy_info_cmp->phy_id < 64)
port_details->phy_bitmask |=
(1 << phy_info_cmp->phy_id);
port_details->num_phys++;
}
}
out:
for (i = 0; i < port_info->num_phys; i++) {
port_details = port_info->phy_info[i].port_details;
if (!port_details)
continue;
dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"%s: [%p]: phy_id=%02d num_phys=%02d "
"bitmask=0x%016llX\n", ioc->name, __func__,
port_details, i, port_details->num_phys,
(unsigned long long)port_details->phy_bitmask));
dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "\t\tport = %p rphy=%p\n",
ioc->name, port_details->port, port_details->rphy));
}
dsaswideprintk(ioc, printk("\n"));
mutex_unlock(&ioc->sas_topology_mutex);
}
/**
* mptsas_find_vtarget - find a virtual target device by channel and target id
* (RAID volumes and hidden RAID components are skipped)
*
* @ioc: Pointer to MPT_ADAPTER structure
* @channel: channel number
* @id: Logical Target ID
*
**/
static VirtTarget *
mptsas_find_vtarget(MPT_ADAPTER *ioc, u8 channel, u8 id)
{
struct scsi_device *sdev;
VirtDevice *vdevice;
VirtTarget *vtarget = NULL;
shost_for_each_device(sdev, ioc->sh) {
vdevice = sdev->hostdata;
if ((vdevice == NULL) ||
(vdevice->vtarget == NULL))
continue;
if ((vdevice->vtarget->tflags &
MPT_TARGET_FLAGS_RAID_COMPONENT ||
vdevice->vtarget->raidVolume))
continue;
if (vdevice->vtarget->id == id &&
vdevice->vtarget->channel == channel)
vtarget = vdevice->vtarget;
}
return vtarget;
}
static void
mptsas_queue_device_delete(MPT_ADAPTER *ioc,
MpiEventDataSasDeviceStatusChange_t *sas_event_data)
{
struct fw_event_work *fw_event;
fw_event = kzalloc(sizeof(*fw_event) +
sizeof(MpiEventDataSasDeviceStatusChange_t),
GFP_ATOMIC);
if (!fw_event) {
printk(MYIOC_s_WARN_FMT "%s: failed at (line=%d)\n",
ioc->name, __func__, __LINE__);
return;
}
memcpy(fw_event->event_data, sas_event_data,
sizeof(MpiEventDataSasDeviceStatusChange_t));
fw_event->event = MPI_EVENT_SAS_DEVICE_STATUS_CHANGE;
fw_event->ioc = ioc;
mptsas_add_fw_event(ioc, fw_event, msecs_to_jiffies(1));
}
static void
mptsas_queue_rescan(MPT_ADAPTER *ioc)
{
struct fw_event_work *fw_event;
fw_event = kzalloc(sizeof(*fw_event), GFP_ATOMIC);
if (!fw_event) {
printk(MYIOC_s_WARN_FMT "%s: failed at (line=%d)\n",
ioc->name, __func__, __LINE__);
return;
}
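/*
* event = -1 is an internal marker rather than an MPI event code;
* mptsas_firmware_event_work() treats it as "rescan the topology".
*/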
fw_event->event = -1;
fw_event->ioc = ioc;
mptsas_add_fw_event(ioc, fw_event, msecs_to_jiffies(1));
}
/**
* mptsas_target_reset - issue a TARGET_RESET to an end device via handshaking
*
* @ioc: Pointer to MPT_ADAPTER structure
* @channel: channel number
* @id: Logical Target ID for reset
*
* Return: 1 on success, 0 on failure.
*
**/
static int
mptsas_target_reset(MPT_ADAPTER *ioc, u8 channel, u8 id)
{
MPT_FRAME_HDR *mf;
SCSITaskMgmt_t *pScsiTm;
if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0)
return 0;
mf = mpt_get_msg_frame(mptsasDeviceResetCtx, ioc);
if (mf == NULL) {
dfailprintk(ioc, printk(MYIOC_s_WARN_FMT
"%s, no msg frames @%d!!\n", ioc->name,
__func__, __LINE__));
goto out_fail;
}
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request (mf=%p)\n",
ioc->name, mf));
/* Format the request */
pScsiTm = (SCSITaskMgmt_t *) mf;
memset(pScsiTm, 0, sizeof(SCSITaskMgmt_t));
pScsiTm->TargetID = id;
pScsiTm->Bus = channel;
pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
pScsiTm->TaskType = MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
pScsiTm->MsgFlags = MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION;
DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)mf);
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"TaskMgmt type=%d (sas device delete) fw_channel = %d fw_id = %d)\n",
ioc->name, MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET, channel, id));
mpt_put_msg_frame_hi_pri(mptsasDeviceResetCtx, ioc, mf);
return 1;
out_fail:
mpt_clear_taskmgmt_in_progress_flag(ioc);
return 0;
}
static void
mptsas_block_io_sdev(struct scsi_device *sdev, void *data)
{
scsi_device_set_state(sdev, SDEV_BLOCK);
}
static void
mptsas_block_io_starget(struct scsi_target *starget)
{
if (starget)
starget_for_each_device(starget, NULL, mptsas_block_io_sdev);
}
/**
* mptsas_target_reset_queue - queue a target reset
*
* @ioc: Pointer to MPT_ADAPTER structure
* @sas_event_data: SAS Device Status Change Event data
*
* Queue a TARGET_RESET requested by a NOT_RESPONDING firmware event:
* the request is put on a linked list and issued immediately unless
* another target reset is already outstanding.
*
**/
static void
mptsas_target_reset_queue(MPT_ADAPTER *ioc,
EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data)
{
MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
VirtTarget *vtarget = NULL;
struct mptsas_target_reset_event *target_reset_list;
u8 id, channel;
id = sas_event_data->TargetID;
channel = sas_event_data->Bus;
vtarget = mptsas_find_vtarget(ioc, channel, id);
if (vtarget) {
mptsas_block_io_starget(vtarget->starget);
vtarget->deleted = 1; /* block IO */
}
target_reset_list = kzalloc(sizeof(struct mptsas_target_reset_event),
GFP_ATOMIC);
if (!target_reset_list) {
dfailprintk(ioc, printk(MYIOC_s_WARN_FMT
"%s, failed to allocate mem @%d..!!\n",
ioc->name, __func__, __LINE__));
return;
}
memcpy(&target_reset_list->sas_event_data, sas_event_data,
sizeof(*sas_event_data));
list_add_tail(&target_reset_list->list, &hd->target_reset_list);
target_reset_list->time_count = jiffies;
if (mptsas_target_reset(ioc, channel, id)) {
target_reset_list->target_reset_issued = 1;
}
}
/**
* mptsas_schedule_target_reset- send pending target reset
* @iocp: per adapter object
*
* This function will delete scheduled target reset from the list and
* try to send next target reset. This will be called from completion
* context of any Task management command.
*/
void
mptsas_schedule_target_reset(void *iocp)
{
MPT_ADAPTER *ioc = (MPT_ADAPTER *)(iocp);
MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
struct list_head *head = &hd->target_reset_list;
struct mptsas_target_reset_event *target_reset_list;
u8 id, channel;
/*
* issue target reset to next device in the queue
*/
if (list_empty(head))
return;
target_reset_list = list_entry(head->next,
struct mptsas_target_reset_event, list);
id = target_reset_list->sas_event_data.TargetID;
channel = target_reset_list->sas_event_data.Bus;
target_reset_list->time_count = jiffies;
if (mptsas_target_reset(ioc, channel, id))
target_reset_list->target_reset_issued = 1;
return;
}
/**
* mptsas_taskmgmt_complete - complete SAS task management function
* @ioc: Pointer to MPT_ADAPTER structure
* @mf: MPT message frame
* @mr: SCSI Task Management Reply structure ptr (may be %NULL)
*
* Completion for TARGET_RESET after NOT_RESPONDING_EVENT, enable work
* queue to finish off removing device from upper layers, then send next
* TARGET_RESET in the queue.
**/
static int
mptsas_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
{
MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
struct list_head *head = &hd->target_reset_list;
u8 id, channel;
struct mptsas_target_reset_event *target_reset_list;
SCSITaskMgmtReply_t *pScsiTmReply;
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt completed: "
"(mf = %p, mr = %p)\n", ioc->name, mf, mr));
pScsiTmReply = (SCSITaskMgmtReply_t *)mr;
if (!pScsiTmReply)
return 0;
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"\tTaskMgmt completed: fw_channel = %d, fw_id = %d,\n"
"\ttask_type = 0x%02X, iocstatus = 0x%04X "
"loginfo = 0x%08X,\n\tresponse_code = 0x%02X, "
"term_cmnds = %d\n", ioc->name,
pScsiTmReply->Bus, pScsiTmReply->TargetID,
pScsiTmReply->TaskType,
le16_to_cpu(pScsiTmReply->IOCStatus),
le32_to_cpu(pScsiTmReply->IOCLogInfo),
pScsiTmReply->ResponseCode,
le32_to_cpu(pScsiTmReply->TerminationCount)));
if (pScsiTmReply->ResponseCode)
mptscsih_taskmgmt_response_code(ioc,
pScsiTmReply->ResponseCode);
if (pScsiTmReply->TaskType ==
MPI_SCSITASKMGMT_TASKTYPE_QUERY_TASK || pScsiTmReply->TaskType ==
MPI_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET) {
ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
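/* MsgLength is in 32-bit dwords; cap the copy at one frame */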
memcpy(ioc->taskmgmt_cmds.reply, mr,
min(MPT_DEFAULT_FRAME_SIZE, 4 * mr->u.reply.MsgLength));
if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) {
ioc->taskmgmt_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
complete(&ioc->taskmgmt_cmds.done);
return 1;
}
return 0;
}
mpt_clear_taskmgmt_in_progress_flag(ioc);
if (list_empty(head))
return 1;
target_reset_list = list_entry(head->next,
struct mptsas_target_reset_event, list);
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"TaskMgmt: completed (%d seconds)\n",
ioc->name, jiffies_to_msecs(jiffies -
target_reset_list->time_count)/1000));
id = pScsiTmReply->TargetID;
channel = pScsiTmReply->Bus;
target_reset_list->time_count = jiffies;
/*
* retry target reset
*/
if (!target_reset_list->target_reset_issued) {
if (mptsas_target_reset(ioc, channel, id))
target_reset_list->target_reset_issued = 1;
return 1;
}
/*
* enable work queue to remove device from upper layers
*/
list_del(&target_reset_list->list);
if (!ioc->fw_events_off)
mptsas_queue_device_delete(ioc,
&target_reset_list->sas_event_data);
ioc->schedule_target_reset(ioc);
return 1;
}
/**
* mptsas_ioc_reset - perform SAS-specific handling for an IOC reset phase
*
* @ioc: Pointer to MPT_ADAPTER structure
* @reset_phase: MPT_IOC_SETUP_RESET, MPT_IOC_PRE_RESET or MPT_IOC_POST_RESET
*
**/
static int
mptsas_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
{
MPT_SCSI_HOST *hd;
int rc;
rc = mptscsih_ioc_reset(ioc, reset_phase);
if ((ioc->bus_type != SAS) || (!rc))
return rc;
hd = shost_priv(ioc->sh);
if (!hd->ioc)
goto out;
switch (reset_phase) {
case MPT_IOC_SETUP_RESET:
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__));
mptsas_fw_event_off(ioc);
break;
case MPT_IOC_PRE_RESET:
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__));
break;
case MPT_IOC_POST_RESET:
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"%s: MPT_IOC_POST_RESET\n", ioc->name, __func__));
if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_PENDING) {
ioc->sas_mgmt.status |= MPT_MGMT_STATUS_DID_IOCRESET;
complete(&ioc->sas_mgmt.done);
}
mptsas_cleanup_fw_event_q(ioc);
mptsas_queue_rescan(ioc);
break;
default:
break;
}
out:
return rc;
}
/**
* enum device_state - TUR device state
* @DEVICE_RETRY: need to retry the TUR
* @DEVICE_ERROR: TUR returned an error, don't add the device
* @DEVICE_READY: device can be added
*
*/
enum device_state {
DEVICE_RETRY,
DEVICE_ERROR,
DEVICE_READY,
};
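/*
* The mptsas_*_pg* helpers below all follow the same two-step config
* page protocol: a PAGE_HEADER action first to learn the page length,
* then a PAGE_READ_CURRENT action into a DMA-coherent buffer of
* ExtPageLength * 4 bytes (page lengths are reported in 32-bit dwords).
*/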
static int
mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure,
u32 form, u32 form_specific)
{
ConfigExtendedPageHeader_t hdr;
CONFIGPARMS cfg;
SasEnclosurePage0_t *buffer;
dma_addr_t dma_handle;
int error;
__le64 le_identifier;
memset(&hdr, 0, sizeof(hdr));
hdr.PageVersion = MPI_SASENCLOSURE0_PAGEVERSION;
hdr.PageNumber = 0;
hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_ENCLOSURE;
cfg.cfghdr.ehdr = &hdr;
cfg.physAddr = -1;
cfg.pageAddr = form + form_specific;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.dir = 0; /* read */
cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
error = mpt_config(ioc, &cfg);
if (error)
goto out;
if (!hdr.ExtPageLength) {
error = -ENXIO;
goto out;
}
buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4,
&dma_handle, GFP_KERNEL);
if (!buffer) {
error = -ENOMEM;
goto out;
}
cfg.physAddr = dma_handle;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
error = mpt_config(ioc, &cfg);
if (error)
goto out_free_consistent;
/* save config data */
memcpy(&le_identifier, &buffer->EnclosureLogicalID, sizeof(__le64));
enclosure->enclosure_logical_id = le64_to_cpu(le_identifier);
enclosure->enclosure_handle = le16_to_cpu(buffer->EnclosureHandle);
enclosure->flags = le16_to_cpu(buffer->Flags);
enclosure->num_slot = le16_to_cpu(buffer->NumSlots);
enclosure->start_slot = le16_to_cpu(buffer->StartSlot);
enclosure->start_id = buffer->StartTargetID;
enclosure->start_channel = buffer->StartBus;
enclosure->sep_id = buffer->SEPTargetID;
enclosure->sep_channel = buffer->SEPBus;
out_free_consistent:
dma_free_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4, buffer,
dma_handle);
out:
return error;
}
/**
* mptsas_add_end_device - report a new end device to sas transport layer
* @ioc: Pointer to MPT_ADAPTER structure
* @phy_info: describes attached device
*
* Return: 0 on success, non-zero on failure.
*
**/
static int
mptsas_add_end_device(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info)
{
struct sas_rphy *rphy;
struct sas_port *port;
struct sas_identify identify;
char *ds = NULL;
u8 fw_id;
if (!phy_info) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
__func__, __LINE__));
return 1;
}
fw_id = phy_info->attached.id;
if (mptsas_get_rphy(phy_info)) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: fw_id=%d exit at line=%d\n", ioc->name,
__func__, fw_id, __LINE__));
return 2;
}
port = mptsas_get_port(phy_info);
if (!port) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: fw_id=%d exit at line=%d\n", ioc->name,
__func__, fw_id, __LINE__));
return 3;
}
if (phy_info->attached.device_info &
MPI_SAS_DEVICE_INFO_SSP_TARGET)
ds = "ssp";
if (phy_info->attached.device_info &
MPI_SAS_DEVICE_INFO_STP_TARGET)
ds = "stp";
if (phy_info->attached.device_info &
MPI_SAS_DEVICE_INFO_SATA_DEVICE)
ds = "sata";
printk(MYIOC_s_INFO_FMT "attaching %s device: fw_channel %d, fw_id %d,"
" phy %d, sas_addr 0x%llx\n", ioc->name, ds,
phy_info->attached.channel, phy_info->attached.id,
phy_info->attached.phy_id, (unsigned long long)
phy_info->attached.sas_address);
mptsas_parse_device_info(&identify, &phy_info->attached);
rphy = sas_end_device_alloc(port);
if (!rphy) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: fw_id=%d exit at line=%d\n", ioc->name,
__func__, fw_id, __LINE__));
return 5; /* non-fatal: an rphy can be added later */
}
rphy->identify = identify;
if (sas_rphy_add(rphy)) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: fw_id=%d exit at line=%d\n", ioc->name,
__func__, fw_id, __LINE__));
sas_rphy_free(rphy);
return 6;
}
mptsas_set_rphy(ioc, phy_info, rphy);
return 0;
}
/**
* mptsas_del_end_device - report a deleted end device to sas transport layer
* @ioc: Pointer to MPT_ADAPTER structure
* @phy_info: describes attached device
*
**/
static void
mptsas_del_end_device(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info)
{
struct sas_rphy *rphy;
struct sas_port *port;
struct mptsas_portinfo *port_info;
struct mptsas_phyinfo *phy_info_parent;
int i;
char *ds = NULL;
u8 fw_id;
u64 sas_address;
if (!phy_info)
return;
fw_id = phy_info->attached.id;
sas_address = phy_info->attached.sas_address;
if (!phy_info->port_details) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: fw_id=%d exit at line=%d\n", ioc->name,
__func__, fw_id, __LINE__));
return;
}
rphy = mptsas_get_rphy(phy_info);
if (!rphy) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: fw_id=%d exit at line=%d\n", ioc->name,
__func__, fw_id, __LINE__));
return;
}
if (phy_info->attached.device_info & MPI_SAS_DEVICE_INFO_SSP_INITIATOR
|| phy_info->attached.device_info
& MPI_SAS_DEVICE_INFO_SMP_INITIATOR
|| phy_info->attached.device_info
& MPI_SAS_DEVICE_INFO_STP_INITIATOR)
ds = "initiator";
if (phy_info->attached.device_info &
MPI_SAS_DEVICE_INFO_SSP_TARGET)
ds = "ssp";
if (phy_info->attached.device_info &
MPI_SAS_DEVICE_INFO_STP_TARGET)
ds = "stp";
if (phy_info->attached.device_info &
MPI_SAS_DEVICE_INFO_SATA_DEVICE)
ds = "sata";
dev_printk(KERN_DEBUG, &rphy->dev, MYIOC_s_FMT
"removing %s device: fw_channel %d, fw_id %d, phy %d,"
"sas_addr 0x%llx\n", ioc->name, ds, phy_info->attached.channel,
phy_info->attached.id, phy_info->attached.phy_id,
(unsigned long long) sas_address);
port = mptsas_get_port(phy_info);
if (!port) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: fw_id=%d exit at line=%d\n", ioc->name,
__func__, fw_id, __LINE__));
return;
}
port_info = phy_info->portinfo;
phy_info_parent = port_info->phy_info;
for (i = 0; i < port_info->num_phys; i++, phy_info_parent++) {
if (!phy_info_parent->phy)
continue;
if (phy_info_parent->attached.sas_address !=
sas_address)
continue;
dev_printk(KERN_DEBUG, &phy_info_parent->phy->dev,
MYIOC_s_FMT "delete phy %d, phy-obj (0x%p)\n",
ioc->name, phy_info_parent->phy_id,
phy_info_parent->phy);
sas_port_delete_phy(port, phy_info_parent->phy);
}
dev_printk(KERN_DEBUG, &port->dev, MYIOC_s_FMT
"delete port %d, sas_addr (0x%llx)\n", ioc->name,
port->port_identifier, (unsigned long long)sas_address);
sas_port_delete(port);
mptsas_set_port(ioc, phy_info, NULL);
mptsas_port_delete(ioc, phy_info->port_details);
}
static struct mptsas_phyinfo *
mptsas_refreshing_device_handles(MPT_ADAPTER *ioc,
struct mptsas_devinfo *sas_device)
{
struct mptsas_phyinfo *phy_info;
struct mptsas_portinfo *port_info;
int i;
phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
sas_device->sas_address);
if (!phy_info)
goto out;
port_info = phy_info->portinfo;
if (!port_info)
goto out;
mutex_lock(&ioc->sas_topology_mutex);
for (i = 0; i < port_info->num_phys; i++) {
if (port_info->phy_info[i].attached.sas_address !=
sas_device->sas_address)
continue;
port_info->phy_info[i].attached.channel = sas_device->channel;
port_info->phy_info[i].attached.id = sas_device->id;
port_info->phy_info[i].attached.sas_address =
sas_device->sas_address;
port_info->phy_info[i].attached.handle = sas_device->handle;
port_info->phy_info[i].attached.handle_parent =
sas_device->handle_parent;
port_info->phy_info[i].attached.handle_enclosure =
sas_device->handle_enclosure;
}
mutex_unlock(&ioc->sas_topology_mutex);
out:
return phy_info;
}
/**
* mptsas_firmware_event_work - work thread for processing fw events
* @work: work queue payload containing info describing the event
* Context: user
*
*/
static void
mptsas_firmware_event_work(struct work_struct *work)
{
struct fw_event_work *fw_event =
container_of(work, struct fw_event_work, work.work);
MPT_ADAPTER *ioc = fw_event->ioc;
/* special rescan topology handling */
if (fw_event->event == -1) {
if (ioc->in_rescan) {
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"%s: rescan ignored as it is in progress\n",
ioc->name, __func__));
return;
}
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: rescan after "
"reset\n", ioc->name, __func__));
ioc->in_rescan = 1;
mptsas_not_responding_devices(ioc);
mptsas_scan_sas_topology(ioc);
ioc->in_rescan = 0;
mptsas_free_fw_event(ioc, fw_event);
mptsas_fw_event_on(ioc);
return;
}
/* events handling turned off during host reset */
if (ioc->fw_events_off) {
mptsas_free_fw_event(ioc, fw_event);
return;
}
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: fw_event=(0x%p), "
"event = (0x%02x)\n", ioc->name, __func__, fw_event,
(fw_event->event & 0xFF)));
switch (fw_event->event) {
case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
mptsas_send_sas_event(fw_event);
break;
case MPI_EVENT_INTEGRATED_RAID:
mptsas_send_raid_event(fw_event);
break;
case MPI_EVENT_IR2:
mptsas_send_ir2_event(fw_event);
break;
case MPI_EVENT_PERSISTENT_TABLE_FULL:
mptbase_sas_persist_operation(ioc,
MPI_SAS_OP_CLEAR_NOT_PRESENT);
mptsas_free_fw_event(ioc, fw_event);
break;
case MPI_EVENT_SAS_BROADCAST_PRIMITIVE:
mptsas_broadcast_primitive_work(fw_event);
break;
case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE:
mptsas_send_expander_event(fw_event);
break;
case MPI_EVENT_SAS_PHY_LINK_STATUS:
mptsas_send_link_status_event(fw_event);
break;
case MPI_EVENT_QUEUE_FULL:
mptsas_handle_queue_full_event(fw_event);
break;
}
}
static int
mptsas_slave_configure(struct scsi_device *sdev)
{
struct Scsi_Host *host = sdev->host;
MPT_SCSI_HOST *hd = shost_priv(host);
MPT_ADAPTER *ioc = hd->ioc;
VirtDevice *vdevice = sdev->hostdata;
if (vdevice->vtarget->deleted) {
sdev_printk(KERN_INFO, sdev, "clearing deleted flag\n");
vdevice->vtarget->deleted = 0;
}
/*
* RAID volumes are placed beyond the last expected port.
* Skip sending SAS mode pages in that case.
*/
if (sdev->channel == MPTSAS_RAID_CHANNEL) {
mptsas_add_device_component_starget_ir(ioc, scsi_target(sdev));
goto out;
}
sas_read_port_mode_page(sdev);
mptsas_add_device_component_starget(ioc, scsi_target(sdev));
out:
return mptscsih_slave_configure(sdev);
}
static int
mptsas_target_alloc(struct scsi_target *starget)
{
struct Scsi_Host *host = dev_to_shost(&starget->dev);
MPT_SCSI_HOST *hd = shost_priv(host);
VirtTarget *vtarget;
u8 id, channel;
struct sas_rphy *rphy;
struct mptsas_portinfo *p;
int i;
MPT_ADAPTER *ioc = hd->ioc;
vtarget = kzalloc(sizeof(VirtTarget), GFP_KERNEL);
if (!vtarget)
return -ENOMEM;
vtarget->starget = starget;
vtarget->ioc_id = ioc->id;
vtarget->tflags = MPT_TARGET_FLAGS_Q_YES;
id = starget->id;
channel = 0;
/*
* RAID volumes placed beyond the last expected port.
*/
if (starget->channel == MPTSAS_RAID_CHANNEL) {
if (!ioc->raid_data.pIocPg2) {
kfree(vtarget);
return -ENXIO;
}
for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) {
if (id == ioc->raid_data.pIocPg2->
RaidVolume[i].VolumeID) {
channel = ioc->raid_data.pIocPg2->
RaidVolume[i].VolumeBus;
}
}
vtarget->raidVolume = 1;
goto out;
}
rphy = dev_to_rphy(starget->dev.parent);
mutex_lock(&ioc->sas_topology_mutex);
list_for_each_entry(p, &ioc->sas_topology, list) {
for (i = 0; i < p->num_phys; i++) {
if (p->phy_info[i].attached.sas_address !=
rphy->identify.sas_address)
continue;
id = p->phy_info[i].attached.id;
channel = p->phy_info[i].attached.channel;
mptsas_set_starget(&p->phy_info[i], starget);
/*
* Exposing hidden raid components
*/
if (mptscsih_is_phys_disk(ioc, channel, id)) {
id = mptscsih_raid_id_to_num(ioc,
channel, id);
vtarget->tflags |=
MPT_TARGET_FLAGS_RAID_COMPONENT;
p->phy_info[i].attached.phys_disk_num = id;
}
mutex_unlock(&ioc->sas_topology_mutex);
goto out;
}
}
mutex_unlock(&ioc->sas_topology_mutex);
kfree(vtarget);
return -ENXIO;
out:
vtarget->id = id;
vtarget->channel = channel;
starget->hostdata = vtarget;
return 0;
}
static void
mptsas_target_destroy(struct scsi_target *starget)
{
struct Scsi_Host *host = dev_to_shost(&starget->dev);
MPT_SCSI_HOST *hd = shost_priv(host);
struct sas_rphy *rphy;
struct mptsas_portinfo *p;
int i;
MPT_ADAPTER *ioc = hd->ioc;
VirtTarget *vtarget;
if (!starget->hostdata)
return;
vtarget = starget->hostdata;
mptsas_del_device_component_by_os(ioc, starget->channel,
starget->id);
if (starget->channel == MPTSAS_RAID_CHANNEL)
goto out;
rphy = dev_to_rphy(starget->dev.parent);
list_for_each_entry(p, &ioc->sas_topology, list) {
for (i = 0; i < p->num_phys; i++) {
if (p->phy_info[i].attached.sas_address !=
rphy->identify.sas_address)
continue;
starget_printk(KERN_INFO, starget, MYIOC_s_FMT
"delete device: fw_channel %d, fw_id %d, phy %d, "
"sas_addr 0x%llx\n", ioc->name,
p->phy_info[i].attached.channel,
p->phy_info[i].attached.id,
p->phy_info[i].attached.phy_id, (unsigned long long)
p->phy_info[i].attached.sas_address);
mptsas_set_starget(&p->phy_info[i], NULL);
}
}
out:
vtarget->starget = NULL;
kfree(starget->hostdata);
starget->hostdata = NULL;
}
static int
mptsas_slave_alloc(struct scsi_device *sdev)
{
struct Scsi_Host *host = sdev->host;
MPT_SCSI_HOST *hd = shost_priv(host);
struct sas_rphy *rphy;
struct mptsas_portinfo *p;
VirtDevice *vdevice;
struct scsi_target *starget;
int i;
MPT_ADAPTER *ioc = hd->ioc;
vdevice = kzalloc(sizeof(VirtDevice), GFP_KERNEL);
if (!vdevice) {
printk(MYIOC_s_ERR_FMT "slave_alloc kzalloc(%zd) FAILED!\n",
ioc->name, sizeof(VirtDevice));
return -ENOMEM;
}
starget = scsi_target(sdev);
vdevice->vtarget = starget->hostdata;
if (sdev->channel == MPTSAS_RAID_CHANNEL)
goto out;
rphy = dev_to_rphy(sdev->sdev_target->dev.parent);
mutex_lock(&ioc->sas_topology_mutex);
list_for_each_entry(p, &ioc->sas_topology, list) {
for (i = 0; i < p->num_phys; i++) {
if (p->phy_info[i].attached.sas_address !=
rphy->identify.sas_address)
continue;
vdevice->lun = sdev->lun;
/*
* Exposing hidden raid components
*/
if (mptscsih_is_phys_disk(ioc,
p->phy_info[i].attached.channel,
p->phy_info[i].attached.id))
sdev->no_uld_attach = 1;
mutex_unlock(&ioc->sas_topology_mutex);
goto out;
}
}
mutex_unlock(&ioc->sas_topology_mutex);
kfree(vdevice);
return -ENXIO;
out:
vdevice->vtarget->num_luns++;
sdev->hostdata = vdevice;
return 0;
}
static int
mptsas_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt)
{
MPT_SCSI_HOST *hd;
MPT_ADAPTER *ioc;
VirtDevice *vdevice = SCpnt->device->hostdata;
if (!vdevice || !vdevice->vtarget || vdevice->vtarget->deleted) {
SCpnt->result = DID_NO_CONNECT << 16;
scsi_done(SCpnt);
return 0;
}
hd = shost_priv(shost);
ioc = hd->ioc;
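/*
* While discovery has quiesced I/O, returning SCSI_MLQUEUE_HOST_BUSY
* makes the midlayer requeue the command and retry it later.
*/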
if (ioc->sas_discovery_quiesce_io)
return SCSI_MLQUEUE_HOST_BUSY;
if (ioc->debug_level & MPT_DEBUG_SCSI)
scsi_print_command(SCpnt);
return mptscsih_qcmd(SCpnt);
}
/**
* mptsas_eh_timed_out - reset the scsi_cmnd timeout if the device in
* question is currently in its removal delay
* @sc: scsi command that the midlayer is about to time out
*
**/
static enum scsi_timeout_action mptsas_eh_timed_out(struct scsi_cmnd *sc)
{
MPT_SCSI_HOST *hd;
MPT_ADAPTER *ioc;
VirtDevice *vdevice;
enum scsi_timeout_action rc = SCSI_EH_NOT_HANDLED;
hd = shost_priv(sc->device->host);
if (hd == NULL) {
printk(KERN_ERR MYNAM ": %s: Can't locate host! (sc=%p)\n",
__func__, sc);
goto done;
}
ioc = hd->ioc;
if (ioc->bus_type != SAS) {
printk(KERN_ERR MYNAM ": %s: Wrong bus type (sc=%p)\n",
__func__, sc);
goto done;
}
/* If the IOC is being reset from an internal context, do not run
* error handling for it; the SCSI midlayer should reset the timer
* instead.
*/
if (ioc->ioc_reset_in_progress) {
dtmprintk(ioc, printk(MYIOC_s_WARN_FMT ": %s: ioc is in reset,"
"SML need to reset the timer (sc=%p)\n",
ioc->name, __func__, sc));
rc = SCSI_EH_RESET_TIMER;
}
vdevice = sc->device->hostdata;
if (vdevice && vdevice->vtarget && (vdevice->vtarget->inDMD
|| vdevice->vtarget->deleted)) {
dtmprintk(ioc, printk(MYIOC_s_WARN_FMT ": %s: target removed "
"or in device removal delay (sc=%p)\n",
ioc->name, __func__, sc));
rc = SCSI_EH_RESET_TIMER;
goto done;
}
done:
return rc;
}
static const struct scsi_host_template mptsas_driver_template = {
.module = THIS_MODULE,
.proc_name = "mptsas",
.show_info = mptscsih_show_info,
.name = "MPT SAS Host",
.info = mptscsih_info,
.queuecommand = mptsas_qcmd,
.target_alloc = mptsas_target_alloc,
.slave_alloc = mptsas_slave_alloc,
.slave_configure = mptsas_slave_configure,
.target_destroy = mptsas_target_destroy,
.slave_destroy = mptscsih_slave_destroy,
.change_queue_depth = mptscsih_change_queue_depth,
.eh_timed_out = mptsas_eh_timed_out,
.eh_abort_handler = mptscsih_abort,
.eh_device_reset_handler = mptscsih_dev_reset,
.eh_host_reset_handler = mptscsih_host_reset,
.bios_param = mptscsih_bios_param,
.can_queue = MPT_SAS_CAN_QUEUE,
.this_id = -1,
.sg_tablesize = MPT_SCSI_SG_DEPTH,
.max_sectors = 8192,
.cmd_per_lun = 7,
.shost_groups = mptscsih_host_attr_groups,
.no_write_same = 1,
};
static int mptsas_get_linkerrors(struct sas_phy *phy)
{
MPT_ADAPTER *ioc = phy_to_ioc(phy);
ConfigExtendedPageHeader_t hdr;
CONFIGPARMS cfg;
SasPhyPage1_t *buffer;
dma_addr_t dma_handle;
int error;
/* FIXME: only have link errors on local phys */
if (!scsi_is_sas_phy_local(phy))
return -EINVAL;
hdr.PageVersion = MPI_SASPHY1_PAGEVERSION;
hdr.ExtPageLength = 0;
hdr.PageNumber = 1;
hdr.Reserved1 = 0;
hdr.Reserved2 = 0;
hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_PHY;
cfg.cfghdr.ehdr = &hdr;
cfg.physAddr = -1;
cfg.pageAddr = phy->identify.phy_identifier;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.dir = 0; /* read */
cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
error = mpt_config(ioc, &cfg);
if (error)
return error;
if (!hdr.ExtPageLength)
return -ENXIO;
buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4,
&dma_handle, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
cfg.physAddr = dma_handle;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
error = mpt_config(ioc, &cfg);
if (error)
goto out_free_consistent;
mptsas_print_phy_pg1(ioc, buffer);
phy->invalid_dword_count = le32_to_cpu(buffer->InvalidDwordCount);
phy->running_disparity_error_count =
le32_to_cpu(buffer->RunningDisparityErrorCount);
phy->loss_of_dword_sync_count =
le32_to_cpu(buffer->LossDwordSynchCount);
phy->phy_reset_problem_count =
le32_to_cpu(buffer->PhyResetProblemCount);
out_free_consistent:
dma_free_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4, buffer,
dma_handle);
return error;
}
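/*
* Completion callback for frames sent through the sas_mgmt context:
* the issuing path sets MPT_MGMT_STATUS_PENDING, posts the frame and
* sleeps on sas_mgmt.done; this callback stashes the reply frame and
* wakes the sleeper.
*/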
static int mptsas_mgmt_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
MPT_FRAME_HDR *reply)
{
ioc->sas_mgmt.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
if (reply != NULL) {
ioc->sas_mgmt.status |= MPT_MGMT_STATUS_RF_VALID;
memcpy(ioc->sas_mgmt.reply, reply,
min(ioc->reply_sz, 4 * reply->u.reply.MsgLength));
}
if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_PENDING) {
ioc->sas_mgmt.status &= ~MPT_MGMT_STATUS_PENDING;
complete(&ioc->sas_mgmt.done);
return 1;
}
return 0;
}
static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset)
{
MPT_ADAPTER *ioc = phy_to_ioc(phy);
SasIoUnitControlRequest_t *req;
SasIoUnitControlReply_t *reply;
MPT_FRAME_HDR *mf;
MPIHeader_t *hdr;
unsigned long timeleft;
int error = -ERESTARTSYS;
/* FIXME: fusion doesn't allow non-local phy reset */
if (!scsi_is_sas_phy_local(phy))
return -EINVAL;
/* not implemented for expanders */
if (phy->identify.target_port_protocols & SAS_PROTOCOL_SMP)
return -ENXIO;
if (mutex_lock_interruptible(&ioc->sas_mgmt.mutex))
goto out;
mf = mpt_get_msg_frame(mptsasMgmtCtx, ioc);
if (!mf) {
error = -ENOMEM;
goto out_unlock;
}
hdr = (MPIHeader_t *) mf;
req = (SasIoUnitControlRequest_t *)mf;
memset(req, 0, sizeof(SasIoUnitControlRequest_t));
req->Function = MPI_FUNCTION_SAS_IO_UNIT_CONTROL;
req->MsgContext = hdr->MsgContext;
req->Operation = hard_reset ?
MPI_SAS_OP_PHY_HARD_RESET : MPI_SAS_OP_PHY_LINK_RESET;
req->PhyNum = phy->identify.phy_identifier;
INITIALIZE_MGMT_STATUS(ioc->sas_mgmt.status)
mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf);
timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done,
10 * HZ);
if (!(ioc->sas_mgmt.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
error = -ETIME;
mpt_free_msg_frame(ioc, mf);
if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_DID_IOCRESET)
goto out_unlock;
if (!timeleft)
mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
goto out_unlock;
}
/* a reply frame is expected */
if ((ioc->sas_mgmt.status &
MPT_MGMT_STATUS_RF_VALID) == 0) {
error = -ENXIO;
goto out_unlock;
}
/* process the completed Reply Message Frame */
reply = (SasIoUnitControlReply_t *)ioc->sas_mgmt.reply;
if (reply->IOCStatus != MPI_IOCSTATUS_SUCCESS) {
printk(MYIOC_s_INFO_FMT "%s: IOCStatus=0x%X IOCLogInfo=0x%X\n",
ioc->name, __func__, reply->IOCStatus, reply->IOCLogInfo);
error = -ENXIO;
goto out_unlock;
}
error = 0;
out_unlock:
CLEAR_MGMT_STATUS(ioc->sas_mgmt.status)
mutex_unlock(&ioc->sas_mgmt.mutex);
out:
return error;
}
static int
mptsas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
{
MPT_ADAPTER *ioc = rphy_to_ioc(rphy);
int i, error;
struct mptsas_portinfo *p;
struct mptsas_enclosure enclosure_info;
u64 enclosure_handle;
mutex_lock(&ioc->sas_topology_mutex);
list_for_each_entry(p, &ioc->sas_topology, list) {
for (i = 0; i < p->num_phys; i++) {
if (p->phy_info[i].attached.sas_address ==
rphy->identify.sas_address) {
enclosure_handle = p->phy_info[i].
attached.handle_enclosure;
goto found_info;
}
}
}
mutex_unlock(&ioc->sas_topology_mutex);
return -ENXIO;
found_info:
mutex_unlock(&ioc->sas_topology_mutex);
memset(&enclosure_info, 0, sizeof(struct mptsas_enclosure));
error = mptsas_sas_enclosure_pg0(ioc, &enclosure_info,
(MPI_SAS_ENCLOS_PGAD_FORM_HANDLE <<
MPI_SAS_ENCLOS_PGAD_FORM_SHIFT), enclosure_handle);
if (!error)
*identifier = enclosure_info.enclosure_logical_id;
return error;
}
static int
mptsas_get_bay_identifier(struct sas_rphy *rphy)
{
MPT_ADAPTER *ioc = rphy_to_ioc(rphy);
struct mptsas_portinfo *p;
int i, rc;
mutex_lock(&ioc->sas_topology_mutex);
list_for_each_entry(p, &ioc->sas_topology, list) {
for (i = 0; i < p->num_phys; i++) {
if (p->phy_info[i].attached.sas_address ==
rphy->identify.sas_address) {
rc = p->phy_info[i].attached.slot;
goto out;
}
}
}
rc = -ENXIO;
out:
mutex_unlock(&ioc->sas_topology_mutex);
return rc;
}
static void mptsas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
struct sas_rphy *rphy)
{
MPT_ADAPTER *ioc = ((MPT_SCSI_HOST *) shost->hostdata)->ioc;
MPT_FRAME_HDR *mf;
SmpPassthroughRequest_t *smpreq;
int flagsLength;
unsigned long timeleft;
char *psge;
u64 sas_address = 0;
unsigned int reslen = 0;
int ret = -EINVAL;
/* do we need to support multiple segments? */
if (job->request_payload.sg_cnt > 1 ||
job->reply_payload.sg_cnt > 1) {
printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u, rsp %u\n",
ioc->name, __func__, job->request_payload.payload_len,
job->reply_payload.payload_len);
goto out;
}
ret = mutex_lock_interruptible(&ioc->sas_mgmt.mutex);
if (ret)
goto out;
mf = mpt_get_msg_frame(mptsasMgmtCtx, ioc);
if (!mf) {
ret = -ENOMEM;
goto out_unlock;
}
smpreq = (SmpPassthroughRequest_t *)mf;
memset(smpreq, 0, sizeof(*smpreq));
smpreq->RequestDataLength =
cpu_to_le16(job->request_payload.payload_len - 4);
smpreq->Function = MPI_FUNCTION_SMP_PASSTHROUGH;
if (rphy)
sas_address = rphy->identify.sas_address;
else {
struct mptsas_portinfo *port_info;
mutex_lock(&ioc->sas_topology_mutex);
port_info = ioc->hba_port_info;
if (port_info && port_info->phy_info)
sas_address =
port_info->phy_info[0].phy->identify.sas_address;
mutex_unlock(&ioc->sas_topology_mutex);
}
*((u64 *)&smpreq->SASAddress) = cpu_to_le64(sas_address);
psge = (char *)
(((int *) mf) + (offsetof(SmpPassthroughRequest_t, SGL) / 4));
/* request */
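/*
* The request SGE length excludes the trailing 4-byte CRC, which is
* presumably appended by the firmware (hence the "- 4" below).
*/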
flagsLength = (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
MPI_SGE_FLAGS_END_OF_BUFFER |
MPI_SGE_FLAGS_DIRECTION)
<< MPI_SGE_FLAGS_SHIFT;
if (!dma_map_sg(&ioc->pcidev->dev, job->request_payload.sg_list,
1, DMA_BIDIRECTIONAL))
goto put_mf;
flagsLength |= (sg_dma_len(job->request_payload.sg_list) - 4);
ioc->add_sge(psge, flagsLength,
sg_dma_address(job->request_payload.sg_list));
psge += ioc->SGE_size;
/* response */
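/*
* The response SGE is sized 4 bytes larger than the payload,
* presumably to leave room for the incoming frame's CRC (hence the
* "+ 4" below).
*/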
flagsLength = MPI_SGE_FLAGS_SIMPLE_ELEMENT |
MPI_SGE_FLAGS_SYSTEM_ADDRESS |
MPI_SGE_FLAGS_IOC_TO_HOST |
MPI_SGE_FLAGS_END_OF_BUFFER;
flagsLength = flagsLength << MPI_SGE_FLAGS_SHIFT;
if (!dma_map_sg(&ioc->pcidev->dev, job->reply_payload.sg_list,
1, DMA_BIDIRECTIONAL))
goto unmap_out;
flagsLength |= sg_dma_len(job->reply_payload.sg_list) + 4;
ioc->add_sge(psge, flagsLength,
sg_dma_address(job->reply_payload.sg_list));
INITIALIZE_MGMT_STATUS(ioc->sas_mgmt.status)
mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf);
timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, 10 * HZ);
if (!(ioc->sas_mgmt.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
ret = -ETIME;
mpt_free_msg_frame(ioc, mf);
mf = NULL;
if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_DID_IOCRESET)
goto unmap_in;
if (!timeleft)
mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
goto unmap_in;
}
mf = NULL;
if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_RF_VALID) {
SmpPassthroughReply_t *smprep;
smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply;
memcpy(job->reply, smprep, sizeof(*smprep));
job->reply_len = sizeof(*smprep);
reslen = smprep->ResponseDataLength;
} else {
printk(MYIOC_s_ERR_FMT
"%s: smp passthru reply failed to be returned\n",
ioc->name, __func__);
ret = -ENXIO;
}
unmap_in:
dma_unmap_sg(&ioc->pcidev->dev, job->reply_payload.sg_list, 1,
DMA_BIDIRECTIONAL);
unmap_out:
dma_unmap_sg(&ioc->pcidev->dev, job->request_payload.sg_list, 1,
DMA_BIDIRECTIONAL);
put_mf:
if (mf)
mpt_free_msg_frame(ioc, mf);
out_unlock:
CLEAR_MGMT_STATUS(ioc->sas_mgmt.status)
mutex_unlock(&ioc->sas_mgmt.mutex);
out:
bsg_job_done(job, ret, reslen);
}
static struct sas_function_template mptsas_transport_functions = {
.get_linkerrors = mptsas_get_linkerrors,
.get_enclosure_identifier = mptsas_get_enclosure_identifier,
.get_bay_identifier = mptsas_get_bay_identifier,
.phy_reset = mptsas_phy_reset,
.smp_handler = mptsas_smp_handler,
};
static struct scsi_transport_template *mptsas_transport_template;
static int
mptsas_sas_io_unit_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
{
ConfigExtendedPageHeader_t hdr;
CONFIGPARMS cfg;
SasIOUnitPage0_t *buffer;
dma_addr_t dma_handle;
int error, i;
hdr.PageVersion = MPI_SASIOUNITPAGE0_PAGEVERSION;
hdr.ExtPageLength = 0;
hdr.PageNumber = 0;
hdr.Reserved1 = 0;
hdr.Reserved2 = 0;
hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
cfg.cfghdr.ehdr = &hdr;
cfg.physAddr = -1;
cfg.pageAddr = 0;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.dir = 0; /* read */
cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
error = mpt_config(ioc, &cfg);
if (error)
goto out;
if (!hdr.ExtPageLength) {
error = -ENXIO;
goto out;
}
buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4,
&dma_handle, GFP_KERNEL);
if (!buffer) {
error = -ENOMEM;
goto out;
}
cfg.physAddr = dma_handle;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
error = mpt_config(ioc, &cfg);
if (error)
goto out_free_consistent;
port_info->num_phys = buffer->NumPhys;
port_info->phy_info = kcalloc(port_info->num_phys,
sizeof(struct mptsas_phyinfo), GFP_KERNEL);
if (!port_info->phy_info) {
error = -ENOMEM;
goto out_free_consistent;
}
ioc->nvdata_version_persistent =
le16_to_cpu(buffer->NvdataVersionPersistent);
ioc->nvdata_version_default =
le16_to_cpu(buffer->NvdataVersionDefault);
for (i = 0; i < port_info->num_phys; i++) {
mptsas_print_phy_data(ioc, &buffer->PhyData[i]);
port_info->phy_info[i].phy_id = i;
port_info->phy_info[i].port_id =
buffer->PhyData[i].Port;
port_info->phy_info[i].negotiated_link_rate =
buffer->PhyData[i].NegotiatedLinkRate;
port_info->phy_info[i].portinfo = port_info;
port_info->phy_info[i].handle =
le16_to_cpu(buffer->PhyData[i].ControllerDevHandle);
}
out_free_consistent:
dma_free_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4, buffer,
dma_handle);
out:
return error;
}
static int
mptsas_sas_io_unit_pg1(MPT_ADAPTER *ioc)
{
ConfigExtendedPageHeader_t hdr;
CONFIGPARMS cfg;
SasIOUnitPage1_t *buffer;
dma_addr_t dma_handle;
int error;
u8 device_missing_delay;
memset(&hdr, 0, sizeof(ConfigExtendedPageHeader_t));
memset(&cfg, 0, sizeof(CONFIGPARMS));
cfg.cfghdr.ehdr = &hdr;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
cfg.cfghdr.ehdr->PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
cfg.cfghdr.ehdr->ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
cfg.cfghdr.ehdr->PageVersion = MPI_SASIOUNITPAGE1_PAGEVERSION;
cfg.cfghdr.ehdr->PageNumber = 1;
error = mpt_config(ioc, &cfg);
if (error)
goto out;
if (!hdr.ExtPageLength) {
error = -ENXIO;
goto out;
}
buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4,
&dma_handle, GFP_KERNEL);
if (!buffer) {
error = -ENOMEM;
goto out;
}
cfg.physAddr = dma_handle;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
error = mpt_config(ioc, &cfg);
if (error)
goto out_free_consistent;
ioc->io_missing_delay =
le16_to_cpu(buffer->IODeviceMissingDelay);
device_missing_delay = buffer->ReportDeviceMissingDelay;
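/*
* When the UNIT_16 flag is set, the delay is encoded in 16-second
* units, so e.g. a raw value of 4 means 64 seconds; otherwise the
* masked value is taken as-is.
*/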
ioc->device_missing_delay = (device_missing_delay & MPI_SAS_IOUNIT1_REPORT_MISSING_UNIT_16) ?
(device_missing_delay & MPI_SAS_IOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16 :
device_missing_delay & MPI_SAS_IOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
out_free_consistent:
dma_free_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4, buffer,
dma_handle);
out:
return error;
}
static int
mptsas_sas_phy_pg0(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
u32 form, u32 form_specific)
{
ConfigExtendedPageHeader_t hdr;
CONFIGPARMS cfg;
SasPhyPage0_t *buffer;
dma_addr_t dma_handle;
int error;
hdr.PageVersion = MPI_SASPHY0_PAGEVERSION;
hdr.ExtPageLength = 0;
hdr.PageNumber = 0;
hdr.Reserved1 = 0;
hdr.Reserved2 = 0;
hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_PHY;
cfg.cfghdr.ehdr = &hdr;
cfg.dir = 0; /* read */
cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
/* Get Phy Pg 0 for each Phy. */
cfg.physAddr = -1;
cfg.pageAddr = form + form_specific;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
error = mpt_config(ioc, &cfg);
if (error)
goto out;
if (!hdr.ExtPageLength) {
error = -ENXIO;
goto out;
}
buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4,
&dma_handle, GFP_KERNEL);
if (!buffer) {
error = -ENOMEM;
goto out;
}
cfg.physAddr = dma_handle;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
error = mpt_config(ioc, &cfg);
if (error)
goto out_free_consistent;
mptsas_print_phy_pg0(ioc, buffer);
phy_info->hw_link_rate = buffer->HwLinkRate;
phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
phy_info->identify.handle = le16_to_cpu(buffer->OwnerDevHandle);
phy_info->attached.handle = le16_to_cpu(buffer->AttachedDevHandle);
out_free_consistent:
dma_free_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4, buffer,
dma_handle);
out:
return error;
}
static int
mptsas_sas_device_pg0(MPT_ADAPTER *ioc, struct mptsas_devinfo *device_info,
u32 form, u32 form_specific)
{
ConfigExtendedPageHeader_t hdr;
CONFIGPARMS cfg;
SasDevicePage0_t *buffer;
dma_addr_t dma_handle;
__le64 sas_address;
int error = 0;
hdr.PageVersion = MPI_SASDEVICE0_PAGEVERSION;
hdr.ExtPageLength = 0;
hdr.PageNumber = 0;
hdr.Reserved1 = 0;
hdr.Reserved2 = 0;
hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE;
cfg.cfghdr.ehdr = &hdr;
cfg.pageAddr = form + form_specific;
cfg.physAddr = -1;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.dir = 0; /* read */
cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
memset(device_info, 0, sizeof(struct mptsas_devinfo));
error = mpt_config(ioc, &cfg);
if (error)
goto out;
if (!hdr.ExtPageLength) {
error = -ENXIO;
goto out;
}
buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4,
&dma_handle, GFP_KERNEL);
if (!buffer) {
error = -ENOMEM;
goto out;
}
cfg.physAddr = dma_handle;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
error = mpt_config(ioc, &cfg);
if (error == MPI_IOCSTATUS_CONFIG_INVALID_PAGE) {
error = -ENODEV;
goto out_free_consistent;
}
if (error)
goto out_free_consistent;
mptsas_print_device_pg0(ioc, buffer);
memset(device_info, 0, sizeof(struct mptsas_devinfo));
device_info->handle = le16_to_cpu(buffer->DevHandle);
device_info->handle_parent = le16_to_cpu(buffer->ParentDevHandle);
device_info->handle_enclosure =
le16_to_cpu(buffer->EnclosureHandle);
device_info->slot = le16_to_cpu(buffer->Slot);
device_info->phy_id = buffer->PhyNum;
device_info->port_id = buffer->PhysicalPort;
device_info->id = buffer->TargetID;
device_info->phys_disk_num = ~0;
device_info->channel = buffer->Bus;
memcpy(&sas_address, &buffer->SASAddress, sizeof(__le64));
device_info->sas_address = le64_to_cpu(sas_address);
device_info->device_info =
le32_to_cpu(buffer->DeviceInfo);
device_info->flags = le16_to_cpu(buffer->Flags);
out_free_consistent:
dma_free_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4, buffer,
dma_handle);
out:
return error;
}
static int
mptsas_sas_expander_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info,
u32 form, u32 form_specific)
{
ConfigExtendedPageHeader_t hdr;
CONFIGPARMS cfg;
SasExpanderPage0_t *buffer;
dma_addr_t dma_handle;
int i, error;
__le64 sas_address;
memset(port_info, 0, sizeof(struct mptsas_portinfo));
hdr.PageVersion = MPI_SASEXPANDER0_PAGEVERSION;
hdr.ExtPageLength = 0;
hdr.PageNumber = 0;
hdr.Reserved1 = 0;
hdr.Reserved2 = 0;
hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_EXPANDER;
cfg.cfghdr.ehdr = &hdr;
cfg.physAddr = -1;
cfg.pageAddr = form + form_specific;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.dir = 0; /* read */
cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
error = mpt_config(ioc, &cfg);
if (error)
goto out;
if (!hdr.ExtPageLength) {
error = -ENXIO;
goto out;
}
buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4,
&dma_handle, GFP_KERNEL);
if (!buffer) {
error = -ENOMEM;
goto out;
}
cfg.physAddr = dma_handle;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
error = mpt_config(ioc, &cfg);
if (error == MPI_IOCSTATUS_CONFIG_INVALID_PAGE) {
error = -ENODEV;
goto out_free_consistent;
}
if (error)
goto out_free_consistent;
/* save config data */
port_info->num_phys = (buffer->NumPhys) ? buffer->NumPhys : 1;
port_info->phy_info = kcalloc(port_info->num_phys,
sizeof(struct mptsas_phyinfo), GFP_KERNEL);
if (!port_info->phy_info) {
error = -ENOMEM;
goto out_free_consistent;
}
memcpy(&sas_address, &buffer->SASAddress, sizeof(__le64));
for (i = 0; i < port_info->num_phys; i++) {
port_info->phy_info[i].portinfo = port_info;
port_info->phy_info[i].handle =
le16_to_cpu(buffer->DevHandle);
port_info->phy_info[i].identify.sas_address =
le64_to_cpu(sas_address);
port_info->phy_info[i].identify.handle_parent =
le16_to_cpu(buffer->ParentDevHandle);
}
out_free_consistent:
dma_free_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4, buffer,
dma_handle);
out:
return error;
}
static int
mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
u32 form, u32 form_specific)
{
ConfigExtendedPageHeader_t hdr;
CONFIGPARMS cfg;
SasExpanderPage1_t *buffer;
dma_addr_t dma_handle;
int error = 0;
hdr.PageVersion = MPI_SASEXPANDER1_PAGEVERSION;
hdr.ExtPageLength = 0;
hdr.PageNumber = 1;
hdr.Reserved1 = 0;
hdr.Reserved2 = 0;
hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_EXPANDER;
cfg.cfghdr.ehdr = &hdr;
cfg.physAddr = -1;
cfg.pageAddr = form + form_specific;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.dir = 0; /* read */
cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
error = mpt_config(ioc, &cfg);
if (error)
goto out;
if (!hdr.ExtPageLength) {
error = -ENXIO;
goto out;
}
buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4,
&dma_handle, GFP_KERNEL);
if (!buffer) {
error = -ENOMEM;
goto out;
}
cfg.physAddr = dma_handle;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
error = mpt_config(ioc, &cfg);
if (error == MPI_IOCSTATUS_CONFIG_INVALID_PAGE) {
error = -ENODEV;
goto out_free_consistent;
}
if (error)
goto out_free_consistent;
mptsas_print_expander_pg1(ioc, buffer);
/* save config data */
phy_info->phy_id = buffer->PhyIdentifier;
phy_info->port_id = buffer->PhysicalPort;
phy_info->negotiated_link_rate = buffer->NegotiatedLinkRate;
phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
phy_info->hw_link_rate = buffer->HwLinkRate;
phy_info->identify.handle = le16_to_cpu(buffer->OwnerDevHandle);
phy_info->attached.handle = le16_to_cpu(buffer->AttachedDevHandle);
out_free_consistent:
dma_free_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4, buffer,
dma_handle);
out:
return error;
}
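/*
* Wire formats for the SMP REPORT MANUFACTURER INFORMATION request and
* response (SMP function 0x01), consumed by
* mptsas_exp_repmanufacture_info() below.
*/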
struct rep_manu_request {
u8 smp_frame_type;
u8 function;
u8 reserved;
u8 request_length;
};
struct rep_manu_reply {
u8 smp_frame_type; /* 0x41 */
u8 function; /* 0x01 */
u8 function_result;
u8 response_length;
u16 expander_change_count;
u8 reserved0[2];
u8 sas_format:1;
u8 reserved1:7;
u8 reserved2[3];
u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN];
u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN];
u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN];
u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN];
u16 component_id;
u8 component_revision_id;
u8 reserved3;
u8 vendor_specific[8];
};
/**
* mptsas_exp_repmanufacture_info - sets expander manufacturer info
* @ioc: per adapter object
* @sas_address: expander sas address
* @edev: the sas_expander_device object
*
* For an edge expander or a fanout expander:
* fills in the sas_expander_device object when SMP port is created.
*
* Return: 0 for success, non-zero for failure.
*/
static int
mptsas_exp_repmanufacture_info(MPT_ADAPTER *ioc,
u64 sas_address, struct sas_expander_device *edev)
{
MPT_FRAME_HDR *mf;
SmpPassthroughRequest_t *smpreq;
SmpPassthroughReply_t *smprep;
struct rep_manu_reply *manufacture_reply;
struct rep_manu_request *manufacture_request;
int ret;
int flagsLength;
unsigned long timeleft;
char *psge;
unsigned long flags;
void *data_out = NULL;
dma_addr_t data_out_dma = 0;
u32 sz;
spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
if (ioc->ioc_reset_in_progress) {
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
printk(MYIOC_s_INFO_FMT "%s: host reset in progress!\n",
__func__, ioc->name);
return -EFAULT;
}
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
ret = mutex_lock_interruptible(&ioc->sas_mgmt.mutex);
if (ret)
goto out;
mf = mpt_get_msg_frame(mptsasMgmtCtx, ioc);
if (!mf) {
ret = -ENOMEM;
goto out_unlock;
}
smpreq = (SmpPassthroughRequest_t *)mf;
memset(smpreq, 0, sizeof(*smpreq));
sz = sizeof(struct rep_manu_request) + sizeof(struct rep_manu_reply);
data_out = dma_alloc_coherent(&ioc->pcidev->dev, sz, &data_out_dma,
GFP_KERNEL);
if (!data_out) {
printk(KERN_ERR "Memory allocation failure at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
ret = -ENOMEM;
goto put_mf;
}
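/*
* A single coherent buffer holds the request immediately followed by
* the reply; the second SGE below points at data_out_dma plus
* sizeof(struct rep_manu_request).
*/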
manufacture_request = data_out;
manufacture_request->smp_frame_type = 0x40;
manufacture_request->function = 1;
manufacture_request->reserved = 0;
manufacture_request->request_length = 0;
smpreq->Function = MPI_FUNCTION_SMP_PASSTHROUGH;
smpreq->PhysicalPort = 0xFF;
*((u64 *)&smpreq->SASAddress) = cpu_to_le64(sas_address);
smpreq->RequestDataLength = cpu_to_le16(sizeof(struct rep_manu_request));
psge = (char *)
(((int *) mf) + (offsetof(SmpPassthroughRequest_t, SGL) / 4));
flagsLength = MPI_SGE_FLAGS_SIMPLE_ELEMENT |
MPI_SGE_FLAGS_SYSTEM_ADDRESS |
MPI_SGE_FLAGS_HOST_TO_IOC |
MPI_SGE_FLAGS_END_OF_BUFFER;
flagsLength = flagsLength << MPI_SGE_FLAGS_SHIFT;
flagsLength |= sizeof(struct rep_manu_request);
ioc->add_sge(psge, flagsLength, data_out_dma);
psge += ioc->SGE_size;
flagsLength = MPI_SGE_FLAGS_SIMPLE_ELEMENT |
MPI_SGE_FLAGS_SYSTEM_ADDRESS |
MPI_SGE_FLAGS_IOC_TO_HOST |
MPI_SGE_FLAGS_END_OF_BUFFER;
flagsLength = flagsLength << MPI_SGE_FLAGS_SHIFT;
flagsLength |= sizeof(struct rep_manu_reply);
ioc->add_sge(psge, flagsLength, data_out_dma +
sizeof(struct rep_manu_request));
INITIALIZE_MGMT_STATUS(ioc->sas_mgmt.status)
mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf);
timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, 10 * HZ);
if (!(ioc->sas_mgmt.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
ret = -ETIME;
mpt_free_msg_frame(ioc, mf);
mf = NULL;
if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_DID_IOCRESET)
goto out_free;
if (!timeleft)
mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
goto out_free;
}
mf = NULL;
if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_RF_VALID) {
u8 *tmp;
smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply;
if (le16_to_cpu(smprep->ResponseDataLength) !=
sizeof(struct rep_manu_reply))
goto out_free;
manufacture_reply = data_out + sizeof(struct rep_manu_request);
strncpy(edev->vendor_id, manufacture_reply->vendor_id,
SAS_EXPANDER_VENDOR_ID_LEN);
strncpy(edev->product_id, manufacture_reply->product_id,
SAS_EXPANDER_PRODUCT_ID_LEN);
strncpy(edev->product_rev, manufacture_reply->product_rev,
SAS_EXPANDER_PRODUCT_REV_LEN);
edev->level = manufacture_reply->sas_format;
if (manufacture_reply->sas_format) {
strncpy(edev->component_vendor_id,
manufacture_reply->component_vendor_id,
SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN);
tmp = (u8 *)&manufacture_reply->component_id;
edev->component_id = tmp[0] << 8 | tmp[1];
edev->component_revision_id =
manufacture_reply->component_revision_id;
}
} else {
printk(MYIOC_s_ERR_FMT
"%s: smp passthru reply failed to be returned\n",
ioc->name, __func__);
ret = -ENXIO;
}
out_free:
if (data_out_dma)
dma_free_coherent(&ioc->pcidev->dev, sz, data_out,
data_out_dma);
put_mf:
if (mf)
mpt_free_msg_frame(ioc, mf);
out_unlock:
CLEAR_MGMT_STATUS(ioc->sas_mgmt.status)
mutex_unlock(&ioc->sas_mgmt.mutex);
out:
return ret;
}
static void
mptsas_parse_device_info(struct sas_identify *identify,
struct mptsas_devinfo *device_info)
{
u16 protocols;
identify->sas_address = device_info->sas_address;
identify->phy_identifier = device_info->phy_id;
/*
* Fill in Phy Initiator Port Protocol.
* Bits 6:3; more than one bit can be set, so each is tested independently.
*/
protocols = device_info->device_info & 0x78;
identify->initiator_port_protocols = 0;
if (protocols & MPI_SAS_DEVICE_INFO_SSP_INITIATOR)
identify->initiator_port_protocols |= SAS_PROTOCOL_SSP;
if (protocols & MPI_SAS_DEVICE_INFO_STP_INITIATOR)
identify->initiator_port_protocols |= SAS_PROTOCOL_STP;
if (protocols & MPI_SAS_DEVICE_INFO_SMP_INITIATOR)
identify->initiator_port_protocols |= SAS_PROTOCOL_SMP;
if (protocols & MPI_SAS_DEVICE_INFO_SATA_HOST)
identify->initiator_port_protocols |= SAS_PROTOCOL_SATA;
/*
* Fill in Phy Target Port Protocol.
* Bits 10:7; more than one bit can be set, so each is tested independently.
*/
protocols = device_info->device_info & 0x780;
identify->target_port_protocols = 0;
if (protocols & MPI_SAS_DEVICE_INFO_SSP_TARGET)
identify->target_port_protocols |= SAS_PROTOCOL_SSP;
if (protocols & MPI_SAS_DEVICE_INFO_STP_TARGET)
identify->target_port_protocols |= SAS_PROTOCOL_STP;
if (protocols & MPI_SAS_DEVICE_INFO_SMP_TARGET)
identify->target_port_protocols |= SAS_PROTOCOL_SMP;
if (protocols & MPI_SAS_DEVICE_INFO_SATA_DEVICE)
identify->target_port_protocols |= SAS_PROTOCOL_SATA;
/*
* Fill in Attached device type.
*/
switch (device_info->device_info &
MPI_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) {
case MPI_SAS_DEVICE_INFO_NO_DEVICE:
identify->device_type = SAS_PHY_UNUSED;
break;
case MPI_SAS_DEVICE_INFO_END_DEVICE:
identify->device_type = SAS_END_DEVICE;
break;
case MPI_SAS_DEVICE_INFO_EDGE_EXPANDER:
identify->device_type = SAS_EDGE_EXPANDER_DEVICE;
break;
case MPI_SAS_DEVICE_INFO_FANOUT_EXPANDER:
identify->device_type = SAS_FANOUT_EXPANDER_DEVICE;
break;
}
}
static int mptsas_probe_one_phy(struct device *dev,
struct mptsas_phyinfo *phy_info, int index, int local)
{
MPT_ADAPTER *ioc;
struct sas_phy *phy;
struct sas_port *port;
int error = 0;
VirtTarget *vtarget;
if (!dev) {
error = -ENODEV;
goto out;
}
if (!phy_info->phy) {
phy = sas_phy_alloc(dev, index);
if (!phy) {
error = -ENOMEM;
goto out;
}
} else
phy = phy_info->phy;
mptsas_parse_device_info(&phy->identify, &phy_info->identify);
/*
* Set Negotiated link rate.
*/
switch (phy_info->negotiated_link_rate) {
case MPI_SAS_IOUNIT0_RATE_PHY_DISABLED:
phy->negotiated_linkrate = SAS_PHY_DISABLED;
break;
case MPI_SAS_IOUNIT0_RATE_FAILED_SPEED_NEGOTIATION:
phy->negotiated_linkrate = SAS_LINK_RATE_FAILED;
break;
case MPI_SAS_IOUNIT0_RATE_1_5:
phy->negotiated_linkrate = SAS_LINK_RATE_1_5_GBPS;
break;
case MPI_SAS_IOUNIT0_RATE_3_0:
phy->negotiated_linkrate = SAS_LINK_RATE_3_0_GBPS;
break;
case MPI_SAS_IOUNIT0_RATE_6_0:
phy->negotiated_linkrate = SAS_LINK_RATE_6_0_GBPS;
break;
case MPI_SAS_IOUNIT0_RATE_SATA_OOB_COMPLETE:
case MPI_SAS_IOUNIT0_RATE_UNKNOWN:
default:
phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
break;
}
/*
* Set Max hardware link rate.
*/
switch (phy_info->hw_link_rate & MPI_SAS_PHY0_PRATE_MAX_RATE_MASK) {
case MPI_SAS_PHY0_HWRATE_MAX_RATE_1_5:
phy->maximum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
break;
case MPI_SAS_PHY0_PRATE_MAX_RATE_3_0:
phy->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS;
break;
default:
break;
}
/*
* Set Max programmed link rate.
*/
switch (phy_info->programmed_link_rate &
MPI_SAS_PHY0_PRATE_MAX_RATE_MASK) {
case MPI_SAS_PHY0_PRATE_MAX_RATE_1_5:
phy->maximum_linkrate = SAS_LINK_RATE_1_5_GBPS;
break;
case MPI_SAS_PHY0_PRATE_MAX_RATE_3_0:
phy->maximum_linkrate = SAS_LINK_RATE_3_0_GBPS;
break;
default:
break;
}
/*
* Set Min hardware link rate.
*/
switch (phy_info->hw_link_rate & MPI_SAS_PHY0_HWRATE_MIN_RATE_MASK) {
case MPI_SAS_PHY0_HWRATE_MIN_RATE_1_5:
phy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
break;
case MPI_SAS_PHY0_PRATE_MIN_RATE_3_0:
phy->minimum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS;
break;
default:
break;
}
/*
* Set Min programmed link rate.
*/
switch (phy_info->programmed_link_rate &
MPI_SAS_PHY0_PRATE_MIN_RATE_MASK) {
case MPI_SAS_PHY0_PRATE_MIN_RATE_1_5:
phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
break;
case MPI_SAS_PHY0_PRATE_MIN_RATE_3_0:
phy->minimum_linkrate = SAS_LINK_RATE_3_0_GBPS;
break;
default:
break;
}
if (!phy_info->phy) {
error = sas_phy_add(phy);
if (error) {
sas_phy_free(phy);
goto out;
}
phy_info->phy = phy;
}
if (!phy_info->attached.handle ||
!phy_info->port_details)
goto out;
port = mptsas_get_port(phy_info);
ioc = phy_to_ioc(phy_info->phy);
if (phy_info->sas_port_add_phy) {
if (!port) {
port = sas_port_alloc_num(dev);
if (!port) {
error = -ENOMEM;
goto out;
}
error = sas_port_add(port);
if (error) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
__func__, __LINE__));
goto out;
}
mptsas_set_port(ioc, phy_info, port);
devtprintk(ioc, dev_printk(KERN_DEBUG, &port->dev,
MYIOC_s_FMT "add port %d, sas_addr (0x%llx)\n",
ioc->name, port->port_identifier,
(unsigned long long)phy_info->
attached.sas_address));
}
dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"sas_port_add_phy: phy_id=%d\n",
ioc->name, phy_info->phy_id));
sas_port_add_phy(port, phy_info->phy);
phy_info->sas_port_add_phy = 0;
devtprintk(ioc, dev_printk(KERN_DEBUG, &phy_info->phy->dev,
MYIOC_s_FMT "add phy %d, phy-obj (0x%p)\n", ioc->name,
phy_info->phy_id, phy_info->phy));
}
if (!mptsas_get_rphy(phy_info) && port && !port->rphy) {
struct sas_rphy *rphy;
struct device *parent;
struct sas_identify identify;
parent = dev->parent->parent;
/*
* Let the hotplug_work thread handle processing
* the adding/removing of devices that occur
* after start of day.
*/
if (mptsas_is_end_device(&phy_info->attached) &&
phy_info->attached.handle_parent) {
goto out;
}
mptsas_parse_device_info(&identify, &phy_info->attached);
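/*
* A phy whose attached SAS address points back at its parent
* is the reverse side of an already-registered link; mark it
* as a backlink instead of allocating a duplicate rphy.
*/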
if (scsi_is_host_device(parent)) {
struct mptsas_portinfo *port_info;
int i;
port_info = ioc->hba_port_info;
for (i = 0; i < port_info->num_phys; i++)
if (port_info->phy_info[i].identify.sas_address ==
identify.sas_address) {
sas_port_mark_backlink(port);
goto out;
}
} else if (scsi_is_sas_rphy(parent)) {
struct sas_rphy *parent_rphy = dev_to_rphy(parent);
if (identify.sas_address ==
parent_rphy->identify.sas_address) {
sas_port_mark_backlink(port);
goto out;
}
}
switch (identify.device_type) {
case SAS_END_DEVICE:
rphy = sas_end_device_alloc(port);
break;
case SAS_EDGE_EXPANDER_DEVICE:
case SAS_FANOUT_EXPANDER_DEVICE:
rphy = sas_expander_alloc(port, identify.device_type);
break;
default:
rphy = NULL;
break;
}
if (!rphy) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
__func__, __LINE__));
goto out;
}
rphy->identify = identify;
error = sas_rphy_add(rphy);
if (error) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
__func__, __LINE__));
sas_rphy_free(rphy);
goto out;
}
mptsas_set_rphy(ioc, phy_info, rphy);
if (identify.device_type == SAS_EDGE_EXPANDER_DEVICE ||
identify.device_type == SAS_FANOUT_EXPANDER_DEVICE)
mptsas_exp_repmanufacture_info(ioc,
identify.sas_address,
rphy_to_expander_device(rphy));
}
/* If the device exists, verify it wasn't previously flagged
as a missing device. If so, clear it */
vtarget = mptsas_find_vtarget(ioc,
phy_info->attached.channel,
phy_info->attached.id);
if (vtarget && vtarget->inDMD) {
printk(KERN_INFO "Device returned, unsetting inDMD\n");
vtarget->inDMD = 0;
}
out:
return error;
}
static int
mptsas_probe_hba_phys(MPT_ADAPTER *ioc)
{
struct mptsas_portinfo *port_info, *hba;
int error = -ENOMEM, i;
hba = kzalloc(sizeof(struct mptsas_portinfo), GFP_KERNEL);
if (!hba)
goto out;
error = mptsas_sas_io_unit_pg0(ioc, hba);
if (error)
goto out_free_port_info;
mptsas_sas_io_unit_pg1(ioc);
mutex_lock(&ioc->sas_topology_mutex);
port_info = ioc->hba_port_info;
if (!port_info) {
ioc->hba_port_info = port_info = hba;
ioc->hba_port_num_phy = port_info->num_phys;
list_add_tail(&port_info->list, &ioc->sas_topology);
} else {
for (i = 0; i < hba->num_phys; i++) {
port_info->phy_info[i].negotiated_link_rate =
hba->phy_info[i].negotiated_link_rate;
port_info->phy_info[i].handle =
hba->phy_info[i].handle;
port_info->phy_info[i].port_id =
hba->phy_info[i].port_id;
}
kfree(hba->phy_info);
kfree(hba);
hba = NULL;
}
mutex_unlock(&ioc->sas_topology_mutex);
#if defined(CPQ_CIM)
ioc->num_ports = port_info->num_phys;
#endif
for (i = 0; i < port_info->num_phys; i++) {
mptsas_sas_phy_pg0(ioc, &port_info->phy_info[i],
(MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
port_info->phy_info[i].identify.handle =
port_info->phy_info[i].handle;
mptsas_sas_device_pg0(ioc, &port_info->phy_info[i].identify,
(MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
port_info->phy_info[i].identify.handle);
if (!ioc->hba_port_sas_addr)
ioc->hba_port_sas_addr =
port_info->phy_info[i].identify.sas_address;
port_info->phy_info[i].identify.phy_id =
port_info->phy_info[i].phy_id = i;
if (port_info->phy_info[i].attached.handle)
mptsas_sas_device_pg0(ioc,
&port_info->phy_info[i].attached,
(MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
port_info->phy_info[i].attached.handle);
}
mptsas_setup_wide_ports(ioc, port_info);
for (i = 0; i < port_info->num_phys; i++, ioc->sas_index++)
mptsas_probe_one_phy(&ioc->sh->shost_gendev,
&port_info->phy_info[i], ioc->sas_index, 1);
return 0;
out_free_port_info:
kfree(hba);
out:
return error;
}
static void
mptsas_expander_refresh(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
{
struct mptsas_portinfo *parent;
struct device *parent_dev;
struct sas_rphy *rphy;
int i;
u64 sas_address; /* expander sas address */
u32 handle;
handle = port_info->phy_info[0].handle;
sas_address = port_info->phy_info[0].identify.sas_address;
for (i = 0; i < port_info->num_phys; i++) {
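/*
* The HANDLE_PHY_NUM page address form encodes the phy
* number above bit 16 and the device handle in the low
* 16 bits.
*/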
mptsas_sas_expander_pg1(ioc, &port_info->phy_info[i],
(MPI_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM <<
MPI_SAS_EXPAND_PGAD_FORM_SHIFT), (i << 16) + handle);
mptsas_sas_device_pg0(ioc,
&port_info->phy_info[i].identify,
(MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
port_info->phy_info[i].identify.handle);
port_info->phy_info[i].identify.phy_id =
port_info->phy_info[i].phy_id;
if (port_info->phy_info[i].attached.handle) {
mptsas_sas_device_pg0(ioc,
&port_info->phy_info[i].attached,
(MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
port_info->phy_info[i].attached.handle);
port_info->phy_info[i].attached.phy_id =
port_info->phy_info[i].phy_id;
}
}
mutex_lock(&ioc->sas_topology_mutex);
parent = mptsas_find_portinfo_by_handle(ioc,
port_info->phy_info[0].identify.handle_parent);
if (!parent) {
mutex_unlock(&ioc->sas_topology_mutex);
return;
}
for (i = 0, parent_dev = NULL; i < parent->num_phys && !parent_dev;
i++) {
if (parent->phy_info[i].attached.sas_address == sas_address) {
rphy = mptsas_get_rphy(&parent->phy_info[i]);
parent_dev = &rphy->dev;
}
}
mutex_unlock(&ioc->sas_topology_mutex);
mptsas_setup_wide_ports(ioc, port_info);
for (i = 0; i < port_info->num_phys; i++, ioc->sas_index++)
mptsas_probe_one_phy(parent_dev, &port_info->phy_info[i],
ioc->sas_index, 0);
}
static void
mptsas_expander_event_add(MPT_ADAPTER *ioc,
MpiEventDataSasExpanderStatusChange_t *expander_data)
{
struct mptsas_portinfo *port_info;
int i;
__le64 sas_address;
port_info = kzalloc(sizeof(struct mptsas_portinfo), GFP_KERNEL);
BUG_ON(!port_info);
port_info->num_phys = (expander_data->NumPhys) ?
expander_data->NumPhys : 1;
port_info->phy_info = kcalloc(port_info->num_phys,
sizeof(struct mptsas_phyinfo), GFP_KERNEL);
BUG_ON(!port_info->phy_info);
memcpy(&sas_address, &expander_data->SASAddress, sizeof(__le64));
for (i = 0; i < port_info->num_phys; i++) {
port_info->phy_info[i].portinfo = port_info;
port_info->phy_info[i].handle =
le16_to_cpu(expander_data->DevHandle);
port_info->phy_info[i].identify.sas_address =
le64_to_cpu(sas_address);
port_info->phy_info[i].identify.handle_parent =
le16_to_cpu(expander_data->ParentDevHandle);
}
mutex_lock(&ioc->sas_topology_mutex);
list_add_tail(&port_info->list, &ioc->sas_topology);
mutex_unlock(&ioc->sas_topology_mutex);
printk(MYIOC_s_INFO_FMT "add expander: num_phys %d, "
"sas_addr (0x%llx)\n", ioc->name, port_info->num_phys,
(unsigned long long)sas_address);
mptsas_expander_refresh(ioc, port_info);
}
/**
* mptsas_delete_expander_siblings - remove siblings attached to expander
* @ioc: Pointer to MPT_ADAPTER structure
* @parent: the parent port_info object
* @expander: the expander port_info object
**/
static void
mptsas_delete_expander_siblings(MPT_ADAPTER *ioc, struct mptsas_portinfo
*parent, struct mptsas_portinfo *expander)
{
struct mptsas_phyinfo *phy_info;
struct mptsas_portinfo *port_info;
struct sas_rphy *rphy;
int i;
phy_info = expander->phy_info;
for (i = 0; i < expander->num_phys; i++, phy_info++) {
rphy = mptsas_get_rphy(phy_info);
if (!rphy)
continue;
if (rphy->identify.device_type == SAS_END_DEVICE)
mptsas_del_end_device(ioc, phy_info);
}
phy_info = expander->phy_info;
for (i = 0; i < expander->num_phys; i++, phy_info++) {
rphy = mptsas_get_rphy(phy_info);
if (!rphy)
continue;
if (rphy->identify.device_type ==
SAS_EDGE_EXPANDER_DEVICE ||
rphy->identify.device_type ==
SAS_FANOUT_EXPANDER_DEVICE) {
port_info = mptsas_find_portinfo_by_sas_address(ioc,
rphy->identify.sas_address);
if (!port_info)
continue;
if (port_info == parent) /* backlink rphy */
continue;
/*
* Delete this expander even if its expander device page
* still exists, because the parent expander has already
* been deleted.
*/
mptsas_expander_delete(ioc, port_info, 1);
}
}
}
/**
* mptsas_expander_delete - remove this expander
* @ioc: Pointer to MPT_ADAPTER structure
* @port_info: expander port_info struct
* @force: Flag to forcefully delete the expander
*
**/
static void mptsas_expander_delete(MPT_ADAPTER *ioc,
struct mptsas_portinfo *port_info, u8 force)
{
struct mptsas_portinfo *parent;
int i;
u64 expander_sas_address;
struct mptsas_phyinfo *phy_info;
struct mptsas_portinfo buffer;
struct mptsas_portinfo_details *port_details;
struct sas_port *port;
if (!port_info)
return;
/* see if expander is still there before deleting */
mptsas_sas_expander_pg0(ioc, &buffer,
(MPI_SAS_EXPAND_PGAD_FORM_HANDLE <<
MPI_SAS_EXPAND_PGAD_FORM_SHIFT),
port_info->phy_info[0].identify.handle);
if (buffer.num_phys) {
kfree(buffer.phy_info);
if (!force)
return;
}
/*
* Obtain the port_info instance to the parent port
*/
port_details = NULL;
expander_sas_address =
port_info->phy_info[0].identify.sas_address;
parent = mptsas_find_portinfo_by_handle(ioc,
port_info->phy_info[0].identify.handle_parent);
mptsas_delete_expander_siblings(ioc, parent, port_info);
if (!parent)
goto out;
/*
* Delete rphys in the parent that point
* to this expander.
*/
phy_info = parent->phy_info;
port = NULL;
for (i = 0; i < parent->num_phys; i++, phy_info++) {
if (!phy_info->phy)
continue;
if (phy_info->attached.sas_address !=
expander_sas_address)
continue;
if (!port) {
port = mptsas_get_port(phy_info);
port_details = phy_info->port_details;
}
dev_printk(KERN_DEBUG, &phy_info->phy->dev,
MYIOC_s_FMT "delete phy %d, phy-obj (0x%p)\n", ioc->name,
phy_info->phy_id, phy_info->phy);
sas_port_delete_phy(port, phy_info->phy);
}
if (port) {
dev_printk(KERN_DEBUG, &port->dev,
MYIOC_s_FMT "delete port %d, sas_addr (0x%llx)\n",
ioc->name, port->port_identifier,
(unsigned long long)expander_sas_address);
sas_port_delete(port);
mptsas_port_delete(ioc, port_details);
}
out:
printk(MYIOC_s_INFO_FMT "delete expander: num_phys %d, "
"sas_addr (0x%llx)\n", ioc->name, port_info->num_phys,
(unsigned long long)expander_sas_address);
/*
* free link
*/
list_del(&port_info->list);
kfree(port_info->phy_info);
kfree(port_info);
}
/**
* mptsas_send_expander_event - handle expander status change events
* @fw_event: event data
*
* This function handles adding, removing, and refreshing
* device handles within the expander objects.
*/
static void
mptsas_send_expander_event(struct fw_event_work *fw_event)
{
MPT_ADAPTER *ioc;
MpiEventDataSasExpanderStatusChange_t *expander_data;
struct mptsas_portinfo *port_info;
__le64 sas_address;
int i;
ioc = fw_event->ioc;
expander_data = (MpiEventDataSasExpanderStatusChange_t *)
fw_event->event_data;
memcpy(&sas_address, &expander_data->SASAddress, sizeof(__le64));
sas_address = le64_to_cpu(sas_address);
port_info = mptsas_find_portinfo_by_sas_address(ioc, sas_address);
if (expander_data->ReasonCode == MPI_EVENT_SAS_EXP_RC_ADDED) {
if (port_info) {
for (i = 0; i < port_info->num_phys; i++) {
port_info->phy_info[i].portinfo = port_info;
port_info->phy_info[i].handle =
le16_to_cpu(expander_data->DevHandle);
port_info->phy_info[i].identify.sas_address =
le64_to_cpu(sas_address);
port_info->phy_info[i].identify.handle_parent =
le16_to_cpu(expander_data->ParentDevHandle);
}
mptsas_expander_refresh(ioc, port_info);
} else if (expander_data->NumPhys)
mptsas_expander_event_add(ioc, expander_data);
} else if (expander_data->ReasonCode ==
MPI_EVENT_SAS_EXP_RC_NOT_RESPONDING)
mptsas_expander_delete(ioc, port_info, 0);
mptsas_free_fw_event(ioc, fw_event);
}
/**
* mptsas_expander_add - adds a newly discovered expander
* @ioc: Pointer to MPT_ADAPTER structure
* @handle: device handle
*
*/
static struct mptsas_portinfo *
mptsas_expander_add(MPT_ADAPTER *ioc, u16 handle)
{
struct mptsas_portinfo buffer, *port_info;
int i;
if ((mptsas_sas_expander_pg0(ioc, &buffer,
(MPI_SAS_EXPAND_PGAD_FORM_HANDLE <<
MPI_SAS_EXPAND_PGAD_FORM_SHIFT), handle)))
return NULL;
port_info = kzalloc(sizeof(struct mptsas_portinfo), GFP_KERNEL);
if (!port_info) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
__func__, __LINE__));
return NULL;
}
port_info->num_phys = buffer.num_phys;
port_info->phy_info = buffer.phy_info;
for (i = 0; i < port_info->num_phys; i++)
port_info->phy_info[i].portinfo = port_info;
mutex_lock(&ioc->sas_topology_mutex);
list_add_tail(&port_info->list, &ioc->sas_topology);
mutex_unlock(&ioc->sas_topology_mutex);
printk(MYIOC_s_INFO_FMT "add expander: num_phys %d, "
"sas_addr (0x%llx)\n", ioc->name, port_info->num_phys,
(unsigned long long)buffer.phy_info[0].identify.sas_address);
mptsas_expander_refresh(ioc, port_info);
return port_info;
}
static void
mptsas_send_link_status_event(struct fw_event_work *fw_event)
{
MPT_ADAPTER *ioc;
MpiEventDataSasPhyLinkStatus_t *link_data;
struct mptsas_portinfo *port_info;
struct mptsas_phyinfo *phy_info = NULL;
__le64 sas_address;
u8 phy_num;
u8 link_rate;
ioc = fw_event->ioc;
link_data = (MpiEventDataSasPhyLinkStatus_t *)fw_event->event_data;
memcpy(&sas_address, &link_data->SASAddress, sizeof(__le64));
sas_address = le64_to_cpu(sas_address);
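/* The upper nibble of LinkRates holds the negotiated rate. */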
link_rate = link_data->LinkRates >> 4;
phy_num = link_data->PhyNum;
port_info = mptsas_find_portinfo_by_sas_address(ioc, sas_address);
if (port_info) {
phy_info = &port_info->phy_info[phy_num];
if (phy_info)
phy_info->negotiated_link_rate = link_rate;
}
if (link_rate == MPI_SAS_IOUNIT0_RATE_1_5 ||
link_rate == MPI_SAS_IOUNIT0_RATE_3_0 ||
link_rate == MPI_SAS_IOUNIT0_RATE_6_0) {
if (!port_info) {
if (ioc->old_sas_discovery_protocal)
port_info = mptsas_expander_add(ioc,
le16_to_cpu(link_data->DevHandle));
goto out;
}
if (port_info == ioc->hba_port_info)
mptsas_probe_hba_phys(ioc);
else
mptsas_expander_refresh(ioc, port_info);
} else if (phy_info && phy_info->phy) {
if (link_rate == MPI_SAS_IOUNIT0_RATE_PHY_DISABLED)
phy_info->phy->negotiated_linkrate =
SAS_PHY_DISABLED;
else if (link_rate ==
MPI_SAS_IOUNIT0_RATE_FAILED_SPEED_NEGOTIATION)
phy_info->phy->negotiated_linkrate =
SAS_LINK_RATE_FAILED;
else {
phy_info->phy->negotiated_linkrate =
SAS_LINK_RATE_UNKNOWN;
if (ioc->device_missing_delay &&
mptsas_is_end_device(&phy_info->attached)) {
struct scsi_device *sdev;
VirtDevice *vdevice;
u8 channel, id;
id = phy_info->attached.id;
channel = phy_info->attached.channel;
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Link down for fw_id %d:fw_channel %d\n",
ioc->name, phy_info->attached.id,
phy_info->attached.channel));
shost_for_each_device(sdev, ioc->sh) {
vdevice = sdev->hostdata;
if ((vdevice == NULL) ||
(vdevice->vtarget == NULL))
continue;
if ((vdevice->vtarget->tflags &
MPT_TARGET_FLAGS_RAID_COMPONENT ||
vdevice->vtarget->raidVolume))
continue;
if (vdevice->vtarget->id == id &&
vdevice->vtarget->channel ==
channel)
devtprintk(ioc,
printk(MYIOC_s_DEBUG_FMT
"SDEV OUTSTANDING CMDS"
"%d\n", ioc->name,
scsi_device_busy(sdev)));
}
}
}
}
out:
mptsas_free_fw_event(ioc, fw_event);
}
static void
mptsas_not_responding_devices(MPT_ADAPTER *ioc)
{
struct mptsas_portinfo buffer, *port_info;
struct mptsas_device_info *sas_info;
struct mptsas_devinfo sas_device;
u32 handle;
VirtTarget *vtarget = NULL;
struct mptsas_phyinfo *phy_info;
u8 found_expander;
int retval, retry_count;
unsigned long flags;
mpt_findImVolumes(ioc);
spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
if (ioc->ioc_reset_in_progress) {
dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"%s: exiting due to a parallel reset \n", ioc->name,
__func__));
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
return;
}
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
/* devices, logical volumes */
mutex_lock(&ioc->sas_device_info_mutex);
redo_device_scan:
list_for_each_entry(sas_info, &ioc->sas_device_info_list, list) {
if (sas_info->is_cached)
continue;
if (!sas_info->is_logical_volume) {
sas_device.handle = 0;
retry_count = 0;
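/*
* Re-read device page 0 using the BUS_TARGET_ID form
* (channel in bits 15:8, target id in bits 7:0); transient
* failures other than -ENODEV are retried up to 10 times
* before the device is declared missing.
*/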
retry_page:
retval = mptsas_sas_device_pg0(ioc, &sas_device,
(MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID
<< MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
(sas_info->fw.channel << 8) +
sas_info->fw.id);
if (sas_device.handle)
continue;
if (retval == -EBUSY) {
spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
if (ioc->ioc_reset_in_progress) {
dfailprintk(ioc,
printk(MYIOC_s_DEBUG_FMT
"%s: exiting due to reset\n",
ioc->name, __func__));
spin_unlock_irqrestore
(&ioc->taskmgmt_lock, flags);
mutex_unlock(&ioc->
sas_device_info_mutex);
return;
}
spin_unlock_irqrestore(&ioc->taskmgmt_lock,
flags);
}
if (retval && (retval != -ENODEV)) {
if (retry_count < 10) {
retry_count++;
goto retry_page;
} else {
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"%s: Config page retry exceeded retry "
"count deleting device 0x%llx\n",
ioc->name, __func__,
sas_info->sas_address));
}
}
/* delete device */
vtarget = mptsas_find_vtarget(ioc,
sas_info->fw.channel, sas_info->fw.id);
if (vtarget)
vtarget->deleted = 1;
phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
sas_info->sas_address);
mptsas_del_end_device(ioc, phy_info);
goto redo_device_scan;
} else
mptsas_volume_delete(ioc, sas_info->fw.id);
}
mutex_unlock(&ioc->sas_device_info_mutex);
/* expanders */
mutex_lock(&ioc->sas_topology_mutex);
redo_expander_scan:
list_for_each_entry(port_info, &ioc->sas_topology, list) {
if (!(port_info->phy_info[0].identify.device_info &
MPI_SAS_DEVICE_INFO_SMP_TARGET))
continue;
found_expander = 0;
handle = 0xFFFF;
while (!mptsas_sas_expander_pg0(ioc, &buffer,
(MPI_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE <<
MPI_SAS_EXPAND_PGAD_FORM_SHIFT), handle) &&
!found_expander) {
handle = buffer.phy_info[0].handle;
if (buffer.phy_info[0].identify.sas_address ==
port_info->phy_info[0].identify.sas_address) {
found_expander = 1;
}
kfree(buffer.phy_info);
}
if (!found_expander) {
mptsas_expander_delete(ioc, port_info, 0);
goto redo_expander_scan;
}
}
mutex_unlock(&ioc->sas_topology_mutex);
}
/**
* mptsas_probe_expanders - probe for expanders and add them
* @ioc: Pointer to MPT_ADAPTER structure
*
**/
static void
mptsas_probe_expanders(MPT_ADAPTER *ioc)
{
struct mptsas_portinfo buffer, *port_info;
u32 handle;
int i;
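/*
* GET_NEXT_HANDLE iteration: start at 0xFFFF so the firmware
* returns the first expander, then feed back the handle from
* each reply until the request fails (no more expanders).
*/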
handle = 0xFFFF;
while (!mptsas_sas_expander_pg0(ioc, &buffer,
(MPI_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE <<
MPI_SAS_EXPAND_PGAD_FORM_SHIFT), handle)) {
handle = buffer.phy_info[0].handle;
port_info = mptsas_find_portinfo_by_sas_address(ioc,
buffer.phy_info[0].identify.sas_address);
if (port_info) {
/* refreshing handles */
for (i = 0; i < buffer.num_phys; i++) {
port_info->phy_info[i].handle = handle;
port_info->phy_info[i].identify.handle_parent =
buffer.phy_info[0].identify.handle_parent;
}
mptsas_expander_refresh(ioc, port_info);
kfree(buffer.phy_info);
continue;
}
port_info = kzalloc(sizeof(struct mptsas_portinfo), GFP_KERNEL);
if (!port_info) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
__func__, __LINE__));
return;
}
port_info->num_phys = buffer.num_phys;
port_info->phy_info = buffer.phy_info;
for (i = 0; i < port_info->num_phys; i++)
port_info->phy_info[i].portinfo = port_info;
mutex_lock(&ioc->sas_topology_mutex);
list_add_tail(&port_info->list, &ioc->sas_topology);
mutex_unlock(&ioc->sas_topology_mutex);
printk(MYIOC_s_INFO_FMT "add expander: num_phys %d, "
"sas_addr (0x%llx)\n", ioc->name, port_info->num_phys,
(unsigned long long)buffer.phy_info[0].identify.sas_address);
mptsas_expander_refresh(ioc, port_info);
}
}
static void
mptsas_probe_devices(MPT_ADAPTER *ioc)
{
u16 handle;
struct mptsas_devinfo sas_device;
struct mptsas_phyinfo *phy_info;
handle = 0xFFFF;
while (!(mptsas_sas_device_pg0(ioc, &sas_device,
MPI_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
handle = sas_device.handle;
if ((sas_device.device_info &
(MPI_SAS_DEVICE_INFO_SSP_TARGET |
MPI_SAS_DEVICE_INFO_STP_TARGET |
MPI_SAS_DEVICE_INFO_SATA_DEVICE)) == 0)
continue;
/* If there is no FW B_T mapping for this device then continue */
if (!(sas_device.flags & MPI_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)
|| !(sas_device.flags &
MPI_SAS_DEVICE0_FLAGS_DEVICE_MAPPED))
continue;
phy_info = mptsas_refreshing_device_handles(ioc, &sas_device);
if (!phy_info)
continue;
if (mptsas_get_rphy(phy_info))
continue;
mptsas_add_end_device(ioc, phy_info);
}
}
/**
* mptsas_scan_sas_topology - scan the SAS topology (during probe or rescan)
* @ioc: Pointer to MPT_ADAPTER structure
*
**/
static void
mptsas_scan_sas_topology(MPT_ADAPTER *ioc)
{
struct scsi_device *sdev;
int i;
mptsas_probe_hba_phys(ioc);
mptsas_probe_expanders(ioc);
mptsas_probe_devices(ioc);
/*
Reporting RAID volumes.
*/
if (!ioc->ir_firmware || !ioc->raid_data.pIocPg2 ||
!ioc->raid_data.pIocPg2->NumActiveVolumes)
return;
for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) {
sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL,
ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID, 0);
if (sdev) {
scsi_device_put(sdev);
continue;
}
printk(MYIOC_s_INFO_FMT "attaching raid volume, channel %d, "
"id %d\n", ioc->name, MPTSAS_RAID_CHANNEL,
ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID);
scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL,
ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID, 0);
}
}
static void
mptsas_handle_queue_full_event(struct fw_event_work *fw_event)
{
MPT_ADAPTER *ioc;
EventDataQueueFull_t *qfull_data;
struct mptsas_device_info *sas_info;
struct scsi_device *sdev;
int depth;
int id = -1;
int channel = -1;
int fw_id, fw_channel;
u16 current_depth;
ioc = fw_event->ioc;
qfull_data = (EventDataQueueFull_t *)fw_event->event_data;
fw_id = qfull_data->TargetID;
fw_channel = qfull_data->Bus;
current_depth = le16_to_cpu(qfull_data->CurrentDepth);
/* if hidden raid component, look for the volume id */
mutex_lock(&ioc->sas_device_info_mutex);
if (mptscsih_is_phys_disk(ioc, fw_channel, fw_id)) {
list_for_each_entry(sas_info, &ioc->sas_device_info_list,
list) {
if (sas_info->is_cached ||
sas_info->is_logical_volume)
continue;
if (sas_info->is_hidden_raid_component &&
(sas_info->fw.channel == fw_channel &&
sas_info->fw.id == fw_id)) {
id = sas_info->volume_id;
channel = MPTSAS_RAID_CHANNEL;
goto out;
}
}
} else {
list_for_each_entry(sas_info, &ioc->sas_device_info_list,
list) {
if (sas_info->is_cached ||
sas_info->is_hidden_raid_component ||
sas_info->is_logical_volume)
continue;
if (sas_info->fw.channel == fw_channel &&
sas_info->fw.id == fw_id) {
id = sas_info->os.id;
channel = sas_info->os.channel;
goto out;
}
}
}
out:
mutex_unlock(&ioc->sas_device_info_mutex);
if (id != -1) {
shost_for_each_device(sdev, ioc->sh) {
if (sdev->id == id && sdev->channel == channel) {
if (current_depth > sdev->queue_depth) {
sdev_printk(KERN_INFO, sdev,
"strange observation, the queue "
"depth is (%d) meanwhile fw queue "
"depth (%d)\n", sdev->queue_depth,
current_depth);
continue;
}
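/*
* Let the midlayer ramp the depth down by one;
* scsi_track_queue_full() returns the new depth, 0 if
* unchanged, or a negative value if tagged queueing
* was turned off.
*/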
depth = scsi_track_queue_full(sdev,
sdev->queue_depth - 1);
if (depth > 0)
sdev_printk(KERN_INFO, sdev,
"Queue depth reduced to (%d)\n",
depth);
else if (depth < 0)
sdev_printk(KERN_INFO, sdev,
"Tagged Command Queueing is being "
"disabled\n");
else if (depth == 0)
sdev_printk(KERN_DEBUG, sdev,
"Queue depth not changed yet\n");
}
}
}
mptsas_free_fw_event(ioc, fw_event);
}
static struct mptsas_phyinfo *
mptsas_find_phyinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address)
{
struct mptsas_portinfo *port_info;
struct mptsas_phyinfo *phy_info = NULL;
int i;
mutex_lock(&ioc->sas_topology_mutex);
list_for_each_entry(port_info, &ioc->sas_topology, list) {
for (i = 0; i < port_info->num_phys; i++) {
if (!mptsas_is_end_device(
&port_info->phy_info[i].attached))
continue;
if (port_info->phy_info[i].attached.sas_address
!= sas_address)
continue;
phy_info = &port_info->phy_info[i];
break;
}
}
mutex_unlock(&ioc->sas_topology_mutex);
return phy_info;
}
/**
* mptsas_find_phyinfo_by_phys_disk_num - find phyinfo for the specified @phys_disk_num
* @ioc: Pointer to MPT_ADAPTER structure
* @phys_disk_num: (hot plug) physical disk number (for RAID support)
* @channel: channel number
* @id: Logical Target ID
*
**/
static struct mptsas_phyinfo *
mptsas_find_phyinfo_by_phys_disk_num(MPT_ADAPTER *ioc, u8 phys_disk_num,
u8 channel, u8 id)
{
struct mptsas_phyinfo *phy_info = NULL;
struct mptsas_portinfo *port_info;
RaidPhysDiskPage1_t *phys_disk = NULL;
int num_paths;
u64 sas_address = 0;
int i;
phy_info = NULL;
if (!ioc->raid_data.pIocPg3)
return NULL;
/* dual port support */
num_paths = mpt_raid_phys_disk_get_num_paths(ioc, phys_disk_num);
if (!num_paths)
goto out;
phys_disk = kzalloc(offsetof(RaidPhysDiskPage1_t, Path) +
(num_paths * sizeof(RAID_PHYS_DISK1_PATH)), GFP_KERNEL);
if (!phys_disk)
goto out;
mpt_raid_phys_disk_pg1(ioc, phys_disk_num, phys_disk);
for (i = 0; i < num_paths; i++) {
if ((phys_disk->Path[i].Flags & 1) != 0)
/* entry no longer valid */
continue;
if ((id == phys_disk->Path[i].PhysDiskID) &&
(channel == phys_disk->Path[i].PhysDiskBus)) {
memcpy(&sas_address, &phys_disk->Path[i].WWID,
sizeof(u64));
phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
sas_address);
goto out;
}
}
out:
kfree(phys_disk);
if (phy_info)
return phy_info;
/*
* Extra code to handle RAID0 case, where the sas_address is not updated
* in phys_disk_page_1 when hotswapped
*/
mutex_lock(&ioc->sas_topology_mutex);
list_for_each_entry(port_info, &ioc->sas_topology, list) {
for (i = 0; i < port_info->num_phys && !phy_info; i++) {
if (!mptsas_is_end_device(
&port_info->phy_info[i].attached))
continue;
if (port_info->phy_info[i].attached.phys_disk_num == ~0)
continue;
if ((port_info->phy_info[i].attached.phys_disk_num ==
phys_disk_num) &&
(port_info->phy_info[i].attached.id == id) &&
(port_info->phy_info[i].attached.channel ==
channel))
phy_info = &port_info->phy_info[i];
}
}
mutex_unlock(&ioc->sas_topology_mutex);
return phy_info;
}
static void
mptsas_reprobe_lun(struct scsi_device *sdev, void *data)
{
int rc;
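/*
* no_uld_attach hides the device from upper-level drivers
* (sd, st, ...); a reprobe is needed for the change to take
* effect. The return value is not checked here.
*/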
sdev->no_uld_attach = data ? 1 : 0;
rc = scsi_device_reprobe(sdev);
}
static void
mptsas_reprobe_target(struct scsi_target *starget, int uld_attach)
{
starget_for_each_device(starget, uld_attach ? (void *)1 : NULL,
mptsas_reprobe_lun);
}
static void
mptsas_adding_inactive_raid_components(MPT_ADAPTER *ioc, u8 channel, u8 id)
{
CONFIGPARMS cfg;
ConfigPageHeader_t hdr;
dma_addr_t dma_handle;
pRaidVolumePage0_t buffer = NULL;
RaidPhysDiskPage0_t phys_disk;
int i;
struct mptsas_phyinfo *phy_info;
struct mptsas_devinfo sas_device;
memset(&cfg, 0 , sizeof(CONFIGPARMS));
memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_VOLUME;
cfg.pageAddr = (channel << 8) + id;
cfg.cfghdr.hdr = &hdr;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
if (mpt_config(ioc, &cfg) != 0)
goto out;
if (!hdr.PageLength)
goto out;
buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
&dma_handle, GFP_KERNEL);
if (!buffer)
goto out;
cfg.physAddr = dma_handle;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
if (mpt_config(ioc, &cfg) != 0)
goto out;
if (!(buffer->VolumeStatus.Flags &
MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE))
goto out;
if (!buffer->NumPhysDisks)
goto out;
for (i = 0; i < buffer->NumPhysDisks; i++) {
if (mpt_raid_phys_disk_pg0(ioc,
buffer->PhysDisk[i].PhysDiskNum, &phys_disk) != 0)
continue;
if (mptsas_sas_device_pg0(ioc, &sas_device,
(MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
(phys_disk.PhysDiskBus << 8) +
phys_disk.PhysDiskID))
continue;
/* If there is no FW B_T mapping for this device then continue */
if (!(sas_device.flags & MPI_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)
|| !(sas_device.flags &
MPI_SAS_DEVICE0_FLAGS_DEVICE_MAPPED))
continue;
phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
sas_device.sas_address);
mptsas_add_end_device(ioc, phy_info);
}
out:
if (buffer)
dma_free_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
buffer, dma_handle);
}
/*
* Work queue thread to handle SAS hotplug events
*/
static void
mptsas_hotplug_work(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
struct mptsas_hotplug_event *hot_plug_info)
{
struct mptsas_phyinfo *phy_info;
struct scsi_target * starget;
struct mptsas_devinfo sas_device;
VirtTarget *vtarget;
int i;
struct mptsas_portinfo *port_info;
switch (hot_plug_info->event_type) {
case MPTSAS_ADD_PHYSDISK:
if (!ioc->raid_data.pIocPg2)
break;
for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) {
if (ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID ==
hot_plug_info->id) {
printk(MYIOC_s_WARN_FMT "firmware bug: unable "
"to add hidden disk - target_id matches "
"volume_id\n", ioc->name);
mptsas_free_fw_event(ioc, fw_event);
return;
}
}
mpt_findImVolumes(ioc);
fallthrough;
case MPTSAS_ADD_DEVICE:
memset(&sas_device, 0, sizeof(struct mptsas_devinfo));
mptsas_sas_device_pg0(ioc, &sas_device,
(MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
(hot_plug_info->channel << 8) +
hot_plug_info->id);
/* If there is no FW B_T mapping for this device then break */
if (!(sas_device.flags & MPI_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)
|| !(sas_device.flags &
MPI_SAS_DEVICE0_FLAGS_DEVICE_MAPPED))
break;
if (!sas_device.handle)
return;
phy_info = mptsas_refreshing_device_handles(ioc, &sas_device);
/* Device hot plug */
if (!phy_info) {
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"%s %d HOT PLUG: "
"parent handle of device %x\n", ioc->name,
__func__, __LINE__, sas_device.handle_parent));
port_info = mptsas_find_portinfo_by_handle(ioc,
sas_device.handle_parent);
if (port_info == ioc->hba_port_info)
mptsas_probe_hba_phys(ioc);
else if (port_info)
mptsas_expander_refresh(ioc, port_info);
else {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s %d port info is NULL\n",
ioc->name, __func__, __LINE__));
break;
}
phy_info = mptsas_refreshing_device_handles
(ioc, &sas_device);
}
if (!phy_info) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s %d phy info is NULL\n",
ioc->name, __func__, __LINE__));
break;
}
if (mptsas_get_rphy(phy_info))
break;
mptsas_add_end_device(ioc, phy_info);
break;
case MPTSAS_DEL_DEVICE:
phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
hot_plug_info->sas_address);
mptsas_del_end_device(ioc, phy_info);
break;
case MPTSAS_DEL_PHYSDISK:
mpt_findImVolumes(ioc);
phy_info = mptsas_find_phyinfo_by_phys_disk_num(
ioc, hot_plug_info->phys_disk_num,
hot_plug_info->channel,
hot_plug_info->id);
mptsas_del_end_device(ioc, phy_info);
break;
case MPTSAS_ADD_PHYSDISK_REPROBE:
if (mptsas_sas_device_pg0(ioc, &sas_device,
(MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
(hot_plug_info->channel << 8) + hot_plug_info->id)) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: fw_id=%d exit at line=%d\n", ioc->name,
__func__, hot_plug_info->id, __LINE__));
break;
}
/* If there is no FW B_T mapping for this device then break */
if (!(sas_device.flags & MPI_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)
|| !(sas_device.flags &
MPI_SAS_DEVICE0_FLAGS_DEVICE_MAPPED))
break;
phy_info = mptsas_find_phyinfo_by_sas_address(
ioc, sas_device.sas_address);
if (!phy_info) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: fw_id=%d exit at line=%d\n", ioc->name,
__func__, hot_plug_info->id, __LINE__));
break;
}
starget = mptsas_get_starget(phy_info);
if (!starget) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: fw_id=%d exit at line=%d\n", ioc->name,
__func__, hot_plug_info->id, __LINE__));
break;
}
vtarget = starget->hostdata;
if (!vtarget) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: fw_id=%d exit at line=%d\n", ioc->name,
__func__, hot_plug_info->id, __LINE__));
break;
}
mpt_findImVolumes(ioc);
starget_printk(KERN_INFO, starget, MYIOC_s_FMT "RAID Hidding: "
"fw_channel=%d, fw_id=%d, physdsk %d, sas_addr 0x%llx\n",
ioc->name, hot_plug_info->channel, hot_plug_info->id,
hot_plug_info->phys_disk_num, (unsigned long long)
sas_device.sas_address);
vtarget->id = hot_plug_info->phys_disk_num;
vtarget->tflags |= MPT_TARGET_FLAGS_RAID_COMPONENT;
phy_info->attached.phys_disk_num = hot_plug_info->phys_disk_num;
mptsas_reprobe_target(starget, 1);
break;
case MPTSAS_DEL_PHYSDISK_REPROBE:
if (mptsas_sas_device_pg0(ioc, &sas_device,
(MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
(hot_plug_info->channel << 8) + hot_plug_info->id)) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: fw_id=%d exit at line=%d\n",
ioc->name, __func__,
hot_plug_info->id, __LINE__));
break;
}
/* If there is no FW B_T mapping for this device then break */
if (!(sas_device.flags & MPI_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)
|| !(sas_device.flags &
MPI_SAS_DEVICE0_FLAGS_DEVICE_MAPPED))
break;
phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
sas_device.sas_address);
if (!phy_info) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: fw_id=%d exit at line=%d\n", ioc->name,
__func__, hot_plug_info->id, __LINE__));
break;
}
starget = mptsas_get_starget(phy_info);
if (!starget) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: fw_id=%d exit at line=%d\n", ioc->name,
__func__, hot_plug_info->id, __LINE__));
break;
}
vtarget = starget->hostdata;
if (!vtarget) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: fw_id=%d exit at line=%d\n", ioc->name,
__func__, hot_plug_info->id, __LINE__));
break;
}
if (!(vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT)) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: fw_id=%d exit at line=%d\n", ioc->name,
__func__, hot_plug_info->id, __LINE__));
break;
}
mpt_findImVolumes(ioc);
starget_printk(KERN_INFO, starget, MYIOC_s_FMT "RAID Exposing:"
" fw_channel=%d, fw_id=%d, physdsk %d, sas_addr 0x%llx\n",
ioc->name, hot_plug_info->channel, hot_plug_info->id,
hot_plug_info->phys_disk_num, (unsigned long long)
sas_device.sas_address);
vtarget->tflags &= ~MPT_TARGET_FLAGS_RAID_COMPONENT;
vtarget->id = hot_plug_info->id;
phy_info->attached.phys_disk_num = ~0;
mptsas_reprobe_target(starget, 0);
mptsas_add_device_component_by_fw(ioc,
hot_plug_info->channel, hot_plug_info->id);
break;
case MPTSAS_ADD_RAID:
mpt_findImVolumes(ioc);
printk(MYIOC_s_INFO_FMT "attaching raid volume, channel %d, "
"id %d\n", ioc->name, MPTSAS_RAID_CHANNEL,
hot_plug_info->id);
scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL,
hot_plug_info->id, 0);
break;
case MPTSAS_DEL_RAID:
mpt_findImVolumes(ioc);
printk(MYIOC_s_INFO_FMT "removing raid volume, channel %d, "
"id %d\n", ioc->name, MPTSAS_RAID_CHANNEL,
hot_plug_info->id);
scsi_remove_device(hot_plug_info->sdev);
scsi_device_put(hot_plug_info->sdev);
break;
case MPTSAS_ADD_INACTIVE_VOLUME:
mpt_findImVolumes(ioc);
mptsas_adding_inactive_raid_components(ioc,
hot_plug_info->channel, hot_plug_info->id);
break;
default:
break;
}
mptsas_free_fw_event(ioc, fw_event);
}
static void
mptsas_send_sas_event(struct fw_event_work *fw_event)
{
MPT_ADAPTER *ioc;
struct mptsas_hotplug_event hot_plug_info;
EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data;
u32 device_info;
u64 sas_address;
ioc = fw_event->ioc;
sas_event_data = (EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *)
fw_event->event_data;
device_info = le32_to_cpu(sas_event_data->DeviceInfo);
if ((device_info &
(MPI_SAS_DEVICE_INFO_SSP_TARGET |
MPI_SAS_DEVICE_INFO_STP_TARGET |
MPI_SAS_DEVICE_INFO_SATA_DEVICE)) == 0) {
mptsas_free_fw_event(ioc, fw_event);
return;
}
if (sas_event_data->ReasonCode ==
MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED) {
mptbase_sas_persist_operation(ioc,
MPI_SAS_OP_CLEAR_NOT_PRESENT);
mptsas_free_fw_event(ioc, fw_event);
return;
}
switch (sas_event_data->ReasonCode) {
case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING:
case MPI_EVENT_SAS_DEV_STAT_RC_ADDED:
memset(&hot_plug_info, 0, sizeof(struct mptsas_hotplug_event));
hot_plug_info.handle = le16_to_cpu(sas_event_data->DevHandle);
hot_plug_info.channel = sas_event_data->Bus;
hot_plug_info.id = sas_event_data->TargetID;
hot_plug_info.phy_id = sas_event_data->PhyNum;
memcpy(&sas_address, &sas_event_data->SASAddress,
sizeof(u64));
hot_plug_info.sas_address = le64_to_cpu(sas_address);
hot_plug_info.device_info = device_info;
if (sas_event_data->ReasonCode &
MPI_EVENT_SAS_DEV_STAT_RC_ADDED)
hot_plug_info.event_type = MPTSAS_ADD_DEVICE;
else
hot_plug_info.event_type = MPTSAS_DEL_DEVICE;
mptsas_hotplug_work(ioc, fw_event, &hot_plug_info);
break;
case MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED:
mptbase_sas_persist_operation(ioc,
MPI_SAS_OP_CLEAR_NOT_PRESENT);
mptsas_free_fw_event(ioc, fw_event);
break;
case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
/* TODO */
case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
/* TODO */
default:
mptsas_free_fw_event(ioc, fw_event);
break;
}
}
static void
mptsas_send_raid_event(struct fw_event_work *fw_event)
{
MPT_ADAPTER *ioc;
EVENT_DATA_RAID *raid_event_data;
struct mptsas_hotplug_event hot_plug_info;
int status;
int state;
struct scsi_device *sdev = NULL;
VirtDevice *vdevice = NULL;
RaidPhysDiskPage0_t phys_disk;
ioc = fw_event->ioc;
raid_event_data = (EVENT_DATA_RAID *)fw_event->event_data;
status = le32_to_cpu(raid_event_data->SettingsStatus);
state = (status >> 8) & 0xff;
memset(&hot_plug_info, 0, sizeof(struct mptsas_hotplug_event));
hot_plug_info.id = raid_event_data->VolumeID;
hot_plug_info.channel = raid_event_data->VolumeBus;
hot_plug_info.phys_disk_num = raid_event_data->PhysDiskNum;
if (raid_event_data->ReasonCode == MPI_EVENT_RAID_RC_VOLUME_DELETED ||
raid_event_data->ReasonCode == MPI_EVENT_RAID_RC_VOLUME_CREATED ||
raid_event_data->ReasonCode ==
MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED) {
sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL,
hot_plug_info.id, 0);
hot_plug_info.sdev = sdev;
if (sdev)
vdevice = sdev->hostdata;
}
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Entering %s: "
"ReasonCode=%02x\n", ioc->name, __func__,
raid_event_data->ReasonCode));
switch (raid_event_data->ReasonCode) {
case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
hot_plug_info.event_type = MPTSAS_DEL_PHYSDISK_REPROBE;
break;
case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
hot_plug_info.event_type = MPTSAS_ADD_PHYSDISK_REPROBE;
break;
case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
switch (state) {
case MPI_PD_STATE_ONLINE:
case MPI_PD_STATE_NOT_COMPATIBLE:
mpt_raid_phys_disk_pg0(ioc,
raid_event_data->PhysDiskNum, &phys_disk);
hot_plug_info.id = phys_disk.PhysDiskID;
hot_plug_info.channel = phys_disk.PhysDiskBus;
hot_plug_info.event_type = MPTSAS_ADD_PHYSDISK;
break;
case MPI_PD_STATE_FAILED:
case MPI_PD_STATE_MISSING:
case MPI_PD_STATE_OFFLINE_AT_HOST_REQUEST:
case MPI_PD_STATE_FAILED_AT_HOST_REQUEST:
case MPI_PD_STATE_OFFLINE_FOR_ANOTHER_REASON:
hot_plug_info.event_type = MPTSAS_DEL_PHYSDISK;
break;
default:
break;
}
break;
case MPI_EVENT_RAID_RC_VOLUME_DELETED:
if (!sdev)
break;
vdevice->vtarget->deleted = 1; /* block IO */
hot_plug_info.event_type = MPTSAS_DEL_RAID;
break;
case MPI_EVENT_RAID_RC_VOLUME_CREATED:
if (sdev) {
scsi_device_put(sdev);
break;
}
hot_plug_info.event_type = MPTSAS_ADD_RAID;
break;
case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
if (!(status & MPI_RAIDVOL0_STATUS_FLAG_ENABLED)) {
if (!sdev)
break;
vdevice->vtarget->deleted = 1; /* block IO */
hot_plug_info.event_type = MPTSAS_DEL_RAID;
break;
}
switch (state) {
case MPI_RAIDVOL0_STATUS_STATE_FAILED:
case MPI_RAIDVOL0_STATUS_STATE_MISSING:
if (!sdev)
break;
vdevice->vtarget->deleted = 1; /* block IO */
hot_plug_info.event_type = MPTSAS_DEL_RAID;
break;
case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
if (sdev) {
scsi_device_put(sdev);
break;
}
hot_plug_info.event_type = MPTSAS_ADD_RAID;
break;
default:
break;
}
break;
default:
break;
}
if (hot_plug_info.event_type != MPTSAS_IGNORE_EVENT)
mptsas_hotplug_work(ioc, fw_event, &hot_plug_info);
else
mptsas_free_fw_event(ioc, fw_event);
}
/**
* mptsas_issue_tm - send mptsas internal tm request
* @ioc: Pointer to MPT_ADAPTER structure
* @type: Task Management type
* @channel: channel number for task management
* @id: Logical Target ID for reset (if appropriate)
* @lun: Logical unit for reset (if appropriate)
* @task_context: Context for the task to be aborted
* @timeout: timeout for task management control
* @issue_reset: set to 1 on return if reset is needed, else 0
*
* Return: 0 on success or -1 on failure.
*
*/
static int
mptsas_issue_tm(MPT_ADAPTER *ioc, u8 type, u8 channel, u8 id, u64 lun,
int task_context, ulong timeout, u8 *issue_reset)
{
MPT_FRAME_HDR *mf;
SCSITaskMgmt_t *pScsiTm;
int retval;
unsigned long timeleft;
*issue_reset = 0;
mf = mpt_get_msg_frame(mptsasDeviceResetCtx, ioc);
if (mf == NULL) {
retval = -1; /* return failure */
dtmprintk(ioc, printk(MYIOC_s_WARN_FMT "TaskMgmt request: no "
"msg frames!!\n", ioc->name));
goto out;
}
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request: mr = %p, "
"task_type = 0x%02X,\n\t timeout = %ld, fw_channel = %d, "
"fw_id = %d, lun = %lld,\n\t task_context = 0x%x\n", ioc->name, mf,
type, timeout, channel, id, (unsigned long long)lun,
task_context));
pScsiTm = (SCSITaskMgmt_t *) mf;
memset(pScsiTm, 0, sizeof(SCSITaskMgmt_t));
pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
pScsiTm->TaskType = type;
pScsiTm->MsgFlags = 0;
pScsiTm->TargetID = id;
pScsiTm->Bus = channel;
pScsiTm->ChainOffset = 0;
pScsiTm->Reserved = 0;
pScsiTm->Reserved1 = 0;
pScsiTm->TaskMsgContext = task_context;
int_to_scsilun(lun, (struct scsi_lun *)pScsiTm->LUN);
INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status)
CLEAR_MGMT_STATUS(ioc->internal_cmds.status)
retval = 0;
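/*
* Task management requests go out on the high-priority
* queue so they are not stuck behind normal I/O.
*/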
mpt_put_msg_frame_hi_pri(mptsasDeviceResetCtx, ioc, mf);
/* Now wait for the command to complete */
timeleft = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done,
timeout*HZ);
if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
retval = -1; /* return failure */
dtmprintk(ioc, printk(MYIOC_s_ERR_FMT
"TaskMgmt request: TIMED OUT!(mr=%p)\n", ioc->name, mf));
mpt_free_msg_frame(ioc, mf);
if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
goto out;
*issue_reset = 1;
goto out;
}
if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
retval = -1; /* return failure */
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"TaskMgmt request: failed with no reply\n", ioc->name));
goto out;
}
out:
CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
return retval;
}
/**
* mptsas_broadcast_primitive_work - Handle broadcast primitives
* @fw_event: work queue payload containing info describing the event
*
* This will be handled in workqueue context.
*/
static void
mptsas_broadcast_primitive_work(struct fw_event_work *fw_event)
{
MPT_ADAPTER *ioc = fw_event->ioc;
MPT_FRAME_HDR *mf;
VirtDevice *vdevice;
int ii;
struct scsi_cmnd *sc;
SCSITaskMgmtReply_t *pScsiTmReply;
u8 issue_reset;
int task_context;
u8 channel, id;
int lun;
u32 termination_count;
u32 query_count;
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"%s - enter\n", ioc->name, __func__));
mutex_lock(&ioc->taskmgmt_cmds.mutex);
if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) {
mutex_unlock(&ioc->taskmgmt_cmds.mutex);
mptsas_requeue_fw_event(ioc, fw_event, 1000);
return;
}
issue_reset = 0;
termination_count = 0;
query_count = 0;
mpt_findImVolumes(ioc);
pScsiTmReply = (SCSITaskMgmtReply_t *) ioc->taskmgmt_cmds.reply;
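/*
* Walk every outstanding command: QUERY TASK each one, and
* leave alone any command the firmware still reports as
* present or queued; anything else gets its task set aborted.
*/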
for (ii = 0; ii < ioc->req_depth; ii++) {
if (ioc->fw_events_off)
goto out;
sc = mptscsih_get_scsi_lookup(ioc, ii);
if (!sc)
continue;
mf = MPT_INDEX_2_MFPTR(ioc, ii);
if (!mf)
continue;
task_context = mf->u.frame.hwhdr.msgctxu.MsgContext;
vdevice = sc->device->hostdata;
if (!vdevice || !vdevice->vtarget)
continue;
if (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT)
continue; /* skip hidden raid components */
if (vdevice->vtarget->raidVolume)
continue; /* skip hidden raid components */
channel = vdevice->vtarget->channel;
id = vdevice->vtarget->id;
lun = vdevice->lun;
if (mptsas_issue_tm(ioc, MPI_SCSITASKMGMT_TASKTYPE_QUERY_TASK,
channel, id, (u64)lun, task_context, 30, &issue_reset))
goto out;
query_count++;
termination_count +=
le32_to_cpu(pScsiTmReply->TerminationCount);
if ((pScsiTmReply->IOCStatus == MPI_IOCSTATUS_SUCCESS) &&
(pScsiTmReply->ResponseCode ==
MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
pScsiTmReply->ResponseCode ==
MPI_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC))
continue;
if (mptsas_issue_tm(ioc,
MPI_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET,
channel, id, (u64)lun, 0, 30, &issue_reset))
goto out;
termination_count +=
le32_to_cpu(pScsiTmReply->TerminationCount);
}
out:
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"%s - exit, query_count = %d termination_count = %d\n",
ioc->name, __func__, query_count, termination_count));
ioc->broadcast_aen_busy = 0;
mpt_clear_taskmgmt_in_progress_flag(ioc);
mutex_unlock(&ioc->taskmgmt_cmds.mutex);
if (issue_reset) {
printk(MYIOC_s_WARN_FMT
"Issuing Reset from %s!! doorbell=0x%08x\n",
ioc->name, __func__, mpt_GetIocState(ioc, 0));
mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
}
mptsas_free_fw_event(ioc, fw_event);
}
/*
* mptsas_send_ir2_event - handle exposing a hidden disk when
* an inactive RAID volume is added
* @fw_event: work queue payload containing the IR2 event data
*/
static void
mptsas_send_ir2_event(struct fw_event_work *fw_event)
{
MPT_ADAPTER *ioc;
struct mptsas_hotplug_event hot_plug_info;
MPI_EVENT_DATA_IR2 *ir2_data;
u8 reasonCode;
RaidPhysDiskPage0_t phys_disk;
ioc = fw_event->ioc;
ir2_data = (MPI_EVENT_DATA_IR2 *)fw_event->event_data;
reasonCode = ir2_data->ReasonCode;
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Entering %s: "
"ReasonCode=%02x\n", ioc->name, __func__, reasonCode));
memset(&hot_plug_info, 0, sizeof(struct mptsas_hotplug_event));
hot_plug_info.id = ir2_data->TargetID;
hot_plug_info.channel = ir2_data->Bus;
switch (reasonCode) {
case MPI_EVENT_IR2_RC_FOREIGN_CFG_DETECTED:
hot_plug_info.event_type = MPTSAS_ADD_INACTIVE_VOLUME;
break;
case MPI_EVENT_IR2_RC_DUAL_PORT_REMOVED:
hot_plug_info.phys_disk_num = ir2_data->PhysDiskNum;
hot_plug_info.event_type = MPTSAS_DEL_PHYSDISK;
break;
case MPI_EVENT_IR2_RC_DUAL_PORT_ADDED:
hot_plug_info.phys_disk_num = ir2_data->PhysDiskNum;
mpt_raid_phys_disk_pg0(ioc,
ir2_data->PhysDiskNum, &phys_disk);
hot_plug_info.id = phys_disk.PhysDiskID;
hot_plug_info.event_type = MPTSAS_ADD_PHYSDISK;
break;
default:
mptsas_free_fw_event(ioc, fw_event);
return;
}
mptsas_hotplug_work(ioc, fw_event, &hot_plug_info);
}
static int
mptsas_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *reply)
{
u32 event = le32_to_cpu(reply->Event);
int event_data_sz;
struct fw_event_work *fw_event;
unsigned long delay;
if (ioc->bus_type != SAS)
return 0;
/* events turned off due to host reset or driver unloading */
if (ioc->fw_events_off)
return 0;
delay = msecs_to_jiffies(1);
switch (event) {
case MPI_EVENT_SAS_BROADCAST_PRIMITIVE:
{
EVENT_DATA_SAS_BROADCAST_PRIMITIVE *broadcast_event_data =
(EVENT_DATA_SAS_BROADCAST_PRIMITIVE *)reply->Data;
if (broadcast_event_data->Primitive !=
MPI_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
return 0;
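/*
* Only one broadcast AEN is handled at a time; further
* primitives are dropped until the worker clears
* broadcast_aen_busy.
*/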
if (ioc->broadcast_aen_busy)
return 0;
ioc->broadcast_aen_busy = 1;
break;
}
case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
{
EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data =
(EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *)reply->Data;
u16 ioc_stat;
ioc_stat = le16_to_cpu(reply->IOCStatus);
if (sas_event_data->ReasonCode ==
MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING) {
mptsas_target_reset_queue(ioc, sas_event_data);
return 0;
}
if (sas_event_data->ReasonCode ==
MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
ioc->device_missing_delay &&
(ioc_stat & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)) {
VirtTarget *vtarget = NULL;
u8 id, channel;
id = sas_event_data->TargetID;
channel = sas_event_data->Bus;
vtarget = mptsas_find_vtarget(ioc, channel, id);
if (vtarget) {
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"LogInfo (0x%x) available for "
"INTERNAL_DEVICE_RESET"
"fw_id %d fw_channel %d\n", ioc->name,
le32_to_cpu(reply->IOCLogInfo),
id, channel));
if (vtarget->raidVolume) {
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Skipping Raid Volume for inDMD\n",
ioc->name));
} else {
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Setting device flag inDMD\n",
ioc->name));
vtarget->inDMD = 1;
}
}
}
break;
}
case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE:
{
MpiEventDataSasExpanderStatusChange_t *expander_data =
(MpiEventDataSasExpanderStatusChange_t *)reply->Data;
if (ioc->old_sas_discovery_protocal)
return 0;
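/*
* Defer tearing down a non-responding expander by the
* device-missing delay so a transient loss does not rip
* out the whole topology.
*/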
if (expander_data->ReasonCode ==
MPI_EVENT_SAS_EXP_RC_NOT_RESPONDING &&
ioc->device_missing_delay)
delay = HZ * ioc->device_missing_delay;
break;
}
case MPI_EVENT_SAS_DISCOVERY:
{
u32 discovery_status;
EventDataSasDiscovery_t *discovery_data =
(EventDataSasDiscovery_t *)reply->Data;
discovery_status = le32_to_cpu(discovery_data->DiscoveryStatus);
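/*
* A non-zero DiscoveryStatus means discovery is still in
* progress; quiesce I/O until it completes.
*/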
ioc->sas_discovery_quiesce_io = discovery_status ? 1 : 0;
if (ioc->old_sas_discovery_protocal && !discovery_status)
mptsas_queue_rescan(ioc);
return 0;
}
case MPI_EVENT_INTEGRATED_RAID:
case MPI_EVENT_PERSISTENT_TABLE_FULL:
case MPI_EVENT_IR2:
case MPI_EVENT_SAS_PHY_LINK_STATUS:
case MPI_EVENT_QUEUE_FULL:
break;
default:
return 0;
}
event_data_sz = ((reply->MsgLength * 4) -
offsetof(EventNotificationReply_t, Data));
fw_event = kzalloc(sizeof(*fw_event) + event_data_sz, GFP_ATOMIC);
if (!fw_event) {
printk(MYIOC_s_WARN_FMT "%s: failed at (line=%d)\n", ioc->name,
__func__, __LINE__);
return 0;
}
memcpy(fw_event->event_data, reply->Data, event_data_sz);
fw_event->event = event;
fw_event->ioc = ioc;
mptsas_add_fw_event(ioc, fw_event, delay);
return 0;
}
/* Delete a volume when no longer listed in ioc pg2
*/
static void mptsas_volume_delete(MPT_ADAPTER *ioc, u8 id)
{
struct scsi_device *sdev;
int i;
sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL, id, 0);
if (!sdev)
return;
if (!ioc->raid_data.pIocPg2)
goto out;
if (!ioc->raid_data.pIocPg2->NumActiveVolumes)
goto out;
for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++)
if (ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID == id)
goto release_sdev;
out:
printk(MYIOC_s_INFO_FMT "removing raid volume, channel %d, "
"id %d\n", ioc->name, MPTSAS_RAID_CHANNEL, id);
scsi_remove_device(sdev);
release_sdev:
scsi_device_put(sdev);
}
static int
mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct Scsi_Host *sh;
MPT_SCSI_HOST *hd;
MPT_ADAPTER *ioc;
unsigned long flags;
int ii;
int numSGE = 0;
int scale;
int ioc_cap;
int error=0;
int r;
r = mpt_attach(pdev,id);
if (r)
return r;
ioc = pci_get_drvdata(pdev);
mptsas_fw_event_off(ioc);
ioc->DoneCtx = mptsasDoneCtx;
ioc->TaskCtx = mptsasTaskCtx;
ioc->InternalCtx = mptsasInternalCtx;
ioc->schedule_target_reset = &mptsas_schedule_target_reset;
ioc->schedule_dead_ioc_flush_running_cmds =
&mptscsih_flush_running_cmds;
/* Sanity check on the readiness of the MPT adapter.
*/
if (ioc->last_state != MPI_IOC_STATE_OPERATIONAL) {
printk(MYIOC_s_WARN_FMT
"Skipping because it's not operational!\n",
ioc->name);
error = -ENODEV;
goto out_mptsas_probe;
}
if (!ioc->active) {
printk(MYIOC_s_WARN_FMT "Skipping because it's disabled!\n",
ioc->name);
error = -ENODEV;
goto out_mptsas_probe;
}
/* Sanity check - ensure at least 1 port is INITIATOR capable
*/
ioc_cap = 0;
for (ii = 0; ii < ioc->facts.NumberOfPorts; ii++) {
if (ioc->pfacts[ii].ProtocolFlags &
MPI_PORTFACTS_PROTOCOL_INITIATOR)
ioc_cap++;
}
if (!ioc_cap) {
printk(MYIOC_s_WARN_FMT
"Skipping ioc=%p because SCSI Initiator mode "
"is NOT enabled!\n", ioc->name, ioc);
return 0;
}
sh = scsi_host_alloc(&mptsas_driver_template, sizeof(MPT_SCSI_HOST));
if (!sh) {
printk(MYIOC_s_WARN_FMT
"Unable to register controller with SCSI subsystem\n",
ioc->name);
error = -1;
goto out_mptsas_probe;
}
spin_lock_irqsave(&ioc->FreeQlock, flags);
/* Attach the SCSI Host to the IOC structure
*/
ioc->sh = sh;
sh->io_port = 0;
sh->n_io_port = 0;
sh->irq = 0;
/* set 16 byte cdb's */
sh->max_cmd_len = 16;
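/*
* Leave some request frames free, presumably as headroom
* for task management and internal commands.
*/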
sh->can_queue = min_t(int, ioc->req_depth - 10, sh->can_queue);
sh->max_id = -1;
sh->max_lun = max_lun;
sh->transportt = mptsas_transport_template;
/* Required entry.
*/
sh->unique_id = ioc->id;
INIT_LIST_HEAD(&ioc->sas_topology);
mutex_init(&ioc->sas_topology_mutex);
mutex_init(&ioc->sas_discovery_mutex);
mutex_init(&ioc->sas_mgmt.mutex);
init_completion(&ioc->sas_mgmt.done);
/* Verify that we won't exceed the maximum
* number of chain buffers.
* We can optimize: ZZ = req_sz/sizeof(SGE)
* For 32-bit SGEs:
*  numSGE = 1 + (ZZ-1)*(maxChain -1) + ZZ
*    + (req_sz - 64)/sizeof(SGE)
* A slightly different algorithm is required for
* 64-bit SGEs.
*/
scale = ioc->req_sz/ioc->SGE_size;
if (ioc->sg_addr_size == sizeof(u64)) {
numSGE = (scale - 1) *
(ioc->facts.MaxChainDepth-1) + scale +
(ioc->req_sz - 60) / ioc->SGE_size;
} else {
numSGE = 1 + (scale - 1) *
(ioc->facts.MaxChainDepth-1) + scale +
(ioc->req_sz - 64) / ioc->SGE_size;
}
if (numSGE < sh->sg_tablesize) {
/* Reset this value */
dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Resetting sg_tablesize to %d from %d\n",
ioc->name, numSGE, sh->sg_tablesize));
sh->sg_tablesize = numSGE;
}
if (mpt_loadtime_max_sectors) {
if (mpt_loadtime_max_sectors < 64 ||
mpt_loadtime_max_sectors > 8192) {
printk(MYIOC_s_INFO_FMT "Invalid value passed for"
"mpt_loadtime_max_sectors %d."
"Range from 64 to 8192\n", ioc->name,
mpt_loadtime_max_sectors);
}
mpt_loadtime_max_sectors &= 0xFFFFFFFE;
dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Resetting max sector to %d from %d\n",
ioc->name, mpt_loadtime_max_sectors, sh->max_sectors));
sh->max_sectors = mpt_loadtime_max_sectors;
}
hd = shost_priv(sh);
hd->ioc = ioc;
/* SCSI needs scsi_cmnd lookup table!
* (with size equal to req_depth*PtrSz!)
*/
ioc->ScsiLookup = kcalloc(ioc->req_depth, sizeof(void *), GFP_ATOMIC);
if (!ioc->ScsiLookup) {
error = -ENOMEM;
spin_unlock_irqrestore(&ioc->FreeQlock, flags);
goto out_mptsas_probe;
}
spin_lock_init(&ioc->scsi_lookup_lock);
dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ScsiLookup @ %p\n",
ioc->name, ioc->ScsiLookup));
ioc->sas_data.ptClear = mpt_pt_clear;
hd->last_queue_full = 0;
INIT_LIST_HEAD(&hd->target_reset_list);
INIT_LIST_HEAD(&ioc->sas_device_info_list);
mutex_init(&ioc->sas_device_info_mutex);
spin_unlock_irqrestore(&ioc->FreeQlock, flags);
if (ioc->sas_data.ptClear==1) {
mptbase_sas_persist_operation(
ioc, MPI_SAS_OP_CLEAR_ALL_PERSISTENT);
}
error = scsi_add_host(sh, &ioc->pcidev->dev);
if (error) {
dprintk(ioc, printk(MYIOC_s_ERR_FMT
"scsi_add_host failed\n", ioc->name));
goto out_mptsas_probe;
}
/* older firmware doesn't support expander events */
if ((ioc->facts.HeaderVersion >> 8) < 0xE)
ioc->old_sas_discovery_protocal = 1;
mptsas_scan_sas_topology(ioc);
mptsas_fw_event_on(ioc);
return 0;
out_mptsas_probe:
mptscsih_remove(pdev);
return error;
}
static void
mptsas_shutdown(struct pci_dev *pdev)
{
MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
mptsas_fw_event_off(ioc);
mptsas_cleanup_fw_event_q(ioc);
}
static void mptsas_remove(struct pci_dev *pdev)
{
MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
struct mptsas_portinfo *p, *n;
int i;
if (!ioc->sh) {
printk(MYIOC_s_INFO_FMT "IOC is in Target mode\n", ioc->name);
mpt_detach(pdev);
return;
}
mptsas_shutdown(pdev);
mptsas_del_device_components(ioc);
ioc->sas_discovery_ignore_events = 1;
sas_remove_host(ioc->sh);
mutex_lock(&ioc->sas_topology_mutex);
list_for_each_entry_safe(p, n, &ioc->sas_topology, list) {
list_del(&p->list);
for (i = 0 ; i < p->num_phys ; i++)
mptsas_port_delete(ioc, p->phy_info[i].port_details);
kfree(p->phy_info);
kfree(p);
}
mutex_unlock(&ioc->sas_topology_mutex);
ioc->hba_port_info = NULL;
mptscsih_remove(pdev);
}
static struct pci_device_id mptsas_pci_table[] = {
{ PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVID_SAS1064,
PCI_ANY_ID, PCI_ANY_ID },
{ PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVID_SAS1068,
PCI_ANY_ID, PCI_ANY_ID },
{ PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVID_SAS1064E,
PCI_ANY_ID, PCI_ANY_ID },
{ PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVID_SAS1068E,
PCI_ANY_ID, PCI_ANY_ID },
{ PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVID_SAS1078,
PCI_ANY_ID, PCI_ANY_ID },
{ PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVID_SAS1068_820XELP,
PCI_ANY_ID, PCI_ANY_ID },
{0} /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, mptsas_pci_table);
static struct pci_driver mptsas_driver = {
.name = "mptsas",
.id_table = mptsas_pci_table,
.probe = mptsas_probe,
.remove = mptsas_remove,
.shutdown = mptsas_shutdown,
#ifdef CONFIG_PM
.suspend = mptscsih_suspend,
.resume = mptscsih_resume,
#endif
};
static int __init
mptsas_init(void)
{
int error;
show_mptmod_ver(my_NAME, my_VERSION);
mptsas_transport_template =
sas_attach_transport(&mptsas_transport_functions);
if (!mptsas_transport_template)
return -ENODEV;
mptsasDoneCtx = mpt_register(mptscsih_io_done, MPTSAS_DRIVER,
"mptscsih_io_done");
mptsasTaskCtx = mpt_register(mptscsih_taskmgmt_complete, MPTSAS_DRIVER,
"mptscsih_taskmgmt_complete");
mptsasInternalCtx =
mpt_register(mptscsih_scandv_complete, MPTSAS_DRIVER,
"mptscsih_scandv_complete");
mptsasMgmtCtx = mpt_register(mptsas_mgmt_done, MPTSAS_DRIVER,
"mptsas_mgmt_done");
mptsasDeviceResetCtx =
mpt_register(mptsas_taskmgmt_complete, MPTSAS_DRIVER,
"mptsas_taskmgmt_complete");
mpt_event_register(mptsasDoneCtx, mptsas_event_process);
mpt_reset_register(mptsasDoneCtx, mptsas_ioc_reset);
error = pci_register_driver(&mptsas_driver);
if (error)
sas_release_transport(mptsas_transport_template);
return error;
}
static void __exit
mptsas_exit(void)
{
pci_unregister_driver(&mptsas_driver);
sas_release_transport(mptsas_transport_template);
mpt_reset_deregister(mptsasDoneCtx);
mpt_event_deregister(mptsasDoneCtx);
mpt_deregister(mptsasMgmtCtx);
mpt_deregister(mptsasInternalCtx);
mpt_deregister(mptsasTaskCtx);
mpt_deregister(mptsasDoneCtx);
mpt_deregister(mptsasDeviceResetCtx);
}
module_init(mptsas_init);
module_exit(mptsas_exit);
| linux-master | drivers/message/fusion/mptsas.c |
/*
* linux/drivers/message/fusion/mptctl.c
* mpt Ioctl driver.
* For use with LSI PCI chip/adapters
* running LSI Fusion MPT (Message Passing Technology) firmware.
*
* Copyright (c) 1999-2008 LSI Corporation
* (mailto:[email protected])
*
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
NO WARRANTY
THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
solely responsible for determining the appropriateness of using and
distributing the Program and assumes all risks associated with its
exercise of rights under this Agreement, including but not limited to
the risks and costs of program errors, damage to or loss of data,
programs or equipment, and unavailability or interruption of operations.
DISCLAIMER OF LIABILITY
NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h> /* for mdelay */
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/compat.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#define COPYRIGHT "Copyright (c) 1999-2008 LSI Corporation"
#define MODULEAUTHOR "LSI Corporation"
#include "mptbase.h"
#include "mptctl.h"
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
#define my_NAME "Fusion MPT misc device (ioctl) driver"
#define my_VERSION MPT_LINUX_VERSION_COMMON
#define MYNAM "mptctl"
MODULE_AUTHOR(MODULEAUTHOR);
MODULE_DESCRIPTION(my_NAME);
MODULE_LICENSE("GPL");
MODULE_VERSION(my_VERSION);
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static DEFINE_MUTEX(mpctl_mutex);
static u8 mptctl_id = MPT_MAX_PROTOCOL_DRIVERS;
static u8 mptctl_taskmgmt_id = MPT_MAX_PROTOCOL_DRIVERS;
static DECLARE_WAIT_QUEUE_HEAD ( mptctl_wait );
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
struct buflist {
u8 *kptr;
int len;
};
/*
* Function prototypes. Called from OS entry point mptctl_ioctl.
* arg contents specific to function.
*/
static int mptctl_fw_download(MPT_ADAPTER *iocp, unsigned long arg);
static int mptctl_getiocinfo(MPT_ADAPTER *iocp, unsigned long arg, unsigned int cmd);
static int mptctl_gettargetinfo(MPT_ADAPTER *iocp, unsigned long arg);
static int mptctl_readtest(MPT_ADAPTER *iocp, unsigned long arg);
static int mptctl_mpt_command(MPT_ADAPTER *iocp, unsigned long arg);
static int mptctl_eventquery(MPT_ADAPTER *iocp, unsigned long arg);
static int mptctl_eventenable(MPT_ADAPTER *iocp, unsigned long arg);
static int mptctl_eventreport(MPT_ADAPTER *iocp, unsigned long arg);
static int mptctl_replace_fw(MPT_ADAPTER *iocp, unsigned long arg);
static int mptctl_do_reset(MPT_ADAPTER *iocp, unsigned long arg);
static int mptctl_hp_hostinfo(MPT_ADAPTER *iocp, unsigned long arg, unsigned int cmd);
static int mptctl_hp_targetinfo(MPT_ADAPTER *iocp, unsigned long arg);
static int mptctl_probe(struct pci_dev *);
static void mptctl_remove(struct pci_dev *);
#ifdef CONFIG_COMPAT
static long compat_mpctl_ioctl(struct file *f, unsigned cmd, unsigned long arg);
#endif
/*
* Private function calls.
*/
static int mptctl_do_mpt_command(MPT_ADAPTER *iocp, struct mpt_ioctl_command karg, void __user *mfPtr);
static int mptctl_do_fw_download(MPT_ADAPTER *iocp, char __user *ufwbuf, size_t fwlen);
static MptSge_t *kbuf_alloc_2_sgl(int bytes, u32 dir, int sge_offset, int *frags,
struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc);
static void kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma,
struct buflist *buflist, MPT_ADAPTER *ioc);
/*
* Reset Handler cleanup function
*/
static int mptctl_ioc_reset(MPT_ADAPTER *ioc, int reset_phase);
/*
* Event Handler function
*/
static int mptctl_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
static struct fasync_struct *async_queue=NULL;
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* Scatter gather list (SGL) sizes and limits...
*/
//#define MAX_SCSI_FRAGS 9
#define MAX_FRAGS_SPILL1 9
#define MAX_FRAGS_SPILL2 15
#define FRAGS_PER_BUCKET (MAX_FRAGS_SPILL2 + 1)
//#define MAX_CHAIN_FRAGS 64
//#define MAX_CHAIN_FRAGS (15+15+15+16)
#define MAX_CHAIN_FRAGS (4 * MAX_FRAGS_SPILL2 + 1)
// Define max sg LIST bytes ( == (#frags + #chains) * 8 bytes each)
// Works out to: 592d bytes! (9+1)*8 + 4*(15+1)*8
// ^----------------- 80 + 512
#define MAX_SGL_BYTES ((MAX_FRAGS_SPILL1 + 1 + (4 * FRAGS_PER_BUCKET)) * 8)
/* linux only seems to ever give 128kB MAX contiguous (GFP_USER) mem bytes */
#define MAX_KMALLOC_SZ (128*1024)
#define MPT_IOCTL_DEFAULT_TIMEOUT 10 /* Default timeout value (seconds) */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mptctl_syscall_down - Down the MPT adapter syscall semaphore.
* @ioc: Pointer to MPT adapter
* @nonblock: boolean, non-zero if O_NONBLOCK is set
*
* All of the ioctl commands can potentially sleep, which is illegal
* with a spinlock held, thus we perform mutual exclusion here.
*
* Returns negative errno on error, or zero for success.
*/
static inline int
mptctl_syscall_down(MPT_ADAPTER *ioc, int nonblock)
{
int rc = 0;
if (nonblock) {
if (!mutex_trylock(&ioc->ioctl_cmds.mutex))
rc = -EAGAIN;
} else {
if (mutex_lock_interruptible(&ioc->ioctl_cmds.mutex))
rc = -ERESTARTSYS;
}
return rc;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 *	This is the callback for any message we have posted. The message itself
 *	will be returned to the message pool when we return from the IRQ.
 *
 *	This runs in IRQ context, so be short and sweet.
*/
static int
mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
{
char *sense_data;
int req_index;
int sz;
if (!req)
return 0;
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "completing mpi function "
"(0x%02X), req=%p, reply=%p\n", ioc->name, req->u.hdr.Function,
req, reply));
	/*
	 * Handle continuation of the same reply: process the first
	 * reply, and discard the other replies that come later.
	 */
if (ioc->ioctl_cmds.msg_context != req->u.hdr.MsgContext)
goto out_continuation;
ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
if (!reply)
goto out;
ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
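	/* MsgLength is in 32-bit words, so the reply size in bytes is 4x it. */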
sz = min(ioc->reply_sz, 4*reply->u.reply.MsgLength);
memcpy(ioc->ioctl_cmds.reply, reply, sz);
if (reply->u.reply.IOCStatus || reply->u.reply.IOCLogInfo)
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"iocstatus (0x%04X), loginfo (0x%08X)\n", ioc->name,
le16_to_cpu(reply->u.reply.IOCStatus),
le32_to_cpu(reply->u.reply.IOCLogInfo)));
if ((req->u.hdr.Function == MPI_FUNCTION_SCSI_IO_REQUEST) ||
(req->u.hdr.Function ==
MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
if (reply->u.sreply.SCSIStatus || reply->u.sreply.SCSIState)
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"scsi_status (0x%02x), scsi_state (0x%02x), "
"tag = (0x%04x), transfer_count (0x%08x)\n", ioc->name,
reply->u.sreply.SCSIStatus,
reply->u.sreply.SCSIState,
le16_to_cpu(reply->u.sreply.TaskTag),
le32_to_cpu(reply->u.sreply.TransferCount)));
if (reply->u.sreply.SCSIState &
MPI_SCSI_STATE_AUTOSENSE_VALID) {
sz = req->u.scsireq.SenseBufferLength;
req_index =
le16_to_cpu(req->u.frame.hwhdr.msgctxu.fld.req_idx);
sense_data = ((u8 *)ioc->sense_buf_pool +
(req_index * MPT_SENSE_BUFFER_ALLOC));
memcpy(ioc->ioctl_cmds.sense, sense_data, sz);
ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_SENSE_VALID;
}
}
out:
/* We are done, issue wake up
*/
if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_PENDING) {
if (req->u.hdr.Function == MPI_FUNCTION_SCSI_TASK_MGMT) {
mpt_clear_taskmgmt_in_progress_flag(ioc);
ioc->ioctl_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
complete(&ioc->ioctl_cmds.done);
if (ioc->bus_type == SAS)
ioc->schedule_target_reset(ioc);
} else {
ioc->ioctl_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
complete(&ioc->ioctl_cmds.done);
}
}
out_continuation:
if (reply && (reply->u.reply.MsgFlags &
MPI_MSGFLAGS_CONTINUATION_REPLY))
return 0;
return 1;
}
static int
mptctl_taskmgmt_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
{
if (!mf)
return 0;
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"TaskMgmt completed (mf=%p, mr=%p)\n",
ioc->name, mf, mr));
ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
if (!mr)
goto out;
ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
memcpy(ioc->taskmgmt_cmds.reply, mr,
min(MPT_DEFAULT_FRAME_SIZE, 4 * mr->u.reply.MsgLength));
out:
if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) {
mpt_clear_taskmgmt_in_progress_flag(ioc);
ioc->taskmgmt_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
complete(&ioc->taskmgmt_cmds.done);
if (ioc->bus_type == SAS)
ioc->schedule_target_reset(ioc);
return 1;
}
return 0;
}
static int
mptctl_do_taskmgmt(MPT_ADAPTER *ioc, u8 tm_type, u8 bus_id, u8 target_id)
{
MPT_FRAME_HDR *mf;
SCSITaskMgmt_t *pScsiTm;
SCSITaskMgmtReply_t *pScsiTmReply;
int ii;
int retval;
unsigned long timeout;
u16 iocstatus;
mutex_lock(&ioc->taskmgmt_cmds.mutex);
if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) {
mutex_unlock(&ioc->taskmgmt_cmds.mutex);
return -EPERM;
}
retval = 0;
mf = mpt_get_msg_frame(mptctl_taskmgmt_id, ioc);
if (mf == NULL) {
dtmprintk(ioc,
printk(MYIOC_s_WARN_FMT "TaskMgmt, no msg frames!!\n",
ioc->name));
mpt_clear_taskmgmt_in_progress_flag(ioc);
retval = -ENOMEM;
goto tm_done;
}
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request (mf=%p)\n",
ioc->name, mf));
pScsiTm = (SCSITaskMgmt_t *) mf;
memset(pScsiTm, 0, sizeof(SCSITaskMgmt_t));
pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
pScsiTm->TaskType = tm_type;
if ((tm_type == MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS) &&
(ioc->bus_type == FC))
pScsiTm->MsgFlags =
MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION;
pScsiTm->TargetID = target_id;
pScsiTm->Bus = bus_id;
pScsiTm->ChainOffset = 0;
pScsiTm->Reserved = 0;
pScsiTm->Reserved1 = 0;
pScsiTm->TaskMsgContext = 0;
for (ii= 0; ii < 8; ii++)
pScsiTm->LUN[ii] = 0;
for (ii=0; ii < 7; ii++)
pScsiTm->Reserved2[ii] = 0;
switch (ioc->bus_type) {
case FC:
timeout = 40;
break;
case SAS:
timeout = 30;
break;
case SPI:
default:
timeout = 10;
break;
}
dtmprintk(ioc,
printk(MYIOC_s_DEBUG_FMT "TaskMgmt type=%d timeout=%ld\n",
ioc->name, tm_type, timeout));
INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status)
if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) &&
(ioc->facts.MsgVersion >= MPI_VERSION_01_05))
mpt_put_msg_frame_hi_pri(mptctl_taskmgmt_id, ioc, mf);
else {
retval = mpt_send_handshake_request(mptctl_taskmgmt_id, ioc,
sizeof(SCSITaskMgmt_t), (u32 *)pScsiTm, CAN_SLEEP);
if (retval != 0) {
dfailprintk(ioc,
printk(MYIOC_s_ERR_FMT
"TaskMgmt send_handshake FAILED!"
" (ioc %p, mf %p, rc=%d) \n", ioc->name,
ioc, mf, retval));
mpt_free_msg_frame(ioc, mf);
mpt_clear_taskmgmt_in_progress_flag(ioc);
goto tm_done;
}
}
/* Now wait for the command to complete */
ii = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done, timeout*HZ);
if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"TaskMgmt failed\n", ioc->name));
mpt_free_msg_frame(ioc, mf);
mpt_clear_taskmgmt_in_progress_flag(ioc);
if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
retval = 0;
else
retval = -1; /* return failure */
goto tm_done;
}
if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"TaskMgmt failed\n", ioc->name));
retval = -1; /* return failure */
goto tm_done;
}
pScsiTmReply = (SCSITaskMgmtReply_t *) ioc->taskmgmt_cmds.reply;
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"TaskMgmt fw_channel = %d, fw_id = %d, task_type=0x%02X, "
"iocstatus=0x%04X\n\tloginfo=0x%08X, response_code=0x%02X, "
"term_cmnds=%d\n", ioc->name, pScsiTmReply->Bus,
pScsiTmReply->TargetID, tm_type,
le16_to_cpu(pScsiTmReply->IOCStatus),
le32_to_cpu(pScsiTmReply->IOCLogInfo),
pScsiTmReply->ResponseCode,
le32_to_cpu(pScsiTmReply->TerminationCount)));
iocstatus = le16_to_cpu(pScsiTmReply->IOCStatus) & MPI_IOCSTATUS_MASK;
if (iocstatus == MPI_IOCSTATUS_SCSI_TASK_TERMINATED ||
iocstatus == MPI_IOCSTATUS_SCSI_IOC_TERMINATED ||
iocstatus == MPI_IOCSTATUS_SUCCESS)
retval = 0;
else {
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"TaskMgmt failed\n", ioc->name));
retval = -1; /* return failure */
}
tm_done:
mutex_unlock(&ioc->taskmgmt_cmds.mutex);
CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
return retval;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* mptctl_timeout_expired
*
 *	An interrupt was expected, but the wait timed out.
*
*/
static void
mptctl_timeout_expired(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
{
unsigned long flags;
int ret_val = -1;
SCSIIORequest_t *scsi_req = (SCSIIORequest_t *) mf;
u8 function = mf->u.hdr.Function;
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": %s\n",
ioc->name, __func__));
if (mpt_fwfault_debug)
mpt_halt_firmware(ioc);
spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
if (ioc->ioc_reset_in_progress) {
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status)
mpt_free_msg_frame(ioc, mf);
return;
}
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status)
if (ioc->bus_type == SAS) {
if (function == MPI_FUNCTION_SCSI_IO_REQUEST)
ret_val = mptctl_do_taskmgmt(ioc,
MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
scsi_req->Bus, scsi_req->TargetID);
else if (function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)
ret_val = mptctl_do_taskmgmt(ioc,
MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
scsi_req->Bus, 0);
if (!ret_val)
return;
} else {
if ((function == MPI_FUNCTION_SCSI_IO_REQUEST) ||
(function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH))
ret_val = mptctl_do_taskmgmt(ioc,
MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
scsi_req->Bus, 0);
if (!ret_val)
return;
}
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling Reset! \n",
ioc->name));
mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
mpt_free_msg_frame(ioc, mf);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* mptctl_ioc_reset
*
 *	Clean-up functionality. Used only if the FW has been
 *	reloaded due to an IOC reset.
*
*/
static int
mptctl_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
{
switch(reset_phase) {
case MPT_IOC_SETUP_RESET:
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__));
break;
case MPT_IOC_PRE_RESET:
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__));
break;
case MPT_IOC_POST_RESET:
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"%s: MPT_IOC_POST_RESET\n", ioc->name, __func__));
if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_PENDING) {
ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_DID_IOCRESET;
complete(&ioc->ioctl_cmds.done);
}
break;
default:
break;
}
return 1;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* ASYNC Event Notification Support */
static int
mptctl_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
{
u8 event;
event = le32_to_cpu(pEvReply->Event) & 0xFF;
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s() called\n",
ioc->name, __func__));
if(async_queue == NULL)
return 1;
/* Raise SIGIO for persistent events.
* TODO - this define is not in MPI spec yet,
* but they plan to set it to 0x21
*/
if (event == 0x21) {
ioc->aen_event_read_flag=1;
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Raised SIGIO to application\n",
ioc->name));
devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Raised SIGIO to application\n", ioc->name));
kill_fasync(&async_queue, SIGIO, POLL_IN);
return 1;
}
/* This flag is set after SIGIO was raised, and
* remains set until the application has read
* the event log via ioctl=MPTEVENTREPORT
*/
if(ioc->aen_event_read_flag)
return 1;
/* Signal only for the events that are
* requested for by the application
*/
if (ioc->events && (ioc->eventTypes & ( 1 << event))) {
ioc->aen_event_read_flag=1;
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Raised SIGIO to application\n", ioc->name));
devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Raised SIGIO to application\n", ioc->name));
kill_fasync(&async_queue, SIGIO, POLL_IN);
}
return 1;
}
static int
mptctl_fasync(int fd, struct file *filep, int mode)
{
MPT_ADAPTER *ioc;
int ret;
mutex_lock(&mpctl_mutex);
list_for_each_entry(ioc, &ioc_list, list)
ioc->aen_event_read_flag=0;
ret = fasync_helper(fd, filep, mode, &async_queue);
mutex_unlock(&mpctl_mutex);
return ret;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* MPT ioctl handler
* cmd - specify the particular IOCTL command to be issued
* arg - data specific to the command. Must not be null.
*/
static long
__mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
mpt_ioctl_header __user *uhdr = (void __user *) arg;
mpt_ioctl_header khdr;
unsigned iocnumX;
int nonblock = (file->f_flags & O_NONBLOCK);
int ret;
MPT_ADAPTER *iocp = NULL;
if (copy_from_user(&khdr, uhdr, sizeof(khdr))) {
printk(KERN_ERR MYNAM "%s::mptctl_ioctl() @%d - "
"Unable to copy mpt_ioctl_header data @ %p\n",
__FILE__, __LINE__, uhdr);
return -EFAULT;
}
ret = -ENXIO; /* (-6) No such device or address */
/* Verify intended MPT adapter - set iocnumX and the adapter
* pointer (iocp)
*/
iocnumX = khdr.iocnum & 0xFF;
if ((mpt_verify_adapter(iocnumX, &iocp) < 0) || (iocp == NULL))
return -ENODEV;
if (!iocp->active) {
printk(KERN_DEBUG MYNAM "%s::mptctl_ioctl() @%d - Controller disabled.\n",
__FILE__, __LINE__);
return -EFAULT;
}
/* Handle those commands that are just returning
* information stored in the driver.
* These commands should never time out and are unaffected
* by TM and FW reloads.
*/
if ((cmd & ~IOCSIZE_MASK) == (MPTIOCINFO & ~IOCSIZE_MASK)) {
return mptctl_getiocinfo(iocp, arg, _IOC_SIZE(cmd));
} else if (cmd == MPTTARGETINFO) {
return mptctl_gettargetinfo(iocp, arg);
} else if (cmd == MPTTEST) {
return mptctl_readtest(iocp, arg);
} else if (cmd == MPTEVENTQUERY) {
return mptctl_eventquery(iocp, arg);
} else if (cmd == MPTEVENTENABLE) {
return mptctl_eventenable(iocp, arg);
} else if (cmd == MPTEVENTREPORT) {
return mptctl_eventreport(iocp, arg);
} else if (cmd == MPTFWREPLACE) {
return mptctl_replace_fw(iocp, arg);
}
/* All of these commands require an interrupt or
* are unknown/illegal.
*/
if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0)
return ret;
if (cmd == MPTFWDOWNLOAD)
ret = mptctl_fw_download(iocp, arg);
else if (cmd == MPTCOMMAND)
ret = mptctl_mpt_command(iocp, arg);
else if (cmd == MPTHARDRESET)
ret = mptctl_do_reset(iocp, arg);
else if ((cmd & ~IOCSIZE_MASK) == (HP_GETHOSTINFO & ~IOCSIZE_MASK))
ret = mptctl_hp_hostinfo(iocp, arg, _IOC_SIZE(cmd));
else if (cmd == HP_GETTARGETINFO)
ret = mptctl_hp_targetinfo(iocp, arg);
else
ret = -EINVAL;
mutex_unlock(&iocp->ioctl_cmds.mutex);
return ret;
}
static long
mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
long ret;
mutex_lock(&mpctl_mutex);
ret = __mptctl_ioctl(file, cmd, arg);
mutex_unlock(&mpctl_mutex);
return ret;
}
static int mptctl_do_reset(MPT_ADAPTER *iocp, unsigned long arg)
{
struct mpt_ioctl_diag_reset __user *urinfo = (void __user *) arg;
struct mpt_ioctl_diag_reset krinfo;
if (copy_from_user(&krinfo, urinfo, sizeof(struct mpt_ioctl_diag_reset))) {
printk(KERN_ERR MYNAM "%s@%d::mptctl_do_reset - "
"Unable to copy mpt_ioctl_diag_reset struct @ %p\n",
__FILE__, __LINE__, urinfo);
return -EFAULT;
}
dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "mptctl_do_reset called.\n",
iocp->name));
if (mpt_HardResetHandler(iocp, CAN_SLEEP) != 0) {
printk (MYIOC_s_ERR_FMT "%s@%d::mptctl_do_reset - reset failed.\n",
iocp->name, __FILE__, __LINE__);
return -1;
}
return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* MPT FW download function. Cast the arg into the mpt_fw_xfer structure.
* This structure contains: iocnum, firmware length (bytes),
* pointer to user space memory where the fw image is stored.
*
* Outputs: None.
* Return: 0 if successful
* -EFAULT if data unavailable
* -ENXIO if no such device
* -EAGAIN if resource problem
* -ENOMEM if no memory for SGE
* -EMLINK if too many chain buffers required
* -EBADRQC if adapter does not support FW download
* -EBUSY if adapter is busy
* -ENOMSG if FW upload returned bad status
*/
static int
mptctl_fw_download(MPT_ADAPTER *iocp, unsigned long arg)
{
struct mpt_fw_xfer __user *ufwdl = (void __user *) arg;
struct mpt_fw_xfer kfwdl;
if (copy_from_user(&kfwdl, ufwdl, sizeof(struct mpt_fw_xfer))) {
printk(KERN_ERR MYNAM "%s@%d::_ioctl_fwdl - "
"Unable to copy mpt_fw_xfer struct @ %p\n",
__FILE__, __LINE__, ufwdl);
return -EFAULT;
}
return mptctl_do_fw_download(iocp, kfwdl.bufp, kfwdl.fwlen);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* FW Download engine.
* Outputs: None.
* Return: 0 if successful
* -EFAULT if data unavailable
* -ENXIO if no such device
* -EAGAIN if resource problem
* -ENOMEM if no memory for SGE
* -EMLINK if too many chain buffers required
* -EBADRQC if adapter does not support FW download
* -EBUSY if adapter is busy
* -ENOMSG if FW upload returned bad status
*/
static int
mptctl_do_fw_download(MPT_ADAPTER *iocp, char __user *ufwbuf, size_t fwlen)
{
FWDownload_t *dlmsg;
MPT_FRAME_HDR *mf;
FWDownloadTCSGE_t *ptsge;
MptSge_t *sgl, *sgIn;
char *sgOut;
struct buflist *buflist;
struct buflist *bl;
dma_addr_t sgl_dma;
int ret;
int numfrags = 0;
int maxfrags;
int n = 0;
u32 sgdir;
u32 nib;
int fw_bytes_copied = 0;
int i;
int sge_offset = 0;
u16 iocstat;
pFWDownloadReply_t ReplyMsg = NULL;
unsigned long timeleft;
/* Valid device. Get a message frame and construct the FW download message.
*/
if ((mf = mpt_get_msg_frame(mptctl_id, iocp)) == NULL)
return -EAGAIN;
dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT
"mptctl_do_fwdl called. mptctl_id = %xh.\n", iocp->name, mptctl_id));
dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: kfwdl.bufp = %p\n",
iocp->name, ufwbuf));
dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: kfwdl.fwlen = %d\n",
iocp->name, (int)fwlen));
dlmsg = (FWDownload_t*) mf;
ptsge = (FWDownloadTCSGE_t *) &dlmsg->SGL;
sgOut = (char *) (ptsge + 1);
/*
* Construct f/w download request
*/
dlmsg->ImageType = MPI_FW_DOWNLOAD_ITYPE_FW;
dlmsg->Reserved = 0;
dlmsg->ChainOffset = 0;
dlmsg->Function = MPI_FUNCTION_FW_DOWNLOAD;
dlmsg->Reserved1[0] = dlmsg->Reserved1[1] = dlmsg->Reserved1[2] = 0;
if (iocp->facts.MsgVersion >= MPI_VERSION_01_05)
dlmsg->MsgFlags = MPI_FW_DOWNLOAD_MSGFLGS_LAST_SEGMENT;
else
dlmsg->MsgFlags = 0;
/* Set up the Transaction SGE.
*/
ptsge->Reserved = 0;
ptsge->ContextSize = 0;
ptsge->DetailsLength = 12;
ptsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT;
ptsge->Reserved_0100_Checksum = 0;
ptsge->ImageOffset = 0;
ptsge->ImageSize = cpu_to_le32(fwlen);
/* Add the SGL
*/
/*
* Need to kmalloc area(s) for holding firmware image bytes.
	 * But we need to do it piecemeal, using a proper
* scatter gather list (with 128kB MAX hunks).
*
* A practical limit here might be # of sg hunks that fit into
* a single IOC request frame; 12 or 8 (see below), so:
	 *	For FC9xx: 12 x 128kB == 1.5 MB (max)
	 *	For C1030:  8 x 128kB == 1 MB (max)
* We could support chaining, but things get ugly(ier:)
*
* Set the sge_offset to the start of the sgl (bytes).
*/
sgdir = 0x04000000; /* IOC will READ from sys mem */
sge_offset = sizeof(MPIHeader_t) + sizeof(FWDownloadTCSGE_t);
if ((sgl = kbuf_alloc_2_sgl(fwlen, sgdir, sge_offset,
&numfrags, &buflist, &sgl_dma, iocp)) == NULL)
return -ENOMEM;
/*
* We should only need SGL with 2 simple_32bit entries (up to 256 kB)
* for FC9xx f/w image, but calculate max number of sge hunks
* we can fit into a request frame, and limit ourselves to that.
* (currently no chain support)
* maxfrags = (Request Size - FWdownload Size ) / Size of 32 bit SGE
* Request maxfrags
* 128 12
* 96 8
* 64 4
*/
maxfrags = (iocp->req_sz - sizeof(MPIHeader_t) -
sizeof(FWDownloadTCSGE_t))
/ iocp->SGE_size;
if (numfrags > maxfrags) {
ret = -EMLINK;
goto fwdl_out;
}
dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: sgl buffer = %p, sgfrags = %d\n",
iocp->name, sgl, numfrags));
/*
* Parse SG list, copying sgl itself,
* plus f/w image hunks from user space as we go...
*/
ret = -EFAULT;
sgIn = sgl;
bl = buflist;
for (i=0; i < numfrags; i++) {
		/* Get the SGE type: 0 - TCSGE, 3 - Chain, 1 - Simple SGE.
		 * Skip everything but Simple. If Simple, copy from
		 * user space into kernel space.
		 * Note: we should not see anything but Simple here,
		 * as Chain SGEs are illegal.
		 */
nib = (sgIn->FlagsLength & 0x30000000) >> 28;
if (nib == 0 || nib == 3) {
;
} else if (sgIn->Address) {
iocp->add_sge(sgOut, sgIn->FlagsLength, sgIn->Address);
n++;
if (copy_from_user(bl->kptr, ufwbuf+fw_bytes_copied, bl->len)) {
printk(MYIOC_s_ERR_FMT "%s@%d::_ioctl_fwdl - "
"Unable to copy f/w buffer hunk#%d @ %p\n",
iocp->name, __FILE__, __LINE__, n, ufwbuf);
goto fwdl_out;
}
fw_bytes_copied += bl->len;
}
sgIn++;
bl++;
sgOut += iocp->SGE_size;
}
DBG_DUMP_FW_DOWNLOAD(iocp, (u32 *)mf, numfrags);
/*
* Finally, perform firmware download.
*/
ReplyMsg = NULL;
SET_MGMT_MSG_CONTEXT(iocp->ioctl_cmds.msg_context, dlmsg->MsgContext);
INITIALIZE_MGMT_STATUS(iocp->ioctl_cmds.status)
mpt_put_msg_frame(mptctl_id, iocp, mf);
/* Now wait for the command to complete */
retry_wait:
timeleft = wait_for_completion_timeout(&iocp->ioctl_cmds.done, HZ*60);
if (!(iocp->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
ret = -ETIME;
printk(MYIOC_s_WARN_FMT "%s: failed\n", iocp->name, __func__);
if (iocp->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) {
mpt_free_msg_frame(iocp, mf);
goto fwdl_out;
}
if (!timeleft) {
printk(MYIOC_s_WARN_FMT
"FW download timeout, doorbell=0x%08x\n",
iocp->name, mpt_GetIocState(iocp, 0));
mptctl_timeout_expired(iocp, mf);
} else
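			/* Completion fired without a good status flag set;
			 * go back and wait again.
			 */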
goto retry_wait;
goto fwdl_out;
}
if (!(iocp->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
printk(MYIOC_s_WARN_FMT "%s: failed\n", iocp->name, __func__);
mpt_free_msg_frame(iocp, mf);
ret = -ENODATA;
goto fwdl_out;
}
if (sgl)
kfree_sgl(sgl, sgl_dma, buflist, iocp);
ReplyMsg = (pFWDownloadReply_t)iocp->ioctl_cmds.reply;
iocstat = le16_to_cpu(ReplyMsg->IOCStatus) & MPI_IOCSTATUS_MASK;
if (iocstat == MPI_IOCSTATUS_SUCCESS) {
printk(MYIOC_s_INFO_FMT "F/W update successful!\n", iocp->name);
return 0;
} else if (iocstat == MPI_IOCSTATUS_INVALID_FUNCTION) {
printk(MYIOC_s_WARN_FMT "Hmmm... F/W download not supported!?!\n",
iocp->name);
printk(MYIOC_s_WARN_FMT "(time to go bang on somebodies door)\n",
iocp->name);
return -EBADRQC;
} else if (iocstat == MPI_IOCSTATUS_BUSY) {
printk(MYIOC_s_WARN_FMT "IOC_BUSY!\n", iocp->name);
printk(MYIOC_s_WARN_FMT "(try again later?)\n", iocp->name);
return -EBUSY;
} else {
printk(MYIOC_s_WARN_FMT "ioctl_fwdl() returned [bad] status = %04xh\n",
iocp->name, iocstat);
printk(MYIOC_s_WARN_FMT "(bad VooDoo)\n", iocp->name);
return -ENOMSG;
}
return 0;
fwdl_out:
CLEAR_MGMT_STATUS(iocp->ioctl_cmds.status);
SET_MGMT_MSG_CONTEXT(iocp->ioctl_cmds.msg_context, 0);
kfree_sgl(sgl, sgl_dma, buflist, iocp);
return ret;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* SGE Allocation routine
*
* Inputs: bytes - number of bytes to be transferred
* sgdir - data direction
* sge_offset - offset (in bytes) from the start of the request
* frame to the first SGE
* ioc - pointer to the mptadapter
 * Outputs:	frags - number of scatter gather elements
 *		blp - pointer to the buflist pointer
 *		sglbuf_dma - pointer to the (dma) sgl
 * Returns:	NULL if it fails,
 *		pointer to the (virtual) sgl if successful.
*/
static MptSge_t *
kbuf_alloc_2_sgl(int bytes, u32 sgdir, int sge_offset, int *frags,
struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc)
{
MptSge_t *sglbuf = NULL; /* pointer to array of SGE */
/* and chain buffers */
struct buflist *buflist = NULL; /* kernel routine */
MptSge_t *sgl;
int numfrags = 0;
int fragcnt = 0;
int alloc_sz = min(bytes,MAX_KMALLOC_SZ); // avoid kernel warning msg!
int bytes_allocd = 0;
int this_alloc;
dma_addr_t pa; // phys addr
int i, buflist_ent;
int sg_spill = MAX_FRAGS_SPILL1;
int dir;
if (bytes < 0)
return NULL;
/* initialization */
*frags = 0;
*blp = NULL;
/* Allocate and initialize an array of kernel
* structures for the SG elements.
*/
i = MAX_SGL_BYTES / 8;
buflist = kzalloc(i, GFP_USER);
if (!buflist)
return NULL;
buflist_ent = 0;
/* Allocate a single block of memory to store the sg elements and
* the chain buffers. The calling routine is responsible for
* copying the data in this array into the correct place in the
* request and chain buffers.
*/
sglbuf = dma_alloc_coherent(&ioc->pcidev->dev, MAX_SGL_BYTES,
sglbuf_dma, GFP_KERNEL);
if (sglbuf == NULL)
goto free_and_fail;
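	/* The 0x04000000 bit in sgdir means "IOC reads from host memory",
	 * i.e. DMA_TO_DEVICE from the CPU's point of view (see the FW
	 * download path above, which sets this bit).
	 */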
if (sgdir & 0x04000000)
dir = DMA_TO_DEVICE;
else
dir = DMA_FROM_DEVICE;
/* At start:
* sgl = sglbuf = point to beginning of sg buffer
* buflist_ent = 0 = first kernel structure
* sg_spill = number of SGE that can be written before the first
* chain element.
*
*/
sgl = sglbuf;
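	/* One SGE slot is held back (the room a chain element would need);
	 * chaining itself is rejected below with "Chain required!".
	 */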
sg_spill = ((ioc->req_sz - sge_offset)/ioc->SGE_size) - 1;
while (bytes_allocd < bytes) {
this_alloc = min(alloc_sz, bytes-bytes_allocd);
buflist[buflist_ent].len = this_alloc;
buflist[buflist_ent].kptr = dma_alloc_coherent(&ioc->pcidev->dev,
this_alloc,
&pa, GFP_KERNEL);
if (buflist[buflist_ent].kptr == NULL) {
alloc_sz = alloc_sz / 2;
if (alloc_sz == 0) {
printk(MYIOC_s_WARN_FMT "-SG: No can do - "
"not enough memory! :-(\n", ioc->name);
printk(MYIOC_s_WARN_FMT "-SG: (freeing %d frags)\n",
ioc->name, numfrags);
goto free_and_fail;
}
continue;
} else {
dma_addr_t dma_addr;
bytes_allocd += this_alloc;
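			/* Simple SGE: element type in the top byte
			 * (0x10 = simple element), direction bit from
			 * sgdir, length in the low 24 bits.
			 */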
sgl->FlagsLength = (0x10000000|sgdir|this_alloc);
dma_addr = dma_map_single(&ioc->pcidev->dev,
buflist[buflist_ent].kptr,
this_alloc, dir);
sgl->Address = dma_addr;
fragcnt++;
numfrags++;
sgl++;
buflist_ent++;
}
if (bytes_allocd >= bytes)
break;
/* Need to chain? */
if (fragcnt == sg_spill) {
printk(MYIOC_s_WARN_FMT
"-SG: No can do - " "Chain required! :-(\n", ioc->name);
printk(MYIOC_s_WARN_FMT "(freeing %d frags)\n", ioc->name, numfrags);
goto free_and_fail;
}
/* overflow check... */
if (numfrags*8 > MAX_SGL_BYTES){
/* GRRRRR... */
printk(MYIOC_s_WARN_FMT "-SG: No can do - "
"too many SG frags! :-(\n", ioc->name);
printk(MYIOC_s_WARN_FMT "-SG: (freeing %d frags)\n",
ioc->name, numfrags);
goto free_and_fail;
}
}
	/* Last SGE fixup: set the LastElement, EndOfBuffer and EndOfList bits */
sgl[-1].FlagsLength |= 0xC1000000;
*frags = numfrags;
*blp = buflist;
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "-SG: kbuf_alloc_2_sgl() - "
"%d SG frags generated!\n", ioc->name, numfrags));
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "-SG: kbuf_alloc_2_sgl() - "
"last (big) alloc_sz=%d\n", ioc->name, alloc_sz));
return sglbuf;
free_and_fail:
if (sglbuf != NULL) {
for (i = 0; i < numfrags; i++) {
dma_addr_t dma_addr;
u8 *kptr;
int len;
if ((sglbuf[i].FlagsLength >> 24) == 0x30)
continue;
dma_addr = sglbuf[i].Address;
kptr = buflist[i].kptr;
len = buflist[i].len;
dma_free_coherent(&ioc->pcidev->dev, len, kptr,
dma_addr);
}
dma_free_coherent(&ioc->pcidev->dev, MAX_SGL_BYTES, sglbuf,
*sglbuf_dma);
}
kfree(buflist);
return NULL;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* Routine to free the SGL elements.
*/
static void
kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma, struct buflist *buflist, MPT_ADAPTER *ioc)
{
MptSge_t *sg = sgl;
struct buflist *bl = buflist;
u32 nib;
int dir;
int n = 0;
if (sg->FlagsLength & 0x04000000)
dir = DMA_TO_DEVICE;
else
dir = DMA_FROM_DEVICE;
nib = (sg->FlagsLength & 0xF0000000) >> 28;
while (! (nib & 0x4)) { /* eob */
/* skip ignore/chain. */
if (nib == 0 || nib == 3) {
;
} else if (sg->Address) {
dma_addr_t dma_addr;
void *kptr;
int len;
dma_addr = sg->Address;
kptr = bl->kptr;
len = bl->len;
dma_unmap_single(&ioc->pcidev->dev, dma_addr, len,
dir);
dma_free_coherent(&ioc->pcidev->dev, len, kptr,
dma_addr);
n++;
}
sg++;
bl++;
nib = (le32_to_cpu(sg->FlagsLength) & 0xF0000000) >> 28;
}
/* we're at eob! */
if (sg->Address) {
dma_addr_t dma_addr;
void *kptr;
int len;
dma_addr = sg->Address;
kptr = bl->kptr;
len = bl->len;
dma_unmap_single(&ioc->pcidev->dev, dma_addr, len, dir);
dma_free_coherent(&ioc->pcidev->dev, len, kptr, dma_addr);
n++;
}
dma_free_coherent(&ioc->pcidev->dev, MAX_SGL_BYTES, sgl, sgl_dma);
kfree(buflist);
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "-SG: Free'd 1 SGL buf + %d kbufs!\n",
ioc->name, n));
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* mptctl_getiocinfo - Query the host adapter for IOC information.
* @arg: User space argument
*
* Outputs: None.
* Return: 0 if successful
* -EFAULT if data unavailable
* -ENODEV if no such device/adapter
*/
static int
mptctl_getiocinfo (MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size)
{
struct mpt_ioctl_iocinfo __user *uarg = (void __user *) arg;
struct mpt_ioctl_iocinfo *karg;
struct pci_dev *pdev;
unsigned int port;
int cim_rev;
struct scsi_device *sdev;
VirtDevice *vdevice;
	/* Adding the PCI INFO results in unaligned access on
	 * IA64 and SPARC, so long was reset to int. Return no
	 * PCI data for the obsolete format.
	 */
if (data_size == sizeof(struct mpt_ioctl_iocinfo_rev0))
cim_rev = 0;
else if (data_size == sizeof(struct mpt_ioctl_iocinfo_rev1))
cim_rev = 1;
else if (data_size == sizeof(struct mpt_ioctl_iocinfo))
cim_rev = 2;
else if (data_size == (sizeof(struct mpt_ioctl_iocinfo_rev0)+12))
cim_rev = 0; /* obsolete */
else
return -EFAULT;
karg = memdup_user(uarg, data_size);
if (IS_ERR(karg)) {
printk(KERN_ERR MYNAM "%s@%d::mpt_ioctl_iocinfo() - memdup_user returned error [%ld]\n",
__FILE__, __LINE__, PTR_ERR(karg));
return PTR_ERR(karg);
}
/* Verify the data transfer size is correct. */
if (karg->hdr.maxDataSize != data_size) {
printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_getiocinfo - "
"Structure size mismatch. Command not completed.\n",
ioc->name, __FILE__, __LINE__);
kfree(karg);
return -EFAULT;
}
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_getiocinfo called.\n",
ioc->name));
/* Fill in the data and return the structure to the calling
* program
*/
if (ioc->bus_type == SAS)
karg->adapterType = MPT_IOCTL_INTERFACE_SAS;
else if (ioc->bus_type == FC)
karg->adapterType = MPT_IOCTL_INTERFACE_FC;
else
karg->adapterType = MPT_IOCTL_INTERFACE_SCSI;
if (karg->hdr.port > 1) {
kfree(karg);
return -EINVAL;
}
port = karg->hdr.port;
karg->port = port;
pdev = (struct pci_dev *) ioc->pcidev;
karg->pciId = pdev->device;
karg->hwRev = pdev->revision;
karg->subSystemDevice = pdev->subsystem_device;
karg->subSystemVendor = pdev->subsystem_vendor;
if (cim_rev == 1) {
/* Get the PCI bus, device, and function numbers for the IOC
*/
karg->pciInfo.u.bits.busNumber = pdev->bus->number;
karg->pciInfo.u.bits.deviceNumber = PCI_SLOT( pdev->devfn );
karg->pciInfo.u.bits.functionNumber = PCI_FUNC( pdev->devfn );
} else if (cim_rev == 2) {
/* Get the PCI bus, device, function and segment ID numbers
for the IOC */
karg->pciInfo.u.bits.busNumber = pdev->bus->number;
karg->pciInfo.u.bits.deviceNumber = PCI_SLOT( pdev->devfn );
karg->pciInfo.u.bits.functionNumber = PCI_FUNC( pdev->devfn );
karg->pciInfo.segmentID = pci_domain_nr(pdev->bus);
}
/* Get number of devices
*/
karg->numDevices = 0;
if (ioc->sh) {
shost_for_each_device(sdev, ioc->sh) {
vdevice = sdev->hostdata;
if (vdevice == NULL || vdevice->vtarget == NULL)
continue;
if (vdevice->vtarget->tflags &
MPT_TARGET_FLAGS_RAID_COMPONENT)
continue;
karg->numDevices++;
}
}
/* Set the BIOS and FW Version
*/
karg->FWVersion = ioc->facts.FWVersion.Word;
karg->BIOSVersion = ioc->biosVersion;
/* Set the Version Strings.
*/
strncpy (karg->driverVersion, MPT_LINUX_PACKAGE_NAME, MPT_IOCTL_VERSION_LENGTH);
karg->driverVersion[MPT_IOCTL_VERSION_LENGTH-1]='\0';
karg->busChangeEvent = 0;
karg->hostId = ioc->pfacts[port].PortSCSIID;
karg->rsvd[0] = karg->rsvd[1] = 0;
/* Copy the data from kernel memory to user memory
*/
if (copy_to_user((char __user *)arg, karg, data_size)) {
printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_getiocinfo - "
"Unable to write out mpt_ioctl_iocinfo struct @ %p\n",
ioc->name, __FILE__, __LINE__, uarg);
kfree(karg);
return -EFAULT;
}
kfree(karg);
return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* mptctl_gettargetinfo - Query the host adapter for target information.
* @arg: User space argument
*
* Outputs: None.
* Return: 0 if successful
* -EFAULT if data unavailable
* -ENODEV if no such device/adapter
*/
static int
mptctl_gettargetinfo (MPT_ADAPTER *ioc, unsigned long arg)
{
struct mpt_ioctl_targetinfo __user *uarg = (void __user *) arg;
struct mpt_ioctl_targetinfo karg;
VirtDevice *vdevice;
char *pmem;
int *pdata;
int numDevices = 0;
int lun;
int maxWordsLeft;
int numBytes;
struct scsi_device *sdev;
if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_targetinfo))) {
printk(KERN_ERR MYNAM "%s@%d::mptctl_gettargetinfo - "
"Unable to read in mpt_ioctl_targetinfo struct @ %p\n",
__FILE__, __LINE__, uarg);
return -EFAULT;
}
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_gettargetinfo called.\n",
ioc->name));
numBytes = karg.hdr.maxDataSize - sizeof(mpt_ioctl_header);
maxWordsLeft = numBytes/sizeof(int);
if (maxWordsLeft <= 0) {
printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_gettargetinfo() - no memory available!\n",
ioc->name, __FILE__, __LINE__);
return -ENOMEM;
}
/* Fill in the data and return the structure to the calling
* program
*/
	/* struct mpt_ioctl_targetinfo does not contain sufficient space
	 * for the target structures, so there is not enough stack space
	 * to build the result when the IOCTL is called. Allocate memory,
	 * populate it, copy it back to the user, then free the memory.
* targetInfo format:
* bits 31-24: reserved
* 23-16: LUN
* 15- 8: Bus Number
* 7- 0: Target ID
*/
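	/* Example: bus 1, target 5, LUN 0x80 (a RAID volume) packs
	 * to 0x00800105.
	 */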
pmem = kzalloc(numBytes, GFP_KERNEL);
if (!pmem) {
printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_gettargetinfo() - no memory available!\n",
ioc->name, __FILE__, __LINE__);
return -ENOMEM;
}
pdata = (int *) pmem;
/* Get number of devices
*/
if (ioc->sh){
shost_for_each_device(sdev, ioc->sh) {
if (!maxWordsLeft)
continue;
vdevice = sdev->hostdata;
if (vdevice == NULL || vdevice->vtarget == NULL)
continue;
if (vdevice->vtarget->tflags &
MPT_TARGET_FLAGS_RAID_COMPONENT)
continue;
lun = (vdevice->vtarget->raidVolume) ? 0x80 : vdevice->lun;
*pdata = (((u8)lun << 16) + (vdevice->vtarget->channel << 8) +
(vdevice->vtarget->id ));
pdata++;
numDevices++;
--maxWordsLeft;
}
}
karg.numDevices = numDevices;
/* Copy part of the data from kernel memory to user memory
*/
if (copy_to_user((char __user *)arg, &karg,
sizeof(struct mpt_ioctl_targetinfo))) {
printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_gettargetinfo - "
"Unable to write out mpt_ioctl_targetinfo struct @ %p\n",
ioc->name, __FILE__, __LINE__, uarg);
kfree(pmem);
return -EFAULT;
}
/* Copy the remaining data from kernel memory to user memory
*/
if (copy_to_user(uarg->targetInfo, pmem, numBytes)) {
printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_gettargetinfo - "
"Unable to write out mpt_ioctl_targetinfo struct @ %p\n",
ioc->name, __FILE__, __LINE__, pdata);
kfree(pmem);
return -EFAULT;
}
kfree(pmem);
return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* MPT IOCTL Test function.
*
* Outputs: None.
* Return: 0 if successful
* -EFAULT if data unavailable
* -ENODEV if no such device/adapter
*/
static int
mptctl_readtest (MPT_ADAPTER *ioc, unsigned long arg)
{
struct mpt_ioctl_test __user *uarg = (void __user *) arg;
struct mpt_ioctl_test karg;
if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_test))) {
printk(KERN_ERR MYNAM "%s@%d::mptctl_readtest - "
"Unable to read in mpt_ioctl_test struct @ %p\n",
__FILE__, __LINE__, uarg);
return -EFAULT;
}
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_readtest called.\n",
ioc->name));
/* Fill in the data and return the structure to the calling
* program
*/
#ifdef MFCNT
karg.chip_type = ioc->mfcnt;
#else
karg.chip_type = ioc->pcidev->device;
#endif
strncpy (karg.name, ioc->name, MPT_MAX_NAME);
karg.name[MPT_MAX_NAME-1]='\0';
strncpy (karg.product, ioc->prod_name, MPT_PRODUCT_LENGTH);
karg.product[MPT_PRODUCT_LENGTH-1]='\0';
/* Copy the data from kernel memory to user memory
*/
if (copy_to_user((char __user *)arg, &karg, sizeof(struct mpt_ioctl_test))) {
printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_readtest - "
"Unable to write out mpt_ioctl_test struct @ %p\n",
ioc->name, __FILE__, __LINE__, uarg);
return -EFAULT;
}
return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* mptctl_eventquery - Query the host adapter for the event types
* that are being logged.
* @arg: User space argument
*
* Outputs: None.
* Return: 0 if successful
* -EFAULT if data unavailable
* -ENODEV if no such device/adapter
*/
static int
mptctl_eventquery (MPT_ADAPTER *ioc, unsigned long arg)
{
struct mpt_ioctl_eventquery __user *uarg = (void __user *) arg;
struct mpt_ioctl_eventquery karg;
if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventquery))) {
printk(KERN_ERR MYNAM "%s@%d::mptctl_eventquery - "
"Unable to read in mpt_ioctl_eventquery struct @ %p\n",
__FILE__, __LINE__, uarg);
return -EFAULT;
}
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventquery called.\n",
ioc->name));
karg.eventEntries = MPTCTL_EVENT_LOG_SIZE;
karg.eventTypes = ioc->eventTypes;
/* Copy the data from kernel memory to user memory
*/
if (copy_to_user((char __user *)arg, &karg, sizeof(struct mpt_ioctl_eventquery))) {
printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_eventquery - "
"Unable to write out mpt_ioctl_eventquery struct @ %p\n",
ioc->name, __FILE__, __LINE__, uarg);
return -EFAULT;
}
return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mptctl_eventenable (MPT_ADAPTER *ioc, unsigned long arg)
{
struct mpt_ioctl_eventenable __user *uarg = (void __user *) arg;
struct mpt_ioctl_eventenable karg;
if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventenable))) {
printk(KERN_ERR MYNAM "%s@%d::mptctl_eventenable - "
"Unable to read in mpt_ioctl_eventenable struct @ %p\n",
__FILE__, __LINE__, uarg);
return -EFAULT;
}
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventenable called.\n",
ioc->name));
if (ioc->events == NULL) {
/* Have not yet allocated memory - do so now.
*/
int sz = MPTCTL_EVENT_LOG_SIZE * sizeof(MPT_IOCTL_EVENTS);
ioc->events = kzalloc(sz, GFP_KERNEL);
if (!ioc->events) {
printk(MYIOC_s_ERR_FMT
": ERROR - Insufficient memory to add adapter!\n",
ioc->name);
return -ENOMEM;
}
ioc->alloc_total += sz;
ioc->eventContext = 0;
}
/* Update the IOC event logging flag.
*/
ioc->eventTypes = karg.eventTypes;
return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mptctl_eventreport (MPT_ADAPTER *ioc, unsigned long arg)
{
struct mpt_ioctl_eventreport __user *uarg = (void __user *) arg;
struct mpt_ioctl_eventreport karg;
int numBytes, maxEvents, max;
if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventreport))) {
printk(KERN_ERR MYNAM "%s@%d::mptctl_eventreport - "
"Unable to read in mpt_ioctl_eventreport struct @ %p\n",
__FILE__, __LINE__, uarg);
return -EFAULT;
}
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventreport called.\n",
ioc->name));
numBytes = karg.hdr.maxDataSize - sizeof(mpt_ioctl_header);
maxEvents = numBytes/sizeof(MPT_IOCTL_EVENTS);
max = MPTCTL_EVENT_LOG_SIZE < maxEvents ? MPTCTL_EVENT_LOG_SIZE : maxEvents;
/* If fewer than 1 event is requested, there must have
* been some type of error.
*/
if ((max < 1) || !ioc->events)
return -ENODATA;
/* reset this flag so SIGIO can restart */
ioc->aen_event_read_flag=0;
/* Copy the data from kernel memory to user memory
*/
numBytes = max * sizeof(MPT_IOCTL_EVENTS);
if (copy_to_user(uarg->eventData, ioc->events, numBytes)) {
printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_eventreport - "
"Unable to write out mpt_ioctl_eventreport struct @ %p\n",
ioc->name, __FILE__, __LINE__, ioc->events);
return -EFAULT;
}
return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mptctl_replace_fw (MPT_ADAPTER *ioc, unsigned long arg)
{
struct mpt_ioctl_replace_fw __user *uarg = (void __user *) arg;
struct mpt_ioctl_replace_fw karg;
int newFwSize;
if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_replace_fw))) {
printk(KERN_ERR MYNAM "%s@%d::mptctl_replace_fw - "
"Unable to read in mpt_ioctl_replace_fw struct @ %p\n",
__FILE__, __LINE__, uarg);
return -EFAULT;
}
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_replace_fw called.\n",
ioc->name));
/* If caching FW, Free the old FW image
*/
if (ioc->cached_fw == NULL)
return 0;
mpt_free_fw_memory(ioc);
/* Allocate memory for the new FW image
*/
newFwSize = ALIGN(karg.newImageSize, 4);
mpt_alloc_fw_memory(ioc, newFwSize);
if (ioc->cached_fw == NULL)
return -ENOMEM;
/* Copy the data from user memory to kernel space
*/
if (copy_from_user(ioc->cached_fw, uarg->newImage, newFwSize)) {
printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_replace_fw - "
"Unable to read in mpt_ioctl_replace_fw image "
"@ %p\n", ioc->name, __FILE__, __LINE__, uarg);
mpt_free_fw_memory(ioc);
return -EFAULT;
}
/* Update IOCFactsReply
*/
ioc->facts.FWImageSize = newFwSize;
return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* MPT IOCTL MPTCOMMAND function.
* Cast the arg into the mpt_ioctl_mpt_command structure.
*
* Outputs: None.
* Return: 0 if successful
* -EBUSY if previous command timeout and IOC reset is not complete.
* -EFAULT if data unavailable
* -ENODEV if no such device/adapter
* -ETIME if timer expires
* -ENOMEM if memory allocation error
*/
static int
mptctl_mpt_command (MPT_ADAPTER *ioc, unsigned long arg)
{
struct mpt_ioctl_command __user *uarg = (void __user *) arg;
struct mpt_ioctl_command karg;
int rc;
if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_command))) {
printk(KERN_ERR MYNAM "%s@%d::mptctl_mpt_command - "
"Unable to read in mpt_ioctl_command struct @ %p\n",
__FILE__, __LINE__, uarg);
return -EFAULT;
}
rc = mptctl_do_mpt_command (ioc, karg, &uarg->MF);
return rc;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Worker routine for the IOCTL MPTCOMMAND and MPTCOMMAND32 (sparc) commands.
*
* Outputs: None.
* Return: 0 if successful
* -EBUSY if previous command timeout and IOC reset is not complete.
* -EFAULT if data unavailable
* -ENODEV if no such device/adapter
* -ETIME if timer expires
* -ENOMEM if memory allocation error
* -EPERM if SCSI I/O and target is untagged
*/
static int
mptctl_do_mpt_command (MPT_ADAPTER *ioc, struct mpt_ioctl_command karg, void __user *mfPtr)
{
MPT_FRAME_HDR *mf = NULL;
MPIHeader_t *hdr;
char *psge;
struct buflist bufIn; /* data In buffer */
struct buflist bufOut; /* data Out buffer */
dma_addr_t dma_addr_in;
dma_addr_t dma_addr_out;
int sgSize = 0; /* Num SG elements */
int flagsLength;
int sz, rc = 0;
int msgContext;
u16 req_idx;
ulong timeout;
unsigned long timeleft;
struct scsi_device *sdev;
unsigned long flags;
u8 function;
/* bufIn and bufOut are used for user to kernel space transfers
*/
bufIn.kptr = bufOut.kptr = NULL;
bufIn.len = bufOut.len = 0;
spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
if (ioc->ioc_reset_in_progress) {
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
printk(KERN_ERR MYNAM "%s@%d::mptctl_do_mpt_command - "
"Busy with diagnostic reset\n", __FILE__, __LINE__);
return -EBUSY;
}
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
/* Basic sanity checks to prevent underflows or integer overflows */
if (karg.maxReplyBytes < 0 ||
karg.dataInSize < 0 ||
karg.dataOutSize < 0 ||
karg.dataSgeOffset < 0 ||
karg.maxSenseBytes < 0 ||
karg.dataSgeOffset > ioc->req_sz / 4)
return -EINVAL;
/* Verify that the final request frame will not be too large.
*/
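	/* dataSgeOffset is expressed in 32-bit words. */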
sz = karg.dataSgeOffset * 4;
if (karg.dataInSize > 0)
sz += ioc->SGE_size;
if (karg.dataOutSize > 0)
sz += ioc->SGE_size;
if (sz > ioc->req_sz) {
printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
"Request frame too large (%d) maximum (%d)\n",
ioc->name, __FILE__, __LINE__, sz, ioc->req_sz);
return -EFAULT;
}
/* Get a free request frame and save the message context.
*/
if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL)
return -EAGAIN;
hdr = (MPIHeader_t *) mf;
msgContext = le32_to_cpu(hdr->MsgContext);
req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
/* Copy the request frame
* Reset the saved message context.
* Request frame in user space
*/
if (copy_from_user(mf, mfPtr, karg.dataSgeOffset * 4)) {
printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
"Unable to read MF from mpt_ioctl_command struct @ %p\n",
ioc->name, __FILE__, __LINE__, mfPtr);
function = -1;
rc = -EFAULT;
goto done_free_mem;
}
hdr->MsgContext = cpu_to_le32(msgContext);
function = hdr->Function;
/* Verify that this request is allowed.
*/
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sending mpi function (0x%02X), req=%p\n",
ioc->name, hdr->Function, mf));
switch (function) {
case MPI_FUNCTION_IOC_FACTS:
case MPI_FUNCTION_PORT_FACTS:
karg.dataOutSize = karg.dataInSize = 0;
break;
case MPI_FUNCTION_CONFIG:
{
Config_t *config_frame;
config_frame = (Config_t *)mf;
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "\ttype=0x%02x ext_type=0x%02x "
"number=0x%02x action=0x%02x\n", ioc->name,
config_frame->Header.PageType,
config_frame->ExtPageType,
config_frame->Header.PageNumber,
config_frame->Action));
break;
}
case MPI_FUNCTION_FC_COMMON_TRANSPORT_SEND:
case MPI_FUNCTION_FC_EX_LINK_SRVC_SEND:
case MPI_FUNCTION_FW_UPLOAD:
case MPI_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
case MPI_FUNCTION_FW_DOWNLOAD:
case MPI_FUNCTION_FC_PRIMITIVE_SEND:
case MPI_FUNCTION_TOOLBOX:
case MPI_FUNCTION_SAS_IO_UNIT_CONTROL:
break;
case MPI_FUNCTION_SCSI_IO_REQUEST:
if (ioc->sh) {
SCSIIORequest_t *pScsiReq = (SCSIIORequest_t *) mf;
int qtag = MPI_SCSIIO_CONTROL_UNTAGGED;
int scsidir = 0;
int dataSize;
u32 id;
id = (ioc->devices_per_bus == 0) ? 256 : ioc->devices_per_bus;
if (pScsiReq->TargetID > id) {
printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
"Target ID out of bounds. \n",
ioc->name, __FILE__, __LINE__);
rc = -ENODEV;
goto done_free_mem;
}
if (pScsiReq->Bus >= ioc->number_of_buses) {
printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
"Target Bus out of bounds. \n",
ioc->name, __FILE__, __LINE__);
rc = -ENODEV;
goto done_free_mem;
}
pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH;
pScsiReq->MsgFlags |= mpt_msg_flags(ioc);
			/* Verify that the app has not requested more
			 * sense data than the driver can provide; if so,
			 * clamp it. Set the sense buffer low address and
			 * update the control field to specify the Q type.
			 */
if (karg.maxSenseBytes > MPT_SENSE_BUFFER_SIZE)
pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE;
else
pScsiReq->SenseBufferLength = karg.maxSenseBytes;
pScsiReq->SenseBufferLowAddr =
cpu_to_le32(ioc->sense_buf_low_dma
+ (req_idx * MPT_SENSE_BUFFER_ALLOC));
shost_for_each_device(sdev, ioc->sh) {
struct scsi_target *starget = scsi_target(sdev);
VirtTarget *vtarget = starget->hostdata;
if (vtarget == NULL)
continue;
if ((pScsiReq->TargetID == vtarget->id) &&
(pScsiReq->Bus == vtarget->channel) &&
(vtarget->tflags & MPT_TARGET_FLAGS_Q_YES))
qtag = MPI_SCSIIO_CONTROL_SIMPLEQ;
}
/* Have the IOCTL driver set the direction based
* on the dataOutSize (ordering issue with Sparc).
*/
if (karg.dataOutSize > 0) {
scsidir = MPI_SCSIIO_CONTROL_WRITE;
dataSize = karg.dataOutSize;
} else {
scsidir = MPI_SCSIIO_CONTROL_READ;
dataSize = karg.dataInSize;
}
pScsiReq->Control = cpu_to_le32(scsidir | qtag);
pScsiReq->DataLength = cpu_to_le32(dataSize);
} else {
printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
"SCSI driver is not loaded. \n",
ioc->name, __FILE__, __LINE__);
rc = -EFAULT;
goto done_free_mem;
}
break;
case MPI_FUNCTION_SMP_PASSTHROUGH:
/* Check mf->PassthruFlags to determine if
* transfer is ImmediateMode or not.
* Immediate mode returns data in the ReplyFrame.
* Else, we are sending request and response data
* in two SGLs at the end of the mf.
*/
break;
case MPI_FUNCTION_SATA_PASSTHROUGH:
if (!ioc->sh) {
printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
"SCSI driver is not loaded. \n",
ioc->name, __FILE__, __LINE__);
rc = -EFAULT;
goto done_free_mem;
}
break;
case MPI_FUNCTION_RAID_ACTION:
/* Just add a SGE
*/
break;
case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
if (ioc->sh) {
SCSIIORequest_t *pScsiReq = (SCSIIORequest_t *) mf;
int qtag = MPI_SCSIIO_CONTROL_SIMPLEQ;
int scsidir = MPI_SCSIIO_CONTROL_READ;
int dataSize;
pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH;
pScsiReq->MsgFlags |= mpt_msg_flags(ioc);
/* verify that app has not requested
* more sense data than driver
* can provide, if so, reset this parameter
* set the sense buffer pointer low address
* update the control field to specify Q type
*/
if (karg.maxSenseBytes > MPT_SENSE_BUFFER_SIZE)
pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE;
else
pScsiReq->SenseBufferLength = karg.maxSenseBytes;
pScsiReq->SenseBufferLowAddr =
cpu_to_le32(ioc->sense_buf_low_dma
+ (req_idx * MPT_SENSE_BUFFER_ALLOC));
/* All commands to physical devices are tagged
*/
/* Have the IOCTL driver set the direction based
* on the dataOutSize (ordering issue with Sparc).
*/
if (karg.dataOutSize > 0) {
scsidir = MPI_SCSIIO_CONTROL_WRITE;
dataSize = karg.dataOutSize;
} else {
scsidir = MPI_SCSIIO_CONTROL_READ;
dataSize = karg.dataInSize;
}
pScsiReq->Control = cpu_to_le32(scsidir | qtag);
pScsiReq->DataLength = cpu_to_le32(dataSize);
} else {
printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
"SCSI driver is not loaded. \n",
ioc->name, __FILE__, __LINE__);
rc = -EFAULT;
goto done_free_mem;
}
break;
case MPI_FUNCTION_SCSI_TASK_MGMT:
{
SCSITaskMgmt_t *pScsiTm;
pScsiTm = (SCSITaskMgmt_t *)mf;
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"\tTaskType=0x%x MsgFlags=0x%x "
"TaskMsgContext=0x%x id=%d channel=%d\n",
ioc->name, pScsiTm->TaskType, le32_to_cpu
(pScsiTm->TaskMsgContext), pScsiTm->MsgFlags,
pScsiTm->TargetID, pScsiTm->Bus));
break;
}
case MPI_FUNCTION_IOC_INIT:
{
IOCInit_t *pInit = (IOCInit_t *) mf;
u32 high_addr, sense_high;
/* Verify that all entries in the IOC INIT match
* existing setup (and in LE format).
*/
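		/*
		 * On 64-bit DMA configurations the IOC was programmed with
		 * the upper 32 bits of the request frame and sense buffer
		 * addresses; recompute them here so the comparison below
		 * matches what the IOC was given.
		 */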
if (sizeof(dma_addr_t) == sizeof(u64)) {
high_addr = cpu_to_le32((u32)((u64)ioc->req_frames_dma >> 32));
			sense_high = cpu_to_le32((u32)((u64)ioc->sense_buf_pool_dma >> 32));
} else {
high_addr = 0;
			sense_high = 0;
}
if ((pInit->Flags != 0) || (pInit->MaxDevices != ioc->facts.MaxDevices) ||
(pInit->MaxBuses != ioc->facts.MaxBuses) ||
(pInit->ReplyFrameSize != cpu_to_le16(ioc->reply_sz)) ||
(pInit->HostMfaHighAddr != high_addr) ||
(pInit->SenseBufferHighAddr != sense_high)) {
printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
"IOC_INIT issued with 1 or more incorrect parameters. Rejected.\n",
ioc->name, __FILE__, __LINE__);
rc = -EFAULT;
goto done_free_mem;
}
}
break;
default:
/*
* MPI_FUNCTION_PORT_ENABLE
* MPI_FUNCTION_TARGET_CMD_BUFFER_POST
* MPI_FUNCTION_TARGET_ASSIST
* MPI_FUNCTION_TARGET_STATUS_SEND
* MPI_FUNCTION_TARGET_MODE_ABORT
* MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET
* MPI_FUNCTION_IO_UNIT_RESET
* MPI_FUNCTION_HANDSHAKE
* MPI_FUNCTION_REPLY_FRAME_REMOVAL
* MPI_FUNCTION_EVENT_NOTIFICATION
* (driver handles event notification)
* MPI_FUNCTION_EVENT_ACK
*/
/* What to do with these??? CHECK ME!!!
MPI_FUNCTION_FC_LINK_SRVC_BUF_POST
MPI_FUNCTION_FC_LINK_SRVC_RSP
MPI_FUNCTION_FC_ABORT
MPI_FUNCTION_LAN_SEND
MPI_FUNCTION_LAN_RECEIVE
MPI_FUNCTION_LAN_RESET
*/
printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
"Illegal request (function 0x%x) \n",
ioc->name, __FILE__, __LINE__, hdr->Function);
rc = -EFAULT;
goto done_free_mem;
}
/* Add the SGL ( at most one data in SGE and one data out SGE )
* In the case of two SGE's - the data out (write) will always
* preceede the data in (read) SGE. psgList is used to free the
* allocated memory.
*/
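	/*
	 * karg.dataSgeOffset is expressed in 32-bit words from the start of
	 * the message frame, hence the (int *) pointer arithmetic below.
	 */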
psge = (char *) (((int *) mf) + karg.dataSgeOffset);
flagsLength = 0;
if (karg.dataOutSize > 0)
		sgSize++;
if (karg.dataInSize > 0)
		sgSize++;
if (sgSize > 0) {
/* Set up the dataOut memory allocation */
if (karg.dataOutSize > 0) {
if (karg.dataInSize > 0) {
flagsLength = ( MPI_SGE_FLAGS_SIMPLE_ELEMENT |
MPI_SGE_FLAGS_END_OF_BUFFER |
MPI_SGE_FLAGS_DIRECTION)
<< MPI_SGE_FLAGS_SHIFT;
} else {
flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE;
}
flagsLength |= karg.dataOutSize;
bufOut.len = karg.dataOutSize;
bufOut.kptr = dma_alloc_coherent(&ioc->pcidev->dev,
bufOut.len,
&dma_addr_out, GFP_KERNEL);
if (bufOut.kptr == NULL) {
rc = -ENOMEM;
goto done_free_mem;
} else {
/* Set up this SGE.
* Copy to MF and to sglbuf
*/
ioc->add_sge(psge, flagsLength, dma_addr_out);
psge += ioc->SGE_size;
/* Copy user data to kernel space.
*/
if (copy_from_user(bufOut.kptr,
karg.dataOutBufPtr,
bufOut.len)) {
printk(MYIOC_s_ERR_FMT
"%s@%d::mptctl_do_mpt_command - Unable "
"to read user data "
"struct @ %p\n",
ioc->name, __FILE__, __LINE__,karg.dataOutBufPtr);
rc = -EFAULT;
goto done_free_mem;
}
}
}
if (karg.dataInSize > 0) {
flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ;
flagsLength |= karg.dataInSize;
bufIn.len = karg.dataInSize;
bufIn.kptr = dma_alloc_coherent(&ioc->pcidev->dev,
bufIn.len,
&dma_addr_in, GFP_KERNEL);
if (bufIn.kptr == NULL) {
rc = -ENOMEM;
goto done_free_mem;
} else {
/* Set up this SGE
* Copy to MF and to sglbuf
*/
ioc->add_sge(psge, flagsLength, dma_addr_in);
}
}
} else {
/* Add a NULL SGE
*/
ioc->add_sge(psge, flagsLength, (dma_addr_t) -1);
}
SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, hdr->MsgContext);
INITIALIZE_MGMT_STATUS(ioc->ioctl_cmds.status)
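	/*
	 * Task management requests are serialized through
	 * taskmgmt_cmds.mutex and are sent on the high-priority queue when
	 * the IOC advertises one (MPI v1.05+); otherwise they go out via
	 * the doorbell handshake.
	 */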
if (hdr->Function == MPI_FUNCTION_SCSI_TASK_MGMT) {
mutex_lock(&ioc->taskmgmt_cmds.mutex);
if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) {
mutex_unlock(&ioc->taskmgmt_cmds.mutex);
goto done_free_mem;
}
DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)mf);
if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) &&
(ioc->facts.MsgVersion >= MPI_VERSION_01_05))
mpt_put_msg_frame_hi_pri(mptctl_id, ioc, mf);
else {
			rc = mpt_send_handshake_request(mptctl_id, ioc,
				sizeof(SCSITaskMgmt_t), (u32 *)mf, CAN_SLEEP);
if (rc != 0) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"send_handshake FAILED! (ioc %p, mf %p)\n",
ioc->name, ioc, mf));
mpt_clear_taskmgmt_in_progress_flag(ioc);
rc = -ENODATA;
mutex_unlock(&ioc->taskmgmt_cmds.mutex);
goto done_free_mem;
}
}
} else
mpt_put_msg_frame(mptctl_id, ioc, mf);
/* Now wait for the command to complete */
timeout = (karg.timeout > 0) ? karg.timeout : MPT_IOCTL_DEFAULT_TIMEOUT;
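	/* karg.timeout is given in seconds; the wait below converts it to jiffies */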
retry_wait:
timeleft = wait_for_completion_timeout(&ioc->ioctl_cmds.done,
HZ*timeout);
if (!(ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
rc = -ETIME;
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "%s: TIMED OUT!\n",
ioc->name, __func__));
if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) {
if (function == MPI_FUNCTION_SCSI_TASK_MGMT)
mutex_unlock(&ioc->taskmgmt_cmds.mutex);
goto done_free_mem;
}
if (!timeleft) {
printk(MYIOC_s_WARN_FMT
"mpt cmd timeout, doorbell=0x%08x"
" function=0x%x\n",
ioc->name, mpt_GetIocState(ioc, 0), function);
if (function == MPI_FUNCTION_SCSI_TASK_MGMT)
mutex_unlock(&ioc->taskmgmt_cmds.mutex);
mptctl_timeout_expired(ioc, mf);
mf = NULL;
} else
goto retry_wait;
goto done_free_mem;
}
if (function == MPI_FUNCTION_SCSI_TASK_MGMT)
mutex_unlock(&ioc->taskmgmt_cmds.mutex);
mf = NULL;
/* If a valid reply frame, copy to the user.
* Offset 2: reply length in U32's
*/
if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID) {
if (karg.maxReplyBytes < ioc->reply_sz) {
sz = min(karg.maxReplyBytes,
4*ioc->ioctl_cmds.reply[2]);
} else {
sz = min(ioc->reply_sz, 4*ioc->ioctl_cmds.reply[2]);
}
if (sz > 0) {
if (copy_to_user(karg.replyFrameBufPtr,
ioc->ioctl_cmds.reply, sz)){
printk(MYIOC_s_ERR_FMT
"%s@%d::mptctl_do_mpt_command - "
"Unable to write out reply frame %p\n",
ioc->name, __FILE__, __LINE__, karg.replyFrameBufPtr);
rc = -ENODATA;
goto done_free_mem;
}
}
}
/* If valid sense data, copy to user.
*/
if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_SENSE_VALID) {
sz = min(karg.maxSenseBytes, MPT_SENSE_BUFFER_SIZE);
if (sz > 0) {
if (copy_to_user(karg.senseDataPtr,
ioc->ioctl_cmds.sense, sz)) {
printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
"Unable to write sense data to user %p\n",
ioc->name, __FILE__, __LINE__,
karg.senseDataPtr);
rc = -ENODATA;
goto done_free_mem;
}
}
}
/* If the overall status is _GOOD and data in, copy data
* to user.
*/
if ((ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD) &&
(karg.dataInSize > 0) && (bufIn.kptr)) {
if (copy_to_user(karg.dataInBufPtr,
bufIn.kptr, karg.dataInSize)) {
printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
"Unable to write data to user %p\n",
ioc->name, __FILE__, __LINE__,
karg.dataInBufPtr);
rc = -ENODATA;
}
}
done_free_mem:
CLEAR_MGMT_STATUS(ioc->ioctl_cmds.status)
SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, 0);
/* Free the allocated memory.
*/
if (bufOut.kptr != NULL) {
dma_free_coherent(&ioc->pcidev->dev, bufOut.len,
(void *)bufOut.kptr, dma_addr_out);
}
if (bufIn.kptr != NULL) {
dma_free_coherent(&ioc->pcidev->dev, bufIn.len,
(void *)bufIn.kptr, dma_addr_in);
}
	/* mf is NULL if the command was issued successfully;
	 * otherwise, a failure occurred after mf was acquired.
	 */
if (mf)
mpt_free_msg_frame(ioc, mf);
return rc;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Prototype Routine for the HOST INFO command.
*
* Outputs: None.
* Return: 0 if successful
* -EFAULT if data unavailable
* -EBUSY if previous command timeout and IOC reset is not complete.
* -ENODEV if no such device/adapter
* -ETIME if timer expires
* -ENOMEM if memory allocation error
*/
static int
mptctl_hp_hostinfo(MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size)
{
hp_host_info_t __user *uarg = (void __user *) arg;
struct pci_dev *pdev;
char *pbuf=NULL;
dma_addr_t buf_dma;
hp_host_info_t karg;
CONFIGPARMS cfg;
ConfigPageHeader_t hdr;
int rc, cim_rev;
ToolboxIstwiReadWriteRequest_t *IstwiRWRequest;
MPT_FRAME_HDR *mf = NULL;
unsigned long timeleft;
u32 msgcontext;
	/* Distinguish the current structure layout from the obsolete rev0
	 * layout by its size; the two differ only on 64-bit ABIs such as
	 * IA64 and SPARC.
	 */
if (data_size == sizeof(hp_host_info_t))
cim_rev = 1;
else if (data_size == sizeof(hp_host_info_rev0_t))
cim_rev = 0; /* obsolete */
else
return -EFAULT;
if (copy_from_user(&karg, uarg, sizeof(hp_host_info_t))) {
printk(KERN_ERR MYNAM "%s@%d::mptctl_hp_host_info - "
"Unable to read in hp_host_info struct @ %p\n",
__FILE__, __LINE__, uarg);
return -EFAULT;
}
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": mptctl_hp_hostinfo called.\n",
ioc->name));
/* Fill in the data and return the structure to the calling
* program
*/
pdev = (struct pci_dev *) ioc->pcidev;
karg.vendor = pdev->vendor;
karg.device = pdev->device;
karg.subsystem_id = pdev->subsystem_device;
karg.subsystem_vendor = pdev->subsystem_vendor;
karg.devfn = pdev->devfn;
karg.bus = pdev->bus->number;
/* Save the SCSI host no. if
* SCSI driver loaded
*/
if (ioc->sh != NULL)
karg.host_no = ioc->sh->host_no;
else
karg.host_no = -1;
/* Reformat the fw_version into a string */
snprintf(karg.fw_version, sizeof(karg.fw_version),
"%.2hhu.%.2hhu.%.2hhu.%.2hhu",
ioc->facts.FWVersion.Struct.Major,
ioc->facts.FWVersion.Struct.Minor,
ioc->facts.FWVersion.Struct.Unit,
ioc->facts.FWVersion.Struct.Dev);
/* Issue a config request to get the device serial number
*/
hdr.PageVersion = 0;
hdr.PageLength = 0;
hdr.PageNumber = 0;
hdr.PageType = MPI_CONFIG_PAGETYPE_MANUFACTURING;
cfg.cfghdr.hdr = &hdr;
cfg.physAddr = -1;
cfg.pageAddr = 0;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.dir = 0; /* read */
cfg.timeout = 10;
strncpy(karg.serial_number, " ", 24);
if (mpt_config(ioc, &cfg) == 0) {
if (cfg.cfghdr.hdr->PageLength > 0) {
/* Issue the second config page request */
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
pbuf = dma_alloc_coherent(&ioc->pcidev->dev,
hdr.PageLength * 4,
&buf_dma, GFP_KERNEL);
if (pbuf) {
cfg.physAddr = buf_dma;
if (mpt_config(ioc, &cfg) == 0) {
ManufacturingPage0_t *pdata = (ManufacturingPage0_t *) pbuf;
if (strlen(pdata->BoardTracerNumber) > 1) {
strscpy(karg.serial_number,
pdata->BoardTracerNumber, 24);
}
}
dma_free_coherent(&ioc->pcidev->dev,
hdr.PageLength * 4, pbuf,
buf_dma);
pbuf = NULL;
}
}
}
rc = mpt_GetIocState(ioc, 1);
switch (rc) {
case MPI_IOC_STATE_OPERATIONAL:
karg.ioc_status = HP_STATUS_OK;
break;
case MPI_IOC_STATE_FAULT:
karg.ioc_status = HP_STATUS_FAILED;
break;
case MPI_IOC_STATE_RESET:
case MPI_IOC_STATE_READY:
default:
karg.ioc_status = HP_STATUS_OTHER;
break;
}
karg.base_io_addr = pci_resource_start(pdev, 0);
if ((ioc->bus_type == SAS) || (ioc->bus_type == FC))
karg.bus_phys_width = HP_BUS_WIDTH_UNK;
else
karg.bus_phys_width = HP_BUS_WIDTH_16;
karg.hard_resets = 0;
karg.soft_resets = 0;
karg.timeouts = 0;
if (ioc->sh != NULL) {
MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
if (hd && (cim_rev == 1)) {
karg.hard_resets = ioc->hard_resets;
karg.soft_resets = ioc->soft_resets;
karg.timeouts = ioc->timeouts;
}
}
/*
* Gather ISTWI(Industry Standard Two Wire Interface) Data
*/
if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) {
dfailprintk(ioc, printk(MYIOC_s_WARN_FMT
"%s, no msg frames!!\n", ioc->name, __func__));
goto out;
}
IstwiRWRequest = (ToolboxIstwiReadWriteRequest_t *)mf;
msgcontext = IstwiRWRequest->MsgContext;
	memset(IstwiRWRequest, 0, sizeof(ToolboxIstwiReadWriteRequest_t));
IstwiRWRequest->MsgContext = msgcontext;
IstwiRWRequest->Function = MPI_FUNCTION_TOOLBOX;
IstwiRWRequest->Tool = MPI_TOOLBOX_ISTWI_READ_WRITE_TOOL;
IstwiRWRequest->Flags = MPI_TB_ISTWI_FLAGS_READ;
IstwiRWRequest->NumAddressBytes = 0x01;
IstwiRWRequest->DataLength = cpu_to_le16(0x04);
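	/* The ISTWI slave address depends on the PCI function number */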
if (pdev->devfn & 1)
IstwiRWRequest->DeviceAddr = 0xB2;
else
IstwiRWRequest->DeviceAddr = 0xB0;
pbuf = dma_alloc_coherent(&ioc->pcidev->dev, 4, &buf_dma, GFP_KERNEL);
if (!pbuf)
goto out;
ioc->add_sge((char *)&IstwiRWRequest->SGL,
(MPT_SGE_FLAGS_SSIMPLE_READ|4), buf_dma);
SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context,
IstwiRWRequest->MsgContext);
INITIALIZE_MGMT_STATUS(ioc->ioctl_cmds.status)
mpt_put_msg_frame(mptctl_id, ioc, mf);
retry_wait:
timeleft = wait_for_completion_timeout(&ioc->ioctl_cmds.done,
HZ*MPT_IOCTL_DEFAULT_TIMEOUT);
if (!(ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
printk(MYIOC_s_WARN_FMT "%s: failed\n", ioc->name, __func__);
if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) {
mpt_free_msg_frame(ioc, mf);
goto out;
}
if (!timeleft) {
printk(MYIOC_s_WARN_FMT
"HOST INFO command timeout, doorbell=0x%08x\n",
ioc->name, mpt_GetIocState(ioc, 0));
mptctl_timeout_expired(ioc, mf);
} else
goto retry_wait;
goto out;
}
/*
*ISTWI Data Definition
* pbuf[0] = FW_VERSION = 0x4
* pbuf[1] = Bay Count = 6 or 4 or 2, depending on
* the config, you should be seeing one out of these three values
* pbuf[2] = Drive Installed Map = bit pattern depend on which
* bays have drives in them
* pbuf[3] = Checksum (0x100 = (byte0 + byte2 + byte3)
*/
if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID)
karg.rsvd = *(u32 *)pbuf;
out:
CLEAR_MGMT_STATUS(ioc->ioctl_cmds.status)
SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, 0);
if (pbuf)
dma_free_coherent(&ioc->pcidev->dev, 4, pbuf, buf_dma);
/* Copy the data from kernel memory to user memory
*/
if (copy_to_user((char __user *)arg, &karg, sizeof(hp_host_info_t))) {
printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_hpgethostinfo - "
"Unable to write out hp_host_info @ %p\n",
ioc->name, __FILE__, __LINE__, uarg);
return -EFAULT;
}
return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Prototype Routine for the TARGET INFO command.
*
* Outputs: None.
* Return: 0 if successful
* -EFAULT if data unavailable
* -EBUSY if previous command timeout and IOC reset is not complete.
* -ENODEV if no such device/adapter
* -ETIME if timer expires
* -ENOMEM if memory allocation error
*/
static int
mptctl_hp_targetinfo(MPT_ADAPTER *ioc, unsigned long arg)
{
hp_target_info_t __user *uarg = (void __user *) arg;
SCSIDevicePage0_t *pg0_alloc;
SCSIDevicePage3_t *pg3_alloc;
MPT_SCSI_HOST *hd = NULL;
hp_target_info_t karg;
int data_sz;
dma_addr_t page_dma;
CONFIGPARMS cfg;
ConfigPageHeader_t hdr;
int tmp, np, rc = 0;
if (copy_from_user(&karg, uarg, sizeof(hp_target_info_t))) {
printk(KERN_ERR MYNAM "%s@%d::mptctl_hp_targetinfo - "
"Unable to read in hp_host_targetinfo struct @ %p\n",
__FILE__, __LINE__, uarg);
return -EFAULT;
}
if (karg.hdr.id >= MPT_MAX_FC_DEVICES)
return -EINVAL;
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_hp_targetinfo called.\n",
ioc->name));
/* There is nothing to do for FCP parts.
*/
if ((ioc->bus_type == SAS) || (ioc->bus_type == FC))
return 0;
if ((ioc->spi_data.sdp0length == 0) || (ioc->sh == NULL))
return 0;
if (ioc->sh->host_no != karg.hdr.host)
return -ENODEV;
/* Get the data transfer speeds
*/
data_sz = ioc->spi_data.sdp0length * 4;
pg0_alloc = dma_alloc_coherent(&ioc->pcidev->dev, data_sz, &page_dma,
GFP_KERNEL);
if (pg0_alloc) {
hdr.PageVersion = ioc->spi_data.sdp0version;
hdr.PageLength = data_sz;
hdr.PageNumber = 0;
hdr.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
cfg.cfghdr.hdr = &hdr;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
cfg.dir = 0;
cfg.timeout = 0;
cfg.physAddr = page_dma;
cfg.pageAddr = (karg.hdr.channel << 8) | karg.hdr.id;
if ((rc = mpt_config(ioc, &cfg)) == 0) {
np = le32_to_cpu(pg0_alloc->NegotiatedParameters);
karg.negotiated_width = np & MPI_SCSIDEVPAGE0_NP_WIDE ?
HP_BUS_WIDTH_16 : HP_BUS_WIDTH_8;
if (np & MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK) {
tmp = (np & MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK) >> 8;
if (tmp < 0x09)
karg.negotiated_speed = HP_DEV_SPEED_ULTRA320;
else if (tmp <= 0x09)
karg.negotiated_speed = HP_DEV_SPEED_ULTRA160;
else if (tmp <= 0x0A)
karg.negotiated_speed = HP_DEV_SPEED_ULTRA2;
else if (tmp <= 0x0C)
karg.negotiated_speed = HP_DEV_SPEED_ULTRA;
else if (tmp <= 0x25)
karg.negotiated_speed = HP_DEV_SPEED_FAST;
else
karg.negotiated_speed = HP_DEV_SPEED_ASYNC;
} else
karg.negotiated_speed = HP_DEV_SPEED_ASYNC;
}
dma_free_coherent(&ioc->pcidev->dev, data_sz, (u8 *)pg0_alloc,
page_dma);
}
/* Set defaults
*/
karg.message_rejects = -1;
karg.phase_errors = -1;
karg.parity_errors = -1;
karg.select_timeouts = -1;
/* Get the target error parameters
*/
hdr.PageVersion = 0;
hdr.PageLength = 0;
hdr.PageNumber = 3;
hdr.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
cfg.cfghdr.hdr = &hdr;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.dir = 0;
cfg.timeout = 0;
cfg.physAddr = -1;
if ((mpt_config(ioc, &cfg) == 0) && (cfg.cfghdr.hdr->PageLength > 0)) {
/* Issue the second config page request */
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
data_sz = (int) cfg.cfghdr.hdr->PageLength * 4;
pg3_alloc = dma_alloc_coherent(&ioc->pcidev->dev, data_sz,
&page_dma, GFP_KERNEL);
if (pg3_alloc) {
cfg.physAddr = page_dma;
cfg.pageAddr = (karg.hdr.channel << 8) | karg.hdr.id;
if ((rc = mpt_config(ioc, &cfg)) == 0) {
karg.message_rejects = (u32) le16_to_cpu(pg3_alloc->MsgRejectCount);
karg.phase_errors = (u32) le16_to_cpu(pg3_alloc->PhaseErrorCount);
karg.parity_errors = (u32) le16_to_cpu(pg3_alloc->ParityErrorCount);
}
dma_free_coherent(&ioc->pcidev->dev, data_sz,
(u8 *)pg3_alloc, page_dma);
}
}
hd = shost_priv(ioc->sh);
if (hd != NULL)
karg.select_timeouts = hd->sel_timeout[karg.hdr.id];
/* Copy the data from kernel memory to user memory
*/
if (copy_to_user((char __user *)arg, &karg, sizeof(hp_target_info_t))) {
printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_hp_target_info - "
"Unable to write out mpt_ioctl_targetinfo struct @ %p\n",
ioc->name, __FILE__, __LINE__, uarg);
return -EFAULT;
}
return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static const struct file_operations mptctl_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.fasync = mptctl_fasync,
.unlocked_ioctl = mptctl_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = compat_mpctl_ioctl,
#endif
};
static struct miscdevice mptctl_miscdev = {
	.minor = MPT_MINOR,
	.name = MYNAM,
	.fops = &mptctl_fops
};
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
#ifdef CONFIG_COMPAT
static int
compat_mptfwxfer_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
struct mpt_fw_xfer32 kfw32;
struct mpt_fw_xfer kfw;
MPT_ADAPTER *iocp = NULL;
int iocnum, iocnumX;
int nonblock = (filp->f_flags & O_NONBLOCK);
int ret;
if (copy_from_user(&kfw32, (char __user *)arg, sizeof(kfw32)))
return -EFAULT;
/* Verify intended MPT adapter */
iocnumX = kfw32.iocnum & 0xFF;
if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) ||
(iocp == NULL)) {
printk(KERN_DEBUG MYNAM "::compat_mptfwxfer_ioctl @%d - ioc%d not found!\n",
__LINE__, iocnumX);
return -ENODEV;
}
if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0)
return ret;
dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "compat_mptfwxfer_ioctl() called\n",
iocp->name));
kfw.iocnum = iocnum;
kfw.fwlen = kfw32.fwlen;
kfw.bufp = compat_ptr(kfw32.bufp);
ret = mptctl_do_fw_download(iocp, kfw.bufp, kfw.fwlen);
mutex_unlock(&iocp->ioctl_cmds.mutex);
return ret;
}
static int
compat_mpt_command(struct file *filp, unsigned int cmd,
unsigned long arg)
{
struct mpt_ioctl_command32 karg32;
struct mpt_ioctl_command32 __user *uarg = (struct mpt_ioctl_command32 __user *) arg;
struct mpt_ioctl_command karg;
MPT_ADAPTER *iocp = NULL;
int iocnum, iocnumX;
int nonblock = (filp->f_flags & O_NONBLOCK);
int ret;
if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32)))
return -EFAULT;
/* Verify intended MPT adapter */
iocnumX = karg32.hdr.iocnum & 0xFF;
if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) ||
(iocp == NULL)) {
printk(KERN_DEBUG MYNAM "::compat_mpt_command @%d - ioc%d not found!\n",
__LINE__, iocnumX);
return -ENODEV;
}
if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0)
return ret;
dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "compat_mpt_command() called\n",
iocp->name));
/* Copy data to karg */
karg.hdr.iocnum = karg32.hdr.iocnum;
karg.hdr.port = karg32.hdr.port;
karg.timeout = karg32.timeout;
karg.maxReplyBytes = karg32.maxReplyBytes;
karg.dataInSize = karg32.dataInSize;
karg.dataOutSize = karg32.dataOutSize;
karg.maxSenseBytes = karg32.maxSenseBytes;
karg.dataSgeOffset = karg32.dataSgeOffset;
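	/* Widen the 32-bit user-space pointers to native __user pointers */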
karg.replyFrameBufPtr = (char __user *)(unsigned long)karg32.replyFrameBufPtr;
karg.dataInBufPtr = (char __user *)(unsigned long)karg32.dataInBufPtr;
karg.dataOutBufPtr = (char __user *)(unsigned long)karg32.dataOutBufPtr;
karg.senseDataPtr = (char __user *)(unsigned long)karg32.senseDataPtr;
/* Pass new structure to do_mpt_command
*/
	ret = mptctl_do_mpt_command(iocp, karg, &uarg->MF);
mutex_unlock(&iocp->ioctl_cmds.mutex);
return ret;
}
static long compat_mpctl_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
long ret;
mutex_lock(&mpctl_mutex);
switch (cmd) {
case MPTIOCINFO:
case MPTIOCINFO1:
case MPTIOCINFO2:
case MPTTARGETINFO:
case MPTEVENTQUERY:
case MPTEVENTENABLE:
case MPTEVENTREPORT:
case MPTHARDRESET:
case HP_GETHOSTINFO:
case HP_GETTARGETINFO:
case MPTTEST:
ret = __mptctl_ioctl(f, cmd, arg);
break;
case MPTCOMMAND32:
ret = compat_mpt_command(f, cmd, arg);
break;
case MPTFWDOWNLOAD32:
ret = compat_mptfwxfer_ioctl(f, cmd, arg);
break;
default:
ret = -ENOIOCTLCMD;
break;
}
mutex_unlock(&mpctl_mutex);
return ret;
}
#endif
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* mptctl_probe - Installs ioctl devices per bus.
* @pdev: Pointer to pci_dev structure
*
* Returns 0 for success, non-zero for failure.
*
*/
static int
mptctl_probe(struct pci_dev *pdev)
{
MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
mutex_init(&ioc->ioctl_cmds.mutex);
init_completion(&ioc->ioctl_cmds.done);
return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mptctl_remove - Remove ioctl devices
* @pdev: Pointer to pci_dev structure
 *
*/
static void
mptctl_remove(struct pci_dev *pdev)
{
}
static struct mpt_pci_driver mptctl_driver = {
.probe = mptctl_probe,
.remove = mptctl_remove,
};
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int __init mptctl_init(void)
{
int err;
show_mptmod_ver(my_NAME, my_VERSION);
mpt_device_driver_register(&mptctl_driver, MPTCTL_DRIVER);
/* Register this device */
err = misc_register(&mptctl_miscdev);
if (err < 0) {
printk(KERN_ERR MYNAM ": Can't register misc device [minor=%d].\n", MPT_MINOR);
goto out_fail;
}
printk(KERN_INFO MYNAM ": Registered with Fusion MPT base driver\n");
printk(KERN_INFO MYNAM ": /dev/%s @ (major,minor=%d,%d)\n",
mptctl_miscdev.name, MISC_MAJOR, mptctl_miscdev.minor);
/*
* Install our handler
*/
mptctl_id = mpt_register(mptctl_reply, MPTCTL_DRIVER,
"mptctl_reply");
if (!mptctl_id || mptctl_id >= MPT_MAX_PROTOCOL_DRIVERS) {
printk(KERN_ERR MYNAM ": ERROR: Failed to register with Fusion MPT base driver\n");
misc_deregister(&mptctl_miscdev);
err = -EBUSY;
goto out_fail;
}
mptctl_taskmgmt_id = mpt_register(mptctl_taskmgmt_reply, MPTCTL_DRIVER,
"mptctl_taskmgmt_reply");
if (!mptctl_taskmgmt_id || mptctl_taskmgmt_id >= MPT_MAX_PROTOCOL_DRIVERS) {
printk(KERN_ERR MYNAM ": ERROR: Failed to register with Fusion MPT base driver\n");
mpt_deregister(mptctl_id);
misc_deregister(&mptctl_miscdev);
err = -EBUSY;
goto out_fail;
}
mpt_reset_register(mptctl_id, mptctl_ioc_reset);
mpt_event_register(mptctl_id, mptctl_event_process);
return 0;
out_fail:
mpt_device_driver_deregister(MPTCTL_DRIVER);
return err;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static void mptctl_exit(void)
{
misc_deregister(&mptctl_miscdev);
printk(KERN_INFO MYNAM ": Deregistered /dev/%s @ (major,minor=%d,%d)\n",
mptctl_miscdev.name, MISC_MAJOR, mptctl_miscdev.minor);
/* De-register event handler from base module */
mpt_event_deregister(mptctl_id);
/* De-register reset handler from base module */
mpt_reset_deregister(mptctl_id);
/* De-register callback handler from base module */
mpt_deregister(mptctl_taskmgmt_id);
mpt_deregister(mptctl_id);
mpt_device_driver_deregister(MPTCTL_DRIVER);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
module_init(mptctl_init);
module_exit(mptctl_exit);
| linux-master | drivers/message/fusion/mptctl.c |
/*
* AXP20x regulators driver.
*
* Copyright (C) 2013 Carlo Caione <[email protected]>
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of this
* archive for more details.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/mfd/axp20x.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#define AXP20X_GPIO0_FUNC_MASK GENMASK(3, 0)
#define AXP20X_GPIO1_FUNC_MASK GENMASK(3, 0)
#define AXP20X_IO_ENABLED 0x03
#define AXP20X_IO_DISABLED 0x07
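/*
 * GPIO pin-function selector values; used as the enable/disable values for
 * the GPIO-attached LDO descriptors below.
 */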
#define AXP20X_WORKMODE_DCDC2_MASK BIT_MASK(2)
#define AXP20X_WORKMODE_DCDC3_MASK BIT_MASK(1)
#define AXP20X_FREQ_DCDC_MASK GENMASK(3, 0)
#define AXP20X_VBUS_IPSOUT_MGMT_MASK BIT_MASK(2)
#define AXP20X_DCDC2_V_OUT_MASK GENMASK(5, 0)
#define AXP20X_DCDC3_V_OUT_MASK GENMASK(7, 0)
#define AXP20X_LDO2_V_OUT_MASK GENMASK(7, 4)
#define AXP20X_LDO3_V_OUT_MASK GENMASK(6, 0)
#define AXP20X_LDO4_V_OUT_MASK GENMASK(3, 0)
#define AXP20X_LDO5_V_OUT_MASK GENMASK(7, 4)
#define AXP20X_PWR_OUT_EXTEN_MASK BIT_MASK(0)
#define AXP20X_PWR_OUT_DCDC3_MASK BIT_MASK(1)
#define AXP20X_PWR_OUT_LDO2_MASK BIT_MASK(2)
#define AXP20X_PWR_OUT_LDO4_MASK BIT_MASK(3)
#define AXP20X_PWR_OUT_DCDC2_MASK BIT_MASK(4)
#define AXP20X_PWR_OUT_LDO3_MASK BIT_MASK(6)
#define AXP20X_DCDC2_LDO3_V_RAMP_DCDC2_RATE_MASK BIT_MASK(0)
#define AXP20X_DCDC2_LDO3_V_RAMP_DCDC2_RATE(x) \
((x) << 0)
#define AXP20X_DCDC2_LDO3_V_RAMP_LDO3_RATE_MASK BIT_MASK(1)
#define AXP20X_DCDC2_LDO3_V_RAMP_LDO3_RATE(x) \
((x) << 1)
#define AXP20X_DCDC2_LDO3_V_RAMP_DCDC2_EN_MASK BIT_MASK(2)
#define AXP20X_DCDC2_LDO3_V_RAMP_DCDC2_EN BIT(2)
#define AXP20X_DCDC2_LDO3_V_RAMP_LDO3_EN_MASK BIT_MASK(3)
#define AXP20X_DCDC2_LDO3_V_RAMP_LDO3_EN BIT(3)
#define AXP20X_LDO4_V_OUT_1250mV_START 0x0
#define AXP20X_LDO4_V_OUT_1250mV_STEPS 0
#define AXP20X_LDO4_V_OUT_1250mV_END \
(AXP20X_LDO4_V_OUT_1250mV_START + AXP20X_LDO4_V_OUT_1250mV_STEPS)
#define AXP20X_LDO4_V_OUT_1300mV_START 0x1
#define AXP20X_LDO4_V_OUT_1300mV_STEPS 7
#define AXP20X_LDO4_V_OUT_1300mV_END \
(AXP20X_LDO4_V_OUT_1300mV_START + AXP20X_LDO4_V_OUT_1300mV_STEPS)
#define AXP20X_LDO4_V_OUT_2500mV_START 0x9
#define AXP20X_LDO4_V_OUT_2500mV_STEPS 0
#define AXP20X_LDO4_V_OUT_2500mV_END \
(AXP20X_LDO4_V_OUT_2500mV_START + AXP20X_LDO4_V_OUT_2500mV_STEPS)
#define AXP20X_LDO4_V_OUT_2700mV_START 0xa
#define AXP20X_LDO4_V_OUT_2700mV_STEPS 1
#define AXP20X_LDO4_V_OUT_2700mV_END \
(AXP20X_LDO4_V_OUT_2700mV_START + AXP20X_LDO4_V_OUT_2700mV_STEPS)
#define AXP20X_LDO4_V_OUT_3000mV_START 0xc
#define AXP20X_LDO4_V_OUT_3000mV_STEPS 3
#define AXP20X_LDO4_V_OUT_3000mV_END \
(AXP20X_LDO4_V_OUT_3000mV_START + AXP20X_LDO4_V_OUT_3000mV_STEPS)
#define AXP20X_LDO4_V_OUT_NUM_VOLTAGES 16
#define AXP22X_IO_ENABLED 0x03
#define AXP22X_IO_DISABLED 0x04
#define AXP22X_WORKMODE_DCDCX_MASK(x) BIT_MASK(x)
#define AXP22X_MISC_N_VBUSEN_FUNC BIT(4)
#define AXP22X_DCDC1_V_OUT_MASK GENMASK(4, 0)
#define AXP22X_DCDC2_V_OUT_MASK GENMASK(5, 0)
#define AXP22X_DCDC3_V_OUT_MASK GENMASK(5, 0)
#define AXP22X_DCDC4_V_OUT_MASK GENMASK(5, 0)
#define AXP22X_DCDC5_V_OUT_MASK GENMASK(4, 0)
#define AXP22X_DC5LDO_V_OUT_MASK GENMASK(2, 0)
#define AXP22X_ALDO1_V_OUT_MASK GENMASK(4, 0)
#define AXP22X_ALDO2_V_OUT_MASK GENMASK(4, 0)
#define AXP22X_ALDO3_V_OUT_MASK GENMASK(4, 0)
#define AXP22X_DLDO1_V_OUT_MASK GENMASK(4, 0)
#define AXP22X_DLDO2_V_OUT_MASK GENMASK(4, 0)
#define AXP22X_DLDO3_V_OUT_MASK GENMASK(4, 0)
#define AXP22X_DLDO4_V_OUT_MASK GENMASK(4, 0)
#define AXP22X_ELDO1_V_OUT_MASK GENMASK(4, 0)
#define AXP22X_ELDO2_V_OUT_MASK GENMASK(4, 0)
#define AXP22X_ELDO3_V_OUT_MASK GENMASK(4, 0)
#define AXP22X_LDO_IO0_V_OUT_MASK GENMASK(4, 0)
#define AXP22X_LDO_IO1_V_OUT_MASK GENMASK(4, 0)
#define AXP22X_PWR_OUT_DC5LDO_MASK BIT_MASK(0)
#define AXP22X_PWR_OUT_DCDC1_MASK BIT_MASK(1)
#define AXP22X_PWR_OUT_DCDC2_MASK BIT_MASK(2)
#define AXP22X_PWR_OUT_DCDC3_MASK BIT_MASK(3)
#define AXP22X_PWR_OUT_DCDC4_MASK BIT_MASK(4)
#define AXP22X_PWR_OUT_DCDC5_MASK BIT_MASK(5)
#define AXP22X_PWR_OUT_ALDO1_MASK BIT_MASK(6)
#define AXP22X_PWR_OUT_ALDO2_MASK BIT_MASK(7)
#define AXP22X_PWR_OUT_SW_MASK BIT_MASK(6)
#define AXP22X_PWR_OUT_DC1SW_MASK BIT_MASK(7)
#define AXP22X_PWR_OUT_ELDO1_MASK BIT_MASK(0)
#define AXP22X_PWR_OUT_ELDO2_MASK BIT_MASK(1)
#define AXP22X_PWR_OUT_ELDO3_MASK BIT_MASK(2)
#define AXP22X_PWR_OUT_DLDO1_MASK BIT_MASK(3)
#define AXP22X_PWR_OUT_DLDO2_MASK BIT_MASK(4)
#define AXP22X_PWR_OUT_DLDO3_MASK BIT_MASK(5)
#define AXP22X_PWR_OUT_DLDO4_MASK BIT_MASK(6)
#define AXP22X_PWR_OUT_ALDO3_MASK BIT_MASK(7)
#define AXP313A_DCDC1_NUM_VOLTAGES 107
#define AXP313A_DCDC23_NUM_VOLTAGES 88
#define AXP313A_DCDC_V_OUT_MASK GENMASK(6, 0)
#define AXP313A_LDO_V_OUT_MASK GENMASK(4, 0)
#define AXP803_PWR_OUT_DCDC1_MASK BIT_MASK(0)
#define AXP803_PWR_OUT_DCDC2_MASK BIT_MASK(1)
#define AXP803_PWR_OUT_DCDC3_MASK BIT_MASK(2)
#define AXP803_PWR_OUT_DCDC4_MASK BIT_MASK(3)
#define AXP803_PWR_OUT_DCDC5_MASK BIT_MASK(4)
#define AXP803_PWR_OUT_DCDC6_MASK BIT_MASK(5)
#define AXP803_PWR_OUT_FLDO1_MASK BIT_MASK(2)
#define AXP803_PWR_OUT_FLDO2_MASK BIT_MASK(3)
#define AXP803_DCDC1_V_OUT_MASK GENMASK(4, 0)
#define AXP803_DCDC2_V_OUT_MASK GENMASK(6, 0)
#define AXP803_DCDC3_V_OUT_MASK GENMASK(6, 0)
#define AXP803_DCDC4_V_OUT_MASK GENMASK(6, 0)
#define AXP803_DCDC5_V_OUT_MASK GENMASK(6, 0)
#define AXP803_DCDC6_V_OUT_MASK GENMASK(6, 0)
#define AXP803_FLDO1_V_OUT_MASK GENMASK(3, 0)
#define AXP803_FLDO2_V_OUT_MASK GENMASK(3, 0)
#define AXP803_DCDC23_POLYPHASE_DUAL BIT(6)
#define AXP803_DCDC56_POLYPHASE_DUAL BIT(5)
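/*
 * Several AXP803 DC-DCs use a two-segment voltage encoding: a fine
 * 10 mV/step range followed by a coarser 20 mV/step range (see the
 * linear_range tables further down).
 */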
#define AXP803_DCDC234_500mV_START 0x00
#define AXP803_DCDC234_500mV_STEPS 70
#define AXP803_DCDC234_500mV_END \
(AXP803_DCDC234_500mV_START + AXP803_DCDC234_500mV_STEPS)
#define AXP803_DCDC234_1220mV_START 0x47
#define AXP803_DCDC234_1220mV_STEPS 4
#define AXP803_DCDC234_1220mV_END \
(AXP803_DCDC234_1220mV_START + AXP803_DCDC234_1220mV_STEPS)
#define AXP803_DCDC234_NUM_VOLTAGES 76
#define AXP803_DCDC5_800mV_START 0x00
#define AXP803_DCDC5_800mV_STEPS 32
#define AXP803_DCDC5_800mV_END \
(AXP803_DCDC5_800mV_START + AXP803_DCDC5_800mV_STEPS)
#define AXP803_DCDC5_1140mV_START 0x21
#define AXP803_DCDC5_1140mV_STEPS 35
#define AXP803_DCDC5_1140mV_END \
(AXP803_DCDC5_1140mV_START + AXP803_DCDC5_1140mV_STEPS)
#define AXP803_DCDC5_NUM_VOLTAGES 69
#define AXP803_DCDC6_600mV_START 0x00
#define AXP803_DCDC6_600mV_STEPS 50
#define AXP803_DCDC6_600mV_END \
(AXP803_DCDC6_600mV_START + AXP803_DCDC6_600mV_STEPS)
#define AXP803_DCDC6_1120mV_START 0x33
#define AXP803_DCDC6_1120mV_STEPS 20
#define AXP803_DCDC6_1120mV_END \
(AXP803_DCDC6_1120mV_START + AXP803_DCDC6_1120mV_STEPS)
#define AXP803_DCDC6_NUM_VOLTAGES 72
#define AXP803_DLDO2_700mV_START 0x00
#define AXP803_DLDO2_700mV_STEPS 26
#define AXP803_DLDO2_700mV_END \
(AXP803_DLDO2_700mV_START + AXP803_DLDO2_700mV_STEPS)
#define AXP803_DLDO2_3400mV_START 0x1b
#define AXP803_DLDO2_3400mV_STEPS 4
#define AXP803_DLDO2_3400mV_END \
(AXP803_DLDO2_3400mV_START + AXP803_DLDO2_3400mV_STEPS)
#define AXP803_DLDO2_NUM_VOLTAGES 32
#define AXP806_DCDCA_V_CTRL_MASK GENMASK(6, 0)
#define AXP806_DCDCB_V_CTRL_MASK GENMASK(4, 0)
#define AXP806_DCDCC_V_CTRL_MASK GENMASK(6, 0)
#define AXP806_DCDCD_V_CTRL_MASK GENMASK(5, 0)
#define AXP806_DCDCE_V_CTRL_MASK GENMASK(4, 0)
#define AXP806_ALDO1_V_CTRL_MASK GENMASK(4, 0)
#define AXP806_ALDO2_V_CTRL_MASK GENMASK(4, 0)
#define AXP806_ALDO3_V_CTRL_MASK GENMASK(4, 0)
#define AXP806_BLDO1_V_CTRL_MASK GENMASK(3, 0)
#define AXP806_BLDO2_V_CTRL_MASK GENMASK(3, 0)
#define AXP806_BLDO3_V_CTRL_MASK GENMASK(3, 0)
#define AXP806_BLDO4_V_CTRL_MASK GENMASK(3, 0)
#define AXP806_CLDO1_V_CTRL_MASK GENMASK(4, 0)
#define AXP806_CLDO2_V_CTRL_MASK GENMASK(4, 0)
#define AXP806_CLDO3_V_CTRL_MASK GENMASK(4, 0)
#define AXP806_PWR_OUT_DCDCA_MASK BIT_MASK(0)
#define AXP806_PWR_OUT_DCDCB_MASK BIT_MASK(1)
#define AXP806_PWR_OUT_DCDCC_MASK BIT_MASK(2)
#define AXP806_PWR_OUT_DCDCD_MASK BIT_MASK(3)
#define AXP806_PWR_OUT_DCDCE_MASK BIT_MASK(4)
#define AXP806_PWR_OUT_ALDO1_MASK BIT_MASK(5)
#define AXP806_PWR_OUT_ALDO2_MASK BIT_MASK(6)
#define AXP806_PWR_OUT_ALDO3_MASK BIT_MASK(7)
#define AXP806_PWR_OUT_BLDO1_MASK BIT_MASK(0)
#define AXP806_PWR_OUT_BLDO2_MASK BIT_MASK(1)
#define AXP806_PWR_OUT_BLDO3_MASK BIT_MASK(2)
#define AXP806_PWR_OUT_BLDO4_MASK BIT_MASK(3)
#define AXP806_PWR_OUT_CLDO1_MASK BIT_MASK(4)
#define AXP806_PWR_OUT_CLDO2_MASK BIT_MASK(5)
#define AXP806_PWR_OUT_CLDO3_MASK BIT_MASK(6)
#define AXP806_PWR_OUT_SW_MASK BIT_MASK(7)
#define AXP806_DCDCAB_POLYPHASE_DUAL 0x40
#define AXP806_DCDCABC_POLYPHASE_TRI 0x80
#define AXP806_DCDCABC_POLYPHASE_MASK GENMASK(7, 6)
#define AXP806_DCDCDE_POLYPHASE_DUAL BIT(5)
#define AXP806_DCDCA_600mV_START 0x00
#define AXP806_DCDCA_600mV_STEPS 50
#define AXP806_DCDCA_600mV_END \
(AXP806_DCDCA_600mV_START + AXP806_DCDCA_600mV_STEPS)
#define AXP806_DCDCA_1120mV_START 0x33
#define AXP806_DCDCA_1120mV_STEPS 20
#define AXP806_DCDCA_1120mV_END \
(AXP806_DCDCA_1120mV_START + AXP806_DCDCA_1120mV_STEPS)
#define AXP806_DCDCA_NUM_VOLTAGES 72
#define AXP806_DCDCD_600mV_START 0x00
#define AXP806_DCDCD_600mV_STEPS 45
#define AXP806_DCDCD_600mV_END \
(AXP806_DCDCD_600mV_START + AXP806_DCDCD_600mV_STEPS)
#define AXP806_DCDCD_1600mV_START 0x2e
#define AXP806_DCDCD_1600mV_STEPS 17
#define AXP806_DCDCD_1600mV_END \
(AXP806_DCDCD_1600mV_START + AXP806_DCDCD_1600mV_STEPS)
#define AXP806_DCDCD_NUM_VOLTAGES 64
#define AXP809_DCDC4_600mV_START 0x00
#define AXP809_DCDC4_600mV_STEPS 47
#define AXP809_DCDC4_600mV_END \
(AXP809_DCDC4_600mV_START + AXP809_DCDC4_600mV_STEPS)
#define AXP809_DCDC4_1800mV_START 0x30
#define AXP809_DCDC4_1800mV_STEPS 8
#define AXP809_DCDC4_1800mV_END \
(AXP809_DCDC4_1800mV_START + AXP809_DCDC4_1800mV_STEPS)
#define AXP809_DCDC4_NUM_VOLTAGES 57
#define AXP813_DCDC7_V_OUT_MASK GENMASK(6, 0)
#define AXP813_PWR_OUT_DCDC7_MASK BIT_MASK(6)
#define AXP15060_DCDC1_V_CTRL_MASK GENMASK(4, 0)
#define AXP15060_DCDC2_V_CTRL_MASK GENMASK(6, 0)
#define AXP15060_DCDC3_V_CTRL_MASK GENMASK(6, 0)
#define AXP15060_DCDC4_V_CTRL_MASK GENMASK(6, 0)
#define AXP15060_DCDC5_V_CTRL_MASK GENMASK(6, 0)
#define AXP15060_DCDC6_V_CTRL_MASK GENMASK(4, 0)
#define AXP15060_ALDO1_V_CTRL_MASK GENMASK(4, 0)
#define AXP15060_ALDO2_V_CTRL_MASK GENMASK(4, 0)
#define AXP15060_ALDO3_V_CTRL_MASK GENMASK(4, 0)
#define AXP15060_ALDO4_V_CTRL_MASK GENMASK(4, 0)
#define AXP15060_ALDO5_V_CTRL_MASK GENMASK(4, 0)
#define AXP15060_BLDO1_V_CTRL_MASK GENMASK(4, 0)
#define AXP15060_BLDO2_V_CTRL_MASK GENMASK(4, 0)
#define AXP15060_BLDO3_V_CTRL_MASK GENMASK(4, 0)
#define AXP15060_BLDO4_V_CTRL_MASK GENMASK(4, 0)
#define AXP15060_BLDO5_V_CTRL_MASK GENMASK(4, 0)
#define AXP15060_CLDO1_V_CTRL_MASK GENMASK(4, 0)
#define AXP15060_CLDO2_V_CTRL_MASK GENMASK(4, 0)
#define AXP15060_CLDO3_V_CTRL_MASK GENMASK(4, 0)
#define AXP15060_CLDO4_V_CTRL_MASK GENMASK(5, 0)
#define AXP15060_CPUSLDO_V_CTRL_MASK GENMASK(3, 0)
#define AXP15060_PWR_OUT_DCDC1_MASK BIT_MASK(0)
#define AXP15060_PWR_OUT_DCDC2_MASK BIT_MASK(1)
#define AXP15060_PWR_OUT_DCDC3_MASK BIT_MASK(2)
#define AXP15060_PWR_OUT_DCDC4_MASK BIT_MASK(3)
#define AXP15060_PWR_OUT_DCDC5_MASK BIT_MASK(4)
#define AXP15060_PWR_OUT_DCDC6_MASK BIT_MASK(5)
#define AXP15060_PWR_OUT_ALDO1_MASK BIT_MASK(0)
#define AXP15060_PWR_OUT_ALDO2_MASK BIT_MASK(1)
#define AXP15060_PWR_OUT_ALDO3_MASK BIT_MASK(2)
#define AXP15060_PWR_OUT_ALDO4_MASK BIT_MASK(3)
#define AXP15060_PWR_OUT_ALDO5_MASK BIT_MASK(4)
#define AXP15060_PWR_OUT_BLDO1_MASK BIT_MASK(5)
#define AXP15060_PWR_OUT_BLDO2_MASK BIT_MASK(6)
#define AXP15060_PWR_OUT_BLDO3_MASK BIT_MASK(7)
#define AXP15060_PWR_OUT_BLDO4_MASK BIT_MASK(0)
#define AXP15060_PWR_OUT_BLDO5_MASK BIT_MASK(1)
#define AXP15060_PWR_OUT_CLDO1_MASK BIT_MASK(2)
#define AXP15060_PWR_OUT_CLDO2_MASK BIT_MASK(3)
#define AXP15060_PWR_OUT_CLDO3_MASK BIT_MASK(4)
#define AXP15060_PWR_OUT_CLDO4_MASK BIT_MASK(5)
#define AXP15060_PWR_OUT_CPUSLDO_MASK BIT_MASK(6)
#define AXP15060_PWR_OUT_SW_MASK BIT_MASK(7)
#define AXP15060_DCDC23_POLYPHASE_DUAL_MASK BIT_MASK(6)
#define AXP15060_DCDC46_POLYPHASE_DUAL_MASK BIT_MASK(7)
#define AXP15060_DCDC234_500mV_START 0x00
#define AXP15060_DCDC234_500mV_STEPS 70
#define AXP15060_DCDC234_500mV_END \
(AXP15060_DCDC234_500mV_START + AXP15060_DCDC234_500mV_STEPS)
#define AXP15060_DCDC234_1220mV_START 0x47
#define AXP15060_DCDC234_1220mV_STEPS 16
#define AXP15060_DCDC234_1220mV_END \
(AXP15060_DCDC234_1220mV_START + AXP15060_DCDC234_1220mV_STEPS)
#define AXP15060_DCDC234_NUM_VOLTAGES 88
#define AXP15060_DCDC5_800mV_START 0x00
#define AXP15060_DCDC5_800mV_STEPS 32
#define AXP15060_DCDC5_800mV_END \
(AXP15060_DCDC5_800mV_START + AXP15060_DCDC5_800mV_STEPS)
#define AXP15060_DCDC5_1140mV_START 0x21
#define AXP15060_DCDC5_1140mV_STEPS 35
#define AXP15060_DCDC5_1140mV_END \
(AXP15060_DCDC5_1140mV_START + AXP15060_DCDC5_1140mV_STEPS)
#define AXP15060_DCDC5_NUM_VOLTAGES 69
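/*
 * Helper macros for building struct regulator_desc tables.  Voltages are
 * passed in millivolts and converted to the microvolt values the regulator
 * core expects; for example
 *
 *   AXP_DESC(AXP20X, DCDC2, "dcdc2", "vin2", 700, 2275, 25, ...)
 *
 * yields min_uV = 700000, uV_step = 25000 and n_voltages = 64.
 */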
#define AXP_DESC_IO(_family, _id, _match, _supply, _min, _max, _step, _vreg, \
_vmask, _ereg, _emask, _enable_val, _disable_val) \
[_family##_##_id] = { \
.name = (_match), \
.supply_name = (_supply), \
.of_match = of_match_ptr(_match), \
.regulators_node = of_match_ptr("regulators"), \
.type = REGULATOR_VOLTAGE, \
.id = _family##_##_id, \
.n_voltages = (((_max) - (_min)) / (_step) + 1), \
.owner = THIS_MODULE, \
.min_uV = (_min) * 1000, \
.uV_step = (_step) * 1000, \
.vsel_reg = (_vreg), \
.vsel_mask = (_vmask), \
.enable_reg = (_ereg), \
.enable_mask = (_emask), \
.enable_val = (_enable_val), \
.disable_val = (_disable_val), \
.ops = &axp20x_ops, \
}
#define AXP_DESC(_family, _id, _match, _supply, _min, _max, _step, _vreg, \
_vmask, _ereg, _emask) \
[_family##_##_id] = { \
.name = (_match), \
.supply_name = (_supply), \
.of_match = of_match_ptr(_match), \
.regulators_node = of_match_ptr("regulators"), \
.type = REGULATOR_VOLTAGE, \
.id = _family##_##_id, \
.n_voltages = (((_max) - (_min)) / (_step) + 1), \
.owner = THIS_MODULE, \
.min_uV = (_min) * 1000, \
.uV_step = (_step) * 1000, \
.vsel_reg = (_vreg), \
.vsel_mask = (_vmask), \
.enable_reg = (_ereg), \
.enable_mask = (_emask), \
.ops = &axp20x_ops, \
}
#define AXP_DESC_SW(_family, _id, _match, _supply, _ereg, _emask) \
[_family##_##_id] = { \
.name = (_match), \
.supply_name = (_supply), \
.of_match = of_match_ptr(_match), \
.regulators_node = of_match_ptr("regulators"), \
.type = REGULATOR_VOLTAGE, \
.id = _family##_##_id, \
.owner = THIS_MODULE, \
.enable_reg = (_ereg), \
.enable_mask = (_emask), \
.ops = &axp20x_ops_sw, \
}
#define AXP_DESC_FIXED(_family, _id, _match, _supply, _volt) \
[_family##_##_id] = { \
.name = (_match), \
.supply_name = (_supply), \
.of_match = of_match_ptr(_match), \
.regulators_node = of_match_ptr("regulators"), \
.type = REGULATOR_VOLTAGE, \
.id = _family##_##_id, \
.n_voltages = 1, \
.owner = THIS_MODULE, \
.min_uV = (_volt) * 1000, \
.ops = &axp20x_ops_fixed \
}
#define AXP_DESC_RANGES(_family, _id, _match, _supply, _ranges, _n_voltages, \
_vreg, _vmask, _ereg, _emask) \
[_family##_##_id] = { \
.name = (_match), \
.supply_name = (_supply), \
.of_match = of_match_ptr(_match), \
.regulators_node = of_match_ptr("regulators"), \
.type = REGULATOR_VOLTAGE, \
.id = _family##_##_id, \
.n_voltages = (_n_voltages), \
.owner = THIS_MODULE, \
.vsel_reg = (_vreg), \
.vsel_mask = (_vmask), \
.enable_reg = (_ereg), \
.enable_mask = (_emask), \
.linear_ranges = (_ranges), \
.n_linear_ranges = ARRAY_SIZE(_ranges), \
.ops = &axp20x_ops_range, \
}
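/*
 * Supported DCDC2/LDO3 slew rates, fastest first; the table index is what
 * gets programmed into the ramp-rate register.  Values are in the uV/us
 * unit used by the regulator core's set_ramp_delay() callback.
 */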
static const int axp209_dcdc2_ldo3_slew_rates[] = {
1600,
800,
};
static int axp20x_set_ramp_delay(struct regulator_dev *rdev, int ramp)
{
struct axp20x_dev *axp20x = rdev_get_drvdata(rdev);
int id = rdev_get_id(rdev);
u8 reg, mask, enable, cfg = 0xff;
const int *slew_rates;
int rate_count = 0;
switch (axp20x->variant) {
case AXP209_ID:
if (id == AXP20X_DCDC2) {
slew_rates = axp209_dcdc2_ldo3_slew_rates;
rate_count = ARRAY_SIZE(axp209_dcdc2_ldo3_slew_rates);
reg = AXP20X_DCDC2_LDO3_V_RAMP;
mask = AXP20X_DCDC2_LDO3_V_RAMP_DCDC2_RATE_MASK |
AXP20X_DCDC2_LDO3_V_RAMP_DCDC2_EN_MASK;
enable = (ramp > 0) ?
AXP20X_DCDC2_LDO3_V_RAMP_DCDC2_EN : 0;
break;
}
if (id == AXP20X_LDO3) {
slew_rates = axp209_dcdc2_ldo3_slew_rates;
rate_count = ARRAY_SIZE(axp209_dcdc2_ldo3_slew_rates);
reg = AXP20X_DCDC2_LDO3_V_RAMP;
mask = AXP20X_DCDC2_LDO3_V_RAMP_LDO3_RATE_MASK |
AXP20X_DCDC2_LDO3_V_RAMP_LDO3_EN_MASK;
enable = (ramp > 0) ?
AXP20X_DCDC2_LDO3_V_RAMP_LDO3_EN : 0;
break;
}
if (rate_count > 0)
break;
fallthrough;
default:
/* Not supported for this regulator */
return -ENOTSUPP;
}
if (ramp == 0) {
cfg = enable;
} else {
int i;
for (i = 0; i < rate_count; i++) {
if (ramp > slew_rates[i])
break;
if (id == AXP20X_DCDC2)
cfg = AXP20X_DCDC2_LDO3_V_RAMP_DCDC2_RATE(i);
else
cfg = AXP20X_DCDC2_LDO3_V_RAMP_LDO3_RATE(i);
}
if (cfg == 0xff) {
			dev_err(axp20x->dev, "unsupported ramp value %d\n", ramp);
return -EINVAL;
}
cfg |= enable;
}
return regmap_update_bits(axp20x->regmap, reg, mask, cfg);
}
static int axp20x_regulator_enable_regmap(struct regulator_dev *rdev)
{
struct axp20x_dev *axp20x = rdev_get_drvdata(rdev);
int id = rdev_get_id(rdev);
switch (axp20x->variant) {
case AXP209_ID:
if ((id == AXP20X_LDO3) &&
rdev->constraints && rdev->constraints->soft_start) {
int v_out;
int ret;
/*
* On some boards, the LDO3 can be overloaded when
* turning on, causing the entire PMIC to shutdown
* without warning. Turning it on at the minimal voltage
* and then setting the voltage to the requested value
* works reliably.
*/
if (regulator_is_enabled_regmap(rdev))
break;
v_out = regulator_get_voltage_sel_regmap(rdev);
if (v_out < 0)
return v_out;
if (v_out == 0)
break;
ret = regulator_set_voltage_sel_regmap(rdev, 0x00);
/*
* A small pause is needed between
* setting the voltage and enabling the LDO to give the
* internal state machine time to process the request.
*/
usleep_range(1000, 5000);
ret |= regulator_enable_regmap(rdev);
ret |= regulator_set_voltage_sel_regmap(rdev, v_out);
return ret;
}
break;
default:
/* No quirks */
break;
}
return regulator_enable_regmap(rdev);
}
static const struct regulator_ops axp20x_ops_fixed = {
.list_voltage = regulator_list_voltage_linear,
};
static const struct regulator_ops axp20x_ops_range = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear_range,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
};
static const struct regulator_ops axp20x_ops = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear,
.enable = axp20x_regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
.set_ramp_delay = axp20x_set_ramp_delay,
};
static const struct regulator_ops axp20x_ops_sw = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
};
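/*
 * LDO4 has a non-linear voltage table, modelled here as a list of linear
 * ranges (isolated points use a step of zero).
 */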
static const struct linear_range axp20x_ldo4_ranges[] = {
REGULATOR_LINEAR_RANGE(1250000,
AXP20X_LDO4_V_OUT_1250mV_START,
AXP20X_LDO4_V_OUT_1250mV_END,
0),
REGULATOR_LINEAR_RANGE(1300000,
AXP20X_LDO4_V_OUT_1300mV_START,
AXP20X_LDO4_V_OUT_1300mV_END,
100000),
REGULATOR_LINEAR_RANGE(2500000,
AXP20X_LDO4_V_OUT_2500mV_START,
AXP20X_LDO4_V_OUT_2500mV_END,
0),
REGULATOR_LINEAR_RANGE(2700000,
AXP20X_LDO4_V_OUT_2700mV_START,
AXP20X_LDO4_V_OUT_2700mV_END,
100000),
REGULATOR_LINEAR_RANGE(3000000,
AXP20X_LDO4_V_OUT_3000mV_START,
AXP20X_LDO4_V_OUT_3000mV_END,
100000),
};
static const struct regulator_desc axp20x_regulators[] = {
AXP_DESC(AXP20X, DCDC2, "dcdc2", "vin2", 700, 2275, 25,
AXP20X_DCDC2_V_OUT, AXP20X_DCDC2_V_OUT_MASK,
AXP20X_PWR_OUT_CTRL, AXP20X_PWR_OUT_DCDC2_MASK),
AXP_DESC(AXP20X, DCDC3, "dcdc3", "vin3", 700, 3500, 25,
AXP20X_DCDC3_V_OUT, AXP20X_DCDC3_V_OUT_MASK,
AXP20X_PWR_OUT_CTRL, AXP20X_PWR_OUT_DCDC3_MASK),
AXP_DESC_FIXED(AXP20X, LDO1, "ldo1", "acin", 1300),
AXP_DESC(AXP20X, LDO2, "ldo2", "ldo24in", 1800, 3300, 100,
AXP20X_LDO24_V_OUT, AXP20X_LDO2_V_OUT_MASK,
AXP20X_PWR_OUT_CTRL, AXP20X_PWR_OUT_LDO2_MASK),
AXP_DESC(AXP20X, LDO3, "ldo3", "ldo3in", 700, 3500, 25,
AXP20X_LDO3_V_OUT, AXP20X_LDO3_V_OUT_MASK,
AXP20X_PWR_OUT_CTRL, AXP20X_PWR_OUT_LDO3_MASK),
AXP_DESC_RANGES(AXP20X, LDO4, "ldo4", "ldo24in",
axp20x_ldo4_ranges, AXP20X_LDO4_V_OUT_NUM_VOLTAGES,
AXP20X_LDO24_V_OUT, AXP20X_LDO4_V_OUT_MASK,
AXP20X_PWR_OUT_CTRL, AXP20X_PWR_OUT_LDO4_MASK),
AXP_DESC_IO(AXP20X, LDO5, "ldo5", "ldo5in", 1800, 3300, 100,
AXP20X_LDO5_V_OUT, AXP20X_LDO5_V_OUT_MASK,
AXP20X_GPIO0_CTRL, AXP20X_GPIO0_FUNC_MASK,
AXP20X_IO_ENABLED, AXP20X_IO_DISABLED),
};
static const struct regulator_desc axp22x_regulators[] = {
AXP_DESC(AXP22X, DCDC1, "dcdc1", "vin1", 1600, 3400, 100,
AXP22X_DCDC1_V_OUT, AXP22X_DCDC1_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL1, AXP22X_PWR_OUT_DCDC1_MASK),
AXP_DESC(AXP22X, DCDC2, "dcdc2", "vin2", 600, 1540, 20,
AXP22X_DCDC2_V_OUT, AXP22X_DCDC2_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL1, AXP22X_PWR_OUT_DCDC2_MASK),
AXP_DESC(AXP22X, DCDC3, "dcdc3", "vin3", 600, 1860, 20,
AXP22X_DCDC3_V_OUT, AXP22X_DCDC3_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL1, AXP22X_PWR_OUT_DCDC3_MASK),
AXP_DESC(AXP22X, DCDC4, "dcdc4", "vin4", 600, 1540, 20,
AXP22X_DCDC4_V_OUT, AXP22X_DCDC4_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL1, AXP22X_PWR_OUT_DCDC4_MASK),
AXP_DESC(AXP22X, DCDC5, "dcdc5", "vin5", 1000, 2550, 50,
AXP22X_DCDC5_V_OUT, AXP22X_DCDC5_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL1, AXP22X_PWR_OUT_DCDC5_MASK),
/* secondary switchable output of DCDC1 */
AXP_DESC_SW(AXP22X, DC1SW, "dc1sw", NULL,
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DC1SW_MASK),
/* LDO regulator internally chained to DCDC5 */
AXP_DESC(AXP22X, DC5LDO, "dc5ldo", NULL, 700, 1400, 100,
AXP22X_DC5LDO_V_OUT, AXP22X_DC5LDO_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL1, AXP22X_PWR_OUT_DC5LDO_MASK),
AXP_DESC(AXP22X, ALDO1, "aldo1", "aldoin", 700, 3300, 100,
AXP22X_ALDO1_V_OUT, AXP22X_ALDO1_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL1, AXP22X_PWR_OUT_ALDO1_MASK),
AXP_DESC(AXP22X, ALDO2, "aldo2", "aldoin", 700, 3300, 100,
AXP22X_ALDO2_V_OUT, AXP22X_ALDO2_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL1, AXP22X_PWR_OUT_ALDO2_MASK),
AXP_DESC(AXP22X, ALDO3, "aldo3", "aldoin", 700, 3300, 100,
AXP22X_ALDO3_V_OUT, AXP22X_ALDO3_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL3, AXP22X_PWR_OUT_ALDO3_MASK),
AXP_DESC(AXP22X, DLDO1, "dldo1", "dldoin", 700, 3300, 100,
AXP22X_DLDO1_V_OUT, AXP22X_DLDO1_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO1_MASK),
AXP_DESC(AXP22X, DLDO2, "dldo2", "dldoin", 700, 3300, 100,
AXP22X_DLDO2_V_OUT, AXP22X_DLDO2_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO2_MASK),
AXP_DESC(AXP22X, DLDO3, "dldo3", "dldoin", 700, 3300, 100,
AXP22X_DLDO3_V_OUT, AXP22X_DLDO3_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO3_MASK),
AXP_DESC(AXP22X, DLDO4, "dldo4", "dldoin", 700, 3300, 100,
AXP22X_DLDO4_V_OUT, AXP22X_DLDO4_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO4_MASK),
AXP_DESC(AXP22X, ELDO1, "eldo1", "eldoin", 700, 3300, 100,
AXP22X_ELDO1_V_OUT, AXP22X_ELDO1_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO1_MASK),
AXP_DESC(AXP22X, ELDO2, "eldo2", "eldoin", 700, 3300, 100,
AXP22X_ELDO2_V_OUT, AXP22X_ELDO2_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO2_MASK),
AXP_DESC(AXP22X, ELDO3, "eldo3", "eldoin", 700, 3300, 100,
AXP22X_ELDO3_V_OUT, AXP22X_ELDO3_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO3_MASK),
	/* Note: the datasheet only guarantees reliable operation up to
	 * 3.3 V; this must be enforced via DT-provided constraints. */
AXP_DESC_IO(AXP22X, LDO_IO0, "ldo_io0", "ips", 700, 3800, 100,
AXP22X_LDO_IO0_V_OUT, AXP22X_LDO_IO0_V_OUT_MASK,
AXP20X_GPIO0_CTRL, AXP20X_GPIO0_FUNC_MASK,
AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
	/* Note: the datasheet only guarantees reliable operation up to
	 * 3.3 V; this must be enforced via DT-provided constraints. */
AXP_DESC_IO(AXP22X, LDO_IO1, "ldo_io1", "ips", 700, 3800, 100,
AXP22X_LDO_IO1_V_OUT, AXP22X_LDO_IO1_V_OUT_MASK,
AXP20X_GPIO1_CTRL, AXP20X_GPIO1_FUNC_MASK,
AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
AXP_DESC_FIXED(AXP22X, RTC_LDO, "rtc_ldo", "ips", 3000),
};
static const struct regulator_desc axp22x_drivevbus_regulator = {
.name = "drivevbus",
.supply_name = "drivevbus",
.of_match = of_match_ptr("drivevbus"),
.regulators_node = of_match_ptr("regulators"),
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
.enable_reg = AXP20X_VBUS_IPSOUT_MGMT,
.enable_mask = AXP20X_VBUS_IPSOUT_MGMT_MASK,
.ops = &axp20x_ops_sw,
};
static const struct linear_range axp313a_dcdc1_ranges[] = {
REGULATOR_LINEAR_RANGE(500000, 0, 70, 10000),
REGULATOR_LINEAR_RANGE(1220000, 71, 87, 20000),
REGULATOR_LINEAR_RANGE(1600000, 88, 106, 100000),
};
static const struct linear_range axp313a_dcdc2_ranges[] = {
REGULATOR_LINEAR_RANGE(500000, 0, 70, 10000),
REGULATOR_LINEAR_RANGE(1220000, 71, 87, 20000),
};
/*
* This is deviating from the datasheet. The values here are taken from the
* BSP driver and have been confirmed by measurements.
*/
static const struct linear_range axp313a_dcdc3_ranges[] = {
REGULATOR_LINEAR_RANGE(500000, 0, 70, 10000),
REGULATOR_LINEAR_RANGE(1220000, 71, 102, 20000),
};
static const struct regulator_desc axp313a_regulators[] = {
AXP_DESC_RANGES(AXP313A, DCDC1, "dcdc1", "vin1",
axp313a_dcdc1_ranges, AXP313A_DCDC1_NUM_VOLTAGES,
AXP313A_DCDC1_CONRTOL, AXP313A_DCDC_V_OUT_MASK,
AXP313A_OUTPUT_CONTROL, BIT(0)),
AXP_DESC_RANGES(AXP313A, DCDC2, "dcdc2", "vin2",
axp313a_dcdc2_ranges, AXP313A_DCDC23_NUM_VOLTAGES,
AXP313A_DCDC2_CONRTOL, AXP313A_DCDC_V_OUT_MASK,
AXP313A_OUTPUT_CONTROL, BIT(1)),
AXP_DESC_RANGES(AXP313A, DCDC3, "dcdc3", "vin3",
axp313a_dcdc3_ranges, AXP313A_DCDC23_NUM_VOLTAGES,
AXP313A_DCDC3_CONRTOL, AXP313A_DCDC_V_OUT_MASK,
AXP313A_OUTPUT_CONTROL, BIT(2)),
AXP_DESC(AXP313A, ALDO1, "aldo1", "vin1", 500, 3500, 100,
AXP313A_ALDO1_CONRTOL, AXP313A_LDO_V_OUT_MASK,
AXP313A_OUTPUT_CONTROL, BIT(3)),
AXP_DESC(AXP313A, DLDO1, "dldo1", "vin1", 500, 3500, 100,
AXP313A_DLDO1_CONRTOL, AXP313A_LDO_V_OUT_MASK,
AXP313A_OUTPUT_CONTROL, BIT(4)),
AXP_DESC_FIXED(AXP313A, RTC_LDO, "rtc-ldo", "vin1", 1800),
};
/* DCDC ranges shared with AXP813 */
static const struct linear_range axp803_dcdc234_ranges[] = {
REGULATOR_LINEAR_RANGE(500000,
AXP803_DCDC234_500mV_START,
AXP803_DCDC234_500mV_END,
10000),
REGULATOR_LINEAR_RANGE(1220000,
AXP803_DCDC234_1220mV_START,
AXP803_DCDC234_1220mV_END,
20000),
};
static const struct linear_range axp803_dcdc5_ranges[] = {
REGULATOR_LINEAR_RANGE(800000,
AXP803_DCDC5_800mV_START,
AXP803_DCDC5_800mV_END,
10000),
REGULATOR_LINEAR_RANGE(1140000,
AXP803_DCDC5_1140mV_START,
AXP803_DCDC5_1140mV_END,
20000),
};
static const struct linear_range axp803_dcdc6_ranges[] = {
REGULATOR_LINEAR_RANGE(600000,
AXP803_DCDC6_600mV_START,
AXP803_DCDC6_600mV_END,
10000),
REGULATOR_LINEAR_RANGE(1120000,
AXP803_DCDC6_1120mV_START,
AXP803_DCDC6_1120mV_END,
20000),
};
/* AXP806's CLDO2 and AXP809's DLDO1 share the same range */
static const struct linear_range axp803_dldo2_ranges[] = {
REGULATOR_LINEAR_RANGE(700000,
AXP803_DLDO2_700mV_START,
AXP803_DLDO2_700mV_END,
100000),
REGULATOR_LINEAR_RANGE(3400000,
AXP803_DLDO2_3400mV_START,
AXP803_DLDO2_3400mV_END,
200000),
};
static const struct regulator_desc axp803_regulators[] = {
AXP_DESC(AXP803, DCDC1, "dcdc1", "vin1", 1600, 3400, 100,
AXP803_DCDC1_V_OUT, AXP803_DCDC1_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL1, AXP803_PWR_OUT_DCDC1_MASK),
AXP_DESC_RANGES(AXP803, DCDC2, "dcdc2", "vin2",
axp803_dcdc234_ranges, AXP803_DCDC234_NUM_VOLTAGES,
AXP803_DCDC2_V_OUT, AXP803_DCDC2_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL1, AXP803_PWR_OUT_DCDC2_MASK),
AXP_DESC_RANGES(AXP803, DCDC3, "dcdc3", "vin3",
axp803_dcdc234_ranges, AXP803_DCDC234_NUM_VOLTAGES,
AXP803_DCDC3_V_OUT, AXP803_DCDC3_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL1, AXP803_PWR_OUT_DCDC3_MASK),
AXP_DESC_RANGES(AXP803, DCDC4, "dcdc4", "vin4",
axp803_dcdc234_ranges, AXP803_DCDC234_NUM_VOLTAGES,
AXP803_DCDC4_V_OUT, AXP803_DCDC4_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL1, AXP803_PWR_OUT_DCDC4_MASK),
AXP_DESC_RANGES(AXP803, DCDC5, "dcdc5", "vin5",
axp803_dcdc5_ranges, AXP803_DCDC5_NUM_VOLTAGES,
AXP803_DCDC5_V_OUT, AXP803_DCDC5_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL1, AXP803_PWR_OUT_DCDC5_MASK),
AXP_DESC_RANGES(AXP803, DCDC6, "dcdc6", "vin6",
axp803_dcdc6_ranges, AXP803_DCDC6_NUM_VOLTAGES,
AXP803_DCDC6_V_OUT, AXP803_DCDC6_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL1, AXP803_PWR_OUT_DCDC6_MASK),
/* secondary switchable output of DCDC1 */
AXP_DESC_SW(AXP803, DC1SW, "dc1sw", NULL,
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DC1SW_MASK),
AXP_DESC(AXP803, ALDO1, "aldo1", "aldoin", 700, 3300, 100,
AXP22X_ALDO1_V_OUT, AXP22X_ALDO1_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL3, AXP806_PWR_OUT_ALDO1_MASK),
AXP_DESC(AXP803, ALDO2, "aldo2", "aldoin", 700, 3300, 100,
AXP22X_ALDO2_V_OUT, AXP22X_ALDO2_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL3, AXP806_PWR_OUT_ALDO2_MASK),
AXP_DESC(AXP803, ALDO3, "aldo3", "aldoin", 700, 3300, 100,
AXP22X_ALDO3_V_OUT, AXP22X_ALDO3_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL3, AXP806_PWR_OUT_ALDO3_MASK),
AXP_DESC(AXP803, DLDO1, "dldo1", "dldoin", 700, 3300, 100,
AXP22X_DLDO1_V_OUT, AXP22X_DLDO1_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO1_MASK),
AXP_DESC_RANGES(AXP803, DLDO2, "dldo2", "dldoin",
axp803_dldo2_ranges, AXP803_DLDO2_NUM_VOLTAGES,
AXP22X_DLDO2_V_OUT, AXP22X_DLDO2_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO2_MASK),
AXP_DESC(AXP803, DLDO3, "dldo3", "dldoin", 700, 3300, 100,
AXP22X_DLDO3_V_OUT, AXP22X_DLDO3_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO3_MASK),
AXP_DESC(AXP803, DLDO4, "dldo4", "dldoin", 700, 3300, 100,
AXP22X_DLDO4_V_OUT, AXP22X_DLDO4_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO4_MASK),
AXP_DESC(AXP803, ELDO1, "eldo1", "eldoin", 700, 1900, 50,
AXP22X_ELDO1_V_OUT, AXP22X_ELDO1_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO1_MASK),
AXP_DESC(AXP803, ELDO2, "eldo2", "eldoin", 700, 1900, 50,
AXP22X_ELDO2_V_OUT, AXP22X_ELDO2_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO2_MASK),
AXP_DESC(AXP803, ELDO3, "eldo3", "eldoin", 700, 1900, 50,
AXP22X_ELDO3_V_OUT, AXP22X_ELDO3_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO3_MASK),
AXP_DESC(AXP803, FLDO1, "fldo1", "fldoin", 700, 1450, 50,
AXP803_FLDO1_V_OUT, AXP803_FLDO1_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL3, AXP803_PWR_OUT_FLDO1_MASK),
AXP_DESC(AXP803, FLDO2, "fldo2", "fldoin", 700, 1450, 50,
AXP803_FLDO2_V_OUT, AXP803_FLDO2_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL3, AXP803_PWR_OUT_FLDO2_MASK),
AXP_DESC_IO(AXP803, LDO_IO0, "ldo-io0", "ips", 700, 3300, 100,
AXP22X_LDO_IO0_V_OUT, AXP22X_LDO_IO0_V_OUT_MASK,
AXP20X_GPIO0_CTRL, AXP20X_GPIO0_FUNC_MASK,
AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
AXP_DESC_IO(AXP803, LDO_IO1, "ldo-io1", "ips", 700, 3300, 100,
AXP22X_LDO_IO1_V_OUT, AXP22X_LDO_IO1_V_OUT_MASK,
AXP20X_GPIO1_CTRL, AXP20X_GPIO1_FUNC_MASK,
AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
AXP_DESC_FIXED(AXP803, RTC_LDO, "rtc-ldo", "ips", 3000),
};
static const struct linear_range axp806_dcdca_ranges[] = {
REGULATOR_LINEAR_RANGE(600000,
AXP806_DCDCA_600mV_START,
AXP806_DCDCA_600mV_END,
10000),
REGULATOR_LINEAR_RANGE(1120000,
AXP806_DCDCA_1120mV_START,
AXP806_DCDCA_1120mV_END,
20000),
};
static const struct linear_range axp806_dcdcd_ranges[] = {
REGULATOR_LINEAR_RANGE(600000,
AXP806_DCDCD_600mV_START,
AXP806_DCDCD_600mV_END,
20000),
REGULATOR_LINEAR_RANGE(1600000,
AXP806_DCDCD_1600mV_START,
AXP806_DCDCD_1600mV_END,
100000),
};
static const struct regulator_desc axp806_regulators[] = {
AXP_DESC_RANGES(AXP806, DCDCA, "dcdca", "vina",
axp806_dcdca_ranges, AXP806_DCDCA_NUM_VOLTAGES,
AXP806_DCDCA_V_CTRL, AXP806_DCDCA_V_CTRL_MASK,
AXP806_PWR_OUT_CTRL1, AXP806_PWR_OUT_DCDCA_MASK),
AXP_DESC(AXP806, DCDCB, "dcdcb", "vinb", 1000, 2550, 50,
AXP806_DCDCB_V_CTRL, AXP806_DCDCB_V_CTRL_MASK,
AXP806_PWR_OUT_CTRL1, AXP806_PWR_OUT_DCDCB_MASK),
AXP_DESC_RANGES(AXP806, DCDCC, "dcdcc", "vinc",
axp806_dcdca_ranges, AXP806_DCDCA_NUM_VOLTAGES,
AXP806_DCDCC_V_CTRL, AXP806_DCDCC_V_CTRL_MASK,
AXP806_PWR_OUT_CTRL1, AXP806_PWR_OUT_DCDCC_MASK),
AXP_DESC_RANGES(AXP806, DCDCD, "dcdcd", "vind",
axp806_dcdcd_ranges, AXP806_DCDCD_NUM_VOLTAGES,
AXP806_DCDCD_V_CTRL, AXP806_DCDCD_V_CTRL_MASK,
AXP806_PWR_OUT_CTRL1, AXP806_PWR_OUT_DCDCD_MASK),
AXP_DESC(AXP806, DCDCE, "dcdce", "vine", 1100, 3400, 100,
AXP806_DCDCE_V_CTRL, AXP806_DCDCE_V_CTRL_MASK,
AXP806_PWR_OUT_CTRL1, AXP806_PWR_OUT_DCDCE_MASK),
AXP_DESC(AXP806, ALDO1, "aldo1", "aldoin", 700, 3300, 100,
AXP806_ALDO1_V_CTRL, AXP806_ALDO1_V_CTRL_MASK,
AXP806_PWR_OUT_CTRL1, AXP806_PWR_OUT_ALDO1_MASK),
AXP_DESC(AXP806, ALDO2, "aldo2", "aldoin", 700, 3400, 100,
AXP806_ALDO2_V_CTRL, AXP806_ALDO2_V_CTRL_MASK,
AXP806_PWR_OUT_CTRL1, AXP806_PWR_OUT_ALDO2_MASK),
AXP_DESC(AXP806, ALDO3, "aldo3", "aldoin", 700, 3300, 100,
AXP806_ALDO3_V_CTRL, AXP806_ALDO3_V_CTRL_MASK,
AXP806_PWR_OUT_CTRL1, AXP806_PWR_OUT_ALDO3_MASK),
AXP_DESC(AXP806, BLDO1, "bldo1", "bldoin", 700, 1900, 100,
AXP806_BLDO1_V_CTRL, AXP806_BLDO1_V_CTRL_MASK,
AXP806_PWR_OUT_CTRL2, AXP806_PWR_OUT_BLDO1_MASK),
AXP_DESC(AXP806, BLDO2, "bldo2", "bldoin", 700, 1900, 100,
AXP806_BLDO2_V_CTRL, AXP806_BLDO2_V_CTRL_MASK,
AXP806_PWR_OUT_CTRL2, AXP806_PWR_OUT_BLDO2_MASK),
AXP_DESC(AXP806, BLDO3, "bldo3", "bldoin", 700, 1900, 100,
AXP806_BLDO3_V_CTRL, AXP806_BLDO3_V_CTRL_MASK,
AXP806_PWR_OUT_CTRL2, AXP806_PWR_OUT_BLDO3_MASK),
AXP_DESC(AXP806, BLDO4, "bldo4", "bldoin", 700, 1900, 100,
AXP806_BLDO4_V_CTRL, AXP806_BLDO4_V_CTRL_MASK,
AXP806_PWR_OUT_CTRL2, AXP806_PWR_OUT_BLDO4_MASK),
AXP_DESC(AXP806, CLDO1, "cldo1", "cldoin", 700, 3300, 100,
AXP806_CLDO1_V_CTRL, AXP806_CLDO1_V_CTRL_MASK,
AXP806_PWR_OUT_CTRL2, AXP806_PWR_OUT_CLDO1_MASK),
AXP_DESC_RANGES(AXP806, CLDO2, "cldo2", "cldoin",
axp803_dldo2_ranges, AXP803_DLDO2_NUM_VOLTAGES,
AXP806_CLDO2_V_CTRL, AXP806_CLDO2_V_CTRL_MASK,
AXP806_PWR_OUT_CTRL2, AXP806_PWR_OUT_CLDO2_MASK),
AXP_DESC(AXP806, CLDO3, "cldo3", "cldoin", 700, 3300, 100,
AXP806_CLDO3_V_CTRL, AXP806_CLDO3_V_CTRL_MASK,
AXP806_PWR_OUT_CTRL2, AXP806_PWR_OUT_CLDO3_MASK),
AXP_DESC_SW(AXP806, SW, "sw", "swin",
AXP806_PWR_OUT_CTRL2, AXP806_PWR_OUT_SW_MASK),
};
static const struct linear_range axp809_dcdc4_ranges[] = {
REGULATOR_LINEAR_RANGE(600000,
AXP809_DCDC4_600mV_START,
AXP809_DCDC4_600mV_END,
20000),
REGULATOR_LINEAR_RANGE(1800000,
AXP809_DCDC4_1800mV_START,
AXP809_DCDC4_1800mV_END,
100000),
};
static const struct regulator_desc axp809_regulators[] = {
AXP_DESC(AXP809, DCDC1, "dcdc1", "vin1", 1600, 3400, 100,
AXP22X_DCDC1_V_OUT, AXP22X_DCDC1_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL1, AXP22X_PWR_OUT_DCDC1_MASK),
AXP_DESC(AXP809, DCDC2, "dcdc2", "vin2", 600, 1540, 20,
AXP22X_DCDC2_V_OUT, AXP22X_DCDC2_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL1, AXP22X_PWR_OUT_DCDC2_MASK),
AXP_DESC(AXP809, DCDC3, "dcdc3", "vin3", 600, 1860, 20,
AXP22X_DCDC3_V_OUT, AXP22X_DCDC3_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL1, AXP22X_PWR_OUT_DCDC3_MASK),
AXP_DESC_RANGES(AXP809, DCDC4, "dcdc4", "vin4",
axp809_dcdc4_ranges, AXP809_DCDC4_NUM_VOLTAGES,
AXP22X_DCDC4_V_OUT, AXP22X_DCDC4_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL1, AXP22X_PWR_OUT_DCDC4_MASK),
AXP_DESC(AXP809, DCDC5, "dcdc5", "vin5", 1000, 2550, 50,
AXP22X_DCDC5_V_OUT, AXP22X_DCDC5_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL1, AXP22X_PWR_OUT_DCDC5_MASK),
/* secondary switchable output of DCDC1 */
AXP_DESC_SW(AXP809, DC1SW, "dc1sw", NULL,
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DC1SW_MASK),
/* LDO regulator internally chained to DCDC5 */
AXP_DESC(AXP809, DC5LDO, "dc5ldo", NULL, 700, 1400, 100,
AXP22X_DC5LDO_V_OUT, AXP22X_DC5LDO_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL1, AXP22X_PWR_OUT_DC5LDO_MASK),
AXP_DESC(AXP809, ALDO1, "aldo1", "aldoin", 700, 3300, 100,
AXP22X_ALDO1_V_OUT, AXP22X_ALDO1_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL1, AXP22X_PWR_OUT_ALDO1_MASK),
AXP_DESC(AXP809, ALDO2, "aldo2", "aldoin", 700, 3300, 100,
AXP22X_ALDO2_V_OUT, AXP22X_ALDO2_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL1, AXP22X_PWR_OUT_ALDO2_MASK),
AXP_DESC(AXP809, ALDO3, "aldo3", "aldoin", 700, 3300, 100,
AXP22X_ALDO3_V_OUT, AXP22X_ALDO3_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ALDO3_MASK),
AXP_DESC_RANGES(AXP809, DLDO1, "dldo1", "dldoin",
axp803_dldo2_ranges, AXP803_DLDO2_NUM_VOLTAGES,
AXP22X_DLDO1_V_OUT, AXP22X_DLDO1_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO1_MASK),
AXP_DESC(AXP809, DLDO2, "dldo2", "dldoin", 700, 3300, 100,
AXP22X_DLDO2_V_OUT, AXP22X_DLDO2_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO2_MASK),
AXP_DESC(AXP809, ELDO1, "eldo1", "eldoin", 700, 3300, 100,
AXP22X_ELDO1_V_OUT, AXP22X_ELDO1_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO1_MASK),
AXP_DESC(AXP809, ELDO2, "eldo2", "eldoin", 700, 3300, 100,
AXP22X_ELDO2_V_OUT, AXP22X_ELDO2_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO2_MASK),
AXP_DESC(AXP809, ELDO3, "eldo3", "eldoin", 700, 3300, 100,
AXP22X_ELDO3_V_OUT, AXP22X_ELDO3_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO3_MASK),
/*
* Note the datasheet only guarantees reliable operation up to
* 3.3V, this needs to be enforced via dts provided constraints
*/
AXP_DESC_IO(AXP809, LDO_IO0, "ldo_io0", "ips", 700, 3800, 100,
AXP22X_LDO_IO0_V_OUT, AXP22X_LDO_IO0_V_OUT_MASK,
AXP20X_GPIO0_CTRL, AXP20X_GPIO0_FUNC_MASK,
AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
/*
* Note the datasheet only guarantees reliable operation up to
* 3.3V, this needs to be enforced via dts provided constraints
*/
AXP_DESC_IO(AXP809, LDO_IO1, "ldo_io1", "ips", 700, 3800, 100,
AXP22X_LDO_IO1_V_OUT, AXP22X_LDO_IO1_V_OUT_MASK,
AXP20X_GPIO1_CTRL, AXP20X_GPIO1_FUNC_MASK,
AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
AXP_DESC_FIXED(AXP809, RTC_LDO, "rtc_ldo", "ips", 1800),
AXP_DESC_SW(AXP809, SW, "sw", "swin",
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_SW_MASK),
};
static const struct regulator_desc axp813_regulators[] = {
AXP_DESC(AXP813, DCDC1, "dcdc1", "vin1", 1600, 3400, 100,
AXP803_DCDC1_V_OUT, AXP803_DCDC1_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL1, AXP803_PWR_OUT_DCDC1_MASK),
AXP_DESC_RANGES(AXP813, DCDC2, "dcdc2", "vin2",
axp803_dcdc234_ranges, AXP803_DCDC234_NUM_VOLTAGES,
AXP803_DCDC2_V_OUT, AXP803_DCDC2_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL1, AXP803_PWR_OUT_DCDC2_MASK),
AXP_DESC_RANGES(AXP813, DCDC3, "dcdc3", "vin3",
axp803_dcdc234_ranges, AXP803_DCDC234_NUM_VOLTAGES,
AXP803_DCDC3_V_OUT, AXP803_DCDC3_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL1, AXP803_PWR_OUT_DCDC3_MASK),
AXP_DESC_RANGES(AXP813, DCDC4, "dcdc4", "vin4",
axp803_dcdc234_ranges, AXP803_DCDC234_NUM_VOLTAGES,
AXP803_DCDC4_V_OUT, AXP803_DCDC4_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL1, AXP803_PWR_OUT_DCDC4_MASK),
AXP_DESC_RANGES(AXP813, DCDC5, "dcdc5", "vin5",
axp803_dcdc5_ranges, AXP803_DCDC5_NUM_VOLTAGES,
AXP803_DCDC5_V_OUT, AXP803_DCDC5_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL1, AXP803_PWR_OUT_DCDC5_MASK),
AXP_DESC_RANGES(AXP813, DCDC6, "dcdc6", "vin6",
axp803_dcdc6_ranges, AXP803_DCDC6_NUM_VOLTAGES,
AXP803_DCDC6_V_OUT, AXP803_DCDC6_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL1, AXP803_PWR_OUT_DCDC6_MASK),
AXP_DESC_RANGES(AXP813, DCDC7, "dcdc7", "vin7",
axp803_dcdc6_ranges, AXP803_DCDC6_NUM_VOLTAGES,
AXP813_DCDC7_V_OUT, AXP813_DCDC7_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL1, AXP813_PWR_OUT_DCDC7_MASK),
AXP_DESC(AXP813, ALDO1, "aldo1", "aldoin", 700, 3300, 100,
AXP22X_ALDO1_V_OUT, AXP22X_ALDO1_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL3, AXP806_PWR_OUT_ALDO1_MASK),
AXP_DESC(AXP813, ALDO2, "aldo2", "aldoin", 700, 3300, 100,
AXP22X_ALDO2_V_OUT, AXP22X_ALDO2_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL3, AXP806_PWR_OUT_ALDO2_MASK),
AXP_DESC(AXP813, ALDO3, "aldo3", "aldoin", 700, 3300, 100,
AXP22X_ALDO3_V_OUT, AXP22X_ALDO3_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL3, AXP806_PWR_OUT_ALDO3_MASK),
AXP_DESC(AXP813, DLDO1, "dldo1", "dldoin", 700, 3300, 100,
AXP22X_DLDO1_V_OUT, AXP22X_DLDO1_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO1_MASK),
AXP_DESC_RANGES(AXP813, DLDO2, "dldo2", "dldoin",
axp803_dldo2_ranges, AXP803_DLDO2_NUM_VOLTAGES,
AXP22X_DLDO2_V_OUT, AXP22X_DLDO2_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO2_MASK),
AXP_DESC(AXP813, DLDO3, "dldo3", "dldoin", 700, 3300, 100,
AXP22X_DLDO3_V_OUT, AXP22X_DLDO3_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO3_MASK),
AXP_DESC(AXP813, DLDO4, "dldo4", "dldoin", 700, 3300, 100,
AXP22X_DLDO4_V_OUT, AXP22X_DLDO4_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO4_MASK),
AXP_DESC(AXP813, ELDO1, "eldo1", "eldoin", 700, 1900, 50,
AXP22X_ELDO1_V_OUT, AXP22X_ELDO1_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO1_MASK),
AXP_DESC(AXP813, ELDO2, "eldo2", "eldoin", 700, 1900, 50,
AXP22X_ELDO2_V_OUT, AXP22X_ELDO2_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO2_MASK),
AXP_DESC(AXP813, ELDO3, "eldo3", "eldoin", 700, 1900, 50,
AXP22X_ELDO3_V_OUT, AXP22X_ELDO3_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO3_MASK),
/* to do / check ... */
AXP_DESC(AXP813, FLDO1, "fldo1", "fldoin", 700, 1450, 50,
AXP803_FLDO1_V_OUT, AXP803_FLDO1_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL3, AXP803_PWR_OUT_FLDO1_MASK),
AXP_DESC(AXP813, FLDO2, "fldo2", "fldoin", 700, 1450, 50,
AXP803_FLDO2_V_OUT, AXP803_FLDO2_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL3, AXP803_PWR_OUT_FLDO2_MASK),
/*
* TODO: FLDO3 = {DCDC5, FLDOIN} / 2
*
* This means FLDO3 effectively switches supplies at runtime,
* something the regulator subsystem does not support.
*/
AXP_DESC_FIXED(AXP813, RTC_LDO, "rtc-ldo", "ips", 1800),
AXP_DESC_IO(AXP813, LDO_IO0, "ldo-io0", "ips", 700, 3300, 100,
AXP22X_LDO_IO0_V_OUT, AXP22X_LDO_IO0_V_OUT_MASK,
AXP20X_GPIO0_CTRL, AXP20X_GPIO0_FUNC_MASK,
AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
AXP_DESC_IO(AXP813, LDO_IO1, "ldo-io1", "ips", 700, 3300, 100,
AXP22X_LDO_IO1_V_OUT, AXP22X_LDO_IO1_V_OUT_MASK,
AXP20X_GPIO1_CTRL, AXP20X_GPIO1_FUNC_MASK,
AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
AXP_DESC_SW(AXP813, SW, "sw", "swin",
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DC1SW_MASK),
};
static const struct linear_range axp15060_dcdc234_ranges[] = {
REGULATOR_LINEAR_RANGE(500000,
AXP15060_DCDC234_500mV_START,
AXP15060_DCDC234_500mV_END,
10000),
REGULATOR_LINEAR_RANGE(1220000,
AXP15060_DCDC234_1220mV_START,
AXP15060_DCDC234_1220mV_END,
20000),
};
static const struct linear_range axp15060_dcdc5_ranges[] = {
REGULATOR_LINEAR_RANGE(800000,
AXP15060_DCDC5_800mV_START,
AXP15060_DCDC5_800mV_END,
10000),
REGULATOR_LINEAR_RANGE(1140000,
AXP15060_DCDC5_1140mV_START,
AXP15060_DCDC5_1140mV_END,
20000),
};
static const struct regulator_desc axp15060_regulators[] = {
AXP_DESC(AXP15060, DCDC1, "dcdc1", "vin1", 1500, 3400, 100,
AXP15060_DCDC1_V_CTRL, AXP15060_DCDC1_V_CTRL_MASK,
AXP15060_PWR_OUT_CTRL1, AXP15060_PWR_OUT_DCDC1_MASK),
AXP_DESC_RANGES(AXP15060, DCDC2, "dcdc2", "vin2",
axp15060_dcdc234_ranges, AXP15060_DCDC234_NUM_VOLTAGES,
AXP15060_DCDC2_V_CTRL, AXP15060_DCDC2_V_CTRL_MASK,
AXP15060_PWR_OUT_CTRL1, AXP15060_PWR_OUT_DCDC2_MASK),
AXP_DESC_RANGES(AXP15060, DCDC3, "dcdc3", "vin3",
axp15060_dcdc234_ranges, AXP15060_DCDC234_NUM_VOLTAGES,
AXP15060_DCDC3_V_CTRL, AXP15060_DCDC3_V_CTRL_MASK,
AXP15060_PWR_OUT_CTRL1, AXP15060_PWR_OUT_DCDC3_MASK),
AXP_DESC_RANGES(AXP15060, DCDC4, "dcdc4", "vin4",
axp15060_dcdc234_ranges, AXP15060_DCDC234_NUM_VOLTAGES,
AXP15060_DCDC4_V_CTRL, AXP15060_DCDC4_V_CTRL_MASK,
AXP15060_PWR_OUT_CTRL1, AXP15060_PWR_OUT_DCDC4_MASK),
AXP_DESC_RANGES(AXP15060, DCDC5, "dcdc5", "vin5",
axp15060_dcdc5_ranges, AXP15060_DCDC5_NUM_VOLTAGES,
AXP15060_DCDC5_V_CTRL, AXP15060_DCDC5_V_CTRL_MASK,
AXP15060_PWR_OUT_CTRL1, AXP15060_PWR_OUT_DCDC5_MASK),
AXP_DESC(AXP15060, DCDC6, "dcdc6", "vin6", 500, 3400, 100,
AXP15060_DCDC6_V_CTRL, AXP15060_DCDC6_V_CTRL_MASK,
AXP15060_PWR_OUT_CTRL1, AXP15060_PWR_OUT_DCDC6_MASK),
AXP_DESC(AXP15060, ALDO1, "aldo1", "aldoin", 700, 3300, 100,
AXP15060_ALDO1_V_CTRL, AXP15060_ALDO1_V_CTRL_MASK,
AXP15060_PWR_OUT_CTRL2, AXP15060_PWR_OUT_ALDO1_MASK),
AXP_DESC(AXP15060, ALDO2, "aldo2", "aldoin", 700, 3300, 100,
AXP15060_ALDO2_V_CTRL, AXP15060_ALDO2_V_CTRL_MASK,
AXP15060_PWR_OUT_CTRL2, AXP15060_PWR_OUT_ALDO2_MASK),
AXP_DESC(AXP15060, ALDO3, "aldo3", "aldoin", 700, 3300, 100,
AXP15060_ALDO3_V_CTRL, AXP15060_ALDO3_V_CTRL_MASK,
AXP15060_PWR_OUT_CTRL2, AXP15060_PWR_OUT_ALDO3_MASK),
AXP_DESC(AXP15060, ALDO4, "aldo4", "aldoin", 700, 3300, 100,
AXP15060_ALDO4_V_CTRL, AXP15060_ALDO4_V_CTRL_MASK,
AXP15060_PWR_OUT_CTRL2, AXP15060_PWR_OUT_ALDO4_MASK),
AXP_DESC(AXP15060, ALDO5, "aldo5", "aldoin", 700, 3300, 100,
AXP15060_ALDO5_V_CTRL, AXP15060_ALDO5_V_CTRL_MASK,
AXP15060_PWR_OUT_CTRL2, AXP15060_PWR_OUT_ALDO5_MASK),
AXP_DESC(AXP15060, BLDO1, "bldo1", "bldoin", 700, 3300, 100,
AXP15060_BLDO1_V_CTRL, AXP15060_BLDO1_V_CTRL_MASK,
AXP15060_PWR_OUT_CTRL2, AXP15060_PWR_OUT_BLDO1_MASK),
AXP_DESC(AXP15060, BLDO2, "bldo2", "bldoin", 700, 3300, 100,
AXP15060_BLDO2_V_CTRL, AXP15060_BLDO2_V_CTRL_MASK,
AXP15060_PWR_OUT_CTRL2, AXP15060_PWR_OUT_BLDO2_MASK),
AXP_DESC(AXP15060, BLDO3, "bldo3", "bldoin", 700, 3300, 100,
AXP15060_BLDO3_V_CTRL, AXP15060_BLDO3_V_CTRL_MASK,
AXP15060_PWR_OUT_CTRL2, AXP15060_PWR_OUT_BLDO3_MASK),
AXP_DESC(AXP15060, BLDO4, "bldo4", "bldoin", 700, 3300, 100,
AXP15060_BLDO4_V_CTRL, AXP15060_BLDO4_V_CTRL_MASK,
AXP15060_PWR_OUT_CTRL3, AXP15060_PWR_OUT_BLDO4_MASK),
AXP_DESC(AXP15060, BLDO5, "bldo5", "bldoin", 700, 3300, 100,
AXP15060_BLDO5_V_CTRL, AXP15060_BLDO5_V_CTRL_MASK,
AXP15060_PWR_OUT_CTRL3, AXP15060_PWR_OUT_BLDO5_MASK),
AXP_DESC(AXP15060, CLDO1, "cldo1", "cldoin", 700, 3300, 100,
AXP15060_CLDO1_V_CTRL, AXP15060_CLDO1_V_CTRL_MASK,
AXP15060_PWR_OUT_CTRL3, AXP15060_PWR_OUT_CLDO1_MASK),
AXP_DESC(AXP15060, CLDO2, "cldo2", "cldoin", 700, 3300, 100,
AXP15060_CLDO2_V_CTRL, AXP15060_CLDO2_V_CTRL_MASK,
AXP15060_PWR_OUT_CTRL3, AXP15060_PWR_OUT_CLDO2_MASK),
AXP_DESC(AXP15060, CLDO3, "cldo3", "cldoin", 700, 3300, 100,
AXP15060_CLDO3_V_CTRL, AXP15060_CLDO3_V_CTRL_MASK,
AXP15060_PWR_OUT_CTRL3, AXP15060_PWR_OUT_CLDO3_MASK),
AXP_DESC(AXP15060, CLDO4, "cldo4", "cldoin", 700, 4200, 100,
AXP15060_CLDO4_V_CTRL, AXP15060_CLDO4_V_CTRL_MASK,
AXP15060_PWR_OUT_CTRL3, AXP15060_PWR_OUT_CLDO4_MASK),
/* Supply comes from DCDC5 */
AXP_DESC(AXP15060, CPUSLDO, "cpusldo", NULL, 700, 1400, 50,
AXP15060_CPUSLDO_V_CTRL, AXP15060_CPUSLDO_V_CTRL_MASK,
AXP15060_PWR_OUT_CTRL3, AXP15060_PWR_OUT_CPUSLDO_MASK),
/* Supply comes from DCDC1 */
AXP_DESC_SW(AXP15060, SW, "sw", NULL,
AXP15060_PWR_OUT_CTRL3, AXP15060_PWR_OUT_SW_MASK),
/* Supply comes from ALDO1 */
AXP_DESC_FIXED(AXP15060, RTC_LDO, "rtc-ldo", NULL, 1800),
};
static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq)
{
struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
unsigned int reg = AXP20X_DCDC_FREQ;
u32 min, max, def, step;
switch (axp20x->variant) {
case AXP202_ID:
case AXP209_ID:
min = 750;
max = 1875;
def = 1500;
step = 75;
break;
case AXP803_ID:
case AXP813_ID:
/*
* AXP803/AXP813 DCDC work frequency setting has the same
* range and step as AXP22X, but at a different register.
* (See include/linux/mfd/axp20x.h)
*/
reg = AXP803_DCDC_FREQ_CTRL;
fallthrough; /* to the check below */
case AXP806_ID:
/*
		 * AXP806 also has its DCDC work frequency setting register at
		 * a different position.
*/
if (axp20x->variant == AXP806_ID)
reg = AXP806_DCDC_FREQ_CTRL;
fallthrough;
case AXP221_ID:
case AXP223_ID:
case AXP809_ID:
min = 1800;
max = 4050;
def = 3000;
step = 150;
break;
case AXP313A_ID:
case AXP15060_ID:
/* The DCDC PWM frequency seems to be fixed to 3 MHz. */
if (dcdcfreq != 0) {
dev_err(&pdev->dev,
"DCDC frequency on this PMIC is fixed to 3 MHz.\n");
return -EINVAL;
}
return 0;
default:
dev_err(&pdev->dev,
"Setting DCDC frequency for unsupported AXP variant\n");
return -EINVAL;
}
if (dcdcfreq == 0)
dcdcfreq = def;
if (dcdcfreq < min) {
dcdcfreq = min;
dev_warn(&pdev->dev, "DCDC frequency too low. Set to %ukHz\n",
min);
}
if (dcdcfreq > max) {
dcdcfreq = max;
dev_warn(&pdev->dev, "DCDC frequency too high. Set to %ukHz\n",
max);
}
dcdcfreq = (dcdcfreq - min) / step;
return regmap_update_bits(axp20x->regmap, reg,
AXP20X_FREQ_DCDC_MASK, dcdcfreq);
}
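/*
 * Worked example of the conversion above (illustrative only): on an
 * AXP221 (min = 1800, step = 150), a requested dcdcfreq of 2100 kHz is
 * encoded as (2100 - 1800) / 150 = 2, which is then written into the
 * AXP20X_FREQ_DCDC_MASK bits of the frequency register.
 */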
static int axp20x_regulator_parse_dt(struct platform_device *pdev)
{
struct device_node *np, *regulators;
int ret = 0;
u32 dcdcfreq = 0;
np = of_node_get(pdev->dev.parent->of_node);
if (!np)
return 0;
regulators = of_get_child_by_name(np, "regulators");
if (!regulators) {
dev_warn(&pdev->dev, "regulators node not found\n");
} else {
of_property_read_u32(regulators, "x-powers,dcdc-freq", &dcdcfreq);
ret = axp20x_set_dcdc_freq(pdev, dcdcfreq);
if (ret < 0) {
dev_err(&pdev->dev, "Error setting dcdc frequency: %d\n", ret);
}
of_node_put(regulators);
}
of_node_put(np);
return ret;
}
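/*
 * For reference, a minimal sketch of the device tree layout that
 * axp20x_regulator_parse_dt() above expects (node and property names
 * taken from the code; the value is only an example):
 *
 *	regulators {
 *		x-powers,dcdc-freq = <3000>;
 *		...
 *	};
 */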
static int axp20x_set_dcdc_workmode(struct regulator_dev *rdev, int id, u32 workmode)
{
struct axp20x_dev *axp20x = rdev_get_drvdata(rdev);
unsigned int reg = AXP20X_DCDC_MODE;
unsigned int mask;
switch (axp20x->variant) {
case AXP202_ID:
case AXP209_ID:
if ((id != AXP20X_DCDC2) && (id != AXP20X_DCDC3))
return -EINVAL;
mask = AXP20X_WORKMODE_DCDC2_MASK;
if (id == AXP20X_DCDC3)
mask = AXP20X_WORKMODE_DCDC3_MASK;
workmode <<= ffs(mask) - 1;
break;
case AXP806_ID:
/*
* AXP806 DCDC regulator IDs have the same range as AXP22X.
* (See include/linux/mfd/axp20x.h)
*/
reg = AXP806_DCDC_MODE_CTRL2;
fallthrough; /* to the check below */
case AXP221_ID:
case AXP223_ID:
case AXP809_ID:
if (id < AXP22X_DCDC1 || id > AXP22X_DCDC5)
return -EINVAL;
mask = AXP22X_WORKMODE_DCDCX_MASK(id - AXP22X_DCDC1);
workmode <<= id - AXP22X_DCDC1;
break;
case AXP803_ID:
if (id < AXP803_DCDC1 || id > AXP803_DCDC6)
return -EINVAL;
mask = AXP22X_WORKMODE_DCDCX_MASK(id - AXP803_DCDC1);
workmode <<= id - AXP803_DCDC1;
break;
case AXP813_ID:
if (id < AXP813_DCDC1 || id > AXP813_DCDC7)
return -EINVAL;
mask = AXP22X_WORKMODE_DCDCX_MASK(id - AXP813_DCDC1);
workmode <<= id - AXP813_DCDC1;
break;
case AXP15060_ID:
reg = AXP15060_DCDC_MODE_CTRL2;
if (id < AXP15060_DCDC1 || id > AXP15060_DCDC6)
return -EINVAL;
mask = AXP22X_WORKMODE_DCDCX_MASK(id - AXP15060_DCDC1);
workmode <<= id - AXP15060_DCDC1;
break;
default:
/* should not happen */
WARN_ON(1);
return -EINVAL;
}
return regmap_update_bits(rdev->regmap, reg, mask, workmode);
}
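/*
 * Example of the shifting scheme above (illustrative only): for AXP803
 * DCDC3, id - AXP803_DCDC1 == 2, so the workmode value is shifted left
 * by 2 and applied under AXP22X_WORKMODE_DCDCX_MASK(2).
 */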
/*
* This function checks whether a regulator is part of a poly-phase
 * output setup based on the register settings. Returns true if it is.
*/
static bool axp20x_is_polyphase_slave(struct axp20x_dev *axp20x, int id)
{
u32 reg = 0;
/*
* Currently in our supported AXP variants, only AXP803, AXP806,
* AXP813 and AXP15060 have polyphase regulators.
*/
switch (axp20x->variant) {
case AXP803_ID:
case AXP813_ID:
regmap_read(axp20x->regmap, AXP803_POLYPHASE_CTRL, ®);
switch (id) {
case AXP803_DCDC3:
return !!(reg & AXP803_DCDC23_POLYPHASE_DUAL);
case AXP803_DCDC6:
return !!(reg & AXP803_DCDC56_POLYPHASE_DUAL);
}
break;
case AXP806_ID:
regmap_read(axp20x->regmap, AXP806_DCDC_MODE_CTRL2, ®);
switch (id) {
case AXP806_DCDCB:
return (((reg & AXP806_DCDCABC_POLYPHASE_MASK) ==
AXP806_DCDCAB_POLYPHASE_DUAL) ||
((reg & AXP806_DCDCABC_POLYPHASE_MASK) ==
AXP806_DCDCABC_POLYPHASE_TRI));
case AXP806_DCDCC:
return ((reg & AXP806_DCDCABC_POLYPHASE_MASK) ==
AXP806_DCDCABC_POLYPHASE_TRI);
case AXP806_DCDCE:
return !!(reg & AXP806_DCDCDE_POLYPHASE_DUAL);
}
break;
case AXP15060_ID:
regmap_read(axp20x->regmap, AXP15060_DCDC_MODE_CTRL1, ®);
switch (id) {
case AXP15060_DCDC3:
return !!(reg & AXP15060_DCDC23_POLYPHASE_DUAL_MASK);
case AXP15060_DCDC6:
return !!(reg & AXP15060_DCDC46_POLYPHASE_DUAL_MASK);
}
break;
default:
return false;
}
return false;
}
static int axp20x_regulator_probe(struct platform_device *pdev)
{
struct regulator_dev *rdev;
struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
const struct regulator_desc *regulators;
struct regulator_config config = {
.dev = pdev->dev.parent,
.regmap = axp20x->regmap,
.driver_data = axp20x,
};
int ret, i, nregulators;
u32 workmode;
const char *dcdc1_name = axp22x_regulators[AXP22X_DCDC1].name;
const char *dcdc5_name = axp22x_regulators[AXP22X_DCDC5].name;
const char *aldo1_name = axp15060_regulators[AXP15060_ALDO1].name;
bool drivevbus = false;
switch (axp20x->variant) {
case AXP202_ID:
case AXP209_ID:
regulators = axp20x_regulators;
nregulators = AXP20X_REG_ID_MAX;
break;
case AXP221_ID:
case AXP223_ID:
regulators = axp22x_regulators;
nregulators = AXP22X_REG_ID_MAX;
drivevbus = of_property_read_bool(pdev->dev.parent->of_node,
"x-powers,drive-vbus-en");
break;
case AXP313A_ID:
regulators = axp313a_regulators;
nregulators = AXP313A_REG_ID_MAX;
break;
case AXP803_ID:
regulators = axp803_regulators;
nregulators = AXP803_REG_ID_MAX;
drivevbus = of_property_read_bool(pdev->dev.parent->of_node,
"x-powers,drive-vbus-en");
break;
case AXP806_ID:
regulators = axp806_regulators;
nregulators = AXP806_REG_ID_MAX;
break;
case AXP809_ID:
regulators = axp809_regulators;
nregulators = AXP809_REG_ID_MAX;
break;
case AXP813_ID:
regulators = axp813_regulators;
nregulators = AXP813_REG_ID_MAX;
drivevbus = of_property_read_bool(pdev->dev.parent->of_node,
"x-powers,drive-vbus-en");
break;
case AXP15060_ID:
regulators = axp15060_regulators;
nregulators = AXP15060_REG_ID_MAX;
break;
default:
dev_err(&pdev->dev, "Unsupported AXP variant: %ld\n",
axp20x->variant);
return -EINVAL;
}
/* This only sets the dcdc freq. Ignore any errors */
axp20x_regulator_parse_dt(pdev);
for (i = 0; i < nregulators; i++) {
const struct regulator_desc *desc = ®ulators[i];
struct regulator_desc *new_desc;
/*
* If this regulator is a slave in a poly-phase setup,
* skip it, as its controls are bound to the master
* regulator and won't work.
*/
if (axp20x_is_polyphase_slave(axp20x, i))
continue;
/* Support for AXP813's FLDO3 is not implemented */
if (axp20x->variant == AXP813_ID && i == AXP813_FLDO3)
continue;
/*
* Regulators DC1SW, DC5LDO and RTCLDO on AXP15060 are
* connected internally, so we have to handle their supply
* names separately.
*
* We always register the regulators in proper sequence,
* so the supply names are correctly read. See the last
* part of this loop to see where we save the DT defined
* name.
*/
if ((regulators == axp22x_regulators && i == AXP22X_DC1SW) ||
(regulators == axp803_regulators && i == AXP803_DC1SW) ||
(regulators == axp809_regulators && i == AXP809_DC1SW) ||
(regulators == axp15060_regulators && i == AXP15060_SW)) {
new_desc = devm_kzalloc(&pdev->dev, sizeof(*desc),
GFP_KERNEL);
if (!new_desc)
return -ENOMEM;
*new_desc = regulators[i];
new_desc->supply_name = dcdc1_name;
desc = new_desc;
}
if ((regulators == axp22x_regulators && i == AXP22X_DC5LDO) ||
(regulators == axp809_regulators && i == AXP809_DC5LDO) ||
(regulators == axp15060_regulators && i == AXP15060_CPUSLDO)) {
new_desc = devm_kzalloc(&pdev->dev, sizeof(*desc),
GFP_KERNEL);
if (!new_desc)
return -ENOMEM;
*new_desc = regulators[i];
new_desc->supply_name = dcdc5_name;
desc = new_desc;
}
if (regulators == axp15060_regulators && i == AXP15060_RTC_LDO) {
new_desc = devm_kzalloc(&pdev->dev, sizeof(*desc),
GFP_KERNEL);
if (!new_desc)
return -ENOMEM;
*new_desc = regulators[i];
new_desc->supply_name = aldo1_name;
desc = new_desc;
}
rdev = devm_regulator_register(&pdev->dev, desc, &config);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "Failed to register %s\n",
regulators[i].name);
return PTR_ERR(rdev);
}
ret = of_property_read_u32(rdev->dev.of_node,
"x-powers,dcdc-workmode",
&workmode);
if (!ret) {
if (axp20x_set_dcdc_workmode(rdev, i, workmode))
dev_err(&pdev->dev, "Failed to set workmode on %s\n",
rdev->desc->name);
}
/*
* Save AXP22X DCDC1 / DCDC5 / AXP15060 ALDO1 regulator names for later.
*/
if ((regulators == axp22x_regulators && i == AXP22X_DCDC1) ||
(regulators == axp809_regulators && i == AXP809_DCDC1) ||
(regulators == axp15060_regulators && i == AXP15060_DCDC1))
of_property_read_string(rdev->dev.of_node,
"regulator-name",
&dcdc1_name);
if ((regulators == axp22x_regulators && i == AXP22X_DCDC5) ||
(regulators == axp809_regulators && i == AXP809_DCDC5) ||
(regulators == axp15060_regulators && i == AXP15060_DCDC5))
of_property_read_string(rdev->dev.of_node,
"regulator-name",
&dcdc5_name);
if (regulators == axp15060_regulators && i == AXP15060_ALDO1)
of_property_read_string(rdev->dev.of_node,
"regulator-name",
&aldo1_name);
}
if (drivevbus) {
/* Change N_VBUSEN sense pin to DRIVEVBUS output pin */
regmap_update_bits(axp20x->regmap, AXP20X_OVER_TMP,
AXP22X_MISC_N_VBUSEN_FUNC, 0);
rdev = devm_regulator_register(&pdev->dev,
&axp22x_drivevbus_regulator,
&config);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "Failed to register drivevbus\n");
return PTR_ERR(rdev);
}
}
return 0;
}
static struct platform_driver axp20x_regulator_driver = {
.probe = axp20x_regulator_probe,
.driver = {
.name = "axp20x-regulator",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
module_platform_driver(axp20x_regulator_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Carlo Caione <[email protected]>");
MODULE_DESCRIPTION("Regulator Driver for AXP20X PMIC");
MODULE_ALIAS("platform:axp20x-regulator");
| linux-master | drivers/regulator/axp20x-regulator.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/drivers/regulator/aat2870-regulator.c
*
* Copyright (c) 2011, NVIDIA Corporation.
* Author: Jin Park <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/mfd/aat2870.h>
struct aat2870_regulator {
struct aat2870_data *aat2870;
struct regulator_desc desc;
u8 enable_addr;
u8 enable_shift;
u8 enable_mask;
u8 voltage_addr;
u8 voltage_shift;
u8 voltage_mask;
};
static int aat2870_ldo_set_voltage_sel(struct regulator_dev *rdev,
unsigned selector)
{
struct aat2870_regulator *ri = rdev_get_drvdata(rdev);
struct aat2870_data *aat2870 = ri->aat2870;
return aat2870->update(aat2870, ri->voltage_addr, ri->voltage_mask,
selector << ri->voltage_shift);
}
static int aat2870_ldo_get_voltage_sel(struct regulator_dev *rdev)
{
struct aat2870_regulator *ri = rdev_get_drvdata(rdev);
struct aat2870_data *aat2870 = ri->aat2870;
u8 val;
int ret;
ret = aat2870->read(aat2870, ri->voltage_addr, &val);
if (ret)
return ret;
return (val & ri->voltage_mask) >> ri->voltage_shift;
}
static int aat2870_ldo_enable(struct regulator_dev *rdev)
{
struct aat2870_regulator *ri = rdev_get_drvdata(rdev);
struct aat2870_data *aat2870 = ri->aat2870;
return aat2870->update(aat2870, ri->enable_addr, ri->enable_mask,
ri->enable_mask);
}
static int aat2870_ldo_disable(struct regulator_dev *rdev)
{
struct aat2870_regulator *ri = rdev_get_drvdata(rdev);
struct aat2870_data *aat2870 = ri->aat2870;
return aat2870->update(aat2870, ri->enable_addr, ri->enable_mask, 0);
}
static int aat2870_ldo_is_enabled(struct regulator_dev *rdev)
{
struct aat2870_regulator *ri = rdev_get_drvdata(rdev);
struct aat2870_data *aat2870 = ri->aat2870;
u8 val;
int ret;
ret = aat2870->read(aat2870, ri->enable_addr, &val);
if (ret)
return ret;
return val & ri->enable_mask ? 1 : 0;
}
static const struct regulator_ops aat2870_ldo_ops = {
.list_voltage = regulator_list_voltage_table,
.map_voltage = regulator_map_voltage_ascend,
.set_voltage_sel = aat2870_ldo_set_voltage_sel,
.get_voltage_sel = aat2870_ldo_get_voltage_sel,
.enable = aat2870_ldo_enable,
.disable = aat2870_ldo_disable,
.is_enabled = aat2870_ldo_is_enabled,
};
static const unsigned int aat2870_ldo_voltages[] = {
1200000, 1300000, 1500000, 1600000,
1800000, 2000000, 2200000, 2500000,
2600000, 2700000, 2800000, 2900000,
3000000, 3100000, 3200000, 3300000,
};
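/*
 * The 16 voltage entries above match the 4-bit selector field
 * programmed via voltage_mask (0xF << voltage_shift) in
 * aat2870_get_regulator() below.
 */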
#define AAT2870_LDO(ids) \
{ \
.desc = { \
.name = #ids, \
.id = AAT2870_ID_##ids, \
.n_voltages = ARRAY_SIZE(aat2870_ldo_voltages), \
.volt_table = aat2870_ldo_voltages, \
.ops = &aat2870_ldo_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
}, \
}
static struct aat2870_regulator aat2870_regulators[] = {
AAT2870_LDO(LDOA),
AAT2870_LDO(LDOB),
AAT2870_LDO(LDOC),
AAT2870_LDO(LDOD),
};
static struct aat2870_regulator *aat2870_get_regulator(int id)
{
struct aat2870_regulator *ri = NULL;
int i;
for (i = 0; i < ARRAY_SIZE(aat2870_regulators); i++) {
ri = &aat2870_regulators[i];
if (ri->desc.id == id)
break;
}
if (i == ARRAY_SIZE(aat2870_regulators))
return NULL;
ri->enable_addr = AAT2870_LDO_EN;
ri->enable_shift = id - AAT2870_ID_LDOA;
ri->enable_mask = 0x1 << ri->enable_shift;
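	/*
	 * LDOA/LDOB share the AAT2870_LDO_AB register and LDOC/LDOD share
	 * AAT2870_LDO_CD; the first LDO of each pair sits in the upper
	 * nibble (shift 4), the second in the lower nibble (shift 0).
	 */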
ri->voltage_addr = (id - AAT2870_ID_LDOA) / 2 ?
AAT2870_LDO_CD : AAT2870_LDO_AB;
ri->voltage_shift = (id - AAT2870_ID_LDOA) % 2 ? 0 : 4;
ri->voltage_mask = 0xF << ri->voltage_shift;
return ri;
}
static int aat2870_regulator_probe(struct platform_device *pdev)
{
struct aat2870_regulator *ri;
struct regulator_config config = { };
struct regulator_dev *rdev;
ri = aat2870_get_regulator(pdev->id);
if (!ri) {
dev_err(&pdev->dev, "Invalid device ID, %d\n", pdev->id);
return -EINVAL;
}
ri->aat2870 = dev_get_drvdata(pdev->dev.parent);
config.dev = &pdev->dev;
config.driver_data = ri;
config.init_data = dev_get_platdata(&pdev->dev);
rdev = devm_regulator_register(&pdev->dev, &ri->desc, &config);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "Failed to register regulator %s\n",
ri->desc.name);
return PTR_ERR(rdev);
}
platform_set_drvdata(pdev, rdev);
return 0;
}
static struct platform_driver aat2870_regulator_driver = {
.driver = {
.name = "aat2870-regulator",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = aat2870_regulator_probe,
};
static int __init aat2870_regulator_init(void)
{
return platform_driver_register(&aat2870_regulator_driver);
}
subsys_initcall(aat2870_regulator_init);
static void __exit aat2870_regulator_exit(void)
{
platform_driver_unregister(&aat2870_regulator_driver);
}
module_exit(aat2870_regulator_exit);
MODULE_DESCRIPTION("AnalogicTech AAT2870 Regulator");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jin Park <[email protected]>");
MODULE_ALIAS("platform:aat2870-regulator");
| linux-master | drivers/regulator/aat2870-regulator.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Regulator driver for the Richtek RT5033
*
* Copyright (C) 2014 Samsung Electronics, Co., Ltd.
* Author: Beomho Seo <[email protected]>
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/mfd/rt5033.h>
#include <linux/mfd/rt5033-private.h>
#include <linux/regulator/of_regulator.h>
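/*
 * Both range tables below end in a zero-step run: every selector in the
 * second range maps to the same 3.0 V ceiling (buck selectors 21-31,
 * LDO selectors 19-31).
 */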
static const struct linear_range rt5033_buck_ranges[] = {
REGULATOR_LINEAR_RANGE(1000000, 0, 20, 100000),
REGULATOR_LINEAR_RANGE(3000000, 21, 31, 0),
};
static const struct linear_range rt5033_ldo_ranges[] = {
REGULATOR_LINEAR_RANGE(1200000, 0, 18, 100000),
REGULATOR_LINEAR_RANGE(3000000, 19, 31, 0),
};
static const struct regulator_ops rt5033_safe_ldo_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.list_voltage = regulator_list_voltage_linear,
};
static const struct regulator_ops rt5033_buck_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.list_voltage = regulator_list_voltage_linear_range,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
};
static const struct regulator_desc rt5033_supported_regulators[] = {
[RT5033_BUCK] = {
.name = "BUCK",
.of_match = of_match_ptr("BUCK"),
.regulators_node = of_match_ptr("regulators"),
.id = RT5033_BUCK,
.ops = &rt5033_buck_ops,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
.n_voltages = RT5033_REGULATOR_BUCK_VOLTAGE_STEP_NUM,
.linear_ranges = rt5033_buck_ranges,
.n_linear_ranges = ARRAY_SIZE(rt5033_buck_ranges),
.enable_reg = RT5033_REG_CTRL,
.enable_mask = RT5033_CTRL_EN_BUCK_MASK,
.vsel_reg = RT5033_REG_BUCK_CTRL,
.vsel_mask = RT5033_BUCK_CTRL_MASK,
},
[RT5033_LDO] = {
.name = "LDO",
.of_match = of_match_ptr("LDO"),
.regulators_node = of_match_ptr("regulators"),
.id = RT5033_LDO,
.ops = &rt5033_buck_ops,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
.n_voltages = RT5033_REGULATOR_LDO_VOLTAGE_STEP_NUM,
.linear_ranges = rt5033_ldo_ranges,
.n_linear_ranges = ARRAY_SIZE(rt5033_ldo_ranges),
.enable_reg = RT5033_REG_CTRL,
.enable_mask = RT5033_CTRL_EN_LDO_MASK,
.vsel_reg = RT5033_REG_LDO_CTRL,
.vsel_mask = RT5033_LDO_CTRL_MASK,
},
[RT5033_SAFE_LDO] = {
.name = "SAFE_LDO",
.of_match = of_match_ptr("SAFE_LDO"),
.regulators_node = of_match_ptr("regulators"),
.id = RT5033_SAFE_LDO,
.ops = &rt5033_safe_ldo_ops,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
.n_voltages = 1,
.min_uV = RT5033_REGULATOR_SAFE_LDO_VOLTAGE,
.enable_reg = RT5033_REG_CTRL,
.enable_mask = RT5033_CTRL_EN_SAFE_LDO_MASK,
},
};
static int rt5033_regulator_probe(struct platform_device *pdev)
{
struct rt5033_dev *rt5033 = dev_get_drvdata(pdev->dev.parent);
int ret, i;
struct regulator_config config = {};
config.dev = rt5033->dev;
config.driver_data = rt5033;
for (i = 0; i < ARRAY_SIZE(rt5033_supported_regulators); i++) {
struct regulator_dev *regulator;
config.regmap = rt5033->regmap;
regulator = devm_regulator_register(&pdev->dev,
&rt5033_supported_regulators[i], &config);
if (IS_ERR(regulator)) {
ret = PTR_ERR(regulator);
dev_err(&pdev->dev,
"Regulator init failed %d: with error: %d\n",
i, ret);
return ret;
}
}
return 0;
}
static const struct platform_device_id rt5033_regulator_id[] = {
{ "rt5033-regulator", },
{ }
};
MODULE_DEVICE_TABLE(platform, rt5033_regulator_id);
static struct platform_driver rt5033_regulator_driver = {
.driver = {
.name = "rt5033-regulator",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = rt5033_regulator_probe,
.id_table = rt5033_regulator_id,
};
module_platform_driver(rt5033_regulator_driver);
MODULE_DESCRIPTION("Richtek RT5033 Regulator driver");
MODULE_AUTHOR("Beomho Seo <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/regulator/rt5033-regulator.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mfd/sm5703.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
enum sm5703_regulators {
SM5703_BUCK,
SM5703_LDO1,
SM5703_LDO2,
SM5703_LDO3,
SM5703_USBLDO1,
SM5703_USBLDO2,
SM5703_VBUS,
SM5703_MAX_REGULATORS,
};
static const int sm5703_ldo_voltagemap[] = {
1500000, 1800000, 2600000, 2800000, 3000000, 3300000,
};
static const int sm5703_buck_voltagemap[] = {
1000000, 1000000, 1000000, 1000000,
1000000, 1000000, 1000000, 1000000,
1000000, 1000000, 1000000, 1100000,
1200000, 1300000, 1400000, 1500000,
1600000, 1700000, 1800000, 1900000,
2000000, 2100000, 2200000, 2300000,
2400000, 2500000, 2600000, 2700000,
2800000, 2900000, 3000000, 3000000,
};
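/*
 * The buck table above saturates at both ends: selectors 0-10 all
 * select 1.0 V and selectors 30-31 both select 3.0 V; only the middle
 * selectors step in 100 mV increments.
 */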
#define SM5703USBLDO(_name, _id) \
[SM5703_USBLDO ## _id] = { \
.name = _name, \
.of_match = _name, \
.regulators_node = "regulators", \
.type = REGULATOR_VOLTAGE, \
.id = SM5703_USBLDO ## _id, \
.ops = &sm5703_regulator_ops_fixed, \
.n_voltages = 1, \
.fixed_uV = SM5703_USBLDO_MICROVOLT, \
.enable_reg = SM5703_REG_USBLDO12, \
.enable_mask = SM5703_REG_EN_USBLDO ##_id, \
.owner = THIS_MODULE, \
}
#define SM5703VBUS(_name) \
[SM5703_VBUS] = { \
.name = _name, \
.of_match = _name, \
.regulators_node = "regulators", \
.type = REGULATOR_VOLTAGE, \
.id = SM5703_VBUS, \
.ops = &sm5703_regulator_ops_fixed, \
.n_voltages = 1, \
.fixed_uV = SM5703_VBUS_MICROVOLT, \
.enable_reg = SM5703_REG_CNTL, \
.enable_mask = SM5703_OPERATION_MODE_MASK, \
.enable_val = SM5703_OPERATION_MODE_USB_OTG_MODE, \
.disable_val = SM5703_OPERATION_MODE_CHARGING_ON, \
.owner = THIS_MODULE, \
}
#define SM5703BUCK(_name) \
[SM5703_BUCK] = { \
.name = _name, \
.of_match = _name, \
.regulators_node = "regulators", \
.type = REGULATOR_VOLTAGE, \
.id = SM5703_BUCK, \
.ops = &sm5703_regulator_ops, \
.n_voltages = ARRAY_SIZE(sm5703_buck_voltagemap), \
.volt_table = sm5703_buck_voltagemap, \
.vsel_reg = SM5703_REG_BUCK, \
.vsel_mask = SM5703_BUCK_VOLT_MASK, \
.enable_reg = SM5703_REG_BUCK, \
.enable_mask = SM5703_REG_EN_BUCK, \
.owner = THIS_MODULE, \
}
#define SM5703LDO(_name, _id) \
[SM5703_LDO ## _id] = { \
.name = _name, \
.of_match = _name, \
.regulators_node = "regulators", \
.type = REGULATOR_VOLTAGE, \
.id = SM5703_LDO ## _id, \
.ops = &sm5703_regulator_ops, \
.n_voltages = ARRAY_SIZE(sm5703_ldo_voltagemap), \
.volt_table = sm5703_ldo_voltagemap, \
.vsel_reg = SM5703_REG_LDO ##_id, \
.vsel_mask = SM5703_LDO_VOLT_MASK, \
.enable_reg = SM5703_REG_LDO ##_id, \
.enable_mask = SM5703_LDO_EN, \
.owner = THIS_MODULE, \
}
static const struct regulator_ops sm5703_regulator_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
.list_voltage = regulator_list_voltage_table,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
};
static const struct regulator_ops sm5703_regulator_ops_fixed = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
};
static struct regulator_desc sm5703_regulators_desc[SM5703_MAX_REGULATORS] = {
SM5703BUCK("buck"),
SM5703LDO("ldo1", 1),
SM5703LDO("ldo2", 2),
SM5703LDO("ldo3", 3),
SM5703USBLDO("usbldo1", 1),
SM5703USBLDO("usbldo2", 2),
SM5703VBUS("vbus"),
};
static int sm5703_regulator_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct regulator_config config = { NULL, };
struct regulator_dev *rdev;
struct sm5703_dev *sm5703 = dev_get_drvdata(pdev->dev.parent);
int i;
config.dev = dev->parent;
config.regmap = sm5703->regmap;
for (i = 0; i < SM5703_MAX_REGULATORS; i++) {
rdev = devm_regulator_register(dev,
&sm5703_regulators_desc[i],
&config);
if (IS_ERR(rdev))
return dev_err_probe(dev, PTR_ERR(rdev),
"Failed to register a regulator\n");
}
return 0;
}
static const struct platform_device_id sm5703_regulator_id[] = {
{ "sm5703-regulator", 0 },
{}
};
MODULE_DEVICE_TABLE(platform, sm5703_regulator_id);
static struct platform_driver sm5703_regulator_driver = {
.driver = {
.name = "sm5703-regulator",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = sm5703_regulator_probe,
.id_table = sm5703_regulator_id,
};
module_platform_driver(sm5703_regulator_driver);
MODULE_DESCRIPTION("Silicon Mitus SM5703 LDO/Buck/USB regulator driver");
MODULE_AUTHOR("Markuss Broks <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/regulator/sm5703-regulator.c |
// SPDX-License-Identifier: GPL-2.0
//
// sky81452-regulator.c SKY81452 regulator driver
//
// Copyright 2014 Skyworks Solutions Inc.
// Author : Gyungoh Yoo <[email protected]>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
/* registers */
#define SKY81452_REG1 0x01
#define SKY81452_REG3 0x03
/* bit mask */
#define SKY81452_LEN 0x40
#define SKY81452_LOUT 0x1F
static const struct regulator_ops sky81452_reg_ops = {
.list_voltage = regulator_list_voltage_linear_range,
.map_voltage = regulator_map_voltage_linear_range,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
};
static const struct linear_range sky81452_reg_ranges[] = {
REGULATOR_LINEAR_RANGE(4500000, 0, 14, 250000),
REGULATOR_LINEAR_RANGE(9000000, 15, 31, 1000000),
};
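/*
 * Worked example for the ranges above (illustrative only): selector 0
 * maps to 4.5 V, selector 14 to 4.5 V + 14 * 0.25 V = 8.0 V, selector
 * 15 starts the second range at 9 V, and selector 31 tops out at
 * 9 V + 16 * 1 V = 25 V.
 */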
static const struct regulator_desc sky81452_reg = {
.name = "LOUT",
.of_match = of_match_ptr("lout"),
.regulators_node = of_match_ptr("regulator"),
.ops = &sky81452_reg_ops,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
.n_voltages = SKY81452_LOUT + 1,
.linear_ranges = sky81452_reg_ranges,
.n_linear_ranges = ARRAY_SIZE(sky81452_reg_ranges),
.vsel_reg = SKY81452_REG3,
.vsel_mask = SKY81452_LOUT,
.enable_reg = SKY81452_REG1,
.enable_mask = SKY81452_LEN,
};
static int sky81452_reg_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct regulator_init_data *init_data = dev_get_platdata(dev);
struct regulator_config config = { };
struct regulator_dev *rdev;
config.dev = dev->parent;
config.init_data = init_data;
config.of_node = dev->of_node;
config.regmap = dev_get_drvdata(dev->parent);
rdev = devm_regulator_register(dev, &sky81452_reg, &config);
if (IS_ERR(rdev)) {
dev_err(dev, "failed to register. err=%ld\n", PTR_ERR(rdev));
return PTR_ERR(rdev);
}
platform_set_drvdata(pdev, rdev);
return 0;
}
static struct platform_driver sky81452_reg_driver = {
.driver = {
.name = "sky81452-regulator",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = sky81452_reg_probe,
};
module_platform_driver(sky81452_reg_driver);
MODULE_DESCRIPTION("Skyworks SKY81452 Regulator driver");
MODULE_AUTHOR("Gyungoh Yoo <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/regulator/sky81452-regulator.c |
// SPDX-License-Identifier: GPL-2.0
//
// Copyright (C) 2021 ROHM Semiconductors
// regulator IRQ based event notification helpers
//
// Logic has been partially adapted from qcom-labibb driver.
//
// Author: Matti Vaittinen <[email protected]>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/regulator/driver.h>
#include "internal.h"
#define REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS 10000
struct regulator_irq {
struct regulator_irq_data rdata;
struct regulator_irq_desc desc;
int irq;
int retry_cnt;
struct delayed_work isr_work;
};
/*
 * Should only be called from a threaded handler to prevent a potential deadlock.
*/
static void rdev_flag_err(struct regulator_dev *rdev, int err)
{
spin_lock(&rdev->err_lock);
rdev->cached_err |= err;
spin_unlock(&rdev->err_lock);
}
static void rdev_clear_err(struct regulator_dev *rdev, int err)
{
spin_lock(&rdev->err_lock);
rdev->cached_err &= ~err;
spin_unlock(&rdev->err_lock);
}
static void regulator_notifier_isr_work(struct work_struct *work)
{
struct regulator_irq *h;
struct regulator_irq_desc *d;
struct regulator_irq_data *rid;
int ret = 0;
int tmo, i;
int num_rdevs;
h = container_of(work, struct regulator_irq,
isr_work.work);
d = &h->desc;
rid = &h->rdata;
num_rdevs = rid->num_states;
reread:
if (d->fatal_cnt && h->retry_cnt > d->fatal_cnt) {
if (!d->die)
return hw_protection_shutdown("Regulator HW failure? - no IC recovery",
REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS);
ret = d->die(rid);
/*
* If the 'last resort' IC recovery failed we will have
* nothing else left to do...
*/
if (ret)
return hw_protection_shutdown("Regulator HW failure. IC recovery failed",
REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS);
/*
* If h->die() was implemented we assume recovery has been
* attempted (probably regulator was shut down) and we
* just enable IRQ and bail-out.
*/
goto enable_out;
}
if (d->renable) {
ret = d->renable(rid);
if (ret == REGULATOR_FAILED_RETRY) {
/* Driver could not get current status */
h->retry_cnt++;
if (!d->reread_ms)
goto reread;
tmo = d->reread_ms;
goto reschedule;
}
if (ret) {
/*
			 * IC status reading succeeded. Update the error info
			 * just in case renable() changed it.
*/
for (i = 0; i < num_rdevs; i++) {
struct regulator_err_state *stat;
struct regulator_dev *rdev;
stat = &rid->states[i];
rdev = stat->rdev;
rdev_clear_err(rdev, (~stat->errors) &
stat->possible_errs);
}
h->retry_cnt++;
/*
* The IC indicated problem is still ON - no point in
* re-enabling the IRQ. Retry later.
*/
tmo = d->irq_off_ms;
goto reschedule;
}
}
/*
* Either IC reported problem cleared or no status checker was provided.
* If problems are gone - good. If not - then the IRQ will fire again
* and we'll have a new nice loop. In any case we should clear error
* flags here and re-enable IRQs.
*/
for (i = 0; i < num_rdevs; i++) {
struct regulator_err_state *stat;
struct regulator_dev *rdev;
stat = &rid->states[i];
rdev = stat->rdev;
rdev_clear_err(rdev, stat->possible_errs);
}
/*
* Things have been seemingly successful => zero retry-counter.
*/
h->retry_cnt = 0;
enable_out:
enable_irq(h->irq);
return;
reschedule:
if (!d->high_prio)
mod_delayed_work(system_wq, &h->isr_work,
msecs_to_jiffies(tmo));
else
mod_delayed_work(system_highpri_wq, &h->isr_work,
msecs_to_jiffies(tmo));
}
static irqreturn_t regulator_notifier_isr(int irq, void *data)
{
struct regulator_irq *h = data;
struct regulator_irq_desc *d;
struct regulator_irq_data *rid;
unsigned long rdev_map = 0;
int num_rdevs;
int ret, i;
d = &h->desc;
rid = &h->rdata;
num_rdevs = rid->num_states;
if (d->fatal_cnt)
h->retry_cnt++;
/*
	 * We spare a few cycles by not clearing statuses prior to this call.
	 * The IC driver must initialize the status buffers for those rdevs
	 * for which it indicates active events via rdev_map.
	 *
	 * Maybe we should clear them here anyway, just to be on the safe
	 * side(?)
*/
ret = d->map_event(irq, rid, &rdev_map);
/*
* If status reading fails (which is unlikely) we don't ack/disable
* IRQ but just increase fail count and retry when IRQ fires again.
* If retry_count exceeds the given safety limit we call IC specific die
* handler which can try disabling regulator(s).
*
* If no die handler is given we will just power-off as a last resort.
*
* We could try disabling all associated rdevs - but we might shoot
	 * ourselves in the head and leave the problematic regulator enabled. So
	 * if the IC has no die handler populated, we just assume the regulator
	 * can't be disabled.
*/
if (unlikely(ret == REGULATOR_FAILED_RETRY))
goto fail_out;
h->retry_cnt = 0;
/*
	 * Let's not disable the IRQ if there were no status bits for us. We'd
	 * better leave spurious IRQ handling to genirq.
*/
if (ret || !rdev_map)
return IRQ_NONE;
/*
* Some events are bogus if the regulator is disabled. Skip such events
* if all relevant regulators are disabled
*/
if (d->skip_off) {
for_each_set_bit(i, &rdev_map, num_rdevs) {
struct regulator_dev *rdev;
const struct regulator_ops *ops;
rdev = rid->states[i].rdev;
ops = rdev->desc->ops;
/*
* If any of the flagged regulators is enabled we do
* handle this
*/
if (ops->is_enabled(rdev))
break;
}
if (i == num_rdevs)
return IRQ_NONE;
}
/* Disable IRQ if HW keeps line asserted */
if (d->irq_off_ms)
disable_irq_nosync(irq);
/*
* IRQ seems to be for us. Let's fire correct notifiers / store error
* flags
*/
for_each_set_bit(i, &rdev_map, num_rdevs) {
struct regulator_err_state *stat;
struct regulator_dev *rdev;
stat = &rid->states[i];
rdev = stat->rdev;
rdev_dbg(rdev, "Sending regulator notification EVT 0x%lx\n",
stat->notifs);
regulator_notifier_call_chain(rdev, stat->notifs, NULL);
rdev_flag_err(rdev, stat->errors);
}
if (d->irq_off_ms) {
if (!d->high_prio)
schedule_delayed_work(&h->isr_work,
msecs_to_jiffies(d->irq_off_ms));
else
mod_delayed_work(system_highpri_wq,
&h->isr_work,
msecs_to_jiffies(d->irq_off_ms));
}
return IRQ_HANDLED;
fail_out:
if (d->fatal_cnt && h->retry_cnt > d->fatal_cnt) {
/* If we have no recovery, just try shut down straight away */
if (!d->die) {
hw_protection_shutdown("Regulator failure. Retry count exceeded",
REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS);
} else {
ret = d->die(rid);
/* If die() failed shut down as a last attempt to save the HW */
if (ret)
hw_protection_shutdown("Regulator failure. Recovery failed",
REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS);
}
}
return IRQ_NONE;
}
static int init_rdev_state(struct device *dev, struct regulator_irq *h,
struct regulator_dev **rdev, int common_err,
int *rdev_err, int rdev_amount)
{
int i;
h->rdata.states = devm_kzalloc(dev, sizeof(*h->rdata.states) *
rdev_amount, GFP_KERNEL);
if (!h->rdata.states)
return -ENOMEM;
h->rdata.num_states = rdev_amount;
h->rdata.data = h->desc.data;
for (i = 0; i < rdev_amount; i++) {
h->rdata.states[i].possible_errs = common_err;
if (rdev_err)
h->rdata.states[i].possible_errs |= *rdev_err++;
h->rdata.states[i].rdev = *rdev++;
}
return 0;
}
static void init_rdev_errors(struct regulator_irq *h)
{
int i;
for (i = 0; i < h->rdata.num_states; i++)
if (h->rdata.states[i].possible_errs)
h->rdata.states[i].rdev->use_cached_err = true;
}
/**
* regulator_irq_helper - register IRQ based regulator event/error notifier
*
* @dev: device providing the IRQs
* @d: IRQ helper descriptor.
* @irq: IRQ used to inform events/errors to be notified.
* @irq_flags: Extra IRQ flags to be OR'ed with the default
* IRQF_ONESHOT when requesting the (threaded) irq.
* @common_errs: Errors which can be flagged by this IRQ for all rdevs.
* When IRQ is re-enabled these errors will be cleared
* from all associated regulators. Use this instead of the
* per_rdev_errs if you use
* regulator_irq_map_event_simple() for event mapping.
* @per_rdev_errs: Optional error flag array describing errors specific
* for only some of the regulators. These errors will be
* or'ed with common errors. If this is given the array
* should contain rdev_amount flags. Can be set to NULL
* if there is no regulator specific error flags for this
* IRQ.
* @rdev: Array of pointers to regulators associated with this
* IRQ.
* @rdev_amount: Amount of regulators associated with this IRQ.
*
* Return: handle to irq_helper or an ERR_PTR() encoded error code.
*/
void *regulator_irq_helper(struct device *dev,
const struct regulator_irq_desc *d, int irq,
int irq_flags, int common_errs, int *per_rdev_errs,
struct regulator_dev **rdev, int rdev_amount)
{
struct regulator_irq *h;
int ret;
if (!rdev_amount || !d || !d->map_event || !d->name)
return ERR_PTR(-EINVAL);
h = devm_kzalloc(dev, sizeof(*h), GFP_KERNEL);
if (!h)
return ERR_PTR(-ENOMEM);
h->irq = irq;
h->desc = *d;
ret = init_rdev_state(dev, h, rdev, common_errs, per_rdev_errs,
rdev_amount);
if (ret)
return ERR_PTR(ret);
init_rdev_errors(h);
if (h->desc.irq_off_ms)
INIT_DELAYED_WORK(&h->isr_work, regulator_notifier_isr_work);
ret = request_threaded_irq(h->irq, NULL, regulator_notifier_isr,
IRQF_ONESHOT | irq_flags, h->desc.name, h);
if (ret) {
dev_err(dev, "Failed to request IRQ %d\n", irq);
return ERR_PTR(ret);
}
return h;
}
EXPORT_SYMBOL_GPL(regulator_irq_helper);
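/*
 * Minimal usage sketch (hypothetical caller, not part of this file;
 * my_ovp_desc, my_ovp_irq and my_rdev are assumed names). A driver with
 * one regulator and a single-purpose over-voltage IRQ could do:
 *
 *	static const struct regulator_irq_desc my_ovp_desc = {
 *		.name = "my-pmic-ovp",
 *		.map_event = regulator_irq_map_event_simple,
 *		.irq_off_ms = 1000,
 *	};
 *
 *	helper = regulator_irq_helper(dev, &my_ovp_desc, my_ovp_irq, 0,
 *				      REGULATOR_ERROR_OVER_VOLTAGE_WARN,
 *				      NULL, &my_rdev, 1);
 *
 * and later drop it with regulator_irq_helper_cancel(&helper).
 */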
/**
* regulator_irq_helper_cancel - drop IRQ based regulator event/error notifier
*
* @handle: Pointer to handle returned by a successful call to
* regulator_irq_helper(). Will be NULLed upon return.
*
* The associated IRQ is released and work is cancelled when the function
* returns.
*/
void regulator_irq_helper_cancel(void **handle)
{
if (handle && *handle) {
struct regulator_irq *h = *handle;
free_irq(h->irq, h);
if (h->desc.irq_off_ms)
cancel_delayed_work_sync(&h->isr_work);
		*handle = NULL;
}
}
EXPORT_SYMBOL_GPL(regulator_irq_helper_cancel);
/**
* regulator_irq_map_event_simple - regulator IRQ notification for trivial IRQs
*
* @irq: Number of IRQ that occurred
* @rid: Information about the event IRQ indicates
* @dev_mask: mask indicating the regulator originating the IRQ
*
 * Regulators whose IRQ has a single, well-defined purpose (it always
 * indicates exactly one event and is relevant to exactly one regulator
 * device) can use this function as the map_event callback for their
 * regulator IRQ notification helper. Exactly one rdev and exactly one
 * error (in the "common_errs" field) must be given at IRQ helper
 * registration for regulator_irq_map_event_simple() to be viable.
*/
int regulator_irq_map_event_simple(int irq, struct regulator_irq_data *rid,
unsigned long *dev_mask)
{
int err = rid->states[0].possible_errs;
*dev_mask = 1;
/*
* This helper should only be used in a situation where the IRQ
* can indicate only one type of problem for one specific rdev.
	 * Something fishy is going on if we have multiple rdevs or ERROR
	 * flags here.
*/
if (WARN_ON(rid->num_states != 1 || hweight32(err) != 1))
return 0;
rid->states[0].errors = err;
rid->states[0].notifs = regulator_err2notif(err);
return 0;
}
EXPORT_SYMBOL_GPL(regulator_irq_map_event_simple);
| linux-master | drivers/regulator/irq_helpers.c |
// SPDX-License-Identifier: GPL-2.0+
//
// mp5416.c - regulator driver for mps mp5416
//
// Copyright 2020 Monolithic Power Systems, Inc
//
// Author: Saravanan Sekar <[email protected]>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#define MP5416_REG_CTL0 0x00
#define MP5416_REG_CTL1 0x01
#define MP5416_REG_CTL2 0x02
#define MP5416_REG_ILIM 0x03
#define MP5416_REG_BUCK1 0x04
#define MP5416_REG_BUCK2 0x05
#define MP5416_REG_BUCK3 0x06
#define MP5416_REG_BUCK4 0x07
#define MP5416_REG_LDO1 0x08
#define MP5416_REG_LDO2 0x09
#define MP5416_REG_LDO3 0x0a
#define MP5416_REG_LDO4 0x0b
#define MP5416_REGULATOR_EN BIT(7)
#define MP5416_MASK_VSET 0x7f
#define MP5416_MASK_BUCK1_ILIM 0xc0
#define MP5416_MASK_BUCK2_ILIM 0x0c
#define MP5416_MASK_BUCK3_ILIM 0x30
#define MP5416_MASK_BUCK4_ILIM 0x03
#define MP5416_MASK_DVS_SLEWRATE 0xc0
/* values in uV */
#define MP5416_VOLT1_MIN 600000
#define MP5416_VOLT1_MAX 2187500
#define MP5416_VOLT1_STEP 12500
#define MP5416_VOLT2_MIN 800000
#define MP5416_VOLT2_MAX 3975000
#define MP5416_VOLT2_STEP 25000
#define MP5416_VOLT1_RANGE \
((MP5416_VOLT1_MAX - MP5416_VOLT1_MIN)/MP5416_VOLT1_STEP + 1)
#define MP5416_VOLT2_RANGE \
((MP5416_VOLT2_MAX - MP5416_VOLT2_MIN)/MP5416_VOLT2_STEP + 1)
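/*
 * Both ranges work out to (2187500 - 600000) / 12500 + 1 =
 * (3975000 - 800000) / 25000 + 1 = 128 selectors, which is exactly
 * what the 7-bit MP5416_MASK_VSET (0x7f) field can address.
 */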
#define MP5416BUCK(_name, _id, _ilim, _dreg, _dval, _vsel) \
[MP5416_BUCK ## _id] = { \
.id = MP5416_BUCK ## _id, \
.name = _name, \
.of_match = _name, \
.regulators_node = "regulators", \
.ops = &mp5416_buck_ops, \
.min_uV = MP5416_VOLT ##_vsel## _MIN, \
.uV_step = MP5416_VOLT ##_vsel## _STEP, \
.n_voltages = MP5416_VOLT ##_vsel## _RANGE, \
.curr_table = _ilim, \
.n_current_limits = ARRAY_SIZE(_ilim), \
.csel_reg = MP5416_REG_ILIM, \
.csel_mask = MP5416_MASK_BUCK ## _id ##_ILIM, \
.vsel_reg = MP5416_REG_BUCK ## _id, \
.vsel_mask = MP5416_MASK_VSET, \
.enable_reg = MP5416_REG_BUCK ## _id, \
.enable_mask = MP5416_REGULATOR_EN, \
.ramp_reg = MP5416_REG_CTL2, \
.ramp_mask = MP5416_MASK_DVS_SLEWRATE, \
.ramp_delay_table = mp5416_buck_ramp_table, \
.n_ramp_values = ARRAY_SIZE(mp5416_buck_ramp_table), \
.active_discharge_on = _dval, \
.active_discharge_reg = _dreg, \
.active_discharge_mask = _dval, \
.owner = THIS_MODULE, \
}
#define MP5416LDO(_name, _id, _dval) \
[MP5416_LDO ## _id] = { \
.id = MP5416_LDO ## _id, \
.name = _name, \
.of_match = _name, \
.regulators_node = "regulators", \
.ops = &mp5416_ldo_ops, \
.min_uV = MP5416_VOLT2_MIN, \
.uV_step = MP5416_VOLT2_STEP, \
.n_voltages = MP5416_VOLT2_RANGE, \
.vsel_reg = MP5416_REG_LDO ##_id, \
.vsel_mask = MP5416_MASK_VSET, \
.enable_reg = MP5416_REG_LDO ##_id, \
.enable_mask = MP5416_REGULATOR_EN, \
.active_discharge_on = _dval, \
.active_discharge_reg = MP5416_REG_CTL2, \
.active_discharge_mask = _dval, \
.owner = THIS_MODULE, \
}
enum mp5416_regulators {
MP5416_BUCK1,
MP5416_BUCK2,
MP5416_BUCK3,
MP5416_BUCK4,
MP5416_LDO1,
MP5416_LDO2,
MP5416_LDO3,
MP5416_LDO4,
MP5416_MAX_REGULATORS,
};
static const struct regmap_config mp5416_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = 0x0d,
};
/* Current limits array (in uA)
* ILIM1 & ILIM3
*/
static const unsigned int mp5416_I_limits1[] = {
3800000, 4600000, 5600000, 6800000
};
/* ILIM2 & ILIM4 */
static const unsigned int mp5416_I_limits2[] = {
2200000, 3200000, 4200000, 5200000
};
/*
* DVS ramp rate BUCK1 to BUCK4
* 00: 32mV/us
* 01: 16mV/us
* 10: 8mV/us
* 11: 4mV/us
*/
static const unsigned int mp5416_buck_ramp_table[] = {
32000, 16000, 8000, 4000
};
static const struct regulator_ops mp5416_ldo_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_active_discharge = regulator_set_active_discharge_regmap,
};
static const struct regulator_ops mp5416_buck_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_active_discharge = regulator_set_active_discharge_regmap,
.get_current_limit = regulator_get_current_limit_regmap,
.set_current_limit = regulator_set_current_limit_regmap,
.set_ramp_delay = regulator_set_ramp_delay_regmap,
};
static struct regulator_desc mp5416_regulators_desc[MP5416_MAX_REGULATORS] = {
MP5416BUCK("buck1", 1, mp5416_I_limits1, MP5416_REG_CTL1, BIT(0), 1),
MP5416BUCK("buck2", 2, mp5416_I_limits2, MP5416_REG_CTL1, BIT(1), 2),
MP5416BUCK("buck3", 3, mp5416_I_limits1, MP5416_REG_CTL1, BIT(2), 1),
MP5416BUCK("buck4", 4, mp5416_I_limits2, MP5416_REG_CTL2, BIT(5), 2),
MP5416LDO("ldo1", 1, BIT(4)),
MP5416LDO("ldo2", 2, BIT(3)),
MP5416LDO("ldo3", 3, BIT(2)),
MP5416LDO("ldo4", 4, BIT(1)),
};
static struct regulator_desc mp5496_regulators_desc[MP5416_MAX_REGULATORS] = {
MP5416BUCK("buck1", 1, mp5416_I_limits1, MP5416_REG_CTL1, BIT(0), 1),
MP5416BUCK("buck2", 2, mp5416_I_limits2, MP5416_REG_CTL1, BIT(1), 1),
MP5416BUCK("buck3", 3, mp5416_I_limits1, MP5416_REG_CTL1, BIT(2), 1),
MP5416BUCK("buck4", 4, mp5416_I_limits2, MP5416_REG_CTL2, BIT(5), 1),
MP5416LDO("ldo1", 1, BIT(4)),
MP5416LDO("ldo2", 2, BIT(3)),
MP5416LDO("ldo3", 3, BIT(2)),
MP5416LDO("ldo4", 4, BIT(1)),
};
static int mp5416_i2c_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct regulator_config config = { NULL, };
const struct regulator_desc *desc;
struct regulator_dev *rdev;
struct regmap *regmap;
int i;
regmap = devm_regmap_init_i2c(client, &mp5416_regmap_config);
if (IS_ERR(regmap)) {
dev_err(dev, "Failed to allocate regmap!\n");
return PTR_ERR(regmap);
}
desc = of_device_get_match_data(dev);
if (!desc)
return -ENODEV;
config.dev = dev;
config.regmap = regmap;
for (i = 0; i < MP5416_MAX_REGULATORS; i++) {
rdev = devm_regulator_register(dev,
&desc[i],
&config);
if (IS_ERR(rdev)) {
dev_err(dev, "Failed to register regulator!\n");
return PTR_ERR(rdev);
}
}
return 0;
}
static const struct of_device_id mp5416_of_match[] = {
{ .compatible = "mps,mp5416", .data = &mp5416_regulators_desc },
{ .compatible = "mps,mp5496", .data = &mp5496_regulators_desc },
{},
};
MODULE_DEVICE_TABLE(of, mp5416_of_match);
static const struct i2c_device_id mp5416_id[] = {
{ "mp5416", },
{ "mp5496", },
{ },
};
MODULE_DEVICE_TABLE(i2c, mp5416_id);
static struct i2c_driver mp5416_regulator_driver = {
.driver = {
.name = "mp5416",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(mp5416_of_match),
},
.probe = mp5416_i2c_probe,
.id_table = mp5416_id,
};
module_i2c_driver(mp5416_regulator_driver);
MODULE_AUTHOR("Saravanan Sekar <[email protected]>");
MODULE_DESCRIPTION("MP5416 PMIC regulator driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/regulator/mp5416.c |
// SPDX-License-Identifier: GPL-2.0+
//
// max77802.c - Regulator driver for the Maxim 77802
//
// Copyright (C) 2013-2014 Google, Inc
// Simon Glass <[email protected]>
//
// Copyright (C) 2012 Samsung Electronics
// Chiwoong Byun <[email protected]>
// Jonghwa Lee <[email protected]>
//
// This driver is based on max8997.c
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <linux/mfd/max77686.h>
#include <linux/mfd/max77686-private.h>
#include <dt-bindings/regulator/maxim,max77802.h>
/* Default ramp delay in case it is not manually set */
#define MAX77802_RAMP_DELAY 100000 /* uV/us */
#define MAX77802_OPMODE_SHIFT_LDO 6
#define MAX77802_OPMODE_BUCK234_SHIFT 4
#define MAX77802_OPMODE_MASK 0x3
#define MAX77802_VSEL_MASK 0x3F
#define MAX77802_DVS_VSEL_MASK 0xFF
#define MAX77802_RAMP_RATE_MASK_2BIT 0xC0
#define MAX77802_RAMP_RATE_SHIFT_2BIT 6
#define MAX77802_RAMP_RATE_MASK_4BIT 0xF0
#define MAX77802_RAMP_RATE_SHIFT_4BIT 4
#define MAX77802_STATUS_OFF 0x0
#define MAX77802_OFF_PWRREQ 0x1
#define MAX77802_LP_PWRREQ 0x2
static const unsigned int max77802_buck234_ramp_table[] = {
12500,
25000,
50000,
100000,
};
static const unsigned int max77802_buck16_ramp_table[] = {
1000, 2000, 3030, 4000,
5000, 5880, 7140, 8330,
9090, 10000, 11110, 12500,
16670, 25000, 50000, 100000,
};
struct max77802_regulator_prv {
/* Array indexed by regulator id */
unsigned int opmode[MAX77802_REG_MAX];
};
static inline unsigned int max77802_map_mode(unsigned int mode)
{
return mode == MAX77802_OPMODE_NORMAL ?
REGULATOR_MODE_NORMAL : REGULATOR_MODE_STANDBY;
}
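/*
 * Per-regulator placement of the 2-bit opmode field within the enable
 * register (derived from the shift defines above): BUCK1 and BUCKs 5-10
 * use bits [1:0], BUCKs 2-4 use bits [5:4], and LDOs use bits [7:6].
 */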
static int max77802_get_opmode_shift(int id)
{
if (id == MAX77802_BUCK1 || (id >= MAX77802_BUCK5 &&
id <= MAX77802_BUCK10))
return 0;
if (id >= MAX77802_BUCK2 && id <= MAX77802_BUCK4)
return MAX77802_OPMODE_BUCK234_SHIFT;
if (id >= MAX77802_LDO1 && id <= MAX77802_LDO35)
return MAX77802_OPMODE_SHIFT_LDO;
return -EINVAL;
}
/**
* max77802_set_suspend_disable - Disable the regulator during system suspend
* @rdev: regulator to mark as disabled
*
* All regulators except LDO 1, 3, 20 and 21 support OFF by PWRREQ.
* Configure the regulator so the PMIC will turn it OFF during system suspend.
*/
static int max77802_set_suspend_disable(struct regulator_dev *rdev)
{
unsigned int val = MAX77802_OFF_PWRREQ;
struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev);
unsigned int id = rdev_get_id(rdev);
int shift = max77802_get_opmode_shift(id);
if (WARN_ON_ONCE(id >= ARRAY_SIZE(max77802->opmode)))
return -EINVAL;
max77802->opmode[id] = val;
return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
rdev->desc->enable_mask, val << shift);
}
/*
* Some LDOs support Low Power Mode while the system is running.
*
* LDOs 1, 3, 20, 21.
*/
static int max77802_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev);
unsigned int id = rdev_get_id(rdev);
unsigned int val;
int shift = max77802_get_opmode_shift(id);
switch (mode) {
case REGULATOR_MODE_STANDBY:
val = MAX77802_OPMODE_LP; /* ON in Low Power Mode */
break;
case REGULATOR_MODE_NORMAL:
val = MAX77802_OPMODE_NORMAL; /* ON in Normal Mode */
break;
default:
dev_warn(&rdev->dev, "%s: regulator mode: 0x%x not supported\n",
rdev->desc->name, mode);
return -EINVAL;
}
if (WARN_ON_ONCE(id >= ARRAY_SIZE(max77802->opmode)))
return -EINVAL;
max77802->opmode[id] = val;
return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
rdev->desc->enable_mask, val << shift);
}
static unsigned max77802_get_mode(struct regulator_dev *rdev)
{
struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev);
unsigned int id = rdev_get_id(rdev);
if (WARN_ON_ONCE(id >= ARRAY_SIZE(max77802->opmode)))
return -EINVAL;
return max77802_map_mode(max77802->opmode[id]);
}
/**
* max77802_set_suspend_mode - set regulator opmode when the system is suspended
* @rdev: regulator to change mode
* @mode: operating mode to be set
*
* Will set the operating mode for the regulators during system suspend.
* This function is valid for the three different enable control logics:
*
* Enable Control Logic1 by PWRREQ (BUCK 2-4 and LDOs 2, 4-19, 22-35)
* Enable Control Logic2 by PWRREQ (LDOs 1, 20, 21)
* Enable Control Logic3 by PWRREQ (LDO 3)
*
* If setting the regulator mode fails, the function only warns but does
* not return an error code, so that the regulator core does not stop
* setting the operating mode for the remaining regulators.
*/
static int max77802_set_suspend_mode(struct regulator_dev *rdev,
unsigned int mode)
{
struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev);
unsigned int id = rdev_get_id(rdev);
unsigned int val;
int shift = max77802_get_opmode_shift(id);
if (WARN_ON_ONCE(id >= ARRAY_SIZE(max77802->opmode)))
return -EINVAL;
/*
* If the regulator has been disabled for suspend
* then it is invalid to try setting a suspend mode.
*/
if (max77802->opmode[id] == MAX77802_OFF_PWRREQ) {
dev_warn(&rdev->dev, "%s: is disabled, mode: 0x%x not set\n",
rdev->desc->name, mode);
return 0;
}
switch (mode) {
case REGULATOR_MODE_STANDBY:
/*
* If the regulator opmode is normal then enable
* ON in Low Power Mode by PWRREQ. If the mode is
* already Low Power then no action is required.
*/
if (max77802->opmode[id] == MAX77802_OPMODE_NORMAL)
val = MAX77802_LP_PWRREQ;
else
return 0;
break;
case REGULATOR_MODE_NORMAL:
/*
* If the regulator operating mode is Low Power then
* normal is not a valid opmode in suspend. If the
* mode is already normal then no action is required.
*/
if (max77802->opmode[id] == MAX77802_OPMODE_LP)
dev_warn(&rdev->dev, "%s: in Low Power: 0x%x invalid\n",
rdev->desc->name, mode);
return 0;
default:
dev_warn(&rdev->dev, "%s: regulator mode: 0x%x not supported\n",
rdev->desc->name, mode);
return -EINVAL;
}
return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
rdev->desc->enable_mask, val << shift);
}
static int max77802_enable(struct regulator_dev *rdev)
{
struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev);
unsigned int id = rdev_get_id(rdev);
int shift = max77802_get_opmode_shift(id);
if (WARN_ON_ONCE(id >= ARRAY_SIZE(max77802->opmode)))
return -EINVAL;
if (max77802->opmode[id] == MAX77802_OFF_PWRREQ)
max77802->opmode[id] = MAX77802_OPMODE_NORMAL;
return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
rdev->desc->enable_mask,
max77802->opmode[id] << shift);
}
/*
* LDOs 2, 4-19, 22-35
*/
static const struct regulator_ops max77802_ldo_ops_logic1 = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.is_enabled = regulator_is_enabled_regmap,
.enable = max77802_enable,
.disable = regulator_disable_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
.set_suspend_disable = max77802_set_suspend_disable,
.set_suspend_mode = max77802_set_suspend_mode,
};
/*
* LDOs 1, 20, 21, 3
*/
static const struct regulator_ops max77802_ldo_ops_logic2 = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.is_enabled = regulator_is_enabled_regmap,
.enable = max77802_enable,
.disable = regulator_disable_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
.set_mode = max77802_set_mode,
.get_mode = max77802_get_mode,
.set_suspend_mode = max77802_set_suspend_mode,
};
/* BUCKS 1, 6 */
static const struct regulator_ops max77802_buck_16_dvs_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.is_enabled = regulator_is_enabled_regmap,
.enable = max77802_enable,
.disable = regulator_disable_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
.set_ramp_delay = regulator_set_ramp_delay_regmap,
.set_suspend_disable = max77802_set_suspend_disable,
};
/* BUCKs 2-4 */
static const struct regulator_ops max77802_buck_234_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.is_enabled = regulator_is_enabled_regmap,
.enable = max77802_enable,
.disable = regulator_disable_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
.set_ramp_delay = regulator_set_ramp_delay_regmap,
.set_suspend_disable = max77802_set_suspend_disable,
.set_suspend_mode = max77802_set_suspend_mode,
};
/* BUCKs 5, 7-10 */
static const struct regulator_ops max77802_buck_dvs_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.is_enabled = regulator_is_enabled_regmap,
.enable = max77802_enable,
.disable = regulator_disable_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
.set_suspend_disable = max77802_set_suspend_disable,
};
/* LDOs 3-7, 9-14, 18-26, 28, 29, 32-34 */
#define regulator_77802_desc_p_ldo(num, supply, log) { \
.name = "LDO"#num, \
.of_match = of_match_ptr("LDO"#num), \
.regulators_node = of_match_ptr("regulators"), \
.id = MAX77802_LDO##num, \
.supply_name = "inl"#supply, \
.ops = &max77802_ldo_ops_logic##log, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.min_uV = 800000, \
.uV_step = 50000, \
.ramp_delay = MAX77802_RAMP_DELAY, \
.n_voltages = 1 << 6, \
.vsel_reg = MAX77802_REG_LDO1CTRL1 + num - 1, \
.vsel_mask = MAX77802_VSEL_MASK, \
.enable_reg = MAX77802_REG_LDO1CTRL1 + num - 1, \
.enable_mask = MAX77802_OPMODE_MASK << MAX77802_OPMODE_SHIFT_LDO, \
.of_map_mode = max77802_map_mode, \
}
/* LDOs 1, 2, 8, 15, 17, 27, 30, 35 */
#define regulator_77802_desc_n_ldo(num, supply, log) { \
.name = "LDO"#num, \
.of_match = of_match_ptr("LDO"#num), \
.regulators_node = of_match_ptr("regulators"), \
.id = MAX77802_LDO##num, \
.supply_name = "inl"#supply, \
.ops = &max77802_ldo_ops_logic##log, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.min_uV = 800000, \
.uV_step = 25000, \
.ramp_delay = MAX77802_RAMP_DELAY, \
.n_voltages = 1 << 6, \
.vsel_reg = MAX77802_REG_LDO1CTRL1 + num - 1, \
.vsel_mask = MAX77802_VSEL_MASK, \
.enable_reg = MAX77802_REG_LDO1CTRL1 + num - 1, \
.enable_mask = MAX77802_OPMODE_MASK << MAX77802_OPMODE_SHIFT_LDO, \
.of_map_mode = max77802_map_mode, \
}
/* BUCKs 1, 6 */
#define regulator_77802_desc_16_buck(num) { \
.name = "BUCK"#num, \
.of_match = of_match_ptr("BUCK"#num), \
.regulators_node = of_match_ptr("regulators"), \
.id = MAX77802_BUCK##num, \
.supply_name = "inb"#num, \
.ops = &max77802_buck_16_dvs_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.min_uV = 612500, \
.uV_step = 6250, \
.ramp_delay = MAX77802_RAMP_DELAY, \
.n_voltages = 1 << 8, \
.vsel_reg = MAX77802_REG_BUCK ## num ## DVS1, \
.vsel_mask = MAX77802_DVS_VSEL_MASK, \
.enable_reg = MAX77802_REG_BUCK ## num ## CTRL, \
.enable_mask = MAX77802_OPMODE_MASK, \
.ramp_reg = MAX77802_REG_BUCK ## num ## CTRL, \
.ramp_mask = MAX77802_RAMP_RATE_MASK_4BIT, \
.ramp_delay_table = max77802_buck16_ramp_table, \
.n_ramp_values = ARRAY_SIZE(max77802_buck16_ramp_table), \
.of_map_mode = max77802_map_mode, \
}
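/* With 256 selectors, BUCK1/6 top out at 612500 + 255 * 6250 = 2206250 uV */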
/* BUCKS 2-4 */
#define regulator_77802_desc_234_buck(num) { \
.name = "BUCK"#num, \
.of_match = of_match_ptr("BUCK"#num), \
.regulators_node = of_match_ptr("regulators"), \
.id = MAX77802_BUCK##num, \
.supply_name = "inb"#num, \
.ops = &max77802_buck_234_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.min_uV = 600000, \
.uV_step = 6250, \
.ramp_delay = MAX77802_RAMP_DELAY, \
.n_voltages = 0x91, \
.vsel_reg = MAX77802_REG_BUCK ## num ## DVS1, \
.vsel_mask = MAX77802_DVS_VSEL_MASK, \
.enable_reg = MAX77802_REG_BUCK ## num ## CTRL1, \
.enable_mask = MAX77802_OPMODE_MASK << \
MAX77802_OPMODE_BUCK234_SHIFT, \
.ramp_reg = MAX77802_REG_BUCK ## num ## CTRL1, \
.ramp_mask = MAX77802_RAMP_RATE_MASK_2BIT, \
.ramp_delay_table = max77802_buck234_ramp_table, \
.n_ramp_values = ARRAY_SIZE(max77802_buck234_ramp_table), \
.of_map_mode = max77802_map_mode, \
}
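/* 0x91 = 145 selectors, so BUCKs 2-4 top out at 600000 + 144 * 6250 = 1500000 uV */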
/* BUCK 5 */
#define regulator_77802_desc_buck5(num) { \
.name = "BUCK"#num, \
.of_match = of_match_ptr("BUCK"#num), \
.regulators_node = of_match_ptr("regulators"), \
.id = MAX77802_BUCK##num, \
.supply_name = "inb"#num, \
.ops = &max77802_buck_dvs_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.min_uV = 750000, \
.uV_step = 50000, \
.ramp_delay = MAX77802_RAMP_DELAY, \
.n_voltages = 1 << 6, \
.vsel_reg = MAX77802_REG_BUCK5OUT, \
.vsel_mask = MAX77802_VSEL_MASK, \
.enable_reg = MAX77802_REG_BUCK5CTRL, \
.enable_mask = MAX77802_OPMODE_MASK, \
.of_map_mode = max77802_map_mode, \
}
/* BUCKs 7-10 */
#define regulator_77802_desc_buck7_10(num) { \
.name = "BUCK"#num, \
.of_match = of_match_ptr("BUCK"#num), \
.regulators_node = of_match_ptr("regulators"), \
.id = MAX77802_BUCK##num, \
.supply_name = "inb"#num, \
.ops = &max77802_buck_dvs_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.min_uV = 750000, \
.uV_step = 50000, \
.ramp_delay = MAX77802_RAMP_DELAY, \
.n_voltages = 1 << 6, \
.vsel_reg = MAX77802_REG_BUCK7OUT + (num - 7) * 3, \
.vsel_mask = MAX77802_VSEL_MASK, \
.enable_reg = MAX77802_REG_BUCK7CTRL + (num - 7) * 3, \
.enable_mask = MAX77802_OPMODE_MASK, \
.of_map_mode = max77802_map_mode, \
}
static const struct regulator_desc regulators[] = {
regulator_77802_desc_16_buck(1),
regulator_77802_desc_234_buck(2),
regulator_77802_desc_234_buck(3),
regulator_77802_desc_234_buck(4),
regulator_77802_desc_buck5(5),
regulator_77802_desc_16_buck(6),
regulator_77802_desc_buck7_10(7),
regulator_77802_desc_buck7_10(8),
regulator_77802_desc_buck7_10(9),
regulator_77802_desc_buck7_10(10),
regulator_77802_desc_n_ldo(1, 10, 2),
regulator_77802_desc_n_ldo(2, 10, 1),
regulator_77802_desc_p_ldo(3, 3, 2),
regulator_77802_desc_p_ldo(4, 6, 1),
regulator_77802_desc_p_ldo(5, 3, 1),
regulator_77802_desc_p_ldo(6, 3, 1),
regulator_77802_desc_p_ldo(7, 3, 1),
regulator_77802_desc_n_ldo(8, 1, 1),
regulator_77802_desc_p_ldo(9, 5, 1),
regulator_77802_desc_p_ldo(10, 4, 1),
regulator_77802_desc_p_ldo(11, 4, 1),
regulator_77802_desc_p_ldo(12, 9, 1),
regulator_77802_desc_p_ldo(13, 4, 1),
regulator_77802_desc_p_ldo(14, 4, 1),
regulator_77802_desc_n_ldo(15, 1, 1),
regulator_77802_desc_n_ldo(17, 2, 1),
regulator_77802_desc_p_ldo(18, 7, 1),
regulator_77802_desc_p_ldo(19, 5, 1),
regulator_77802_desc_p_ldo(20, 7, 2),
regulator_77802_desc_p_ldo(21, 6, 2),
regulator_77802_desc_p_ldo(23, 9, 1),
regulator_77802_desc_p_ldo(24, 6, 1),
regulator_77802_desc_p_ldo(25, 9, 1),
regulator_77802_desc_p_ldo(26, 9, 1),
regulator_77802_desc_n_ldo(27, 2, 1),
regulator_77802_desc_p_ldo(28, 7, 1),
regulator_77802_desc_p_ldo(29, 7, 1),
regulator_77802_desc_n_ldo(30, 2, 1),
regulator_77802_desc_p_ldo(32, 9, 1),
regulator_77802_desc_p_ldo(33, 6, 1),
regulator_77802_desc_p_ldo(34, 9, 1),
regulator_77802_desc_n_ldo(35, 2, 1),
};
static int max77802_pmic_probe(struct platform_device *pdev)
{
struct max77686_dev *iodev = dev_get_drvdata(pdev->dev.parent);
struct max77802_regulator_prv *max77802;
int i, val;
struct regulator_config config = { };
max77802 = devm_kzalloc(&pdev->dev,
sizeof(struct max77802_regulator_prv),
GFP_KERNEL);
if (!max77802)
return -ENOMEM;
config.dev = iodev->dev;
config.regmap = iodev->regmap;
config.driver_data = max77802;
platform_set_drvdata(pdev, max77802);
for (i = 0; i < MAX77802_REG_MAX; i++) {
struct regulator_dev *rdev;
unsigned int id = regulators[i].id;
int shift = max77802_get_opmode_shift(id);
int ret;
ret = regmap_read(iodev->regmap, regulators[i].enable_reg, &val);
if (ret < 0) {
dev_warn(&pdev->dev,
"cannot read current mode for %d\n", i);
val = MAX77802_OPMODE_NORMAL;
} else {
val = val >> shift & MAX77802_OPMODE_MASK;
}
/*
* If the regulator is disabled and the system warm rebooted,
* the hardware reports OFF as the regulator operating mode.
* Default to operating mode NORMAL in that case.
*/
if (id < ARRAY_SIZE(max77802->opmode)) {
if (val == MAX77802_STATUS_OFF)
max77802->opmode[id] = MAX77802_OPMODE_NORMAL;
else
max77802->opmode[id] = val;
}
rdev = devm_regulator_register(&pdev->dev,
®ulators[i], &config);
if (IS_ERR(rdev)) {
ret = PTR_ERR(rdev);
dev_err(&pdev->dev,
"regulator init failed for %d: %d\n", i, ret);
return ret;
}
}
return 0;
}
static const struct platform_device_id max77802_pmic_id[] = {
{"max77802-pmic", 0},
{ },
};
MODULE_DEVICE_TABLE(platform, max77802_pmic_id);
static struct platform_driver max77802_pmic_driver = {
.driver = {
.name = "max77802-pmic",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = max77802_pmic_probe,
.id_table = max77802_pmic_id,
};
module_platform_driver(max77802_pmic_driver);
MODULE_DESCRIPTION("MAXIM 77802 Regulator Driver");
MODULE_AUTHOR("Simon Glass <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/regulator/max77802-regulator.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Regulators driver for Maxim max8649
*
* Copyright (C) 2009-2010 Marvell International Ltd.
* Haojian Zhuang <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/slab.h>
#include <linux/regulator/max8649.h>
#include <linux/regmap.h>
#define MAX8649_DCDC_VMIN 750000 /* uV */
#define MAX8649_DCDC_VMAX 1380000 /* uV */
#define MAX8649_DCDC_STEP 10000 /* uV */
#define MAX8649_VOL_MASK 0x3f
/* Registers */
#define MAX8649_MODE0 0x00
#define MAX8649_MODE1 0x01
#define MAX8649_MODE2 0x02
#define MAX8649_MODE3 0x03
#define MAX8649_CONTROL 0x04
#define MAX8649_SYNC 0x05
#define MAX8649_RAMP 0x06
#define MAX8649_CHIP_ID1 0x08
#define MAX8649_CHIP_ID2 0x09
/* Bits */
#define MAX8649_EN_PD (1 << 7)
#define MAX8649_VID0_PD (1 << 6)
#define MAX8649_VID1_PD (1 << 5)
#define MAX8649_VID_MASK (3 << 5)
#define MAX8649_FORCE_PWM (1 << 7)
#define MAX8649_SYNC_EXTCLK (1 << 6)
#define MAX8649_EXT_MASK (3 << 6)
#define MAX8649_RAMP_MASK (7 << 5)
#define MAX8649_RAMP_DOWN (1 << 1)
struct max8649_regulator_info {
struct device *dev;
struct regmap *regmap;
unsigned mode:2; /* bit[1:0] = VID1, VID0 */
unsigned extclk_freq:2;
unsigned extclk:1;
unsigned ramp_timing:3;
unsigned ramp_down:1;
};
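/*
 * Estimate the enable time as the programmed output voltage divided by
 * the ramp rate; the 3-bit RAMP field halves the rate per step, from
 * 32mV/us (field 0) down to 0.25mV/us (field 7).
 */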
static int max8649_enable_time(struct regulator_dev *rdev)
{
struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
int voltage, rate, ret;
unsigned int val;
/* get voltage */
ret = regmap_read(info->regmap, rdev->desc->vsel_reg, &val);
if (ret != 0)
return ret;
val &= MAX8649_VOL_MASK;
voltage = regulator_list_voltage_linear(rdev, (unsigned char)val);
/* get rate */
ret = regmap_read(info->regmap, MAX8649_RAMP, &val);
if (ret != 0)
return ret;
ret = (val & MAX8649_RAMP_MASK) >> 5;
rate = (32 * 1000) >> ret; /* uV/uS */
return DIV_ROUND_UP(voltage, rate);
}
static int max8649_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
switch (mode) {
case REGULATOR_MODE_FAST:
regmap_update_bits(info->regmap, rdev->desc->vsel_reg,
MAX8649_FORCE_PWM, MAX8649_FORCE_PWM);
break;
case REGULATOR_MODE_NORMAL:
regmap_update_bits(info->regmap, rdev->desc->vsel_reg,
MAX8649_FORCE_PWM, 0);
break;
default:
return -EINVAL;
}
return 0;
}
static unsigned int max8649_get_mode(struct regulator_dev *rdev)
{
struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
unsigned int val;
int ret;
ret = regmap_read(info->regmap, rdev->desc->vsel_reg, &val);
if (ret != 0)
return ret;
if (val & MAX8649_FORCE_PWM)
return REGULATOR_MODE_FAST;
return REGULATOR_MODE_NORMAL;
}
static const struct regulator_ops max8649_dcdc_ops = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
.enable_time = max8649_enable_time,
.set_mode = max8649_set_mode,
.get_mode = max8649_get_mode,
};
static struct regulator_desc dcdc_desc = {
.name = "max8649",
.ops = &max8649_dcdc_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = 1 << 6,
.owner = THIS_MODULE,
.vsel_mask = MAX8649_VOL_MASK,
.min_uV = MAX8649_DCDC_VMIN,
.uV_step = MAX8649_DCDC_STEP,
.enable_reg = MAX8649_CONTROL,
.enable_mask = MAX8649_EN_PD,
.enable_is_inverted = true,
};
static const struct regmap_config max8649_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
};
static int max8649_regulator_probe(struct i2c_client *client)
{
struct max8649_platform_data *pdata = dev_get_platdata(&client->dev);
struct max8649_regulator_info *info = NULL;
struct regulator_dev *regulator;
struct regulator_config config = { };
unsigned int val;
unsigned char data;
int ret;
info = devm_kzalloc(&client->dev, sizeof(struct max8649_regulator_info),
GFP_KERNEL);
if (!info)
return -ENOMEM;
info->regmap = devm_regmap_init_i2c(client, &max8649_regmap_config);
if (IS_ERR(info->regmap)) {
ret = PTR_ERR(info->regmap);
dev_err(&client->dev, "Failed to allocate register map: %d\n", ret);
return ret;
}
info->dev = &client->dev;
i2c_set_clientdata(client, info);
info->mode = pdata->mode;
switch (info->mode) {
case 0:
dcdc_desc.vsel_reg = MAX8649_MODE0;
break;
case 1:
dcdc_desc.vsel_reg = MAX8649_MODE1;
break;
case 2:
dcdc_desc.vsel_reg = MAX8649_MODE2;
break;
case 3:
dcdc_desc.vsel_reg = MAX8649_MODE3;
break;
default:
break;
}
ret = regmap_read(info->regmap, MAX8649_CHIP_ID1, &val);
if (ret != 0) {
dev_err(info->dev, "Failed to detect ID of MAX8649:%d\n",
ret);
return ret;
}
dev_info(info->dev, "Detected MAX8649 (ID:%x)\n", val);
/* enable VID0 & VID1 */
regmap_update_bits(info->regmap, MAX8649_CONTROL, MAX8649_VID_MASK, 0);
/* enable/disable external clock synchronization */
info->extclk = pdata->extclk;
data = (info->extclk) ? MAX8649_SYNC_EXTCLK : 0;
regmap_update_bits(info->regmap, dcdc_desc.vsel_reg,
MAX8649_SYNC_EXTCLK, data);
if (info->extclk) {
/* set external clock frequency */
info->extclk_freq = pdata->extclk_freq;
regmap_update_bits(info->regmap, MAX8649_SYNC, MAX8649_EXT_MASK,
info->extclk_freq << 6);
}
if (pdata->ramp_timing) {
info->ramp_timing = pdata->ramp_timing;
regmap_update_bits(info->regmap, MAX8649_RAMP, MAX8649_RAMP_MASK,
info->ramp_timing << 5);
}
info->ramp_down = pdata->ramp_down;
if (info->ramp_down) {
regmap_update_bits(info->regmap, MAX8649_RAMP, MAX8649_RAMP_DOWN,
MAX8649_RAMP_DOWN);
}
config.dev = &client->dev;
config.init_data = pdata->regulator;
config.driver_data = info;
config.regmap = info->regmap;
regulator = devm_regulator_register(&client->dev, &dcdc_desc,
&config);
if (IS_ERR(regulator)) {
dev_err(info->dev, "failed to register regulator %s\n",
dcdc_desc.name);
return PTR_ERR(regulator);
}
return 0;
}
static const struct i2c_device_id max8649_id[] = {
{ "max8649", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, max8649_id);
static struct i2c_driver max8649_driver = {
.probe = max8649_regulator_probe,
.driver = {
.name = "max8649",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.id_table = max8649_id,
};
static int __init max8649_init(void)
{
return i2c_add_driver(&max8649_driver);
}
subsys_initcall(max8649_init);
static void __exit max8649_exit(void)
{
i2c_del_driver(&max8649_driver);
}
module_exit(max8649_exit);
/* Module information */
MODULE_DESCRIPTION("MAXIM 8649 voltage regulator driver");
MODULE_AUTHOR("Haojian Zhuang <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/regulator/max8649.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Regulator driver for TI TPS6586x
*
* Copyright (C) 2010 Compulab Ltd.
* Author: Mike Rapoport <[email protected]>
*
* Based on da903x
* Copyright (C) 2006-2008 Marvell International Ltd.
* Copyright (C) 2008 Compulab Ltd.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <linux/mfd/tps6586x.h>
/* supply control and voltage setting */
#define TPS6586X_SUPPLYENA 0x10
#define TPS6586X_SUPPLYENB 0x11
#define TPS6586X_SUPPLYENC 0x12
#define TPS6586X_SUPPLYEND 0x13
#define TPS6586X_SUPPLYENE 0x14
#define TPS6586X_VCC1 0x20
#define TPS6586X_VCC2 0x21
#define TPS6586X_SM1V1 0x23
#define TPS6586X_SM1V2 0x24
#define TPS6586X_SM1SL 0x25
#define TPS6586X_SM0V1 0x26
#define TPS6586X_SM0V2 0x27
#define TPS6586X_SM0SL 0x28
#define TPS6586X_LDO2AV1 0x29
#define TPS6586X_LDO2AV2 0x2A
#define TPS6586X_LDO2BV1 0x2F
#define TPS6586X_LDO2BV2 0x30
#define TPS6586X_LDO4V1 0x32
#define TPS6586X_LDO4V2 0x33
/* converter settings */
#define TPS6586X_SUPPLYV1 0x41
#define TPS6586X_SUPPLYV2 0x42
#define TPS6586X_SUPPLYV3 0x43
#define TPS6586X_SUPPLYV4 0x44
#define TPS6586X_SUPPLYV5 0x45
#define TPS6586X_SUPPLYV6 0x46
#define TPS6586X_SMODE1 0x47
#define TPS6586X_SMODE2 0x48
struct tps6586x_regulator {
struct regulator_desc desc;
int enable_bit[2];
int enable_reg[2];
};
static const struct regulator_ops tps6586x_rw_regulator_ops = {
.list_voltage = regulator_list_voltage_table,
.map_voltage = regulator_map_voltage_ascend,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
};
static const struct regulator_ops tps6586x_rw_linear_regulator_ops = {
.list_voltage = regulator_list_voltage_linear,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
};
static const struct regulator_ops tps6586x_ro_regulator_ops = {
.list_voltage = regulator_list_voltage_table,
.map_voltage = regulator_map_voltage_ascend,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
};
static const struct regulator_ops tps6586x_sys_regulator_ops = {
};
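/*
 * The SYS rail exposes no controllable operations; it exists so that
 * LDO5 and LDO_RTC, which are supplied from "REG-SYS", can be wired to
 * it as their parent supply (see tps6586x_parse_regulator_dt()).
 */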
static const unsigned int tps6586x_ldo0_voltages[] = {
1200000, 1500000, 1800000, 2500000, 2700000, 2850000, 3100000, 3300000,
};
static const unsigned int tps6586x_ldo_voltages[] = {
1250000, 1500000, 1800000, 2500000, 2700000, 2850000, 3100000, 3300000,
};
static const unsigned int tps658640_rtc_voltages[] = {
2500000, 2850000, 3100000, 3300000,
};
#define TPS6586X_REGULATOR(_id, _ops, _pin_name, vdata, vreg, shift, nbits, \
ereg0, ebit0, ereg1, ebit1, goreg, gobit) \
.desc = { \
.supply_name = _pin_name, \
.name = "REG-" #_id, \
.ops = &tps6586x_## _ops ## _regulator_ops, \
.type = REGULATOR_VOLTAGE, \
.id = TPS6586X_ID_##_id, \
.n_voltages = ARRAY_SIZE(vdata##_voltages), \
.volt_table = vdata##_voltages, \
.owner = THIS_MODULE, \
.enable_reg = TPS6586X_SUPPLY##ereg0, \
.enable_mask = 1 << (ebit0), \
.vsel_reg = TPS6586X_##vreg, \
.vsel_mask = ((1 << (nbits)) - 1) << (shift), \
.apply_reg = (goreg), \
.apply_bit = (gobit), \
}, \
.enable_reg[0] = TPS6586X_SUPPLY##ereg0, \
.enable_bit[0] = (ebit0), \
.enable_reg[1] = TPS6586X_SUPPLY##ereg1, \
.enable_bit[1] = (ebit1),
#define TPS6586X_REGULATOR_LINEAR(_id, _ops, _pin_name, n_volt, min_uv, \
uv_step, vreg, shift, nbits, ereg0, \
ebit0, ereg1, ebit1, goreg, gobit) \
.desc = { \
.supply_name = _pin_name, \
.name = "REG-" #_id, \
.ops = &tps6586x_## _ops ## _regulator_ops, \
.type = REGULATOR_VOLTAGE, \
.id = TPS6586X_ID_##_id, \
.n_voltages = n_volt, \
.min_uV = min_uv, \
.uV_step = uv_step, \
.owner = THIS_MODULE, \
.enable_reg = TPS6586X_SUPPLY##ereg0, \
.enable_mask = 1 << (ebit0), \
.vsel_reg = TPS6586X_##vreg, \
.vsel_mask = ((1 << (nbits)) - 1) << (shift), \
.apply_reg = (goreg), \
.apply_bit = (gobit), \
}, \
.enable_reg[0] = TPS6586X_SUPPLY##ereg0, \
.enable_bit[0] = (ebit0), \
.enable_reg[1] = TPS6586X_SUPPLY##ereg1, \
.enable_bit[1] = (ebit1),
#define TPS6586X_LDO(_id, _pname, vdata, vreg, shift, nbits, \
ereg0, ebit0, ereg1, ebit1) \
{ \
TPS6586X_REGULATOR(_id, rw, _pname, vdata, vreg, shift, nbits, \
ereg0, ebit0, ereg1, ebit1, 0, 0) \
}
#define TPS6586X_LDO_LINEAR(_id, _pname, n_volt, min_uv, uv_step, vreg, \
shift, nbits, ereg0, ebit0, ereg1, ebit1) \
{ \
TPS6586X_REGULATOR_LINEAR(_id, rw_linear, _pname, n_volt, \
min_uv, uv_step, vreg, shift, nbits, \
ereg0, ebit0, ereg1, ebit1, 0, 0) \
}
#define TPS6586X_FIXED_LDO(_id, _pname, vdata, vreg, shift, nbits, \
ereg0, ebit0, ereg1, ebit1) \
{ \
TPS6586X_REGULATOR(_id, ro, _pname, vdata, vreg, shift, nbits, \
ereg0, ebit0, ereg1, ebit1, 0, 0) \
}
#define TPS6586X_DVM(_id, _pname, n_volt, min_uv, uv_step, vreg, shift, \
nbits, ereg0, ebit0, ereg1, ebit1, goreg, gobit) \
{ \
TPS6586X_REGULATOR_LINEAR(_id, rw_linear, _pname, n_volt, \
min_uv, uv_step, vreg, shift, nbits, \
ereg0, ebit0, ereg1, ebit1, goreg, \
gobit) \
}
#define TPS6586X_SYS_REGULATOR() \
{ \
.desc = { \
.supply_name = "sys", \
.name = "REG-SYS", \
.ops = &tps6586x_sys_regulator_ops, \
.type = REGULATOR_VOLTAGE, \
.id = TPS6586X_ID_SYS, \
.owner = THIS_MODULE, \
}, \
}
static struct tps6586x_regulator tps6586x_regulator[] = {
TPS6586X_SYS_REGULATOR(),
TPS6586X_LDO(LDO_0, "vinldo01", tps6586x_ldo0, SUPPLYV1, 5, 3, ENC, 0,
END, 0),
TPS6586X_LDO(LDO_3, "vinldo23", tps6586x_ldo, SUPPLYV4, 0, 3, ENC, 2,
END, 2),
TPS6586X_LDO(LDO_5, "REG-SYS", tps6586x_ldo, SUPPLYV6, 0, 3, ENE, 6,
ENE, 6),
TPS6586X_LDO(LDO_6, "vinldo678", tps6586x_ldo, SUPPLYV3, 0, 3, ENC, 4,
END, 4),
TPS6586X_LDO(LDO_7, "vinldo678", tps6586x_ldo, SUPPLYV3, 3, 3, ENC, 5,
END, 5),
TPS6586X_LDO(LDO_8, "vinldo678", tps6586x_ldo, SUPPLYV2, 5, 3, ENC, 6,
END, 6),
TPS6586X_LDO(LDO_9, "vinldo9", tps6586x_ldo, SUPPLYV6, 3, 3, ENE, 7,
ENE, 7),
TPS6586X_LDO(LDO_RTC, "REG-SYS", tps6586x_ldo, SUPPLYV4, 3, 3, V4, 7,
V4, 7),
TPS6586X_LDO_LINEAR(LDO_1, "vinldo01", 32, 725000, 25000, SUPPLYV1,
0, 5, ENC, 1, END, 1),
TPS6586X_LDO_LINEAR(SM_2, "vin-sm2", 32, 3000000, 50000, SUPPLYV2,
0, 5, ENC, 7, END, 7),
TPS6586X_DVM(LDO_2, "vinldo23", 32, 725000, 25000, LDO2BV1, 0, 5,
ENA, 3, ENB, 3, TPS6586X_VCC2, BIT(6)),
TPS6586X_DVM(LDO_4, "vinldo4", 32, 1700000, 25000, LDO4V1, 0, 5,
ENC, 3, END, 3, TPS6586X_VCC1, BIT(6)),
TPS6586X_DVM(SM_0, "vin-sm0", 32, 725000, 25000, SM0V1, 0, 5,
ENA, 1, ENB, 1, TPS6586X_VCC1, BIT(2)),
TPS6586X_DVM(SM_1, "vin-sm1", 32, 725000, 25000, SM1V1, 0, 5,
ENA, 0, ENB, 0, TPS6586X_VCC1, BIT(0)),
};
static struct tps6586x_regulator tps658623_regulator[] = {
TPS6586X_LDO_LINEAR(SM_2, "vin-sm2", 32, 1700000, 25000, SUPPLYV2,
0, 5, ENC, 7, END, 7),
};
static struct tps6586x_regulator tps658640_regulator[] = {
TPS6586X_LDO(LDO_3, "vinldo23", tps6586x_ldo0, SUPPLYV4, 0, 3,
ENC, 2, END, 2),
TPS6586X_LDO(LDO_5, "REG-SYS", tps6586x_ldo0, SUPPLYV6, 0, 3,
ENE, 6, ENE, 6),
TPS6586X_LDO(LDO_6, "vinldo678", tps6586x_ldo0, SUPPLYV3, 0, 3,
ENC, 4, END, 4),
TPS6586X_LDO(LDO_7, "vinldo678", tps6586x_ldo0, SUPPLYV3, 3, 3,
ENC, 5, END, 5),
TPS6586X_LDO(LDO_8, "vinldo678", tps6586x_ldo0, SUPPLYV2, 5, 3,
ENC, 6, END, 6),
TPS6586X_LDO(LDO_9, "vinldo9", tps6586x_ldo0, SUPPLYV6, 3, 3,
ENE, 7, ENE, 7),
TPS6586X_LDO_LINEAR(SM_2, "vin-sm2", 32, 2150000, 50000, SUPPLYV2,
0, 5, ENC, 7, END, 7),
TPS6586X_FIXED_LDO(LDO_RTC, "REG-SYS", tps658640_rtc, SUPPLYV4, 3, 2,
V4, 7, V4, 7),
};
static struct tps6586x_regulator tps658643_regulator[] = {
TPS6586X_LDO_LINEAR(SM_2, "vin-sm2", 32, 1025000, 25000, SUPPLYV2,
0, 5, ENC, 7, END, 7),
};
/*
* TPS6586X has 2 enable bits that are OR'ed to determine the actual
* regulator state. Clearing one of these bits allows switching the
* regulator on and off with a single register write.
*/
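/*
 * For example, SM1 is enabled via bit 0 of both SUPPLYENA and SUPPLYENB
 * (see tps6586x_regulator[] above); preinit leaves only the SUPPLYENA
 * bit in use so later on/off toggling touches a single register.
 */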
static inline int tps6586x_regulator_preinit(struct device *parent,
struct tps6586x_regulator *ri)
{
uint8_t val1, val2;
int ret;
if (ri->enable_reg[0] == ri->enable_reg[1] &&
ri->enable_bit[0] == ri->enable_bit[1])
return 0;
ret = tps6586x_read(parent, ri->enable_reg[0], &val1);
if (ret)
return ret;
ret = tps6586x_read(parent, ri->enable_reg[1], &val2);
if (ret)
return ret;
if (!(val2 & (1 << ri->enable_bit[1])))
return 0;
/*
* The regulator is on, but it's enabled with the bit we don't
* want to use, so we switch the enable bits
*/
if (!(val1 & (1 << ri->enable_bit[0]))) {
ret = tps6586x_set_bits(parent, ri->enable_reg[0],
1 << ri->enable_bit[0]);
if (ret)
return ret;
}
return tps6586x_clr_bits(parent, ri->enable_reg[1],
1 << ri->enable_bit[1]);
}
static int tps6586x_regulator_set_slew_rate(struct platform_device *pdev,
int id, struct regulator_init_data *p)
{
struct device *parent = pdev->dev.parent;
struct tps6586x_settings *setting = p->driver_data;
uint8_t reg;
if (setting == NULL)
return 0;
if (!(setting->slew_rate & TPS6586X_SLEW_RATE_SET))
return 0;
/* only SM0 and SM1 can have the slew rate settings */
switch (id) {
case TPS6586X_ID_SM_0:
reg = TPS6586X_SM0SL;
break;
case TPS6586X_ID_SM_1:
reg = TPS6586X_SM1SL;
break;
default:
dev_err(&pdev->dev, "Only SM0/SM1 can set slew rate\n");
return -EINVAL;
}
return tps6586x_write(parent, reg,
setting->slew_rate & TPS6586X_SLEW_RATE_MASK);
}
static struct tps6586x_regulator *find_regulator_info(int id, int version)
{
struct tps6586x_regulator *ri;
struct tps6586x_regulator *table = NULL;
int num;
int i;
switch (version) {
case TPS658623:
case TPS658624:
table = tps658623_regulator;
num = ARRAY_SIZE(tps658623_regulator);
break;
case TPS658640:
case TPS658640v2:
table = tps658640_regulator;
num = ARRAY_SIZE(tps658640_regulator);
break;
case TPS658643:
table = tps658643_regulator;
num = ARRAY_SIZE(tps658643_regulator);
break;
}
/* Search version specific table first */
if (table) {
for (i = 0; i < num; i++) {
ri = &table[i];
if (ri->desc.id == id)
return ri;
}
}
for (i = 0; i < ARRAY_SIZE(tps6586x_regulator); i++) {
ri = &tps6586x_regulator[i];
if (ri->desc.id == id)
return ri;
}
return NULL;
}
#ifdef CONFIG_OF
static struct of_regulator_match tps6586x_matches[] = {
{ .name = "sys", .driver_data = (void *)TPS6586X_ID_SYS },
{ .name = "sm0", .driver_data = (void *)TPS6586X_ID_SM_0 },
{ .name = "sm1", .driver_data = (void *)TPS6586X_ID_SM_1 },
{ .name = "sm2", .driver_data = (void *)TPS6586X_ID_SM_2 },
{ .name = "ldo0", .driver_data = (void *)TPS6586X_ID_LDO_0 },
{ .name = "ldo1", .driver_data = (void *)TPS6586X_ID_LDO_1 },
{ .name = "ldo2", .driver_data = (void *)TPS6586X_ID_LDO_2 },
{ .name = "ldo3", .driver_data = (void *)TPS6586X_ID_LDO_3 },
{ .name = "ldo4", .driver_data = (void *)TPS6586X_ID_LDO_4 },
{ .name = "ldo5", .driver_data = (void *)TPS6586X_ID_LDO_5 },
{ .name = "ldo6", .driver_data = (void *)TPS6586X_ID_LDO_6 },
{ .name = "ldo7", .driver_data = (void *)TPS6586X_ID_LDO_7 },
{ .name = "ldo8", .driver_data = (void *)TPS6586X_ID_LDO_8 },
{ .name = "ldo9", .driver_data = (void *)TPS6586X_ID_LDO_9 },
{ .name = "ldo_rtc", .driver_data = (void *)TPS6586X_ID_LDO_RTC },
};
static struct tps6586x_platform_data *tps6586x_parse_regulator_dt(
struct platform_device *pdev,
struct of_regulator_match **tps6586x_reg_matches)
{
const unsigned int num = ARRAY_SIZE(tps6586x_matches);
struct device_node *np = pdev->dev.parent->of_node;
struct device_node *regs;
const char *sys_rail = NULL;
unsigned int i;
struct tps6586x_platform_data *pdata;
int err;
regs = of_get_child_by_name(np, "regulators");
if (!regs) {
dev_err(&pdev->dev, "regulator node not found\n");
return NULL;
}
err = of_regulator_match(&pdev->dev, regs, tps6586x_matches, num);
of_node_put(regs);
if (err < 0) {
dev_err(&pdev->dev, "Regulator match failed, e %d\n", err);
return NULL;
}
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return NULL;
for (i = 0; i < num; i++) {
uintptr_t id;
if (!tps6586x_matches[i].init_data)
continue;
pdata->reg_init_data[i] = tps6586x_matches[i].init_data;
id = (uintptr_t)tps6586x_matches[i].driver_data;
if (id == TPS6586X_ID_SYS)
sys_rail = pdata->reg_init_data[i]->constraints.name;
if ((id == TPS6586X_ID_LDO_5) || (id == TPS6586X_ID_LDO_RTC))
pdata->reg_init_data[i]->supply_regulator = sys_rail;
}
*tps6586x_reg_matches = tps6586x_matches;
return pdata;
}
#else
static struct tps6586x_platform_data *tps6586x_parse_regulator_dt(
struct platform_device *pdev,
struct of_regulator_match **tps6586x_reg_matches)
{
*tps6586x_reg_matches = NULL;
return NULL;
}
#endif
static int tps6586x_regulator_probe(struct platform_device *pdev)
{
struct tps6586x_regulator *ri = NULL;
struct regulator_config config = { };
struct regulator_dev *rdev;
struct regulator_init_data *reg_data;
struct tps6586x_platform_data *pdata;
struct of_regulator_match *tps6586x_reg_matches = NULL;
int version;
int id;
int err;
dev_dbg(&pdev->dev, "Probing regulator\n");
pdata = dev_get_platdata(pdev->dev.parent);
if ((!pdata) && (pdev->dev.parent->of_node))
pdata = tps6586x_parse_regulator_dt(pdev,
&tps6586x_reg_matches);
if (!pdata) {
dev_err(&pdev->dev, "Platform data not available, exiting\n");
return -ENODEV;
}
version = tps6586x_get_version(pdev->dev.parent);
for (id = 0; id < TPS6586X_ID_MAX_REGULATOR; ++id) {
reg_data = pdata->reg_init_data[id];
ri = find_regulator_info(id, version);
if (!ri) {
dev_err(&pdev->dev, "invalid regulator ID specified\n");
return -EINVAL;
}
err = tps6586x_regulator_preinit(pdev->dev.parent, ri);
if (err) {
dev_err(&pdev->dev,
"regulator %d preinit failed, e %d\n", id, err);
return err;
}
config.dev = pdev->dev.parent;
config.init_data = reg_data;
config.driver_data = ri;
if (tps6586x_reg_matches)
config.of_node = tps6586x_reg_matches[id].of_node;
rdev = devm_regulator_register(&pdev->dev, &ri->desc, &config);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
ri->desc.name);
return PTR_ERR(rdev);
}
if (reg_data) {
err = tps6586x_regulator_set_slew_rate(pdev, id,
reg_data);
if (err < 0) {
dev_err(&pdev->dev,
"Slew rate config failed, e %d\n", err);
return err;
}
}
}
platform_set_drvdata(pdev, rdev);
return 0;
}
static struct platform_driver tps6586x_regulator_driver = {
.driver = {
.name = "tps6586x-regulator",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = tps6586x_regulator_probe,
};
static int __init tps6586x_regulator_init(void)
{
return platform_driver_register(&tps6586x_regulator_driver);
}
subsys_initcall(tps6586x_regulator_init);
static void __exit tps6586x_regulator_exit(void)
{
platform_driver_unregister(&tps6586x_regulator_driver);
}
module_exit(tps6586x_regulator_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mike Rapoport <[email protected]>");
MODULE_DESCRIPTION("Regulator Driver for TI TPS6586X PMIC");
MODULE_ALIAS("platform:tps6586x-regulator");
| linux-master | drivers/regulator/tps6586x-regulator.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Regulator driver for LP87565 PMIC
*
* Copyright (C) 2017 Texas Instruments Incorporated - https://www.ti.com/
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/mfd/lp87565.h>
enum LP87565_regulator_id {
/* BUCK's */
LP87565_BUCK_0,
LP87565_BUCK_1,
LP87565_BUCK_2,
LP87565_BUCK_3,
LP87565_BUCK_10,
LP87565_BUCK_23,
LP87565_BUCK_3210,
};
#define LP87565_REGULATOR(_name, _id, _of, _ops, _n, _vr, _vm, \
_er, _em, _ev, _delay, _lr, _cr) \
[_id] = { \
.desc = { \
.name = _name, \
.supply_name = _of "-in", \
.id = _id, \
.of_match = _of, \
.regulators_node = "regulators", \
.ops = &_ops, \
.n_voltages = _n, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.vsel_reg = _vr, \
.vsel_mask = _vm, \
.enable_reg = _er, \
.enable_mask = _em, \
.enable_val = _ev, \
.ramp_delay = _delay, \
.linear_ranges = _lr, \
.n_linear_ranges = ARRAY_SIZE(_lr), \
.curr_table = lp87565_buck_uA, \
.n_current_limits = ARRAY_SIZE(lp87565_buck_uA),\
.csel_reg = (_cr), \
.csel_mask = LP87565_BUCK_CTRL_2_ILIM, \
}, \
.ctrl2_reg = _cr, \
}
struct lp87565_regulator {
struct regulator_desc desc;
unsigned int ctrl2_reg;
};
static const struct lp87565_regulator regulators[];
static const struct linear_range buck0_1_2_3_ranges[] = {
REGULATOR_LINEAR_RANGE(600000, 0xA, 0x17, 10000),
REGULATOR_LINEAR_RANGE(735000, 0x18, 0x9d, 5000),
REGULATOR_LINEAR_RANGE(1420000, 0x9e, 0xff, 20000),
};
static const unsigned int lp87565_buck_ramp_delay[] = {
30000, 15000, 10000, 7500, 3800, 1900, 940, 470
};
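/*
 * Note: the 3230 uV/us ramp_delay used in the descriptors below appears
 * to be the 3800 uV/us table entry derated by the same 15% margin as in
 * lp87565_buck_set_ramp_delay() (3800 * 85 / 100 = 3230).
 */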
/* LP87565 BUCK current limit */
static const unsigned int lp87565_buck_uA[] = {
1500000, 2000000, 2500000, 3000000, 3500000, 4000000, 4500000, 5000000,
};
static int lp87565_buck_set_ramp_delay(struct regulator_dev *rdev,
int ramp_delay)
{
int id = rdev_get_id(rdev);
unsigned int reg;
int ret;
if (ramp_delay <= 470)
reg = 7;
else if (ramp_delay <= 940)
reg = 6;
else if (ramp_delay <= 1900)
reg = 5;
else if (ramp_delay <= 3800)
reg = 4;
else if (ramp_delay <= 7500)
reg = 3;
else if (ramp_delay <= 10000)
reg = 2;
else if (ramp_delay <= 15000)
reg = 1;
else
reg = 0;
ret = regmap_update_bits(rdev->regmap, regulators[id].ctrl2_reg,
LP87565_BUCK_CTRL_2_SLEW_RATE,
reg << __ffs(LP87565_BUCK_CTRL_2_SLEW_RATE));
if (ret) {
dev_err(&rdev->dev, "SLEW RATE write failed: %d\n", ret);
return ret;
}
rdev->constraints->ramp_delay = lp87565_buck_ramp_delay[reg];
/* Conservatively give a 15% margin */
rdev->constraints->ramp_delay =
rdev->constraints->ramp_delay * 85 / 100;
return 0;
}
/* Operations permitted on BUCKs */
static const struct regulator_ops lp87565_buck_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear_range,
.map_voltage = regulator_map_voltage_linear_range,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
.set_ramp_delay = lp87565_buck_set_ramp_delay,
.set_current_limit = regulator_set_current_limit_regmap,
.get_current_limit = regulator_get_current_limit_regmap,
};
static const struct lp87565_regulator regulators[] = {
LP87565_REGULATOR("BUCK0", LP87565_BUCK_0, "buck0", lp87565_buck_ops,
256, LP87565_REG_BUCK0_VOUT, LP87565_BUCK_VSET,
LP87565_REG_BUCK0_CTRL_1,
LP87565_BUCK_CTRL_1_EN |
LP87565_BUCK_CTRL_1_EN_PIN_CTRL,
LP87565_BUCK_CTRL_1_EN, 3230,
buck0_1_2_3_ranges, LP87565_REG_BUCK0_CTRL_2),
LP87565_REGULATOR("BUCK1", LP87565_BUCK_1, "buck1", lp87565_buck_ops,
256, LP87565_REG_BUCK1_VOUT, LP87565_BUCK_VSET,
LP87565_REG_BUCK1_CTRL_1,
LP87565_BUCK_CTRL_1_EN |
LP87565_BUCK_CTRL_1_EN_PIN_CTRL,
LP87565_BUCK_CTRL_1_EN, 3230,
buck0_1_2_3_ranges, LP87565_REG_BUCK1_CTRL_2),
LP87565_REGULATOR("BUCK2", LP87565_BUCK_2, "buck2", lp87565_buck_ops,
256, LP87565_REG_BUCK2_VOUT, LP87565_BUCK_VSET,
LP87565_REG_BUCK2_CTRL_1,
LP87565_BUCK_CTRL_1_EN |
LP87565_BUCK_CTRL_1_EN_PIN_CTRL,
LP87565_BUCK_CTRL_1_EN, 3230,
buck0_1_2_3_ranges, LP87565_REG_BUCK2_CTRL_2),
LP87565_REGULATOR("BUCK3", LP87565_BUCK_3, "buck3", lp87565_buck_ops,
256, LP87565_REG_BUCK3_VOUT, LP87565_BUCK_VSET,
LP87565_REG_BUCK3_CTRL_1,
LP87565_BUCK_CTRL_1_EN |
LP87565_BUCK_CTRL_1_EN_PIN_CTRL,
LP87565_BUCK_CTRL_1_EN, 3230,
buck0_1_2_3_ranges, LP87565_REG_BUCK3_CTRL_2),
LP87565_REGULATOR("BUCK10", LP87565_BUCK_10, "buck10", lp87565_buck_ops,
256, LP87565_REG_BUCK0_VOUT, LP87565_BUCK_VSET,
LP87565_REG_BUCK0_CTRL_1,
LP87565_BUCK_CTRL_1_EN |
LP87565_BUCK_CTRL_1_EN_PIN_CTRL |
LP87565_BUCK_CTRL_1_FPWM_MP_0_2,
LP87565_BUCK_CTRL_1_EN |
LP87565_BUCK_CTRL_1_FPWM_MP_0_2, 3230,
buck0_1_2_3_ranges, LP87565_REG_BUCK0_CTRL_2),
LP87565_REGULATOR("BUCK23", LP87565_BUCK_23, "buck23", lp87565_buck_ops,
256, LP87565_REG_BUCK2_VOUT, LP87565_BUCK_VSET,
LP87565_REG_BUCK2_CTRL_1,
LP87565_BUCK_CTRL_1_EN |
LP87565_BUCK_CTRL_1_EN_PIN_CTRL,
LP87565_BUCK_CTRL_1_EN, 3230,
buck0_1_2_3_ranges, LP87565_REG_BUCK2_CTRL_2),
LP87565_REGULATOR("BUCK3210", LP87565_BUCK_3210, "buck3210",
lp87565_buck_ops, 256, LP87565_REG_BUCK0_VOUT,
LP87565_BUCK_VSET, LP87565_REG_BUCK0_CTRL_1,
LP87565_BUCK_CTRL_1_EN |
LP87565_BUCK_CTRL_1_EN_PIN_CTRL |
LP87565_BUCK_CTRL_1_FPWM_MP_0_2,
LP87565_BUCK_CTRL_1_EN |
LP87565_BUCK_CTRL_1_FPWM_MP_0_2, 3230,
buck0_1_2_3_ranges, LP87565_REG_BUCK0_CTRL_2),
};
static int lp87565_regulator_probe(struct platform_device *pdev)
{
struct lp87565 *lp87565 = dev_get_drvdata(pdev->dev.parent);
struct regulator_config config = { };
struct regulator_dev *rdev;
int i, min_idx, max_idx;
platform_set_drvdata(pdev, lp87565);
config.dev = &pdev->dev;
config.dev->of_node = lp87565->dev->of_node;
config.driver_data = lp87565;
config.regmap = lp87565->regmap;
switch (lp87565->dev_type) {
case LP87565_DEVICE_TYPE_LP87565_Q1:
min_idx = LP87565_BUCK_10;
max_idx = LP87565_BUCK_23;
break;
case LP87565_DEVICE_TYPE_LP87561_Q1:
min_idx = LP87565_BUCK_3210;
max_idx = LP87565_BUCK_3210;
break;
default:
min_idx = LP87565_BUCK_0;
max_idx = LP87565_BUCK_3;
break;
}
for (i = min_idx; i <= max_idx; i++) {
rdev = devm_regulator_register(&pdev->dev, ®ulators[i].desc,
&config);
if (IS_ERR(rdev)) {
dev_err(lp87565->dev, "failed to register %s regulator\n",
pdev->name);
return PTR_ERR(rdev);
}
}
return 0;
}
static const struct platform_device_id lp87565_regulator_id_table[] = {
{ "lp87565-regulator", },
{ "lp87565-q1-regulator", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, lp87565_regulator_id_table);
static struct platform_driver lp87565_regulator_driver = {
.driver = {
.name = "lp87565-pmic",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = lp87565_regulator_probe,
.id_table = lp87565_regulator_id_table,
};
module_platform_driver(lp87565_regulator_driver);
MODULE_AUTHOR("J Keerthy <[email protected]>");
MODULE_DESCRIPTION("LP87565 voltage regulator driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/regulator/lp87565-regulator.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/
*
* Author: Andrew F. Davis <[email protected]>
*
* Based on the TPS65912 driver
*/
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/mfd/tps65086.h>
enum tps65086_regulators { BUCK1, BUCK2, BUCK3, BUCK4, BUCK5, BUCK6, LDOA1,
LDOA2, LDOA3, VTT, SWA1, SWB1, SWB2 };
/* Selector for regulator configuration based on PMIC chip ID. */
enum tps65086_ids {
TPS6508640 = 0,
TPS65086401,
TPS6508641,
TPS65086470,
};
#define TPS65086_REGULATOR(_name, _of, _id, _nv, _vr, _vm, _er, _em, _lr, _dr, _dm) \
[_id] = { \
.desc = { \
.name = _name, \
.of_match = of_match_ptr(_of), \
.regulators_node = "regulators", \
.of_parse_cb = tps65086_of_parse_cb, \
.id = _id, \
.ops = ®_ops, \
.n_voltages = _nv, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.vsel_reg = _vr, \
.vsel_mask = _vm, \
.enable_reg = _er, \
.enable_mask = _em, \
.volt_table = NULL, \
.linear_ranges = _lr, \
.n_linear_ranges = ARRAY_SIZE(_lr), \
}, \
.decay_reg = _dr, \
.decay_mask = _dm, \
}
#define TPS65086_SWITCH(_name, _of, _id, _er, _em) \
[_id] = { \
.desc = { \
.name = _name, \
.of_match = of_match_ptr(_of), \
.regulators_node = "regulators", \
.of_parse_cb = tps65086_of_parse_cb, \
.id = _id, \
.ops = &switch_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.enable_reg = _er, \
.enable_mask = _em, \
}, \
}
#define TPS65086_REGULATOR_CONFIG(_chip_id, _config) \
[_chip_id] = { \
.config = _config, \
.num_elems = ARRAY_SIZE(_config), \
}
struct tps65086_regulator {
struct regulator_desc desc;
unsigned int decay_reg;
unsigned int decay_mask;
};
struct tps65086_regulator_config {
struct tps65086_regulator * const config;
const unsigned int num_elems;
};
static const struct linear_range tps65086_10mv_ranges[] = {
REGULATOR_LINEAR_RANGE(0, 0x0, 0x0, 0),
REGULATOR_LINEAR_RANGE(410000, 0x1, 0x7F, 10000),
};
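/* e.g. selector 0x7F decodes to 410000 + (0x7F - 0x01) * 10000 = 1670000 uV */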
static const struct linear_range tps65086_buck126_25mv_ranges[] = {
REGULATOR_LINEAR_RANGE(0, 0x0, 0x0, 0),
REGULATOR_LINEAR_RANGE(1000000, 0x1, 0x18, 0),
REGULATOR_LINEAR_RANGE(1025000, 0x19, 0x7F, 25000),
};
static const struct linear_range tps65086_buck345_25mv_ranges[] = {
REGULATOR_LINEAR_RANGE(0, 0x0, 0x0, 0),
REGULATOR_LINEAR_RANGE(425000, 0x1, 0x7F, 25000),
};
static const struct linear_range tps65086_ldoa1_ranges[] = {
REGULATOR_LINEAR_RANGE(1350000, 0x0, 0x0, 0),
REGULATOR_LINEAR_RANGE(1500000, 0x1, 0x7, 100000),
REGULATOR_LINEAR_RANGE(2300000, 0x8, 0xB, 100000),
REGULATOR_LINEAR_RANGE(2850000, 0xC, 0xD, 150000),
REGULATOR_LINEAR_RANGE(3300000, 0xE, 0xE, 0),
};
static const struct linear_range tps65086_ldoa23_ranges[] = {
REGULATOR_LINEAR_RANGE(700000, 0x0, 0xD, 50000),
REGULATOR_LINEAR_RANGE(1400000, 0xE, 0xF, 100000),
};
/* Operations permitted on regulators */
static const struct regulator_ops reg_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.map_voltage = regulator_map_voltage_linear_range,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear_range,
};
/* Operations permitted on load switches */
static const struct regulator_ops switch_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
};
static int tps65086_of_parse_cb(struct device_node *dev,
const struct regulator_desc *desc,
struct regulator_config *config);
static struct tps65086_regulator tps6508640_regulator_config[] = {
TPS65086_REGULATOR("BUCK1", "buck1", BUCK1, 0x80, TPS65086_BUCK1CTRL,
BUCK_VID_MASK, TPS65086_BUCK123CTRL, BIT(0),
tps65086_10mv_ranges, TPS65086_BUCK1CTRL,
BIT(0)),
TPS65086_REGULATOR("BUCK2", "buck2", BUCK2, 0x80, TPS65086_BUCK2CTRL,
BUCK_VID_MASK, TPS65086_BUCK123CTRL, BIT(1),
tps65086_10mv_ranges, TPS65086_BUCK2CTRL,
BIT(0)),
TPS65086_REGULATOR("BUCK3", "buck3", BUCK3, 0x80, TPS65086_BUCK3VID,
BUCK_VID_MASK, TPS65086_BUCK123CTRL, BIT(2),
tps65086_10mv_ranges, TPS65086_BUCK3DECAY,
BIT(0)),
TPS65086_REGULATOR("BUCK4", "buck4", BUCK4, 0x80, TPS65086_BUCK4VID,
BUCK_VID_MASK, TPS65086_BUCK4CTRL, BIT(0),
tps65086_10mv_ranges, TPS65086_BUCK4VID,
BIT(0)),
TPS65086_REGULATOR("BUCK5", "buck5", BUCK5, 0x80, TPS65086_BUCK5VID,
BUCK_VID_MASK, TPS65086_BUCK5CTRL, BIT(0),
tps65086_10mv_ranges, TPS65086_BUCK5CTRL,
BIT(0)),
TPS65086_REGULATOR("BUCK6", "buck6", BUCK6, 0x80, TPS65086_BUCK6VID,
BUCK_VID_MASK, TPS65086_BUCK6CTRL, BIT(0),
tps65086_10mv_ranges, TPS65086_BUCK6CTRL,
BIT(0)),
TPS65086_REGULATOR("LDOA1", "ldoa1", LDOA1, 0xF, TPS65086_LDOA1CTRL,
VDOA1_VID_MASK, TPS65086_SWVTT_EN, BIT(7),
tps65086_ldoa1_ranges, 0, 0),
TPS65086_REGULATOR("LDOA2", "ldoa2", LDOA2, 0x10, TPS65086_LDOA2VID,
VDOA23_VID_MASK, TPS65086_LDOA2CTRL, BIT(0),
tps65086_ldoa23_ranges, 0, 0),
TPS65086_REGULATOR("LDOA3", "ldoa3", LDOA3, 0x10, TPS65086_LDOA3VID,
VDOA23_VID_MASK, TPS65086_LDOA3CTRL, BIT(0),
tps65086_ldoa23_ranges, 0, 0),
TPS65086_SWITCH("VTT", "vtt", VTT, TPS65086_SWVTT_EN, BIT(4)),
TPS65086_SWITCH("SWA1", "swa1", SWA1, TPS65086_SWVTT_EN, BIT(5)),
TPS65086_SWITCH("SWB1", "swb1", SWB1, TPS65086_SWVTT_EN, BIT(6)),
TPS65086_SWITCH("SWB2", "swb2", SWB2, TPS65086_LDOA1CTRL, BIT(0)),
};
static struct tps65086_regulator tps65086401_regulator_config[] = {
TPS65086_REGULATOR("BUCK1", "buck1", BUCK1, 0x80, TPS65086_BUCK1CTRL,
BUCK_VID_MASK, TPS65086_BUCK123CTRL, BIT(0),
tps65086_10mv_ranges, TPS65086_BUCK1CTRL,
BIT(0)),
TPS65086_REGULATOR("BUCK2", "buck2", BUCK2, 0x80, TPS65086_BUCK2CTRL,
BUCK_VID_MASK, TPS65086_BUCK123CTRL, BIT(1),
tps65086_10mv_ranges, TPS65086_BUCK2CTRL,
BIT(0)),
TPS65086_REGULATOR("BUCK3", "buck3", BUCK3, 0x80, TPS65086_BUCK3VID,
BUCK_VID_MASK, TPS65086_BUCK123CTRL, BIT(2),
tps65086_10mv_ranges, TPS65086_BUCK3DECAY,
BIT(0)),
TPS65086_REGULATOR("BUCK4", "buck4", BUCK4, 0x80, TPS65086_BUCK4VID,
BUCK_VID_MASK, TPS65086_BUCK4CTRL, BIT(0),
tps65086_10mv_ranges, TPS65086_BUCK4VID,
BIT(0)),
TPS65086_REGULATOR("BUCK5", "buck5", BUCK5, 0x80, TPS65086_BUCK5VID,
BUCK_VID_MASK, TPS65086_BUCK5CTRL, BIT(0),
tps65086_10mv_ranges, TPS65086_BUCK5CTRL,
BIT(0)),
TPS65086_REGULATOR("BUCK6", "buck6", BUCK6, 0x80, TPS65086_BUCK6VID,
BUCK_VID_MASK, TPS65086_BUCK6CTRL, BIT(0),
tps65086_10mv_ranges, TPS65086_BUCK6CTRL,
BIT(0)),
TPS65086_REGULATOR("LDOA1", "ldoa1", LDOA1, 0xF, TPS65086_LDOA1CTRL,
VDOA1_VID_MASK, TPS65086_SWVTT_EN, BIT(7),
tps65086_ldoa1_ranges, 0, 0),
TPS65086_REGULATOR("LDOA2", "ldoa2", LDOA2, 0x10, TPS65086_LDOA2VID,
VDOA23_VID_MASK, TPS65086_LDOA2CTRL, BIT(0),
tps65086_ldoa23_ranges, 0, 0),
TPS65086_REGULATOR("LDOA3", "ldoa3", LDOA3, 0x10, TPS65086_LDOA3VID,
VDOA23_VID_MASK, TPS65086_LDOA3CTRL, BIT(0),
tps65086_ldoa23_ranges, 0, 0),
TPS65086_SWITCH("VTT", "vtt", VTT, TPS65086_SWVTT_EN, BIT(4)),
TPS65086_SWITCH("SWA1", "swa1", SWA1, TPS65086_SWVTT_EN, BIT(5)),
TPS65086_SWITCH("SWB1", "swb1", SWB1, TPS65086_SWVTT_EN, BIT(6)),
};
static struct tps65086_regulator tps6508641_regulator_config[] = {
TPS65086_REGULATOR("BUCK1", "buck1", BUCK1, 0x80, TPS65086_BUCK1CTRL,
BUCK_VID_MASK, TPS65086_BUCK123CTRL, BIT(0),
tps65086_10mv_ranges, TPS65086_BUCK1CTRL,
BIT(0)),
TPS65086_REGULATOR("BUCK2", "buck2", BUCK2, 0x80, TPS65086_BUCK2CTRL,
BUCK_VID_MASK, TPS65086_BUCK123CTRL, BIT(1),
tps65086_10mv_ranges, TPS65086_BUCK2CTRL,
BIT(0)),
TPS65086_REGULATOR("BUCK3", "buck3", BUCK3, 0x80, TPS65086_BUCK3VID,
BUCK_VID_MASK, TPS65086_BUCK123CTRL, BIT(2),
tps65086_10mv_ranges, TPS65086_BUCK3DECAY,
BIT(0)),
TPS65086_REGULATOR("BUCK4", "buck4", BUCK4, 0x80, TPS65086_BUCK4VID,
BUCK_VID_MASK, TPS65086_BUCK4CTRL, BIT(0),
tps65086_10mv_ranges, TPS65086_BUCK4VID,
BIT(0)),
TPS65086_REGULATOR("BUCK5", "buck5", BUCK5, 0x80, TPS65086_BUCK5VID,
BUCK_VID_MASK, TPS65086_BUCK5CTRL, BIT(0),
tps65086_10mv_ranges, TPS65086_BUCK5CTRL,
BIT(0)),
TPS65086_REGULATOR("BUCK6", "buck6", BUCK6, 0x80, TPS65086_BUCK6VID,
BUCK_VID_MASK, TPS65086_BUCK6CTRL, BIT(0),
tps65086_10mv_ranges, TPS65086_BUCK6CTRL,
BIT(0)),
TPS65086_REGULATOR("LDOA1", "ldoa1", LDOA1, 0xF, TPS65086_LDOA1CTRL,
VDOA1_VID_MASK, TPS65086_SWVTT_EN, BIT(7),
tps65086_ldoa1_ranges, 0, 0),
TPS65086_REGULATOR("LDOA2", "ldoa2", LDOA2, 0x10, TPS65086_LDOA2VID,
VDOA23_VID_MASK, TPS65086_LDOA2CTRL, BIT(0),
tps65086_ldoa23_ranges, 0, 0),
TPS65086_REGULATOR("LDOA3", "ldoa3", LDOA3, 0x10, TPS65086_LDOA3VID,
VDOA23_VID_MASK, TPS65086_LDOA3CTRL, BIT(0),
tps65086_ldoa23_ranges, 0, 0),
TPS65086_SWITCH("VTT", "vtt", VTT, TPS65086_SWVTT_EN, BIT(4)),
TPS65086_SWITCH("SWA1", "swa1", SWA1, TPS65086_SWVTT_EN, BIT(5)),
TPS65086_SWITCH("SWB1", "swb1", SWB1, TPS65086_SWVTT_EN, BIT(6)),
};
static struct tps65086_regulator tps65086470_regulator_config[] = {
TPS65086_REGULATOR("BUCK1", "buck1", BUCK1, 0x80, TPS65086_BUCK1CTRL,
BUCK_VID_MASK, TPS65086_BUCK123CTRL, BIT(0),
tps65086_10mv_ranges, TPS65086_BUCK1CTRL,
BIT(0)),
TPS65086_REGULATOR("BUCK2", "buck2", BUCK2, 0x80, TPS65086_BUCK2CTRL,
BUCK_VID_MASK, TPS65086_BUCK123CTRL, BIT(1),
tps65086_10mv_ranges, TPS65086_BUCK2CTRL,
BIT(0)),
TPS65086_REGULATOR("BUCK3", "buck3", BUCK3, 0x80, TPS65086_BUCK3VID,
BUCK_VID_MASK, TPS65086_BUCK123CTRL, BIT(2),
tps65086_10mv_ranges, TPS65086_BUCK3DECAY,
BIT(0)),
TPS65086_REGULATOR("BUCK4", "buck4", BUCK4, 0x80, TPS65086_BUCK4VID,
BUCK_VID_MASK, TPS65086_BUCK4CTRL, BIT(0),
tps65086_10mv_ranges, TPS65086_BUCK4VID,
BIT(0)),
TPS65086_REGULATOR("BUCK5", "buck5", BUCK5, 0x80, TPS65086_BUCK5VID,
BUCK_VID_MASK, TPS65086_BUCK5CTRL, BIT(0),
tps65086_10mv_ranges, TPS65086_BUCK5CTRL,
BIT(0)),
TPS65086_REGULATOR("BUCK6", "buck6", BUCK6, 0x80, TPS65086_BUCK6VID,
BUCK_VID_MASK, TPS65086_BUCK6CTRL, BIT(0),
tps65086_10mv_ranges, TPS65086_BUCK6CTRL,
BIT(0)),
TPS65086_REGULATOR("LDOA1", "ldoa1", LDOA1, 0xF, TPS65086_LDOA1CTRL,
VDOA1_VID_MASK, TPS65086_LDOA1CTRL, BIT(0),
tps65086_ldoa1_ranges, 0, 0),
TPS65086_REGULATOR("LDOA2", "ldoa2", LDOA2, 0x10, TPS65086_LDOA2VID,
VDOA23_VID_MASK, TPS65086_LDOA2CTRL, BIT(0),
tps65086_ldoa23_ranges, 0, 0),
TPS65086_REGULATOR("LDOA3", "ldoa3", LDOA3, 0x10, TPS65086_LDOA3VID,
VDOA23_VID_MASK, TPS65086_LDOA3CTRL, BIT(0),
tps65086_ldoa23_ranges, 0, 0),
TPS65086_SWITCH("VTT", "vtt", VTT, TPS65086_SWVTT_EN, BIT(4)),
TPS65086_SWITCH("SWA1", "swa1", SWA1, TPS65086_SWVTT_EN, BIT(5)),
TPS65086_SWITCH("SWB1", "swb1", SWB1, TPS65086_SWVTT_EN, BIT(6)),
TPS65086_SWITCH("SWB2", "swb2", SWB2, TPS65086_SWVTT_EN, BIT(7)),
};
static const struct tps65086_regulator_config regulator_configs[] = {
TPS65086_REGULATOR_CONFIG(TPS6508640, tps6508640_regulator_config),
TPS65086_REGULATOR_CONFIG(TPS65086401, tps65086401_regulator_config),
TPS65086_REGULATOR_CONFIG(TPS6508641, tps6508641_regulator_config),
TPS65086_REGULATOR_CONFIG(TPS65086470, tps65086470_regulator_config)
};
static int tps65086_of_parse_cb(struct device_node *node,
const struct regulator_desc *desc,
struct regulator_config *config)
{
struct tps65086 * const tps = dev_get_drvdata(config->dev);
struct tps65086_regulator *regulators = tps->reg_config->config;
int ret;
/* Check for 25mV step mode */
if (of_property_read_bool(node, "ti,regulator-step-size-25mv")) {
switch (desc->id) {
case BUCK1:
case BUCK2:
case BUCK6:
regulators[desc->id].desc.linear_ranges =
tps65086_buck126_25mv_ranges;
regulators[desc->id].desc.n_linear_ranges =
ARRAY_SIZE(tps65086_buck126_25mv_ranges);
break;
case BUCK3:
case BUCK4:
case BUCK5:
regulators[desc->id].desc.linear_ranges =
tps65086_buck345_25mv_ranges;
regulators[desc->id].desc.n_linear_ranges =
ARRAY_SIZE(tps65086_buck345_25mv_ranges);
break;
default:
dev_warn(config->dev, "25mV step mode only valid for BUCK regulators\n");
}
}
/* Check for decay mode */
if (desc->id <= BUCK6 && of_property_read_bool(node, "ti,regulator-decay")) {
ret = regmap_write_bits(config->regmap,
regulators[desc->id].decay_reg,
regulators[desc->id].decay_mask,
regulators[desc->id].decay_mask);
if (ret) {
dev_err(config->dev, "Error setting decay\n");
return ret;
}
}
return 0;
}
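/*
 * Illustrative device-tree fragment (the node name and regulator-name are
 * assumptions for this sketch) using the properties handled by the parse
 * callback above to select 25mV steps and enable decay mode on a buck:
 *
 *	buck1 {
 *		regulator-name = "vcc-buck1";
 *		ti,regulator-step-size-25mv;
 *		ti,regulator-decay;
 *	};
 */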
static int tps65086_regulator_probe(struct platform_device *pdev)
{
struct tps65086 *tps = dev_get_drvdata(pdev->dev.parent);
struct regulator_config config = { };
unsigned int selector_reg_config;
struct regulator_dev *rdev;
int i;
/* Select regulator configuration for used PMIC device */
switch (tps->chip_id) {
case TPS6508640_ID:
selector_reg_config = TPS6508640;
break;
case TPS65086401_ID:
selector_reg_config = TPS65086401;
break;
case TPS6508641_ID:
selector_reg_config = TPS6508641;
break;
case TPS65086470_ID:
selector_reg_config = TPS65086470;
break;
default:
dev_err(tps->dev, "Unknown device ID. Cannot determine regulator config.\n");
return -ENODEV;
}
	tps->reg_config = &regulator_configs[selector_reg_config];
platform_set_drvdata(pdev, tps);
config.dev = &pdev->dev;
config.dev->of_node = tps->dev->of_node;
config.driver_data = tps;
config.regmap = tps->regmap;
for (i = 0; i < tps->reg_config->num_elems; ++i) {
struct regulator_desc * const desc_ptr = &tps->reg_config->config[i].desc;
dev_dbg(tps->dev, "Index: %u; Regulator name: \"%s\"; Regulator ID: %d\n",
i, desc_ptr->name, desc_ptr->id);
rdev = devm_regulator_register(&pdev->dev, desc_ptr, &config);
if (IS_ERR(rdev)) {
dev_err(tps->dev, "failed to register %d \"%s\" regulator\n",
i, desc_ptr->name);
return PTR_ERR(rdev);
}
}
return 0;
}
static const struct platform_device_id tps65086_regulator_id_table[] = {
{ "tps65086-regulator", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, tps65086_regulator_id_table);
static struct platform_driver tps65086_regulator_driver = {
.driver = {
.name = "tps65086-regulator",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = tps65086_regulator_probe,
.id_table = tps65086_regulator_id_table,
};
module_platform_driver(tps65086_regulator_driver);
MODULE_AUTHOR("Andrew F. Davis <[email protected]>");
MODULE_DESCRIPTION("TPS65086 Regulator driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/regulator/tps65086-regulator.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Regulators driver for Maxim max8925
*
* Copyright (C) 2009 Marvell International Ltd.
* Haojian Zhuang <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/mfd/max8925.h>
#include <linux/of.h>
#include <linux/regulator/of_regulator.h>
#define SD1_DVM_VMIN 850000
#define SD1_DVM_VMAX 1000000
#define SD1_DVM_STEP 50000
#define SD1_DVM_SHIFT 5 /* SDCTL1 bit5 */
#define SD1_DVM_EN 6 /* SDV1 bit 6 */
/* bit definitions in LDO control registers */
#define LDO_SEQ_I2C 0x7 /* Power U/D by i2c */
#define LDO_SEQ_MASK 0x7 /* Power U/D sequence mask */
#define LDO_SEQ_SHIFT 2 /* Power U/D sequence offset */
#define LDO_I2C_EN 0x1 /* Enable by i2c */
#define LDO_I2C_EN_MASK 0x1 /* Enable mask by i2c */
#define LDO_I2C_EN_SHIFT 0 /* Enable offset by i2c */
struct max8925_regulator_info {
struct regulator_desc desc;
struct i2c_client *i2c;
int vol_reg;
int enable_reg;
};
static int max8925_set_voltage_sel(struct regulator_dev *rdev,
unsigned int selector)
{
struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
unsigned char mask = rdev->desc->n_voltages - 1;
return max8925_set_bits(info->i2c, info->vol_reg, mask, selector);
}
static int max8925_get_voltage_sel(struct regulator_dev *rdev)
{
struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
unsigned char data, mask;
int ret;
ret = max8925_reg_read(info->i2c, info->vol_reg);
if (ret < 0)
return ret;
mask = rdev->desc->n_voltages - 1;
data = ret & mask;
return data;
}
static int max8925_enable(struct regulator_dev *rdev)
{
struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
return max8925_set_bits(info->i2c, info->enable_reg,
LDO_SEQ_MASK << LDO_SEQ_SHIFT |
LDO_I2C_EN_MASK << LDO_I2C_EN_SHIFT,
LDO_SEQ_I2C << LDO_SEQ_SHIFT |
LDO_I2C_EN << LDO_I2C_EN_SHIFT);
}
static int max8925_disable(struct regulator_dev *rdev)
{
struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
return max8925_set_bits(info->i2c, info->enable_reg,
LDO_SEQ_MASK << LDO_SEQ_SHIFT |
LDO_I2C_EN_MASK << LDO_I2C_EN_SHIFT,
LDO_SEQ_I2C << LDO_SEQ_SHIFT);
}
static int max8925_is_enabled(struct regulator_dev *rdev)
{
struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
int ldo_seq, ret;
ret = max8925_reg_read(info->i2c, info->enable_reg);
if (ret < 0)
return ret;
ldo_seq = (ret >> LDO_SEQ_SHIFT) & LDO_SEQ_MASK;
if (ldo_seq != LDO_SEQ_I2C)
return 1;
else
return ret & (LDO_I2C_EN_MASK << LDO_I2C_EN_SHIFT);
}
static int max8925_set_dvm_voltage(struct regulator_dev *rdev, int uV)
{
struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
unsigned char data, mask;
if (uV < SD1_DVM_VMIN || uV > SD1_DVM_VMAX)
return -EINVAL;
data = DIV_ROUND_UP(uV - SD1_DVM_VMIN, SD1_DVM_STEP);
data <<= SD1_DVM_SHIFT;
mask = 3 << SD1_DVM_SHIFT;
return max8925_set_bits(info->i2c, info->enable_reg, mask, data);
}
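/*
 * Worked example (illustrative): a request for 920000uV yields
 * data = DIV_ROUND_UP(920000 - 850000, 50000) = 2, i.e. the voltage is
 * rounded up to the 950000uV step before being placed in bits [6:5]
 * via SD1_DVM_SHIFT.
 */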
static int max8925_set_dvm_enable(struct regulator_dev *rdev)
{
struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
return max8925_set_bits(info->i2c, info->vol_reg, 1 << SD1_DVM_EN,
1 << SD1_DVM_EN);
}
static int max8925_set_dvm_disable(struct regulator_dev *rdev)
{
struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
return max8925_set_bits(info->i2c, info->vol_reg, 1 << SD1_DVM_EN, 0);
}
static const struct regulator_ops max8925_regulator_sdv_ops = {
.map_voltage = regulator_map_voltage_linear,
.list_voltage = regulator_list_voltage_linear,
.set_voltage_sel = max8925_set_voltage_sel,
.get_voltage_sel = max8925_get_voltage_sel,
.enable = max8925_enable,
.disable = max8925_disable,
.is_enabled = max8925_is_enabled,
.set_suspend_voltage = max8925_set_dvm_voltage,
.set_suspend_enable = max8925_set_dvm_enable,
.set_suspend_disable = max8925_set_dvm_disable,
};
static const struct regulator_ops max8925_regulator_ldo_ops = {
.map_voltage = regulator_map_voltage_linear,
.list_voltage = regulator_list_voltage_linear,
.set_voltage_sel = max8925_set_voltage_sel,
.get_voltage_sel = max8925_get_voltage_sel,
.enable = max8925_enable,
.disable = max8925_disable,
.is_enabled = max8925_is_enabled,
};
#define MAX8925_SDV(_id, min, max, step) \
{ \
.desc = { \
.name = "SDV" #_id, \
.of_match = of_match_ptr("SDV" #_id), \
.regulators_node = of_match_ptr("regulators"), \
.ops = &max8925_regulator_sdv_ops, \
.type = REGULATOR_VOLTAGE, \
.id = MAX8925_ID_SD##_id, \
.owner = THIS_MODULE, \
.n_voltages = 64, \
.min_uV = min * 1000, \
.uV_step = step * 1000, \
}, \
.vol_reg = MAX8925_SDV##_id, \
.enable_reg = MAX8925_SDCTL##_id, \
}
#define MAX8925_LDO(_id, min, max, step) \
{ \
.desc = { \
.name = "LDO" #_id, \
.of_match = of_match_ptr("LDO" #_id), \
.regulators_node = of_match_ptr("regulators"), \
.ops = &max8925_regulator_ldo_ops, \
.type = REGULATOR_VOLTAGE, \
.id = MAX8925_ID_LDO##_id, \
.owner = THIS_MODULE, \
.n_voltages = 64, \
.min_uV = min * 1000, \
.uV_step = step * 1000, \
}, \
.vol_reg = MAX8925_LDOVOUT##_id, \
.enable_reg = MAX8925_LDOCTL##_id, \
}
static struct max8925_regulator_info max8925_regulator_info[] = {
MAX8925_SDV(1, 637.5, 1425, 12.5),
MAX8925_SDV(2, 650, 2225, 25),
MAX8925_SDV(3, 750, 3900, 50),
MAX8925_LDO(1, 750, 3900, 50),
MAX8925_LDO(2, 650, 2250, 25),
MAX8925_LDO(3, 650, 2250, 25),
MAX8925_LDO(4, 750, 3900, 50),
MAX8925_LDO(5, 750, 3900, 50),
MAX8925_LDO(6, 750, 3900, 50),
MAX8925_LDO(7, 750, 3900, 50),
MAX8925_LDO(8, 750, 3900, 50),
MAX8925_LDO(9, 750, 3900, 50),
MAX8925_LDO(10, 750, 3900, 50),
MAX8925_LDO(11, 750, 3900, 50),
MAX8925_LDO(12, 750, 3900, 50),
MAX8925_LDO(13, 750, 3900, 50),
MAX8925_LDO(14, 750, 3900, 50),
MAX8925_LDO(15, 750, 3900, 50),
MAX8925_LDO(16, 750, 3900, 50),
MAX8925_LDO(17, 650, 2250, 25),
MAX8925_LDO(18, 650, 2250, 25),
MAX8925_LDO(19, 750, 3900, 50),
MAX8925_LDO(20, 750, 3900, 50),
};
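/*
 * Worked example (illustrative): MAX8925_SDV(1, 637.5, 1425, 12.5) above
 * expands to min_uV = 637500 and uV_step = 12500, so the 64 selectors
 * span 637500 + 63 * 12500 = 1425000uV, matching the 1425mV maximum in
 * the table entry.
 */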
static int max8925_regulator_probe(struct platform_device *pdev)
{
struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
struct regulator_init_data *pdata = dev_get_platdata(&pdev->dev);
struct regulator_config config = { };
struct max8925_regulator_info *ri;
struct resource *res;
struct regulator_dev *rdev;
int i;
res = platform_get_resource(pdev, IORESOURCE_REG, 0);
if (!res) {
dev_err(&pdev->dev, "No REG resource!\n");
return -EINVAL;
}
for (i = 0; i < ARRAY_SIZE(max8925_regulator_info); i++) {
ri = &max8925_regulator_info[i];
if (ri->vol_reg == res->start)
break;
}
if (i == ARRAY_SIZE(max8925_regulator_info)) {
dev_err(&pdev->dev, "Failed to find regulator %llu\n",
(unsigned long long)res->start);
return -EINVAL;
}
ri->i2c = chip->i2c;
config.dev = chip->dev;
config.driver_data = ri;
if (pdata)
config.init_data = pdata;
rdev = devm_regulator_register(&pdev->dev, &ri->desc, &config);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
ri->desc.name);
return PTR_ERR(rdev);
}
platform_set_drvdata(pdev, rdev);
return 0;
}
static struct platform_driver max8925_regulator_driver = {
.driver = {
.name = "max8925-regulator",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = max8925_regulator_probe,
};
static int __init max8925_regulator_init(void)
{
return platform_driver_register(&max8925_regulator_driver);
}
subsys_initcall(max8925_regulator_init);
static void __exit max8925_regulator_exit(void)
{
platform_driver_unregister(&max8925_regulator_driver);
}
module_exit(max8925_regulator_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Haojian Zhuang <[email protected]>");
MODULE_DESCRIPTION("Regulator Driver for Maxim 8925 PMIC");
MODULE_ALIAS("platform:max8925-regulator");
| linux-master | drivers/regulator/max8925-regulator.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2020 ROHM Semiconductors
#include <linux/errno.h>
#include <linux/mfd/rohm-generic.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
static int set_dvs_level(const struct regulator_desc *desc,
struct device_node *np, struct regmap *regmap,
char *prop, unsigned int reg, unsigned int mask,
unsigned int omask, unsigned int oreg)
{
int ret, i;
uint32_t uv;
ret = of_property_read_u32(np, prop, &uv);
if (ret) {
if (ret != -EINVAL)
return ret;
return 0;
}
/* If voltage is set to 0 => disable */
if (uv == 0) {
if (omask)
return regmap_update_bits(regmap, oreg, omask, 0);
}
/* Some setups don't allow setting own voltage but do allow enabling */
if (!mask) {
if (omask)
return regmap_update_bits(regmap, oreg, omask, omask);
return -EINVAL;
}
for (i = 0; i < desc->n_voltages; i++) {
/* NOTE to next hacker - Does not support pickable ranges */
if (desc->linear_range_selectors_bitfield)
return -EINVAL;
if (desc->n_linear_ranges)
ret = regulator_desc_list_voltage_linear_range(desc, i);
else
ret = regulator_desc_list_voltage_linear(desc, i);
if (ret < 0)
continue;
if (ret == uv) {
i <<= ffs(desc->vsel_mask) - 1;
ret = regmap_update_bits(regmap, reg, mask, i);
if (omask && !ret)
ret = regmap_update_bits(regmap, oreg, omask,
omask);
break;
}
}
return ret;
}
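/*
 * Illustrative example of the selector placement in set_dvs_level():
 * with a vsel_mask of 0x3f, ffs(0x3f) - 1 == 0 and the selector is
 * written as-is, while with a mask of 0xf0, ffs(0xf0) - 1 == 4 and a
 * selector of 3 becomes 0x30 before the masked register update.
 */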
int rohm_regulator_set_dvs_levels(const struct rohm_dvs_config *dvs,
struct device_node *np,
const struct regulator_desc *desc,
struct regmap *regmap)
{
int i, ret = 0;
char *prop;
unsigned int reg, mask, omask, oreg = desc->enable_reg;
for (i = 0; i < ROHM_DVS_LEVEL_VALID_AMOUNT && !ret; i++) {
int bit;
bit = BIT(i);
if (dvs->level_map & bit) {
switch (bit) {
case ROHM_DVS_LEVEL_RUN:
prop = "rohm,dvs-run-voltage";
reg = dvs->run_reg;
mask = dvs->run_mask;
omask = dvs->run_on_mask;
break;
case ROHM_DVS_LEVEL_IDLE:
prop = "rohm,dvs-idle-voltage";
reg = dvs->idle_reg;
mask = dvs->idle_mask;
omask = dvs->idle_on_mask;
break;
case ROHM_DVS_LEVEL_SUSPEND:
prop = "rohm,dvs-suspend-voltage";
reg = dvs->suspend_reg;
mask = dvs->suspend_mask;
omask = dvs->suspend_on_mask;
break;
case ROHM_DVS_LEVEL_LPSR:
prop = "rohm,dvs-lpsr-voltage";
reg = dvs->lpsr_reg;
mask = dvs->lpsr_mask;
omask = dvs->lpsr_on_mask;
break;
case ROHM_DVS_LEVEL_SNVS:
prop = "rohm,dvs-snvs-voltage";
reg = dvs->snvs_reg;
mask = dvs->snvs_mask;
omask = dvs->snvs_on_mask;
break;
default:
return -EINVAL;
}
ret = set_dvs_level(desc, np, regmap, prop, reg, mask,
omask, oreg);
}
}
return ret;
}
EXPORT_SYMBOL(rohm_regulator_set_dvs_levels);
/*
 * A few ROHM PMIC ICs have constraints on changing voltages:
 * BD71837 - only buck 1-4 voltages can be changed while they are enabled.
 * All other bucks and all LDOs must be disabled when their voltage is changed.
 * BD96801 - LDO voltage levels can only be changed while the LDOs are disabled.
*/
int rohm_regulator_set_voltage_sel_restricted(struct regulator_dev *rdev,
unsigned int sel)
{
if (rdev->desc->ops->is_enabled(rdev))
return -EBUSY;
return regulator_set_voltage_sel_regmap(rdev, sel);
}
EXPORT_SYMBOL_GPL(rohm_regulator_set_voltage_sel_restricted);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Matti Vaittinen <[email protected]>");
MODULE_DESCRIPTION("Generic helpers for ROHM PMIC regulator drivers");
| linux-master | drivers/regulator/rohm-regulator.c |
// SPDX-License-Identifier: GPL-2.0+
//
// wm831x-ldo.c -- LDO driver for the WM831x series
//
// Copyright 2009 Wolfson Microelectronics PLC.
//
// Author: Mark Brown <[email protected]>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/slab.h>
#include <linux/mfd/wm831x/core.h>
#include <linux/mfd/wm831x/regulator.h>
#include <linux/mfd/wm831x/pdata.h>
#define WM831X_LDO_MAX_NAME 9
#define WM831X_LDO_CONTROL 0
#define WM831X_LDO_ON_CONTROL 1
#define WM831X_LDO_SLEEP_CONTROL 2
#define WM831X_ALIVE_LDO_ON_CONTROL 0
#define WM831X_ALIVE_LDO_SLEEP_CONTROL 1
struct wm831x_ldo {
char name[WM831X_LDO_MAX_NAME];
char supply_name[WM831X_LDO_MAX_NAME];
struct regulator_desc desc;
int base;
struct wm831x *wm831x;
struct regulator_dev *regulator;
};
/*
* Shared
*/
static irqreturn_t wm831x_ldo_uv_irq(int irq, void *data)
{
struct wm831x_ldo *ldo = data;
regulator_notifier_call_chain(ldo->regulator,
REGULATOR_EVENT_UNDER_VOLTAGE,
NULL);
return IRQ_HANDLED;
}
/*
* General purpose LDOs
*/
static const struct linear_range wm831x_gp_ldo_ranges[] = {
REGULATOR_LINEAR_RANGE(900000, 0, 14, 50000),
REGULATOR_LINEAR_RANGE(1700000, 15, 31, 100000),
};
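/*
 * Worked example (illustrative): selector 14 maps to
 * 900000 + 14 * 50000 = 1600000uV in the first range and selector 31 to
 * 1700000 + (31 - 15) * 100000 = 3300000uV in the second, so the 32
 * selectors cover 0.9V to 3.3V.
 */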
static int wm831x_gp_ldo_set_suspend_voltage(struct regulator_dev *rdev,
int uV)
{
struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
struct wm831x *wm831x = ldo->wm831x;
int sel, reg = ldo->base + WM831X_LDO_SLEEP_CONTROL;
sel = regulator_map_voltage_linear_range(rdev, uV, uV);
if (sel < 0)
return sel;
return wm831x_set_bits(wm831x, reg, WM831X_LDO1_ON_VSEL_MASK, sel);
}
static unsigned int wm831x_gp_ldo_get_mode(struct regulator_dev *rdev)
{
struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
struct wm831x *wm831x = ldo->wm831x;
int ctrl_reg = ldo->base + WM831X_LDO_CONTROL;
int on_reg = ldo->base + WM831X_LDO_ON_CONTROL;
int ret;
ret = wm831x_reg_read(wm831x, on_reg);
if (ret < 0)
return ret;
if (!(ret & WM831X_LDO1_ON_MODE))
return REGULATOR_MODE_NORMAL;
ret = wm831x_reg_read(wm831x, ctrl_reg);
if (ret < 0)
return ret;
if (ret & WM831X_LDO1_LP_MODE)
return REGULATOR_MODE_STANDBY;
else
return REGULATOR_MODE_IDLE;
}
static int wm831x_gp_ldo_set_mode(struct regulator_dev *rdev,
unsigned int mode)
{
struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
struct wm831x *wm831x = ldo->wm831x;
int ctrl_reg = ldo->base + WM831X_LDO_CONTROL;
int on_reg = ldo->base + WM831X_LDO_ON_CONTROL;
int ret;
switch (mode) {
case REGULATOR_MODE_NORMAL:
ret = wm831x_set_bits(wm831x, on_reg,
WM831X_LDO1_ON_MODE, 0);
if (ret < 0)
return ret;
break;
case REGULATOR_MODE_IDLE:
ret = wm831x_set_bits(wm831x, ctrl_reg,
WM831X_LDO1_LP_MODE, 0);
if (ret < 0)
return ret;
ret = wm831x_set_bits(wm831x, on_reg,
WM831X_LDO1_ON_MODE,
WM831X_LDO1_ON_MODE);
if (ret < 0)
return ret;
break;
case REGULATOR_MODE_STANDBY:
ret = wm831x_set_bits(wm831x, ctrl_reg,
WM831X_LDO1_LP_MODE,
WM831X_LDO1_LP_MODE);
if (ret < 0)
return ret;
ret = wm831x_set_bits(wm831x, on_reg,
WM831X_LDO1_ON_MODE,
WM831X_LDO1_ON_MODE);
if (ret < 0)
return ret;
break;
default:
return -EINVAL;
}
return 0;
}
static int wm831x_gp_ldo_get_status(struct regulator_dev *rdev)
{
struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
struct wm831x *wm831x = ldo->wm831x;
int mask = 1 << rdev_get_id(rdev);
int ret;
/* Is the regulator on? */
ret = wm831x_reg_read(wm831x, WM831X_LDO_STATUS);
if (ret < 0)
return ret;
if (!(ret & mask))
return REGULATOR_STATUS_OFF;
/* Is it reporting under voltage? */
ret = wm831x_reg_read(wm831x, WM831X_LDO_UV_STATUS);
if (ret < 0)
return ret;
if (ret & mask)
return REGULATOR_STATUS_ERROR;
ret = wm831x_gp_ldo_get_mode(rdev);
if (ret < 0)
return ret;
else
return regulator_mode_to_status(ret);
}
static unsigned int wm831x_gp_ldo_get_optimum_mode(struct regulator_dev *rdev,
int input_uV,
int output_uV, int load_uA)
{
if (load_uA < 20000)
return REGULATOR_MODE_STANDBY;
if (load_uA < 50000)
return REGULATOR_MODE_IDLE;
return REGULATOR_MODE_NORMAL;
}
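/*
 * Illustrative mapping of the thresholds above: a 10mA load selects
 * REGULATOR_MODE_STANDBY, a 30mA load selects REGULATOR_MODE_IDLE, and
 * loads of 50mA and above select REGULATOR_MODE_NORMAL.
 */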
static const struct regulator_ops wm831x_gp_ldo_ops = {
.list_voltage = regulator_list_voltage_linear_range,
.map_voltage = regulator_map_voltage_linear_range,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_suspend_voltage = wm831x_gp_ldo_set_suspend_voltage,
.get_mode = wm831x_gp_ldo_get_mode,
.set_mode = wm831x_gp_ldo_set_mode,
.get_status = wm831x_gp_ldo_get_status,
.get_optimum_mode = wm831x_gp_ldo_get_optimum_mode,
.get_bypass = regulator_get_bypass_regmap,
.set_bypass = regulator_set_bypass_regmap,
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
};
static int wm831x_gp_ldo_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_pdata *pdata = dev_get_platdata(wm831x->dev);
struct regulator_config config = { };
int id;
struct wm831x_ldo *ldo;
struct resource *res;
int ret, irq;
if (pdata && pdata->wm831x_num)
id = (pdata->wm831x_num * 10) + 1;
else
id = 0;
id = pdev->id - id;
dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1);
ldo = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_ldo), GFP_KERNEL);
if (!ldo)
return -ENOMEM;
ldo->wm831x = wm831x;
res = platform_get_resource(pdev, IORESOURCE_REG, 0);
if (res == NULL) {
dev_err(&pdev->dev, "No REG resource\n");
ret = -EINVAL;
goto err;
}
ldo->base = res->start;
snprintf(ldo->name, sizeof(ldo->name), "LDO%d", id + 1);
ldo->desc.name = ldo->name;
snprintf(ldo->supply_name, sizeof(ldo->supply_name),
"LDO%dVDD", id + 1);
ldo->desc.supply_name = ldo->supply_name;
ldo->desc.id = id;
ldo->desc.type = REGULATOR_VOLTAGE;
ldo->desc.n_voltages = 32;
ldo->desc.ops = &wm831x_gp_ldo_ops;
ldo->desc.owner = THIS_MODULE;
ldo->desc.vsel_reg = ldo->base + WM831X_LDO_ON_CONTROL;
ldo->desc.vsel_mask = WM831X_LDO1_ON_VSEL_MASK;
ldo->desc.enable_reg = WM831X_LDO_ENABLE;
ldo->desc.enable_mask = 1 << id;
ldo->desc.bypass_reg = ldo->base;
ldo->desc.bypass_mask = WM831X_LDO1_SWI;
ldo->desc.linear_ranges = wm831x_gp_ldo_ranges;
ldo->desc.n_linear_ranges = ARRAY_SIZE(wm831x_gp_ldo_ranges);
config.dev = pdev->dev.parent;
if (pdata)
config.init_data = pdata->ldo[id];
config.driver_data = ldo;
config.regmap = wm831x->regmap;
ldo->regulator = devm_regulator_register(&pdev->dev, &ldo->desc,
&config);
if (IS_ERR(ldo->regulator)) {
ret = PTR_ERR(ldo->regulator);
dev_err(wm831x->dev, "Failed to register LDO%d: %d\n",
id + 1, ret);
goto err;
}
irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV"));
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
wm831x_ldo_uv_irq,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
ldo->name,
ldo);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n",
irq, ret);
goto err;
}
platform_set_drvdata(pdev, ldo);
return 0;
err:
return ret;
}
static struct platform_driver wm831x_gp_ldo_driver = {
.probe = wm831x_gp_ldo_probe,
.driver = {
.name = "wm831x-ldo",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
/*
* Analogue LDOs
*/
static const struct linear_range wm831x_aldo_ranges[] = {
REGULATOR_LINEAR_RANGE(1000000, 0, 12, 50000),
REGULATOR_LINEAR_RANGE(1700000, 13, 31, 100000),
};
static int wm831x_aldo_set_suspend_voltage(struct regulator_dev *rdev,
int uV)
{
struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
struct wm831x *wm831x = ldo->wm831x;
int sel, reg = ldo->base + WM831X_LDO_SLEEP_CONTROL;
sel = regulator_map_voltage_linear_range(rdev, uV, uV);
if (sel < 0)
return sel;
return wm831x_set_bits(wm831x, reg, WM831X_LDO7_ON_VSEL_MASK, sel);
}
static unsigned int wm831x_aldo_get_mode(struct regulator_dev *rdev)
{
struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
struct wm831x *wm831x = ldo->wm831x;
int on_reg = ldo->base + WM831X_LDO_ON_CONTROL;
int ret;
ret = wm831x_reg_read(wm831x, on_reg);
if (ret < 0)
return 0;
if (ret & WM831X_LDO7_ON_MODE)
return REGULATOR_MODE_IDLE;
else
return REGULATOR_MODE_NORMAL;
}
static int wm831x_aldo_set_mode(struct regulator_dev *rdev,
unsigned int mode)
{
struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
struct wm831x *wm831x = ldo->wm831x;
int on_reg = ldo->base + WM831X_LDO_ON_CONTROL;
int ret;
switch (mode) {
case REGULATOR_MODE_NORMAL:
ret = wm831x_set_bits(wm831x, on_reg, WM831X_LDO7_ON_MODE, 0);
if (ret < 0)
return ret;
break;
case REGULATOR_MODE_IDLE:
ret = wm831x_set_bits(wm831x, on_reg, WM831X_LDO7_ON_MODE,
WM831X_LDO7_ON_MODE);
if (ret < 0)
return ret;
break;
default:
return -EINVAL;
}
return 0;
}
static int wm831x_aldo_get_status(struct regulator_dev *rdev)
{
struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
struct wm831x *wm831x = ldo->wm831x;
int mask = 1 << rdev_get_id(rdev);
int ret;
/* Is the regulator on? */
ret = wm831x_reg_read(wm831x, WM831X_LDO_STATUS);
if (ret < 0)
return ret;
if (!(ret & mask))
return REGULATOR_STATUS_OFF;
/* Is it reporting under voltage? */
ret = wm831x_reg_read(wm831x, WM831X_LDO_UV_STATUS);
if (ret < 0)
return ret;
if (ret & mask)
return REGULATOR_STATUS_ERROR;
ret = wm831x_aldo_get_mode(rdev);
if (ret < 0)
return ret;
else
return regulator_mode_to_status(ret);
}
static const struct regulator_ops wm831x_aldo_ops = {
.list_voltage = regulator_list_voltage_linear_range,
.map_voltage = regulator_map_voltage_linear_range,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_suspend_voltage = wm831x_aldo_set_suspend_voltage,
.get_mode = wm831x_aldo_get_mode,
.set_mode = wm831x_aldo_set_mode,
.get_status = wm831x_aldo_get_status,
.set_bypass = regulator_set_bypass_regmap,
.get_bypass = regulator_get_bypass_regmap,
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
};
static int wm831x_aldo_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_pdata *pdata = dev_get_platdata(wm831x->dev);
struct regulator_config config = { };
int id;
struct wm831x_ldo *ldo;
struct resource *res;
int ret, irq;
if (pdata && pdata->wm831x_num)
id = (pdata->wm831x_num * 10) + 1;
else
id = 0;
id = pdev->id - id;
dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1);
ldo = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_ldo), GFP_KERNEL);
if (!ldo)
return -ENOMEM;
ldo->wm831x = wm831x;
res = platform_get_resource(pdev, IORESOURCE_REG, 0);
if (res == NULL) {
dev_err(&pdev->dev, "No REG resource\n");
ret = -EINVAL;
goto err;
}
ldo->base = res->start;
snprintf(ldo->name, sizeof(ldo->name), "LDO%d", id + 1);
ldo->desc.name = ldo->name;
snprintf(ldo->supply_name, sizeof(ldo->supply_name),
"LDO%dVDD", id + 1);
ldo->desc.supply_name = ldo->supply_name;
ldo->desc.id = id;
ldo->desc.type = REGULATOR_VOLTAGE;
ldo->desc.n_voltages = 32;
ldo->desc.linear_ranges = wm831x_aldo_ranges;
ldo->desc.n_linear_ranges = ARRAY_SIZE(wm831x_aldo_ranges);
ldo->desc.ops = &wm831x_aldo_ops;
ldo->desc.owner = THIS_MODULE;
ldo->desc.vsel_reg = ldo->base + WM831X_LDO_ON_CONTROL;
ldo->desc.vsel_mask = WM831X_LDO7_ON_VSEL_MASK;
ldo->desc.enable_reg = WM831X_LDO_ENABLE;
ldo->desc.enable_mask = 1 << id;
ldo->desc.bypass_reg = ldo->base;
ldo->desc.bypass_mask = WM831X_LDO7_SWI;
config.dev = pdev->dev.parent;
if (pdata)
config.init_data = pdata->ldo[id];
config.driver_data = ldo;
config.regmap = wm831x->regmap;
ldo->regulator = devm_regulator_register(&pdev->dev, &ldo->desc,
&config);
if (IS_ERR(ldo->regulator)) {
ret = PTR_ERR(ldo->regulator);
dev_err(wm831x->dev, "Failed to register LDO%d: %d\n",
id + 1, ret);
goto err;
}
irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV"));
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
wm831x_ldo_uv_irq,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
ldo->name, ldo);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n",
irq, ret);
goto err;
}
platform_set_drvdata(pdev, ldo);
return 0;
err:
return ret;
}
static struct platform_driver wm831x_aldo_driver = {
.probe = wm831x_aldo_probe,
.driver = {
.name = "wm831x-aldo",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
/*
* Alive LDO
*/
#define WM831X_ALIVE_LDO_MAX_SELECTOR 0xf
static int wm831x_alive_ldo_set_suspend_voltage(struct regulator_dev *rdev,
int uV)
{
struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
struct wm831x *wm831x = ldo->wm831x;
int sel, reg = ldo->base + WM831X_ALIVE_LDO_SLEEP_CONTROL;
sel = regulator_map_voltage_linear(rdev, uV, uV);
if (sel < 0)
return sel;
return wm831x_set_bits(wm831x, reg, WM831X_LDO11_ON_VSEL_MASK, sel);
}
static int wm831x_alive_ldo_get_status(struct regulator_dev *rdev)
{
struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
struct wm831x *wm831x = ldo->wm831x;
int mask = 1 << rdev_get_id(rdev);
int ret;
/* Is the regulator on? */
ret = wm831x_reg_read(wm831x, WM831X_LDO_STATUS);
if (ret < 0)
return ret;
if (ret & mask)
return REGULATOR_STATUS_ON;
else
return REGULATOR_STATUS_OFF;
}
static const struct regulator_ops wm831x_alive_ldo_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_suspend_voltage = wm831x_alive_ldo_set_suspend_voltage,
.get_status = wm831x_alive_ldo_get_status,
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
};
static int wm831x_alive_ldo_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_pdata *pdata = dev_get_platdata(wm831x->dev);
struct regulator_config config = { };
int id;
struct wm831x_ldo *ldo;
struct resource *res;
int ret;
if (pdata && pdata->wm831x_num)
id = (pdata->wm831x_num * 10) + 1;
else
id = 0;
id = pdev->id - id;
dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1);
ldo = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_ldo), GFP_KERNEL);
if (!ldo)
return -ENOMEM;
ldo->wm831x = wm831x;
res = platform_get_resource(pdev, IORESOURCE_REG, 0);
if (res == NULL) {
dev_err(&pdev->dev, "No REG resource\n");
ret = -EINVAL;
goto err;
}
ldo->base = res->start;
snprintf(ldo->name, sizeof(ldo->name), "LDO%d", id + 1);
ldo->desc.name = ldo->name;
snprintf(ldo->supply_name, sizeof(ldo->supply_name),
"LDO%dVDD", id + 1);
ldo->desc.supply_name = ldo->supply_name;
ldo->desc.id = id;
ldo->desc.type = REGULATOR_VOLTAGE;
ldo->desc.n_voltages = WM831X_ALIVE_LDO_MAX_SELECTOR + 1;
ldo->desc.ops = &wm831x_alive_ldo_ops;
ldo->desc.owner = THIS_MODULE;
ldo->desc.vsel_reg = ldo->base + WM831X_ALIVE_LDO_ON_CONTROL;
ldo->desc.vsel_mask = WM831X_LDO11_ON_VSEL_MASK;
ldo->desc.enable_reg = WM831X_LDO_ENABLE;
ldo->desc.enable_mask = 1 << id;
ldo->desc.min_uV = 800000;
ldo->desc.uV_step = 50000;
ldo->desc.enable_time = 1000;
config.dev = pdev->dev.parent;
if (pdata)
config.init_data = pdata->ldo[id];
config.driver_data = ldo;
config.regmap = wm831x->regmap;
ldo->regulator = devm_regulator_register(&pdev->dev, &ldo->desc,
&config);
if (IS_ERR(ldo->regulator)) {
ret = PTR_ERR(ldo->regulator);
dev_err(wm831x->dev, "Failed to register LDO%d: %d\n",
id + 1, ret);
goto err;
}
platform_set_drvdata(pdev, ldo);
return 0;
err:
return ret;
}
static struct platform_driver wm831x_alive_ldo_driver = {
.probe = wm831x_alive_ldo_probe,
.driver = {
.name = "wm831x-alive-ldo",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
static struct platform_driver * const drivers[] = {
&wm831x_gp_ldo_driver,
&wm831x_aldo_driver,
&wm831x_alive_ldo_driver,
};
static int __init wm831x_ldo_init(void)
{
return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}
subsys_initcall(wm831x_ldo_init);
static void __exit wm831x_ldo_exit(void)
{
platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}
module_exit(wm831x_ldo_exit);
/* Module information */
MODULE_AUTHOR("Mark Brown <[email protected]>");
MODULE_DESCRIPTION("WM831x LDO driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:wm831x-ldo");
MODULE_ALIAS("platform:wm831x-aldo");
MODULE_ALIAS("platform:wm831x-aliveldo");
| linux-master | drivers/regulator/wm831x-ldo.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2020 NXP.
* NXP PCA9450 pmic driver
*/
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <linux/regulator/pca9450.h>
struct pca9450_dvs_config {
unsigned int run_reg; /* dvs0 */
unsigned int run_mask;
unsigned int standby_reg; /* dvs1 */
unsigned int standby_mask;
};
struct pca9450_regulator_desc {
struct regulator_desc desc;
	const struct pca9450_dvs_config dvs;
};
struct pca9450 {
struct device *dev;
struct regmap *regmap;
struct gpio_desc *sd_vsel_gpio;
enum pca9450_chip_type type;
unsigned int rcnt;
int irq;
};
static const struct regmap_range pca9450_status_range = {
.range_min = PCA9450_REG_INT1,
.range_max = PCA9450_REG_PWRON_STAT,
};
static const struct regmap_access_table pca9450_volatile_regs = {
.yes_ranges = &pca9450_status_range,
.n_yes_ranges = 1,
};
static const struct regmap_config pca9450_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.volatile_table = &pca9450_volatile_regs,
.max_register = PCA9450_MAX_REGISTER - 1,
.cache_type = REGCACHE_RBTREE,
};
/*
* BUCK1/2/3
* BUCK1RAM[1:0] BUCK1 DVS ramp rate setting
* 00: 25mV/1usec
* 01: 25mV/2usec
* 10: 25mV/4usec
* 11: 25mV/8usec
*/
static const unsigned int pca9450_dvs_buck_ramp_table[] = {
25000, 12500, 6250, 3125
};
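/*
 * Worked example (illustrative): register setting 01 means 25mV/2usec,
 * i.e. 12500uV/usec, the second table entry; the table is simply
 * 25000uV/usec divided by 1, 2, 4 and 8.
 */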
static const struct regulator_ops pca9450_dvs_buck_regulator_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
.list_voltage = regulator_list_voltage_linear_range,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
.set_ramp_delay = regulator_set_ramp_delay_regmap,
};
static const struct regulator_ops pca9450_buck_regulator_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
.list_voltage = regulator_list_voltage_linear_range,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
};
static const struct regulator_ops pca9450_ldo_regulator_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
.list_voltage = regulator_list_voltage_linear_range,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
};
/*
* BUCK1/2/3
* 0.60 to 2.1875V (12.5mV step)
*/
static const struct linear_range pca9450_dvs_buck_volts[] = {
REGULATOR_LINEAR_RANGE(600000, 0x00, 0x7F, 12500),
};
/*
* BUCK4/5/6
* 0.6V to 3.4V (25mV step)
*/
static const struct linear_range pca9450_buck_volts[] = {
REGULATOR_LINEAR_RANGE(600000, 0x00, 0x70, 25000),
REGULATOR_LINEAR_RANGE(3400000, 0x71, 0x7F, 0),
};
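/*
 * Worked example (illustrative): selector 0x70 gives
 * 600000 + 0x70 * 25000 = 3400000uV, and selectors 0x71-0x7F fall in a
 * zero-step range, so they all read back as 3.4V.
 */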
/*
* LDO1
 * 1.6 to 1.9V and 3.0 to 3.3V (100mV step)
*/
static const struct linear_range pca9450_ldo1_volts[] = {
REGULATOR_LINEAR_RANGE(1600000, 0x00, 0x03, 100000),
REGULATOR_LINEAR_RANGE(3000000, 0x04, 0x07, 100000),
};
/*
* LDO2
* 0.8 to 1.15V (50mV step)
*/
static const struct linear_range pca9450_ldo2_volts[] = {
REGULATOR_LINEAR_RANGE(800000, 0x00, 0x07, 50000),
};
/*
* LDO3/4
* 0.8 to 3.3V (100mV step)
*/
static const struct linear_range pca9450_ldo34_volts[] = {
REGULATOR_LINEAR_RANGE(800000, 0x00, 0x19, 100000),
REGULATOR_LINEAR_RANGE(3300000, 0x1A, 0x1F, 0),
};
/*
* LDO5
* 1.8 to 3.3V (100mV step)
*/
static const struct linear_range pca9450_ldo5_volts[] = {
REGULATOR_LINEAR_RANGE(1800000, 0x00, 0x0F, 100000),
};
static int buck_set_dvs(const struct regulator_desc *desc,
struct device_node *np, struct regmap *regmap,
char *prop, unsigned int reg, unsigned int mask)
{
int ret, i;
uint32_t uv;
ret = of_property_read_u32(np, prop, &uv);
if (ret == -EINVAL)
return 0;
else if (ret)
return ret;
for (i = 0; i < desc->n_voltages; i++) {
ret = regulator_desc_list_voltage_linear_range(desc, i);
if (ret < 0)
continue;
if (ret == uv) {
i <<= ffs(desc->vsel_mask) - 1;
ret = regmap_update_bits(regmap, reg, mask, i);
break;
}
}
if (ret == 0) {
struct pca9450_regulator_desc *regulator = container_of(desc,
struct pca9450_regulator_desc, desc);
/* Enable DVS control through PMIC_STBY_REQ for this BUCK */
ret = regmap_update_bits(regmap, regulator->desc.enable_reg,
BUCK1_DVS_CTRL, BUCK1_DVS_CTRL);
}
return ret;
}
static int pca9450_set_dvs_levels(struct device_node *np,
const struct regulator_desc *desc,
struct regulator_config *cfg)
{
struct pca9450_regulator_desc *data = container_of(desc,
struct pca9450_regulator_desc, desc);
	const struct pca9450_dvs_config *dvs = &data->dvs;
unsigned int reg, mask;
char *prop;
int i, ret = 0;
for (i = 0; i < PCA9450_DVS_LEVEL_MAX; i++) {
switch (i) {
case PCA9450_DVS_LEVEL_RUN:
prop = "nxp,dvs-run-voltage";
reg = dvs->run_reg;
mask = dvs->run_mask;
break;
case PCA9450_DVS_LEVEL_STANDBY:
prop = "nxp,dvs-standby-voltage";
reg = dvs->standby_reg;
mask = dvs->standby_mask;
break;
default:
return -EINVAL;
}
ret = buck_set_dvs(desc, np, cfg->regmap, prop, reg, mask);
if (ret)
break;
}
return ret;
}
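/*
 * Illustrative device-tree fragment (voltage values are assumptions for
 * this sketch) consumed by the DVS parsing above; the node name matches
 * the "BUCK1" of_match string in the descriptors below:
 *
 *	BUCK1 {
 *		regulator-name = "buck1";
 *		nxp,dvs-run-voltage = <950000>;
 *		nxp,dvs-standby-voltage = <850000>;
 *	};
 */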
static const struct pca9450_regulator_desc pca9450a_regulators[] = {
{
.desc = {
.name = "buck1",
.of_match = of_match_ptr("BUCK1"),
.regulators_node = of_match_ptr("regulators"),
.id = PCA9450_BUCK1,
.ops = &pca9450_dvs_buck_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = PCA9450_BUCK1_VOLTAGE_NUM,
.linear_ranges = pca9450_dvs_buck_volts,
.n_linear_ranges = ARRAY_SIZE(pca9450_dvs_buck_volts),
.vsel_reg = PCA9450_REG_BUCK1OUT_DVS0,
.vsel_mask = BUCK1OUT_DVS0_MASK,
.enable_reg = PCA9450_REG_BUCK1CTRL,
.enable_mask = BUCK1_ENMODE_MASK,
.ramp_reg = PCA9450_REG_BUCK1CTRL,
.ramp_mask = BUCK1_RAMP_MASK,
.ramp_delay_table = pca9450_dvs_buck_ramp_table,
.n_ramp_values = ARRAY_SIZE(pca9450_dvs_buck_ramp_table),
.owner = THIS_MODULE,
.of_parse_cb = pca9450_set_dvs_levels,
},
.dvs = {
.run_reg = PCA9450_REG_BUCK1OUT_DVS0,
.run_mask = BUCK1OUT_DVS0_MASK,
.standby_reg = PCA9450_REG_BUCK1OUT_DVS1,
.standby_mask = BUCK1OUT_DVS1_MASK,
},
},
{
.desc = {
.name = "buck2",
.of_match = of_match_ptr("BUCK2"),
.regulators_node = of_match_ptr("regulators"),
.id = PCA9450_BUCK2,
.ops = &pca9450_dvs_buck_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = PCA9450_BUCK2_VOLTAGE_NUM,
.linear_ranges = pca9450_dvs_buck_volts,
.n_linear_ranges = ARRAY_SIZE(pca9450_dvs_buck_volts),
.vsel_reg = PCA9450_REG_BUCK2OUT_DVS0,
.vsel_mask = BUCK2OUT_DVS0_MASK,
.enable_reg = PCA9450_REG_BUCK2CTRL,
.enable_mask = BUCK2_ENMODE_MASK,
.ramp_reg = PCA9450_REG_BUCK2CTRL,
.ramp_mask = BUCK2_RAMP_MASK,
.ramp_delay_table = pca9450_dvs_buck_ramp_table,
.n_ramp_values = ARRAY_SIZE(pca9450_dvs_buck_ramp_table),
.owner = THIS_MODULE,
.of_parse_cb = pca9450_set_dvs_levels,
},
.dvs = {
.run_reg = PCA9450_REG_BUCK2OUT_DVS0,
.run_mask = BUCK2OUT_DVS0_MASK,
.standby_reg = PCA9450_REG_BUCK2OUT_DVS1,
.standby_mask = BUCK2OUT_DVS1_MASK,
},
},
{
.desc = {
.name = "buck3",
.of_match = of_match_ptr("BUCK3"),
.regulators_node = of_match_ptr("regulators"),
.id = PCA9450_BUCK3,
.ops = &pca9450_dvs_buck_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = PCA9450_BUCK3_VOLTAGE_NUM,
.linear_ranges = pca9450_dvs_buck_volts,
.n_linear_ranges = ARRAY_SIZE(pca9450_dvs_buck_volts),
.vsel_reg = PCA9450_REG_BUCK3OUT_DVS0,
.vsel_mask = BUCK3OUT_DVS0_MASK,
.enable_reg = PCA9450_REG_BUCK3CTRL,
.enable_mask = BUCK3_ENMODE_MASK,
.ramp_reg = PCA9450_REG_BUCK3CTRL,
.ramp_mask = BUCK3_RAMP_MASK,
.ramp_delay_table = pca9450_dvs_buck_ramp_table,
.n_ramp_values = ARRAY_SIZE(pca9450_dvs_buck_ramp_table),
.owner = THIS_MODULE,
.of_parse_cb = pca9450_set_dvs_levels,
},
.dvs = {
.run_reg = PCA9450_REG_BUCK3OUT_DVS0,
.run_mask = BUCK3OUT_DVS0_MASK,
.standby_reg = PCA9450_REG_BUCK3OUT_DVS1,
.standby_mask = BUCK3OUT_DVS1_MASK,
},
},
{
.desc = {
.name = "buck4",
.of_match = of_match_ptr("BUCK4"),
.regulators_node = of_match_ptr("regulators"),
.id = PCA9450_BUCK4,
.ops = &pca9450_buck_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = PCA9450_BUCK4_VOLTAGE_NUM,
.linear_ranges = pca9450_buck_volts,
.n_linear_ranges = ARRAY_SIZE(pca9450_buck_volts),
.vsel_reg = PCA9450_REG_BUCK4OUT,
.vsel_mask = BUCK4OUT_MASK,
.enable_reg = PCA9450_REG_BUCK4CTRL,
.enable_mask = BUCK4_ENMODE_MASK,
.owner = THIS_MODULE,
},
},
{
.desc = {
.name = "buck5",
.of_match = of_match_ptr("BUCK5"),
.regulators_node = of_match_ptr("regulators"),
.id = PCA9450_BUCK5,
.ops = &pca9450_buck_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = PCA9450_BUCK5_VOLTAGE_NUM,
.linear_ranges = pca9450_buck_volts,
.n_linear_ranges = ARRAY_SIZE(pca9450_buck_volts),
.vsel_reg = PCA9450_REG_BUCK5OUT,
.vsel_mask = BUCK5OUT_MASK,
.enable_reg = PCA9450_REG_BUCK5CTRL,
.enable_mask = BUCK5_ENMODE_MASK,
.owner = THIS_MODULE,
},
},
{
.desc = {
.name = "buck6",
.of_match = of_match_ptr("BUCK6"),
.regulators_node = of_match_ptr("regulators"),
.id = PCA9450_BUCK6,
.ops = &pca9450_buck_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = PCA9450_BUCK6_VOLTAGE_NUM,
.linear_ranges = pca9450_buck_volts,
.n_linear_ranges = ARRAY_SIZE(pca9450_buck_volts),
.vsel_reg = PCA9450_REG_BUCK6OUT,
.vsel_mask = BUCK6OUT_MASK,
.enable_reg = PCA9450_REG_BUCK6CTRL,
.enable_mask = BUCK6_ENMODE_MASK,
.owner = THIS_MODULE,
},
},
{
.desc = {
.name = "ldo1",
.of_match = of_match_ptr("LDO1"),
.regulators_node = of_match_ptr("regulators"),
.id = PCA9450_LDO1,
.ops = &pca9450_ldo_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = PCA9450_LDO1_VOLTAGE_NUM,
.linear_ranges = pca9450_ldo1_volts,
.n_linear_ranges = ARRAY_SIZE(pca9450_ldo1_volts),
.vsel_reg = PCA9450_REG_LDO1CTRL,
.vsel_mask = LDO1OUT_MASK,
.enable_reg = PCA9450_REG_LDO1CTRL,
.enable_mask = LDO1_EN_MASK,
.owner = THIS_MODULE,
},
},
{
.desc = {
.name = "ldo2",
.of_match = of_match_ptr("LDO2"),
.regulators_node = of_match_ptr("regulators"),
.id = PCA9450_LDO2,
.ops = &pca9450_ldo_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = PCA9450_LDO2_VOLTAGE_NUM,
.linear_ranges = pca9450_ldo2_volts,
.n_linear_ranges = ARRAY_SIZE(pca9450_ldo2_volts),
.vsel_reg = PCA9450_REG_LDO2CTRL,
.vsel_mask = LDO2OUT_MASK,
.enable_reg = PCA9450_REG_LDO2CTRL,
.enable_mask = LDO2_EN_MASK,
.owner = THIS_MODULE,
},
},
{
.desc = {
.name = "ldo3",
.of_match = of_match_ptr("LDO3"),
.regulators_node = of_match_ptr("regulators"),
.id = PCA9450_LDO3,
.ops = &pca9450_ldo_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = PCA9450_LDO3_VOLTAGE_NUM,
.linear_ranges = pca9450_ldo34_volts,
.n_linear_ranges = ARRAY_SIZE(pca9450_ldo34_volts),
.vsel_reg = PCA9450_REG_LDO3CTRL,
.vsel_mask = LDO3OUT_MASK,
.enable_reg = PCA9450_REG_LDO3CTRL,
.enable_mask = LDO3_EN_MASK,
.owner = THIS_MODULE,
},
},
{
.desc = {
.name = "ldo4",
.of_match = of_match_ptr("LDO4"),
.regulators_node = of_match_ptr("regulators"),
.id = PCA9450_LDO4,
.ops = &pca9450_ldo_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = PCA9450_LDO4_VOLTAGE_NUM,
.linear_ranges = pca9450_ldo34_volts,
.n_linear_ranges = ARRAY_SIZE(pca9450_ldo34_volts),
.vsel_reg = PCA9450_REG_LDO4CTRL,
.vsel_mask = LDO4OUT_MASK,
.enable_reg = PCA9450_REG_LDO4CTRL,
.enable_mask = LDO4_EN_MASK,
.owner = THIS_MODULE,
},
},
{
.desc = {
.name = "ldo5",
.of_match = of_match_ptr("LDO5"),
.regulators_node = of_match_ptr("regulators"),
.id = PCA9450_LDO5,
.ops = &pca9450_ldo_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = PCA9450_LDO5_VOLTAGE_NUM,
.linear_ranges = pca9450_ldo5_volts,
.n_linear_ranges = ARRAY_SIZE(pca9450_ldo5_volts),
.vsel_reg = PCA9450_REG_LDO5CTRL_H,
.vsel_mask = LDO5HOUT_MASK,
.enable_reg = PCA9450_REG_LDO5CTRL_H,
.enable_mask = LDO5H_EN_MASK,
.owner = THIS_MODULE,
},
},
};
/*
 * Buck3 is removed on PCA9450B. On PCA9450C it is internally connected
 * with Buck1 for dual-phase operation, so neither variant exposes a Buck3.
*/
static const struct pca9450_regulator_desc pca9450bc_regulators[] = {
{
.desc = {
.name = "buck1",
.of_match = of_match_ptr("BUCK1"),
.regulators_node = of_match_ptr("regulators"),
.id = PCA9450_BUCK1,
.ops = &pca9450_dvs_buck_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = PCA9450_BUCK1_VOLTAGE_NUM,
.linear_ranges = pca9450_dvs_buck_volts,
.n_linear_ranges = ARRAY_SIZE(pca9450_dvs_buck_volts),
.vsel_reg = PCA9450_REG_BUCK1OUT_DVS0,
.vsel_mask = BUCK1OUT_DVS0_MASK,
.enable_reg = PCA9450_REG_BUCK1CTRL,
.enable_mask = BUCK1_ENMODE_MASK,
.ramp_reg = PCA9450_REG_BUCK1CTRL,
.ramp_mask = BUCK1_RAMP_MASK,
.ramp_delay_table = pca9450_dvs_buck_ramp_table,
.n_ramp_values = ARRAY_SIZE(pca9450_dvs_buck_ramp_table),
.owner = THIS_MODULE,
.of_parse_cb = pca9450_set_dvs_levels,
},
.dvs = {
.run_reg = PCA9450_REG_BUCK1OUT_DVS0,
.run_mask = BUCK1OUT_DVS0_MASK,
.standby_reg = PCA9450_REG_BUCK1OUT_DVS1,
.standby_mask = BUCK1OUT_DVS1_MASK,
},
},
{
.desc = {
.name = "buck2",
.of_match = of_match_ptr("BUCK2"),
.regulators_node = of_match_ptr("regulators"),
.id = PCA9450_BUCK2,
.ops = &pca9450_dvs_buck_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = PCA9450_BUCK2_VOLTAGE_NUM,
.linear_ranges = pca9450_dvs_buck_volts,
.n_linear_ranges = ARRAY_SIZE(pca9450_dvs_buck_volts),
.vsel_reg = PCA9450_REG_BUCK2OUT_DVS0,
.vsel_mask = BUCK2OUT_DVS0_MASK,
.enable_reg = PCA9450_REG_BUCK2CTRL,
.enable_mask = BUCK2_ENMODE_MASK,
.ramp_reg = PCA9450_REG_BUCK2CTRL,
.ramp_mask = BUCK2_RAMP_MASK,
.ramp_delay_table = pca9450_dvs_buck_ramp_table,
.n_ramp_values = ARRAY_SIZE(pca9450_dvs_buck_ramp_table),
.owner = THIS_MODULE,
.of_parse_cb = pca9450_set_dvs_levels,
},
.dvs = {
.run_reg = PCA9450_REG_BUCK2OUT_DVS0,
.run_mask = BUCK2OUT_DVS0_MASK,
.standby_reg = PCA9450_REG_BUCK2OUT_DVS1,
.standby_mask = BUCK2OUT_DVS1_MASK,
},
},
{
.desc = {
.name = "buck4",
.of_match = of_match_ptr("BUCK4"),
.regulators_node = of_match_ptr("regulators"),
.id = PCA9450_BUCK4,
.ops = &pca9450_buck_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = PCA9450_BUCK4_VOLTAGE_NUM,
.linear_ranges = pca9450_buck_volts,
.n_linear_ranges = ARRAY_SIZE(pca9450_buck_volts),
.vsel_reg = PCA9450_REG_BUCK4OUT,
.vsel_mask = BUCK4OUT_MASK,
.enable_reg = PCA9450_REG_BUCK4CTRL,
.enable_mask = BUCK4_ENMODE_MASK,
.owner = THIS_MODULE,
},
},
{
.desc = {
.name = "buck5",
.of_match = of_match_ptr("BUCK5"),
.regulators_node = of_match_ptr("regulators"),
.id = PCA9450_BUCK5,
.ops = &pca9450_buck_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = PCA9450_BUCK5_VOLTAGE_NUM,
.linear_ranges = pca9450_buck_volts,
.n_linear_ranges = ARRAY_SIZE(pca9450_buck_volts),
.vsel_reg = PCA9450_REG_BUCK5OUT,
.vsel_mask = BUCK5OUT_MASK,
.enable_reg = PCA9450_REG_BUCK5CTRL,
.enable_mask = BUCK5_ENMODE_MASK,
.owner = THIS_MODULE,
},
},
{
.desc = {
.name = "buck6",
.of_match = of_match_ptr("BUCK6"),
.regulators_node = of_match_ptr("regulators"),
.id = PCA9450_BUCK6,
.ops = &pca9450_buck_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = PCA9450_BUCK6_VOLTAGE_NUM,
.linear_ranges = pca9450_buck_volts,
.n_linear_ranges = ARRAY_SIZE(pca9450_buck_volts),
.vsel_reg = PCA9450_REG_BUCK6OUT,
.vsel_mask = BUCK6OUT_MASK,
.enable_reg = PCA9450_REG_BUCK6CTRL,
.enable_mask = BUCK6_ENMODE_MASK,
.owner = THIS_MODULE,
},
},
{
.desc = {
.name = "ldo1",
.of_match = of_match_ptr("LDO1"),
.regulators_node = of_match_ptr("regulators"),
.id = PCA9450_LDO1,
.ops = &pca9450_ldo_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = PCA9450_LDO1_VOLTAGE_NUM,
.linear_ranges = pca9450_ldo1_volts,
.n_linear_ranges = ARRAY_SIZE(pca9450_ldo1_volts),
.vsel_reg = PCA9450_REG_LDO1CTRL,
.vsel_mask = LDO1OUT_MASK,
.enable_reg = PCA9450_REG_LDO1CTRL,
.enable_mask = LDO1_EN_MASK,
.owner = THIS_MODULE,
},
},
{
.desc = {
.name = "ldo2",
.of_match = of_match_ptr("LDO2"),
.regulators_node = of_match_ptr("regulators"),
.id = PCA9450_LDO2,
.ops = &pca9450_ldo_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = PCA9450_LDO2_VOLTAGE_NUM,
.linear_ranges = pca9450_ldo2_volts,
.n_linear_ranges = ARRAY_SIZE(pca9450_ldo2_volts),
.vsel_reg = PCA9450_REG_LDO2CTRL,
.vsel_mask = LDO2OUT_MASK,
.enable_reg = PCA9450_REG_LDO2CTRL,
.enable_mask = LDO2_EN_MASK,
.owner = THIS_MODULE,
},
},
{
.desc = {
.name = "ldo3",
.of_match = of_match_ptr("LDO3"),
.regulators_node = of_match_ptr("regulators"),
.id = PCA9450_LDO3,
.ops = &pca9450_ldo_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = PCA9450_LDO3_VOLTAGE_NUM,
.linear_ranges = pca9450_ldo34_volts,
.n_linear_ranges = ARRAY_SIZE(pca9450_ldo34_volts),
.vsel_reg = PCA9450_REG_LDO3CTRL,
.vsel_mask = LDO3OUT_MASK,
.enable_reg = PCA9450_REG_LDO3CTRL,
.enable_mask = LDO3_EN_MASK,
.owner = THIS_MODULE,
},
},
{
.desc = {
.name = "ldo4",
.of_match = of_match_ptr("LDO4"),
.regulators_node = of_match_ptr("regulators"),
.id = PCA9450_LDO4,
.ops = &pca9450_ldo_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = PCA9450_LDO4_VOLTAGE_NUM,
.linear_ranges = pca9450_ldo34_volts,
.n_linear_ranges = ARRAY_SIZE(pca9450_ldo34_volts),
.vsel_reg = PCA9450_REG_LDO4CTRL,
.vsel_mask = LDO4OUT_MASK,
.enable_reg = PCA9450_REG_LDO4CTRL,
.enable_mask = LDO4_EN_MASK,
.owner = THIS_MODULE,
},
},
{
.desc = {
.name = "ldo5",
.of_match = of_match_ptr("LDO5"),
.regulators_node = of_match_ptr("regulators"),
.id = PCA9450_LDO5,
.ops = &pca9450_ldo_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = PCA9450_LDO5_VOLTAGE_NUM,
.linear_ranges = pca9450_ldo5_volts,
.n_linear_ranges = ARRAY_SIZE(pca9450_ldo5_volts),
.vsel_reg = PCA9450_REG_LDO5CTRL_H,
.vsel_mask = LDO5HOUT_MASK,
.enable_reg = PCA9450_REG_LDO5CTRL_H,
.enable_mask = LDO5H_EN_MASK,
.owner = THIS_MODULE,
},
},
};
static irqreturn_t pca9450_irq_handler(int irq, void *data)
{
struct pca9450 *pca9450 = data;
struct regmap *regmap = pca9450->regmap;
unsigned int status;
int ret;
ret = regmap_read(regmap, PCA9450_REG_INT1, &status);
if (ret < 0) {
dev_err(pca9450->dev,
"Failed to read INT1(%d)\n", ret);
return IRQ_NONE;
}
if (status & IRQ_PWRON)
dev_warn(pca9450->dev, "PWRON interrupt.\n");
if (status & IRQ_WDOGB)
dev_warn(pca9450->dev, "WDOGB interrupt.\n");
if (status & IRQ_VR_FLT1)
dev_warn(pca9450->dev, "VRFLT1 interrupt.\n");
if (status & IRQ_VR_FLT2)
dev_warn(pca9450->dev, "VRFLT2 interrupt.\n");
if (status & IRQ_LOWVSYS)
dev_warn(pca9450->dev, "LOWVSYS interrupt.\n");
if (status & IRQ_THERM_105)
dev_warn(pca9450->dev, "IRQ_THERM_105 interrupt.\n");
if (status & IRQ_THERM_125)
dev_warn(pca9450->dev, "IRQ_THERM_125 interrupt.\n");
return IRQ_HANDLED;
}
static int pca9450_i2c_probe(struct i2c_client *i2c)
{
enum pca9450_chip_type type = (unsigned int)(uintptr_t)
of_device_get_match_data(&i2c->dev);
const struct pca9450_regulator_desc *regulator_desc;
struct regulator_config config = { };
struct pca9450 *pca9450;
unsigned int device_id, i;
unsigned int reset_ctrl;
int ret;
if (!i2c->irq) {
dev_err(&i2c->dev, "No IRQ configured?\n");
return -EINVAL;
}
pca9450 = devm_kzalloc(&i2c->dev, sizeof(struct pca9450), GFP_KERNEL);
if (!pca9450)
return -ENOMEM;
switch (type) {
case PCA9450_TYPE_PCA9450A:
regulator_desc = pca9450a_regulators;
pca9450->rcnt = ARRAY_SIZE(pca9450a_regulators);
break;
case PCA9450_TYPE_PCA9450BC:
regulator_desc = pca9450bc_regulators;
pca9450->rcnt = ARRAY_SIZE(pca9450bc_regulators);
break;
default:
dev_err(&i2c->dev, "Unknown device type");
return -EINVAL;
}
pca9450->irq = i2c->irq;
pca9450->type = type;
pca9450->dev = &i2c->dev;
dev_set_drvdata(&i2c->dev, pca9450);
pca9450->regmap = devm_regmap_init_i2c(i2c,
&pca9450_regmap_config);
if (IS_ERR(pca9450->regmap)) {
dev_err(&i2c->dev, "regmap initialization failed\n");
return PTR_ERR(pca9450->regmap);
}
ret = regmap_read(pca9450->regmap, PCA9450_REG_DEV_ID, &device_id);
if (ret) {
dev_err(&i2c->dev, "Read device id error\n");
return ret;
}
	/* Check the board and dts to make sure the right PMIC is matched */
if (((device_id >> 4) != 0x1 && type == PCA9450_TYPE_PCA9450A) ||
((device_id >> 4) != 0x3 && type == PCA9450_TYPE_PCA9450BC)) {
dev_err(&i2c->dev, "Device id(%x) mismatched\n",
device_id >> 4);
return -EINVAL;
}
for (i = 0; i < pca9450->rcnt; i++) {
const struct regulator_desc *desc;
struct regulator_dev *rdev;
const struct pca9450_regulator_desc *r;
		r = &regulator_desc[i];
desc = &r->desc;
config.regmap = pca9450->regmap;
config.dev = pca9450->dev;
rdev = devm_regulator_register(pca9450->dev, desc, &config);
if (IS_ERR(rdev)) {
ret = PTR_ERR(rdev);
dev_err(pca9450->dev,
"Failed to register regulator(%s): %d\n",
desc->name, ret);
return ret;
}
}
ret = devm_request_threaded_irq(pca9450->dev, pca9450->irq, NULL,
pca9450_irq_handler,
(IRQF_TRIGGER_FALLING | IRQF_ONESHOT),
"pca9450-irq", pca9450);
if (ret != 0) {
dev_err(pca9450->dev, "Failed to request IRQ: %d\n",
pca9450->irq);
return ret;
}
	/* Unmask all interrupts except PWRON/WDOG/RSVD */
ret = regmap_update_bits(pca9450->regmap, PCA9450_REG_INT1_MSK,
IRQ_VR_FLT1 | IRQ_VR_FLT2 | IRQ_LOWVSYS |
IRQ_THERM_105 | IRQ_THERM_125,
IRQ_PWRON | IRQ_WDOGB | IRQ_RSVD);
if (ret) {
dev_err(&i2c->dev, "Unmask irq error\n");
return ret;
}
/* Clear PRESET_EN bit in BUCK123_DVS to use DVS registers */
ret = regmap_clear_bits(pca9450->regmap, PCA9450_REG_BUCK123_DVS,
BUCK123_PRESET_EN);
if (ret) {
dev_err(&i2c->dev, "Failed to clear PRESET_EN bit: %d\n", ret);
return ret;
}
if (of_property_read_bool(i2c->dev.of_node, "nxp,wdog_b-warm-reset"))
reset_ctrl = WDOG_B_CFG_WARM;
else
reset_ctrl = WDOG_B_CFG_COLD_LDO12;
/* Set reset behavior on assertion of WDOG_B signal */
ret = regmap_update_bits(pca9450->regmap, PCA9450_REG_RESET_CTRL,
WDOG_B_CFG_MASK, reset_ctrl);
if (ret) {
dev_err(&i2c->dev, "Failed to set WDOG_B reset behavior\n");
return ret;
}
if (of_property_read_bool(i2c->dev.of_node, "nxp,i2c-lt-enable")) {
/* Enable I2C Level Translator */
ret = regmap_update_bits(pca9450->regmap, PCA9450_REG_CONFIG2,
I2C_LT_MASK, I2C_LT_ON_STANDBY_RUN);
if (ret) {
dev_err(&i2c->dev,
"Failed to enable I2C level translator\n");
return ret;
}
}
/*
* The driver uses the LDO5CTRL_H register to control the LDO5 regulator.
* This is only valid if the SD_VSEL input of the PMIC is high. Let's
* check if the pin is available as GPIO and set it to high.
*/
pca9450->sd_vsel_gpio = gpiod_get_optional(pca9450->dev, "sd-vsel", GPIOD_OUT_HIGH);
if (IS_ERR(pca9450->sd_vsel_gpio)) {
dev_err(&i2c->dev, "Failed to get SD_VSEL GPIO\n");
return PTR_ERR(pca9450->sd_vsel_gpio);
}
dev_info(&i2c->dev, "%s probed.\n",
type == PCA9450_TYPE_PCA9450A ? "pca9450a" : "pca9450bc");
return 0;
}
static const struct of_device_id pca9450_of_match[] = {
{
.compatible = "nxp,pca9450a",
.data = (void *)PCA9450_TYPE_PCA9450A,
},
{
.compatible = "nxp,pca9450b",
.data = (void *)PCA9450_TYPE_PCA9450BC,
},
{
.compatible = "nxp,pca9450c",
.data = (void *)PCA9450_TYPE_PCA9450BC,
},
{ }
};
MODULE_DEVICE_TABLE(of, pca9450_of_match);
static struct i2c_driver pca9450_i2c_driver = {
.driver = {
.name = "nxp-pca9450",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = pca9450_of_match,
},
.probe = pca9450_i2c_probe,
};
module_i2c_driver(pca9450_i2c_driver);
MODULE_AUTHOR("Robin Gong <[email protected]>");
MODULE_DESCRIPTION("NXP PCA9450 Power Management IC driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/regulator/pca9450-regulator.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022, Linaro Ltd.
*/
#include <linux/bitfield.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#define MAX20411_UV_STEP 6250
#define MAX20411_BASE_UV 243750
#define MAX20411_MIN_SEL 41 /* 0.5V */
#define MAX20411_MAX_SEL 165 /* 1.275V */
#define MAX20411_VID_OFFSET 0x7
#define MAX20411_VID_MASK 0xff
#define MAX20411_SLEW_OFFSET 0x6
#define MAX20411_SLEW_DVS_MASK 0xc
#define MAX20411_SLEW_SR_MASK 0x3
struct max20411 {
struct device *dev;
struct device_node *of_node;
struct regulator_desc desc;
struct regulator_dev *rdev;
struct regmap *regmap;
};
static const unsigned int max20411_slew_rates[] = { 13100, 6600, 3300, 1600 };
static int max20411_enable_time(struct regulator_dev *rdev)
{
int voltage, rate, ret;
unsigned int val;
/* get voltage */
ret = regmap_read(rdev->regmap, rdev->desc->vsel_reg, &val);
if (ret)
return ret;
val &= rdev->desc->vsel_mask;
voltage = regulator_list_voltage_linear(rdev, val);
/* get rate */
ret = regmap_read(rdev->regmap, MAX20411_SLEW_OFFSET, &val);
if (ret)
return ret;
val = FIELD_GET(MAX20411_SLEW_SR_MASK, val);
rate = max20411_slew_rates[val];
return DIV_ROUND_UP(voltage, rate);
}
static const struct regmap_config max20411_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = 0xe,
};
static const struct regulator_ops max20411_ops = {
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear,
.enable_time = max20411_enable_time,
};
static const struct regulator_desc max20411_desc = {
.ops = &max20411_ops,
.owner = THIS_MODULE,
.type = REGULATOR_VOLTAGE,
.supply_name = "vin",
.name = "max20411",
/*
* voltage = 0.24375V + selector * 6.25mV
	 * with valid selectors between 41 and 165 (0.5V to 1.275V)
*/
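	/*
	 * Worked example (arithmetic only): selector 41 gives
	 * 243750 + 41 * 6250 = 500000 uV (0.5 V) and selector 165
	 * gives 243750 + 165 * 6250 = 1275000 uV (1.275 V).
	 */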
.min_uV = MAX20411_BASE_UV,
.uV_step = MAX20411_UV_STEP,
.linear_min_sel = MAX20411_MIN_SEL,
.n_voltages = MAX20411_MAX_SEL + 1,
.vsel_reg = MAX20411_VID_OFFSET,
.vsel_mask = MAX20411_VID_MASK,
.ramp_reg = MAX20411_SLEW_OFFSET,
.ramp_mask = MAX20411_SLEW_DVS_MASK,
.ramp_delay_table = max20411_slew_rates,
.n_ramp_values = ARRAY_SIZE(max20411_slew_rates),
};
static int max20411_probe(struct i2c_client *client)
{
struct regulator_init_data *init_data;
struct device *dev = &client->dev;
struct regulator_config cfg = {};
struct max20411 *max20411;
max20411 = devm_kzalloc(dev, sizeof(*max20411), GFP_KERNEL);
if (!max20411)
return -ENOMEM;
max20411->regmap = devm_regmap_init_i2c(client, &max20411_regmap_config);
if (IS_ERR(max20411->regmap)) {
dev_err(dev, "Failed to allocate regmap!\n");
return PTR_ERR(max20411->regmap);
}
max20411->dev = dev;
max20411->of_node = dev->of_node;
max20411->desc = max20411_desc;
init_data = of_get_regulator_init_data(max20411->dev, max20411->of_node, &max20411->desc);
if (!init_data)
return -ENODATA;
cfg.dev = max20411->dev;
cfg.init_data = init_data;
cfg.of_node = max20411->of_node;
cfg.driver_data = max20411;
cfg.ena_gpiod = gpiod_get(max20411->dev, "enable", GPIOD_ASIS);
if (IS_ERR(cfg.ena_gpiod))
return dev_err_probe(dev, PTR_ERR(cfg.ena_gpiod),
"unable to acquire enable gpio\n");
max20411->rdev = devm_regulator_register(max20411->dev, &max20411->desc, &cfg);
if (IS_ERR(max20411->rdev))
dev_err(max20411->dev, "Failed to register regulator\n");
return PTR_ERR_OR_ZERO(max20411->rdev);
}
static const struct of_device_id of_max20411_match_tbl[] = {
{ .compatible = "maxim,max20411", },
{ },
};
MODULE_DEVICE_TABLE(of, of_max20411_match_tbl);
static const struct i2c_device_id max20411_id[] = {
{ "max20411", 0 },
{ },
};
MODULE_DEVICE_TABLE(i2c, max20411_id);
static struct i2c_driver max20411_i2c_driver = {
.driver = {
.name = "max20411",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_max20411_match_tbl,
},
.probe = max20411_probe,
.id_table = max20411_id,
};
module_i2c_driver(max20411_i2c_driver);
MODULE_LICENSE("GPL");
| linux-master | drivers/regulator/max20411-regulator.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Regulators driver for Marvell 88PM8607
*
* Copyright (C) 2009 Marvell International Ltd.
* Haojian Zhuang <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/regulator/of_regulator.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/mfd/88pm860x.h>
#include <linux/module.h>
struct pm8607_regulator_info {
struct regulator_desc desc;
unsigned int *vol_suspend;
int slope_double;
};
static const unsigned int BUCK1_table[] = {
725000, 750000, 775000, 800000, 825000, 850000, 875000, 900000,
925000, 950000, 975000, 1000000, 1025000, 1050000, 1075000, 1100000,
1125000, 1150000, 1175000, 1200000, 1225000, 1250000, 1275000, 1300000,
1325000, 1350000, 1375000, 1400000, 1425000, 1450000, 1475000, 1500000,
0, 25000, 50000, 75000, 100000, 125000, 150000, 175000,
200000, 225000, 250000, 275000, 300000, 325000, 350000, 375000,
400000, 425000, 450000, 475000, 500000, 525000, 550000, 575000,
600000, 625000, 650000, 675000, 700000, 725000, 750000, 775000,
};
static const unsigned int BUCK1_suspend_table[] = {
0, 25000, 50000, 75000, 100000, 125000, 150000, 175000,
200000, 225000, 250000, 275000, 300000, 325000, 350000, 375000,
400000, 425000, 450000, 475000, 500000, 525000, 550000, 575000,
600000, 625000, 650000, 675000, 700000, 725000, 750000, 775000,
800000, 825000, 850000, 875000, 900000, 925000, 950000, 975000,
1000000, 1025000, 1050000, 1075000, 1100000, 1125000, 1150000, 1175000,
1200000, 1225000, 1250000, 1275000, 1300000, 1325000, 1350000, 1375000,
1400000, 1425000, 1450000, 1475000, 1500000, 1500000, 1500000, 1500000,
};
static const unsigned int BUCK2_table[] = {
0, 50000, 100000, 150000, 200000, 250000, 300000, 350000,
400000, 450000, 500000, 550000, 600000, 650000, 700000, 750000,
800000, 850000, 900000, 950000, 1000000, 1050000, 1100000, 1150000,
1200000, 1250000, 1300000, 1350000, 1400000, 1450000, 1500000, 1550000,
1600000, 1650000, 1700000, 1750000, 1800000, 1850000, 1900000, 1950000,
2000000, 2050000, 2100000, 2150000, 2200000, 2250000, 2300000, 2350000,
2400000, 2450000, 2500000, 2550000, 2600000, 2650000, 2700000, 2750000,
2800000, 2850000, 2900000, 2950000, 3000000, 3000000, 3000000, 3000000,
};
static const unsigned int BUCK2_suspend_table[] = {
0, 50000, 100000, 150000, 200000, 250000, 300000, 350000,
400000, 450000, 500000, 550000, 600000, 650000, 700000, 750000,
800000, 850000, 900000, 950000, 1000000, 1050000, 1100000, 1150000,
1200000, 1250000, 1300000, 1350000, 1400000, 1450000, 1500000, 1550000,
1600000, 1650000, 1700000, 1750000, 1800000, 1850000, 1900000, 1950000,
2000000, 2050000, 2100000, 2150000, 2200000, 2250000, 2300000, 2350000,
2400000, 2450000, 2500000, 2550000, 2600000, 2650000, 2700000, 2750000,
2800000, 2850000, 2900000, 2950000, 3000000, 3000000, 3000000, 3000000,
};
static const unsigned int BUCK3_table[] = {
0, 25000, 50000, 75000, 100000, 125000, 150000, 175000,
200000, 225000, 250000, 275000, 300000, 325000, 350000, 375000,
400000, 425000, 450000, 475000, 500000, 525000, 550000, 575000,
600000, 625000, 650000, 675000, 700000, 725000, 750000, 775000,
800000, 825000, 850000, 875000, 900000, 925000, 950000, 975000,
1000000, 1025000, 1050000, 1075000, 1100000, 1125000, 1150000, 1175000,
1200000, 1225000, 1250000, 1275000, 1300000, 1325000, 1350000, 1375000,
1400000, 1425000, 1450000, 1475000, 1500000, 1500000, 1500000, 1500000,
};
static const unsigned int BUCK3_suspend_table[] = {
0, 25000, 50000, 75000, 100000, 125000, 150000, 175000,
200000, 225000, 250000, 275000, 300000, 325000, 350000, 375000,
400000, 425000, 450000, 475000, 500000, 525000, 550000, 575000,
600000, 625000, 650000, 675000, 700000, 725000, 750000, 775000,
800000, 825000, 850000, 875000, 900000, 925000, 950000, 975000,
1000000, 1025000, 1050000, 1075000, 1100000, 1125000, 1150000, 1175000,
1200000, 1225000, 1250000, 1275000, 1300000, 1325000, 1350000, 1375000,
1400000, 1425000, 1450000, 1475000, 1500000, 1500000, 1500000, 1500000,
};
static const unsigned int LDO1_table[] = {
1800000, 1200000, 2800000, 0,
};
static const unsigned int LDO1_suspend_table[] = {
1800000, 1200000, 0, 0,
};
static const unsigned int LDO2_table[] = {
1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 3300000,
};
static const unsigned int LDO2_suspend_table[] = {
1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000,
};
static const unsigned int LDO3_table[] = {
1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 3300000,
};
static const unsigned int LDO3_suspend_table[] = {
1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000,
};
static const unsigned int LDO4_table[] = {
1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2900000, 3300000,
};
static const unsigned int LDO4_suspend_table[] = {
1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2900000, 2900000,
};
static const unsigned int LDO5_table[] = {
2900000, 3000000, 3100000, 3300000,
};
static const unsigned int LDO5_suspend_table[] = {
2900000, 0, 0, 0,
};
static const unsigned int LDO6_table[] = {
1800000, 1850000, 2600000, 2650000, 2700000, 2750000, 2800000, 3300000,
};
static const unsigned int LDO6_suspend_table[] = {
1800000, 1850000, 2600000, 2650000, 2700000, 2750000, 2800000, 2900000,
};
static const unsigned int LDO7_table[] = {
1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000,
};
static const unsigned int LDO7_suspend_table[] = {
1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000,
};
static const unsigned int LDO8_table[] = {
1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000,
};
static const unsigned int LDO8_suspend_table[] = {
1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000,
};
static const unsigned int LDO9_table[] = {
1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 3300000,
};
static const unsigned int LDO9_suspend_table[] = {
1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000,
};
static const unsigned int LDO10_table[] = {
1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 3300000,
1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000,
};
static const unsigned int LDO10_suspend_table[] = {
1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000,
1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000,
};
static const unsigned int LDO12_table[] = {
1800000, 1900000, 2700000, 2800000, 2900000, 3000000, 3100000, 3300000,
1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000,
};
static const unsigned int LDO12_suspend_table[] = {
1800000, 1900000, 2700000, 2800000, 2900000, 2900000, 2900000, 2900000,
1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000,
};
static const unsigned int LDO13_table[] = {
1200000, 1300000, 1800000, 2000000, 2500000, 2800000, 3000000, 0,
};
static const unsigned int LDO13_suspend_table[] = {
0,
};
static const unsigned int LDO14_table[] = {
1800000, 1850000, 2700000, 2750000, 2800000, 2850000, 2900000, 3300000,
};
static const unsigned int LDO14_suspend_table[] = {
1800000, 1850000, 2700000, 2750000, 2800000, 2850000, 2900000, 2900000,
};
static int pm8607_list_voltage(struct regulator_dev *rdev, unsigned index)
{
struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
int ret;
ret = regulator_list_voltage_table(rdev, index);
if (ret < 0)
return ret;
if (info->slope_double)
ret <<= 1;
return ret;
}
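/*
 * Example of the slope_double correction above (arithmetic only):
 * with slope_double set, a table value of 1000000 uV is reported as
 * 1000000 << 1 = 2000000 uV, i.e. the BUCK3 output range is doubled.
 */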
static const struct regulator_ops pm8607_regulator_ops = {
.list_voltage = pm8607_list_voltage,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
};
static const struct regulator_ops pm8606_preg_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
};
#define PM8606_PREG(ereg, ebit) \
{ \
.desc = { \
.name = "PREG", \
.of_match = of_match_ptr("PREG"), \
.regulators_node = of_match_ptr("regulators"), \
.ops = &pm8606_preg_ops, \
.type = REGULATOR_CURRENT, \
.id = PM8606_ID_PREG, \
.owner = THIS_MODULE, \
.enable_reg = PM8606_##ereg, \
.enable_mask = (ebit), \
.enable_is_inverted = true, \
}, \
}
#define PM8607_DVC(vreg, ureg, ubit, ereg, ebit) \
{ \
.desc = { \
.name = #vreg, \
.of_match = of_match_ptr(#vreg), \
.regulators_node = of_match_ptr("regulators"), \
.ops = &pm8607_regulator_ops, \
.type = REGULATOR_VOLTAGE, \
.id = PM8607_ID_##vreg, \
.owner = THIS_MODULE, \
.volt_table = vreg##_table, \
.n_voltages = ARRAY_SIZE(vreg##_table), \
.vsel_reg = PM8607_##vreg, \
.vsel_mask = ARRAY_SIZE(vreg##_table) - 1, \
.apply_reg = PM8607_##ureg, \
.apply_bit = (ubit), \
.enable_reg = PM8607_##ereg, \
.enable_mask = 1 << (ebit), \
}, \
.slope_double = (0), \
.vol_suspend = (unsigned int *)&vreg##_suspend_table, \
}
#define PM8607_LDO(_id, vreg, shift, ereg, ebit) \
{ \
.desc = { \
.name = "LDO" #_id, \
.of_match = of_match_ptr("LDO" #_id), \
.regulators_node = of_match_ptr("regulators"), \
.ops = &pm8607_regulator_ops, \
.type = REGULATOR_VOLTAGE, \
.id = PM8607_ID_LDO##_id, \
.owner = THIS_MODULE, \
.volt_table = LDO##_id##_table, \
.n_voltages = ARRAY_SIZE(LDO##_id##_table), \
.vsel_reg = PM8607_##vreg, \
.vsel_mask = (ARRAY_SIZE(LDO##_id##_table) - 1) << (shift), \
.enable_reg = PM8607_##ereg, \
.enable_mask = 1 << (ebit), \
}, \
.slope_double = (0), \
.vol_suspend = (unsigned int *)&LDO##_id##_suspend_table, \
}
static struct pm8607_regulator_info pm8607_regulator_info[] = {
PM8607_DVC(BUCK1, GO, BIT(0), SUPPLIES_EN11, 0),
PM8607_DVC(BUCK2, GO, BIT(1), SUPPLIES_EN11, 1),
PM8607_DVC(BUCK3, GO, BIT(2), SUPPLIES_EN11, 2),
PM8607_LDO(1, LDO1, 0, SUPPLIES_EN11, 3),
PM8607_LDO(2, LDO2, 0, SUPPLIES_EN11, 4),
PM8607_LDO(3, LDO3, 0, SUPPLIES_EN11, 5),
PM8607_LDO(4, LDO4, 0, SUPPLIES_EN11, 6),
PM8607_LDO(5, LDO5, 0, SUPPLIES_EN11, 7),
PM8607_LDO(6, LDO6, 0, SUPPLIES_EN12, 0),
PM8607_LDO(7, LDO7, 0, SUPPLIES_EN12, 1),
PM8607_LDO(8, LDO8, 0, SUPPLIES_EN12, 2),
PM8607_LDO(9, LDO9, 0, SUPPLIES_EN12, 3),
PM8607_LDO(10, LDO10, 0, SUPPLIES_EN12, 4),
PM8607_LDO(12, LDO12, 0, SUPPLIES_EN12, 5),
PM8607_LDO(13, VIBRATOR_SET, 1, VIBRATOR_SET, 0),
PM8607_LDO(14, LDO14, 0, SUPPLIES_EN12, 6),
};
static struct pm8607_regulator_info pm8606_regulator_info[] = {
PM8606_PREG(PREREGULATORB, 5),
};
static int pm8607_regulator_probe(struct platform_device *pdev)
{
struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
struct pm8607_regulator_info *info = NULL;
struct regulator_init_data *pdata = dev_get_platdata(&pdev->dev);
struct regulator_config config = { };
struct regulator_dev *rdev;
struct resource *res;
int i;
res = platform_get_resource(pdev, IORESOURCE_REG, 0);
if (res) {
		/* There are resources in the 88PM8607 regulator driver */
for (i = 0; i < ARRAY_SIZE(pm8607_regulator_info); i++) {
info = &pm8607_regulator_info[i];
if (info->desc.vsel_reg == res->start)
break;
}
if (i == ARRAY_SIZE(pm8607_regulator_info)) {
dev_err(&pdev->dev, "Failed to find regulator %llu\n",
(unsigned long long)res->start);
return -EINVAL;
}
} else {
		/* There's no resource in the 88PM8606 PREG regulator driver */
info = &pm8606_regulator_info[0];
/* i is used to check regulator ID */
i = -1;
}
/* check DVC ramp slope double */
if ((i == PM8607_ID_BUCK3) && chip->buck3_double)
info->slope_double = 1;
config.dev = chip->dev;
config.driver_data = info;
if (pdata)
config.init_data = pdata;
if (chip->id == CHIP_PM8607)
config.regmap = chip->regmap;
else
config.regmap = chip->regmap_companion;
rdev = devm_regulator_register(&pdev->dev, &info->desc, &config);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
info->desc.name);
return PTR_ERR(rdev);
}
platform_set_drvdata(pdev, info);
return 0;
}
static const struct platform_device_id pm8607_regulator_driver_ids[] = {
{
.name = "88pm860x-regulator",
.driver_data = 0,
}, {
.name = "88pm860x-preg",
.driver_data = 0,
},
{ },
};
MODULE_DEVICE_TABLE(platform, pm8607_regulator_driver_ids);
static struct platform_driver pm8607_regulator_driver = {
.driver = {
.name = "88pm860x-regulator",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = pm8607_regulator_probe,
.id_table = pm8607_regulator_driver_ids,
};
static int __init pm8607_regulator_init(void)
{
return platform_driver_register(&pm8607_regulator_driver);
}
subsys_initcall(pm8607_regulator_init);
static void __exit pm8607_regulator_exit(void)
{
platform_driver_unregister(&pm8607_regulator_driver);
}
module_exit(pm8607_regulator_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Haojian Zhuang <[email protected]>");
MODULE_DESCRIPTION("Regulator Driver for Marvell 88PM8607 PMIC");
MODULE_ALIAS("platform:88pm8607-regulator");
| linux-master | drivers/regulator/88pm8607.c |
// SPDX-License-Identifier: GPL-2.0+
//
// Regulator support for WM8400
//
// Copyright 2008 Wolfson Microelectronics PLC.
//
// Author: Mark Brown <[email protected]>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/regulator/driver.h>
#include <linux/mfd/wm8400-private.h>
static const struct linear_range wm8400_ldo_ranges[] = {
REGULATOR_LINEAR_RANGE(900000, 0, 14, 50000),
REGULATOR_LINEAR_RANGE(1700000, 15, 31, 100000),
};
static const struct regulator_ops wm8400_ldo_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.list_voltage = regulator_list_voltage_linear_range,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.map_voltage = regulator_map_voltage_linear_range,
};
static unsigned int wm8400_dcdc_get_mode(struct regulator_dev *dev)
{
struct regmap *rmap = rdev_get_regmap(dev);
int offset = (rdev_get_id(dev) - WM8400_DCDC1) * 2;
u16 data[2];
int ret;
ret = regmap_bulk_read(rmap, WM8400_DCDC1_CONTROL_1 + offset, data, 2);
if (ret != 0)
return 0;
/* Datasheet: hibernate */
if (data[0] & WM8400_DC1_SLEEP)
return REGULATOR_MODE_STANDBY;
/* Datasheet: standby */
if (!(data[0] & WM8400_DC1_ACTIVE))
return REGULATOR_MODE_IDLE;
/* Datasheet: active with or without force PWM */
if (data[1] & WM8400_DC1_FRC_PWM)
return REGULATOR_MODE_FAST;
else
return REGULATOR_MODE_NORMAL;
}
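/*
 * Summary of the register-to-mode mapping implemented above:
 * DC1_SLEEP set maps to STANDBY, DC1_ACTIVE clear maps to IDLE, and
 * otherwise FRC_PWM selects FAST while its absence selects NORMAL.
 */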
static int wm8400_dcdc_set_mode(struct regulator_dev *dev, unsigned int mode)
{
struct regmap *rmap = rdev_get_regmap(dev);
int offset = (rdev_get_id(dev) - WM8400_DCDC1) * 2;
int ret;
switch (mode) {
case REGULATOR_MODE_FAST:
/* Datasheet: active with force PWM */
ret = regmap_update_bits(rmap, WM8400_DCDC1_CONTROL_2 + offset,
WM8400_DC1_FRC_PWM, WM8400_DC1_FRC_PWM);
if (ret != 0)
return ret;
return regmap_update_bits(rmap, WM8400_DCDC1_CONTROL_1 + offset,
WM8400_DC1_ACTIVE | WM8400_DC1_SLEEP,
WM8400_DC1_ACTIVE);
case REGULATOR_MODE_NORMAL:
/* Datasheet: active */
ret = regmap_update_bits(rmap, WM8400_DCDC1_CONTROL_2 + offset,
WM8400_DC1_FRC_PWM, 0);
if (ret != 0)
return ret;
return regmap_update_bits(rmap, WM8400_DCDC1_CONTROL_1 + offset,
WM8400_DC1_ACTIVE | WM8400_DC1_SLEEP,
WM8400_DC1_ACTIVE);
case REGULATOR_MODE_IDLE:
/* Datasheet: standby */
return regmap_update_bits(rmap, WM8400_DCDC1_CONTROL_1 + offset,
WM8400_DC1_ACTIVE | WM8400_DC1_SLEEP, 0);
default:
return -EINVAL;
}
}
static unsigned int wm8400_dcdc_get_optimum_mode(struct regulator_dev *dev,
int input_uV, int output_uV,
int load_uA)
{
return REGULATOR_MODE_NORMAL;
}
static const struct regulator_ops wm8400_dcdc_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_mode = wm8400_dcdc_get_mode,
.set_mode = wm8400_dcdc_set_mode,
.get_optimum_mode = wm8400_dcdc_get_optimum_mode,
};
static struct regulator_desc regulators[] = {
{
.name = "LDO1",
.id = WM8400_LDO1,
.ops = &wm8400_ldo_ops,
.enable_reg = WM8400_LDO1_CONTROL,
.enable_mask = WM8400_LDO1_ENA,
.n_voltages = WM8400_LDO1_VSEL_MASK + 1,
.linear_ranges = wm8400_ldo_ranges,
.n_linear_ranges = ARRAY_SIZE(wm8400_ldo_ranges),
.vsel_reg = WM8400_LDO1_CONTROL,
.vsel_mask = WM8400_LDO1_VSEL_MASK,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
},
{
.name = "LDO2",
.id = WM8400_LDO2,
.ops = &wm8400_ldo_ops,
.enable_reg = WM8400_LDO2_CONTROL,
.enable_mask = WM8400_LDO2_ENA,
.n_voltages = WM8400_LDO2_VSEL_MASK + 1,
.linear_ranges = wm8400_ldo_ranges,
.n_linear_ranges = ARRAY_SIZE(wm8400_ldo_ranges),
.type = REGULATOR_VOLTAGE,
.vsel_reg = WM8400_LDO2_CONTROL,
.vsel_mask = WM8400_LDO2_VSEL_MASK,
.owner = THIS_MODULE,
},
{
.name = "LDO3",
.id = WM8400_LDO3,
.ops = &wm8400_ldo_ops,
.enable_reg = WM8400_LDO3_CONTROL,
.enable_mask = WM8400_LDO3_ENA,
.n_voltages = WM8400_LDO3_VSEL_MASK + 1,
.linear_ranges = wm8400_ldo_ranges,
.n_linear_ranges = ARRAY_SIZE(wm8400_ldo_ranges),
.vsel_reg = WM8400_LDO3_CONTROL,
.vsel_mask = WM8400_LDO3_VSEL_MASK,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
},
{
.name = "LDO4",
.id = WM8400_LDO4,
.ops = &wm8400_ldo_ops,
.enable_reg = WM8400_LDO4_CONTROL,
.enable_mask = WM8400_LDO4_ENA,
.n_voltages = WM8400_LDO4_VSEL_MASK + 1,
.linear_ranges = wm8400_ldo_ranges,
.n_linear_ranges = ARRAY_SIZE(wm8400_ldo_ranges),
.vsel_reg = WM8400_LDO4_CONTROL,
.vsel_mask = WM8400_LDO4_VSEL_MASK,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
},
{
.name = "DCDC1",
.id = WM8400_DCDC1,
.ops = &wm8400_dcdc_ops,
.enable_reg = WM8400_DCDC1_CONTROL_1,
.enable_mask = WM8400_DC1_ENA_MASK,
.n_voltages = WM8400_DC1_VSEL_MASK + 1,
.vsel_reg = WM8400_DCDC1_CONTROL_1,
.vsel_mask = WM8400_DC1_VSEL_MASK,
.min_uV = 850000,
.uV_step = 25000,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
},
{
.name = "DCDC2",
.id = WM8400_DCDC2,
.ops = &wm8400_dcdc_ops,
.enable_reg = WM8400_DCDC2_CONTROL_1,
.enable_mask = WM8400_DC2_ENA_MASK,
.n_voltages = WM8400_DC2_VSEL_MASK + 1,
.vsel_reg = WM8400_DCDC2_CONTROL_1,
.vsel_mask = WM8400_DC2_VSEL_MASK,
.min_uV = 850000,
.uV_step = 25000,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
},
};
static int wm8400_regulator_probe(struct platform_device *pdev)
{
struct wm8400 *wm8400 = container_of(pdev, struct wm8400, regulators[pdev->id]);
struct regulator_config config = { };
struct regulator_dev *rdev;
config.dev = &pdev->dev;
config.init_data = dev_get_platdata(&pdev->dev);
config.driver_data = wm8400;
config.regmap = wm8400->regmap;
rdev = devm_regulator_register(&pdev->dev, ®ulators[pdev->id],
&config);
if (IS_ERR(rdev))
return PTR_ERR(rdev);
platform_set_drvdata(pdev, rdev);
return 0;
}
static struct platform_driver wm8400_regulator_driver = {
.driver = {
.name = "wm8400-regulator",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = wm8400_regulator_probe,
};
/**
 * wm8400_register_regulator - enable software control of a WM8400 regulator
 * @dev: The WM8400 device to operate on.
 * @reg: The regulator to control.
 * @initdata: Regulator initdata for the regulator.
 *
 * This function enables software control of a WM8400 regulator via
 * the regulator API. It is intended to be called from the
 * platform_init() callback of the WM8400 MFD driver.
 */
int wm8400_register_regulator(struct device *dev, int reg,
struct regulator_init_data *initdata)
{
struct wm8400 *wm8400 = dev_get_drvdata(dev);
if (wm8400->regulators[reg].name)
return -EBUSY;
initdata->driver_data = wm8400;
wm8400->regulators[reg].name = "wm8400-regulator";
wm8400->regulators[reg].id = reg;
wm8400->regulators[reg].dev.parent = dev;
wm8400->regulators[reg].dev.platform_data = initdata;
return platform_device_register(&wm8400->regulators[reg]);
}
EXPORT_SYMBOL_GPL(wm8400_register_regulator);
static int __init wm8400_regulator_init(void)
{
return platform_driver_register(&wm8400_regulator_driver);
}
subsys_initcall(wm8400_regulator_init);
static void __exit wm8400_regulator_exit(void)
{
platform_driver_unregister(&wm8400_regulator_driver);
}
module_exit(wm8400_regulator_exit);
MODULE_AUTHOR("Mark Brown <[email protected]>");
MODULE_DESCRIPTION("WM8400 regulator driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:wm8400-regulator");
| linux-master | drivers/regulator/wm8400-regulator.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Voltage and current regulation for AD5398 and AD5821
*
* Copyright 2010 Analog Devices Inc.
*
* Enter bugs at http://blackfin.uclinux.org/
*/
#include <linux/module.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#define AD5398_CURRENT_EN_MASK 0x8000
struct ad5398_chip_info {
struct i2c_client *client;
int min_uA;
int max_uA;
unsigned int current_level;
unsigned int current_mask;
unsigned int current_offset;
struct regulator_dev *rdev;
};
static int ad5398_calc_current(struct ad5398_chip_info *chip,
unsigned selector)
{
unsigned range_uA = chip->max_uA - chip->min_uA;
return chip->min_uA + (selector * range_uA / chip->current_level);
}
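/*
 * Worked example (arithmetic only) for the df_10_4_120 format used
 * below (0..120000 uA spread over 1 << 10 = 1024 levels): selector
 * 512 maps to 0 + 512 * 120000 / 1024 = 60000 uA.
 */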
static int ad5398_read_reg(struct i2c_client *client, unsigned short *data)
{
unsigned short val;
int ret;
ret = i2c_master_recv(client, (char *)&val, 2);
if (ret < 0) {
dev_err(&client->dev, "I2C read error\n");
return ret;
}
*data = be16_to_cpu(val);
return ret;
}
static int ad5398_write_reg(struct i2c_client *client, const unsigned short data)
{
unsigned short val;
int ret;
val = cpu_to_be16(data);
ret = i2c_master_send(client, (char *)&val, 2);
if (ret != 2) {
dev_err(&client->dev, "I2C write error\n");
return ret < 0 ? ret : -EIO;
}
return 0;
}
static int ad5398_get_current_limit(struct regulator_dev *rdev)
{
struct ad5398_chip_info *chip = rdev_get_drvdata(rdev);
struct i2c_client *client = chip->client;
unsigned short data;
int ret;
ret = ad5398_read_reg(client, &data);
if (ret < 0)
return ret;
ret = (data & chip->current_mask) >> chip->current_offset;
return ad5398_calc_current(chip, ret);
}
static int ad5398_set_current_limit(struct regulator_dev *rdev, int min_uA, int max_uA)
{
struct ad5398_chip_info *chip = rdev_get_drvdata(rdev);
struct i2c_client *client = chip->client;
unsigned range_uA = chip->max_uA - chip->min_uA;
unsigned selector;
unsigned short data;
int ret;
if (min_uA < chip->min_uA)
min_uA = chip->min_uA;
if (max_uA > chip->max_uA)
max_uA = chip->max_uA;
if (min_uA > chip->max_uA || max_uA < chip->min_uA)
return -EINVAL;
selector = DIV_ROUND_UP((min_uA - chip->min_uA) * chip->current_level,
range_uA);
if (ad5398_calc_current(chip, selector) > max_uA)
return -EINVAL;
dev_dbg(&client->dev, "changing current %duA\n",
ad5398_calc_current(chip, selector));
/* read chip enable bit */
ret = ad5398_read_reg(client, &data);
if (ret < 0)
return ret;
/* prepare register data */
selector = (selector << chip->current_offset) & chip->current_mask;
data = (unsigned short)selector | (data & AD5398_CURRENT_EN_MASK);
/* write the new current value back as well as enable bit */
ret = ad5398_write_reg(client, data);
return ret;
}
static int ad5398_is_enabled(struct regulator_dev *rdev)
{
struct ad5398_chip_info *chip = rdev_get_drvdata(rdev);
struct i2c_client *client = chip->client;
unsigned short data;
int ret;
ret = ad5398_read_reg(client, &data);
if (ret < 0)
return ret;
if (data & AD5398_CURRENT_EN_MASK)
return 1;
else
return 0;
}
static int ad5398_enable(struct regulator_dev *rdev)
{
struct ad5398_chip_info *chip = rdev_get_drvdata(rdev);
struct i2c_client *client = chip->client;
unsigned short data;
int ret;
ret = ad5398_read_reg(client, &data);
if (ret < 0)
return ret;
if (data & AD5398_CURRENT_EN_MASK)
return 0;
data |= AD5398_CURRENT_EN_MASK;
ret = ad5398_write_reg(client, data);
return ret;
}
static int ad5398_disable(struct regulator_dev *rdev)
{
struct ad5398_chip_info *chip = rdev_get_drvdata(rdev);
struct i2c_client *client = chip->client;
unsigned short data;
int ret;
ret = ad5398_read_reg(client, &data);
if (ret < 0)
return ret;
if (!(data & AD5398_CURRENT_EN_MASK))
return 0;
data &= ~AD5398_CURRENT_EN_MASK;
ret = ad5398_write_reg(client, data);
return ret;
}
static const struct regulator_ops ad5398_ops = {
.get_current_limit = ad5398_get_current_limit,
.set_current_limit = ad5398_set_current_limit,
.enable = ad5398_enable,
.disable = ad5398_disable,
.is_enabled = ad5398_is_enabled,
};
static const struct regulator_desc ad5398_reg = {
.name = "isink",
.id = 0,
.ops = &ad5398_ops,
.type = REGULATOR_CURRENT,
.owner = THIS_MODULE,
};
struct ad5398_current_data_format {
int current_bits;
int current_offset;
int min_uA;
int max_uA;
};
static const struct ad5398_current_data_format df_10_4_120 = {10, 4, 0, 120000};
static const struct i2c_device_id ad5398_id[] = {
{ "ad5398", (kernel_ulong_t)&df_10_4_120 },
{ "ad5821", (kernel_ulong_t)&df_10_4_120 },
{ }
};
MODULE_DEVICE_TABLE(i2c, ad5398_id);
static int ad5398_probe(struct i2c_client *client)
{
const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct regulator_init_data *init_data = dev_get_platdata(&client->dev);
struct regulator_config config = { };
struct ad5398_chip_info *chip;
const struct ad5398_current_data_format *df =
(struct ad5398_current_data_format *)id->driver_data;
if (!init_data)
return -EINVAL;
chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
config.dev = &client->dev;
config.init_data = init_data;
config.driver_data = chip;
chip->client = client;
chip->min_uA = df->min_uA;
chip->max_uA = df->max_uA;
chip->current_level = 1 << df->current_bits;
chip->current_offset = df->current_offset;
chip->current_mask = (chip->current_level - 1) << chip->current_offset;
chip->rdev = devm_regulator_register(&client->dev, &ad5398_reg,
&config);
if (IS_ERR(chip->rdev)) {
dev_err(&client->dev, "failed to register %s %s\n",
id->name, ad5398_reg.name);
return PTR_ERR(chip->rdev);
}
i2c_set_clientdata(client, chip);
dev_dbg(&client->dev, "%s regulator driver is registered.\n", id->name);
return 0;
}
static struct i2c_driver ad5398_driver = {
.probe = ad5398_probe,
.driver = {
.name = "ad5398",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.id_table = ad5398_id,
};
static int __init ad5398_init(void)
{
return i2c_add_driver(&ad5398_driver);
}
subsys_initcall(ad5398_init);
static void __exit ad5398_exit(void)
{
i2c_del_driver(&ad5398_driver);
}
module_exit(ad5398_exit);
MODULE_DESCRIPTION("AD5398 and AD5821 current regulator driver");
MODULE_AUTHOR("Sonic Zhang");
MODULE_LICENSE("GPL");
| linux-master | drivers/regulator/ad5398.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/of.h>
#include <linux/regulator/driver.h>
#include <linux/regmap.h>
static const struct regulator_ops pg86x_ops = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear_range,
};
static const struct linear_range pg86x_buck1_ranges[] = {
REGULATOR_LINEAR_RANGE( 0, 0, 10, 0),
REGULATOR_LINEAR_RANGE(1000000, 11, 34, 25000),
REGULATOR_LINEAR_RANGE(1600000, 35, 47, 50000),
};
static const struct linear_range pg86x_buck2_ranges[] = {
REGULATOR_LINEAR_RANGE( 0, 0, 15, 0),
REGULATOR_LINEAR_RANGE(1000000, 16, 39, 25000),
REGULATOR_LINEAR_RANGE(1600000, 40, 52, 50000),
};
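/*
 * Worked example (arithmetic only) for buck1: selectors 0..10 all map
 * to 0 uV, selector 11 starts the 25 mV/step range at 1000000 uV (so
 * selector 34 is 1575000 uV), and selector 35 starts the 50 mV/step
 * range at 1600000 uV, ending at 2200000 uV for selector 47.
 */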
static const struct regulator_desc pg86x_regulators[] = {
{
.id = 0,
.type = REGULATOR_VOLTAGE,
.name = "buck1",
.of_match = of_match_ptr("buck1"),
.n_voltages = 11 + 24 + 13,
.linear_ranges = pg86x_buck1_ranges,
.n_linear_ranges = 3,
.vsel_reg = 0x24,
.vsel_mask = 0xff,
.ops = &pg86x_ops,
.owner = THIS_MODULE
},
{
.id = 1,
.type = REGULATOR_VOLTAGE,
.name = "buck2",
.of_match = of_match_ptr("buck2"),
.n_voltages = 16 + 24 + 13,
.linear_ranges = pg86x_buck2_ranges,
.n_linear_ranges = 3,
.vsel_reg = 0x13,
.vsel_mask = 0xff,
.ops = &pg86x_ops,
.owner = THIS_MODULE
},
};
static const struct regmap_config pg86x_regmap = {
.reg_bits = 8,
.val_bits = 8,
};
static int pg86x_i2c_probe(struct i2c_client *i2c)
{
int id, ret;
struct regulator_config config = {.dev = &i2c->dev};
struct regmap *regmap = devm_regmap_init_i2c(i2c, &pg86x_regmap);
if (IS_ERR(regmap)) {
ret = PTR_ERR(regmap);
dev_err(&i2c->dev, "regmap init failed: %d\n", ret);
return ret;
}
for (id = 0; id < ARRAY_SIZE(pg86x_regulators); id++) {
struct regulator_dev *rdev;
rdev = devm_regulator_register(&i2c->dev,
&pg86x_regulators[id],
&config);
if (IS_ERR(rdev)) {
ret = PTR_ERR(rdev);
dev_err(&i2c->dev, "failed to register %s: %d\n",
pg86x_regulators[id].name, ret);
return ret;
}
}
return 0;
}
static const struct of_device_id __maybe_unused pg86x_dt_ids[] = {
{ .compatible = "marvell,88pg867" },
{ .compatible = "marvell,88pg868" },
{ }
};
MODULE_DEVICE_TABLE(of, pg86x_dt_ids);
static const struct i2c_device_id pg86x_i2c_id[] = {
{ "88pg867", },
{ "88pg868", },
{ }
};
MODULE_DEVICE_TABLE(i2c, pg86x_i2c_id);
static struct i2c_driver pg86x_regulator_driver = {
.driver = {
.name = "88pg86x",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(pg86x_dt_ids),
},
.probe = pg86x_i2c_probe,
.id_table = pg86x_i2c_id,
};
module_i2c_driver(pg86x_regulator_driver);
MODULE_DESCRIPTION("Marvell 88PG86X voltage regulator");
MODULE_AUTHOR("Alexander Monakov <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/regulator/88pg86x.c |
// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2019 five technologies GmbH
// Author: Markus Reichl <[email protected]>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/of.h>
#include <linux/regulator/driver.h>
#include <linux/regmap.h>
#define VOL_MIN_IDX 0x00
#define VOL_MAX_IDX 0x7ff
/* Register definitions */
#define MP8859_VOUT_L_REG		0	/* 3 low bits */
#define MP8859_VOUT_H_REG		1	/* 8 high bits */
#define MP8859_VOUT_GO_REG 2
#define MP8859_IOUT_LIM_REG 3
#define MP8859_CTL1_REG 4
#define MP8859_CTL2_REG 5
#define MP8859_RESERVED1_REG 6
#define MP8859_RESERVED2_REG 7
#define MP8859_RESERVED3_REG 8
#define MP8859_STATUS_REG 9
#define MP8859_INTERRUPT_REG 0x0A
#define MP8859_MASK_REG 0x0B
#define MP8859_ID1_REG 0x0C
#define MP8859_MFR_ID_REG 0x27
#define MP8859_DEV_ID_REG 0x28
#define MP8859_IC_REV_REG 0x29
#define MP8859_MAX_REG 0x29
#define MP8859_GO_BIT 0x01
static int mp8859_set_voltage_sel(struct regulator_dev *rdev, unsigned int sel)
{
int ret;
ret = regmap_write(rdev->regmap, MP8859_VOUT_L_REG, sel & 0x7);
if (ret)
return ret;
ret = regmap_write(rdev->regmap, MP8859_VOUT_H_REG, sel >> 3);
if (ret)
return ret;
ret = regmap_update_bits(rdev->regmap, MP8859_VOUT_GO_REG,
MP8859_GO_BIT, 1);
return ret;
}
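/*
 * Worked example (arithmetic only): with a 10 mV step from 0 V, a
 * 1.0 V target is selector 100, which the function above splits into
 * VOUT_L = 100 & 0x7 = 4 and VOUT_H = 100 >> 3 = 12 before latching
 * the new value via the GO bit.
 */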
static int mp8859_get_voltage_sel(struct regulator_dev *rdev)
{
unsigned int val_tmp;
unsigned int val;
int ret;
ret = regmap_read(rdev->regmap, MP8859_VOUT_H_REG, &val_tmp);
if (ret)
return ret;
val = val_tmp << 3;
ret = regmap_read(rdev->regmap, MP8859_VOUT_L_REG, &val_tmp);
if (ret)
return ret;
val |= val_tmp & 0x07;
return val;
}
static const struct linear_range mp8859_dcdc_ranges[] = {
REGULATOR_LINEAR_RANGE(0, VOL_MIN_IDX, VOL_MAX_IDX, 10000),
};
static const struct regmap_config mp8859_regmap = {
.reg_bits = 8,
.val_bits = 8,
.max_register = MP8859_MAX_REG,
.cache_type = REGCACHE_RBTREE,
};
static const struct regulator_ops mp8859_ops = {
.set_voltage_sel = mp8859_set_voltage_sel,
.get_voltage_sel = mp8859_get_voltage_sel,
.list_voltage = regulator_list_voltage_linear_range,
};
static const struct regulator_desc mp8859_regulators[] = {
{
.id = 0,
.type = REGULATOR_VOLTAGE,
.name = "mp8859_dcdc",
.supply_name = "vin",
.of_match = of_match_ptr("mp8859_dcdc"),
.n_voltages = VOL_MAX_IDX + 1,
.linear_ranges = mp8859_dcdc_ranges,
.n_linear_ranges = 1,
.ops = &mp8859_ops,
.owner = THIS_MODULE,
},
};
static int mp8859_i2c_probe(struct i2c_client *i2c)
{
int ret;
struct regulator_config config = {.dev = &i2c->dev};
struct regmap *regmap = devm_regmap_init_i2c(i2c, &mp8859_regmap);
struct regulator_dev *rdev;
if (IS_ERR(regmap)) {
ret = PTR_ERR(regmap);
dev_err(&i2c->dev, "regmap init failed: %d\n", ret);
return ret;
}
rdev = devm_regulator_register(&i2c->dev, &mp8859_regulators[0],
&config);
if (IS_ERR(rdev)) {
ret = PTR_ERR(rdev);
dev_err(&i2c->dev, "failed to register %s: %d\n",
mp8859_regulators[0].name, ret);
return ret;
}
return 0;
}
static const struct of_device_id mp8859_dt_id[] __maybe_unused = {
{.compatible = "mps,mp8859"},
{},
};
MODULE_DEVICE_TABLE(of, mp8859_dt_id);
static const struct i2c_device_id mp8859_i2c_id[] = {
{ "mp8859", },
{ },
};
MODULE_DEVICE_TABLE(i2c, mp8859_i2c_id);
static struct i2c_driver mp8859_regulator_driver = {
.driver = {
.name = "mp8859",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(mp8859_dt_id),
},
.probe = mp8859_i2c_probe,
.id_table = mp8859_i2c_id,
};
module_i2c_driver(mp8859_regulator_driver);
MODULE_DESCRIPTION("Monolithic Power Systems MP8859 voltage regulator driver");
MODULE_AUTHOR("Markus Reichl <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/regulator/mp8859.c |
// SPDX-License-Identifier: GPL-2.0-only
// Copyright Axis Communications AB
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/regulator/of_regulator.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/driver.h>
#include <dt-bindings/regulator/ti,tps62864.h>
#define TPS6286X_VOUT1 0x01
#define TPS6286X_VOUT1_VO1_SET GENMASK(7, 0)
#define TPS6286X_CONTROL 0x03
#define TPS6286X_CONTROL_FPWM BIT(4)
#define TPS6286X_CONTROL_SWEN BIT(5)
#define TPS6286X_MIN_MV 400
#define TPS6286X_MAX_MV 1675
#define TPS6286X_STEP_MV 5
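/*
 * 400..1675 mV in 5 mV steps is 256 codes, exactly filling the 8-bit
 * VO1_SET field: code 0x00 is 400 mV and code 0xff is
 * 400 + 255 * 5 = 1675 mV.
 */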
static const struct regmap_config tps6286x_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
};
static int tps6286x_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
unsigned int val;
switch (mode) {
case REGULATOR_MODE_NORMAL:
val = 0;
break;
case REGULATOR_MODE_FAST:
val = TPS6286X_CONTROL_FPWM;
break;
default:
return -EINVAL;
}
return regmap_update_bits(rdev->regmap, TPS6286X_CONTROL,
TPS6286X_CONTROL_FPWM, val);
}
static unsigned int tps6286x_get_mode(struct regulator_dev *rdev)
{
unsigned int val;
int ret;
ret = regmap_read(rdev->regmap, TPS6286X_CONTROL, &val);
if (ret < 0)
return 0;
return (val & TPS6286X_CONTROL_FPWM) ? REGULATOR_MODE_FAST : REGULATOR_MODE_NORMAL;
}
static const struct regulator_ops tps6286x_regulator_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.set_mode = tps6286x_set_mode,
.get_mode = tps6286x_get_mode,
.is_enabled = regulator_is_enabled_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear,
};
static unsigned int tps6286x_of_map_mode(unsigned int mode)
{
switch (mode) {
case TPS62864_MODE_NORMAL:
return REGULATOR_MODE_NORMAL;
case TPS62864_MODE_FPWM:
return REGULATOR_MODE_FAST;
default:
return REGULATOR_MODE_INVALID;
}
}
static const struct regulator_desc tps6286x_reg = {
.name = "tps6286x",
.of_match = "SW",
.owner = THIS_MODULE,
.ops = &tps6286x_regulator_ops,
.of_map_mode = tps6286x_of_map_mode,
.regulators_node = "regulators",
.type = REGULATOR_VOLTAGE,
.n_voltages = ((TPS6286X_MAX_MV - TPS6286X_MIN_MV) / TPS6286X_STEP_MV) + 1,
.min_uV = TPS6286X_MIN_MV * 1000,
.uV_step = TPS6286X_STEP_MV * 1000,
.vsel_reg = TPS6286X_VOUT1,
.vsel_mask = TPS6286X_VOUT1_VO1_SET,
.enable_reg = TPS6286X_CONTROL,
.enable_mask = TPS6286X_CONTROL_SWEN,
.ramp_delay = 1000,
/* tDelay + tRamp, rounded up */
.enable_time = 3000,
};
static const struct of_device_id tps6286x_dt_ids[] = {
{ .compatible = "ti,tps62864", },
{ .compatible = "ti,tps62866", },
{ .compatible = "ti,tps62868", },
{ .compatible = "ti,tps62869", },
{ }
};
MODULE_DEVICE_TABLE(of, tps6286x_dt_ids);
static int tps6286x_i2c_probe(struct i2c_client *i2c)
{
struct device *dev = &i2c->dev;
struct regulator_config config = {};
struct regulator_dev *rdev;
struct regmap *regmap;
regmap = devm_regmap_init_i2c(i2c, &tps6286x_regmap_config);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
config.dev = &i2c->dev;
config.of_node = dev->of_node;
config.regmap = regmap;
rdev = devm_regulator_register(&i2c->dev, &tps6286x_reg, &config);
if (IS_ERR(rdev)) {
dev_err(&i2c->dev, "Failed to register tps6286x regulator\n");
return PTR_ERR(rdev);
}
return 0;
}
static const struct i2c_device_id tps6286x_i2c_id[] = {
{ "tps62864", 0 },
{ "tps62866", 0 },
{ "tps62868", 0 },
{ "tps62869", 0 },
{},
};
MODULE_DEVICE_TABLE(i2c, tps6286x_i2c_id);
static struct i2c_driver tps6286x_regulator_driver = {
.driver = {
.name = "tps6286x",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = tps6286x_dt_ids,
},
.probe = tps6286x_i2c_probe,
.id_table = tps6286x_i2c_id,
};
module_i2c_driver(tps6286x_regulator_driver);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/regulator/tps6286x-regulator.c |
// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (c) 2011 Samsung Electronics Co., Ltd
// http://www.samsung.com
#include <linux/err.h>
#include <linux/of_gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/mfd/samsung/core.h>
#include <linux/mfd/samsung/s5m8767.h>
#include <linux/regulator/of_regulator.h>
#include <linux/regmap.h>
#define S5M8767_OPMODE_NORMAL_MODE 0x1
struct s5m8767_info {
struct device *dev;
struct sec_pmic_dev *iodev;
int num_regulators;
struct sec_opmode_data *opmode;
int ramp_delay;
bool buck2_ramp;
bool buck3_ramp;
bool buck4_ramp;
bool buck2_gpiodvs;
bool buck3_gpiodvs;
bool buck4_gpiodvs;
u8 buck2_vol[8];
u8 buck3_vol[8];
u8 buck4_vol[8];
int buck_gpios[3];
int buck_ds[3];
int buck_gpioindex;
};
struct sec_voltage_desc {
int max;
int min;
int step;
};
static const struct sec_voltage_desc buck_voltage_val1 = {
.max = 2225000,
.min = 650000,
.step = 6250,
};
static const struct sec_voltage_desc buck_voltage_val2 = {
.max = 1600000,
.min = 600000,
.step = 6250,
};
static const struct sec_voltage_desc buck_voltage_val3 = {
.max = 3000000,
.min = 750000,
.step = 12500,
};
static const struct sec_voltage_desc ldo_voltage_val1 = {
.max = 3950000,
.min = 800000,
.step = 50000,
};
static const struct sec_voltage_desc ldo_voltage_val2 = {
.max = 2375000,
.min = 800000,
.step = 25000,
};
static const struct sec_voltage_desc *reg_voltage_map[] = {
[S5M8767_LDO1] = &ldo_voltage_val2,
[S5M8767_LDO2] = &ldo_voltage_val2,
[S5M8767_LDO3] = &ldo_voltage_val1,
[S5M8767_LDO4] = &ldo_voltage_val1,
[S5M8767_LDO5] = &ldo_voltage_val1,
[S5M8767_LDO6] = &ldo_voltage_val2,
[S5M8767_LDO7] = &ldo_voltage_val2,
[S5M8767_LDO8] = &ldo_voltage_val2,
[S5M8767_LDO9] = &ldo_voltage_val1,
[S5M8767_LDO10] = &ldo_voltage_val1,
[S5M8767_LDO11] = &ldo_voltage_val1,
[S5M8767_LDO12] = &ldo_voltage_val1,
[S5M8767_LDO13] = &ldo_voltage_val1,
[S5M8767_LDO14] = &ldo_voltage_val1,
[S5M8767_LDO15] = &ldo_voltage_val2,
[S5M8767_LDO16] = &ldo_voltage_val1,
[S5M8767_LDO17] = &ldo_voltage_val1,
[S5M8767_LDO18] = &ldo_voltage_val1,
[S5M8767_LDO19] = &ldo_voltage_val1,
[S5M8767_LDO20] = &ldo_voltage_val1,
[S5M8767_LDO21] = &ldo_voltage_val1,
[S5M8767_LDO22] = &ldo_voltage_val1,
[S5M8767_LDO23] = &ldo_voltage_val1,
[S5M8767_LDO24] = &ldo_voltage_val1,
[S5M8767_LDO25] = &ldo_voltage_val1,
[S5M8767_LDO26] = &ldo_voltage_val1,
[S5M8767_LDO27] = &ldo_voltage_val1,
[S5M8767_LDO28] = &ldo_voltage_val1,
[S5M8767_BUCK1] = &buck_voltage_val1,
[S5M8767_BUCK2] = &buck_voltage_val2,
[S5M8767_BUCK3] = &buck_voltage_val2,
[S5M8767_BUCK4] = &buck_voltage_val2,
[S5M8767_BUCK5] = &buck_voltage_val1,
[S5M8767_BUCK6] = &buck_voltage_val1,
[S5M8767_BUCK7] = &buck_voltage_val3,
[S5M8767_BUCK8] = &buck_voltage_val3,
[S5M8767_BUCK9] = &buck_voltage_val3,
};
static const unsigned int s5m8767_opmode_reg[][4] = {
/* {OFF, ON, LOWPOWER, SUSPEND} */
/* LDO1 ... LDO28 */
{0x0, 0x3, 0x2, 0x1}, /* LDO1 */
{0x0, 0x3, 0x2, 0x1},
{0x0, 0x3, 0x2, 0x1},
{0x0, 0x0, 0x0, 0x0},
{0x0, 0x3, 0x2, 0x1}, /* LDO5 */
{0x0, 0x3, 0x2, 0x1},
{0x0, 0x3, 0x2, 0x1},
{0x0, 0x3, 0x2, 0x1},
{0x0, 0x3, 0x2, 0x1},
{0x0, 0x3, 0x2, 0x1}, /* LDO10 */
{0x0, 0x3, 0x2, 0x1},
{0x0, 0x3, 0x2, 0x1},
{0x0, 0x3, 0x2, 0x1},
{0x0, 0x3, 0x2, 0x1},
{0x0, 0x3, 0x2, 0x1}, /* LDO15 */
{0x0, 0x3, 0x2, 0x1},
{0x0, 0x3, 0x2, 0x1},
{0x0, 0x0, 0x0, 0x0},
{0x0, 0x3, 0x2, 0x1},
{0x0, 0x3, 0x2, 0x1}, /* LDO20 */
{0x0, 0x3, 0x2, 0x1},
{0x0, 0x3, 0x2, 0x1},
{0x0, 0x0, 0x0, 0x0},
{0x0, 0x3, 0x2, 0x1},
{0x0, 0x3, 0x2, 0x1}, /* LDO25 */
{0x0, 0x3, 0x2, 0x1},
{0x0, 0x3, 0x2, 0x1},
{0x0, 0x3, 0x2, 0x1}, /* LDO28 */
/* BUCK1 ... BUCK9 */
{0x0, 0x3, 0x1, 0x1}, /* BUCK1 */
{0x0, 0x3, 0x1, 0x1},
{0x0, 0x3, 0x1, 0x1},
{0x0, 0x3, 0x1, 0x1},
{0x0, 0x3, 0x2, 0x1}, /* BUCK5 */
{0x0, 0x3, 0x1, 0x1},
{0x0, 0x3, 0x1, 0x1},
{0x0, 0x3, 0x1, 0x1},
{0x0, 0x3, 0x1, 0x1}, /* BUCK9 */
};
static int s5m8767_get_register(struct s5m8767_info *s5m8767, int reg_id,
int *reg, int *enable_ctrl)
{
int i;
unsigned int mode;
switch (reg_id) {
case S5M8767_LDO1 ... S5M8767_LDO2:
*reg = S5M8767_REG_LDO1CTRL + (reg_id - S5M8767_LDO1);
break;
case S5M8767_LDO3 ... S5M8767_LDO28:
*reg = S5M8767_REG_LDO3CTRL + (reg_id - S5M8767_LDO3);
break;
case S5M8767_BUCK1:
*reg = S5M8767_REG_BUCK1CTRL1;
break;
case S5M8767_BUCK2 ... S5M8767_BUCK4:
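		/*
		 * BUCK2..4 each appear to occupy a control register
		 * followed by eight DVS registers, hence the stride
		 * of 9 between consecutive bucks.
		 */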
*reg = S5M8767_REG_BUCK2CTRL + (reg_id - S5M8767_BUCK2) * 9;
break;
case S5M8767_BUCK5:
*reg = S5M8767_REG_BUCK5CTRL1;
break;
case S5M8767_BUCK6 ... S5M8767_BUCK9:
*reg = S5M8767_REG_BUCK6CTRL1 + (reg_id - S5M8767_BUCK6) * 2;
break;
default:
return -EINVAL;
}
for (i = 0; i < s5m8767->num_regulators; i++) {
if (s5m8767->opmode[i].id == reg_id) {
mode = s5m8767->opmode[i].mode;
break;
}
}
if (i >= s5m8767->num_regulators)
return -EINVAL;
*enable_ctrl = s5m8767_opmode_reg[reg_id][mode] << S5M8767_ENCTRL_SHIFT;
return 0;
}
static int s5m8767_get_vsel_reg(int reg_id, struct s5m8767_info *s5m8767)
{
int reg;
switch (reg_id) {
case S5M8767_LDO1 ... S5M8767_LDO2:
reg = S5M8767_REG_LDO1CTRL + (reg_id - S5M8767_LDO1);
break;
case S5M8767_LDO3 ... S5M8767_LDO28:
reg = S5M8767_REG_LDO3CTRL + (reg_id - S5M8767_LDO3);
break;
case S5M8767_BUCK1:
reg = S5M8767_REG_BUCK1CTRL2;
break;
case S5M8767_BUCK2:
reg = S5M8767_REG_BUCK2DVS1;
if (s5m8767->buck2_gpiodvs)
reg += s5m8767->buck_gpioindex;
break;
case S5M8767_BUCK3:
reg = S5M8767_REG_BUCK3DVS1;
if (s5m8767->buck3_gpiodvs)
reg += s5m8767->buck_gpioindex;
break;
case S5M8767_BUCK4:
reg = S5M8767_REG_BUCK4DVS1;
if (s5m8767->buck4_gpiodvs)
reg += s5m8767->buck_gpioindex;
break;
case S5M8767_BUCK5:
reg = S5M8767_REG_BUCK5CTRL2;
break;
case S5M8767_BUCK6 ... S5M8767_BUCK9:
reg = S5M8767_REG_BUCK6CTRL2 + (reg_id - S5M8767_BUCK6) * 2;
break;
default:
return -EINVAL;
}
return reg;
}
static int s5m8767_convert_voltage_to_sel(const struct sec_voltage_desc *desc,
int min_vol)
{
int selector = 0;
if (desc == NULL)
return -EINVAL;
if (min_vol > desc->max)
return -EINVAL;
if (min_vol < desc->min)
min_vol = desc->min;
selector = DIV_ROUND_UP(min_vol - desc->min, desc->step);
if (desc->min + desc->step * selector > desc->max)
return -EINVAL;
return selector;
}
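/*
 * Worked example (arithmetic only) for buck_voltage_val2 (600000 uV
 * minimum, 6250 uV step): a 1100000 uV request yields
 * DIV_ROUND_UP(500000, 6250) = 80, which round-trips to exactly
 * 600000 + 80 * 6250 = 1100000 uV.
 */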
static inline int s5m8767_set_high(struct s5m8767_info *s5m8767)
{
int temp_index = s5m8767->buck_gpioindex;
gpio_set_value(s5m8767->buck_gpios[0], (temp_index >> 2) & 0x1);
gpio_set_value(s5m8767->buck_gpios[1], (temp_index >> 1) & 0x1);
gpio_set_value(s5m8767->buck_gpios[2], temp_index & 0x1);
return 0;
}
static inline int s5m8767_set_low(struct s5m8767_info *s5m8767)
{
int temp_index = s5m8767->buck_gpioindex;
gpio_set_value(s5m8767->buck_gpios[2], temp_index & 0x1);
gpio_set_value(s5m8767->buck_gpios[1], (temp_index >> 1) & 0x1);
gpio_set_value(s5m8767->buck_gpios[0], (temp_index >> 2) & 0x1);
return 0;
}
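/*
 * The helpers above drive the 3-bit DVS index onto SET1..SET3 with
 * the MSB on buck_gpios[0]: index 5 (0b101) sets gpios[0] = 1,
 * gpios[1] = 0, gpios[2] = 1. set_high() writes MSB first and
 * set_low() LSB first, presumably so transient intermediate codes
 * stay on the side of the old voltage while the index changes.
 */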
static int s5m8767_set_voltage_sel(struct regulator_dev *rdev,
unsigned selector)
{
struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev);
int reg_id = rdev_get_id(rdev);
int old_index, index = 0;
u8 *buck234_vol = NULL;
switch (reg_id) {
case S5M8767_LDO1 ... S5M8767_LDO28:
break;
case S5M8767_BUCK1 ... S5M8767_BUCK6:
if (reg_id == S5M8767_BUCK2 && s5m8767->buck2_gpiodvs)
buck234_vol = &s5m8767->buck2_vol[0];
else if (reg_id == S5M8767_BUCK3 && s5m8767->buck3_gpiodvs)
buck234_vol = &s5m8767->buck3_vol[0];
else if (reg_id == S5M8767_BUCK4 && s5m8767->buck4_gpiodvs)
buck234_vol = &s5m8767->buck4_vol[0];
break;
case S5M8767_BUCK7 ... S5M8767_BUCK8:
return -EINVAL;
case S5M8767_BUCK9:
break;
default:
return -EINVAL;
}
/* buck234_vol != NULL means to control buck234 voltage via DVS GPIO */
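	/*
	 * Note: this lookup assumes the requested selector is present
	 * in the 8-entry DVS table populated at probe time; machine
	 * constraints are expected to keep requests within it.
	 */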
if (buck234_vol) {
while (*buck234_vol != selector) {
buck234_vol++;
index++;
}
old_index = s5m8767->buck_gpioindex;
s5m8767->buck_gpioindex = index;
if (index > old_index)
return s5m8767_set_high(s5m8767);
else
return s5m8767_set_low(s5m8767);
} else {
return regulator_set_voltage_sel_regmap(rdev, selector);
}
}
static int s5m8767_set_voltage_time_sel(struct regulator_dev *rdev,
unsigned int old_sel,
unsigned int new_sel)
{
struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev);
if ((old_sel < new_sel) && s5m8767->ramp_delay)
return DIV_ROUND_UP(rdev->desc->uV_step * (new_sel - old_sel),
s5m8767->ramp_delay * 1000);
return 0;
}
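/*
 * Worked example (arithmetic only, taking ramp_delay in mV/us as the
 * division by 1000 suggests): stepping BUCK2 up 16 selectors of
 * 6250 uV each at 10 mV/us gives
 * DIV_ROUND_UP(6250 * 16, 10 * 1000) = 10 us.
 */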
static const struct regulator_ops s5m8767_ops = {
.list_voltage = regulator_list_voltage_linear,
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = s5m8767_set_voltage_sel,
.set_voltage_time_sel = s5m8767_set_voltage_time_sel,
};
static const struct regulator_ops s5m8767_buck78_ops = {
.list_voltage = regulator_list_voltage_linear,
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
};
#define s5m8767_regulator_desc(_name) { \
.name = #_name, \
.id = S5M8767_##_name, \
.ops = &s5m8767_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
}
#define s5m8767_regulator_buck78_desc(_name) { \
.name = #_name, \
.id = S5M8767_##_name, \
.ops = &s5m8767_buck78_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
}
static struct regulator_desc regulators[] = {
s5m8767_regulator_desc(LDO1),
s5m8767_regulator_desc(LDO2),
s5m8767_regulator_desc(LDO3),
s5m8767_regulator_desc(LDO4),
s5m8767_regulator_desc(LDO5),
s5m8767_regulator_desc(LDO6),
s5m8767_regulator_desc(LDO7),
s5m8767_regulator_desc(LDO8),
s5m8767_regulator_desc(LDO9),
s5m8767_regulator_desc(LDO10),
s5m8767_regulator_desc(LDO11),
s5m8767_regulator_desc(LDO12),
s5m8767_regulator_desc(LDO13),
s5m8767_regulator_desc(LDO14),
s5m8767_regulator_desc(LDO15),
s5m8767_regulator_desc(LDO16),
s5m8767_regulator_desc(LDO17),
s5m8767_regulator_desc(LDO18),
s5m8767_regulator_desc(LDO19),
s5m8767_regulator_desc(LDO20),
s5m8767_regulator_desc(LDO21),
s5m8767_regulator_desc(LDO22),
s5m8767_regulator_desc(LDO23),
s5m8767_regulator_desc(LDO24),
s5m8767_regulator_desc(LDO25),
s5m8767_regulator_desc(LDO26),
s5m8767_regulator_desc(LDO27),
s5m8767_regulator_desc(LDO28),
s5m8767_regulator_desc(BUCK1),
s5m8767_regulator_desc(BUCK2),
s5m8767_regulator_desc(BUCK3),
s5m8767_regulator_desc(BUCK4),
s5m8767_regulator_desc(BUCK5),
s5m8767_regulator_desc(BUCK6),
s5m8767_regulator_buck78_desc(BUCK7),
s5m8767_regulator_buck78_desc(BUCK8),
s5m8767_regulator_desc(BUCK9),
};
/*
* Enable GPIO control over BUCK9 in regulator_config for that regulator.
*/
static void s5m8767_regulator_config_ext_control(struct s5m8767_info *s5m8767,
struct sec_regulator_data *rdata,
struct regulator_config *config)
{
int i, mode = 0;
if (rdata->id != S5M8767_BUCK9)
return;
/* Check if opmode for regulator matches S5M8767_ENCTRL_USE_GPIO */
for (i = 0; i < s5m8767->num_regulators; i++) {
const struct sec_opmode_data *opmode = &s5m8767->opmode[i];
if (opmode->id == rdata->id) {
mode = s5m8767_opmode_reg[rdata->id][opmode->mode];
break;
}
}
if (mode != S5M8767_ENCTRL_USE_GPIO) {
dev_warn(s5m8767->dev,
"ext-control for %pOFn: mismatched op_mode (%x), ignoring\n",
rdata->reg_node, mode);
return;
}
if (!rdata->ext_control_gpiod) {
dev_warn(s5m8767->dev,
"ext-control for %pOFn: GPIO not valid, ignoring\n",
rdata->reg_node);
return;
}
config->ena_gpiod = rdata->ext_control_gpiod;
}
/*
* Turn on GPIO control over BUCK9.
*/
static int s5m8767_enable_ext_control(struct s5m8767_info *s5m8767,
struct regulator_dev *rdev)
{
int id = rdev_get_id(rdev);
int ret, reg, enable_ctrl;
if (id != S5M8767_BUCK9)
return -EINVAL;
ret = s5m8767_get_register(s5m8767, id, ®, &enable_ctrl);
if (ret)
return ret;
return regmap_update_bits(s5m8767->iodev->regmap_pmic,
reg, S5M8767_ENCTRL_MASK,
S5M8767_ENCTRL_USE_GPIO << S5M8767_ENCTRL_SHIFT);
}
#ifdef CONFIG_OF
static int s5m8767_pmic_dt_parse_dvs_gpio(struct sec_pmic_dev *iodev,
struct sec_platform_data *pdata,
struct device_node *pmic_np)
{
int i, gpio;
for (i = 0; i < 3; i++) {
gpio = of_get_named_gpio(pmic_np,
"s5m8767,pmic-buck-dvs-gpios", i);
if (!gpio_is_valid(gpio)) {
dev_err(iodev->dev, "invalid gpio[%d]: %d\n", i, gpio);
return -EINVAL;
}
pdata->buck_gpios[i] = gpio;
}
return 0;
}
static int s5m8767_pmic_dt_parse_ds_gpio(struct sec_pmic_dev *iodev,
struct sec_platform_data *pdata,
struct device_node *pmic_np)
{
int i, gpio;
for (i = 0; i < 3; i++) {
gpio = of_get_named_gpio(pmic_np,
"s5m8767,pmic-buck-ds-gpios", i);
if (!gpio_is_valid(gpio)) {
dev_err(iodev->dev, "invalid gpio[%d]: %d\n", i, gpio);
return -EINVAL;
}
pdata->buck_ds[i] = gpio;
}
return 0;
}
static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
struct sec_platform_data *pdata)
{
struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
struct device_node *pmic_np, *regulators_np, *reg_np;
struct sec_regulator_data *rdata;
struct sec_opmode_data *rmode;
	unsigned int i, dvs_voltage_nr = 8;
	int ret;
pmic_np = iodev->dev->of_node;
if (!pmic_np) {
dev_err(iodev->dev, "could not find pmic sub-node\n");
return -ENODEV;
}
regulators_np = of_get_child_by_name(pmic_np, "regulators");
if (!regulators_np) {
dev_err(iodev->dev, "could not find regulators sub-node\n");
return -EINVAL;
}
/* count the number of regulators to be supported in pmic */
pdata->num_regulators = of_get_child_count(regulators_np);
rdata = devm_kcalloc(&pdev->dev,
pdata->num_regulators, sizeof(*rdata),
GFP_KERNEL);
if (!rdata) {
of_node_put(regulators_np);
return -ENOMEM;
}
rmode = devm_kcalloc(&pdev->dev,
pdata->num_regulators, sizeof(*rmode),
GFP_KERNEL);
if (!rmode) {
of_node_put(regulators_np);
return -ENOMEM;
}
pdata->regulators = rdata;
pdata->opmode = rmode;
for_each_child_of_node(regulators_np, reg_np) {
for (i = 0; i < ARRAY_SIZE(regulators); i++)
if (of_node_name_eq(reg_np, regulators[i].name))
break;
if (i == ARRAY_SIZE(regulators)) {
dev_warn(iodev->dev,
"don't know how to configure regulator %pOFn\n",
reg_np);
continue;
}
rdata->ext_control_gpiod = devm_fwnode_gpiod_get(
&pdev->dev,
of_fwnode_handle(reg_np),
"s5m8767,pmic-ext-control",
GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE,
"s5m8767");
if (PTR_ERR(rdata->ext_control_gpiod) == -ENOENT) {
rdata->ext_control_gpiod = NULL;
} else if (IS_ERR(rdata->ext_control_gpiod)) {
of_node_put(reg_np);
of_node_put(regulators_np);
return PTR_ERR(rdata->ext_control_gpiod);
}
rdata->id = i;
rdata->initdata = of_get_regulator_init_data(
&pdev->dev, reg_np,
®ulators[i]);
rdata->reg_node = reg_np;
rdata++;
rmode->id = i;
if (of_property_read_u32(reg_np, "op_mode",
&rmode->mode)) {
dev_warn(iodev->dev,
"no op_mode property at %pOF\n",
reg_np);
rmode->mode = S5M8767_OPMODE_NORMAL_MODE;
}
rmode++;
}
of_node_put(regulators_np);
if (of_property_read_bool(pmic_np, "s5m8767,pmic-buck2-uses-gpio-dvs")) {
pdata->buck2_gpiodvs = true;
if (of_property_read_u32_array(pmic_np,
"s5m8767,pmic-buck2-dvs-voltage",
pdata->buck2_voltage, dvs_voltage_nr)) {
dev_err(iodev->dev, "buck2 voltages not specified\n");
return -EINVAL;
}
}
if (of_property_read_bool(pmic_np, "s5m8767,pmic-buck3-uses-gpio-dvs")) {
pdata->buck3_gpiodvs = true;
if (of_property_read_u32_array(pmic_np,
"s5m8767,pmic-buck3-dvs-voltage",
pdata->buck3_voltage, dvs_voltage_nr)) {
dev_err(iodev->dev, "buck3 voltages not specified\n");
return -EINVAL;
}
}
if (of_property_read_bool(pmic_np, "s5m8767,pmic-buck4-uses-gpio-dvs")) {
pdata->buck4_gpiodvs = true;
if (of_property_read_u32_array(pmic_np,
"s5m8767,pmic-buck4-dvs-voltage",
pdata->buck4_voltage, dvs_voltage_nr)) {
dev_err(iodev->dev, "buck4 voltages not specified\n");
return -EINVAL;
}
}
if (pdata->buck2_gpiodvs || pdata->buck3_gpiodvs ||
pdata->buck4_gpiodvs) {
ret = s5m8767_pmic_dt_parse_dvs_gpio(iodev, pdata, pmic_np);
if (ret)
return -EINVAL;
if (of_property_read_u32(pmic_np,
"s5m8767,pmic-buck-default-dvs-idx",
&pdata->buck_default_idx)) {
pdata->buck_default_idx = 0;
} else {
if (pdata->buck_default_idx >= 8) {
pdata->buck_default_idx = 0;
				dev_info(iodev->dev,
					 "invalid value for default dvs index, using 0\n");
}
}
}
ret = s5m8767_pmic_dt_parse_ds_gpio(iodev, pdata, pmic_np);
if (ret)
return -EINVAL;
pdata->buck2_ramp_enable = of_property_read_bool(pmic_np, "s5m8767,pmic-buck2-ramp-enable");
pdata->buck3_ramp_enable = of_property_read_bool(pmic_np, "s5m8767,pmic-buck3-ramp-enable");
pdata->buck4_ramp_enable = of_property_read_bool(pmic_np, "s5m8767,pmic-buck4-ramp-enable");
if (pdata->buck2_ramp_enable || pdata->buck3_ramp_enable
|| pdata->buck4_ramp_enable) {
if (of_property_read_u32(pmic_np, "s5m8767,pmic-buck-ramp-delay",
&pdata->buck_ramp_delay))
pdata->buck_ramp_delay = 0;
}
return 0;
}
#else
static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
struct sec_platform_data *pdata)
{
return 0;
}
#endif /* CONFIG_OF */
static int s5m8767_pmic_probe(struct platform_device *pdev)
{
struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
struct sec_platform_data *pdata = iodev->pdata;
struct regulator_config config = { };
struct s5m8767_info *s5m8767;
int i, ret, buck_init;
if (!pdata) {
dev_err(pdev->dev.parent, "Platform data not supplied\n");
return -ENODEV;
}
if (iodev->dev->of_node) {
ret = s5m8767_pmic_dt_parse_pdata(pdev, pdata);
if (ret)
return ret;
}
	/* At most one buck may use GPIO DVS at a time */
	if (pdata->buck2_gpiodvs + pdata->buck3_gpiodvs +
	    pdata->buck4_gpiodvs > 1) {
		dev_err(&pdev->dev, "only one buck may use GPIO DVS\n");
		return -EINVAL;
	}
s5m8767 = devm_kzalloc(&pdev->dev, sizeof(struct s5m8767_info),
GFP_KERNEL);
if (!s5m8767)
return -ENOMEM;
s5m8767->dev = &pdev->dev;
s5m8767->iodev = iodev;
s5m8767->num_regulators = pdata->num_regulators;
platform_set_drvdata(pdev, s5m8767);
s5m8767->buck_gpioindex = pdata->buck_default_idx;
s5m8767->buck2_gpiodvs = pdata->buck2_gpiodvs;
s5m8767->buck3_gpiodvs = pdata->buck3_gpiodvs;
s5m8767->buck4_gpiodvs = pdata->buck4_gpiodvs;
s5m8767->buck_gpios[0] = pdata->buck_gpios[0];
s5m8767->buck_gpios[1] = pdata->buck_gpios[1];
s5m8767->buck_gpios[2] = pdata->buck_gpios[2];
s5m8767->buck_ds[0] = pdata->buck_ds[0];
s5m8767->buck_ds[1] = pdata->buck_ds[1];
s5m8767->buck_ds[2] = pdata->buck_ds[2];
s5m8767->ramp_delay = pdata->buck_ramp_delay;
s5m8767->buck2_ramp = pdata->buck2_ramp_enable;
s5m8767->buck3_ramp = pdata->buck3_ramp_enable;
s5m8767->buck4_ramp = pdata->buck4_ramp_enable;
s5m8767->opmode = pdata->opmode;
buck_init = s5m8767_convert_voltage_to_sel(&buck_voltage_val2,
pdata->buck2_init);
regmap_write(s5m8767->iodev->regmap_pmic, S5M8767_REG_BUCK2DVS2,
buck_init);
buck_init = s5m8767_convert_voltage_to_sel(&buck_voltage_val2,
pdata->buck3_init);
regmap_write(s5m8767->iodev->regmap_pmic, S5M8767_REG_BUCK3DVS2,
buck_init);
buck_init = s5m8767_convert_voltage_to_sel(&buck_voltage_val2,
pdata->buck4_init);
regmap_write(s5m8767->iodev->regmap_pmic, S5M8767_REG_BUCK4DVS2,
buck_init);
for (i = 0; i < 8; i++) {
if (s5m8767->buck2_gpiodvs) {
s5m8767->buck2_vol[i] =
s5m8767_convert_voltage_to_sel(
&buck_voltage_val2,
pdata->buck2_voltage[i]);
}
if (s5m8767->buck3_gpiodvs) {
s5m8767->buck3_vol[i] =
s5m8767_convert_voltage_to_sel(
&buck_voltage_val2,
pdata->buck3_voltage[i]);
}
if (s5m8767->buck4_gpiodvs) {
s5m8767->buck4_vol[i] =
s5m8767_convert_voltage_to_sel(
&buck_voltage_val2,
pdata->buck4_voltage[i]);
}
}
if (pdata->buck2_gpiodvs || pdata->buck3_gpiodvs ||
pdata->buck4_gpiodvs) {
if (!gpio_is_valid(pdata->buck_gpios[0]) ||
!gpio_is_valid(pdata->buck_gpios[1]) ||
!gpio_is_valid(pdata->buck_gpios[2])) {
dev_err(&pdev->dev, "GPIO NOT VALID\n");
return -EINVAL;
}
ret = devm_gpio_request(&pdev->dev, pdata->buck_gpios[0],
"S5M8767 SET1");
if (ret)
return ret;
ret = devm_gpio_request(&pdev->dev, pdata->buck_gpios[1],
"S5M8767 SET2");
if (ret)
return ret;
ret = devm_gpio_request(&pdev->dev, pdata->buck_gpios[2],
"S5M8767 SET3");
if (ret)
return ret;
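		/*
		 * Drive the 3-bit DVS index out on SET1..SET3, MSB first:
		 * SET1 carries bit 2, SET2 bit 1 and SET3 bit 0.
		 */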
/* SET1 GPIO */
gpio_direction_output(pdata->buck_gpios[0],
(s5m8767->buck_gpioindex >> 2) & 0x1);
/* SET2 GPIO */
gpio_direction_output(pdata->buck_gpios[1],
(s5m8767->buck_gpioindex >> 1) & 0x1);
/* SET3 GPIO */
gpio_direction_output(pdata->buck_gpios[2],
(s5m8767->buck_gpioindex >> 0) & 0x1);
}
ret = devm_gpio_request(&pdev->dev, pdata->buck_ds[0], "S5M8767 DS2");
if (ret)
return ret;
ret = devm_gpio_request(&pdev->dev, pdata->buck_ds[1], "S5M8767 DS3");
if (ret)
return ret;
ret = devm_gpio_request(&pdev->dev, pdata->buck_ds[2], "S5M8767 DS4");
if (ret)
return ret;
/* DS2 GPIO */
gpio_direction_output(pdata->buck_ds[0], 0x0);
/* DS3 GPIO */
gpio_direction_output(pdata->buck_ds[1], 0x0);
/* DS4 GPIO */
gpio_direction_output(pdata->buck_ds[2], 0x0);
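	/* Bit 1 of each BUCKxCTRL register selects GPIO-driven DVS */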
regmap_update_bits(s5m8767->iodev->regmap_pmic,
S5M8767_REG_BUCK2CTRL, 1 << 1,
(pdata->buck2_gpiodvs) ? (1 << 1) : (0 << 1));
regmap_update_bits(s5m8767->iodev->regmap_pmic,
S5M8767_REG_BUCK3CTRL, 1 << 1,
(pdata->buck3_gpiodvs) ? (1 << 1) : (0 << 1));
regmap_update_bits(s5m8767->iodev->regmap_pmic,
S5M8767_REG_BUCK4CTRL, 1 << 1,
(pdata->buck4_gpiodvs) ? (1 << 1) : (0 << 1));
/* Initialize GPIO DVS registers */
for (i = 0; i < 8; i++) {
if (s5m8767->buck2_gpiodvs) {
regmap_write(s5m8767->iodev->regmap_pmic,
S5M8767_REG_BUCK2DVS1 + i,
s5m8767->buck2_vol[i]);
}
if (s5m8767->buck3_gpiodvs) {
regmap_write(s5m8767->iodev->regmap_pmic,
S5M8767_REG_BUCK3DVS1 + i,
s5m8767->buck3_vol[i]);
}
if (s5m8767->buck4_gpiodvs) {
regmap_write(s5m8767->iodev->regmap_pmic,
S5M8767_REG_BUCK4DVS1 + i,
s5m8767->buck4_vol[i]);
}
}
if (s5m8767->buck2_ramp)
regmap_update_bits(s5m8767->iodev->regmap_pmic,
S5M8767_REG_DVSRAMP, 0x08, 0x08);
if (s5m8767->buck3_ramp)
regmap_update_bits(s5m8767->iodev->regmap_pmic,
S5M8767_REG_DVSRAMP, 0x04, 0x04);
if (s5m8767->buck4_ramp)
regmap_update_bits(s5m8767->iodev->regmap_pmic,
S5M8767_REG_DVSRAMP, 0x02, 0x02);
if (s5m8767->buck2_ramp || s5m8767->buck3_ramp
|| s5m8767->buck4_ramp) {
unsigned int val;
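		/*
		 * Translate the requested ramp rate (mV/us) into the DVSRAMP
		 * field encoding; unsupported rates fall back to 10 mV/us.
		 */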
switch (s5m8767->ramp_delay) {
case 5:
val = S5M8767_DVS_BUCK_RAMP_5;
break;
case 10:
val = S5M8767_DVS_BUCK_RAMP_10;
break;
case 25:
val = S5M8767_DVS_BUCK_RAMP_25;
break;
case 50:
val = S5M8767_DVS_BUCK_RAMP_50;
break;
case 100:
val = S5M8767_DVS_BUCK_RAMP_100;
break;
default:
val = S5M8767_DVS_BUCK_RAMP_10;
}
regmap_update_bits(s5m8767->iodev->regmap_pmic,
S5M8767_REG_DVSRAMP,
S5M8767_DVS_BUCK_RAMP_MASK,
val << S5M8767_DVS_BUCK_RAMP_SHIFT);
}
for (i = 0; i < pdata->num_regulators; i++) {
const struct sec_voltage_desc *desc;
unsigned int id = pdata->regulators[i].id;
int enable_reg, enable_val;
struct regulator_dev *rdev;
BUILD_BUG_ON(ARRAY_SIZE(regulators) != ARRAY_SIZE(reg_voltage_map));
if (WARN_ON_ONCE(id >= ARRAY_SIZE(regulators)))
continue;
desc = reg_voltage_map[id];
if (desc) {
regulators[id].n_voltages =
(desc->max - desc->min) / desc->step + 1;
regulators[id].min_uV = desc->min;
regulators[id].uV_step = desc->step;
regulators[id].vsel_reg =
s5m8767_get_vsel_reg(id, s5m8767);
if (id < S5M8767_BUCK1)
regulators[id].vsel_mask = 0x3f;
else
regulators[id].vsel_mask = 0xff;
ret = s5m8767_get_register(s5m8767, id, &enable_reg,
&enable_val);
if (ret) {
dev_err(s5m8767->dev, "error reading registers\n");
return ret;
}
regulators[id].enable_reg = enable_reg;
regulators[id].enable_mask = S5M8767_ENCTRL_MASK;
regulators[id].enable_val = enable_val;
}
config.dev = s5m8767->dev;
config.init_data = pdata->regulators[i].initdata;
config.driver_data = s5m8767;
config.regmap = iodev->regmap_pmic;
config.of_node = pdata->regulators[i].reg_node;
config.ena_gpiod = NULL;
if (pdata->regulators[i].ext_control_gpiod) {
/* Assigns config.ena_gpiod */
s5m8767_regulator_config_ext_control(s5m8767,
&pdata->regulators[i], &config);
/*
* Hand the GPIO descriptor management over to the
* regulator core, remove it from devres management.
*/
devm_gpiod_unhinge(s5m8767->dev, config.ena_gpiod);
}
		rdev = devm_regulator_register(&pdev->dev, &regulators[id],
&config);
if (IS_ERR(rdev)) {
ret = PTR_ERR(rdev);
dev_err(s5m8767->dev, "regulator init failed for %d\n",
id);
return ret;
}
if (pdata->regulators[i].ext_control_gpiod) {
ret = s5m8767_enable_ext_control(s5m8767, rdev);
if (ret < 0) {
dev_err(s5m8767->dev,
"failed to enable gpio control over %s: %d\n",
rdev->desc->name, ret);
return ret;
}
}
}
return 0;
}
static const struct platform_device_id s5m8767_pmic_id[] = {
{ "s5m8767-pmic", 0},
{ },
};
MODULE_DEVICE_TABLE(platform, s5m8767_pmic_id);
static struct platform_driver s5m8767_pmic_driver = {
.driver = {
.name = "s5m8767-pmic",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = s5m8767_pmic_probe,
.id_table = s5m8767_pmic_id,
};
module_platform_driver(s5m8767_pmic_driver);
/* Module information */
MODULE_AUTHOR("Sangbeom Kim <[email protected]>");
MODULE_DESCRIPTION("Samsung S5M8767 Regulator Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/regulator/s5m8767.c |
// SPDX-License-Identifier: GPL-2.0
//
// Regulator Driver for Freescale MC13xxx PMIC
//
// Copyright 2010 Yong Shen <[email protected]>
//
// Based on mc13783 regulator driver :
// Copyright (C) 2008 Sascha Hauer, Pengutronix <[email protected]>
// Copyright 2009 Alberto Panizzo <[email protected]>
//
// Regs infos taken from mc13xxx drivers from freescale and mc13xxx.pdf file
// from freescale
#include <linux/mfd/mc13xxx.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
#include "mc13xxx.h"
static int mc13xxx_regulator_enable(struct regulator_dev *rdev)
{
struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
int id = rdev_get_id(rdev);
dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
return mc13xxx_reg_rmw(priv->mc13xxx, mc13xxx_regulators[id].reg,
mc13xxx_regulators[id].enable_bit,
mc13xxx_regulators[id].enable_bit);
}
static int mc13xxx_regulator_disable(struct regulator_dev *rdev)
{
struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
int id = rdev_get_id(rdev);
dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
return mc13xxx_reg_rmw(priv->mc13xxx, mc13xxx_regulators[id].reg,
mc13xxx_regulators[id].enable_bit, 0);
}
static int mc13xxx_regulator_is_enabled(struct regulator_dev *rdev)
{
struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
int ret, id = rdev_get_id(rdev);
unsigned int val;
ret = mc13xxx_reg_read(priv->mc13xxx, mc13xxx_regulators[id].reg, &val);
if (ret)
return ret;
return (val & mc13xxx_regulators[id].enable_bit) != 0;
}
static int mc13xxx_regulator_set_voltage_sel(struct regulator_dev *rdev,
unsigned selector)
{
struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
int id = rdev_get_id(rdev);
return mc13xxx_reg_rmw(priv->mc13xxx, mc13xxx_regulators[id].vsel_reg,
mc13xxx_regulators[id].vsel_mask,
selector << mc13xxx_regulators[id].vsel_shift);
}
static int mc13xxx_regulator_get_voltage(struct regulator_dev *rdev)
{
struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
int ret, id = rdev_get_id(rdev);
unsigned int val;
dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
ret = mc13xxx_reg_read(priv->mc13xxx,
mc13xxx_regulators[id].vsel_reg, &val);
if (ret)
return ret;
val = (val & mc13xxx_regulators[id].vsel_mask)
>> mc13xxx_regulators[id].vsel_shift;
dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val);
BUG_ON(val >= mc13xxx_regulators[id].desc.n_voltages);
return rdev->desc->volt_table[val];
}
const struct regulator_ops mc13xxx_regulator_ops = {
.enable = mc13xxx_regulator_enable,
.disable = mc13xxx_regulator_disable,
.is_enabled = mc13xxx_regulator_is_enabled,
.list_voltage = regulator_list_voltage_table,
.set_voltage_sel = mc13xxx_regulator_set_voltage_sel,
.get_voltage = mc13xxx_regulator_get_voltage,
};
EXPORT_SYMBOL_GPL(mc13xxx_regulator_ops);
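/*
 * Fixed-voltage regulators expose a single entry in volt_table: accept a
 * request only when that one voltage lies inside [min_uV, max_uV].
 */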
int mc13xxx_fixed_regulator_set_voltage(struct regulator_dev *rdev, int min_uV,
int max_uV, unsigned *selector)
{
int id = rdev_get_id(rdev);
dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n",
__func__, id, min_uV, max_uV);
if (min_uV <= rdev->desc->volt_table[0] &&
rdev->desc->volt_table[0] <= max_uV) {
*selector = 0;
return 0;
} else {
return -EINVAL;
}
}
EXPORT_SYMBOL_GPL(mc13xxx_fixed_regulator_set_voltage);
const struct regulator_ops mc13xxx_fixed_regulator_ops = {
.enable = mc13xxx_regulator_enable,
.disable = mc13xxx_regulator_disable,
.is_enabled = mc13xxx_regulator_is_enabled,
.list_voltage = regulator_list_voltage_table,
.set_voltage = mc13xxx_fixed_regulator_set_voltage,
};
EXPORT_SYMBOL_GPL(mc13xxx_fixed_regulator_ops);
#ifdef CONFIG_OF
int mc13xxx_get_num_regulators_dt(struct platform_device *pdev)
{
struct device_node *parent;
int num;
if (!pdev->dev.parent->of_node)
return -ENODEV;
parent = of_get_child_by_name(pdev->dev.parent->of_node, "regulators");
if (!parent)
return -ENODEV;
num = of_get_child_count(parent);
of_node_put(parent);
return num;
}
EXPORT_SYMBOL_GPL(mc13xxx_get_num_regulators_dt);
struct mc13xxx_regulator_init_data *mc13xxx_parse_regulators_dt(
struct platform_device *pdev, struct mc13xxx_regulator *regulators,
int num_regulators)
{
struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev);
struct mc13xxx_regulator_init_data *data, *p;
struct device_node *parent, *child;
int i, parsed = 0;
if (!pdev->dev.parent->of_node)
return NULL;
parent = of_get_child_by_name(pdev->dev.parent->of_node, "regulators");
if (!parent)
return NULL;
data = devm_kcalloc(&pdev->dev, priv->num_regulators, sizeof(*data),
GFP_KERNEL);
if (!data) {
of_node_put(parent);
return NULL;
}
p = data;
for_each_child_of_node(parent, child) {
int found = 0;
for (i = 0; i < num_regulators; i++) {
if (!regulators[i].desc.name)
continue;
if (of_node_name_eq(child,
regulators[i].desc.name)) {
p->id = i;
p->init_data = of_get_regulator_init_data(
&pdev->dev, child,
					&regulators[i].desc);
p->node = child;
p++;
parsed++;
found = 1;
break;
}
}
if (!found)
dev_warn(&pdev->dev,
"Unknown regulator: %pOFn\n", child);
}
of_node_put(parent);
priv->num_regulators = parsed;
return data;
}
EXPORT_SYMBOL_GPL(mc13xxx_parse_regulators_dt);
#endif
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Yong Shen <[email protected]>");
MODULE_DESCRIPTION("Regulator Driver for Freescale MC13xxx PMIC");
MODULE_ALIAS("mc13xxx-regulator-core");
| linux-master | drivers/regulator/mc13xxx-regulator-core.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) ST-Ericsson SA 2010
*
* Authors: Sundar Iyer <[email protected]> for ST-Ericsson
* Bengt Jonsson <[email protected]> for ST-Ericsson
* Daniel Willerud <[email protected]> for ST-Ericsson
*
* AB8500 peripheral regulators
*
* AB8500 supports the following regulators:
* VAUX1/2/3, VINTCORE, VTVOUT, VUSB, VAUDIO, VAMIC1/2, VDMIC, VANA
*
* AB8505 supports the following regulators:
* VAUX1/2/3/4/5/6, VINTCORE, VADC, VUSB, VAUDIO, VAMIC1/2, VDMIC, VANA
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ab8500.h>
#include <linux/of.h>
#include <linux/regulator/of_regulator.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/slab.h>
/* AB8500 regulators */
enum ab8500_regulator_id {
AB8500_LDO_AUX1,
AB8500_LDO_AUX2,
AB8500_LDO_AUX3,
AB8500_LDO_INTCORE,
AB8500_LDO_TVOUT,
AB8500_LDO_AUDIO,
AB8500_LDO_ANAMIC1,
AB8500_LDO_ANAMIC2,
AB8500_LDO_DMIC,
AB8500_LDO_ANA,
AB8500_NUM_REGULATORS,
};
/* AB8505 regulators */
enum ab8505_regulator_id {
AB8505_LDO_AUX1,
AB8505_LDO_AUX2,
AB8505_LDO_AUX3,
AB8505_LDO_AUX4,
AB8505_LDO_AUX5,
AB8505_LDO_AUX6,
AB8505_LDO_INTCORE,
AB8505_LDO_ADC,
AB8505_LDO_AUDIO,
AB8505_LDO_ANAMIC1,
AB8505_LDO_ANAMIC2,
AB8505_LDO_AUX8,
AB8505_LDO_ANA,
AB8505_NUM_REGULATORS,
};
/* AB8500 registers */
enum ab8500_regulator_reg {
AB8500_REGUREQUESTCTRL2,
AB8500_REGUREQUESTCTRL3,
AB8500_REGUREQUESTCTRL4,
AB8500_REGUSYSCLKREQ1HPVALID1,
AB8500_REGUSYSCLKREQ1HPVALID2,
AB8500_REGUHWHPREQ1VALID1,
AB8500_REGUHWHPREQ1VALID2,
AB8500_REGUHWHPREQ2VALID1,
AB8500_REGUHWHPREQ2VALID2,
AB8500_REGUSWHPREQVALID1,
AB8500_REGUSWHPREQVALID2,
AB8500_REGUSYSCLKREQVALID1,
AB8500_REGUSYSCLKREQVALID2,
AB8500_REGUMISC1,
AB8500_VAUDIOSUPPLY,
AB8500_REGUCTRL1VAMIC,
AB8500_VPLLVANAREGU,
AB8500_VREFDDR,
AB8500_EXTSUPPLYREGU,
AB8500_VAUX12REGU,
AB8500_VRF1VAUX3REGU,
AB8500_VAUX1SEL,
AB8500_VAUX2SEL,
AB8500_VRF1VAUX3SEL,
AB8500_REGUCTRL2SPARE,
AB8500_REGUCTRLDISCH,
AB8500_REGUCTRLDISCH2,
AB8500_NUM_REGULATOR_REGISTERS,
};
/* AB8505 registers */
enum ab8505_regulator_reg {
AB8505_REGUREQUESTCTRL1,
AB8505_REGUREQUESTCTRL2,
AB8505_REGUREQUESTCTRL3,
AB8505_REGUREQUESTCTRL4,
AB8505_REGUSYSCLKREQ1HPVALID1,
AB8505_REGUSYSCLKREQ1HPVALID2,
AB8505_REGUHWHPREQ1VALID1,
AB8505_REGUHWHPREQ1VALID2,
AB8505_REGUHWHPREQ2VALID1,
AB8505_REGUHWHPREQ2VALID2,
AB8505_REGUSWHPREQVALID1,
AB8505_REGUSWHPREQVALID2,
AB8505_REGUSYSCLKREQVALID1,
AB8505_REGUSYSCLKREQVALID2,
AB8505_REGUVAUX4REQVALID,
AB8505_REGUMISC1,
AB8505_VAUDIOSUPPLY,
AB8505_REGUCTRL1VAMIC,
AB8505_VSMPSAREGU,
AB8505_VSMPSBREGU,
AB8505_VSAFEREGU, /* NOTE! PRCMU register */
AB8505_VPLLVANAREGU,
AB8505_EXTSUPPLYREGU,
AB8505_VAUX12REGU,
AB8505_VRF1VAUX3REGU,
AB8505_VSMPSASEL1,
AB8505_VSMPSASEL2,
AB8505_VSMPSASEL3,
AB8505_VSMPSBSEL1,
AB8505_VSMPSBSEL2,
AB8505_VSMPSBSEL3,
AB8505_VSAFESEL1, /* NOTE! PRCMU register */
AB8505_VSAFESEL2, /* NOTE! PRCMU register */
AB8505_VSAFESEL3, /* NOTE! PRCMU register */
AB8505_VAUX1SEL,
AB8505_VAUX2SEL,
AB8505_VRF1VAUX3SEL,
AB8505_VAUX4REQCTRL,
AB8505_VAUX4REGU,
AB8505_VAUX4SEL,
AB8505_REGUCTRLDISCH,
AB8505_REGUCTRLDISCH2,
AB8505_REGUCTRLDISCH3,
AB8505_CTRLVAUX5,
AB8505_CTRLVAUX6,
AB8505_NUM_REGULATOR_REGISTERS,
};
/**
 * struct ab8500_shared_mode - mode state shared between two regulators
* @shared_regulator: pointer to the other sharing regulator
* @lp_mode_req: low power mode requested by this regulator
*/
struct ab8500_shared_mode {
struct ab8500_regulator_info *shared_regulator;
bool lp_mode_req;
};
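/*
 * Example: on AB8505 the two analog microphone LDOs share one mode bit, so
 * ldo_anamic1_shared and ldo_anamic2_shared (defined below) point at each
 * other's regulator info.
 */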
/**
* struct ab8500_regulator_info - ab8500 regulator information
* @dev: device pointer
* @desc: regulator description
* @shared_mode: used when mode is shared between two regulators
* @load_lp_uA: maximum load in idle (low power) mode
* @update_bank: bank to control on/off
* @update_reg: register to control on/off
* @update_mask: mask to enable/disable and set mode of regulator
* @update_val: bits holding the regulator current mode
* @update_val_idle: bits to enable the regulator in idle (low power) mode
* @update_val_normal: bits to enable the regulator in normal (high power) mode
* @mode_bank: bank with location of mode register
* @mode_reg: mode register
* @mode_mask: mask for setting mode
* @mode_val_idle: mode setting for low power
* @mode_val_normal: mode setting for normal power
* @voltage_bank: bank to control regulator voltage
* @voltage_reg: register to control regulator voltage
* @voltage_mask: mask to control regulator voltage
*/
struct ab8500_regulator_info {
struct device *dev;
struct regulator_desc desc;
struct ab8500_shared_mode *shared_mode;
int load_lp_uA;
u8 update_bank;
u8 update_reg;
u8 update_mask;
u8 update_val;
u8 update_val_idle;
u8 update_val_normal;
u8 mode_bank;
u8 mode_reg;
u8 mode_mask;
u8 mode_val_idle;
u8 mode_val_normal;
u8 voltage_bank;
u8 voltage_reg;
u8 voltage_mask;
};
/* voltage tables for the vauxn/vintcore supplies */
static const unsigned int ldo_vauxn_voltages[] = {
1100000,
1200000,
1300000,
1400000,
1500000,
1800000,
1850000,
1900000,
2500000,
2650000,
2700000,
2750000,
2800000,
2900000,
3000000,
3300000,
};
static const unsigned int ldo_vaux3_voltages[] = {
1200000,
1500000,
1800000,
2100000,
2500000,
2750000,
2790000,
2910000,
};
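/* Note the out-of-order first entry: selector 0 selects 1.8 V */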
static const unsigned int ldo_vaux56_voltages[] = {
1800000,
1050000,
1100000,
1200000,
1500000,
2200000,
2500000,
2790000,
};
static const unsigned int ldo_vintcore_voltages[] = {
1200000,
1225000,
1250000,
1275000,
1300000,
1325000,
1350000,
};
static const unsigned int fixed_1200000_voltage[] = {
1200000,
};
static const unsigned int fixed_1800000_voltage[] = {
1800000,
};
static const unsigned int fixed_2000000_voltage[] = {
2000000,
};
static const unsigned int fixed_2050000_voltage[] = {
2050000,
};
static const unsigned int ldo_vana_voltages[] = {
1050000,
1075000,
1100000,
1125000,
1150000,
1175000,
1200000,
1225000,
};
static const unsigned int ldo_vaudio_voltages[] = {
2000000,
2100000,
2200000,
2300000,
2400000,
2500000,
2600000,
2600000, /* Duplicated in Vaudio and IsoUicc Control register. */
};
static DEFINE_MUTEX(shared_mode_mutex);
static struct ab8500_shared_mode ldo_anamic1_shared;
static struct ab8500_shared_mode ldo_anamic2_shared;
static int ab8500_regulator_enable(struct regulator_dev *rdev)
{
int ret;
struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
if (info == NULL) {
dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
return -EINVAL;
}
ret = abx500_mask_and_set_register_interruptible(info->dev,
info->update_bank, info->update_reg,
info->update_mask, info->update_val);
if (ret < 0) {
dev_err(rdev_get_dev(rdev),
"couldn't set enable bits for regulator\n");
return ret;
}
dev_vdbg(rdev_get_dev(rdev),
"%s-enable (bank, reg, mask, value): 0x%x, 0x%x, 0x%x, 0x%x\n",
info->desc.name, info->update_bank, info->update_reg,
info->update_mask, info->update_val);
return ret;
}
static int ab8500_regulator_disable(struct regulator_dev *rdev)
{
int ret;
struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
if (info == NULL) {
dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
return -EINVAL;
}
ret = abx500_mask_and_set_register_interruptible(info->dev,
info->update_bank, info->update_reg,
info->update_mask, 0x0);
if (ret < 0) {
dev_err(rdev_get_dev(rdev),
"couldn't set disable bits for regulator\n");
return ret;
}
dev_vdbg(rdev_get_dev(rdev),
"%s-disable (bank, reg, mask, value): 0x%x, 0x%x, 0x%x, 0x%x\n",
info->desc.name, info->update_bank, info->update_reg,
info->update_mask, 0x0);
return ret;
}
static int ab8500_regulator_is_enabled(struct regulator_dev *rdev)
{
int ret;
struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
u8 regval;
if (info == NULL) {
dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
return -EINVAL;
}
ret = abx500_get_register_interruptible(info->dev,
		info->update_bank, info->update_reg, &regval);
if (ret < 0) {
dev_err(rdev_get_dev(rdev),
"couldn't read 0x%x register\n", info->update_reg);
return ret;
}
dev_vdbg(rdev_get_dev(rdev),
"%s-is_enabled (bank, reg, mask, value): 0x%x, 0x%x, 0x%x,"
" 0x%x\n",
info->desc.name, info->update_bank, info->update_reg,
info->update_mask, regval);
if (regval & info->update_mask)
return 1;
else
return 0;
}
static unsigned int ab8500_regulator_get_optimum_mode(
struct regulator_dev *rdev, int input_uV,
int output_uV, int load_uA)
{
unsigned int mode;
struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
if (info == NULL) {
dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
return -EINVAL;
}
if (load_uA <= info->load_lp_uA)
mode = REGULATOR_MODE_IDLE;
else
mode = REGULATOR_MODE_NORMAL;
return mode;
}
static int ab8500_regulator_set_mode(struct regulator_dev *rdev,
unsigned int mode)
{
int ret = 0;
u8 bank, reg, mask, val;
bool lp_mode_req = false;
struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
if (info == NULL) {
dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
return -EINVAL;
}
if (info->mode_mask) {
bank = info->mode_bank;
reg = info->mode_reg;
mask = info->mode_mask;
} else {
bank = info->update_bank;
reg = info->update_reg;
mask = info->update_mask;
}
if (info->shared_mode)
mutex_lock(&shared_mode_mutex);
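	/*
	 * A shared-mode regulator is only switched to low power once both
	 * users have requested it; otherwise the request is just recorded
	 * and the register write is skipped.
	 */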
switch (mode) {
case REGULATOR_MODE_NORMAL:
if (info->shared_mode)
lp_mode_req = false;
if (info->mode_mask)
val = info->mode_val_normal;
else
val = info->update_val_normal;
break;
case REGULATOR_MODE_IDLE:
if (info->shared_mode) {
struct ab8500_regulator_info *shared_regulator;
shared_regulator = info->shared_mode->shared_regulator;
if (!shared_regulator->shared_mode->lp_mode_req) {
				/* The other regulator prevents LP mode */
info->shared_mode->lp_mode_req = true;
goto out_unlock;
}
lp_mode_req = true;
}
if (info->mode_mask)
val = info->mode_val_idle;
else
val = info->update_val_idle;
break;
default:
ret = -EINVAL;
goto out_unlock;
}
if (info->mode_mask || ab8500_regulator_is_enabled(rdev)) {
ret = abx500_mask_and_set_register_interruptible(info->dev,
bank, reg, mask, val);
if (ret < 0) {
dev_err(rdev_get_dev(rdev),
"couldn't set regulator mode\n");
goto out_unlock;
}
dev_vdbg(rdev_get_dev(rdev),
"%s-set_mode (bank, reg, mask, value): "
"0x%x, 0x%x, 0x%x, 0x%x\n",
info->desc.name, bank, reg,
mask, val);
}
if (!info->mode_mask)
info->update_val = val;
if (info->shared_mode)
info->shared_mode->lp_mode_req = lp_mode_req;
out_unlock:
if (info->shared_mode)
mutex_unlock(&shared_mode_mutex);
return ret;
}
static unsigned int ab8500_regulator_get_mode(struct regulator_dev *rdev)
{
struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
int ret;
u8 val;
u8 val_normal;
u8 val_idle;
if (info == NULL) {
dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
return -EINVAL;
}
/* Need special handling for shared mode */
if (info->shared_mode) {
if (info->shared_mode->lp_mode_req)
return REGULATOR_MODE_IDLE;
else
return REGULATOR_MODE_NORMAL;
}
if (info->mode_mask) {
/* Dedicated register for handling mode */
		ret = abx500_get_register_interruptible(info->dev,
			info->mode_bank, info->mode_reg, &val);
		if (ret < 0) {
			dev_err(rdev_get_dev(rdev),
				"couldn't read mode register\n");
			return ret;
		}
		val = val & info->mode_mask;
val_normal = info->mode_val_normal;
val_idle = info->mode_val_idle;
} else {
/* Mode register same as enable register */
val = info->update_val;
val_normal = info->update_val_normal;
val_idle = info->update_val_idle;
}
if (val == val_normal)
ret = REGULATOR_MODE_NORMAL;
else if (val == val_idle)
ret = REGULATOR_MODE_IDLE;
else
ret = -EINVAL;
return ret;
}
static int ab8500_regulator_get_voltage_sel(struct regulator_dev *rdev)
{
int ret, voltage_shift;
struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
u8 regval;
if (info == NULL) {
dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
return -EINVAL;
}
voltage_shift = ffs(info->voltage_mask) - 1;
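	/* e.g. a voltage_mask of 0x38 yields a voltage_shift of 3 */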
ret = abx500_get_register_interruptible(info->dev,
		info->voltage_bank, info->voltage_reg, &regval);
if (ret < 0) {
dev_err(rdev_get_dev(rdev),
"couldn't read voltage reg for regulator\n");
return ret;
}
dev_vdbg(rdev_get_dev(rdev),
"%s-get_voltage (bank, reg, mask, shift, value): "
"0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n",
info->desc.name, info->voltage_bank,
info->voltage_reg, info->voltage_mask,
voltage_shift, regval);
return (regval & info->voltage_mask) >> voltage_shift;
}
static int ab8500_regulator_set_voltage_sel(struct regulator_dev *rdev,
unsigned selector)
{
int ret, voltage_shift;
struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
u8 regval;
if (info == NULL) {
dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
return -EINVAL;
}
voltage_shift = ffs(info->voltage_mask) - 1;
/* set the registers for the request */
regval = (u8)selector << voltage_shift;
ret = abx500_mask_and_set_register_interruptible(info->dev,
info->voltage_bank, info->voltage_reg,
info->voltage_mask, regval);
if (ret < 0)
dev_err(rdev_get_dev(rdev),
"couldn't set voltage reg for regulator\n");
dev_vdbg(rdev_get_dev(rdev),
"%s-set_voltage (bank, reg, mask, value): 0x%x, 0x%x, 0x%x,"
" 0x%x\n",
info->desc.name, info->voltage_bank, info->voltage_reg,
info->voltage_mask, regval);
return ret;
}
static const struct regulator_ops ab8500_regulator_volt_mode_ops = {
.enable = ab8500_regulator_enable,
.disable = ab8500_regulator_disable,
.is_enabled = ab8500_regulator_is_enabled,
.get_optimum_mode = ab8500_regulator_get_optimum_mode,
.set_mode = ab8500_regulator_set_mode,
.get_mode = ab8500_regulator_get_mode,
.get_voltage_sel = ab8500_regulator_get_voltage_sel,
.set_voltage_sel = ab8500_regulator_set_voltage_sel,
.list_voltage = regulator_list_voltage_table,
};
static const struct regulator_ops ab8500_regulator_volt_ops = {
.enable = ab8500_regulator_enable,
.disable = ab8500_regulator_disable,
.is_enabled = ab8500_regulator_is_enabled,
.get_voltage_sel = ab8500_regulator_get_voltage_sel,
.set_voltage_sel = ab8500_regulator_set_voltage_sel,
.list_voltage = regulator_list_voltage_table,
};
static const struct regulator_ops ab8500_regulator_mode_ops = {
.enable = ab8500_regulator_enable,
.disable = ab8500_regulator_disable,
.is_enabled = ab8500_regulator_is_enabled,
.get_optimum_mode = ab8500_regulator_get_optimum_mode,
.set_mode = ab8500_regulator_set_mode,
.get_mode = ab8500_regulator_get_mode,
.list_voltage = regulator_list_voltage_table,
};
static const struct regulator_ops ab8500_regulator_ops = {
.enable = ab8500_regulator_enable,
.disable = ab8500_regulator_disable,
.is_enabled = ab8500_regulator_is_enabled,
.list_voltage = regulator_list_voltage_table,
};
static const struct regulator_ops ab8500_regulator_anamic_mode_ops = {
.enable = ab8500_regulator_enable,
.disable = ab8500_regulator_disable,
.is_enabled = ab8500_regulator_is_enabled,
.set_mode = ab8500_regulator_set_mode,
.get_mode = ab8500_regulator_get_mode,
.list_voltage = regulator_list_voltage_table,
};
/* AB8500 regulator information */
static struct ab8500_regulator_info
ab8500_regulator_info[AB8500_NUM_REGULATORS] = {
/*
* Variable Voltage Regulators
* name, min mV, max mV,
* update bank, reg, mask, enable val
* volt bank, reg, mask
*/
[AB8500_LDO_AUX1] = {
.desc = {
.name = "LDO-AUX1",
.ops = &ab8500_regulator_volt_mode_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_LDO_AUX1,
.owner = THIS_MODULE,
.n_voltages = ARRAY_SIZE(ldo_vauxn_voltages),
.volt_table = ldo_vauxn_voltages,
.enable_time = 200,
.supply_name = "vin",
},
.load_lp_uA = 5000,
.update_bank = 0x04,
.update_reg = 0x09,
.update_mask = 0x03,
.update_val = 0x01,
.update_val_idle = 0x03,
.update_val_normal = 0x01,
.voltage_bank = 0x04,
.voltage_reg = 0x1f,
.voltage_mask = 0x0f,
},
[AB8500_LDO_AUX2] = {
.desc = {
.name = "LDO-AUX2",
.ops = &ab8500_regulator_volt_mode_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_LDO_AUX2,
.owner = THIS_MODULE,
.n_voltages = ARRAY_SIZE(ldo_vauxn_voltages),
.volt_table = ldo_vauxn_voltages,
.enable_time = 200,
.supply_name = "vin",
},
.load_lp_uA = 5000,
.update_bank = 0x04,
.update_reg = 0x09,
.update_mask = 0x0c,
.update_val = 0x04,
.update_val_idle = 0x0c,
.update_val_normal = 0x04,
.voltage_bank = 0x04,
.voltage_reg = 0x20,
.voltage_mask = 0x0f,
},
[AB8500_LDO_AUX3] = {
.desc = {
.name = "LDO-AUX3",
.ops = &ab8500_regulator_volt_mode_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_LDO_AUX3,
.owner = THIS_MODULE,
.n_voltages = ARRAY_SIZE(ldo_vaux3_voltages),
.volt_table = ldo_vaux3_voltages,
.enable_time = 450,
.supply_name = "vin",
},
.load_lp_uA = 5000,
.update_bank = 0x04,
.update_reg = 0x0a,
.update_mask = 0x03,
.update_val = 0x01,
.update_val_idle = 0x03,
.update_val_normal = 0x01,
.voltage_bank = 0x04,
.voltage_reg = 0x21,
.voltage_mask = 0x07,
},
[AB8500_LDO_INTCORE] = {
.desc = {
.name = "LDO-INTCORE",
.ops = &ab8500_regulator_volt_mode_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_LDO_INTCORE,
.owner = THIS_MODULE,
.n_voltages = ARRAY_SIZE(ldo_vintcore_voltages),
.volt_table = ldo_vintcore_voltages,
.enable_time = 750,
},
.load_lp_uA = 5000,
.update_bank = 0x03,
.update_reg = 0x80,
.update_mask = 0x44,
.update_val = 0x44,
.update_val_idle = 0x44,
.update_val_normal = 0x04,
.voltage_bank = 0x03,
.voltage_reg = 0x80,
.voltage_mask = 0x38,
},
/*
* Fixed Voltage Regulators
* name, fixed mV,
* update bank, reg, mask, enable val
*/
[AB8500_LDO_TVOUT] = {
.desc = {
.name = "LDO-TVOUT",
.ops = &ab8500_regulator_mode_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_LDO_TVOUT,
.owner = THIS_MODULE,
.n_voltages = 1,
.volt_table = fixed_2000000_voltage,
.enable_time = 500,
},
.load_lp_uA = 1000,
.update_bank = 0x03,
.update_reg = 0x80,
.update_mask = 0x82,
.update_val = 0x02,
.update_val_idle = 0x82,
.update_val_normal = 0x02,
},
[AB8500_LDO_AUDIO] = {
.desc = {
.name = "LDO-AUDIO",
.ops = &ab8500_regulator_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_LDO_AUDIO,
.owner = THIS_MODULE,
.n_voltages = 1,
.enable_time = 140,
.volt_table = fixed_2000000_voltage,
},
.update_bank = 0x03,
.update_reg = 0x83,
.update_mask = 0x02,
.update_val = 0x02,
},
[AB8500_LDO_ANAMIC1] = {
.desc = {
.name = "LDO-ANAMIC1",
.ops = &ab8500_regulator_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_LDO_ANAMIC1,
.owner = THIS_MODULE,
.n_voltages = 1,
.enable_time = 500,
.volt_table = fixed_2050000_voltage,
},
.update_bank = 0x03,
.update_reg = 0x83,
.update_mask = 0x08,
.update_val = 0x08,
},
[AB8500_LDO_ANAMIC2] = {
.desc = {
.name = "LDO-ANAMIC2",
.ops = &ab8500_regulator_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_LDO_ANAMIC2,
.owner = THIS_MODULE,
.n_voltages = 1,
.enable_time = 500,
.volt_table = fixed_2050000_voltage,
},
.update_bank = 0x03,
.update_reg = 0x83,
.update_mask = 0x10,
.update_val = 0x10,
},
[AB8500_LDO_DMIC] = {
.desc = {
.name = "LDO-DMIC",
.ops = &ab8500_regulator_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_LDO_DMIC,
.owner = THIS_MODULE,
.n_voltages = 1,
.enable_time = 420,
.volt_table = fixed_1800000_voltage,
},
.update_bank = 0x03,
.update_reg = 0x83,
.update_mask = 0x04,
.update_val = 0x04,
},
/*
* Regulators with fixed voltage and normal/idle modes
*/
[AB8500_LDO_ANA] = {
.desc = {
.name = "LDO-ANA",
.ops = &ab8500_regulator_mode_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_LDO_ANA,
.owner = THIS_MODULE,
.n_voltages = 1,
.enable_time = 140,
.volt_table = fixed_1200000_voltage,
},
.load_lp_uA = 1000,
.update_bank = 0x04,
.update_reg = 0x06,
.update_mask = 0x0c,
.update_val = 0x04,
.update_val_idle = 0x0c,
.update_val_normal = 0x04,
},
};
/* AB8505 regulator information */
static struct ab8500_regulator_info
ab8505_regulator_info[AB8505_NUM_REGULATORS] = {
/*
* Variable Voltage Regulators
* name, min mV, max mV,
* update bank, reg, mask, enable val
* volt bank, reg, mask
*/
[AB8505_LDO_AUX1] = {
.desc = {
.name = "LDO-AUX1",
.ops = &ab8500_regulator_volt_mode_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8505_LDO_AUX1,
.owner = THIS_MODULE,
.n_voltages = ARRAY_SIZE(ldo_vauxn_voltages),
.volt_table = ldo_vauxn_voltages,
},
.load_lp_uA = 5000,
.update_bank = 0x04,
.update_reg = 0x09,
.update_mask = 0x03,
.update_val = 0x01,
.update_val_idle = 0x03,
.update_val_normal = 0x01,
.voltage_bank = 0x04,
.voltage_reg = 0x1f,
.voltage_mask = 0x0f,
},
[AB8505_LDO_AUX2] = {
.desc = {
.name = "LDO-AUX2",
.ops = &ab8500_regulator_volt_mode_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8505_LDO_AUX2,
.owner = THIS_MODULE,
.n_voltages = ARRAY_SIZE(ldo_vauxn_voltages),
.volt_table = ldo_vauxn_voltages,
},
.load_lp_uA = 5000,
.update_bank = 0x04,
.update_reg = 0x09,
.update_mask = 0x0c,
.update_val = 0x04,
.update_val_idle = 0x0c,
.update_val_normal = 0x04,
.voltage_bank = 0x04,
.voltage_reg = 0x20,
.voltage_mask = 0x0f,
},
[AB8505_LDO_AUX3] = {
.desc = {
.name = "LDO-AUX3",
.ops = &ab8500_regulator_volt_mode_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8505_LDO_AUX3,
.owner = THIS_MODULE,
.n_voltages = ARRAY_SIZE(ldo_vaux3_voltages),
.volt_table = ldo_vaux3_voltages,
},
.load_lp_uA = 5000,
.update_bank = 0x04,
.update_reg = 0x0a,
.update_mask = 0x03,
.update_val = 0x01,
.update_val_idle = 0x03,
.update_val_normal = 0x01,
.voltage_bank = 0x04,
.voltage_reg = 0x21,
.voltage_mask = 0x07,
},
[AB8505_LDO_AUX4] = {
.desc = {
.name = "LDO-AUX4",
.ops = &ab8500_regulator_volt_mode_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8505_LDO_AUX4,
.owner = THIS_MODULE,
.n_voltages = ARRAY_SIZE(ldo_vauxn_voltages),
.volt_table = ldo_vauxn_voltages,
},
.load_lp_uA = 5000,
/* values for Vaux4Regu register */
.update_bank = 0x04,
.update_reg = 0x2e,
.update_mask = 0x03,
.update_val = 0x01,
.update_val_idle = 0x03,
.update_val_normal = 0x01,
/* values for Vaux4SEL register */
.voltage_bank = 0x04,
.voltage_reg = 0x2f,
.voltage_mask = 0x0f,
},
[AB8505_LDO_AUX5] = {
.desc = {
.name = "LDO-AUX5",
.ops = &ab8500_regulator_volt_mode_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8505_LDO_AUX5,
.owner = THIS_MODULE,
.n_voltages = ARRAY_SIZE(ldo_vaux56_voltages),
.volt_table = ldo_vaux56_voltages,
},
.load_lp_uA = 2000,
/* values for CtrlVaux5 register */
.update_bank = 0x01,
.update_reg = 0x55,
.update_mask = 0x18,
.update_val = 0x10,
.update_val_idle = 0x18,
.update_val_normal = 0x10,
.voltage_bank = 0x01,
.voltage_reg = 0x55,
.voltage_mask = 0x07,
},
[AB8505_LDO_AUX6] = {
.desc = {
.name = "LDO-AUX6",
.ops = &ab8500_regulator_volt_mode_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8505_LDO_AUX6,
.owner = THIS_MODULE,
.n_voltages = ARRAY_SIZE(ldo_vaux56_voltages),
.volt_table = ldo_vaux56_voltages,
},
.load_lp_uA = 2000,
/* values for CtrlVaux6 register */
.update_bank = 0x01,
.update_reg = 0x56,
.update_mask = 0x18,
.update_val = 0x10,
.update_val_idle = 0x18,
.update_val_normal = 0x10,
.voltage_bank = 0x01,
.voltage_reg = 0x56,
.voltage_mask = 0x07,
},
[AB8505_LDO_INTCORE] = {
.desc = {
.name = "LDO-INTCORE",
.ops = &ab8500_regulator_volt_mode_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8505_LDO_INTCORE,
.owner = THIS_MODULE,
.n_voltages = ARRAY_SIZE(ldo_vintcore_voltages),
.volt_table = ldo_vintcore_voltages,
},
.load_lp_uA = 5000,
.update_bank = 0x03,
.update_reg = 0x80,
.update_mask = 0x44,
.update_val = 0x04,
.update_val_idle = 0x44,
.update_val_normal = 0x04,
.voltage_bank = 0x03,
.voltage_reg = 0x80,
.voltage_mask = 0x38,
},
/*
* Fixed Voltage Regulators
* name, fixed mV,
* update bank, reg, mask, enable val
*/
[AB8505_LDO_ADC] = {
.desc = {
.name = "LDO-ADC",
.ops = &ab8500_regulator_mode_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8505_LDO_ADC,
.owner = THIS_MODULE,
.n_voltages = 1,
.volt_table = fixed_2000000_voltage,
.enable_time = 10000,
},
.load_lp_uA = 1000,
.update_bank = 0x03,
.update_reg = 0x80,
.update_mask = 0x82,
.update_val = 0x02,
.update_val_idle = 0x82,
.update_val_normal = 0x02,
},
[AB8505_LDO_AUDIO] = {
.desc = {
.name = "LDO-AUDIO",
.ops = &ab8500_regulator_volt_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8505_LDO_AUDIO,
.owner = THIS_MODULE,
.n_voltages = ARRAY_SIZE(ldo_vaudio_voltages),
.volt_table = ldo_vaudio_voltages,
},
.update_bank = 0x03,
.update_reg = 0x83,
.update_mask = 0x02,
.update_val = 0x02,
.voltage_bank = 0x01,
.voltage_reg = 0x57,
.voltage_mask = 0x70,
},
[AB8505_LDO_ANAMIC1] = {
.desc = {
.name = "LDO-ANAMIC1",
.ops = &ab8500_regulator_anamic_mode_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8505_LDO_ANAMIC1,
.owner = THIS_MODULE,
.n_voltages = 1,
.volt_table = fixed_2050000_voltage,
},
.shared_mode = &ldo_anamic1_shared,
.update_bank = 0x03,
.update_reg = 0x83,
.update_mask = 0x08,
.update_val = 0x08,
.mode_bank = 0x01,
.mode_reg = 0x54,
.mode_mask = 0x04,
.mode_val_idle = 0x04,
.mode_val_normal = 0x00,
},
[AB8505_LDO_ANAMIC2] = {
.desc = {
.name = "LDO-ANAMIC2",
.ops = &ab8500_regulator_anamic_mode_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8505_LDO_ANAMIC2,
.owner = THIS_MODULE,
.n_voltages = 1,
.volt_table = fixed_2050000_voltage,
},
.shared_mode = &ldo_anamic2_shared,
.update_bank = 0x03,
.update_reg = 0x83,
.update_mask = 0x10,
.update_val = 0x10,
.mode_bank = 0x01,
.mode_reg = 0x54,
.mode_mask = 0x04,
.mode_val_idle = 0x04,
.mode_val_normal = 0x00,
},
[AB8505_LDO_AUX8] = {
.desc = {
.name = "LDO-AUX8",
.ops = &ab8500_regulator_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8505_LDO_AUX8,
.owner = THIS_MODULE,
.n_voltages = 1,
.volt_table = fixed_1800000_voltage,
},
.update_bank = 0x03,
.update_reg = 0x83,
.update_mask = 0x04,
.update_val = 0x04,
},
/*
* Regulators with fixed voltage and normal/idle modes
*/
[AB8505_LDO_ANA] = {
.desc = {
.name = "LDO-ANA",
.ops = &ab8500_regulator_volt_mode_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8505_LDO_ANA,
.owner = THIS_MODULE,
.n_voltages = ARRAY_SIZE(ldo_vana_voltages),
.volt_table = ldo_vana_voltages,
},
.load_lp_uA = 1000,
.update_bank = 0x04,
.update_reg = 0x06,
.update_mask = 0x0c,
.update_val = 0x04,
.update_val_idle = 0x0c,
.update_val_normal = 0x04,
.voltage_bank = 0x04,
.voltage_reg = 0x29,
.voltage_mask = 0x7,
},
};
static struct ab8500_shared_mode ldo_anamic1_shared = {
.shared_regulator = &ab8505_regulator_info[AB8505_LDO_ANAMIC2],
};
static struct ab8500_shared_mode ldo_anamic2_shared = {
.shared_regulator = &ab8505_regulator_info[AB8505_LDO_ANAMIC1],
};
struct ab8500_reg_init {
u8 bank;
u8 addr;
u8 mask;
};
#define REG_INIT(_id, _bank, _addr, _mask) \
[_id] = { \
.bank = _bank, \
.addr = _addr, \
.mask = _mask, \
}
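/*
 * Example usage, taken from the AB8500 table below: declare that this
 * driver owns the VpllRegu/VanaRegu bits (mask 0x0f) of register 0x06 in
 * bank 0x04:
 *
 *	REG_INIT(AB8500_VPLLVANAREGU, 0x04, 0x06, 0x0f),
 */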
/* AB8500 register init */
static struct ab8500_reg_init ab8500_reg_init[] = {
/*
* 0x30, VanaRequestCtrl
* 0xc0, VextSupply1RequestCtrl
*/
REG_INIT(AB8500_REGUREQUESTCTRL2, 0x03, 0x04, 0xf0),
/*
* 0x03, VextSupply2RequestCtrl
* 0x0c, VextSupply3RequestCtrl
* 0x30, Vaux1RequestCtrl
* 0xc0, Vaux2RequestCtrl
*/
REG_INIT(AB8500_REGUREQUESTCTRL3, 0x03, 0x05, 0xff),
/*
* 0x03, Vaux3RequestCtrl
* 0x04, SwHPReq
*/
REG_INIT(AB8500_REGUREQUESTCTRL4, 0x03, 0x06, 0x07),
/*
* 0x08, VanaSysClkReq1HPValid
* 0x20, Vaux1SysClkReq1HPValid
* 0x40, Vaux2SysClkReq1HPValid
* 0x80, Vaux3SysClkReq1HPValid
*/
REG_INIT(AB8500_REGUSYSCLKREQ1HPVALID1, 0x03, 0x07, 0xe8),
/*
* 0x10, VextSupply1SysClkReq1HPValid
* 0x20, VextSupply2SysClkReq1HPValid
* 0x40, VextSupply3SysClkReq1HPValid
*/
REG_INIT(AB8500_REGUSYSCLKREQ1HPVALID2, 0x03, 0x08, 0x70),
/*
* 0x08, VanaHwHPReq1Valid
* 0x20, Vaux1HwHPReq1Valid
* 0x40, Vaux2HwHPReq1Valid
* 0x80, Vaux3HwHPReq1Valid
*/
REG_INIT(AB8500_REGUHWHPREQ1VALID1, 0x03, 0x09, 0xe8),
/*
* 0x01, VextSupply1HwHPReq1Valid
* 0x02, VextSupply2HwHPReq1Valid
* 0x04, VextSupply3HwHPReq1Valid
*/
REG_INIT(AB8500_REGUHWHPREQ1VALID2, 0x03, 0x0a, 0x07),
/*
* 0x08, VanaHwHPReq2Valid
* 0x20, Vaux1HwHPReq2Valid
* 0x40, Vaux2HwHPReq2Valid
* 0x80, Vaux3HwHPReq2Valid
*/
REG_INIT(AB8500_REGUHWHPREQ2VALID1, 0x03, 0x0b, 0xe8),
/*
* 0x01, VextSupply1HwHPReq2Valid
* 0x02, VextSupply2HwHPReq2Valid
* 0x04, VextSupply3HwHPReq2Valid
*/
REG_INIT(AB8500_REGUHWHPREQ2VALID2, 0x03, 0x0c, 0x07),
/*
* 0x20, VanaSwHPReqValid
* 0x80, Vaux1SwHPReqValid
*/
REG_INIT(AB8500_REGUSWHPREQVALID1, 0x03, 0x0d, 0xa0),
/*
* 0x01, Vaux2SwHPReqValid
* 0x02, Vaux3SwHPReqValid
* 0x04, VextSupply1SwHPReqValid
* 0x08, VextSupply2SwHPReqValid
* 0x10, VextSupply3SwHPReqValid
*/
REG_INIT(AB8500_REGUSWHPREQVALID2, 0x03, 0x0e, 0x1f),
/*
* 0x02, SysClkReq2Valid1
* 0x04, SysClkReq3Valid1
* 0x08, SysClkReq4Valid1
* 0x10, SysClkReq5Valid1
* 0x20, SysClkReq6Valid1
* 0x40, SysClkReq7Valid1
* 0x80, SysClkReq8Valid1
*/
REG_INIT(AB8500_REGUSYSCLKREQVALID1, 0x03, 0x0f, 0xfe),
/*
* 0x02, SysClkReq2Valid2
* 0x04, SysClkReq3Valid2
* 0x08, SysClkReq4Valid2
* 0x10, SysClkReq5Valid2
* 0x20, SysClkReq6Valid2
* 0x40, SysClkReq7Valid2
* 0x80, SysClkReq8Valid2
*/
REG_INIT(AB8500_REGUSYSCLKREQVALID2, 0x03, 0x10, 0xfe),
/*
* 0x02, VTVoutEna
* 0x04, Vintcore12Ena
* 0x38, Vintcore12Sel
* 0x40, Vintcore12LP
* 0x80, VTVoutLP
*/
REG_INIT(AB8500_REGUMISC1, 0x03, 0x80, 0xfe),
/*
* 0x02, VaudioEna
* 0x04, VdmicEna
* 0x08, Vamic1Ena
* 0x10, Vamic2Ena
*/
REG_INIT(AB8500_VAUDIOSUPPLY, 0x03, 0x83, 0x1e),
/*
* 0x01, Vamic1_dzout
* 0x02, Vamic2_dzout
*/
REG_INIT(AB8500_REGUCTRL1VAMIC, 0x03, 0x84, 0x03),
/*
* 0x03, VpllRegu (NOTE! PRCMU register bits)
* 0x0c, VanaRegu
*/
REG_INIT(AB8500_VPLLVANAREGU, 0x04, 0x06, 0x0f),
/*
* 0x01, VrefDDREna
* 0x02, VrefDDRSleepMode
*/
REG_INIT(AB8500_VREFDDR, 0x04, 0x07, 0x03),
/*
* 0x03, VextSupply1Regu
* 0x0c, VextSupply2Regu
* 0x30, VextSupply3Regu
* 0x40, ExtSupply2Bypass
* 0x80, ExtSupply3Bypass
*/
REG_INIT(AB8500_EXTSUPPLYREGU, 0x04, 0x08, 0xff),
/*
* 0x03, Vaux1Regu
* 0x0c, Vaux2Regu
*/
REG_INIT(AB8500_VAUX12REGU, 0x04, 0x09, 0x0f),
/*
* 0x03, Vaux3Regu
*/
REG_INIT(AB8500_VRF1VAUX3REGU, 0x04, 0x0a, 0x03),
/*
* 0x0f, Vaux1Sel
*/
REG_INIT(AB8500_VAUX1SEL, 0x04, 0x1f, 0x0f),
/*
* 0x0f, Vaux2Sel
*/
REG_INIT(AB8500_VAUX2SEL, 0x04, 0x20, 0x0f),
/*
* 0x07, Vaux3Sel
*/
REG_INIT(AB8500_VRF1VAUX3SEL, 0x04, 0x21, 0x07),
/*
* 0x01, VextSupply12LP
*/
REG_INIT(AB8500_REGUCTRL2SPARE, 0x04, 0x22, 0x01),
/*
* 0x04, Vaux1Disch
* 0x08, Vaux2Disch
* 0x10, Vaux3Disch
* 0x20, Vintcore12Disch
* 0x40, VTVoutDisch
* 0x80, VaudioDisch
*/
REG_INIT(AB8500_REGUCTRLDISCH, 0x04, 0x43, 0xfc),
/*
* 0x02, VanaDisch
* 0x04, VdmicPullDownEna
* 0x10, VdmicDisch
*/
REG_INIT(AB8500_REGUCTRLDISCH2, 0x04, 0x44, 0x16),
};
/* AB8505 register init */
static struct ab8500_reg_init ab8505_reg_init[] = {
/*
* 0x03, VarmRequestCtrl
* 0x0c, VsmpsCRequestCtrl
* 0x30, VsmpsARequestCtrl
* 0xc0, VsmpsBRequestCtrl
*/
REG_INIT(AB8505_REGUREQUESTCTRL1, 0x03, 0x03, 0xff),
/*
* 0x03, VsafeRequestCtrl
* 0x0c, VpllRequestCtrl
* 0x30, VanaRequestCtrl
*/
REG_INIT(AB8505_REGUREQUESTCTRL2, 0x03, 0x04, 0x3f),
/*
* 0x30, Vaux1RequestCtrl
* 0xc0, Vaux2RequestCtrl
*/
REG_INIT(AB8505_REGUREQUESTCTRL3, 0x03, 0x05, 0xf0),
/*
* 0x03, Vaux3RequestCtrl
* 0x04, SwHPReq
*/
REG_INIT(AB8505_REGUREQUESTCTRL4, 0x03, 0x06, 0x07),
/*
* 0x01, VsmpsASysClkReq1HPValid
* 0x02, VsmpsBSysClkReq1HPValid
* 0x04, VsafeSysClkReq1HPValid
* 0x08, VanaSysClkReq1HPValid
* 0x10, VpllSysClkReq1HPValid
* 0x20, Vaux1SysClkReq1HPValid
* 0x40, Vaux2SysClkReq1HPValid
* 0x80, Vaux3SysClkReq1HPValid
*/
REG_INIT(AB8505_REGUSYSCLKREQ1HPVALID1, 0x03, 0x07, 0xff),
/*
* 0x01, VsmpsCSysClkReq1HPValid
* 0x02, VarmSysClkReq1HPValid
* 0x04, VbbSysClkReq1HPValid
* 0x08, VsmpsMSysClkReq1HPValid
*/
REG_INIT(AB8505_REGUSYSCLKREQ1HPVALID2, 0x03, 0x08, 0x0f),
/*
* 0x01, VsmpsAHwHPReq1Valid
* 0x02, VsmpsBHwHPReq1Valid
* 0x04, VsafeHwHPReq1Valid
* 0x08, VanaHwHPReq1Valid
* 0x10, VpllHwHPReq1Valid
* 0x20, Vaux1HwHPReq1Valid
* 0x40, Vaux2HwHPReq1Valid
* 0x80, Vaux3HwHPReq1Valid
*/
REG_INIT(AB8505_REGUHWHPREQ1VALID1, 0x03, 0x09, 0xff),
/*
* 0x08, VsmpsMHwHPReq1Valid
*/
REG_INIT(AB8505_REGUHWHPREQ1VALID2, 0x03, 0x0a, 0x08),
/*
* 0x01, VsmpsAHwHPReq2Valid
* 0x02, VsmpsBHwHPReq2Valid
* 0x04, VsafeHwHPReq2Valid
* 0x08, VanaHwHPReq2Valid
* 0x10, VpllHwHPReq2Valid
* 0x20, Vaux1HwHPReq2Valid
* 0x40, Vaux2HwHPReq2Valid
* 0x80, Vaux3HwHPReq2Valid
*/
REG_INIT(AB8505_REGUHWHPREQ2VALID1, 0x03, 0x0b, 0xff),
/*
* 0x08, VsmpsMHwHPReq2Valid
*/
REG_INIT(AB8505_REGUHWHPREQ2VALID2, 0x03, 0x0c, 0x08),
/*
* 0x01, VsmpsCSwHPReqValid
* 0x02, VarmSwHPReqValid
* 0x04, VsmpsASwHPReqValid
* 0x08, VsmpsBSwHPReqValid
* 0x10, VsafeSwHPReqValid
* 0x20, VanaSwHPReqValid
* 0x40, VpllSwHPReqValid
* 0x80, Vaux1SwHPReqValid
*/
REG_INIT(AB8505_REGUSWHPREQVALID1, 0x03, 0x0d, 0xff),
/*
* 0x01, Vaux2SwHPReqValid
* 0x02, Vaux3SwHPReqValid
* 0x20, VsmpsMSwHPReqValid
*/
REG_INIT(AB8505_REGUSWHPREQVALID2, 0x03, 0x0e, 0x23),
/*
* 0x02, SysClkReq2Valid1
* 0x04, SysClkReq3Valid1
* 0x08, SysClkReq4Valid1
*/
REG_INIT(AB8505_REGUSYSCLKREQVALID1, 0x03, 0x0f, 0x0e),
/*
* 0x02, SysClkReq2Valid2
* 0x04, SysClkReq3Valid2
* 0x08, SysClkReq4Valid2
*/
REG_INIT(AB8505_REGUSYSCLKREQVALID2, 0x03, 0x10, 0x0e),
/*
* 0x01, Vaux4SwHPReqValid
* 0x02, Vaux4HwHPReq2Valid
* 0x04, Vaux4HwHPReq1Valid
* 0x08, Vaux4SysClkReq1HPValid
*/
REG_INIT(AB8505_REGUVAUX4REQVALID, 0x03, 0x11, 0x0f),
/*
* 0x02, VadcEna
* 0x04, VintCore12Ena
* 0x38, VintCore12Sel
* 0x40, VintCore12LP
* 0x80, VadcLP
*/
REG_INIT(AB8505_REGUMISC1, 0x03, 0x80, 0xfe),
/*
* 0x02, VaudioEna
* 0x04, VdmicEna
* 0x08, Vamic1Ena
* 0x10, Vamic2Ena
*/
REG_INIT(AB8505_VAUDIOSUPPLY, 0x03, 0x83, 0x1e),
/*
* 0x01, Vamic1_dzout
* 0x02, Vamic2_dzout
*/
REG_INIT(AB8505_REGUCTRL1VAMIC, 0x03, 0x84, 0x03),
/*
* 0x03, VsmpsARegu
* 0x0c, VsmpsASelCtrl
* 0x10, VsmpsAAutoMode
* 0x20, VsmpsAPWMMode
*/
REG_INIT(AB8505_VSMPSAREGU, 0x04, 0x03, 0x3f),
/*
* 0x03, VsmpsBRegu
* 0x0c, VsmpsBSelCtrl
* 0x10, VsmpsBAutoMode
* 0x20, VsmpsBPWMMode
*/
REG_INIT(AB8505_VSMPSBREGU, 0x04, 0x04, 0x3f),
/*
* 0x03, VsafeRegu
* 0x0c, VsafeSelCtrl
* 0x10, VsafeAutoMode
* 0x20, VsafePWMMode
*/
REG_INIT(AB8505_VSAFEREGU, 0x04, 0x05, 0x3f),
/*
* 0x03, VpllRegu (NOTE! PRCMU register bits)
* 0x0c, VanaRegu
*/
REG_INIT(AB8505_VPLLVANAREGU, 0x04, 0x06, 0x0f),
/*
* 0x03, VextSupply1Regu
* 0x0c, VextSupply2Regu
* 0x30, VextSupply3Regu
* 0x40, ExtSupply2Bypass
* 0x80, ExtSupply3Bypass
*/
REG_INIT(AB8505_EXTSUPPLYREGU, 0x04, 0x08, 0xff),
/*
* 0x03, Vaux1Regu
* 0x0c, Vaux2Regu
*/
REG_INIT(AB8505_VAUX12REGU, 0x04, 0x09, 0x0f),
/*
* 0x0f, Vaux3Regu
*/
REG_INIT(AB8505_VRF1VAUX3REGU, 0x04, 0x0a, 0x0f),
/*
* 0x3f, VsmpsASel1
*/
REG_INIT(AB8505_VSMPSASEL1, 0x04, 0x13, 0x3f),
/*
* 0x3f, VsmpsASel2
*/
REG_INIT(AB8505_VSMPSASEL2, 0x04, 0x14, 0x3f),
/*
* 0x3f, VsmpsASel3
*/
REG_INIT(AB8505_VSMPSASEL3, 0x04, 0x15, 0x3f),
/*
* 0x3f, VsmpsBSel1
*/
REG_INIT(AB8505_VSMPSBSEL1, 0x04, 0x17, 0x3f),
/*
* 0x3f, VsmpsBSel2
*/
REG_INIT(AB8505_VSMPSBSEL2, 0x04, 0x18, 0x3f),
/*
* 0x3f, VsmpsBSel3
*/
REG_INIT(AB8505_VSMPSBSEL3, 0x04, 0x19, 0x3f),
/*
* 0x7f, VsafeSel1
*/
REG_INIT(AB8505_VSAFESEL1, 0x04, 0x1b, 0x7f),
/*
	 * 0x7f, VsafeSel2
*/
REG_INIT(AB8505_VSAFESEL2, 0x04, 0x1c, 0x7f),
/*
	 * 0x7f, VsafeSel3
*/
REG_INIT(AB8505_VSAFESEL3, 0x04, 0x1d, 0x7f),
/*
* 0x0f, Vaux1Sel
*/
REG_INIT(AB8505_VAUX1SEL, 0x04, 0x1f, 0x0f),
/*
* 0x0f, Vaux2Sel
*/
REG_INIT(AB8505_VAUX2SEL, 0x04, 0x20, 0x0f),
/*
* 0x07, Vaux3Sel
* 0x30, VRF1Sel
*/
REG_INIT(AB8505_VRF1VAUX3SEL, 0x04, 0x21, 0x37),
/*
* 0x03, Vaux4RequestCtrl
*/
REG_INIT(AB8505_VAUX4REQCTRL, 0x04, 0x2d, 0x03),
/*
* 0x03, Vaux4Regu
*/
REG_INIT(AB8505_VAUX4REGU, 0x04, 0x2e, 0x03),
/*
* 0x0f, Vaux4Sel
*/
REG_INIT(AB8505_VAUX4SEL, 0x04, 0x2f, 0x0f),
/*
* 0x04, Vaux1Disch
* 0x08, Vaux2Disch
* 0x10, Vaux3Disch
* 0x20, Vintcore12Disch
* 0x40, VTVoutDisch
* 0x80, VaudioDisch
*/
REG_INIT(AB8505_REGUCTRLDISCH, 0x04, 0x43, 0xfc),
/*
* 0x02, VanaDisch
* 0x04, VdmicPullDownEna
* 0x10, VdmicDisch
*/
REG_INIT(AB8505_REGUCTRLDISCH2, 0x04, 0x44, 0x16),
/*
* 0x01, Vaux4Disch
*/
REG_INIT(AB8505_REGUCTRLDISCH3, 0x04, 0x48, 0x01),
/*
* 0x07, Vaux5Sel
* 0x08, Vaux5LP
* 0x10, Vaux5Ena
* 0x20, Vaux5Disch
* 0x40, Vaux5DisSfst
* 0x80, Vaux5DisPulld
*/
REG_INIT(AB8505_CTRLVAUX5, 0x01, 0x55, 0xff),
/*
* 0x07, Vaux6Sel
* 0x08, Vaux6LP
* 0x10, Vaux6Ena
* 0x80, Vaux6DisPulld
*/
REG_INIT(AB8505_CTRLVAUX6, 0x01, 0x56, 0x9f),
};
static struct of_regulator_match ab8500_regulator_match[] = {
{ .name = "ab8500_ldo_aux1", .driver_data = (void *) AB8500_LDO_AUX1, },
{ .name = "ab8500_ldo_aux2", .driver_data = (void *) AB8500_LDO_AUX2, },
{ .name = "ab8500_ldo_aux3", .driver_data = (void *) AB8500_LDO_AUX3, },
{ .name = "ab8500_ldo_intcore", .driver_data = (void *) AB8500_LDO_INTCORE, },
{ .name = "ab8500_ldo_tvout", .driver_data = (void *) AB8500_LDO_TVOUT, },
{ .name = "ab8500_ldo_audio", .driver_data = (void *) AB8500_LDO_AUDIO, },
{ .name = "ab8500_ldo_anamic1", .driver_data = (void *) AB8500_LDO_ANAMIC1, },
{ .name = "ab8500_ldo_anamic2", .driver_data = (void *) AB8500_LDO_ANAMIC2, },
{ .name = "ab8500_ldo_dmic", .driver_data = (void *) AB8500_LDO_DMIC, },
{ .name = "ab8500_ldo_ana", .driver_data = (void *) AB8500_LDO_ANA, },
};
static struct of_regulator_match ab8505_regulator_match[] = {
{ .name = "ab8500_ldo_aux1", .driver_data = (void *) AB8505_LDO_AUX1, },
{ .name = "ab8500_ldo_aux2", .driver_data = (void *) AB8505_LDO_AUX2, },
{ .name = "ab8500_ldo_aux3", .driver_data = (void *) AB8505_LDO_AUX3, },
{ .name = "ab8500_ldo_aux4", .driver_data = (void *) AB8505_LDO_AUX4, },
{ .name = "ab8500_ldo_aux5", .driver_data = (void *) AB8505_LDO_AUX5, },
{ .name = "ab8500_ldo_aux6", .driver_data = (void *) AB8505_LDO_AUX6, },
{ .name = "ab8500_ldo_intcore", .driver_data = (void *) AB8505_LDO_INTCORE, },
{ .name = "ab8500_ldo_adc", .driver_data = (void *) AB8505_LDO_ADC, },
{ .name = "ab8500_ldo_audio", .driver_data = (void *) AB8505_LDO_AUDIO, },
{ .name = "ab8500_ldo_anamic1", .driver_data = (void *) AB8505_LDO_ANAMIC1, },
{ .name = "ab8500_ldo_anamic2", .driver_data = (void *) AB8505_LDO_ANAMIC2, },
{ .name = "ab8500_ldo_aux8", .driver_data = (void *) AB8505_LDO_AUX8, },
{ .name = "ab8500_ldo_ana", .driver_data = (void *) AB8505_LDO_ANA, },
};
static struct {
struct ab8500_regulator_info *info;
int info_size;
struct ab8500_reg_init *init;
int init_size;
struct of_regulator_match *match;
int match_size;
} abx500_regulator;
static void abx500_get_regulator_info(struct ab8500 *ab8500)
{
if (is_ab8505(ab8500)) {
abx500_regulator.info = ab8505_regulator_info;
abx500_regulator.info_size = ARRAY_SIZE(ab8505_regulator_info);
abx500_regulator.init = ab8505_reg_init;
abx500_regulator.init_size = AB8505_NUM_REGULATOR_REGISTERS;
abx500_regulator.match = ab8505_regulator_match;
abx500_regulator.match_size = ARRAY_SIZE(ab8505_regulator_match);
} else {
abx500_regulator.info = ab8500_regulator_info;
abx500_regulator.info_size = ARRAY_SIZE(ab8500_regulator_info);
abx500_regulator.init = ab8500_reg_init;
abx500_regulator.init_size = AB8500_NUM_REGULATOR_REGISTERS;
abx500_regulator.match = ab8500_regulator_match;
abx500_regulator.match_size = ARRAY_SIZE(ab8500_regulator_match);
}
}
static int ab8500_regulator_register(struct platform_device *pdev,
struct regulator_init_data *init_data,
int id, struct device_node *np)
{
struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
struct ab8500_regulator_info *info = NULL;
struct regulator_config config = { };
struct regulator_dev *rdev;
/* assign per-regulator data */
info = &abx500_regulator.info[id];
info->dev = &pdev->dev;
config.dev = &pdev->dev;
config.init_data = init_data;
config.driver_data = info;
config.of_node = np;
/* fix for hardware before ab8500v2.0 */
if (is_ab8500_1p1_or_earlier(ab8500)) {
if (info->desc.id == AB8500_LDO_AUX3) {
info->desc.n_voltages =
ARRAY_SIZE(ldo_vauxn_voltages);
info->desc.volt_table = ldo_vauxn_voltages;
info->voltage_mask = 0xf;
}
}
/* register regulator with framework */
rdev = devm_regulator_register(&pdev->dev, &info->desc, &config);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
info->desc.name);
return PTR_ERR(rdev);
}
return 0;
}
static int ab8500_regulator_probe(struct platform_device *pdev)
{
struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
struct device_node *np = pdev->dev.of_node;
struct of_regulator_match *match;
int err, i;
if (!ab8500) {
dev_err(&pdev->dev, "null mfd parent\n");
return -EINVAL;
}
abx500_get_regulator_info(ab8500);
err = of_regulator_match(&pdev->dev, np,
abx500_regulator.match,
abx500_regulator.match_size);
if (err < 0) {
dev_err(&pdev->dev,
"Error parsing regulator init data: %d\n", err);
return err;
}
match = abx500_regulator.match;
for (i = 0; i < abx500_regulator.info_size; i++) {
err = ab8500_regulator_register(pdev, match[i].init_data, i,
match[i].of_node);
if (err)
return err;
}
return 0;
}
static struct platform_driver ab8500_regulator_driver = {
.probe = ab8500_regulator_probe,
.driver = {
.name = "ab8500-regulator",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
static int __init ab8500_regulator_init(void)
{
int ret;
ret = platform_driver_register(&ab8500_regulator_driver);
if (ret != 0)
pr_err("Failed to register ab8500 regulator: %d\n", ret);
return ret;
}
subsys_initcall(ab8500_regulator_init);
static void __exit ab8500_regulator_exit(void)
{
platform_driver_unregister(&ab8500_regulator_driver);
}
module_exit(ab8500_regulator_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Sundar Iyer <[email protected]>");
MODULE_AUTHOR("Bengt Jonsson <[email protected]>");
MODULE_AUTHOR("Daniel Willerud <[email protected]>");
MODULE_DESCRIPTION("Regulator Driver for ST-Ericsson AB8500 Mixed-Sig PMIC");
MODULE_ALIAS("platform:ab8500-regulator");
| linux-master | drivers/regulator/ab8500.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* max8952.c - Voltage and current regulation for the Maxim 8952
*
* Copyright (C) 2010 Samsung Electronics
* MyungJoo Ham <[email protected]>
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/max8952.h>
#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/regulator/of_regulator.h>
#include <linux/slab.h>
/* Registers */
enum {
MAX8952_REG_MODE0,
MAX8952_REG_MODE1,
MAX8952_REG_MODE2,
MAX8952_REG_MODE3,
MAX8952_REG_CONTROL,
MAX8952_REG_SYNC,
MAX8952_REG_RAMP,
MAX8952_REG_CHIP_ID1,
MAX8952_REG_CHIP_ID2,
};
struct max8952_data {
struct i2c_client *client;
struct max8952_platform_data *pdata;
struct gpio_desc *vid0_gpiod;
struct gpio_desc *vid1_gpiod;
bool vid0;
bool vid1;
};
static int max8952_read_reg(struct max8952_data *max8952, u8 reg)
{
int ret = i2c_smbus_read_byte_data(max8952->client, reg);
if (ret > 0)
ret &= 0xff;
return ret;
}
static int max8952_write_reg(struct max8952_data *max8952,
u8 reg, u8 value)
{
return i2c_smbus_write_byte_data(max8952->client, reg, value);
}
static int max8952_list_voltage(struct regulator_dev *rdev,
unsigned int selector)
{
struct max8952_data *max8952 = rdev_get_drvdata(rdev);
if (rdev_get_id(rdev) != 0)
return -EINVAL;
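	/*
	 * dvs_mode[] holds 10 mV steps above 770 mV (see max8952_parse_dt),
	 * so for example a stored value of 33 corresponds to 1.1 V.
	 */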
return (max8952->pdata->dvs_mode[selector] * 10 + 770) * 1000;
}
static int max8952_get_voltage_sel(struct regulator_dev *rdev)
{
struct max8952_data *max8952 = rdev_get_drvdata(rdev);
u8 vid = 0;
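	/* The 2-bit DVS selector is encoded as VID1:VID0 */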
if (max8952->vid0)
vid += 1;
if (max8952->vid1)
vid += 2;
return vid;
}
static int max8952_set_voltage_sel(struct regulator_dev *rdev,
unsigned selector)
{
struct max8952_data *max8952 = rdev_get_drvdata(rdev);
if (!max8952->vid0_gpiod || !max8952->vid1_gpiod) {
/* DVS not supported */
return -EPERM;
}
max8952->vid0 = selector & 0x1;
max8952->vid1 = (selector >> 1) & 0x1;
gpiod_set_value(max8952->vid0_gpiod, max8952->vid0);
gpiod_set_value(max8952->vid1_gpiod, max8952->vid1);
return 0;
}
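/*
 * Illustrative trace (added commentary, not in the original source):
 * selector 2 yields vid0 = 0 and vid1 = 1, so VID0 is driven low and
 * VID1 high, which on the MAX8952 selects the voltage programmed into
 * MAX8952_REG_MODE2 (assuming the usual VID-to-MODE-register mapping).
 */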
static const struct regulator_ops max8952_ops = {
.list_voltage = max8952_list_voltage,
.get_voltage_sel = max8952_get_voltage_sel,
.set_voltage_sel = max8952_set_voltage_sel,
};
static const struct regulator_desc regulator = {
.name = "MAX8952_VOUT",
.id = 0,
.n_voltages = MAX8952_NUM_DVS_MODE,
.ops = &max8952_ops,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
};
#ifdef CONFIG_OF
static const struct of_device_id max8952_dt_match[] = {
{ .compatible = "maxim,max8952" },
{},
};
MODULE_DEVICE_TABLE(of, max8952_dt_match);
static struct max8952_platform_data *max8952_parse_dt(struct device *dev)
{
struct max8952_platform_data *pd;
struct device_node *np = dev->of_node;
int ret;
int i;
pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
if (!pd)
return NULL;
if (of_property_read_u32(np, "max8952,default-mode", &pd->default_mode))
dev_warn(dev, "Default mode not specified, assuming 0\n");
ret = of_property_read_u32_array(np, "max8952,dvs-mode-microvolt",
pd->dvs_mode, ARRAY_SIZE(pd->dvs_mode));
if (ret) {
dev_err(dev, "max8952,dvs-mode-microvolt property not specified");
return NULL;
}
for (i = 0; i < ARRAY_SIZE(pd->dvs_mode); ++i) {
if (pd->dvs_mode[i] < 770000 || pd->dvs_mode[i] > 1400000) {
dev_err(dev, "DVS voltage %d out of range\n", i);
return NULL;
}
pd->dvs_mode[i] = (pd->dvs_mode[i] - 770000) / 10000;
}
if (of_property_read_u32(np, "max8952,sync-freq", &pd->sync_freq))
dev_warn(dev, "max8952,sync-freq property not specified, defaulting to 26MHz\n");
if (of_property_read_u32(np, "max8952,ramp-speed", &pd->ramp_speed))
dev_warn(dev, "max8952,ramp-speed property not specified, defaulting to 32mV/us\n");
pd->reg_data = of_get_regulator_init_data(dev, np, &regulator);
if (!pd->reg_data) {
dev_err(dev, "Failed to parse regulator init data\n");
return NULL;
}
return pd;
}
#else
static struct max8952_platform_data *max8952_parse_dt(struct device *dev)
{
return NULL;
}
#endif
static int max8952_pmic_probe(struct i2c_client *client)
{
struct i2c_adapter *adapter = client->adapter;
struct max8952_platform_data *pdata = dev_get_platdata(&client->dev);
struct regulator_config config = { };
struct max8952_data *max8952;
struct regulator_dev *rdev;
struct gpio_desc *gpiod;
enum gpiod_flags gflags;
int ret = 0;
if (client->dev.of_node)
pdata = max8952_parse_dt(&client->dev);
if (!pdata) {
dev_err(&client->dev, "Require the platform data\n");
return -EINVAL;
}
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE))
return -EIO;
max8952 = devm_kzalloc(&client->dev, sizeof(struct max8952_data),
GFP_KERNEL);
if (!max8952)
return -ENOMEM;
max8952->client = client;
max8952->pdata = pdata;
config.dev = &client->dev;
config.init_data = pdata->reg_data;
config.driver_data = max8952;
config.of_node = client->dev.of_node;
if (pdata->reg_data->constraints.boot_on)
gflags = GPIOD_OUT_HIGH;
else
gflags = GPIOD_OUT_LOW;
gflags |= GPIOD_FLAGS_BIT_NONEXCLUSIVE;
/*
* Do not use devm* here: the regulator core takes over the
* lifecycle management of the GPIO descriptor.
*/
gpiod = gpiod_get_optional(&client->dev,
"max8952,en",
gflags);
if (IS_ERR(gpiod))
return PTR_ERR(gpiod);
if (gpiod)
config.ena_gpiod = gpiod;
rdev = devm_regulator_register(&client->dev, &regulator, &config);
if (IS_ERR(rdev)) {
ret = PTR_ERR(rdev);
dev_err(&client->dev, "regulator init failed (%d)\n", ret);
return ret;
}
max8952->vid0 = pdata->default_mode & 0x1;
max8952->vid1 = (pdata->default_mode >> 1) & 0x1;
/* Fetch vid0 and vid1 GPIOs if available */
gflags = max8952->vid0 ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
max8952->vid0_gpiod = devm_gpiod_get_index_optional(&client->dev,
"max8952,vid",
0, gflags);
if (IS_ERR(max8952->vid0_gpiod))
return PTR_ERR(max8952->vid0_gpiod);
gflags = max8952->vid1 ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
max8952->vid1_gpiod = devm_gpiod_get_index_optional(&client->dev,
"max8952,vid",
1, gflags);
if (IS_ERR(max8952->vid1_gpiod))
return PTR_ERR(max8952->vid1_gpiod);
/* If either VID GPIO is missing just disable this */
if (!max8952->vid0_gpiod || !max8952->vid1_gpiod) {
dev_warn(&client->dev, "VID0/1 gpio invalid: "
"DVS not available.\n");
max8952->vid0 = 0;
max8952->vid1 = 0;
/* Make sure if we have any descriptors they get set to low */
if (max8952->vid0_gpiod)
gpiod_set_value(max8952->vid0_gpiod, 0);
if (max8952->vid1_gpiod)
gpiod_set_value(max8952->vid1_gpiod, 0);
/* Disable Pulldown of EN only */
max8952_write_reg(max8952, MAX8952_REG_CONTROL, 0x60);
dev_err(&client->dev, "DVS modes disabled because VID0 and VID1"
" do not have proper controls.\n");
} else {
/*
* Disable Pulldown on EN, VID0, VID1 to reduce
* leakage current of MAX8952 assuming that MAX8952
* is turned on (EN==1). Note that without having VID0/1
* properly connected, turning pulldown off can be
* problematic. Thus, turn this off only when they are
* controllable by GPIO.
*/
max8952_write_reg(max8952, MAX8952_REG_CONTROL, 0x0);
}
max8952_write_reg(max8952, MAX8952_REG_MODE0,
(max8952_read_reg(max8952,
MAX8952_REG_MODE0) & 0xC0) |
(pdata->dvs_mode[0] & 0x3F));
max8952_write_reg(max8952, MAX8952_REG_MODE1,
(max8952_read_reg(max8952,
MAX8952_REG_MODE1) & 0xC0) |
(pdata->dvs_mode[1] & 0x3F));
max8952_write_reg(max8952, MAX8952_REG_MODE2,
(max8952_read_reg(max8952,
MAX8952_REG_MODE2) & 0xC0) |
(pdata->dvs_mode[2] & 0x3F));
max8952_write_reg(max8952, MAX8952_REG_MODE3,
(max8952_read_reg(max8952,
MAX8952_REG_MODE3) & 0xC0) |
(pdata->dvs_mode[3] & 0x3F));
max8952_write_reg(max8952, MAX8952_REG_SYNC,
(max8952_read_reg(max8952, MAX8952_REG_SYNC) & 0x3F) |
((pdata->sync_freq & 0x3) << 6));
max8952_write_reg(max8952, MAX8952_REG_RAMP,
(max8952_read_reg(max8952, MAX8952_REG_RAMP) & 0x1F) |
((pdata->ramp_speed & 0x7) << 5));
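/*
 * Note on the read-modify-write pattern above (added commentary, not in
 * the original source): each update preserves the bits outside the
 * target field, e.g. MODE0..MODE3 keep their top two bits ("& 0xC0")
 * while the 6-bit voltage selector is replaced; SYNC and RAMP do the
 * same for their fields at bits 7:6 and 7:5 respectively.
 */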
i2c_set_clientdata(client, max8952);
return 0;
}
static const struct i2c_device_id max8952_ids[] = {
{ "max8952", 0 },
{ },
};
MODULE_DEVICE_TABLE(i2c, max8952_ids);
static struct i2c_driver max8952_pmic_driver = {
.probe = max8952_pmic_probe,
.driver = {
.name = "max8952",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(max8952_dt_match),
},
.id_table = max8952_ids,
};
static int __init max8952_pmic_init(void)
{
return i2c_add_driver(&max8952_pmic_driver);
}
subsys_initcall(max8952_pmic_init);
static void __exit max8952_pmic_exit(void)
{
i2c_del_driver(&max8952_pmic_driver);
}
module_exit(max8952_pmic_exit);
MODULE_DESCRIPTION("MAXIM 8952 voltage regulator driver");
MODULE_AUTHOR("MyungJoo Ham <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/regulator/max8952.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Regulators driver for Marvell 88PM800
*
* Copyright (C) 2012 Marvell International Ltd.
* Joseph(Yossi) Hanin <[email protected]>
* Yi Zhang <[email protected]>
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/mfd/88pm80x.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/regulator/of_regulator.h>
/* LDO1 with DVC[0..3] */
#define PM800_LDO1_VOUT (0x08) /* VOUT1 */
#define PM800_LDO1_VOUT_2 (0x09)
#define PM800_LDO1_VOUT_3 (0x0A)
#define PM800_LDO2_VOUT (0x0B)
#define PM800_LDO3_VOUT (0x0C)
#define PM800_LDO4_VOUT (0x0D)
#define PM800_LDO5_VOUT (0x0E)
#define PM800_LDO6_VOUT (0x0F)
#define PM800_LDO7_VOUT (0x10)
#define PM800_LDO8_VOUT (0x11)
#define PM800_LDO9_VOUT (0x12)
#define PM800_LDO10_VOUT (0x13)
#define PM800_LDO11_VOUT (0x14)
#define PM800_LDO12_VOUT (0x15)
#define PM800_LDO13_VOUT (0x16)
#define PM800_LDO14_VOUT (0x17)
#define PM800_LDO15_VOUT (0x18)
#define PM800_LDO16_VOUT (0x19)
#define PM800_LDO17_VOUT (0x1A)
#define PM800_LDO18_VOUT (0x1B)
#define PM800_LDO19_VOUT (0x1C)
/* BUCK1 with DVC[0..3] */
#define PM800_BUCK1 (0x3C)
#define PM800_BUCK1_1 (0x3D)
#define PM800_BUCK1_2 (0x3E)
#define PM800_BUCK1_3 (0x3F)
#define PM800_BUCK2 (0x40)
#define PM800_BUCK3 (0x41)
#define PM800_BUCK4 (0x42)
#define PM800_BUCK4_1 (0x43)
#define PM800_BUCK4_2 (0x44)
#define PM800_BUCK4_3 (0x45)
#define PM800_BUCK5 (0x46)
#define PM800_BUCK_ENA (0x50)
#define PM800_LDO_ENA1_1 (0x51)
#define PM800_LDO_ENA1_2 (0x52)
#define PM800_LDO_ENA1_3 (0x53)
#define PM800_LDO_ENA2_1 (0x56)
#define PM800_LDO_ENA2_2 (0x57)
#define PM800_LDO_ENA2_3 (0x58)
#define PM800_BUCK1_MISC1 (0x78)
#define PM800_BUCK3_MISC1 (0x7E)
#define PM800_BUCK4_MISC1 (0x81)
#define PM800_BUCK5_MISC1 (0x84)
struct pm800_regulator_info {
struct regulator_desc desc;
int max_ua;
};
/*
* vreg - the buck register name string.
* ereg - the name string for the enable register.
* ebit - the bit number in the enable register.
* amax - the maximum current (in uA).
* n_volt - number of available selectors.
* Bucks have two different voltage step sizes, so the voltage map is
* easier to describe with linear ranges than with a constant voltage table.
*/
#define PM800_BUCK(match, vreg, ereg, ebit, amax, volt_ranges, n_volt) \
{ \
.desc = { \
.name = #vreg, \
.of_match = of_match_ptr(#match), \
.regulators_node = of_match_ptr("regulators"), \
.ops = &pm800_volt_range_ops, \
.type = REGULATOR_VOLTAGE, \
.id = PM800_ID_##vreg, \
.owner = THIS_MODULE, \
.n_voltages = n_volt, \
.linear_ranges = volt_ranges, \
.n_linear_ranges = ARRAY_SIZE(volt_ranges), \
.vsel_reg = PM800_##vreg, \
.vsel_mask = 0x7f, \
.enable_reg = PM800_##ereg, \
.enable_mask = 1 << (ebit), \
}, \
.max_ua = (amax), \
}
/*
* vreg - the LDO register name string.
* ereg - the name string for the enable register.
* ebit - the bit number in the enable register.
* amax - the maximum current (in uA).
* volt_table - the LDO voltage table.
* The LDOs would need too many linear ranges, so a flat volt_table is
* simpler and faster to look up.
*/
#define PM800_LDO(match, vreg, ereg, ebit, amax, ldo_volt_table) \
{ \
.desc = { \
.name = #vreg, \
.of_match = of_match_ptr(#match), \
.regulators_node = of_match_ptr("regulators"), \
.ops = &pm800_volt_table_ops, \
.type = REGULATOR_VOLTAGE, \
.id = PM800_ID_##vreg, \
.owner = THIS_MODULE, \
.n_voltages = ARRAY_SIZE(ldo_volt_table), \
.vsel_reg = PM800_##vreg##_VOUT, \
.vsel_mask = 0xf, \
.enable_reg = PM800_##ereg, \
.enable_mask = 1 << (ebit), \
.volt_table = ldo_volt_table, \
}, \
.max_ua = (amax), \
}
/* Ranges are sorted in ascending order. */
static const struct linear_range buck1_volt_range[] = {
REGULATOR_LINEAR_RANGE(600000, 0, 0x4f, 12500),
REGULATOR_LINEAR_RANGE(1600000, 0x50, 0x54, 50000),
};
/* BUCK 2~5 have same ranges. */
static const struct linear_range buck2_5_volt_range[] = {
REGULATOR_LINEAR_RANGE(600000, 0, 0x4f, 12500),
REGULATOR_LINEAR_RANGE(1600000, 0x50, 0x72, 50000),
};
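/*
 * Worked example of the range lookup (added commentary, not in the
 * original source): selector 0x20 falls in the first range, giving
 * 600000 + 0x20 * 12500 = 1,000,000 uV; selector 0x52 falls in the
 * second, giving 1600000 + (0x52 - 0x50) * 50000 = 1,700,000 uV.
 */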
static const unsigned int ldo1_volt_table[] = {
600000, 650000, 700000, 750000, 800000, 850000, 900000, 950000,
1000000, 1050000, 1100000, 1150000, 1200000, 1300000, 1400000, 1500000,
};
static const unsigned int ldo2_volt_table[] = {
1700000, 1800000, 1900000, 2000000, 2100000, 2500000, 2700000, 2800000,
};
/* LDO 3~17 have same voltage table. */
static const unsigned int ldo3_17_volt_table[] = {
1200000, 1250000, 1700000, 1800000, 1850000, 1900000, 2500000, 2600000,
2700000, 2750000, 2800000, 2850000, 2900000, 3000000, 3100000, 3300000,
};
/* LDO 18~19 have same voltage table. */
static const unsigned int ldo18_19_volt_table[] = {
1700000, 1800000, 1900000, 2500000, 2800000, 2900000, 3100000, 3300000,
};
static int pm800_get_current_limit(struct regulator_dev *rdev)
{
struct pm800_regulator_info *info = rdev_get_drvdata(rdev);
return info->max_ua;
}
static const struct regulator_ops pm800_volt_range_ops = {
.list_voltage = regulator_list_voltage_linear_range,
.map_voltage = regulator_map_voltage_linear_range,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
.get_current_limit = pm800_get_current_limit,
};
static const struct regulator_ops pm800_volt_table_ops = {
.list_voltage = regulator_list_voltage_table,
.map_voltage = regulator_map_voltage_iterate,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
.get_current_limit = pm800_get_current_limit,
};
/* The array is indexed by id(PM800_ID_XXX) */
static struct pm800_regulator_info pm800_regulator_info[] = {
PM800_BUCK(buck1, BUCK1, BUCK_ENA, 0, 3000000, buck1_volt_range, 0x55),
PM800_BUCK(buck2, BUCK2, BUCK_ENA, 1, 1200000, buck2_5_volt_range, 0x73),
PM800_BUCK(buck3, BUCK3, BUCK_ENA, 2, 1200000, buck2_5_volt_range, 0x73),
PM800_BUCK(buck4, BUCK4, BUCK_ENA, 3, 1200000, buck2_5_volt_range, 0x73),
PM800_BUCK(buck5, BUCK5, BUCK_ENA, 4, 1200000, buck2_5_volt_range, 0x73),
PM800_LDO(ldo1, LDO1, LDO_ENA1_1, 0, 200000, ldo1_volt_table),
PM800_LDO(ldo2, LDO2, LDO_ENA1_1, 1, 10000, ldo2_volt_table),
PM800_LDO(ldo3, LDO3, LDO_ENA1_1, 2, 300000, ldo3_17_volt_table),
PM800_LDO(ldo4, LDO4, LDO_ENA1_1, 3, 300000, ldo3_17_volt_table),
PM800_LDO(ldo5, LDO5, LDO_ENA1_1, 4, 300000, ldo3_17_volt_table),
PM800_LDO(ldo6, LDO6, LDO_ENA1_1, 5, 300000, ldo3_17_volt_table),
PM800_LDO(ldo7, LDO7, LDO_ENA1_1, 6, 300000, ldo3_17_volt_table),
PM800_LDO(ldo8, LDO8, LDO_ENA1_1, 7, 300000, ldo3_17_volt_table),
PM800_LDO(ldo9, LDO9, LDO_ENA1_2, 0, 300000, ldo3_17_volt_table),
PM800_LDO(ldo10, LDO10, LDO_ENA1_2, 1, 300000, ldo3_17_volt_table),
PM800_LDO(ldo11, LDO11, LDO_ENA1_2, 2, 300000, ldo3_17_volt_table),
PM800_LDO(ldo12, LDO12, LDO_ENA1_2, 3, 300000, ldo3_17_volt_table),
PM800_LDO(ldo13, LDO13, LDO_ENA1_2, 4, 300000, ldo3_17_volt_table),
PM800_LDO(ldo14, LDO14, LDO_ENA1_2, 5, 300000, ldo3_17_volt_table),
PM800_LDO(ldo15, LDO15, LDO_ENA1_2, 6, 300000, ldo3_17_volt_table),
PM800_LDO(ldo16, LDO16, LDO_ENA1_2, 7, 300000, ldo3_17_volt_table),
PM800_LDO(ldo17, LDO17, LDO_ENA1_3, 0, 300000, ldo3_17_volt_table),
PM800_LDO(ldo18, LDO18, LDO_ENA1_3, 1, 200000, ldo18_19_volt_table),
PM800_LDO(ldo19, LDO19, LDO_ENA1_3, 2, 200000, ldo18_19_volt_table),
};
static int pm800_regulator_probe(struct platform_device *pdev)
{
struct pm80x_chip *chip = dev_get_drvdata(pdev->dev.parent);
struct pm80x_platform_data *pdata = dev_get_platdata(pdev->dev.parent);
struct regulator_config config = { };
struct regulator_init_data *init_data;
int i, ret;
if (pdata && pdata->num_regulators) {
unsigned int count = 0;
/* Check whether num_regulators is valid. */
for (i = 0; i < ARRAY_SIZE(pdata->regulators); i++) {
if (pdata->regulators[i])
count++;
}
if (count != pdata->num_regulators)
return -EINVAL;
}
config.dev = chip->dev;
config.regmap = chip->subchip->regmap_power;
for (i = 0; i < PM800_ID_RG_MAX; i++) {
struct regulator_dev *regulator;
if (pdata && pdata->num_regulators) {
init_data = pdata->regulators[i];
if (!init_data)
continue;
config.init_data = init_data;
}
config.driver_data = &pm800_regulator_info[i];
regulator = devm_regulator_register(&pdev->dev,
&pm800_regulator_info[i].desc, &config);
if (IS_ERR(regulator)) {
ret = PTR_ERR(regulator);
dev_err(&pdev->dev, "Failed to register %s\n",
pm800_regulator_info[i].desc.name);
return ret;
}
}
return 0;
}
static struct platform_driver pm800_regulator_driver = {
.driver = {
.name = "88pm80x-regulator",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = pm800_regulator_probe,
};
module_platform_driver(pm800_regulator_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Joseph(Yossi) Hanin <[email protected]>");
MODULE_DESCRIPTION("Regulator Driver for Marvell 88PM800 PMIC");
MODULE_ALIAS("platform:88pm800-regulator");
| linux-master | drivers/regulator/88pm800-regulator.c |
// SPDX-License-Identifier: GPL-2.0+
//
// Regulator driver for DA9063 PMIC series
//
// Copyright 2012 Dialog Semiconductors Ltd.
// Copyright 2013 Philipp Zabel, Pengutronix
//
// Author: Krystian Garbaciak <[email protected]>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <linux/mfd/da9063/core.h>
#include <linux/mfd/da9063/registers.h>
/* Definition for registering regmap bit fields using a mask */
#define BFIELD(_reg, _mask) \
REG_FIELD(_reg, __builtin_ffs((int)_mask) - 1, \
sizeof(unsigned int) * 8 - __builtin_clz((_mask)) - 1)
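/*
 * Worked expansion (added commentary, not in the original source): for
 * a mask of 0x60, __builtin_ffs(0x60) - 1 = 5 and
 * 32 - __builtin_clz(0x60) - 1 = 6, so BFIELD(reg, 0x60) expands to
 * REG_FIELD(reg, 5, 6), i.e. a two-bit field at bits 6:5.
 */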
/* DA9063 and DA9063L regulator IDs */
enum {
/* BUCKs */
DA9063_ID_BCORE1,
DA9063_ID_BCORE2,
DA9063_ID_BPRO,
DA9063_ID_BMEM,
DA9063_ID_BIO,
DA9063_ID_BPERI,
/* BCORE1 and BCORE2 in merged mode */
DA9063_ID_BCORES_MERGED,
/* BMEM and BIO in merged mode */
DA9063_ID_BMEM_BIO_MERGED,
/* When two BUCKs are merged, they cannot be reused separately */
/* LDOs on both DA9063 and DA9063L */
DA9063_ID_LDO3,
DA9063_ID_LDO7,
DA9063_ID_LDO8,
DA9063_ID_LDO9,
DA9063_ID_LDO11,
/* DA9063-only LDOs */
DA9063_ID_LDO1,
DA9063_ID_LDO2,
DA9063_ID_LDO4,
DA9063_ID_LDO5,
DA9063_ID_LDO6,
DA9063_ID_LDO10,
};
/* Old regulator platform data */
struct da9063_regulator_data {
int id;
struct regulator_init_data *initdata;
};
struct da9063_regulators_pdata {
unsigned int n_regulators;
struct da9063_regulator_data *regulator_data;
};
/* Regulator capabilities and registers description */
struct da9063_regulator_info {
struct regulator_desc desc;
/* DA9063 main register fields */
struct reg_field mode; /* buck mode of operation */
struct reg_field suspend;
struct reg_field sleep;
struct reg_field suspend_sleep;
unsigned int suspend_vsel_reg;
/* DA9063 event detection bit */
struct reg_field oc_event;
/* DA9063 voltage monitor bit */
struct reg_field vmon;
};
/* Macros for LDO */
#define DA9063_LDO(chip, regl_name, min_mV, step_mV, max_mV) \
.desc.id = chip##_ID_##regl_name, \
.desc.name = __stringify(chip##_##regl_name), \
.desc.ops = &da9063_ldo_ops, \
.desc.min_uV = (min_mV) * 1000, \
.desc.uV_step = (step_mV) * 1000, \
.desc.n_voltages = (((max_mV) - (min_mV))/(step_mV) + 1 \
+ (DA9063_V##regl_name##_BIAS)), \
.desc.enable_reg = DA9063_REG_##regl_name##_CONT, \
.desc.enable_mask = DA9063_LDO_EN, \
.desc.vsel_reg = DA9063_REG_V##regl_name##_A, \
.desc.vsel_mask = DA9063_V##regl_name##_MASK, \
.desc.linear_min_sel = DA9063_V##regl_name##_BIAS, \
.sleep = BFIELD(DA9063_REG_V##regl_name##_A, DA9063_LDO_SL), \
.suspend = BFIELD(DA9063_REG_##regl_name##_CONT, DA9063_LDO_CONF), \
.suspend_sleep = BFIELD(DA9063_REG_V##regl_name##_B, DA9063_LDO_SL), \
.suspend_vsel_reg = DA9063_REG_V##regl_name##_B
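/*
 * Expansion note (added commentary, not in the original source): the
 * BIAS constant doubles as .linear_min_sel, so n_voltages counts both
 * the usable steps ((max_mV - min_mV)/step_mV + 1) and the reserved
 * selector codes below the first valid one, which the regulator core
 * skips when listing voltages.
 */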
/* Macros for voltage DC/DC converters (BUCKs) */
#define DA9063_BUCK(chip, regl_name, min_mV, step_mV, max_mV, limits_array, \
creg, cmask) \
.desc.id = chip##_ID_##regl_name, \
.desc.name = __stringify(chip##_##regl_name), \
.desc.ops = &da9063_buck_ops, \
.desc.min_uV = (min_mV) * 1000, \
.desc.uV_step = (step_mV) * 1000, \
.desc.n_voltages = ((max_mV) - (min_mV))/(step_mV) + 1, \
.desc.csel_reg = (creg), \
.desc.csel_mask = (cmask), \
.desc.curr_table = limits_array, \
.desc.n_current_limits = ARRAY_SIZE(limits_array)
#define DA9063_BUCK_COMMON_FIELDS(regl_name) \
.desc.enable_reg = DA9063_REG_##regl_name##_CONT, \
.desc.enable_mask = DA9063_BUCK_EN, \
.desc.vsel_reg = DA9063_REG_V##regl_name##_A, \
.desc.vsel_mask = DA9063_VBUCK_MASK, \
.desc.linear_min_sel = DA9063_VBUCK_BIAS, \
.sleep = BFIELD(DA9063_REG_V##regl_name##_A, DA9063_BUCK_SL), \
.suspend = BFIELD(DA9063_REG_##regl_name##_CONT, DA9063_BUCK_CONF), \
.suspend_sleep = BFIELD(DA9063_REG_V##regl_name##_B, DA9063_BUCK_SL), \
.suspend_vsel_reg = DA9063_REG_V##regl_name##_B, \
.mode = BFIELD(DA9063_REG_##regl_name##_CFG, DA9063_BUCK_MODE_MASK)
/* Defines assignment of the regulator info table to the chip model */
struct da9063_dev_model {
const struct da9063_regulator_info *regulator_info;
unsigned int n_regulators;
enum da9063_type type;
};
/* Single regulator settings */
struct da9063_regulator {
struct regulator_desc desc;
struct regulator_dev *rdev;
struct da9063 *hw;
const struct da9063_regulator_info *info;
struct regmap_field *mode;
struct regmap_field *suspend;
struct regmap_field *sleep;
struct regmap_field *suspend_sleep;
struct regmap_field *vmon;
};
/* Encapsulates all information for the regulators driver */
struct da9063_regulators {
unsigned int n_regulators;
/* Array size to be defined during init. Keep at end. */
struct da9063_regulator regulator[];
};
/* BUCK modes for DA9063 */
enum {
BUCK_MODE_MANUAL, /* 0 */
BUCK_MODE_SLEEP, /* 1 */
BUCK_MODE_SYNC, /* 2 */
BUCK_MODE_AUTO /* 3 */
};
/* Regulator operations */
/*
* Current limits array (in uA) for BCORE1, BCORE2, BPRO.
* Entry indexes correspond to register values.
*/
static const unsigned int da9063_buck_a_limits[] = {
500000, 600000, 700000, 800000, 900000, 1000000, 1100000, 1200000,
1300000, 1400000, 1500000, 1600000, 1700000, 1800000, 1900000, 2000000
};
/*
* Current limits array (in uA) for BMEM, BIO, BPERI.
* Entry indexes correspond to register values.
*/
static const unsigned int da9063_buck_b_limits[] = {
1500000, 1600000, 1700000, 1800000, 1900000, 2000000, 2100000, 2200000,
2300000, 2400000, 2500000, 2600000, 2700000, 2800000, 2900000, 3000000
};
/*
* Current limits array (in uA) for merged BCORE1 and BCORE2.
* Entry indexes correspond to register values.
*/
static const unsigned int da9063_bcores_merged_limits[] = {
1000000, 1200000, 1400000, 1600000, 1800000, 2000000, 2200000, 2400000,
2600000, 2800000, 3000000, 3200000, 3400000, 3600000, 3800000, 4000000
};
/*
* Current limits array (in uA) for merged BMEM and BIO.
* Entry indexes correspond to register values.
*/
static const unsigned int da9063_bmem_bio_merged_limits[] = {
3000000, 3200000, 3400000, 3600000, 3800000, 4000000, 4200000, 4400000,
4600000, 4800000, 5000000, 5200000, 5400000, 5600000, 5800000, 6000000
};
static int da9063_set_xvp(struct regulator_dev *rdev, int lim_uV, int severity, bool enable)
{
struct da9063_regulator *regl = rdev_get_drvdata(rdev);
struct device *dev = regl->hw->dev;
dev_dbg(dev, "%s: lim: %d, sev: %d, en: %d\n", regl->desc.name, lim_uV, severity, enable);
/*
* Only enabling and disabling the monitor is supported; a specific
* limit cannot be programmed. The DA9063 offers a GPIO (GP_FB2) which
* is deasserted if an XV event happens, so the severity is ignored
* here as handlers may exist in hardware.
*/
if (lim_uV)
return -EINVAL;
return regmap_field_write(regl->vmon, enable ? 1 : 0);
}
static int da9063_buck_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
struct da9063_regulator *regl = rdev_get_drvdata(rdev);
unsigned int val;
switch (mode) {
case REGULATOR_MODE_FAST:
val = BUCK_MODE_SYNC;
break;
case REGULATOR_MODE_NORMAL:
val = BUCK_MODE_AUTO;
break;
case REGULATOR_MODE_STANDBY:
val = BUCK_MODE_SLEEP;
break;
default:
return -EINVAL;
}
return regmap_field_write(regl->mode, val);
}
/*
* Bucks use a single mode register field for both normal operation
* and the suspend state.
* There are 3 modes to map to: FAST, NORMAL, and STANDBY.
*/
static unsigned int da9063_buck_get_mode(struct regulator_dev *rdev)
{
struct da9063_regulator *regl = rdev_get_drvdata(rdev);
unsigned int val;
int ret;
ret = regmap_field_read(regl->mode, &val);
if (ret < 0)
return ret;
switch (val) {
default:
case BUCK_MODE_MANUAL:
/* Sleep flag bit decides the mode */
break;
case BUCK_MODE_SLEEP:
return REGULATOR_MODE_STANDBY;
case BUCK_MODE_SYNC:
return REGULATOR_MODE_FAST;
case BUCK_MODE_AUTO:
return REGULATOR_MODE_NORMAL;
}
ret = regmap_field_read(regl->sleep, &val);
if (ret < 0)
return 0;
if (val)
return REGULATOR_MODE_STANDBY;
else
return REGULATOR_MODE_FAST;
}
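/*
 * Decode summary (added commentary, not in the original source): in
 * BUCK_MODE_MANUAL the mode field alone is ambiguous, so the sleep flag
 * breaks the tie - sleep set reports REGULATOR_MODE_STANDBY, sleep
 * clear reports REGULATOR_MODE_FAST.
 */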
/*
* LDOs use sleep flags - one for normal and one for suspend state.
* There are 2 modes to map to: NORMAL and STANDBY (sleep) for each state.
*/
static int da9063_ldo_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
struct da9063_regulator *regl = rdev_get_drvdata(rdev);
unsigned int val;
switch (mode) {
case REGULATOR_MODE_NORMAL:
val = 0;
break;
case REGULATOR_MODE_STANDBY:
val = 1;
break;
default:
return -EINVAL;
}
return regmap_field_write(regl->sleep, val);
}
static unsigned int da9063_ldo_get_mode(struct regulator_dev *rdev)
{
struct da9063_regulator *regl = rdev_get_drvdata(rdev);
unsigned int val;
int ret;
ret = regmap_field_read(regl->sleep, &val);
if (ret < 0)
return 0;
if (val)
return REGULATOR_MODE_STANDBY;
else
return REGULATOR_MODE_NORMAL;
}
static int da9063_buck_get_status(struct regulator_dev *rdev)
{
int ret = regulator_is_enabled_regmap(rdev);
if (ret == 0) {
ret = REGULATOR_STATUS_OFF;
} else if (ret > 0) {
ret = da9063_buck_get_mode(rdev);
if (ret > 0)
ret = regulator_mode_to_status(ret);
else if (ret == 0)
ret = -EIO;
}
return ret;
}
static int da9063_ldo_get_status(struct regulator_dev *rdev)
{
int ret = regulator_is_enabled_regmap(rdev);
if (ret == 0) {
ret = REGULATOR_STATUS_OFF;
} else if (ret > 0) {
ret = da9063_ldo_get_mode(rdev);
if (ret > 0)
ret = regulator_mode_to_status(ret);
else if (ret == 0)
ret = -EIO;
}
return ret;
}
static int da9063_set_suspend_voltage(struct regulator_dev *rdev, int uV)
{
struct da9063_regulator *regl = rdev_get_drvdata(rdev);
const struct da9063_regulator_info *rinfo = regl->info;
int ret, sel;
sel = regulator_map_voltage_linear(rdev, uV, uV);
if (sel < 0)
return sel;
sel <<= ffs(rdev->desc->vsel_mask) - 1;
ret = regmap_update_bits(regl->hw->regmap, rinfo->suspend_vsel_reg,
rdev->desc->vsel_mask, sel);
return ret;
}
static int da9063_suspend_enable(struct regulator_dev *rdev)
{
struct da9063_regulator *regl = rdev_get_drvdata(rdev);
return regmap_field_write(regl->suspend, 1);
}
static int da9063_suspend_disable(struct regulator_dev *rdev)
{
struct da9063_regulator *regl = rdev_get_drvdata(rdev);
return regmap_field_write(regl->suspend, 0);
}
static int da9063_buck_set_suspend_mode(struct regulator_dev *rdev,
unsigned int mode)
{
struct da9063_regulator *regl = rdev_get_drvdata(rdev);
int val;
switch (mode) {
case REGULATOR_MODE_FAST:
val = BUCK_MODE_SYNC;
break;
case REGULATOR_MODE_NORMAL:
val = BUCK_MODE_AUTO;
break;
case REGULATOR_MODE_STANDBY:
val = BUCK_MODE_SLEEP;
break;
default:
return -EINVAL;
}
return regmap_field_write(regl->mode, val);
}
static int da9063_ldo_set_suspend_mode(struct regulator_dev *rdev,
unsigned int mode)
{
struct da9063_regulator *regl = rdev_get_drvdata(rdev);
unsigned int val;
switch (mode) {
case REGULATOR_MODE_NORMAL:
val = 0;
break;
case REGULATOR_MODE_STANDBY:
val = 1;
break;
default:
return -EINVAL;
}
return regmap_field_write(regl->suspend_sleep, val);
}
static unsigned int da9063_get_overdrive_mask(const struct regulator_desc *desc)
{
switch (desc->id) {
case DA9063_ID_BCORES_MERGED:
case DA9063_ID_BCORE1:
return DA9063_BCORE1_OD;
case DA9063_ID_BCORE2:
return DA9063_BCORE2_OD;
case DA9063_ID_BPRO:
return DA9063_BPRO_OD;
default:
return 0;
}
}
static int da9063_buck_set_limit_set_overdrive(struct regulator_dev *rdev,
int min_uA, int max_uA,
unsigned int overdrive_mask)
{
/*
* When enabling overdrive, do it before changing the current limit to
* ensure sufficient supply throughout the switch.
*/
struct da9063_regulator *regl = rdev_get_drvdata(rdev);
int ret;
unsigned int orig_overdrive;
ret = regmap_read(regl->hw->regmap, DA9063_REG_CONFIG_H,
&orig_overdrive);
if (ret < 0)
return ret;
orig_overdrive &= overdrive_mask;
if (orig_overdrive == 0) {
ret = regmap_set_bits(regl->hw->regmap, DA9063_REG_CONFIG_H,
overdrive_mask);
if (ret < 0)
return ret;
}
ret = regulator_set_current_limit_regmap(rdev, min_uA / 2, max_uA / 2);
if (ret < 0 && orig_overdrive == 0)
/*
* regulator_set_current_limit_regmap may have rejected the
* change because of unusable min_uA and/or max_uA inputs.
* Attempt to restore original overdrive state, ignore failure-
* on-failure.
*/
regmap_clear_bits(regl->hw->regmap, DA9063_REG_CONFIG_H,
overdrive_mask);
return ret;
}
static int da9063_buck_set_limit_clear_overdrive(struct regulator_dev *rdev,
int min_uA, int max_uA,
unsigned int overdrive_mask)
{
/*
* When disabling overdrive, do it after changing the current limit to
* ensure sufficient supply throughout the switch.
*/
struct da9063_regulator *regl = rdev_get_drvdata(rdev);
unsigned int orig_limit;
int ret;
ret = regmap_read(rdev->regmap, rdev->desc->csel_reg, &orig_limit);
if (ret < 0)
return ret;
ret = regulator_set_current_limit_regmap(rdev, min_uA, max_uA);
if (ret < 0)
return ret;
ret = regmap_clear_bits(regl->hw->regmap, DA9063_REG_CONFIG_H,
overdrive_mask);
if (ret < 0)
/*
* Attempt to restore original current limit, ignore failure-
* on-failure.
*/
regmap_write(rdev->regmap, rdev->desc->csel_reg, orig_limit);
return ret;
}
static int da9063_buck_set_current_limit(struct regulator_dev *rdev,
int min_uA, int max_uA)
{
unsigned int overdrive_mask, n_currents;
overdrive_mask = da9063_get_overdrive_mask(rdev->desc);
if (overdrive_mask) {
n_currents = rdev->desc->n_current_limits;
if (n_currents == 0)
return -EINVAL;
if (max_uA > rdev->desc->curr_table[n_currents - 1])
return da9063_buck_set_limit_set_overdrive(rdev, min_uA,
max_uA,
overdrive_mask);
return da9063_buck_set_limit_clear_overdrive(rdev, min_uA,
max_uA,
overdrive_mask);
}
return regulator_set_current_limit_regmap(rdev, min_uA, max_uA);
}
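/*
 * Worked example of the overdrive decision (added commentary, not in
 * the original source): for BPRO, curr_table[15] is 2,000,000 uA, so a
 * request with max_uA = 2,400,000 takes the overdrive path, programs
 * the register for half that (1,200,000 uA, a valid table entry), and
 * relies on the overdrive bit to double the effective limit.
 */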
static int da9063_buck_get_current_limit(struct regulator_dev *rdev)
{
struct da9063_regulator *regl = rdev_get_drvdata(rdev);
unsigned int val, mask;
int ret, limit;
limit = regulator_get_current_limit_regmap(rdev);
if (limit < 0)
return limit;
mask = da9063_get_overdrive_mask(rdev->desc);
if (mask) {
ret = regmap_read(regl->hw->regmap, DA9063_REG_CONFIG_H, &val);
if (ret < 0)
return ret;
if (val & mask)
limit *= 2;
}
return limit;
}
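/*
 * Illustrative read-back (added commentary, not in the original
 * source): if the selector resolves to 1,200,000 uA in the table and
 * the regulator's overdrive bit is set in DA9063_REG_CONFIG_H, the
 * reported limit is doubled to 2,400,000 uA - the inverse of the
 * halving on the set path above.
 */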
static const struct regulator_ops da9063_buck_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear,
.set_current_limit = da9063_buck_set_current_limit,
.get_current_limit = da9063_buck_get_current_limit,
.set_mode = da9063_buck_set_mode,
.get_mode = da9063_buck_get_mode,
.get_status = da9063_buck_get_status,
.set_suspend_voltage = da9063_set_suspend_voltage,
.set_suspend_enable = da9063_suspend_enable,
.set_suspend_disable = da9063_suspend_disable,
.set_suspend_mode = da9063_buck_set_suspend_mode,
.set_over_voltage_protection = da9063_set_xvp,
.set_under_voltage_protection = da9063_set_xvp,
};
static const struct regulator_ops da9063_ldo_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear,
.set_mode = da9063_ldo_set_mode,
.get_mode = da9063_ldo_get_mode,
.get_status = da9063_ldo_get_status,
.set_suspend_voltage = da9063_set_suspend_voltage,
.set_suspend_enable = da9063_suspend_enable,
.set_suspend_disable = da9063_suspend_disable,
.set_suspend_mode = da9063_ldo_set_suspend_mode,
.set_over_voltage_protection = da9063_set_xvp,
.set_under_voltage_protection = da9063_set_xvp,
};
/* Info of regulators for DA9063 */
static const struct da9063_regulator_info da9063_regulator_info[] = {
{
DA9063_BUCK(DA9063, BCORE1, 300, 10, 1570,
da9063_buck_a_limits,
DA9063_REG_BUCK_ILIM_C, DA9063_BCORE1_ILIM_MASK),
DA9063_BUCK_COMMON_FIELDS(BCORE1),
.vmon = BFIELD(DA9063_BB_REG_MON_REG_4, DA9063_BCORE1_MON_EN),
},
{
DA9063_BUCK(DA9063, BCORE2, 300, 10, 1570,
da9063_buck_a_limits,
DA9063_REG_BUCK_ILIM_C, DA9063_BCORE2_ILIM_MASK),
DA9063_BUCK_COMMON_FIELDS(BCORE2),
.vmon = BFIELD(DA9063_BB_REG_MON_REG_4, DA9063_BCORE2_MON_EN),
},
{
DA9063_BUCK(DA9063, BPRO, 530, 10, 1800,
da9063_buck_a_limits,
DA9063_REG_BUCK_ILIM_B, DA9063_BPRO_ILIM_MASK),
DA9063_BUCK_COMMON_FIELDS(BPRO),
.vmon = BFIELD(DA9063_BB_REG_MON_REG_4, DA9063_BPRO_MON_EN),
},
{
DA9063_BUCK(DA9063, BMEM, 800, 20, 3340,
da9063_buck_b_limits,
DA9063_REG_BUCK_ILIM_A, DA9063_BMEM_ILIM_MASK),
DA9063_BUCK_COMMON_FIELDS(BMEM),
.vmon = BFIELD(DA9063_BB_REG_MON_REG_4, DA9063_BMEM_MON_EN),
},
{
DA9063_BUCK(DA9063, BIO, 800, 20, 3340,
da9063_buck_b_limits,
DA9063_REG_BUCK_ILIM_A, DA9063_BIO_ILIM_MASK),
DA9063_BUCK_COMMON_FIELDS(BIO),
.vmon = BFIELD(DA9063_BB_REG_MON_REG_4, DA9063_BIO_MON_EN),
},
{
DA9063_BUCK(DA9063, BPERI, 800, 20, 3340,
da9063_buck_b_limits,
DA9063_REG_BUCK_ILIM_B, DA9063_BPERI_ILIM_MASK),
DA9063_BUCK_COMMON_FIELDS(BPERI),
.vmon = BFIELD(DA9063_BB_REG_MON_REG_4, DA9063_BPERI_MON_EN),
},
{
DA9063_BUCK(DA9063, BCORES_MERGED, 300, 10, 1570,
da9063_bcores_merged_limits,
DA9063_REG_BUCK_ILIM_C, DA9063_BCORE1_ILIM_MASK),
/* BCORES_MERGED uses the same register fields as BCORE1 */
DA9063_BUCK_COMMON_FIELDS(BCORE1),
.vmon = BFIELD(DA9063_BB_REG_MON_REG_4, DA9063_BCORE1_MON_EN),
},
{
DA9063_BUCK(DA9063, BMEM_BIO_MERGED, 800, 20, 3340,
da9063_bmem_bio_merged_limits,
DA9063_REG_BUCK_ILIM_A, DA9063_BMEM_ILIM_MASK),
/* BMEM_BIO_MERGED uses the same register fields as BMEM */
DA9063_BUCK_COMMON_FIELDS(BMEM),
.vmon = BFIELD(DA9063_BB_REG_MON_REG_4, DA9063_BMEM_MON_EN),
},
{
DA9063_LDO(DA9063, LDO3, 900, 20, 3440),
.oc_event = BFIELD(DA9063_REG_STATUS_D, DA9063_LDO3_LIM),
.vmon = BFIELD(DA9063_BB_REG_MON_REG_2, DA9063_LDO3_MON_EN),
},
{
DA9063_LDO(DA9063, LDO7, 900, 50, 3600),
.oc_event = BFIELD(DA9063_REG_STATUS_D, DA9063_LDO7_LIM),
.vmon = BFIELD(DA9063_BB_REG_MON_REG_2, DA9063_LDO7_MON_EN),
},
{
DA9063_LDO(DA9063, LDO8, 900, 50, 3600),
.oc_event = BFIELD(DA9063_REG_STATUS_D, DA9063_LDO8_LIM),
.vmon = BFIELD(DA9063_BB_REG_MON_REG_2, DA9063_LDO8_MON_EN),
},
{
DA9063_LDO(DA9063, LDO9, 950, 50, 3600),
.vmon = BFIELD(DA9063_BB_REG_MON_REG_3, DA9063_LDO9_MON_EN),
},
{
DA9063_LDO(DA9063, LDO11, 900, 50, 3600),
.oc_event = BFIELD(DA9063_REG_STATUS_D, DA9063_LDO11_LIM),
.vmon = BFIELD(DA9063_BB_REG_MON_REG_3, DA9063_LDO11_MON_EN),
},
/* The following LDOs are present only on DA9063, not on DA9063L */
{
DA9063_LDO(DA9063, LDO1, 600, 20, 1860),
.vmon = BFIELD(DA9063_BB_REG_MON_REG_2, DA9063_LDO1_MON_EN),
},
{
DA9063_LDO(DA9063, LDO2, 600, 20, 1860),
.vmon = BFIELD(DA9063_BB_REG_MON_REG_2, DA9063_LDO2_MON_EN),
},
{
DA9063_LDO(DA9063, LDO4, 900, 20, 3440),
.oc_event = BFIELD(DA9063_REG_STATUS_D, DA9063_LDO4_LIM),
.vmon = BFIELD(DA9063_BB_REG_MON_REG_2, DA9063_LDO4_MON_EN),
},
{
DA9063_LDO(DA9063, LDO5, 900, 50, 3600),
.vmon = BFIELD(DA9063_BB_REG_MON_REG_2, DA9063_LDO5_MON_EN),
},
{
DA9063_LDO(DA9063, LDO6, 900, 50, 3600),
.vmon = BFIELD(DA9063_BB_REG_MON_REG_2, DA9063_LDO6_MON_EN),
},
{
DA9063_LDO(DA9063, LDO10, 900, 50, 3600),
.vmon = BFIELD(DA9063_BB_REG_MON_REG_3, DA9063_LDO10_MON_EN),
},
};
/* Link chip model with regulators info table */
static struct da9063_dev_model regulators_models[] = {
{
.regulator_info = da9063_regulator_info,
.n_regulators = ARRAY_SIZE(da9063_regulator_info),
.type = PMIC_TYPE_DA9063,
},
{
.regulator_info = da9063_regulator_info,
.n_regulators = ARRAY_SIZE(da9063_regulator_info) - 6,
.type = PMIC_TYPE_DA9063L,
},
{ }
};
/* Regulator interrupt handlers */
static irqreturn_t da9063_ldo_lim_event(int irq, void *data)
{
struct da9063_regulators *regulators = data;
struct da9063 *hw = regulators->regulator[0].hw;
struct da9063_regulator *regl;
unsigned int bits;
int i, ret;
ret = regmap_read(hw->regmap, DA9063_REG_STATUS_D, &bits);
if (ret < 0)
return IRQ_NONE;
for (i = regulators->n_regulators - 1; i >= 0; i--) {
regl = &regulators->regulator[i];
if (regl->info->oc_event.reg != DA9063_REG_STATUS_D)
continue;
if (BIT(regl->info->oc_event.lsb) & bits) {
regulator_notifier_call_chain(regl->rdev,
REGULATOR_EVENT_OVER_CURRENT, NULL);
}
}
return IRQ_HANDLED;
}
/*
* Probing and Initialisation functions
*/
static const struct regulator_init_data *da9063_get_regulator_initdata(
const struct da9063_regulators_pdata *regl_pdata, int id)
{
int i;
for (i = 0; i < regl_pdata->n_regulators; i++) {
if (id == regl_pdata->regulator_data[i].id)
return regl_pdata->regulator_data[i].initdata;
}
return NULL;
}
static int da9063_check_xvp_constraints(struct regulator_config *config)
{
struct da9063_regulator *regl = config->driver_data;
const struct regulation_constraints *constr = &config->init_data->constraints;
const struct notification_limit *uv_l = &constr->under_voltage_limits;
const struct notification_limit *ov_l = &constr->over_voltage_limits;
/* Allow at most one severity so it is unambiguous whether monitoring is unchanged, enabled or disabled. */
if ((!!uv_l->prot + !!uv_l->err + !!uv_l->warn) > 1) {
dev_err(config->dev, "%s: at most one voltage monitoring severity allowed!\n",
regl->desc.name);
return -EINVAL;
}
/* make sure that UV and OV monitoring is set to the same severity and value */
if (uv_l->prot != ov_l->prot) {
dev_err(config->dev,
"%s: protection-microvolt: value must be equal for uv and ov!\n",
regl->desc.name);
return -EINVAL;
}
if (uv_l->err != ov_l->err) {
dev_err(config->dev, "%s: error-microvolt: value must be equal for uv and ov!\n",
regl->desc.name);
return -EINVAL;
}
if (uv_l->warn != ov_l->warn) {
dev_err(config->dev, "%s: warn-microvolt: value must be equal for uv and ov!\n",
regl->desc.name);
return -EINVAL;
}
return 0;
}
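/*
 * Rationale (added commentary, not in the original source): both
 * set_over_voltage_protection and set_under_voltage_protection point at
 * the same da9063_set_xvp(), which toggles a single VMON enable field
 * per regulator, so a configuration passes these checks only when UV
 * and OV use one severity with identical values.
 */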
static struct of_regulator_match da9063_matches[] = {
[DA9063_ID_BCORE1] = { .name = "bcore1" },
[DA9063_ID_BCORE2] = { .name = "bcore2" },
[DA9063_ID_BPRO] = { .name = "bpro", },
[DA9063_ID_BMEM] = { .name = "bmem", },
[DA9063_ID_BIO] = { .name = "bio", },
[DA9063_ID_BPERI] = { .name = "bperi", },
[DA9063_ID_BCORES_MERGED] = { .name = "bcores-merged" },
[DA9063_ID_BMEM_BIO_MERGED] = { .name = "bmem-bio-merged", },
[DA9063_ID_LDO3] = { .name = "ldo3", },
[DA9063_ID_LDO7] = { .name = "ldo7", },
[DA9063_ID_LDO8] = { .name = "ldo8", },
[DA9063_ID_LDO9] = { .name = "ldo9", },
[DA9063_ID_LDO11] = { .name = "ldo11", },
/* The following LDOs are present only on DA9063, not on DA9063L */
[DA9063_ID_LDO1] = { .name = "ldo1", },
[DA9063_ID_LDO2] = { .name = "ldo2", },
[DA9063_ID_LDO4] = { .name = "ldo4", },
[DA9063_ID_LDO5] = { .name = "ldo5", },
[DA9063_ID_LDO6] = { .name = "ldo6", },
[DA9063_ID_LDO10] = { .name = "ldo10", },
};
static struct da9063_regulators_pdata *da9063_parse_regulators_dt(
struct platform_device *pdev,
struct of_regulator_match **da9063_reg_matches)
{
struct da9063 *da9063 = dev_get_drvdata(pdev->dev.parent);
struct da9063_regulators_pdata *pdata;
struct da9063_regulator_data *rdata;
struct device_node *node;
int da9063_matches_len = ARRAY_SIZE(da9063_matches);
int i, n, num;
if (da9063->type == PMIC_TYPE_DA9063L)
da9063_matches_len -= 6;
node = of_get_child_by_name(pdev->dev.parent->of_node, "regulators");
if (!node) {
dev_err(&pdev->dev, "Regulators device node not found\n");
return ERR_PTR(-ENODEV);
}
num = of_regulator_match(&pdev->dev, node, da9063_matches,
da9063_matches_len);
of_node_put(node);
if (num < 0) {
dev_err(&pdev->dev, "Failed to match regulators\n");
return ERR_PTR(-EINVAL);
}
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return ERR_PTR(-ENOMEM);
pdata->regulator_data = devm_kcalloc(&pdev->dev,
num, sizeof(*pdata->regulator_data),
GFP_KERNEL);
if (!pdata->regulator_data)
return ERR_PTR(-ENOMEM);
pdata->n_regulators = num;
n = 0;
for (i = 0; i < da9063_matches_len; i++) {
if (!da9063_matches[i].init_data)
continue;
rdata = &pdata->regulator_data[n];
rdata->id = i;
rdata->initdata = da9063_matches[i].init_data;
n++;
}
*da9063_reg_matches = da9063_matches;
return pdata;
}
static int da9063_regulator_probe(struct platform_device *pdev)
{
struct da9063 *da9063 = dev_get_drvdata(pdev->dev.parent);
struct of_regulator_match *da9063_reg_matches = NULL;
struct da9063_regulators_pdata *regl_pdata;
const struct da9063_dev_model *model;
struct da9063_regulators *regulators;
struct da9063_regulator *regl;
struct regulator_config config;
bool bcores_merged, bmem_bio_merged;
unsigned int val;
int id, irq, n, n_regulators, ret;
regl_pdata = da9063_parse_regulators_dt(pdev, &da9063_reg_matches);
if (IS_ERR(regl_pdata) || regl_pdata->n_regulators == 0) {
dev_err(&pdev->dev,
"No regulators defined for the platform\n");
return -ENODEV;
}
/* Find regulators set for particular device model */
for (model = regulators_models; model->regulator_info; model++) {
if (model->type == da9063->type)
break;
}
if (!model->regulator_info) {
dev_err(&pdev->dev, "Chip model not recognised (%u)\n",
da9063->type);
return -ENODEV;
}
ret = regmap_read(da9063->regmap, DA9063_REG_CONFIG_H, &val);
if (ret < 0) {
dev_err(&pdev->dev,
"Error while reading BUCKs configuration\n");
return ret;
}
bcores_merged = val & DA9063_BCORE_MERGE;
bmem_bio_merged = val & DA9063_BUCK_MERGE;
n_regulators = model->n_regulators;
if (bcores_merged)
n_regulators -= 2; /* remove BCORE1, BCORE2 */
else
n_regulators--; /* remove BCORES_MERGED */
if (bmem_bio_merged)
n_regulators -= 2; /* remove BMEM, BIO */
else
n_regulators--; /* remove BMEM_BIO_MERGED */
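/*
 * Worked count (added commentary, not in the original source): the
 * DA9063 table has 19 entries; with both merge bits clear the two
 * merged pseudo-regulators are dropped (19 - 1 - 1 = 17), and with both
 * set the four individual BCORE1/2 and BMEM/BIO entries go instead
 * (19 - 2 - 2 = 15).
 */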
/* Allocate memory required by usable regulators */
regulators = devm_kzalloc(&pdev->dev, struct_size(regulators,
regulator, n_regulators), GFP_KERNEL);
if (!regulators)
return -ENOMEM;
regulators->n_regulators = n_regulators;
platform_set_drvdata(pdev, regulators);
/* Register all regulators declared in platform information */
n = 0;
id = 0;
while (n < regulators->n_regulators) {
/* Skip regulator IDs depending on merge mode configuration */
switch (id) {
case DA9063_ID_BCORE1:
case DA9063_ID_BCORE2:
if (bcores_merged) {
id++;
continue;
}
break;
case DA9063_ID_BMEM:
case DA9063_ID_BIO:
if (bmem_bio_merged) {
id++;
continue;
}
break;
case DA9063_ID_BCORES_MERGED:
if (!bcores_merged) {
id++;
continue;
}
break;
case DA9063_ID_BMEM_BIO_MERGED:
if (!bmem_bio_merged) {
id++;
continue;
}
break;
}
/* Initialise regulator structure */
regl = &regulators->regulator[n];
regl->hw = da9063;
regl->info = &model->regulator_info[id];
regl->desc = regl->info->desc;
regl->desc.type = REGULATOR_VOLTAGE;
regl->desc.owner = THIS_MODULE;
if (regl->info->mode.reg) {
regl->mode = devm_regmap_field_alloc(&pdev->dev,
da9063->regmap, regl->info->mode);
if (IS_ERR(regl->mode))
return PTR_ERR(regl->mode);
}
if (regl->info->suspend.reg) {
regl->suspend = devm_regmap_field_alloc(&pdev->dev,
da9063->regmap, regl->info->suspend);
if (IS_ERR(regl->suspend))
return PTR_ERR(regl->suspend);
}
if (regl->info->sleep.reg) {
regl->sleep = devm_regmap_field_alloc(&pdev->dev,
da9063->regmap, regl->info->sleep);
if (IS_ERR(regl->sleep))
return PTR_ERR(regl->sleep);
}
if (regl->info->suspend_sleep.reg) {
regl->suspend_sleep = devm_regmap_field_alloc(&pdev->dev,
da9063->regmap, regl->info->suspend_sleep);
if (IS_ERR(regl->suspend_sleep))
return PTR_ERR(regl->suspend_sleep);
}
if (regl->info->vmon.reg) {
regl->vmon = devm_regmap_field_alloc(&pdev->dev,
da9063->regmap, regl->info->vmon);
if (IS_ERR(regl->vmon))
return PTR_ERR(regl->vmon);
}
/* Register regulator */
memset(&config, 0, sizeof(config));
config.dev = &pdev->dev;
config.init_data = da9063_get_regulator_initdata(regl_pdata, id);
config.driver_data = regl;
if (da9063_reg_matches)
config.of_node = da9063_reg_matches[id].of_node;
config.regmap = da9063->regmap;
/* Checking constraints requires init_data from DT. */
if (config.init_data) {
ret = da9063_check_xvp_constraints(&config);
if (ret)
return ret;
}
regl->rdev = devm_regulator_register(&pdev->dev, &regl->desc,
&config);
if (IS_ERR(regl->rdev)) {
dev_err(&pdev->dev,
"Failed to register %s regulator\n",
regl->desc.name);
return PTR_ERR(regl->rdev);
}
id++;
n++;
}
/* LDOs overcurrent event support */
irq = platform_get_irq_byname(pdev, "LDO_LIM");
if (irq < 0)
return irq;
ret = devm_request_threaded_irq(&pdev->dev, irq,
NULL, da9063_ldo_lim_event,
IRQF_TRIGGER_LOW | IRQF_ONESHOT,
"LDO_LIM", regulators);
if (ret)
dev_err(&pdev->dev, "Failed to request LDO_LIM IRQ.\n");
return ret;
}
static struct platform_driver da9063_regulator_driver = {
.driver = {
.name = DA9063_DRVNAME_REGULATORS,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = da9063_regulator_probe,
};
static int __init da9063_regulator_init(void)
{
return platform_driver_register(&da9063_regulator_driver);
}
subsys_initcall(da9063_regulator_init);
static void __exit da9063_regulator_cleanup(void)
{
platform_driver_unregister(&da9063_regulator_driver);
}
module_exit(da9063_regulator_cleanup);
/* Module information */
MODULE_AUTHOR("Krystian Garbaciak <[email protected]>");
MODULE_DESCRIPTION("DA9063 regulators driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DA9063_DRVNAME_REGULATORS);
| linux-master | drivers/regulator/da9063-regulator.c |